mirror of https://github.com/tahoe-lafs/tahoe-lafs.git
Enable BytesWarning across all tests ported to Python 3, fixing problems that it caught.
parent 298d3bc9e0
commit 70c0607789
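Background for the diff below: Python only emits BytesWarning when the interpreter runs with -b (or -bb), and it fires on implicit bytes-to-str coercions such as str(some_bytes) or "%s" % some_bytes. A minimal sketch of the bug class this commit fixes (the names here are illustrative, not taken from the commit):

    # Run as: python -b demo.py
    import warnings
    warnings.simplefilter("error", BytesWarning)  # escalate warnings to exceptions

    server_name = b"ysbz4st7"  # many tahoe identifiers are bytes on Python 3

    try:
        print("[%s] got shares" % server_name)  # implicit str(bytes): BytesWarning
    except BytesWarning:
        print("caught the formatting bug")

    # The two repairs used throughout the diff: render with %r ...
    print("[%r] got shares" % server_name)
    # ... or decode to text before formatting.
    print("[%s] got shares" % str(server_name, "utf-8"))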
@@ -30,5 +30,5 @@ def remove_prefix(s_bytes, prefix):
    if s_bytes.startswith(prefix):
        return s_bytes[len(prefix):]
    raise BadPrefixError(
-       "did not see expected '{}' prefix".format(prefix)
+       "did not see expected '{!r}' prefix".format(prefix)
    )
@@ -164,8 +164,10 @@ class CompleteBinaryTreeMixin(object):
    def dump(self):
        lines = []
        for i,depth in self.depth_first():
-           lines.append("%s%3d: %s" % (" "*depth, i,
-                                       base32.b2a_or_none(self[i])))
+           value = base32.b2a_or_none(self[i])
+           if value is not None:
+               value = str(value, "utf-8")
+           lines.append("%s%3d: %s" % (" "*depth, i, value))
        return "\n".join(lines) + "\n"

    def get_leaf_index(self, leafnum):
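The dump() change above is the commit's other recurring pattern: base32.b2a() and b2a_or_none() return bytes, so a value headed into a text template is decoded first, guarding the None case. A standalone sketch, with a stand-in for the base32 helper since allmydata is not imported here:

    def b2a_or_none(data):
        # stand-in for allmydata.util.base32.b2a_or_none: bytes in, bytes (or None) out
        return None if data is None else b"x5kkmig6dtffnr54"

    lines = []
    for i, blob in enumerate([b"\x00\x01", None]):
        value = b2a_or_none(blob)
        if value is not None:
            value = str(value, "utf-8")  # base32 output is ASCII, so this is safe
        lines.append("%3d: %s" % (i, value))  # no bytes reach "%s" now
    print("\n".join(lines))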
@@ -67,12 +67,12 @@ class ValidatedExtendedURIProxy(object):
        self.crypttext_hash = None

    def __str__(self):
-       return "<%s %s>" % (self.__class__.__name__, self._verifycap.to_string())
+       return "<%s %r>" % (self.__class__.__name__, self._verifycap.to_string())

    def _check_integrity(self, data):
        h = uri_extension_hash(data)
        if h != self._verifycap.uri_extension_hash:
-           msg = ("The copy of uri_extension we received from %s was bad: wanted %s, got %s" %
+           msg = ("The copy of uri_extension we received from %s was bad: wanted %r, got %r" %
                   (self._readbucketproxy,
                    base32.b2a(self._verifycap.uri_extension_hash),
                    base32.b2a(h)))
@@ -234,7 +234,7 @@ class ValidatedReadBucketProxy(log.PrefixingLogMixin):
           UEB"""
        precondition(share_hash_tree[0] is not None, share_hash_tree)
        prefix = "%d-%s-%s" % (sharenum, bucket,
-                              base32.b2a(share_hash_tree[0][:8])[:12])
+                              str(base32.b2a(share_hash_tree[0][:8])[:12], "ascii"))
        log.PrefixingLogMixin.__init__(self,
                                       facility="tahoe.immutable.download",
                                       prefix=prefix)
@@ -427,7 +427,7 @@ class ValidatedReadBucketProxy(log.PrefixingLogMixin):
                     received from the remote peer were bad.""")
            self.log(" have candidate_share_hash: %s" % bool(candidate_share_hash))
            self.log(" block length: %d" % len(blockdata))
-           self.log(" block hash: %s" % base32.b2a_or_none(blockhash))
+           self.log(" block hash: %r" % base32.b2a_or_none(blockhash))
            if len(blockdata) < 100:
                self.log(" block data: %r" % (blockdata,))
            else:
@@ -127,7 +127,7 @@ class SegmentFetcher(object):
                # we could have sent something if we'd been allowed to pull
                # more shares per server. Increase the limit and try again.
                self._max_shares_per_server += 1
-               log.msg("SegmentFetcher(%s) increasing diversity limit to %d"
+               log.msg("SegmentFetcher(%r) increasing diversity limit to %d"
                        % (self._node._si_prefix, self._max_shares_per_server),
                        level=log.NOISY, umid="xY2pBA")
                # Also ask for more shares, in the hopes of achieving better
@@ -500,7 +500,7 @@ class DownloadNode(object):
            return (offset, segment, decodetime)
        except (BadHashError, NotEnoughHashesError):
            format = ("hash failure in ciphertext_hash_tree:"
-                     " segnum=%(segnum)d, SI=%(si)s")
+                     " segnum=%(segnum)d, SI=%(si)r")
            log.msg(format=format, segnum=segnum, si=self._si_prefix,
                    failure=Failure(),
                    level=log.WEIRD, parent=self._lp, umid="MTwNnw")
@@ -120,7 +120,7 @@ class Segmentation(object):
            # we didn't get the first byte, so we can't use this segment
            log.msg("Segmentation handed wrong data:"
                    " want [%d-%d), given [%d-%d), for segnum=%d,"
-                   " for si=%s"
+                   " for si=%r"
                    % (self._offset, self._offset+self._size,
                       segment_start, segment_start+len(segment),
                       wanted_segnum, self._node._si_prefix),
@@ -108,7 +108,7 @@ class Share(object):
        self.had_corruption = False # for unit tests

    def __repr__(self):
-       return "Share(sh%d-on-%s)" % (self._shnum, self._server.get_name())
+       return "Share(sh%d-on-%r)" % (self._shnum, self._server.get_name())

    def is_alive(self):
        # XXX: reconsider. If the share sees a single error, should it remain
@@ -175,7 +175,7 @@ class WriteBucketProxy(object):
        self._offset_data = offset_data

    def __repr__(self):
-       return "<WriteBucketProxy for node %s>" % self._server.get_name()
+       return "<WriteBucketProxy for node %r>" % self._server.get_name()

    def put_header(self):
        return self._write(0, self._offset_data)
@@ -317,7 +317,7 @@ class ReadBucketProxy(object):
        return self._server.get_serverid()

    def __repr__(self):
-       return "<ReadBucketProxy %s to peer [%s] SI %s>" % \
+       return "<ReadBucketProxy %r to peer [%r] SI %r>" % \
            (id(self), self._server.get_name(), si_b2a(self._storage_index))

    def _start_if_needed(self):
@@ -81,7 +81,7 @@ class CHKCheckerAndUEBFetcher(object):
    def _got_response(self, buckets, server):
        # buckets is a dict: maps shum to an rref of the server who holds it
        shnums_s = ",".join([str(shnum) for shnum in buckets])
-       self.log("got_response: [%s] has %d shares (%s)" %
+       self.log("got_response: [%r] has %d shares (%s)" %
                 (server.get_name(), len(buckets), shnums_s),
                 level=log.NOISY)
        self._found_shares.update(buckets.keys())
@@ -167,7 +167,7 @@ class CHKUploadHelper(Referenceable, upload.CHKUploader): # type: ignore # warn
        self._upload_status.set_storage_index(storage_index)
        self._upload_status.set_status("fetching ciphertext")
        self._upload_status.set_progress(0, 1.0)
-       self._helper.log("CHKUploadHelper starting for SI %s" % self._upload_id,
+       self._helper.log("CHKUploadHelper starting for SI %r" % self._upload_id,
                         parent=log_number)

        self._storage_broker = storage_broker
@@ -278,7 +278,7 @@ class ServerTracker(object):
        self.cancel_secret = bucket_cancel_secret

    def __repr__(self):
-       return ("<ServerTracker for server %s and SI %s>"
+       return ("<ServerTracker for server %r and SI %r>"
                % (self._server.get_name(), si_b2a(self.storage_index)[:5]))

    def get_server(self):
@@ -338,7 +338,7 @@ class ServerTracker(object):


def str_shareloc(shnum, bucketwriter):
-   return "%s: %s" % (shnum, bucketwriter.get_servername(),)
+   return "%s: %s" % (shnum, ensure_str(bucketwriter.get_servername()),)


@implementer(IPeerSelector)
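six.ensure_str, used in the str_shareloc fix above (its import is not visible in this excerpt but presumably arrives with the same commit), returns native str on both Python 2 and 3, decoding bytes as UTF-8 by default and passing text through unchanged. Roughly:

    from six import ensure_str

    assert ensure_str(b"ysbz4st7") == "ysbz4st7"  # bytes are decoded
    assert ensure_str("ysbz4st7") == "ysbz4st7"   # text passes through
    # so "%s" interpolation of get_servername() no longer trips BytesWarning
    # when the name arrives as bytes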
@@ -590,7 +590,7 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin):
            d = timeout_call(self._reactor, tracker.ask_about_existing_shares(), 15)
            d.addBoth(self._handle_existing_response, tracker)
            ds.append(d)
-           self.log("asking server %s for any existing shares" %
+           self.log("asking server %r for any existing shares" %
                     (tracker.get_name(),), level=log.NOISY)

        for tracker in write_trackers:
@@ -605,7 +605,7 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin):
            d.addErrback(timed_out, tracker)
            d.addBoth(self._handle_existing_write_response, tracker, set())
            ds.append(d)
-           self.log("asking server %s for any existing shares" %
+           self.log("asking server %r for any existing shares" %
                     (tracker.get_name(),), level=log.NOISY)

        trackers = set(write_trackers) | set(readonly_trackers)
@@ -749,7 +749,7 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin):
            buckets = res
            if buckets:
                self.serverids_with_shares.add(serverid)
-           self.log("response to get_buckets() from server %s: alreadygot=%s"
+           self.log("response to get_buckets() from server %r: alreadygot=%s"
                     % (tracker.get_name(), tuple(sorted(buckets))),
                     level=log.NOISY)
            for bucket in buckets:
@@ -818,7 +818,7 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin):
                self.homeless_shares.remove(shnum)

        if self._status:
-           self._status.set_status("Contacting Servers [%s] (first query),"
+           self._status.set_status("Contacting Servers [%r] (first query),"
                                    " %d shares left.."
                                    % (tracker.get_name(),
                                       len(self.homeless_shares)))
@@ -845,7 +845,7 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin):

        else:
            (alreadygot, allocated) = res
-           self.log("response to allocate_buckets() from server %s: alreadygot=%s, allocated=%s"
+           self.log("response to allocate_buckets() from server %r: alreadygot=%s, allocated=%s"
                     % (tracker.get_name(),
                        tuple(sorted(alreadygot)), tuple(sorted(allocated))),
                     level=log.NOISY)
@@ -300,7 +300,7 @@ class IntroducerService(service.MultiService, Referenceable):
                     level=log.UNUSUAL, umid="jfGMXQ")

    def remote_subscribe_v2(self, subscriber, service_name, subscriber_info):
-       self.log("introducer: subscription[%s] request at %s"
+       self.log("introducer: subscription[%r] request at %r"
                 % (service_name, subscriber), umid="U3uzLg")
        service_name = ensure_text(service_name)
        subscriber_info = dictutil.UnicodeKeyDict({
@@ -9,6 +9,7 @@ from __future__ import unicode_literals
 from future.utils import PY2
 if PY2:
     from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+from six import ensure_str

 from allmydata.uri import from_string
 from allmydata.util import base32, log, dictutil
@@ -202,7 +203,7 @@ class MutableChecker(object):
                serverid = server.get_serverid()
                locator = (server, self._storage_index, shnum)
                corrupt_share_locators.append(locator)
-               s = "%s-sh%d" % (server.get_name(), shnum)
+               s = "%s-sh%d" % (ensure_str(server.get_name()), shnum)
                if f.check(CorruptShareError):
                    ft = f.value.reason
                else:
@@ -63,7 +63,7 @@ class CorruptShareError(BadShareError):
        self.shnum = shnum
        self.reason = reason
    def __str__(self):
-       return "<CorruptShareError server=%s shnum[%d]: %s" % \
+       return "<CorruptShareError server=%r shnum[%d]: %s" % \
            (self.server.get_name(), self.shnum, self.reason)

class UnknownVersionError(BadShareError):
@@ -98,7 +98,7 @@ class MutableFileNode(object):

    def __repr__(self):
        if hasattr(self, '_uri'):
-           return "<%s %x %s %s>" % (self.__class__.__name__, id(self), self.is_readonly() and 'RO' or 'RW', self._uri.abbrev())
+           return "<%s %x %s %r>" % (self.__class__.__name__, id(self), self.is_readonly() and 'RO' or 'RW', self._uri.abbrev())
        else:
            return "<%s %x %s %s>" % (self.__class__.__name__, id(self), None, None)

@@ -915,7 +915,7 @@ class Publish(object):
    def log_goal(self, goal, message=""):
        logmsg = [message]
        for (shnum, server) in sorted([(s,p) for (p,s) in goal], key=lambda t: (id(t[0]), id(t[1]))):
-           logmsg.append("sh%d to [%s]" % (shnum, server.get_name()))
+           logmsg.append("sh%d to [%r]" % (shnum, server.get_name()))
        self.log("current goal: %s" % (", ".join(logmsg)), level=log.NOISY)
        self.log("we are planning to push new seqnum=#%d" % self._new_seqnum,
                 level=log.NOISY)
@@ -999,7 +999,7 @@ class Publish(object):
            return

        server = writer.server
-       lp = self.log("_got_write_answer from %s, share %d" %
+       lp = self.log("_got_write_answer from %r, share %d" %
                      (server.get_name(), writer.shnum))

        now = time.time()
@@ -1135,14 +1135,14 @@ class Publish(object):
            (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
             offsets_tuple) = expected_version
            msg = ("somebody modified the share on us:"
-                  " shnum=%d: I thought they had #%d:R=%s," %
+                  " shnum=%d: I thought they had #%d:R=%r," %
                   (shnum,
                    seqnum, base32.b2a(root_hash)[:4]))
            if unknown_format:
                msg += (" but I don't know how to read share"
                        " format %d" % version)
            else:
-               msg += " but testv reported #%d:R=%s" % \
+               msg += " but testv reported #%d:R=%r" % \
                       (other_seqnum, base32.b2a(other_roothash)[:4])
            self.log(msg, parent=lp, level=log.NOISY)
            # if expected_version==None, then we didn't expect to see a
@@ -574,7 +574,7 @@ class Retrieve(object):
        remote server (with no guarantee of success) that its share is
        corrupt.
        """
-       self.log("marking share %d on server %s as bad" % \
+       self.log("marking share %d on server %r as bad" % \
                 (shnum, server.get_name()))
        prefix = self.verinfo[-2]
        self.servermap.mark_bad_share(server, shnum, prefix)
@@ -11,6 +11,7 @@ if PY2:
     # Doesn't import str to prevent API leakage on Python 2
     from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, max, min  # noqa: F401
 from past.builtins import unicode
+from six import ensure_str

 import sys, time, copy
 from zope.interface import implementer
@@ -202,8 +203,8 @@ class ServerMap(object):
            (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
             offsets_tuple) = verinfo
            print("[%s]: sh#%d seq%d-%s %d-of-%d len%d" %
-                 (server.get_name(), shnum,
-                  seqnum, base32.b2a(root_hash)[:4], k, N,
+                 (unicode(server.get_name(), "utf-8"), shnum,
+                  seqnum, unicode(base32.b2a(root_hash)[:4], "utf-8"), k, N,
                   datalength), file=out)
        if self._problems:
            print("%d PROBLEMS" % len(self._problems), file=out)
@@ -869,7 +870,7 @@ class ServermapUpdater(object):
        # versions.
        self.log(" found valid version %d-%s from %s-sh%d: %d-%d/%d/%d"
                 % (seqnum, unicode(base32.b2a(root_hash)[:4], "utf-8"),
-                   server.get_name(), shnum,
+                   ensure_str(server.get_name()), shnum,
                    k, n, segsize, datalen),
                 parent=lp)
        self._valid_versions.add(verinfo)
@@ -943,13 +944,13 @@ class ServermapUpdater(object):
        alleged_privkey_s = self._node._decrypt_privkey(enc_privkey)
        alleged_writekey = hashutil.ssk_writekey_hash(alleged_privkey_s)
        if alleged_writekey != self._node.get_writekey():
-           self.log("invalid privkey from %s shnum %d" %
+           self.log("invalid privkey from %r shnum %d" %
                     (server.get_name(), shnum),
                     parent=lp, level=log.WEIRD, umid="aJVccw")
            return

        # it's good
-       self.log("got valid privkey from shnum %d on serverid %s" %
+       self.log("got valid privkey from shnum %d on serverid %r" %
                 (shnum, server.get_name()),
                 parent=lp)
        privkey, _ = rsa.create_signing_keypair_from_string(alleged_privkey_s)
@@ -1211,9 +1212,9 @@ class ServermapUpdater(object):
                break
            more_queries.append(self.extra_servers.pop(0))

-       self.log(format="sending %(more)d more queries: %(who)s",
+       self.log(format="sending %(more)d more queries: %(who)d",
                 more=len(more_queries),
-                who=" ".join(["[%s]" % s.get_name() for s in more_queries]),
+                who=" ".join(["[%r]" % s.get_name() for s in more_queries]),
                 level=log.NOISY)

        for server in more_queries:
@@ -6,6 +6,7 @@ except ImportError:
     pass

 from future.utils import bchr
+from past.builtins import unicode

 # do not import any allmydata modules at this level. Do that from inside
 # individual functions instead.
@@ -90,27 +91,34 @@ def dump_immutable_chk_share(f, out, options):
             "crypttext_hash", "crypttext_root_hash",
             "share_root_hash", "UEB_hash")
    display_keys = {"size": "file_size"}
+
+   def to_string(v):
+       if isinstance(v, bytes):
+           return unicode(v, "utf-8")
+       else:
+           return str(v)
+
    for k in keys1:
        if k in unpacked:
            dk = display_keys.get(k, k)
-           print("%20s: %s" % (dk, unpacked[k]), file=out)
+           print("%20s: %s" % (dk, to_string(unpacked[k])), file=out)
    print(file=out)
    for k in keys2:
        if k in unpacked:
            dk = display_keys.get(k, k)
-           print("%20s: %s" % (dk, unpacked[k]), file=out)
+           print("%20s: %s" % (dk, to_string(unpacked[k])), file=out)
    print(file=out)
    for k in keys3:
        if k in unpacked:
            dk = display_keys.get(k, k)
-           print("%20s: %s" % (dk, unpacked[k]), file=out)
+           print("%20s: %s" % (dk, to_string(unpacked[k])), file=out)

    leftover = set(unpacked.keys()) - set(keys1 + keys2 + keys3)
    if leftover:
        print(file=out)
        print("LEFTOVER:", file=out)
        for k in sorted(leftover):
-           print("%20s: %s" % (k, unpacked[k]), file=out)
+           print("%20s: %s" % (k, to_string(unpacked[k])), file=out)

    # the storage index isn't stored in the share itself, so we depend upon
    # knowing the parent directory name to get it
@@ -197,7 +205,7 @@ def dump_mutable_share(options):
    print(file=out)
    print("Mutable slot found:", file=out)
    print(" share_type: %s" % share_type, file=out)
-   print(" write_enabler: %s" % base32.b2a(WE), file=out)
+   print(" write_enabler: %s" % unicode(base32.b2a(WE), "utf-8"), file=out)
    print(" WE for nodeid: %s" % idlib.nodeid_b2a(nodeid), file=out)
    print(" num_extra_leases: %d" % num_extra_leases, file=out)
    print(" container_size: %d" % container_size, file=out)
@@ -209,8 +217,8 @@ def dump_mutable_share(options):
            print(" ownerid: %d" % lease.owner_num, file=out)
            when = format_expiration_time(lease.expiration_time)
            print(" expires in %s" % when, file=out)
-           print(" renew_secret: %s" % base32.b2a(lease.renew_secret), file=out)
-           print(" cancel_secret: %s" % base32.b2a(lease.cancel_secret), file=out)
+           print(" renew_secret: %s" % unicode(base32.b2a(lease.renew_secret), "utf-8"), file=out)
+           print(" cancel_secret: %s" % unicode(base32.b2a(lease.cancel_secret), "utf-8"), file=out)
            print(" secrets are for nodeid: %s" % idlib.nodeid_b2a(lease.nodeid), file=out)
    else:
        print("No leases.", file=out)
@@ -258,8 +266,8 @@ def dump_SDMF_share(m, length, options):

    print(" SDMF contents:", file=out)
    print(" seqnum: %d" % seqnum, file=out)
-   print(" root_hash: %s" % base32.b2a(root_hash), file=out)
-   print(" IV: %s" % base32.b2a(IV), file=out)
+   print(" root_hash: %s" % unicode(base32.b2a(root_hash), "utf-8"), file=out)
+   print(" IV: %s" % unicode(base32.b2a(IV), "utf-8"), file=out)
    print(" required_shares: %d" % k, file=out)
    print(" total_shares: %d" % N, file=out)
    print(" segsize: %d" % segsize, file=out)
@@ -352,7 +360,7 @@ def dump_MDMF_share(m, length, options):

    print(" MDMF contents:", file=out)
    print(" seqnum: %d" % seqnum, file=out)
-   print(" root_hash: %s" % base32.b2a(root_hash), file=out)
+   print(" root_hash: %s" % unicode(base32.b2a(root_hash), "utf-8"), file=out)
    #print(" IV: %s" % base32.b2a(IV), file=out)
    print(" required_shares: %d" % k, file=out)
    print(" total_shares: %d" % N, file=out)
@@ -745,7 +753,7 @@ def describe_share(abs_sharefile, si_s, shnum_s, now, out):

        print("SDMF %s %d/%d %d #%d:%s %d %s" % \
              (si_s, k, N, datalen,
-              seqnum, base32.b2a(root_hash),
+              seqnum, unicode(base32.b2a(root_hash), "utf-8"),
               expiration, quote_output(abs_sharefile)), file=out)
    elif share_type == "MDMF":
        from allmydata.mutable.layout import MDMFSlotReadProxy
@@ -774,7 +782,7 @@ def describe_share(abs_sharefile, si_s, shnum_s, now, out):
         offsets) = verinfo
        print("MDMF %s %d/%d %d #%d:%s %d %s" % \
              (si_s, k, N, datalen,
-              seqnum, base32.b2a(root_hash),
+              seqnum, unicode(base32.b2a(root_hash), "utf-8"),
               expiration, quote_output(abs_sharefile)), file=out)
    else:
        print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)
@@ -808,8 +816,8 @@ def describe_share(abs_sharefile, si_s, shnum_s, now, out):
        ueb_hash = unpacked["UEB_hash"]

        print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
-                                           ueb_hash, expiration,
+                                           unicode(ueb_hash, "utf-8"), expiration,
                                            quote_output(abs_sharefile)), file=out)

    else:
        print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out)
@@ -38,7 +38,6 @@ from future.utils import PY2
 if PY2:
     from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401

-
 import re, time, hashlib

 # On Python 2 this will be the backport.
|
|||||||
return self
|
return self
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return "<NativeStorageServer for %s>" % self.get_name()
|
return "<NativeStorageServer for %r>" % self.get_name()
|
||||||
def get_serverid(self):
|
def get_serverid(self):
|
||||||
return self._server_id
|
return self._server_id
|
||||||
def get_version(self):
|
def get_version(self):
|
||||||
@@ -844,10 +843,10 @@ class NativeStorageServer(service.MultiService):
        version = self.get_version()
        if version is None:
            return None
-       protocol_v1_version = version.get('http://allmydata.org/tahoe/protocols/storage/v1', UnicodeKeyDict())
-       available_space = protocol_v1_version.get('available-space')
+       protocol_v1_version = version.get(b'http://allmydata.org/tahoe/protocols/storage/v1', BytesKeyDict())
+       available_space = protocol_v1_version.get(b'available-space')
        if available_space is None:
-           available_space = protocol_v1_version.get('maximum-immutable-share-size', None)
+           available_space = protocol_v1_version.get(b'maximum-immutable-share-size', None)
        return available_space

    def start_connecting(self, trigger_cb):
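The get_available_space() fix above is a key-type problem rather than a formatting one: on Python 3 the version dictionary received from a storage server arrives keyed by bytes, so text-keyed lookups silently return the default. BytesKeyDict, the new empty default here, comes from allmydata.util.dictutil. A minimal illustration of the mismatch, with a plain dict standing in for the remote version info:

    version = {
        b'http://allmydata.org/tahoe/protocols/storage/v1': {
            b'available-space': 222,
        },
    }

    v1 = version.get('http://allmydata.org/tahoe/protocols/storage/v1', {})
    assert v1 == {}                             # old code: text key misses entirely
    v1 = version.get(b'http://allmydata.org/tahoe/protocols/storage/v1', {})
    assert v1.get(b'available-space') == 222    # fixed: bytes keys throughout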
@@ -14,13 +14,23 @@ Rather than defining interesting APIs for other code to use, this just causes
 some side-effects which make things better when the test suite runs.
 """

+from future.utils import PY3
+
+import warnings
+
 from traceback import extract_stack, format_list

 from foolscap.pb import Listener
 from twisted.python.log import err
 from twisted.application import service


 from foolscap.logging.incident import IncidentQualifier

+if PY3:
+    # Error on BytesWarnings, to catch things like str(b""), but only for
+    # allmydata code.
+    warnings.filterwarnings("error", category=BytesWarning, module="allmydata.*")
+
+
 class NonQualifier(IncidentQualifier, object):
     def check_event(self, ev):
         return False
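The filter added above is what arms the whole test suite: running under python -b makes the interpreter emit BytesWarning, and the "error" action escalates any such warning originating in an allmydata module into an exception (module= is matched as a regex against the warning's module name), while third-party code is left alone. A self-contained equivalent, scoped to a demo script rather than to allmydata:

    # Run as: python -b demo.py
    import warnings

    # same shape as the filter above, but scoped to this script's module
    warnings.filterwarnings("error", category=BytesWarning, module="__main__")

    try:
        str(b"storage index")  # under -b this emits BytesWarning ...
    except BytesWarning:
        print("... which the filter escalates, so a test would fail loudly")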
@@ -114,9 +114,9 @@ class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
        # with problems and display them separately
        gotmods = [mo.span() for mo in re.finditer(b'([A-Z]+)', got)]
        expmods = [mo.span() for mo in re.finditer(b'([A-Z]+)', expected)]
-       gotspans = ["%d:%d=%s" % (start,end,got[start:end])
+       gotspans = ["%d:%d=%r" % (start,end,got[start:end])
                    for (start,end) in gotmods]
-       expspans = ["%d:%d=%s" % (start,end,expected[start:end])
+       expspans = ["%d:%d=%r" % (start,end,expected[start:end])
                    for (start,end) in expmods]
        #print("expecting: %s" % expspans)

@@ -14,6 +14,8 @@ from future.utils import PY2
 if PY2:
     from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401

+import warnings
+
 from twisted.python.reflect import (
     namedModule,
 )
@@ -118,17 +118,17 @@ class NativeStorageServerWithVersion(NativeStorageServer): # type: ignore # ta
 class TestNativeStorageServer(unittest.TestCase):
     def test_get_available_space_new(self):
         nss = NativeStorageServerWithVersion(
-            { "http://allmydata.org/tahoe/protocols/storage/v1":
-                { "maximum-immutable-share-size": 111,
-                  "available-space": 222,
+            { b"http://allmydata.org/tahoe/protocols/storage/v1":
+                { b"maximum-immutable-share-size": 111,
+                  b"available-space": 222,
                 }
             })
         self.failUnlessEqual(nss.get_available_space(), 222)

     def test_get_available_space_old(self):
         nss = NativeStorageServerWithVersion(
-            { "http://allmydata.org/tahoe/protocols/storage/v1":
-                { "maximum-immutable-share-size": 111,
+            { b"http://allmydata.org/tahoe/protocols/storage/v1":
+                { b"maximum-immutable-share-size": 111,
                 }
             })
         self.failUnlessEqual(nss.get_available_space(), 111)
@@ -1072,7 +1072,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
        d.addCallback(_do_upload)
        def _upload_done(results):
            theuri = results.get_uri()
-           log.msg("upload finished: uri is %s" % (theuri,))
+           log.msg("upload finished: uri is %r" % (theuri,))
            self.uri = theuri
            assert isinstance(self.uri, bytes), self.uri
            self.cap = uri.from_string(self.uri)
@@ -1144,7 +1144,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                              " overdue= unused= need 3. Last failure: None")
            msg2 = msgbase + (" ran out of shares:"
                              " complete="
-                             " pending=Share(sh0-on-ysbz4st7)"
+                             " pending=Share(sh0-on-'ysbz4st7')"
                              " overdue= unused= need 3. Last failure: None")
            self.failUnless(body == msg1 or body == msg2, body)
        d.addCallback(_check_one_share)
@@ -99,7 +99,7 @@ class CHKFileURI(_BaseURI):
    def init_from_string(cls, uri):
        mo = cls.STRING_RE.search(uri)
        if not mo:
-           raise BadURIError("'%s' doesn't look like a %s cap" % (uri, cls))
+           raise BadURIError("%r doesn't look like a %s cap" % (uri, cls))
        return cls(base32.a2b(mo.group(1)), base32.a2b(mo.group(2)),
                   int(mo.group(3)), int(mo.group(4)), int(mo.group(5)))

@@ -290,7 +290,7 @@ class ReadonlySSKFileURI(_BaseURI):
    def init_from_string(cls, uri):
        mo = cls.STRING_RE.search(uri)
        if not mo:
-           raise BadURIError("'%s' doesn't look like a %s cap" % (uri, cls))
+           raise BadURIError("%r doesn't look like a %s cap" % (uri, cls))
        return cls(base32.a2b(mo.group(1)), base32.a2b(mo.group(2)))

    def to_string(self):
@@ -300,7 +300,7 @@ class ReadonlySSKFileURI(_BaseURI):
                            base32.b2a(self.fingerprint))

    def __repr__(self):
-       return "<%s %s>" % (self.__class__.__name__, self.abbrev())
+       return "<%s %r>" % (self.__class__.__name__, self.abbrev())

    def abbrev(self):
        return base32.b2a(self.readkey[:5])
@@ -336,7 +336,7 @@ class SSKVerifierURI(_BaseURI):
    def init_from_string(cls, uri):
        mo = cls.STRING_RE.search(uri)
        if not mo:
-           raise BadURIError("'%s' doesn't look like a %s cap" % (uri, cls))
+           raise BadURIError("%r doesn't look like a %s cap" % (uri, cls))
        return cls(si_a2b(mo.group(1)), base32.a2b(mo.group(2)))

    def to_string(self):
@@ -375,7 +375,7 @@ class WriteableMDMFFileURI(_BaseURI):
    def init_from_string(cls, uri):
        mo = cls.STRING_RE.search(uri)
        if not mo:
-           raise BadURIError("'%s' doesn't look like a %s cap" % (uri, cls))
+           raise BadURIError("%r doesn't look like a %s cap" % (uri, cls))
        return cls(base32.a2b(mo.group(1)), base32.a2b(mo.group(2)))

    def to_string(self):
@@ -386,7 +386,7 @@ class WriteableMDMFFileURI(_BaseURI):
        return ret

    def __repr__(self):
-       return "<%s %s>" % (self.__class__.__name__, self.abbrev())
+       return "<%s %r>" % (self.__class__.__name__, self.abbrev())

    def abbrev(self):
        return base32.b2a(self.writekey[:5])
@@ -423,7 +423,7 @@ class ReadonlyMDMFFileURI(_BaseURI):
    def init_from_string(cls, uri):
        mo = cls.STRING_RE.search(uri)
        if not mo:
-           raise BadURIError("'%s' doesn't look like a %s cap" % (uri, cls))
+           raise BadURIError("%r doesn't look like a %s cap" % (uri, cls))

        return cls(base32.a2b(mo.group(1)), base32.a2b(mo.group(2)))

@@ -435,7 +435,7 @@ class ReadonlyMDMFFileURI(_BaseURI):
        return ret

    def __repr__(self):
-       return "<%s %s>" % (self.__class__.__name__, self.abbrev())
+       return "<%s %r>" % (self.__class__.__name__, self.abbrev())

    def abbrev(self):
        return base32.b2a(self.readkey[:5])
@@ -471,7 +471,7 @@ class MDMFVerifierURI(_BaseURI):
    def init_from_string(cls, uri):
        mo = cls.STRING_RE.search(uri)
        if not mo:
-           raise BadURIError("'%s' doesn't look like a %s cap" % (uri, cls))
+           raise BadURIError("%r doesn't look like a %s cap" % (uri, cls))
        return cls(si_a2b(mo.group(1)), base32.a2b(mo.group(2)))

    def to_string(self):
@@ -500,13 +500,13 @@ class _DirectoryBaseURI(_BaseURI):
        self._filenode_uri = filenode_uri

    def __repr__(self):
-       return "<%s %s>" % (self.__class__.__name__, self.abbrev())
+       return "<%s %r>" % (self.__class__.__name__, self.abbrev())

    @classmethod
    def init_from_string(cls, uri):
        mo = cls.BASE_STRING_RE.search(uri)
        if not mo:
-           raise BadURIError("'%s' doesn't look like a %s cap" % (uri, cls))
+           raise BadURIError("%r doesn't look like a %s cap" % (uri, cls))
        bits = uri[mo.end():]
        fn = cls.INNER_URI_CLASS.init_from_string(
            cls.INNER_URI_CLASS.BASE_STRING+bits)
@@ -76,7 +76,7 @@ class UploadResultsRendererMixin(Element):
            ul = tags.ul()
            for server, shnums in sorted(servermap.items(), key=id):
                shares_s = ",".join(["#%d" % shnum for shnum in shnums])
-               ul(tags.li("[%s] got share%s: %s" % (server.get_name(),
+               ul(tags.li("[%s] got share%s: %s" % (unicode(server.get_name(), "utf-8"),
                                                     plural(shnums), shares_s)))
            return ul
        d.addCallback(_render)
@@ -230,7 +230,9 @@ class UploadStatusElement(UploadResultsRendererMixin):
        si_s = base32.b2a_or_none(self._upload_status.get_storage_index())
        if si_s is None:
            si_s = "(None)"
-       return tag(str(si_s))
+       else:
+           si_s = unicode(si_s, "utf-8")
+       return tag(si_s)

    @renderer
    def helper(self, req, tag):
@@ -920,7 +922,7 @@ class RetrieveStatusElement(Element):
        for server in sorted(per_server.keys(), key=lambda s: s.get_name()):
            times_s = ", ".join([abbreviate_time(t)
                                 for t in per_server[server]])
-           l(tags.li("[%s]: %s" % (server.get_name(), times_s)))
+           l(tags.li("[%s]: %s" % (unicode(server.get_name(), "utf-8"), times_s)))
        return tags.li("Per-Server Fetch Response Times: ", l)

@@ -958,7 +960,9 @@ class PublishStatusElement(Element):
        si_s = base32.b2a_or_none(self._publish_status.get_storage_index())
        if si_s is None:
            si_s = "(None)"
-       return tag(str(si_s))
+       else:
+           si_s = unicode("utf-8")
+       return tag(si_s)

    @renderer
    def helper(self, req, tag):
@@ -996,7 +1000,7 @@ class PublishStatusElement(Element):
        sharemap = servermap.make_sharemap()
        for shnum in sorted(sharemap.keys()):
            l(tags.li("%d -> Placed on " % shnum,
-                     ", ".join(["[%s]" % server.get_name()
+                     ", ".join(["[%s]" % unicode(server.get_name(), "utf-8")
                                for server in sharemap[shnum]])))
        return tag("Sharemap:", l)

@@ -1078,7 +1082,7 @@ class PublishStatusElement(Element):
        for server in sorted(per_server.keys(), key=lambda s: s.get_name()):
            times_s = ", ".join([abbreviate_time(t)
                                 for t in per_server[server]])
-           l(tags.li("[%s]: %s" % (server.get_name(), times_s)))
+           l(tags.li("[%s]: %s" % (unicode(server.get_name(), "utf-8"), times_s)))
        return tags.li("Per-Server Response Times: ", l)

@@ -1204,7 +1208,7 @@ class MapupdateStatusElement(Element):
            else:
                times.append("privkey(" + abbreviate_time(t) + ")")
            times_s = ", ".join(times)
-           l(tags.li("[%s]: %s" % (server.get_name(), times_s)))
+           l(tags.li("[%s]: %s" % (unicode(server.get_name(), "utf-8"), times_s)))
        return tags.li("Per-Server Response Times: ", l)

tox.ini
@@ -44,7 +44,7 @@ deps =
 # more useful results.
 usedevelop = False
 # We use extras=test to get things like "mock" that are required for our unit
-# tests.
+# tests.8
 extras = test

 setenv =
@@ -62,20 +62,19 @@ commands =

     tahoe --version

-    !coverage: trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors} {posargs:{env:TEST_SUITE}}
+    # Run tests with -b to catch bugs like `"%s" % (some_bytes,)`. -b makes
+    # Python emit BytesWarnings, and warnings configuration in
+    # src/allmydata/tests/__init__.py turns allmydata's BytesWarnings into
+    # exceptions.
+    !coverage: python -b -m twisted.trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors} {posargs:{env:TEST_SUITE}}

     # measuring coverage is somewhat slower than not measuring coverage
     # so only do it on request.
-    coverage: coverage run -m twisted.trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors --reporter=timing} {posargs:{env:TEST_SUITE}}
+    coverage: python -b -m coverage run -m twisted.trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors --reporter=timing} {posargs:{env:TEST_SUITE}}
     coverage: coverage combine
     coverage: coverage xml
     coverage: coverage report

-    # Also run tests with -bb to catch bugs like `"%s" % (some_bytes,)`.
-    # Eventually everything should run with this, but so far only fixed
-    # some of the Python3-ported modules.
-    python -bb -m twisted.trial --rterrors allmydata.test.web
-
 [testenv:integration]
 setenv =
     COVERAGE_PROCESS_START=.coveragerc
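The tox change above swaps bare trial for python -b -m twisted.trial, so the BytesWarning machinery is armed on every run instead of only in the extra -bb pass over allmydata.test.web that this commit removes. A quick sanity check usable from inside a test (sys.flags is standard library):

    import sys

    # 0 without -b, 1 under -b, 2 under -bb; the new tox commands give 1
    assert sys.flags.bytes_warning >= 1, "not running under python -b"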