Merge pull request #799 from tahoe-lafs/3397.test-storage-python-3

Port allmydata.test.test_storage to Python 3

Fixes ticket:3397
Itamar Turner-Trauring 2020-09-08 15:32:06 -04:00 committed by GitHub
commit 4c90247d99
8 changed files with 558 additions and 520 deletions

newsfragments/3397.minor (new file, empty)

src/allmydata/immutable/layout.py

@@ -171,7 +171,7 @@ class WriteBucketProxy(object):
     def put_block(self, segmentnum, data):
         offset = self._offsets['data'] + segmentnum * self._block_size
         assert offset + len(data) <= self._offsets['uri_extension']
-        assert isinstance(data, str)
+        assert isinstance(data, bytes)
         if segmentnum < self._num_segments-1:
             precondition(len(data) == self._block_size,
                          len(data), self._block_size)
@@ -185,7 +185,7 @@ class WriteBucketProxy(object):
     def put_crypttext_hashes(self, hashes):
         offset = self._offsets['crypttext_hash_tree']
         assert isinstance(hashes, list)
-        data = "".join(hashes)
+        data = b"".join(hashes)
         precondition(len(data) == self._segment_hash_size,
                      len(data), self._segment_hash_size)
         precondition(offset + len(data) <= self._offsets['block_hashes'],
@@ -196,7 +196,7 @@ class WriteBucketProxy(object):
     def put_block_hashes(self, blockhashes):
         offset = self._offsets['block_hashes']
         assert isinstance(blockhashes, list)
-        data = "".join(blockhashes)
+        data = b"".join(blockhashes)
         precondition(len(data) == self._segment_hash_size,
                      len(data), self._segment_hash_size)
         precondition(offset + len(data) <= self._offsets['share_hashes'],
@@ -209,7 +209,7 @@ class WriteBucketProxy(object):
         # as 2+32=34 bytes each
         offset = self._offsets['share_hashes']
         assert isinstance(sharehashes, list)
-        data = "".join([struct.pack(">H", hashnum) + hashvalue
+        data = b"".join([struct.pack(">H", hashnum) + hashvalue
                         for hashnum,hashvalue in sharehashes])
         precondition(len(data) == self._share_hashtree_size,
                      len(data), self._share_hashtree_size)
@@ -220,7 +220,7 @@ class WriteBucketProxy(object):
     def put_uri_extension(self, data):
         offset = self._offsets['uri_extension']
-        assert isinstance(data, str)
+        assert isinstance(data, bytes)
         precondition(len(data) <= self._uri_extension_size_max,
                      len(data), self._uri_extension_size_max)
         length = struct.pack(self.fieldstruct, len(data))

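The hunks above show the core pattern of this port: share containers hold binary data, so type assertions move from str to bytes and string joins become bytes joins. A minimal standalone sketch of why (illustrative, not code from the patch):

    import struct

    # struct.pack() returns bytes on Python 3, so hash-tree entries are
    # bytes, and only b"".join() can concatenate them.
    packed = struct.pack(">H", 7) + b"\x00" * 32  # 2-byte hashnum + 32-byte hash
    assert isinstance(packed, bytes)
    assert len(b"".join([packed, packed])) == 68

    try:
        "".join([packed])  # the old Python 2 spelling
    except TypeError:
        pass               # str.join() rejects bytes on Python 3
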
src/allmydata/mutable/layout.py

@@ -1,3 +1,4 @@
+from past.utils import old_div
 import struct
 from allmydata.mutable.common import NeedMoreDataError, UnknownVersionError, \
@@ -180,11 +181,11 @@ def pack_offsets(verification_key_length, signature_length,
 def pack_share(prefix, verification_key, signature,
                share_hash_chain, block_hash_tree,
                share_data, encprivkey):
-    share_hash_chain_s = "".join([struct.pack(">H32s", i, share_hash_chain[i])
-                                  for i in sorted(share_hash_chain.keys())])
+    share_hash_chain_s = b"".join([struct.pack(">H32s", i, share_hash_chain[i])
+                                   for i in sorted(share_hash_chain.keys())])
     for h in block_hash_tree:
         assert len(h) == 32
-    block_hash_tree_s = "".join(block_hash_tree)
+    block_hash_tree_s = b"".join(block_hash_tree)
 
     offsets = pack_offsets(len(verification_key),
                            len(signature),
@@ -192,14 +193,14 @@ def pack_share(prefix, verification_key, signature,
                            len(block_hash_tree_s),
                            len(share_data),
                            len(encprivkey))
-    final_share = "".join([prefix,
-                           offsets,
-                           verification_key,
-                           signature,
-                           share_hash_chain_s,
-                           block_hash_tree_s,
-                           share_data,
-                           encprivkey])
+    final_share = b"".join([prefix,
+                            offsets,
+                            verification_key,
+                            signature,
+                            share_hash_chain_s,
+                            block_hash_tree_s,
+                            share_data,
+                            encprivkey])
     return final_share
 
 def pack_prefix(seqnum, root_hash, IV,
@@ -255,7 +256,7 @@ class SDMFSlotWriteProxy(object):
                                                   self._required_shares)
         assert expected_segment_size == segment_size
 
-        self._block_size = self._segment_size / self._required_shares
+        self._block_size = old_div(self._segment_size, self._required_shares)
 
         # This is meant to mimic how SDMF files were built before MDMF
         # entered the picture: we generate each share in its entirety,
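
Why old_div: under Python 2, / between two integers floors, while under Python 3 it returns a float, which would corrupt the integer block-size arithmetic here. past.utils.old_div (from the python-future package) keeps the Python 2 semantics. A quick sketch (illustrative, not from the patch):

    from past.utils import old_div

    assert old_div(7, 2) == 3      # both ints: floors, like Python 2's "/"
    assert 7 / 2 == 3.5            # Python 3 true division
    assert old_div(7.0, 2) == 3.5  # any float operand: true division, as before
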
@@ -296,7 +297,7 @@ class SDMFSlotWriteProxy(object):
                                                    salt)
         else:
             checkstring = checkstring_or_seqnum
-            self._testvs = [(0, len(checkstring), "eq", checkstring)]
+            self._testvs = [(0, len(checkstring), b"eq", checkstring)]
 
     def get_checkstring(self):
@@ -306,7 +307,7 @@ class SDMFSlotWriteProxy(object):
         """
         if self._testvs:
             return self._testvs[0][3]
-        return ""
+        return b""
 
     def put_block(self, data, segnum, salt):
@@ -343,7 +344,7 @@ class SDMFSlotWriteProxy(object):
             assert len(h) == HASH_SIZE
 
         # serialize the blockhashes, then set them.
-        blockhashes_s = "".join(blockhashes)
+        blockhashes_s = b"".join(blockhashes)
         self._share_pieces['block_hash_tree'] = blockhashes_s
         return defer.succeed(None)
@@ -354,12 +355,12 @@ class SDMFSlotWriteProxy(object):
         Add the share hash chain to the share.
         """
         assert isinstance(sharehashes, dict)
-        for h in sharehashes.itervalues():
+        for h in sharehashes.values():
             assert len(h) == HASH_SIZE
 
         # serialize the sharehashes, then set them.
-        sharehashes_s = "".join([struct.pack(">H32s", i, sharehashes[i])
-                                 for i in sorted(sharehashes.keys())])
+        sharehashes_s = b"".join([struct.pack(">H32s", i, sharehashes[i])
+                                  for i in sorted(sharehashes.keys())])
         self._share_pieces['share_hash_chain'] = sharehashes_s
         return defer.succeed(None)
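
dict.itervalues() no longer exists on Python 3; .values() returns a view that iterates lazily on both interpreters, so it is the portable replacement. A small sketch (not from the patch):

    sharehashes = {1: b"\x11" * 32, 0: b"\x22" * 32}

    # .itervalues() raises AttributeError on Python 3; .values() works on both.
    for h in sharehashes.values():
        assert len(h) == 32
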
@@ -383,7 +384,7 @@ class SDMFSlotWriteProxy(object):
         assert len(salt) == SALT_SIZE
 
         self._share_pieces['salt'] = salt
-        self._share_pieces['sharedata'] = ""
+        self._share_pieces['sharedata'] = b""
 
     def get_signable(self):
@@ -519,14 +520,14 @@ class SDMFSlotWriteProxy(object):
         # to the remote server in one write.
         offsets = self._pack_offsets()
         prefix = self.get_signable()
-        final_share = "".join([prefix,
-                               offsets,
-                               self._share_pieces['verification_key'],
-                               self._share_pieces['signature'],
-                               self._share_pieces['share_hash_chain'],
-                               self._share_pieces['block_hash_tree'],
-                               self._share_pieces['sharedata'],
-                               self._share_pieces['encprivkey']])
+        final_share = b"".join([prefix,
+                                offsets,
+                                self._share_pieces['verification_key'],
+                                self._share_pieces['signature'],
+                                self._share_pieces['share_hash_chain'],
+                                self._share_pieces['block_hash_tree'],
+                                self._share_pieces['sharedata'],
+                                self._share_pieces['encprivkey']])
 
         # Our only data vector is going to be writing the final share,
         # in its entirely.
@@ -537,7 +538,7 @@ class SDMFSlotWriteProxy(object):
             # yet, so we assume that we are writing a new share, and set
             # a test vector that will allow a new share to be written.
             self._testvs = []
-            self._testvs.append(tuple([0, 1, "eq", ""]))
+            self._testvs.append(tuple([0, 1, b"eq", b""]))
 
         tw_vectors = {}
         tw_vectors[self.shnum] = (self._testvs, datavs, None)
@@ -788,7 +789,7 @@ class MDMFSlotWriteProxy(object):
         # and also because it provides a useful amount of bounds checking.
         self._num_segments = mathutil.div_ceil(self._data_length,
                                                self._segment_size)
-        self._block_size = self._segment_size / self._required_shares
+        self._block_size = old_div(self._segment_size, self._required_shares)
 
         # We also calculate the share size, to help us with block
         # constraints later.
         tail_size = self._data_length % self._segment_size
@@ -797,7 +798,7 @@ class MDMFSlotWriteProxy(object):
         else:
             self._tail_block_size = mathutil.next_multiple(tail_size,
                                                            self._required_shares)
-            self._tail_block_size /= self._required_shares
+            self._tail_block_size = old_div(self._tail_block_size, self._required_shares)
 
         # We already know where the sharedata starts; right after the end
         # of the header (which is defined as the signable part + the offsets)
@@ -868,7 +869,7 @@ class MDMFSlotWriteProxy(object):
         else:
             checkstring = seqnum_or_checkstring
 
-        if checkstring == "":
+        if checkstring == b"":
             # We special-case this, since len("") = 0, but we need
             # length of 1 for the case of an empty share to work on the
             # storage server, which is what a checkstring that is the
@@ -876,7 +877,7 @@ class MDMFSlotWriteProxy(object):
             self._testvs = []
         else:
             self._testvs = []
-            self._testvs.append((0, len(checkstring), "eq", checkstring))
+            self._testvs.append((0, len(checkstring), b"eq", checkstring))
 
     def __repr__(self):
@@ -893,7 +894,7 @@ class MDMFSlotWriteProxy(object):
         if self._root_hash:
             roothash = self._root_hash
         else:
-            roothash = "\x00" * 32
+            roothash = b"\x00" * 32
         return struct.pack(MDMFCHECKSTRING,
                            1,
                            self._seqnum,
@@ -964,7 +965,7 @@ class MDMFSlotWriteProxy(object):
         assert isinstance(blockhashes, list)
 
-        blockhashes_s = "".join(blockhashes)
+        blockhashes_s = b"".join(blockhashes)
         self._offsets['EOF'] = self._offsets['block_hash_tree'] + len(blockhashes_s)
 
         self._writevs.append(tuple([self._offsets['block_hash_tree'],
@@ -998,7 +999,7 @@ class MDMFSlotWriteProxy(object):
         if "verification_key" in self._offsets:
             raise LayoutInvalid("You must write the share hash chain "
                                 "before you write the signature")
-        sharehashes_s = "".join([struct.pack(">H32s", i, sharehashes[i])
+        sharehashes_s = b"".join([struct.pack(">H32s", i, sharehashes[i])
                                  for i in sorted(sharehashes.keys())])
         self._offsets['signature'] = self._offsets['share_hash_chain'] + \
                                      len(sharehashes_s)
@@ -1149,7 +1150,7 @@ class MDMFSlotWriteProxy(object):
         tw_vectors = {}
         if not self._testvs:
             self._testvs = []
-            self._testvs.append(tuple([0, 1, "eq", ""]))
+            self._testvs.append(tuple([0, 1, b"eq", b""]))
         if not self._written:
             # Write a new checkstring to the share when we write it, so
             # that we have something to check later.
@@ -1157,7 +1158,7 @@ class MDMFSlotWriteProxy(object):
             datavs.append((0, new_checkstring))
             def _first_write():
                 self._written = True
-                self._testvs = [(0, len(new_checkstring), "eq", new_checkstring)]
+                self._testvs = [(0, len(new_checkstring), b"eq", new_checkstring)]
             on_success = _first_write
         tw_vectors[self.shnum] = (self._testvs, datavs, None)
         d = self._storage_server.slot_testv_and_readv_and_writev(
@@ -1194,7 +1195,7 @@ class MDMFSlotReadProxy(object):
                  storage_server,
                  storage_index,
                  shnum,
-                 data="",
+                 data=b"",
                  data_is_everything=False):
 
         # Start the initialization process.
         self._storage_server = storage_server
@@ -1238,7 +1239,7 @@ class MDMFSlotReadProxy(object):
         # None if there isn't any cached data, but the way we index the
         # cached data requires a string, so convert None to "".
         if self._data == None:
-            self._data = ""
+            self._data = b""
 
     def _maybe_fetch_offsets_and_header(self, force_remote=False):
@@ -1317,7 +1318,7 @@ class MDMFSlotReadProxy(object):
         self._segment_size = segsize
         self._data_length = datalen
 
-        self._block_size = self._segment_size / self._required_shares
+        self._block_size = old_div(self._segment_size, self._required_shares)
         # We can upload empty files, and need to account for this fact
         # so as to avoid zero-division and zero-modulo errors.
         if datalen > 0:
@@ -1329,7 +1330,7 @@ class MDMFSlotReadProxy(object):
         else:
             self._tail_block_size = mathutil.next_multiple(tail_size,
                                                            self._required_shares)
-            self._tail_block_size /= self._required_shares
+            self._tail_block_size = old_div(self._tail_block_size, self._required_shares)
 
         return encoding_parameters
@@ -1416,7 +1417,7 @@ class MDMFSlotReadProxy(object):
             # when we fetched the header
             data = results[self.shnum]
             if not data:
-                data = ""
+                data = b""
             else:
                 if len(data) != 1:
                     raise BadShareError("got %d vectors, not 1" % len(data))
@@ -1425,7 +1426,7 @@ class MDMFSlotReadProxy(object):
         else:
             data = results[self.shnum]
             if not data:
-                salt = data = ""
+                salt = data = b""
             else:
                 salt_and_data = results[self.shnum][0]
                 salt = salt_and_data[:SALT_SIZE]
@@ -1743,7 +1744,7 @@ class MDMFSlotReadProxy(object):
     def _read(self, readvs, force_remote=False):
-        unsatisfiable = filter(lambda x: x[0] + x[1] > len(self._data), readvs)
+        unsatisfiable = list(filter(lambda x: x[0] + x[1] > len(self._data), readvs))
         # TODO: It's entirely possible to tweak this so that it just
         # fulfills the requests that it can, and not demand that all
        # requests are satisfiable before running it.

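filter() returns a lazy iterator on Python 3, and an unconsumed filter object is always truthy, so code that tests the result for emptiness must materialize it with list(). A sketch of the trap (not from the patch):

    readvs = [(0, 10), (50, 10)]
    data_len = 30

    # An unconsumed filter object is always truthy, even when it would
    # yield nothing:
    assert bool(filter(lambda x: False, readvs))

    unsatisfiable = list(filter(lambda x: x[0] + x[1] > data_len, readvs))
    assert unsatisfiable == [(50, 10)]  # a real list can be tested and reused
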
src/allmydata/storage/immutable.py

@@ -1,3 +1,5 @@
+from future.utils import bytes_to_native_str
+
 import os, stat, struct, time
 
 from foolscap.api import Referenceable
@@ -85,7 +87,7 @@ class ShareFile(object):
         seekpos = self._data_offset+offset
         actuallength = max(0, min(length, self._lease_offset-seekpos))
         if actuallength == 0:
-            return ""
+            return b""
         with open(self.home, 'rb') as f:
             f.seek(seekpos)
             return f.read(actuallength)
@@ -298,7 +300,9 @@ class BucketReader(Referenceable):
 
     def __repr__(self):
         return "<%s %s %s>" % (self.__class__.__name__,
-                               base32.b2a(self.storage_index[:8])[:12],
+                               bytes_to_native_str(
+                                   base32.b2a(self.storage_index[:8])[:12]
+                               ),
                                self.shnum)
 
     def remote_read(self, offset, length):
@@ -309,7 +313,7 @@ class BucketReader(Referenceable):
         return data
 
     def remote_advise_corrupt_share(self, reason):
-        return self.ss.remote_advise_corrupt_share("immutable",
+        return self.ss.remote_advise_corrupt_share(b"immutable",
                                                    self.storage_index,
                                                    self.shnum,
                                                    reason)

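future.utils.bytes_to_native_str converts bytes to the native str type of the running interpreter: a no-op on Python 2, a decode on Python 3. That keeps %s-formatted reprs and corruption reports free of b'...' artifacts. A sketch with a hypothetical storage-index prefix (not from the patch):

    from future.utils import bytes_to_native_str

    si = b"abcdef123456"  # hypothetical base32 storage-index prefix
    print("<BucketReader %s>" % bytes_to_native_str(si))
    # Python 3 prints: <BucketReader abcdef123456>   (not b'abcdef123456')
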
src/allmydata/storage/mutable.py

@@ -113,7 +113,7 @@ class MutableShareFile(object):
         # start beyond the end of the data return an empty string.
         length = max(0, data_length-offset)
         if length == 0:
-            return ""
+            return b""
         precondition(offset+length <= data_length)
         f.seek(self.DATA_OFFSET+offset)
         data = f.read(length)
@@ -421,18 +421,18 @@ class MutableShareFile(object):
     # self._change_container_size() here.
 
 def testv_compare(a, op, b):
-    assert op in ("lt", "le", "eq", "ne", "ge", "gt")
-    if op == "lt":
+    assert op in (b"lt", b"le", b"eq", b"ne", b"ge", b"gt")
+    if op == b"lt":
         return a < b
-    if op == "le":
+    if op == b"le":
         return a <= b
-    if op == "eq":
+    if op == b"eq":
         return a == b
-    if op == "ne":
+    if op == b"ne":
         return a != b
-    if op == "ge":
+    if op == b"ge":
         return a >= b
-    if op == "gt":
+    if op == b"gt":
         return a > b
     # never reached
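
The opcodes arrive over the wire as bytes, and on Python 3 bytes never compare equal to str; the mismatch is silently False rather than an error, so every literal must become a bytes literal. A sketch (not from the patch):

    op = b"eq"
    assert op != "eq"   # bytes vs. str: silently unequal on Python 3
    assert op == b"eq"
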
@@ -441,7 +441,7 @@ class EmptyShare(object):
     def check_testv(self, testv):
         test_good = True
         for (offset, length, operator, specimen) in testv:
-            data = ""
+            data = b""
             if not testv_compare(data, operator, specimen):
                 test_good = False
                 break

src/allmydata/storage/server.py

@@ -1,3 +1,4 @@
+from future.utils import bytes_to_native_str
 import os, re, struct, time
 import weakref
 import six
@@ -51,6 +52,7 @@ class StorageServer(service.MultiService, Referenceable):
         service.MultiService.__init__(self)
         assert isinstance(nodeid, bytes)
         assert len(nodeid) == 20
+        assert isinstance(nodeid, bytes)
         self.my_nodeid = nodeid
         self.storedir = storedir
         sharedir = os.path.join(storedir, "shares")
@@ -398,7 +400,7 @@ class StorageServer(service.MultiService, Referenceable):
         # since all shares get the same lease data, we just grab the leases
         # from the first share
         try:
-            shnum, filename = self._get_bucket_shares(storage_index).next()
+            shnum, filename = next(self._get_bucket_shares(storage_index))
             sf = ShareFile(filename)
             return sf.get_leases()
         except StopIteration:
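
Python 3 renamed the iterator protocol method from .next() to .__next__(); the builtin next() works on both interpreters and is the portable spelling. A sketch with a hypothetical generator (not from the patch):

    def shares():
        yield (0, "/path/to/share.0")  # hypothetical (shnum, filename) entries

    shnum, filename = next(shares())   # gen.next() raises AttributeError on py3
    assert shnum == 0
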
@@ -676,6 +678,10 @@ class StorageServer(service.MultiService, Referenceable):
 
     def remote_advise_corrupt_share(self, share_type, storage_index, shnum,
                                     reason):
+        # This is a remote API, I believe, so this has to be bytes for legacy
+        # protocol backwards compatibility reasons.
+        assert isinstance(share_type, bytes)
+        assert isinstance(reason, bytes)
         fileutil.make_dirs(self.corruption_advisory_dir)
         now = time_format.iso_utc(sep="T")
         si_s = si_b2a(storage_index)
@@ -684,11 +690,11 @@ class StorageServer(service.MultiService, Referenceable):
                 "%s--%s-%d" % (now, si_s, shnum)).replace(":","")
         with open(fn, "w") as f:
             f.write("report: Share Corruption\n")
-            f.write("type: %s\n" % share_type)
-            f.write("storage_index: %s\n" % si_s)
+            f.write("type: %s\n" % bytes_to_native_str(share_type))
+            f.write("storage_index: %s\n" % bytes_to_native_str(si_s))
             f.write("share_number: %d\n" % shnum)
             f.write("\n")
-            f.write(reason)
+            f.write(bytes_to_native_str(reason))
             f.write("\n")
         log.msg(format=("client claims corruption in (%(share_type)s) " +
                         "%(si)s-%(shnum)d: %(reason)s"),

src/allmydata/test/test_storage.py (file diff suppressed because it is too large)

src/allmydata/util/_python3.py

@@ -92,6 +92,7 @@ PORTED_TEST_MODULES = [
     "allmydata.test.test_python3",
     "allmydata.test.test_spans",
     "allmydata.test.test_statistics",
+    "allmydata.test.test_storage",
     "allmydata.test.test_storage_web",
     "allmydata.test.test_time_format",
     "allmydata.test.test_uri",