mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2025-01-18 10:46:24 +00:00)
Merge pull request #860 from tahoe-lafs/3473-mutable-tests-part-1-python-3

Port allmydata.mutable.tests to Python 3: part 1 of N. Fixes ticket:3473.
This commit is contained in:
b094a00458

newsfragments/3473.minor (new, empty file)
src/allmydata/mutable/filenode.py
@@ -147,9 +147,9 @@ class MutableFileNode(object):
     def _get_initial_contents(self, contents):
         if contents is None:
-            return MutableData("")
+            return MutableData(b"")
 
-        if isinstance(contents, str):
+        if isinstance(contents, bytes):
             return MutableData(contents)
 
         if IMutableUploadable.providedBy(contents):
@@ -884,9 +884,9 @@ class MutableFileVersion(object):
         d = self._try_to_download_data()
         def _apply(old_contents):
             new_contents = modifier(old_contents, self._servermap, first_time)
-            precondition((isinstance(new_contents, str) or
+            precondition((isinstance(new_contents, bytes) or
                           new_contents is None),
-                         "Modifier function must return a string "
+                         "Modifier function must return bytes "
                          "or None")
 
             if new_contents is None or new_contents == old_contents:
@@ -960,7 +960,7 @@ class MutableFileVersion(object):
         c = consumer.MemoryConsumer()
         # modify will almost certainly write, so we need the privkey.
         d = self._read(c, fetch_privkey=True)
-        d.addCallback(lambda mc: "".join(mc.chunks))
+        d.addCallback(lambda mc: b"".join(mc.chunks))
         return d
@@ -1076,7 +1076,7 @@ class MutableFileVersion(object):
             start = offset
             rest = offset + data.get_size()
             new = old[:start]
-            new += "".join(data.read(data.get_size()))
+            new += b"".join(data.read(data.get_size()))
             new += old[rest:]
             return new
         return self._modify(m, None)
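Note on the str -> bytes changes above: on Python 3, text (str) and raw bytes are distinct types, and mutable-file contents are raw bytes. A minimal standalone sketch (illustrative, not Tahoe-LAFS code) of the behaviour the port relies on:

    # Mutable-file contents are bytes, so the isinstance check must change.
    contents = b"share data"
    assert isinstance(contents, bytes)      # the check the port switches to
    assert not isinstance(contents, str)    # bytes is not str on Python 3

    # Joining downloaded chunks likewise needs a bytes separator:
    chunks = [b"abc", b"def"]
    assert b"".join(chunks) == b"abcdef"    # "".join(chunks) raises TypeError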
src/allmydata/mutable/publish.py
@@ -1,5 +1,5 @@
 import os, time
-from six.moves import cStringIO as StringIO
+from io import BytesIO
 from itertools import count
 from zope.interface import implementer
 from twisted.internet import defer
@@ -46,7 +46,7 @@ class PublishStatus(object):
         self.size = None
         self.status = "Not started"
         self.progress = 0.0
-        self.counter = self.statusid_counter.next()
+        self.counter = next(self.statusid_counter)
         self.started = time.time()
 
     def add_per_server_time(self, server, elapsed):
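The counter change follows a standard porting pattern: the .next() method on iterators was removed in Python 3, while the next() builtin (available since Python 2.6) works on both. In miniature:

    from itertools import count

    # Portable across Python 2 and 3; counter.next() is Python 2 only.
    statusid_counter = count(0)
    assert next(statusid_counter) == 0
    assert next(statusid_counter) == 1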
@@ -305,7 +305,7 @@ class Publish(object):
             # Our update process fetched these for us. We need to update
             # them in place as publishing happens.
             self.blockhashes = {} # (shnum, [blochashes])
-            for (i, bht) in blockhashes.iteritems():
+            for (i, bht) in list(blockhashes.items()):
                 # We need to extract the leaves from our old hash tree.
                 old_segcount = mathutil.div_ceil(version[4],
                                                  version[3])
@@ -313,7 +313,7 @@ class Publish(object):
                 bht = dict(enumerate(bht))
                 h.set_hashes(bht)
                 leaves = h[h.get_leaf_index(0):]
-                for j in xrange(self.num_segments - len(leaves)):
+                for j in range(self.num_segments - len(leaves)):
                     leaves.append(None)
 
                 assert len(leaves) >= self.num_segments
@@ -509,10 +509,10 @@ class Publish(object):
         # This will eventually hold the block hash chain for each share
         # that we publish. We define it this way so that empty publishes
         # will still have something to write to the remote slot.
-        self.blockhashes = dict([(i, []) for i in xrange(self.total_shares)])
-        for i in xrange(self.total_shares):
+        self.blockhashes = dict([(i, []) for i in range(self.total_shares)])
+        for i in range(self.total_shares):
             blocks = self.blockhashes[i]
-            for j in xrange(self.num_segments):
+            for j in range(self.num_segments):
                 blocks.append(None)
         self.sharehash_leaves = None # eventually [sharehashes]
         self.sharehashes = {} # shnum -> [sharehash leaves necessary to
@@ -526,7 +526,7 @@ class Publish(object):
         return self.done_deferred
 
     def _get_some_writer(self):
-        return list(self.writers.values()[0])[0]
+        return list(list(self.writers.values())[0])[0]
 
     def _update_status(self):
         self._status.set_status("Sending Shares: %d placed out of %d, "
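_get_some_writer() needs the extra list() because dict.values() returns a non-indexable view on Python 3. A sketch with illustrative data (not the real writers structure):

    # Hypothetical stand-in for self.writers: shnum -> list of writers.
    writers = {0: ["writer-a"], 1: ["writer-b"]}

    # Python 2: writers.values()[0] worked. Python 3: TypeError, because
    # dict.values() is a view; materialize it with list() first.
    some_writer = list(list(writers.values())[0])[0]
    assert some_writer == "writer-a"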
@@ -684,7 +684,7 @@ class Publish(object):
         salt = os.urandom(16)
         assert self._version == SDMF_VERSION
 
-        for shnum, writers in self.writers.iteritems():
+        for shnum, writers in self.writers.items():
             for writer in writers:
                 writer.put_salt(salt)
@@ -704,7 +704,7 @@ class Publish(object):
         self.log("Pushing segment %d of %d" % (segnum + 1, self.num_segments))
         data = self.data.read(segsize)
         # XXX: This is dumb. Why return a list?
-        data = "".join(data)
+        data = b"".join(data)
 
         assert len(data) == segsize, len(data)
@@ -732,7 +732,7 @@ class Publish(object):
         for i in range(len(crypttext_pieces)):
             offset = i * piece_size
             piece = crypttext[offset:offset+piece_size]
-            piece = piece + "\x00"*(piece_size - len(piece)) # padding
+            piece = piece + b"\x00"*(piece_size - len(piece)) # padding
             crypttext_pieces[i] = piece
             assert len(piece) == piece_size
         d = fec.encode(crypttext_pieces)
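The padding fix matters because Python 3 refuses to concatenate str and bytes. A sketch of the padding arithmetic with illustrative values (the real piece_size comes from the FEC parameters):

    piece_size = 8
    piece = b"abc"
    piece = piece + b"\x00" * (piece_size - len(piece))  # bytes-only padding
    assert len(piece) == piece_size
    assert piece == b"abc\x00\x00\x00\x00\x00"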
@@ -751,7 +751,7 @@ class Publish(object):
         results, salt = encoded_and_salt
         shares, shareids = results
         self._status.set_status("Pushing segment")
-        for i in xrange(len(shares)):
+        for i in range(len(shares)):
             sharedata = shares[i]
             shareid = shareids[i]
             if self._version == MDMF_VERSION:
@@ -786,7 +786,7 @@ class Publish(object):
     def push_encprivkey(self):
         encprivkey = self._encprivkey
         self._status.set_status("Pushing encrypted private key")
-        for shnum, writers in self.writers.iteritems():
+        for shnum, writers in self.writers.items():
             for writer in writers:
                 writer.put_encprivkey(encprivkey)
@@ -794,7 +794,7 @@ class Publish(object):
     def push_blockhashes(self):
         self.sharehash_leaves = [None] * len(self.blockhashes)
         self._status.set_status("Building and pushing block hash tree")
-        for shnum, blockhashes in self.blockhashes.iteritems():
+        for shnum, blockhashes in list(self.blockhashes.items()):
             t = hashtree.HashTree(blockhashes)
             self.blockhashes[shnum] = list(t)
             # set the leaf for future use.
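push_blockhashes() wraps items() in list() rather than using the view bare: the loop body reassigns entries of self.blockhashes, and while replacing values for existing keys is tolerated, adding or removing keys while iterating a live view raises RuntimeError on Python 3, so the snapshot is the conservative choice. In miniature, with made-up hashes:

    blockhashes = {0: [b"h0"], 1: [b"h1"]}
    for shnum, hashes in list(blockhashes.items()):  # snapshot, then mutate
        blockhashes[shnum] = hashes + [b"extra"]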
@@ -808,7 +808,7 @@ class Publish(object):
     def push_sharehashes(self):
         self._status.set_status("Building and pushing share hash chain")
         share_hash_tree = hashtree.HashTree(self.sharehash_leaves)
-        for shnum in xrange(len(self.sharehash_leaves)):
+        for shnum in range(len(self.sharehash_leaves)):
             needed_indices = share_hash_tree.needed_hashes(shnum)
             self.sharehashes[shnum] = dict( [ (i, share_hash_tree[i])
                                               for i in needed_indices] )
@@ -824,7 +824,7 @@ class Publish(object):
         # - Get the checkstring of the resulting layout; sign that.
         # - Push the signature
         self._status.set_status("Pushing root hashes and signature")
-        for shnum in xrange(self.total_shares):
+        for shnum in range(self.total_shares):
             writers = self.writers[shnum]
             for writer in writers:
                 writer.put_root_hash(self.root_hash)
@@ -852,7 +852,7 @@ class Publish(object):
         signable = self._get_some_writer().get_signable()
         self.signature = rsa.sign_data(self._privkey, signable)
 
-        for (shnum, writers) in self.writers.iteritems():
+        for (shnum, writers) in self.writers.items():
             for writer in writers:
                 writer.put_signature(self.signature)
         self._status.timings['sign'] = time.time() - started
@@ -867,7 +867,7 @@ class Publish(object):
         ds = []
         verification_key = rsa.der_string_from_verifying_key(self._pubkey)
 
-        for (shnum, writers) in self.writers.copy().iteritems():
+        for (shnum, writers) in list(self.writers.copy().items()):
             for writer in writers:
                 writer.put_verification_key(verification_key)
             self.num_outstanding += 1
@@ -1003,7 +1003,7 @@ class Publish(object):
 
         # TODO: Precompute this.
         shares = []
-        for shnum, writers in self.writers.iteritems():
+        for shnum, writers in self.writers.items():
             shares.extend([x.shnum for x in writers if x.server == server])
         known_shnums = set(shares)
         surprise_shares -= known_shnums
@@ -1198,7 +1198,7 @@ class Publish(object):
 class MutableFileHandle(object):
     """
     I am a mutable uploadable built around a filehandle-like object,
-    usually either a StringIO instance or a handle to an actual file.
+    usually either a BytesIO instance or a handle to an actual file.
     """
 
     def __init__(self, filehandle):
@@ -1268,14 +1268,14 @@ class MutableFileHandle(object):
 class MutableData(MutableFileHandle):
     """
     I am a mutable uploadable built around a string, which I then cast
-    into a StringIO and treat as a filehandle.
+    into a BytesIO and treat as a filehandle.
     """
 
     def __init__(self, s):
         # Take a string and return a file-like uploadable.
-        assert isinstance(s, str)
+        assert isinstance(s, bytes)
 
-        MutableFileHandle.__init__(self, StringIO(s))
+        MutableFileHandle.__init__(self, BytesIO(s))
 
 
 @implementer(IMutableUploadable)
@@ -1361,7 +1361,7 @@ class TransformingUploadable(object):
 
         self.log("reading %d bytes of new data" % length)
         new_data = self._newdata.read(length)
-        new_data = "".join(new_data)
+        new_data = b"".join(new_data)
 
         self._read_marker += len(old_start_data + new_data + old_end_data)
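All the StringIO -> BytesIO swaps in this file follow from the same fact: uploadables carry raw bytes, and on Python 3 six.moves.cStringIO resolves to io.StringIO, which only accepts text. A sketch:

    from io import BytesIO

    fh = BytesIO(b"Test Data")       # accepts and returns bytes
    assert fh.read(4) == b"Test"
    # io.StringIO(b"Test Data") would raise TypeError on Python 3.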
src/allmydata/test/mutable/test_datahandle.py
@@ -1,17 +1,29 @@
+"""
+Ported to Python 3.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
 from twisted.trial import unittest
 from allmydata.mutable.publish import MutableData
 
 class DataHandle(unittest.TestCase):
     def setUp(self):
-        self.test_data = "Test Data" * 50000
+        self.test_data = b"Test Data" * 50000
         self.uploadable = MutableData(self.test_data)
 
 
     def test_datahandle_read(self):
         chunk_size = 10
-        for i in xrange(0, len(self.test_data), chunk_size):
+        for i in range(0, len(self.test_data), chunk_size):
             data = self.uploadable.read(chunk_size)
-            data = "".join(data)
+            data = b"".join(data)
             start = i
             end = i + chunk_size
             self.failUnlessEqual(data, self.test_data[start:end])
@@ -28,7 +40,7 @@ class DataHandle(unittest.TestCase):
         # disturbing the location of the seek pointer.
         chunk_size = 100
         data = self.uploadable.read(chunk_size)
-        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
+        self.failUnlessEqual(b"".join(data), self.test_data[:chunk_size])
 
         # Now get the size.
         size = self.uploadable.get_size()
@@ -38,4 +50,4 @@ class DataHandle(unittest.TestCase):
         more_data = self.uploadable.read(chunk_size)
         start = chunk_size
         end = chunk_size * 2
-        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
+        self.failUnlessEqual(b"".join(more_data), self.test_data[start:end])
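The twelve-line header added to each ported test module is the project's standard porting preamble: the __future__ imports fix semantics on Python 2, and the future.builtins import (skipped entirely on Python 3) backports Python 3 builtins. Reduced to its essentials:

    from __future__ import absolute_import, division, print_function, unicode_literals

    from future.utils import PY2
    if PY2:
        # Only runs on Python 2; gives Python 3 semantics to these builtins.
        from future.builtins import bytes, dict, list, object, range, str  # noqa: F401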
src/allmydata/test/mutable/test_different_encoding.py
@@ -1,3 +1,15 @@
+"""
+Ported to Python 3.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
 from twisted.trial import unittest
 from .util import FakeStorage, make_nodemaker
@@ -10,7 +22,7 @@ class DifferentEncoding(unittest.TestCase):
         # create a file with 3-of-20, then modify it with a client configured
         # to do 3-of-10. #1510 tracks a failure here
         self.nodemaker.default_encoding_parameters["n"] = 20
-        d = self.nodemaker.create_mutable_file("old contents")
+        d = self.nodemaker.create_mutable_file(b"old contents")
         def _created(n):
             filecap = n.get_cap().to_string()
             del n # we want a new object, not the cached one
@@ -19,6 +31,6 @@ class DifferentEncoding(unittest.TestCase):
             return n2
         d.addCallback(_created)
         def modifier(old_contents, servermap, first_time):
-            return "new contents"
+            return b"new contents"
         d.addCallback(lambda n: n.modify(modifier))
         return d
src/allmydata/test/mutable/test_filehandle.py
@@ -1,21 +1,33 @@
+"""
+Ported to Python 3.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
 import os
-from six.moves import cStringIO as StringIO
+from io import BytesIO
 from twisted.trial import unittest
 from allmydata.mutable.publish import MutableFileHandle
 
 class FileHandle(unittest.TestCase):
     def setUp(self):
-        self.test_data = "Test Data" * 50000
-        self.sio = StringIO(self.test_data)
+        self.test_data = b"Test Data" * 50000
+        self.sio = BytesIO(self.test_data)
         self.uploadable = MutableFileHandle(self.sio)
 
 
     def test_filehandle_read(self):
         self.basedir = "mutable/FileHandle/test_filehandle_read"
         chunk_size = 10
-        for i in xrange(0, len(self.test_data), chunk_size):
+        for i in range(0, len(self.test_data), chunk_size):
             data = self.uploadable.read(chunk_size)
-            data = "".join(data)
+            data = b"".join(data)
             start = i
             end = i + chunk_size
             self.failUnlessEqual(data, self.test_data[start:end])
@@ -33,7 +45,7 @@ class FileHandle(unittest.TestCase):
         # disturbing the location of the seek pointer.
         chunk_size = 100
         data = self.uploadable.read(chunk_size)
-        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
+        self.failUnlessEqual(b"".join(data), self.test_data[:chunk_size])
 
         # Now get the size.
         size = self.uploadable.get_size()
@@ -43,26 +55,26 @@ class FileHandle(unittest.TestCase):
         more_data = self.uploadable.read(chunk_size)
         start = chunk_size
         end = chunk_size * 2
-        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
+        self.failUnlessEqual(b"".join(more_data), self.test_data[start:end])
 
 
     def test_filehandle_file(self):
         # Make sure that the MutableFileHandle works on a file as well
-        # as a StringIO object, since in some cases it will be asked to
+        # as a BytesIO object, since in some cases it will be asked to
         # deal with files.
         self.basedir = self.mktemp()
         # necessary? What am I doing wrong here?
         os.mkdir(self.basedir)
         f_path = os.path.join(self.basedir, "test_file")
-        f = open(f_path, "w")
+        f = open(f_path, "wb")
         f.write(self.test_data)
         f.close()
-        f = open(f_path, "r")
+        f = open(f_path, "rb")
 
         uploadable = MutableFileHandle(f)
 
         data = uploadable.read(len(self.test_data))
-        self.failUnlessEqual("".join(data), self.test_data)
+        self.failUnlessEqual(b"".join(data), self.test_data)
         size = uploadable.get_size()
         self.failUnlessEqual(size, len(self.test_data))
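test_filehandle_file now opens the file in binary mode, which Python 3 requires for bytes: a handle opened with "w"/"r" accepts and returns only str. A self-contained sketch (the hypothetical path stands in for the test's self.mktemp()):

    import os, tempfile

    path = os.path.join(tempfile.mkdtemp(), "test_file")
    with open(path, "wb") as f:      # "w" would reject bytes on Python 3
        f.write(b"Test Data")
    with open(path, "rb") as f:      # "r" would return str, not bytes
        assert f.read() == b"Test Data"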
src/allmydata/test/mutable/util.py
@@ -1,4 +1,6 @@
-from six.moves import cStringIO as StringIO
+from past.builtins import long
+
+from io import BytesIO
 import attr
 from twisted.internet import defer, reactor
 from foolscap.api import eventually, fireEventually
@@ -75,8 +77,8 @@ class FakeStorage(object):
         if peerid not in self._peers:
             self._peers[peerid] = {}
         shares = self._peers[peerid]
-        f = StringIO()
-        f.write(shares.get(shnum, ""))
+        f = BytesIO()
+        f.write(shares.get(shnum, b""))
         f.seek(offset)
         f.write(data)
         shares[shnum] = f.getvalue()
|
||||
readv = {}
|
||||
for shnum, (testv, writev, new_length) in tw_vectors.items():
|
||||
for (offset, length, op, specimen) in testv:
|
||||
assert op in ("le", "eq", "ge")
|
||||
assert op in (b"le", b"eq", b"ge")
|
||||
# TODO: this isn't right, the read is controlled by read_vector,
|
||||
# not by testv
|
||||
readv[shnum] = [ specimen
|
||||
@ -222,10 +224,10 @@ def make_peer(s, i):
|
||||
|
||||
:rtype: ``Peer``
|
||||
"""
|
||||
peerid = base32.b2a(tagged_hash("peerid", "%d" % i)[:20])
|
||||
peerid = base32.b2a(tagged_hash(b"peerid", b"%d" % i)[:20])
|
||||
fss = FakeStorageServer(peerid, s)
|
||||
ann = {
|
||||
"anonymous-storage-FURL": "pb://%s@nowhere/fake" % (peerid,),
|
||||
"anonymous-storage-FURL": b"pb://%s@nowhere/fake" % (peerid,),
|
||||
"permutation-seed-base32": peerid,
|
||||
}
|
||||
return Peer(peerid=peerid, storage_server=fss, announcement=ann)
|
||||
@@ -297,7 +299,7 @@ def make_nodemaker_with_storage_broker(storage_broker, keysize):
 
     :param StorageFarmBroker peers: The storage broker to use.
     """
-    sh = client.SecretHolder("lease secret", "convergence secret")
+    sh = client.SecretHolder(b"lease secret", b"convergence secret")
     keygen = client.KeyGenerator()
     if keysize:
         keygen.set_default_keysize(keysize)
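make_peer() can keep its %-formatting only because PEP 461 restored the % operator for bytes in Python 3.5; on Python 3.0-3.4, b"%d" % i would fail. A sketch of the two expressions the change relies on, with made-up values:

    i = 7
    assert b"%d" % i == b"7"             # bytes %-formatting, Python 3.5+
    peerid = b"xxx"
    assert b"pb://%s@nowhere/fake" % (peerid,) == b"pb://xxx@nowhere/fake"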
src/allmydata/util/_python3.py
@@ -89,6 +89,9 @@ PORTED_MODULES = [
 ]
 
 PORTED_TEST_MODULES = [
+    "allmydata.test.mutable.test_datahandle",
+    "allmydata.test.mutable.test_different_encoding",
+    "allmydata.test.mutable.test_filehandle",
     "allmydata.test.test_abbreviate",
     "allmydata.test.test_base32",
     "allmydata.test.test_base62",