Merge remote-tracking branch 'upstream/master'

Jean-Paul Calderone 2021-11-04 10:02:54 -04:00
commit a71b1d31bf
27 changed files with 603 additions and 280 deletions

View File

@@ -6,6 +6,23 @@ on:
- "master"
pull_request:
# Control to what degree jobs in this workflow will run concurrently with
# other instances of themselves.
#
# https://docs.github.com/en/actions/learn-github-actions/workflow-syntax-for-github-actions#concurrency
concurrency:
# We want every revision on master to run the workflow completely.
# "head_ref" is not set for the "push" event but it is set for the
# "pull_request" event. If it is set then it is the name of the branch and
# we can use it to make sure each branch has only one active workflow at a
# time. If it is not set then we can compute a unique string that gives
# every master/push workflow its own group.
group: "${{ github.head_ref || format('{0}-{1}', github.run_number, github.run_attempt) }}"
# Then, we say that if a new workflow wants to start in the same group as a
# running workflow, the running workflow should be cancelled.
cancel-in-progress: true
env:
# Tell Hypothesis which configuration we want it to use.
TAHOE_LAFS_HYPOTHESIS_PROFILE: "ci"

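The `group` expression above relies on the `||` fallback: `github.head_ref` is only set for `pull_request` events, so every push to master falls through to a run-unique string and its workflow is never cancelled. A minimal sketch of how that expression resolves, with hypothetical values (the real evaluation happens inside GitHub Actions):

def concurrency_group(head_ref, run_number, run_attempt):
    # Mirrors "${{ github.head_ref || format('{0}-{1}', github.run_number, github.run_attempt) }}".
    # head_ref is empty for "push" events, so those get a group of their own.
    return head_ref or "{0}-{1}".format(run_number, run_attempt)

assert concurrency_group("my-branch", 17, 1) == "my-branch"  # pull_request event
assert concurrency_group("", 17, 1) == "17-1"                # push to master
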
.readthedocs.yaml (new file, 5 lines)

@@ -0,0 +1,5 @@
version: 2
python:
install:
- requirements: docs/requirements.txt

docs/requirements.txt (new file, 4 lines)

@@ -0,0 +1,4 @@
sphinx
docutils<0.18 # https://github.com/sphinx-doc/sphinx/issues/9788
recommonmark
sphinx_rtd_theme

View File

@@ -35,6 +35,9 @@ from allmydata.test.common import (
if sys.platform.startswith('win'):
pytest.skip('Skipping Tor tests on Windows', allow_module_level=True)
if PY2:
pytest.skip('Skipping Tor tests on Python 2 because dependencies are hard to come by', allow_module_level=True)
@pytest_twisted.inlineCallbacks
def test_onion_service_storage(reactor, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl):
yield _create_anonymous_node(reactor, 'carol', 8008, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl)

newsfragments/3829.minor (new empty file)

newsfragments/3830.minor (new empty file)

newsfragments/3831.minor (new empty file)

newsfragments/3832.minor (new empty file)

newsfragments/3833.minor (new empty file)

newsfragments/3834.minor (new empty file)

newsfragments/3835.minor (new empty file)

newsfragments/3836.minor (new empty file)

newsfragments/3837.other (new file, 1 line)

@@ -0,0 +1 @@
Tahoe-LAFS no longer runs its Tor integration test suite on Python 2 due to the increased complexity of obtaining compatible versions of necessary dependencies.

View File

@@ -15,15 +15,22 @@ try:
except ImportError:
pass
# do not import any allmydata modules at this level. Do that from inside
# individual functions instead.
import struct, time, os, sys
from twisted.python import usage, failure
from twisted.internet import defer
from foolscap.logging import cli as foolscap_cli
from allmydata.scripts.common import BaseOptions
from allmydata import uri
from allmydata.storage.mutable import MutableShareFile
from allmydata.storage.immutable import ShareFile
from allmydata.mutable.layout import unpack_share
from allmydata.mutable.layout import MDMFSlotReadProxy
from allmydata.mutable.common import NeedMoreDataError
from allmydata.immutable.layout import ReadBucketProxy
from allmydata.util import base32
from allmydata.util.encodingutil import quote_output
class DumpOptions(BaseOptions):
def getSynopsis(self):
@@ -56,13 +63,11 @@ def dump_share(options):
# check the version, to see if we have a mutable or immutable share
print("share filename: %s" % quote_output(options['filename']), file=out)
f = open(options['filename'], "rb")
prefix = f.read(32)
f.close()
if prefix == MutableShareFile.MAGIC:
return dump_mutable_share(options)
# otherwise assume it's immutable
return dump_immutable_share(options)
with open(options['filename'], "rb") as f:
if MutableShareFile.is_valid_header(f.read(32)):
return dump_mutable_share(options)
# otherwise assume it's immutable
return dump_immutable_share(options)
def dump_immutable_share(options):
from allmydata.storage.immutable import ShareFile
@@ -170,7 +175,7 @@ def dump_immutable_lease_info(f, out):
leases = list(f.get_leases())
if leases:
for i,lease in enumerate(leases):
when = format_expiration_time(lease.expiration_time)
when = format_expiration_time(lease.get_expiration_time())
print(" Lease #%d: owner=%d, expire in %s" \
% (i, lease.owner_num, when), file=out)
else:
@@ -223,7 +228,7 @@ def dump_mutable_share(options):
print(file=out)
print(" Lease #%d:" % leasenum, file=out)
print(" ownerid: %d" % lease.owner_num, file=out)
when = format_expiration_time(lease.expiration_time)
when = format_expiration_time(lease.get_expiration_time())
print(" expires in %s" % when, file=out)
print(" renew_secret: %s" % str(base32.b2a(lease.renew_secret), "utf-8"), file=out)
print(" cancel_secret: %s" % str(base32.b2a(lease.cancel_secret), "utf-8"), file=out)
@@ -712,125 +717,122 @@ def call(c, *args, **kwargs):
return results[0]
def describe_share(abs_sharefile, si_s, shnum_s, now, out):
from allmydata import uri
from allmydata.storage.mutable import MutableShareFile
from allmydata.storage.immutable import ShareFile
from allmydata.mutable.layout import unpack_share
from allmydata.mutable.common import NeedMoreDataError
from allmydata.immutable.layout import ReadBucketProxy
from allmydata.util import base32
from allmydata.util.encodingutil import quote_output
import struct
f = open(abs_sharefile, "rb")
prefix = f.read(32)
if prefix == MutableShareFile.MAGIC:
# mutable share
m = MutableShareFile(abs_sharefile)
WE, nodeid = m._read_write_enabler_and_nodeid(f)
data_length = m._read_data_length(f)
expiration_time = min( [lease.expiration_time
for (i,lease) in m._enumerate_leases(f)] )
expiration = max(0, expiration_time - now)
share_type = "unknown"
f.seek(m.DATA_OFFSET)
version = f.read(1)
if version == b"\x00":
# this slot contains an SDMF share
share_type = "SDMF"
elif version == b"\x01":
share_type = "MDMF"
if share_type == "SDMF":
f.seek(m.DATA_OFFSET)
data = f.read(min(data_length, 2000))
try:
pieces = unpack_share(data)
except NeedMoreDataError as e:
# retry once with the larger size
size = e.needed_bytes
f.seek(m.DATA_OFFSET)
data = f.read(min(data_length, size))
pieces = unpack_share(data)
(seqnum, root_hash, IV, k, N, segsize, datalen,
pubkey, signature, share_hash_chain, block_hash_tree,
share_data, enc_privkey) = pieces
print("SDMF %s %d/%d %d #%d:%s %d %s" % \
(si_s, k, N, datalen,
seqnum, str(base32.b2a(root_hash), "utf-8"),
expiration, quote_output(abs_sharefile)), file=out)
elif share_type == "MDMF":
from allmydata.mutable.layout import MDMFSlotReadProxy
fake_shnum = 0
# TODO: factor this out with dump_MDMF_share()
class ShareDumper(MDMFSlotReadProxy):
def _read(self, readvs, force_remote=False, queue=False):
data = []
for (where,length) in readvs:
f.seek(m.DATA_OFFSET+where)
data.append(f.read(length))
return defer.succeed({fake_shnum: data})
p = ShareDumper(None, "fake-si", fake_shnum)
def extract(func):
stash = []
# these methods return Deferreds, but we happen to know that
# they run synchronously when not actually talking to a
# remote server
d = func()
d.addCallback(stash.append)
return stash[0]
verinfo = extract(p.get_verinfo)
(seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
offsets) = verinfo
print("MDMF %s %d/%d %d #%d:%s %d %s" % \
(si_s, k, N, datalen,
seqnum, str(base32.b2a(root_hash), "utf-8"),
expiration, quote_output(abs_sharefile)), file=out)
with open(abs_sharefile, "rb") as f:
prefix = f.read(32)
if MutableShareFile.is_valid_header(prefix):
_describe_mutable_share(abs_sharefile, f, now, si_s, out)
elif ShareFile.is_valid_header(prefix):
_describe_immutable_share(abs_sharefile, now, si_s, out)
else:
print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)
print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out)
elif struct.unpack(">L", prefix[:4]) == (1,):
# immutable
def _describe_mutable_share(abs_sharefile, f, now, si_s, out):
# mutable share
m = MutableShareFile(abs_sharefile)
WE, nodeid = m._read_write_enabler_and_nodeid(f)
data_length = m._read_data_length(f)
expiration_time = min( [lease.get_expiration_time()
for (i,lease) in m._enumerate_leases(f)] )
expiration = max(0, expiration_time - now)
class ImmediateReadBucketProxy(ReadBucketProxy):
def __init__(self, sf):
self.sf = sf
ReadBucketProxy.__init__(self, None, None, "")
def __repr__(self):
return "<ImmediateReadBucketProxy>"
def _read(self, offset, size):
return defer.succeed(sf.read_share_data(offset, size))
share_type = "unknown"
f.seek(m.DATA_OFFSET)
version = f.read(1)
if version == b"\x00":
# this slot contains an SDMF share
share_type = "SDMF"
elif version == b"\x01":
share_type = "MDMF"
# use a ReadBucketProxy to parse the bucket and find the uri extension
sf = ShareFile(abs_sharefile)
bp = ImmediateReadBucketProxy(sf)
if share_type == "SDMF":
f.seek(m.DATA_OFFSET)
expiration_time = min( [lease.expiration_time
for lease in sf.get_leases()] )
expiration = max(0, expiration_time - now)
# Read at least the mutable header length, if possible. If there's
# less data than that in the share, don't try to read more (we won't
# be able to unpack the header in this case but we surely don't want
# to try to unpack bytes *following* the data section as if they were
# header data). Rather than 2000 we could use HEADER_LENGTH from
# allmydata/mutable/layout.py, probably.
data = f.read(min(data_length, 2000))
UEB_data = call(bp.get_uri_extension)
unpacked = uri.unpack_extension_readable(UEB_data)
try:
pieces = unpack_share(data)
except NeedMoreDataError as e:
# retry once with the larger size
size = e.needed_bytes
f.seek(m.DATA_OFFSET)
data = f.read(min(data_length, size))
pieces = unpack_share(data)
(seqnum, root_hash, IV, k, N, segsize, datalen,
pubkey, signature, share_hash_chain, block_hash_tree,
share_data, enc_privkey) = pieces
k = unpacked["needed_shares"]
N = unpacked["total_shares"]
filesize = unpacked["size"]
ueb_hash = unpacked["UEB_hash"]
print("SDMF %s %d/%d %d #%d:%s %d %s" % \
(si_s, k, N, datalen,
seqnum, str(base32.b2a(root_hash), "utf-8"),
expiration, quote_output(abs_sharefile)), file=out)
elif share_type == "MDMF":
fake_shnum = 0
# TODO: factor this out with dump_MDMF_share()
class ShareDumper(MDMFSlotReadProxy):
def _read(self, readvs, force_remote=False, queue=False):
data = []
for (where,length) in readvs:
f.seek(m.DATA_OFFSET+where)
data.append(f.read(length))
return defer.succeed({fake_shnum: data})
print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
str(ueb_hash, "utf-8"), expiration,
quote_output(abs_sharefile)), file=out)
p = ShareDumper(None, "fake-si", fake_shnum)
def extract(func):
stash = []
# these methods return Deferreds, but we happen to know that
# they run synchronously when not actually talking to a
# remote server
d = func()
d.addCallback(stash.append)
return stash[0]
verinfo = extract(p.get_verinfo)
(seqnum, root_hash, salt_to_use, segsize, datalen, k, N, prefix,
offsets) = verinfo
print("MDMF %s %d/%d %d #%d:%s %d %s" % \
(si_s, k, N, datalen,
seqnum, str(base32.b2a(root_hash), "utf-8"),
expiration, quote_output(abs_sharefile)), file=out)
else:
print("UNKNOWN really-unknown %s" % quote_output(abs_sharefile), file=out)
print("UNKNOWN mutable %s" % quote_output(abs_sharefile), file=out)
def _describe_immutable_share(abs_sharefile, now, si_s, out):
class ImmediateReadBucketProxy(ReadBucketProxy):
def __init__(self, sf):
self.sf = sf
ReadBucketProxy.__init__(self, None, None, "")
def __repr__(self):
return "<ImmediateReadBucketProxy>"
def _read(self, offset, size):
return defer.succeed(sf.read_share_data(offset, size))
# use a ReadBucketProxy to parse the bucket and find the uri extension
sf = ShareFile(abs_sharefile)
bp = ImmediateReadBucketProxy(sf)
expiration_time = min(lease.get_expiration_time()
for lease in sf.get_leases())
expiration = max(0, expiration_time - now)
UEB_data = call(bp.get_uri_extension)
unpacked = uri.unpack_extension_readable(UEB_data)
k = unpacked["needed_shares"]
N = unpacked["total_shares"]
filesize = unpacked["size"]
ueb_hash = unpacked["UEB_hash"]
print("CHK %s %d/%d %d %s %d %s" % (si_s, k, N, filesize,
str(ueb_hash, "utf-8"), expiration,
quote_output(abs_sharefile)), file=out)
f.close()
def catalog_shares(options):
from allmydata.util.encodingutil import listdir_unicode, quote_output
@@ -933,34 +935,35 @@ def corrupt_share(options):
f.write(d)
f.close()
f = open(fn, "rb")
prefix = f.read(32)
f.close()
if prefix == MutableShareFile.MAGIC:
# mutable
m = MutableShareFile(fn)
f = open(fn, "rb")
f.seek(m.DATA_OFFSET)
data = f.read(2000)
# make sure this slot contains an SDMF share
assert data[0:1] == b"\x00", "non-SDMF mutable shares not supported"
f.close()
with open(fn, "rb") as f:
prefix = f.read(32)
(version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
ig_datalen, offsets) = unpack_header(data)
if MutableShareFile.is_valid_header(prefix):
# mutable
m = MutableShareFile(fn)
with open(fn, "rb") as f:
f.seek(m.DATA_OFFSET)
# Read enough data to get a mutable header to unpack.
data = f.read(2000)
# make sure this slot contains an SDMF share
assert data[0:1] == b"\x00", "non-SDMF mutable shares not supported"
f.close()
assert version == 0, "we only handle v0 SDMF files"
start = m.DATA_OFFSET + offsets["share_data"]
end = m.DATA_OFFSET + offsets["enc_privkey"]
flip_bit(start, end)
else:
# otherwise assume it's immutable
f = ShareFile(fn)
bp = ReadBucketProxy(None, None, '')
offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
start = f._data_offset + offsets["data"]
end = f._data_offset + offsets["plaintext_hash_tree"]
flip_bit(start, end)
(version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
ig_datalen, offsets) = unpack_header(data)
assert version == 0, "we only handle v0 SDMF files"
start = m.DATA_OFFSET + offsets["share_data"]
end = m.DATA_OFFSET + offsets["enc_privkey"]
flip_bit(start, end)
else:
# otherwise assume it's immutable
f = ShareFile(fn)
bp = ReadBucketProxy(None, None, '')
offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
start = f._data_offset + offsets["data"]
end = f._data_offset + offsets["plaintext_hash_tree"]
flip_bit(start, end)

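Taken together, the `debug.py` changes above replace ad-hoc magic-number comparisons with the new `is_valid_header` classmethods on the two container types. A condensed sketch (not part of the diff) of that dispatch pattern:

from allmydata.storage.mutable import MutableShareFile
from allmydata.storage.immutable import ShareFile

def classify_share(path):
    # 32 bytes is enough for either container to recognize its own header:
    # MutableShareFile checks its MAGIC prefix, ShareFile checks that the
    # first four bytes decode to container version 1.
    with open(path, "rb") as f:
        header = f.read(32)
    if MutableShareFile.is_valid_header(header):
        return "mutable"
    elif ShareFile.is_valid_header(header):
        return "immutable"
    return "unknown"
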
View File

@@ -25,7 +25,6 @@ from allmydata.interfaces import (
)
from allmydata.util import base32, fileutil, log
from allmydata.util.assertutil import precondition
from allmydata.util.hashutil import timing_safe_compare
from allmydata.storage.lease import LeaseInfo
from allmydata.storage.common import UnknownImmutableContainerVersionError
@@ -106,6 +105,21 @@ class ShareFile(object):
LEASE_SIZE = struct.calcsize(">L32s32sL")
sharetype = "immutable"
@classmethod
def is_valid_header(cls, header):
# type: (bytes) -> bool
"""
Determine if the given bytes constitute a valid header for this type of
container.
:param header: Some bytes from the beginning of a container.
:return: ``True`` if the bytes could belong to this container,
``False`` otherwise.
"""
(version,) = struct.unpack(">L", header[:4])
return version == 1
def __init__(self, filename, max_size=None, create=False, lease_count_format="L"):
"""
Initialize a ``ShareFile``.
@@ -130,6 +144,7 @@ class ShareFile(object):
:raise ValueError: If the encoding of ``lease_count_format`` is too
large or if it is not a single format character.
"""
precondition((max_size is not None) or (not create), max_size, create)
self._lease_count_format = _fix_lease_count_format(lease_count_format)
@@ -227,7 +242,7 @@ class ShareFile(object):
for i in range(num_leases):
data = f.read(self.LEASE_SIZE)
if data:
yield LeaseInfo().from_immutable_data(data)
yield LeaseInfo.from_immutable_data(data)
def add_lease(self, lease_info):
with open(self.home, 'rb+') as f:
@@ -238,13 +253,24 @@ class ShareFile(object):
self._write_lease_record(f, num_leases, lease_info)
self._write_encoded_num_leases(f, new_lease_count)
def renew_lease(self, renew_secret, new_expire_time):
def renew_lease(self, renew_secret, new_expire_time, allow_backdate=False):
# type: (bytes, int, bool) -> None
"""
Update the expiration time on an existing lease.
:param allow_backdate: If ``True`` then allow the new expiration time
to be before the current expiration time. Otherwise, make no
change when this is the case.
:raise IndexError: If there is no lease matching the given renew
secret.
"""
for i,lease in enumerate(self.get_leases()):
if timing_safe_compare(lease.renew_secret, renew_secret):
if lease.is_renew_secret(renew_secret):
# yup. See if we need to update the owner time.
if new_expire_time > lease.expiration_time:
if allow_backdate or new_expire_time > lease.get_expiration_time():
# yes
lease.expiration_time = new_expire_time
lease = lease.renew(new_expire_time)
with open(self.home, 'rb+') as f:
self._write_lease_record(f, i, lease)
return
@@ -267,7 +293,7 @@ class ShareFile(object):
"""
try:
self.renew_lease(lease_info.renew_secret,
lease_info.expiration_time)
lease_info.get_expiration_time())
except IndexError:
if lease_info.immutable_size() > available_space:
raise NoSpace()
@@ -284,7 +310,7 @@ class ShareFile(object):
leases = list(self.get_leases())
num_leases_removed = 0
for i,lease in enumerate(leases):
if timing_safe_compare(lease.cancel_secret, cancel_secret):
if lease.is_cancel_secret(cancel_secret):
leases[i] = None
num_leases_removed += 1
if not num_leases_removed:

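The new `allow_backdate` flag lets callers move a lease's expiration time backwards, something `renew_lease` otherwise silently declines to do; the lease-crawler test change later in this commit relies on it instead of rewriting lease records by hand. A usage sketch, assuming `sf` is a `ShareFile` holding one lease with renew secret `b"r" * 32` and expiration time 2000:

sf.renew_lease(b"r" * 32, 3000)                       # moves the expiration forward
sf.renew_lease(b"r" * 32, 1000)                       # no change: would back-date
sf.renew_lease(b"r" * 32, 1000, allow_backdate=True)  # explicitly back-dates
try:
    sf.renew_lease(b"x" * 32, 3000)                   # unknown renew secret
except IndexError:
    pass                                              # raised when no lease matches
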
View File

@@ -13,41 +13,111 @@ if PY2:
import struct, time
import attr
from allmydata.util.hashutil import timing_safe_compare
# struct format for representation of a lease in an immutable share
IMMUTABLE_FORMAT = ">L32s32sL"
# struct format for representation of a lease in a mutable share
MUTABLE_FORMAT = ">LL32s32s20s"
@attr.s(frozen=True)
class LeaseInfo(object):
def __init__(self, owner_num=None, renew_secret=None, cancel_secret=None,
expiration_time=None, nodeid=None):
self.owner_num = owner_num
self.renew_secret = renew_secret
self.cancel_secret = cancel_secret
self.expiration_time = expiration_time
if nodeid is not None:
assert isinstance(nodeid, bytes)
assert len(nodeid) == 20
self.nodeid = nodeid
"""
Represent the details of one lease, a marker which is intended to inform
the storage server how long to store a particular share.
"""
owner_num = attr.ib(default=None)
# Don't put secrets into the default string representation. This makes it
# slightly less likely the secrets will accidentally be leaked to
# someplace they're not meant to be.
renew_secret = attr.ib(default=None, repr=False)
cancel_secret = attr.ib(default=None, repr=False)
_expiration_time = attr.ib(default=None)
nodeid = attr.ib(default=None)
@nodeid.validator
def _validate_nodeid(self, attribute, value):
if value is not None:
if not isinstance(value, bytes):
raise ValueError(
"nodeid value must be bytes, not {!r}".format(value),
)
if len(value) != 20:
raise ValueError(
"nodeid value must be 20 bytes long, not {!r}".format(value),
)
return None
def get_expiration_time(self):
return self.expiration_time
# type: () -> float
"""
Retrieve a POSIX timestamp representing the time at which this lease is
set to expire.
"""
return self._expiration_time
def renew(self, new_expire_time):
# type: (float) -> LeaseInfo
"""
Create a new lease the same as this one but with a new expiration time.
:param new_expire_time: The new expiration time.
:return: The new lease info.
"""
return attr.assoc(
self,
_expiration_time=new_expire_time,
)
def is_renew_secret(self, candidate_secret):
# type: (bytes) -> bool
"""
Check a string to see if it is the correct renew secret.
:return: ``True`` if it is the correct renew secret, ``False``
otherwise.
"""
return timing_safe_compare(self.renew_secret, candidate_secret)
def is_cancel_secret(self, candidate_secret):
# type: (bytes) -> bool
"""
Check a string to see if it is the correct cancel secret.
:return: ``True`` if it is the correct cancel secret, ``False``
otherwise.
"""
return timing_safe_compare(self.cancel_secret, candidate_secret)
def get_grant_renew_time_time(self):
# hack, based upon fixed 31day expiration period
return self.expiration_time - 31*24*60*60
return self._expiration_time - 31*24*60*60
def get_age(self):
return time.time() - self.get_grant_renew_time_time()
def from_immutable_data(self, data):
(self.owner_num,
self.renew_secret,
self.cancel_secret,
self.expiration_time) = struct.unpack(IMMUTABLE_FORMAT, data)
self.nodeid = None
return self
@classmethod
def from_immutable_data(cls, data):
"""
Create a new instance from the encoded data given.
:param data: A lease serialized using the immutable-share-file format.
"""
names = [
"owner_num",
"renew_secret",
"cancel_secret",
"expiration_time",
]
values = struct.unpack(">L32s32sL", data)
return cls(nodeid=None, **dict(zip(names, values)))
def immutable_size(self):
"""
@@ -67,18 +137,28 @@ class LeaseInfo(object):
return struct.pack(IMMUTABLE_FORMAT,
self.owner_num,
self.renew_secret, self.cancel_secret,
int(self.expiration_time))
int(self._expiration_time))
def to_mutable_data(self):
return struct.pack(MUTABLE_FORMAT,
self.owner_num,
int(self.expiration_time),
int(self._expiration_time),
self.renew_secret, self.cancel_secret,
self.nodeid)
def from_mutable_data(self, data):
(self.owner_num,
self.expiration_time,
self.renew_secret, self.cancel_secret,
self.nodeid) = struct.unpack(MUTABLE_FORMAT, data)
return self
@classmethod
def from_mutable_data(cls, data):
"""
Create a new instance from the encoded data given.
:param data: A lease serialized using the mutable-share-file format.
"""
names = [
"owner_num",
"expiration_time",
"renew_secret",
"cancel_secret",
"nodeid",
]
values = struct.unpack(">LL32s32s20s", data)
return cls(**dict(zip(names, values)))

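`LeaseInfo` is now a frozen `attrs` class: renewal returns a fresh instance instead of mutating `expiration_time` in place, and the secrets are checked through timing-safe helpers rather than compared directly. A small usage sketch with made-up secrets:

from allmydata.storage.lease import LeaseInfo

lease = LeaseInfo(
    owner_num=1,
    renew_secret=b"r" * 32,
    cancel_secret=b"c" * 32,
    expiration_time=2000,
)
assert lease.is_renew_secret(b"r" * 32)      # timing-safe comparison
assert not lease.is_cancel_secret(b"r" * 32)

renewed = lease.renew(3000)                  # a new instance, not a mutation
assert renewed.get_expiration_time() == 3000
assert lease.get_expiration_time() == 2000   # the original is unchanged (frozen)
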
View File

@@ -70,6 +70,20 @@ class MutableShareFile(object):
MAX_SIZE = MAX_MUTABLE_SHARE_SIZE
# TODO: decide upon a policy for max share size
@classmethod
def is_valid_header(cls, header):
# type: (bytes) -> bool
"""
Determine if the given bytes constitute a valid header for this type of
container.
:param header: Some bytes from the beginning of a container.
:return: ``True`` if the bytes could belong to this container,
``False`` otherwise.
"""
return header.startswith(cls.MAGIC)
def __init__(self, filename, parent=None):
self.home = filename
if os.path.exists(self.home):
@@ -80,7 +94,7 @@ class MutableShareFile(object):
write_enabler_nodeid, write_enabler,
data_length, extra_least_offset) = \
struct.unpack(">32s20s32sQQ", data)
if magic != self.MAGIC:
if not self.is_valid_header(data):
msg = "sharefile %s had magic '%r' but we wanted '%r'" % \
(filename, magic, self.MAGIC)
raise UnknownMutableContainerVersionError(msg)
@@ -257,7 +271,7 @@ class MutableShareFile(object):
f.seek(offset)
assert f.tell() == offset
data = f.read(self.LEASE_SIZE)
lease_info = LeaseInfo().from_mutable_data(data)
lease_info = LeaseInfo.from_mutable_data(data)
if lease_info.owner_num == 0:
return None
return lease_info
@@ -316,15 +330,26 @@ class MutableShareFile(object):
raise NoSpace()
self._write_lease_record(f, num_lease_slots, lease_info)
def renew_lease(self, renew_secret, new_expire_time):
def renew_lease(self, renew_secret, new_expire_time, allow_backdate=False):
# type: (bytes, int, bool) -> None
"""
Update the expiration time on an existing lease.
:param allow_backdate: If ``True`` then allow the new expiration time
to be before the current expiration time. Otherwise, make no
change when this is the case.
:raise IndexError: If there is no lease matching the given renew
secret.
"""
accepting_nodeids = set()
with open(self.home, 'rb+') as f:
for (leasenum,lease) in self._enumerate_leases(f):
if timing_safe_compare(lease.renew_secret, renew_secret):
if lease.is_renew_secret(renew_secret):
# yup. See if we need to update the owner time.
if new_expire_time > lease.expiration_time:
if allow_backdate or new_expire_time > lease.get_expiration_time():
# yes
lease.expiration_time = new_expire_time
lease = lease.renew(new_expire_time)
self._write_lease_record(f, leasenum, lease)
return
accepting_nodeids.add(lease.nodeid)
@@ -342,7 +367,7 @@ class MutableShareFile(object):
precondition(lease_info.owner_num != 0) # 0 means "no lease here"
try:
self.renew_lease(lease_info.renew_secret,
lease_info.expiration_time)
lease_info.get_expiration_time())
except IndexError:
self.add_lease(available_space, lease_info)
@@ -364,7 +389,7 @@ class MutableShareFile(object):
with open(self.home, 'rb+') as f:
for (leasenum,lease) in self._enumerate_leases(f):
accepting_nodeids.add(lease.nodeid)
if timing_safe_compare(lease.cancel_secret, cancel_secret):
if lease.is_cancel_secret(cancel_secret):
self._write_lease_record(f, leasenum, blank_lease)
modified += 1
else:
@@ -395,7 +420,7 @@ class MutableShareFile(object):
write_enabler_nodeid, write_enabler,
data_length, extra_least_offset) = \
struct.unpack(">32s20s32sQQ", data)
assert magic == self.MAGIC
assert self.is_valid_header(data)
return (write_enabler, write_enabler_nodeid)
def readv(self, readv):

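`MutableShareFile.is_valid_header` is simply a `MAGIC` prefix check, which is why the earlier inline `magic != self.MAGIC` comparisons collapse into it. Illustrative only:

from allmydata.storage.mutable import MutableShareFile

header = MutableShareFile.MAGIC + b"rest of the 32-byte header"
assert MutableShareFile.is_valid_header(header)
assert not MutableShareFile.is_valid_header(b"\x00" * 32)
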
View File

@@ -14,7 +14,7 @@ if PY2:
else:
from typing import Dict
import os, re, struct, time
import os, re, time
from foolscap.api import Referenceable
from foolscap.ipb import IRemoteReference
@@ -371,12 +371,12 @@ class StorageServer(service.MultiService, Referenceable):
for shnum, filename in self._get_bucket_shares(storage_index):
with open(filename, 'rb') as f:
header = f.read(32)
if header[:32] == MutableShareFile.MAGIC:
if MutableShareFile.is_valid_header(header):
sf = MutableShareFile(filename, self)
# note: if the share has been migrated, the renew_lease()
# call will throw an exception, with information to help the
# client update the lease.
elif header[:4] == struct.pack(">L", 1):
elif ShareFile.is_valid_header(header):
sf = ShareFile(filename)
else:
continue # non-sharefile

View File

@@ -17,8 +17,7 @@ from allmydata.storage.immutable import ShareFile
def get_share_file(filename):
with open(filename, "rb") as f:
prefix = f.read(32)
if prefix == MutableShareFile.MAGIC:
if MutableShareFile.is_valid_header(prefix):
return MutableShareFile(filename)
# otherwise assume it's immutable
return ShareFile(filename)

View File

@@ -1105,7 +1105,7 @@ def _corrupt_offset_of_uri_extension_to_force_short_read(data, debug=False):
def _corrupt_mutable_share_data(data, debug=False):
prefix = data[:32]
assert prefix == MutableShareFile.MAGIC, "This function is designed to corrupt mutable shares of v1, and the magic number doesn't look right: %r vs %r" % (prefix, MutableShareFile.MAGIC)
assert MutableShareFile.is_valid_header(prefix), "This function is designed to corrupt mutable shares of v1, and the magic number doesn't look right: %r vs %r" % (prefix, MutableShareFile.MAGIC)
data_offset = MutableShareFile.DATA_OFFSET
sharetype = data[data_offset:data_offset+1]
assert sharetype == b"\x00", "non-SDMF mutable shares not supported"

View File

@@ -672,11 +672,14 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
"""
iv_dir = self.getdir("introducer")
if not os.path.isdir(iv_dir):
_, port_endpoint = self.port_assigner.assign(reactor)
_, web_port_endpoint = self.port_assigner.assign(reactor)
main_location_hint, main_port_endpoint = self.port_assigner.assign(reactor)
introducer_config = (
u"[node]\n"
u"nickname = introducer \N{BLACK SMILING FACE}\n" +
u"web.port = {}\n".format(port_endpoint)
u"web.port = {}\n".format(web_port_endpoint) +
u"tub.port = {}\n".format(main_port_endpoint) +
u"tub.location = {}\n".format(main_location_hint)
).encode("utf-8")
fileutil.make_dirs(iv_dir)

View File

@@ -25,6 +25,11 @@ if PY2:
from past.builtins import unicode
from six import ensure_text
try:
from typing import Dict, Callable
except ImportError:
pass
import os
from base64 import b32encode
from functools import (
@@ -479,6 +484,18 @@ class GridTestMixin(object):
def set_up_grid(self, num_clients=1, num_servers=10,
client_config_hooks={}, oneshare=False):
"""
Create a Tahoe-LAFS storage grid.
:param num_clients: See ``NoNetworkGrid``
:param num_servers: See ``NoNetworkGrid``
:param client_config_hooks: See ``NoNetworkGrid``
:param bool oneshare: If ``True`` then the first client node is
configured with ``n == k == happy == 1``.
:return: ``None``
"""
# self.basedir must be set
port_assigner = SameProcessStreamEndpointAssigner()
port_assigner.setUp()
@@ -557,6 +574,15 @@ class GridTestMixin(object):
return sorted(shares)
def copy_shares(self, uri):
# type: (bytes) -> Dict[bytes, bytes]
"""
Read all of the share files for the given capability from the storage area
of the storage servers created by ``set_up_grid``.
:param bytes uri: A Tahoe-LAFS data capability.
:return: A ``dict`` mapping share file names to share file contents.
"""
shares = {}
for (shnum, serverid, sharefile) in self.find_uri_shares(uri):
with open(sharefile, "rb") as f:
@@ -601,10 +627,15 @@ class GridTestMixin(object):
f.write(corruptdata)
def corrupt_all_shares(self, uri, corruptor, debug=False):
# type: (bytes, Callable[[bytes, bool], bytes], bool) -> None
"""
Apply ``corruptor`` to the contents of all share files associated with a
given capability and replace the share file contents with its result.
"""
for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri):
with open(i_sharefile, "rb") as f:
sharedata = f.read()
corruptdata = corruptor(sharedata, debug=debug)
corruptdata = corruptor(sharedata, debug)
with open(i_sharefile, "wb") as f:
f.write(corruptdata)

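With `corrupt_all_shares` now passing `debug` positionally rather than as a keyword, a corruptor is any callable of shape (bytes, bool) -> bytes. A hypothetical helper in that shape (Python 3 spelling; `make_lsb_flipper` is not part of the codebase):

def make_lsb_flipper(which):
    """Build a corruptor that flips the least significant bit at offset `which`."""
    def _corruptor(share_data, debug):
        # type: (bytes, bool) -> bytes
        flipped = share_data[which] ^ 0x01
        return share_data[:which] + bytes([flipped]) + share_data[which + 1:]
    return _corruptor

# e.g. self.corrupt_all_shares(imm_uri, make_lsb_flipper(0))
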
View File

@@ -14,6 +14,11 @@ if PY2:
# a previous run. This asserts that the current code is capable of decoding
# shares from a previous version.
try:
from typing import Any
except ImportError:
pass
import six
import os
from twisted.trial import unittest
@@ -951,12 +956,52 @@ class Corruption(_Base, unittest.TestCase):
self.corrupt_shares_numbered(imm_uri, [2], _corruptor)
def _corrupt_set(self, ign, imm_uri, which, newvalue):
# type: (Any, bytes, int, int) -> None
"""
Replace a single byte in share file number 2 for the given capability
with a new byte.
:param imm_uri: Corrupt share number 2 belonging to this capability.
:param which: The byte position to replace.
:param newvalue: The new byte value to set in the share.
"""
log.msg("corrupt %d" % which)
def _corruptor(s, debug=False):
return s[:which] + bchr(newvalue) + s[which+1:]
self.corrupt_shares_numbered(imm_uri, [2], _corruptor)
def test_each_byte(self):
"""
Test share selection behavior of the downloader in the face of certain
kinds of data corruption.
1. upload a small share to the no-network grid
2. read all of the resulting share files out of the no-network storage servers
3. for each of
a. each byte of the share file version field
b. each byte of the immutable share version field
c. each byte of the immutable share data offset field
d. the most significant byte of the block_shares offset field
e. one of the bytes of one of the merkle trees
f. one of the bytes of the share hashes list
i. flip the least significant bit in all of the share files
ii. perform the download/check/restore process
4. add 2 ** 24 to the share file version number
5. perform the download/check/restore process
6. add 2 ** 24 to the share version number
7. perform the download/check/restore process
The download/check/restore process is:
1. attempt to download the data
2. assert that the recovered plaintext is correct
3. assert that only the "correct" share numbers were used to reconstruct the plaintext
4. restore all of the share files to their pristine condition
"""
# Setting catalog_detection=True performs an exhaustive test of the
# Downloader's response to corruption in the lsb of each byte of the
# 2070-byte share, with two goals: make sure we tolerate all forms of
@@ -1145,8 +1190,18 @@ class Corruption(_Base, unittest.TestCase):
return d
def _corrupt_flip_all(self, ign, imm_uri, which):
# type: (Any, bytes, int) -> None
"""
Flip the least significant bit at a given byte position in all share files
for the given capability.
"""
def _corruptor(s, debug=False):
return s[:which] + bchr(ord(s[which:which+1])^0x01) + s[which+1:]
# type: (bytes, bool) -> bytes
before_corruption = s[:which]
after_corruption = s[which+1:]
original_byte = s[which:which+1]
corrupt_byte = bchr(ord(original_byte) ^ 0x01)
return b"".join([before_corruption, corrupt_byte, after_corruption])
self.corrupt_all_shares(imm_uri, _corruptor)
class DownloadV2(_Base, unittest.TestCase):

View File

@@ -124,38 +124,6 @@ class FakeStatsProvider(object):
pass
class LeaseInfoTests(SyncTestCase):
"""
Tests for ``LeaseInfo``.
"""
@given(
strategies.tuples(
strategies.integers(min_value=0, max_value=2 ** 31 - 1),
strategies.binary(min_size=32, max_size=32),
strategies.binary(min_size=32, max_size=32),
strategies.integers(min_value=0, max_value=2 ** 31 - 1),
strategies.binary(min_size=20, max_size=20),
),
)
def test_immutable_size(self, initializer_args):
"""
``LeaseInfo.immutable_size`` returns the length of the result of
``LeaseInfo.to_immutable_data``.
``LeaseInfo.mutable_size`` returns the length of the result of
``LeaseInfo.to_mutable_data``.
"""
info = LeaseInfo(*initializer_args)
self.expectThat(
info.to_immutable_data(),
HasLength(info.immutable_size()),
)
self.expectThat(
info.to_mutable_data(),
HasLength(info.mutable_size()),
)
class Bucket(unittest.TestCase):
def make_workdir(self, name):
basedir = os.path.join("storage", "Bucket", name)
@@ -875,28 +843,28 @@ class Server(unittest.TestCase):
# Create a bucket:
rs0, cs0 = self.create_bucket_5_shares(ss, b"si0")
leases = list(ss.get_leases(b"si0"))
self.failUnlessEqual(len(leases), 1)
self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs0]))
(lease,) = ss.get_leases(b"si0")
self.assertTrue(lease.is_renew_secret(rs0))
rs1, cs1 = self.create_bucket_5_shares(ss, b"si1")
# take out a second lease on si1
rs2, cs2 = self.create_bucket_5_shares(ss, b"si1", 5, 0)
leases = list(ss.get_leases(b"si1"))
self.failUnlessEqual(len(leases), 2)
self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs1, rs2]))
(lease1, lease2) = ss.get_leases(b"si1")
self.assertTrue(lease1.is_renew_secret(rs1))
self.assertTrue(lease2.is_renew_secret(rs2))
# and a third lease, using add-lease
rs2a,cs2a = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)),
hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret)))
ss.remote_add_lease(b"si1", rs2a, cs2a)
leases = list(ss.get_leases(b"si1"))
self.failUnlessEqual(len(leases), 3)
self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs1, rs2, rs2a]))
(lease1, lease2, lease3) = ss.get_leases(b"si1")
self.assertTrue(lease1.is_renew_secret(rs1))
self.assertTrue(lease2.is_renew_secret(rs2))
self.assertTrue(lease3.is_renew_secret(rs2a))
# add-lease on a missing storage index is silently ignored
self.failUnlessEqual(ss.remote_add_lease(b"si18", b"", b""), None)
self.assertIsNone(ss.remote_add_lease(b"si18", b"", b""))
# check that si0 is readable
readers = ss.remote_get_buckets(b"si0")
@@ -955,7 +923,7 @@ class Server(unittest.TestCase):
# Start out with single lease created with bucket:
renewal_secret, cancel_secret = self.create_bucket_5_shares(ss, b"si0")
[lease] = ss.get_leases(b"si0")
self.assertEqual(lease.expiration_time, 123 + DEFAULT_RENEWAL_TIME)
self.assertEqual(lease.get_expiration_time(), 123 + DEFAULT_RENEWAL_TIME)
# Time passes:
clock.advance(123456)
@@ -963,7 +931,7 @@ class Server(unittest.TestCase):
# Adding a lease with matching renewal secret just renews it:
ss.remote_add_lease(b"si0", renewal_secret, cancel_secret)
[lease] = ss.get_leases(b"si0")
self.assertEqual(lease.expiration_time, 123 + 123456 + DEFAULT_RENEWAL_TIME)
self.assertEqual(lease.get_expiration_time(), 123 + 123456 + DEFAULT_RENEWAL_TIME)
def test_have_shares(self):
"""By default the StorageServer has no shares."""
@@ -1392,17 +1360,6 @@ class MutableServer(unittest.TestCase):
self.failUnlessEqual(a.cancel_secret, b.cancel_secret)
self.failUnlessEqual(a.nodeid, b.nodeid)
def compare_leases(self, leases_a, leases_b):
self.failUnlessEqual(len(leases_a), len(leases_b))
for i in range(len(leases_a)):
a = leases_a[i]
b = leases_b[i]
self.failUnlessEqual(a.owner_num, b.owner_num)
self.failUnlessEqual(a.renew_secret, b.renew_secret)
self.failUnlessEqual(a.cancel_secret, b.cancel_secret)
self.failUnlessEqual(a.nodeid, b.nodeid)
self.failUnlessEqual(a.expiration_time, b.expiration_time)
def test_leases(self):
ss = self.create("test_leases")
def secrets(n):
@@ -1483,11 +1440,11 @@ class MutableServer(unittest.TestCase):
self.failUnlessIn("I have leases accepted by nodeids:", e_s)
self.failUnlessIn("nodeids: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' .", e_s)
self.compare_leases(all_leases, list(s0.get_leases()))
self.assertEqual(all_leases, list(s0.get_leases()))
# reading shares should not modify the timestamp
read(b"si1", [], [(0,200)])
self.compare_leases(all_leases, list(s0.get_leases()))
self.assertEqual(all_leases, list(s0.get_leases()))
write(b"si1", secrets(0),
{0: ([], [(200, b"make me bigger")], None)}, [])
@@ -1521,7 +1478,7 @@ class MutableServer(unittest.TestCase):
"shares", storage_index_to_dir(b"si1"))
s0 = MutableShareFile(os.path.join(bucket_dir, "0"))
[lease] = s0.get_leases()
self.assertEqual(lease.expiration_time, 235 + DEFAULT_RENEWAL_TIME)
self.assertEqual(lease.get_expiration_time(), 235 + DEFAULT_RENEWAL_TIME)
# Time passes...
clock.advance(835)
@@ -1529,7 +1486,7 @@ class MutableServer(unittest.TestCase):
# Adding a lease renews it:
ss.remote_add_lease(b"si1", renew_secret, cancel_secret)
[lease] = s0.get_leases()
self.assertEqual(lease.expiration_time,
self.assertEqual(lease.get_expiration_time(),
235 + 835 + DEFAULT_RENEWAL_TIME)
def test_remove(self):
@@ -3256,6 +3213,46 @@ class ShareFileTests(unittest.TestCase):
self.assertEqual(before_data, after_data)
def test_renew_secret(self):
"""
A lease loaded from an immutable share file can have its renew secret
verified.
"""
renew_secret = b"r" * 32
cancel_secret = b"c" * 32
expiration_time = 2 ** 31
sf = self.get_sharefile()
lease = LeaseInfo(
owner_num=0,
renew_secret=renew_secret,
cancel_secret=cancel_secret,
expiration_time=expiration_time,
)
sf.add_lease(lease)
(loaded_lease,) = sf.get_leases()
self.assertTrue(loaded_lease.is_renew_secret(renew_secret))
def test_cancel_secret(self):
"""
A lease loaded from an immutable share file can have its cancel secret
verified.
"""
renew_secret = b"r" * 32
cancel_secret = b"c" * 32
expiration_time = 2 ** 31
sf = self.get_sharefile()
lease = LeaseInfo(
owner_num=0,
renew_secret=renew_secret,
cancel_secret=cancel_secret,
expiration_time=expiration_time,
)
sf.add_lease(lease)
(loaded_lease,) = sf.get_leases()
self.assertTrue(loaded_lease.is_cancel_secret(cancel_secret))
class MutableShareFileTests(unittest.TestCase):
"""
@@ -3365,3 +3362,91 @@ class MutableShareFileTests(unittest.TestCase):
# A read with a broken read vector is an error.
with self.assertRaises(AssertionError):
sf.readv(broken_readv)
class LeaseInfoTests(SyncTestCase):
"""
Tests for ``allmydata.storage.lease.LeaseInfo``.
"""
def test_is_renew_secret(self):
"""
``LeaseInfo.is_renew_secret`` returns ``True`` if the value given is the
renew secret.
"""
renew_secret = b"r" * 32
cancel_secret = b"c" * 32
lease = LeaseInfo(
owner_num=1,
renew_secret=renew_secret,
cancel_secret=cancel_secret,
)
self.assertTrue(lease.is_renew_secret(renew_secret))
def test_is_not_renew_secret(self):
"""
``LeaseInfo.is_renew_secret`` returns ``False`` if the value given is not
the renew secret.
"""
renew_secret = b"r" * 32
cancel_secret = b"c" * 32
lease = LeaseInfo(
owner_num=1,
renew_secret=renew_secret,
cancel_secret=cancel_secret,
)
self.assertFalse(lease.is_renew_secret(cancel_secret))
def test_is_cancel_secret(self):
"""
``LeaseInfo.is_cancel_secret`` returns ``True`` if the value given is the
cancel secret.
"""
renew_secret = b"r" * 32
cancel_secret = b"c" * 32
lease = LeaseInfo(
owner_num=1,
renew_secret=renew_secret,
cancel_secret=cancel_secret,
)
self.assertTrue(lease.is_cancel_secret(cancel_secret))
def test_is_not_cancel_secret(self):
"""
``LeaseInfo.is_cancel_secret`` returns ``False`` if the value given is not
the cancel secret.
"""
renew_secret = b"r" * 32
cancel_secret = b"c" * 32
lease = LeaseInfo(
owner_num=1,
renew_secret=renew_secret,
cancel_secret=cancel_secret,
)
self.assertFalse(lease.is_cancel_secret(renew_secret))
@given(
strategies.tuples(
strategies.integers(min_value=0, max_value=2 ** 31 - 1),
strategies.binary(min_size=32, max_size=32),
strategies.binary(min_size=32, max_size=32),
strategies.integers(min_value=0, max_value=2 ** 31 - 1),
strategies.binary(min_size=20, max_size=20),
),
)
def test_immutable_size(self, initializer_args):
"""
``LeaseInfo.immutable_size`` returns the length of the result of
``LeaseInfo.to_immutable_data``.
``LeaseInfo.mutable_size`` returns the length of the result of
``LeaseInfo.to_mutable_data``.
"""
info = LeaseInfo(*initializer_args)
self.expectThat(
info.to_immutable_data(),
HasLength(info.immutable_size()),
)
self.expectThat(
info.to_mutable_data(),
HasLength(info.mutable_size()),
)

View File

@@ -485,17 +485,7 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin):
return d
def backdate_lease(self, sf, renew_secret, new_expire_time):
# ShareFile.renew_lease ignores attempts to back-date a lease (i.e.
# "renew" a lease with a new_expire_time that is older than what the
# current lease has), so we have to reach inside it.
for i,lease in enumerate(sf.get_leases()):
if lease.renew_secret == renew_secret:
lease.expiration_time = new_expire_time
f = open(sf.home, 'rb+')
sf._write_lease_record(f, i, lease)
f.close()
return
raise IndexError("unable to renew non-existent lease")
sf.renew_lease(renew_secret, new_expire_time, allow_backdate=True)
def test_expire_age(self):
basedir = "storage/LeaseCrawler/expire_age"

View File

@@ -23,6 +23,7 @@ from twisted.internet import defer
from allmydata import uri
from allmydata.storage.mutable import MutableShareFile
from allmydata.storage.immutable import ShareFile
from allmydata.storage.server import si_a2b
from allmydata.immutable import offloaded, upload
from allmydata.immutable.literal import LiteralFileNode
@@ -1290,9 +1291,9 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
# are sharefiles here
filename = os.path.join(dirpath, filenames[0])
# peek at the magic to see if it is a chk share
magic = open(filename, "rb").read(4)
if magic == b'\x00\x00\x00\x01':
break
with open(filename, "rb") as f:
if ShareFile.is_valid_header(f.read(32)):
break
else:
self.fail("unable to find any uri_extension files in %r"
% self.basedir)

View File

@@ -217,13 +217,8 @@ commands =
# your web browser.
[testenv:docs]
# we pin docutils because of https://sourceforge.net/p/docutils/bugs/301/
# which asserts when it reads links to .svg files (e.g. about.rst)
deps =
sphinx
docutils==0.12
recommonmark
sphinx_rtd_theme
-r docs/requirements.txt
# normal install is not needed for docs, and slows things down
skip_install = True
commands =