Merge remote-tracking branch 'origin/master' into 3525.test_status-no-mock
commit e9b6d64c51
@ -53,6 +53,8 @@ workflows:
     # Other assorted tasks and configurations
     - "lint":
         {}
+    - "codechecks3":
+        {}
     - "pyinstaller":
         {}
     - "deprecations":
@ -158,6 +160,24 @@ jobs:
           command: |
             ~/.local/bin/tox -e codechecks

+  codechecks3:
+    docker:
+      - <<: *DOCKERHUB_AUTH
+        image: "circleci/python:3"
+
+    steps:
+      - "checkout"
+
+      - run:
+          name: "Install tox"
+          command: |
+            pip install --user tox
+
+      - run:
+          name: "Static-ish code checks"
+          command: |
+            ~/.local/bin/tox -e codechecks3
+
   pyinstaller:
     docker:
       - <<: *DOCKERHUB_AUTH
@ -13,6 +13,57 @@ Specifically, it should be possible to implement a Tahoe-LAFS storage server wit
The Tahoe-LAFS client will also need to change, but it is not expected that it will be noticeably simplified by this change
(though this may be the first step towards simplifying it).

Glossary
--------

.. glossary::

   `Foolscap <https://github.com/warner/foolscap/>`_
      an RPC/RMI (Remote Procedure Call / Remote Method Invocation) protocol for use with Twisted

   storage server
      a Tahoe-LAFS process configured to offer storage and reachable over the network for store and retrieve operations

   storage service
      a Python object held in memory in the storage server which provides the implementation of the storage protocol

   introducer
      a Tahoe-LAFS process at a known location configured to re-publish announcements about the location of storage servers

   fURL
      a self-authenticating URL-like string which can be used to locate a remote object using the Foolscap protocol
      (the storage service is an example of such an object)

   NURL
      a self-authenticating URL-like string almost exactly like a fURL but without being tied to Foolscap

   swissnum
      a short random string which is part of a fURL and which acts as a shared secret to authorize clients to use a storage service

   lease
      state associated with a share informing a storage server of the duration of storage desired by a client

   share
      a single unit of client-provided arbitrary data to be stored by a storage server
      (in practice, one of the outputs of applying ZFEC encoding to some ciphertext with some additional metadata attached)

   bucket
      a group of one or more immutable shares held by a storage server and having a common storage index

   slot
      a group of one or more mutable shares held by a storage server and having a common storage index
      (sometimes "slot" is considered a synonym for "storage index of a slot")

   storage index
      a 16 byte string which can address a slot or a bucket
      (in practice, derived by hashing the encryption key associated with the contents of that slot or bucket)

   write enabler
      a short secret string which storage servers require to be presented before allowing mutation of any mutable share

   lease renew secret
      a short secret string which storage servers require to be presented before allowing a particular lease to be renewed
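
For purely illustrative purposes (this diff does not define the syntax, and every value here is a made-up placeholder), a Foolscap fURL has the general shape::

   pb://<tubid>@<hostname>:<port>/<swissnum>

where the *tubid* authenticates the server's TLS certificate and the *swissnum* is the shared secret described above.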

Motivation
----------

@ -87,6 +138,8 @@ The Foolscap-based protocol offers:
* A careful configuration of the TLS connection parameters *may* also offer **forward secrecy**.
  However, Tahoe-LAFS' use of Foolscap takes no steps to ensure this is the case.

* **Storage authorization** by way of a capability contained in the fURL addressing a storage service.

Discussion
!!!!!!!!!!

@ -117,6 +170,10 @@ there is no way to write data which appears legitimate to a legitimate client).
Therefore, **message confidentiality** is necessary when exchanging these secrets.
**Forward secrecy** is preferred so that an attacker recording an exchange today cannot launch this attack at some future point after compromising the necessary keys.

A storage service offers service only to some clients.
A client proves its authorization to use the storage service by presenting a shared secret taken from the fURL.
In this way **storage authorization** is performed to prevent disallowed parties from consuming any storage resources.

Functionality
-------------

@ -173,6 +230,10 @@ Additionally,
by continuing to interact using TLS,
Bob's client and Alice's storage node are assured of both **message authentication** and **message confidentiality**.

Bob's client further inspects the fURL for the *swissnum*.
When Bob's client issues HTTP requests to Alice's storage node it includes the *swissnum* in its requests.
**Storage authorization** has been achieved.

.. note::

   Foolscap TubIDs are 20 bytes (SHA1 digest of the certificate).
@ -302,6 +363,12 @@ one branch contains all of the share data;
another branch contains all of the lease data;
etc.

Authorization is required for all endpoints.
The standard HTTP authorization protocol is used.
The authentication *type* used is ``Tahoe-LAFS``.
The swissnum from the NURL used to locate the storage service is used as the *credentials*.
If credentials are not presented or the swissnum is not associated with a storage service then no storage processing is performed and the request receives an ``UNAUTHORIZED`` response.
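
For illustration only (the exact encoding of the credentials is not specified in this diff; the value below is a placeholder), such a request would carry a header like::

   Authorization: Tahoe-LAFS <swissnum>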

General
~~~~~~~

@ -328,19 +395,30 @@ For example::

``PUT /v1/lease/:storage_index``
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

-Create a new lease that applies to all shares for the given storage index.
+Either renew or create a new lease on the bucket addressed by ``storage_index``.
 The details of the lease are encoded in the request body.
 For example::

     {"renew-secret": "abcd", "cancel-secret": "efgh"}

-If there are no shares for the given ``storage_index``
-then do nothing and return ``NO CONTENT``.
-
 If the ``renew-secret`` value matches an existing lease
-then that lease will be renewed instead.
+then the expiration time of that lease will be changed to 31 days after the time of this operation.
+If it does not match an existing lease
+then a new lease will be created with this ``renew-secret`` which expires 31 days after the time of this operation.
+
+``renew-secret`` and ``cancel-secret`` values must be 32 bytes long.
+The server treats them as opaque values.
+:ref:`Share Leases` gives details about how the Tahoe-LAFS storage client constructs these values.
+
+In these cases the response is ``NO CONTENT`` with an empty body.
+
+It is possible that the storage server will have no shares for the given ``storage_index`` because:
+
+* no such shares have ever been uploaded.
+* a previous lease expired and the storage server reclaimed the storage by deleting the shares.
+
+In these cases the server takes no action and returns ``NOT FOUND``.
-
-The lease expires after 31 days.

 Discussion
 ``````````
@ -350,40 +428,13 @@ We chose to put these values into the request body to make the URL simpler.

 Several behaviors here are blindly copied from the Foolscap-based storage server protocol.

-* There is a cancel secret but there is no API to use it to cancel a lease.
+* There is a cancel secret but there is no API to use it to cancel a lease (see ticket:3768).
 * The lease period is hard-coded at 31 days.
 * There is no way to differentiate between success and an unknown **storage index**.
 * There are separate **add** and **renew** lease APIs.

 These are not necessarily ideal behaviors
 but they are adopted to avoid any *semantic* changes between the Foolscap- and HTTP-based protocols.
 It is expected that some or all of these behaviors may change in a future revision of the HTTP-based protocol.

-``POST /v1/lease/:storage_index``
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-Renew an existing lease for all shares for the given storage index.
-The details of the lease are encoded in the request body.
-For example::
-
-    {"renew-secret": "abcd"}
-
-If there are no shares for the given ``storage_index``
-then ``NOT FOUND`` is returned.
-
-If there is no lease with a matching ``renew-secret`` value on the given storage index
-then ``NOT FOUND`` is returned.
-In this case,
-if the storage index refers to mutable data
-then the response also includes a list of nodeids where the lease can be renewed.
-For example::
-
-    {"nodeids": ["aaa...", "bbb..."]}
-
-Otherwise,
-the matching lease's expiration time is changed to be 31 days from the time of this operation
-and ``NO CONTENT`` is returned.

 Immutable
 ---------
@ -422,23 +473,47 @@ However, we decided this does not matter because:
   therefore no proxy servers can perform any extra logging.
 * Tahoe-LAFS itself does not currently log HTTP request URLs.

 The response includes ``already-have`` and ``allocated`` for two reasons:

 * If an upload is interrupted and the client loses its local state that lets it know it already uploaded some shares
   then this allows it to discover this fact (by inspecting ``already-have``) and only upload the missing shares (indicated by ``allocated``).

 * If an upload has completed a client may still choose to re-balance storage by moving shares between servers.
   This might be because a server has become unavailable and a remaining server needs to store more shares for the upload.
   It could also just be that the client's preferred servers have changed.

 ``PUT /v1/immutable/:storage_index/:share_number``
 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

 Write data for the indicated share.
 The share number must belong to the storage index.
 The request body is the raw share data (i.e., ``application/octet-stream``).
-*Content-Range* requests are encouraged for large transfers.
+*Content-Range* requests are encouraged for large transfers to allow partially complete uploads to be resumed.
 For example,
-for a 1MiB share the data can be broken into 8 128KiB chunks.
-Each chunk can be *PUT* separately with the appropriate *Content-Range* header.
+a 1MiB share can be divided into eight separate 128KiB chunks.
+Each chunk can be uploaded in a separate request.
+Each request can include a *Content-Range* value indicating its placement within the complete share.
+If any one of these requests fails then at most 128KiB of upload work needs to be retried.

 The server must recognize when all of the data has been received and mark the share as complete
 (which it can do because it was informed of the size when the storage index was initialized).
 Clients should upload chunks in re-assembly order.
 Servers may reject out-of-order chunks for implementation simplicity.
 If an individual *PUT* fails then only a limited amount of effort is wasted on the necessary retry.

 .. think about copying https://developers.google.com/drive/api/v2/resumable-upload

 * When a chunk that does not complete the share is successfully uploaded the response is ``OK``.
 * When the chunk that completes the share is successfully uploaded the response is ``CREATED``.
 * If the *Content-Range* for a request covers part of the share that has already been uploaded the response is ``CONFLICT``.
   The response body indicates the range of share data that has yet to be uploaded.
   That is::

     { "required":
         [ { "begin": <byte position, inclusive>
           , "end": <byte position, exclusive>
           }
         ,
         ...
         ]
     }
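
As a sketch of the client side of this scheme (a hypothetical helper, not part of this commit; it assumes the ``requests`` library, a plain-text swissnum credential as described under *General* above, and the response codes listed above):

.. code-block:: python

   import requests

   CHUNK = 128 * 1024  # 128KiB, matching the example above

   def upload_share(base_url, swissnum, storage_index, share_number, data):
       """Upload one immutable share as separate, resumable PUT requests."""
       url = "{}/v1/immutable/{}/{}".format(base_url, storage_index, share_number)
       auth = {"Authorization": "Tahoe-LAFS {}".format(swissnum)}
       total = len(data)
       for begin in range(0, total, CHUNK):
           end = min(begin + CHUNK, total)  # exclusive
           headers = dict(auth)
           # Content-Range uses inclusive end positions, e.g. "bytes 0-15/48".
           headers["Content-Range"] = "bytes {}-{}/{}".format(begin, end - 1, total)
           response = requests.put(url, data=data[begin:end], headers=headers)
           if response.status_code == 409:
               # CONFLICT: this range was already uploaded; the body lists
               # the still-required ranges, so just move on to the next chunk.
               continue
           response.raise_for_status()  # 200 OK mid-upload, 201 CREATED at the end

On a retry after a failure, the same loop can simply be re-run; chunks the server already holds produce ``CONFLICT`` and are skipped.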

``POST /v1/immutable/:storage_index/:share_number/corrupt``
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

@ -576,6 +651,136 @@ Just like ``GET /v1/mutable/:storage_index``.
Advise the server the data read from the indicated share was corrupt.
Just like the immutable version.

Sample Interactions
-------------------

Immutable Data
~~~~~~~~~~~~~~

1. Create a bucket for storage index ``AAAAAAAAAAAAAAAA`` to hold two immutable shares, discovering that share ``1`` was already uploaded::

     POST /v1/immutable/AAAAAAAAAAAAAAAA
     {"renew-secret": "efgh", "cancel-secret": "ijkl",
      "share-numbers": [1, 7], "allocated-size": 48}

     200 OK
     {"already-have": [1], "allocated": [7]}

#. Upload the content for immutable share ``7``::

     PUT /v1/immutable/AAAAAAAAAAAAAAAA/7
     Content-Range: bytes 0-15/48
     <first 16 bytes of share data>

     200 OK

     PUT /v1/immutable/AAAAAAAAAAAAAAAA/7
     Content-Range: bytes 16-31/48
     <second 16 bytes of share data>

     200 OK

     PUT /v1/immutable/AAAAAAAAAAAAAAAA/7
     Content-Range: bytes 32-47/48
     <final 16 bytes of share data>

     201 CREATED

#. Download the content of the previously uploaded immutable share ``7``::

     GET /v1/immutable/AAAAAAAAAAAAAAAA?share=7&offset=0&size=48

     200 OK
     <complete 48 bytes of previously uploaded data>

#. Renew the lease on all immutable shares in bucket ``AAAAAAAAAAAAAAAA``::

     PUT /v1/lease/AAAAAAAAAAAAAAAA
     {"renew-secret": "efgh", "cancel-secret": "ijkl"}

     204 NO CONTENT

Mutable Data
~~~~~~~~~~~~

1. Create mutable share number ``3`` with ``10`` bytes of data in slot ``BBBBBBBBBBBBBBBB``::

     POST /v1/mutable/BBBBBBBBBBBBBBBB/read-test-write
     {
         "secrets": {
             "write-enabler": "abcd",
             "lease-renew": "efgh",
             "lease-cancel": "ijkl"
         },
         "test-write-vectors": {
             3: {
                 "test": [{
                     "offset": 0,
                     "size": 1,
                     "operator": "eq",
                     "specimen": ""
                 }],
                 "write": [{
                     "offset": 0,
                     "data": "xxxxxxxxxx"
                 }],
                 "new-length": 10
             }
         },
         "read-vector": []
     }

     200 OK
     {
         "success": true,
         "data": []
     }

#. Safely rewrite the contents of a known version of mutable share number ``3`` (or fail)::

     POST /v1/mutable/BBBBBBBBBBBBBBBB/read-test-write
     {
         "secrets": {
             "write-enabler": "abcd",
             "lease-renew": "efgh",
             "lease-cancel": "ijkl"
         },
         "test-write-vectors": {
             3: {
                 "test": [{
                     "offset": 0,
                     "size": <checkstring size>,
                     "operator": "eq",
                     "specimen": "<checkstring>"
                 }],
                 "write": [{
                     "offset": 0,
                     "data": "yyyyyyyyyy"
                 }],
                 "new-length": 10
             }
         },
         "read-vector": []
     }

     200 OK
     {
         "success": true,
         "data": []
     }

#. Download the contents of share number ``3``::

     GET /v1/mutable/BBBBBBBBBBBBBBBB?share=3&offset=0&size=10

     200 OK
     <complete 10 bytes of previously uploaded data>

#. Renew the lease on the previously uploaded mutable share in slot ``BBBBBBBBBBBBBBBB``::

     PUT /v1/lease/BBBBBBBBBBBBBBBB
     {"renew-secret": "efgh", "cancel-secret": "ijkl"}

     204 NO CONTENT

.. _RFC 7469: https://tools.ietf.org/html/rfc7469#section-2.4

.. _RFC 7049: https://tools.ietf.org/html/rfc7049#section-4

docs/specifications/derive_renewal_secret.py (new file, 87 lines)
@ -0,0 +1,87 @@
"""
This is a reference implementation of the lease renewal secret derivation
protocol in use by Tahoe-LAFS clients as of 1.16.0.
"""

from allmydata.util.base32 import (
    a2b as b32decode,
    b2a as b32encode,
)
from allmydata.util.hashutil import (
    tagged_hash,
    tagged_pair_hash,
)


def derive_renewal_secret(lease_secret: bytes, storage_index: bytes, tubid: bytes) -> bytes:
    assert len(lease_secret) == 32
    assert len(storage_index) == 16
    assert len(tubid) == 20

    bucket_renewal_tag = b"allmydata_bucket_renewal_secret_v1"
    file_renewal_tag = b"allmydata_file_renewal_secret_v1"
    client_renewal_tag = b"allmydata_client_renewal_secret_v1"

    client_renewal_secret = tagged_hash(lease_secret, client_renewal_tag)
    file_renewal_secret = tagged_pair_hash(
        file_renewal_tag,
        client_renewal_secret,
        storage_index,
    )
    peer_id = tubid

    return tagged_pair_hash(bucket_renewal_tag, file_renewal_secret, peer_id)

def demo():
    secret = b32encode(derive_renewal_secret(
        b"lease secretxxxxxxxxxxxxxxxxxxxx",
        b"storage indexxxx",
        b"tub idxxxxxxxxxxxxxx",
    )).decode("ascii")
    print("An example renewal secret: {}".format(secret))

def test():
    # These test vectors created by instrumenting Tahoe-LAFS
    # bb57fcfb50d4e01bbc4de2e23dbbf7a60c004031 to emit `self.renew_secret` in
    # allmydata.immutable.upload.ServerTracker.query and then uploading a
    # couple files to a couple different storage servers.
    test_vector = [
        dict(lease_secret=b"boity2cdh7jvl3ltaeebuiobbspjmbuopnwbde2yeh4k6x7jioga",
             storage_index=b"vrttmwlicrzbt7gh5qsooogr7u",
             tubid=b"v67jiisoty6ooyxlql5fuucitqiok2ic",
             expected=b"osd6wmc5vz4g3ukg64sitmzlfiaaordutrez7oxdp5kkze7zp5zq",
        ),
        dict(lease_secret=b"boity2cdh7jvl3ltaeebuiobbspjmbuopnwbde2yeh4k6x7jioga",
             storage_index=b"75gmmfts772ww4beiewc234o5e",
             tubid=b"v67jiisoty6ooyxlql5fuucitqiok2ic",
             expected=b"35itmusj7qm2pfimh62snbyxp3imreofhx4djr7i2fweta75szda",
        ),
        dict(lease_secret=b"boity2cdh7jvl3ltaeebuiobbspjmbuopnwbde2yeh4k6x7jioga",
             storage_index=b"75gmmfts772ww4beiewc234o5e",
             tubid=b"lh5fhobkjrmkqjmkxhy3yaonoociggpz",
             expected=b"srrlruge47ws3lm53vgdxprgqb6bz7cdblnuovdgtfkqrygrjm4q",
        ),
        dict(lease_secret=b"vacviff4xfqxsbp64tdr3frg3xnkcsuwt5jpyat2qxcm44bwu75a",
             storage_index=b"75gmmfts772ww4beiewc234o5e",
             tubid=b"lh5fhobkjrmkqjmkxhy3yaonoociggpz",
             expected=b"b4jledjiqjqekbm2erekzqumqzblegxi23i5ojva7g7xmqqnl5pq",
        ),
    ]

    for n, item in enumerate(test_vector):
        derived = b32encode(derive_renewal_secret(
            b32decode(item["lease_secret"]),
            b32decode(item["storage_index"]),
            b32decode(item["tubid"]),
        ))
        assert derived == item["expected"], \
            "Test vector {} failed: {} (expected) != {} (derived)".format(
                n,
                item["expected"],
                derived,
            )
    print("{} test vectors validated".format(len(test_vector)))

test()
demo()
@ -14,5 +14,6 @@ the data formats used by Tahoe.
    URI-extension
    mutable
    dirnodes
+   lease
    servers-of-happiness
    backends/raic

docs/specifications/lease.rst (new file, 69 lines)
@ -0,0 +1,69 @@
.. -*- coding: utf-8 -*-

.. _share leases:

Share Leases
============

A lease is a marker attached to a share indicating that some client has asked for that share to be retained for some amount of time.
The intent is to allow clients and servers to collaborate to determine which data should still be retained and which can be discarded to reclaim storage space.
Zero or more leases may be attached to any particular share.

Renewal Secrets
---------------

Each lease is uniquely identified by its **renewal secret**.
This is a 32 byte string which can be used to extend the validity period of that lease.

To a storage server a renewal secret is an opaque value which is only ever compared to other renewal secrets to determine equality.

Storage clients will typically want to follow a scheme to deterministically derive the renewal secret for a particular share from information the client already holds about that share.
This allows a client to maintain and renew a single long-lived lease without maintaining additional local state.

The scheme in use in Tahoe-LAFS as of 1.16.0 is as follows.

* The **netstring encoding** of a byte string is the concatenation of:

  * the ASCII encoding of the base 10 representation of the length of the string
  * ``":"``
  * the string itself
  * ``","``

* The **sha256d digest** is the **sha256 digest** of the **sha256 digest** of a string.
* The **sha256d tagged digest** is the **sha256d digest** of the concatenation of the **netstring encoding** of one string with one other unmodified string.
* The **sha256d tagged pair digest** is the **sha256d digest** of the concatenation of the **netstring encodings** of each of three strings.
* The **bucket renewal tag** is ``"allmydata_bucket_renewal_secret_v1"``.
* The **file renewal tag** is ``"allmydata_file_renewal_secret_v1"``.
* The **client renewal tag** is ``"allmydata_client_renewal_secret_v1"``.
* The **lease secret** is a 32 byte string, typically randomly generated once and then persisted for all future uses.
* The **client renewal secret** is the **sha256d tagged digest** of (**lease secret**, **client renewal tag**).
* The **storage index** is constructed using a capability-type-specific scheme.
  See ``storage_index_hash`` and ``ssk_storage_index_hash`` calls in ``src/allmydata/uri.py``.
* The **file renewal secret** is the **sha256d tagged pair digest** of (**file renewal tag**, **client renewal secret**, **storage index**).
* The **base32 encoding** is ``base64.b32encode`` lowercased and with trailing ``=`` stripped.
* The **peer id** is the **base32 encoding** of the SHA1 digest of the server's x509 certificate.
* The **renewal secret** is the **sha256d tagged pair digest** of (**bucket renewal tag**, **file renewal secret**, **peer id**).
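
The same derivation can also be written as a compact, dependency-free sketch (function and variable names here are illustrative, not allmydata's; only ``hashlib`` is assumed, and the peer id is taken as the raw 20 SHA1 bytes, matching the reference implementation below):

.. code-block:: python

   from hashlib import sha256

   def netstring(s: bytes) -> bytes:
       # base 10 ASCII length, ":", the string itself, ","
       return b"%d:%s," % (len(s), s)

   def sha256d(data: bytes) -> bytes:
       # sha256 digest of the sha256 digest
       return sha256(sha256(data).digest()).digest()

   def tagged_digest(first: bytes, second: bytes) -> bytes:
       # netstring-encode the first string; the second is used unmodified
       return sha256d(netstring(first) + second)

   def tagged_pair_digest(tag: bytes, first: bytes, second: bytes) -> bytes:
       # netstring-encode all three strings
       return sha256d(netstring(tag) + netstring(first) + netstring(second))

   def renewal_secret(lease_secret: bytes, storage_index: bytes, peer_id: bytes) -> bytes:
       client = tagged_digest(lease_secret, b"allmydata_client_renewal_secret_v1")
       file_ = tagged_pair_digest(
           b"allmydata_file_renewal_secret_v1", client, storage_index)
       return tagged_pair_digest(
           b"allmydata_bucket_renewal_secret_v1", file_, peer_id)

These helpers correspond to ``tagged_hash`` and ``tagged_pair_hash`` from ``allmydata.util.hashutil`` as used in the reference implementation.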

A reference implementation is available.

.. literalinclude:: derive_renewal_secret.py
   :language: python
   :linenos:

Cancel Secrets
--------------

Lease cancellation is unimplemented.
Nevertheless,
a cancel secret is sent by storage clients to storage servers and stored in lease records.

The scheme for deriving the **cancel secret** in use in Tahoe-LAFS as of 1.16.0 is similar to that used to derive the **renewal secret**.

The differences are:

* Use of **client renewal tag** is replaced by use of **client cancel tag**.
* Use of **file renewal tag** is replaced by use of **file cancel tag**.
* Use of **bucket renewal tag** is replaced by use of **bucket cancel tag**.
* **client cancel tag** is ``"allmydata_client_cancel_secret_v1"``.
* **file cancel tag** is ``"allmydata_file_cancel_secret_v1"``.
* **bucket cancel tag** is ``"allmydata_bucket_cancel_secret_v1"``.
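
Under the same assumptions as the renewal sketch above, that substitution amounts to:

.. code-block:: python

   def cancel_secret(lease_secret: bytes, storage_index: bytes, peer_id: bytes) -> bytes:
       # Same shape as renewal_secret above, with the cancel tags swapped in.
       client = tagged_digest(lease_secret, b"allmydata_client_cancel_secret_v1")
       file_ = tagged_pair_digest(
           b"allmydata_file_cancel_secret_v1", client, storage_index)
       return tagged_pair_digest(
           b"allmydata_bucket_cancel_secret_v1", file_, peer_id)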
newsfragments/3563.minor (new empty file)

newsfragments/3757.other (new file, 1 line)
@ -0,0 +1 @@
Refactored test_introducer in web tests to use custom base test cases.

newsfragments/3760.minor (new empty file)
newsfragments/3763.minor (new empty file)

newsfragments/3764.documentation (new file, 1 line)
@ -0,0 +1 @@
The Great Black Swamp proposed specification now includes sample interactions to demonstrate expected usage patterns.

newsfragments/3765.documentation (new file, 1 line)
@ -0,0 +1 @@
The Great Black Swamp proposed specification now includes a glossary.

newsfragments/3769.documentation (new file, 1 line)
@ -0,0 +1 @@
The Great Black Swamp specification now allows parallel upload of immutable share data.

newsfragments/3773.minor (new empty file)

newsfragments/3774.documentation (new file, 1 line)
@ -0,0 +1 @@
There is now a specification for the scheme which Tahoe-LAFS storage clients use to derive their lease renewal secrets.

newsfragments/3785.documentation (new file, 1 line)
@ -0,0 +1 @@
The Great Black Swamp specification now describes the required authorization scheme.
@ -154,25 +154,9 @@ class RIStorageServer(RemoteInterface):
         """
         return Any() # returns None now, but future versions might change

-    def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
-        """
-        Renew the lease on a given bucket, resetting the timer to 31 days.
-        Some networks will use this, some will not. If there is no bucket for
-        the given storage_index, IndexError will be raised.
-
-        For mutable shares, if the given renew_secret does not match an
-        existing lease, IndexError will be raised with a note listing the
-        server-nodeids on the existing leases, so leases on migrated shares
-        can be renewed. For immutable shares, IndexError (without the note)
-        will be raised.
-        """
-        return Any()
-
     def get_buckets(storage_index=StorageIndex):
         return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)



     def slot_readv(storage_index=StorageIndex,
                    shares=ListOf(int), readv=ReadVector):
         """Read a vector from the numbered shares associated with the given
@ -343,14 +327,6 @@ class IStorageServer(Interface):
         :see: ``RIStorageServer.add_lease``
         """

-    def renew_lease(
-        storage_index,
-        renew_secret,
-    ):
-        """
-        :see: ``RIStorageServer.renew_lease``
-        """
-
     def get_buckets(
         storage_index,
     ):
@ -793,7 +793,7 @@ def _tub_portlocation(config, get_local_addresses_sync, allocate_tcp_port):
     tubport = _convert_tub_port(cfg_tubport)

     for port in tubport.split(","):
-        if port in ("0", "tcp:0"):
+        if port in ("0", "tcp:0", "tcp:port=0", "tcp:0:interface=127.0.0.1"):
             raise PortAssignmentRequired()

     if cfg_location is None:
@ -49,6 +49,10 @@ from allmydata.storage.expirer import LeaseCheckingCrawler
 NUM_RE=re.compile("^[0-9]+$")


+# Number of seconds to add to expiration time on lease renewal.
+# For now it's not actually configurable, but maybe someday.
+DEFAULT_RENEWAL_TIME = 31 * 24 * 60 * 60
+

 @implementer(RIStorageServer, IStatsProducer)
 class StorageServer(service.MultiService, Referenceable):
@ -62,7 +66,8 @@ class StorageServer(service.MultiService, Referenceable):
                  expiration_mode="age",
                  expiration_override_lease_duration=None,
                  expiration_cutoff_date=None,
-                 expiration_sharetypes=("mutable", "immutable")):
+                 expiration_sharetypes=("mutable", "immutable"),
+                 get_current_time=time.time):
         service.MultiService.__init__(self)
         assert isinstance(nodeid, bytes)
         assert len(nodeid) == 20
@ -114,6 +119,7 @@ class StorageServer(service.MultiService, Referenceable):
                                    expiration_cutoff_date,
                                    expiration_sharetypes)
         self.lease_checker.setServiceParent(self)
+        self._get_current_time = get_current_time

     def __repr__(self):
         return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self.my_nodeid),)
@ -264,7 +270,7 @@ class StorageServer(service.MultiService, Referenceable):
         # owner_num is not for clients to set, but rather it should be
         # curried into the PersonalStorageServer instance that is dedicated
         # to a particular owner.
-        start = time.time()
+        start = self._get_current_time()
         self.count("allocate")
         alreadygot = set()
         bucketwriters = {} # k: shnum, v: BucketWriter
@ -277,7 +283,7 @@ class StorageServer(service.MultiService, Referenceable):
         # goes into the share files themselves. It could also be put into a
         # separate database. Note that the lease should not be added until
         # the BucketWriter has been closed.
-        expire_time = time.time() + 31*24*60*60
+        expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME
         lease_info = LeaseInfo(owner_num,
                                renew_secret, cancel_secret,
                                expire_time, self.my_nodeid)
@ -331,7 +337,7 @@ class StorageServer(service.MultiService, Referenceable):
         if bucketwriters:
             fileutil.make_dirs(os.path.join(self.sharedir, si_dir))

-        self.add_latency("allocate", time.time() - start)
+        self.add_latency("allocate", self._get_current_time() - start)
         return alreadygot, bucketwriters

     def _iter_share_files(self, storage_index):
@ -351,26 +357,26 @@ class StorageServer(service.MultiService, Referenceable):

     def remote_add_lease(self, storage_index, renew_secret, cancel_secret,
                          owner_num=1):
-        start = time.time()
+        start = self._get_current_time()
         self.count("add-lease")
-        new_expire_time = time.time() + 31*24*60*60
+        new_expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME
         lease_info = LeaseInfo(owner_num,
                                renew_secret, cancel_secret,
                                new_expire_time, self.my_nodeid)
         for sf in self._iter_share_files(storage_index):
             sf.add_or_renew_lease(lease_info)
-        self.add_latency("add-lease", time.time() - start)
+        self.add_latency("add-lease", self._get_current_time() - start)
         return None

     def remote_renew_lease(self, storage_index, renew_secret):
-        start = time.time()
+        start = self._get_current_time()
         self.count("renew")
-        new_expire_time = time.time() + 31*24*60*60
+        new_expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME
         found_buckets = False
         for sf in self._iter_share_files(storage_index):
             found_buckets = True
             sf.renew_lease(renew_secret, new_expire_time)
-        self.add_latency("renew", time.time() - start)
+        self.add_latency("renew", self._get_current_time() - start)
         if not found_buckets:
             raise IndexError("no such lease to renew")

@ -394,7 +400,7 @@ class StorageServer(service.MultiService, Referenceable):
             pass

     def remote_get_buckets(self, storage_index):
-        start = time.time()
+        start = self._get_current_time()
         self.count("get")
         si_s = si_b2a(storage_index)
         log.msg("storage: get_buckets %r" % si_s)
@ -402,7 +408,7 @@ class StorageServer(service.MultiService, Referenceable):
         for shnum, filename in self._get_bucket_shares(storage_index):
             bucketreaders[shnum] = BucketReader(self, filename,
                                                 storage_index, shnum)
-        self.add_latency("get", time.time() - start)
+        self.add_latency("get", self._get_current_time() - start)
         return bucketreaders

     def get_leases(self, storage_index):
@ -563,7 +569,7 @@ class StorageServer(service.MultiService, Referenceable):
         :return LeaseInfo: Information for a new lease for a share.
         """
         ownerid = 1 # TODO
-        expire_time = time.time() + 31*24*60*60 # one month
+        expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME
         lease_info = LeaseInfo(ownerid,
                                renew_secret, cancel_secret,
                                expire_time, self.my_nodeid)
@ -599,7 +605,7 @@ class StorageServer(service.MultiService, Referenceable):
         See ``allmydata.interfaces.RIStorageServer`` for details about other
         parameters and return value.
         """
-        start = time.time()
+        start = self._get_current_time()
         self.count("writev")
         si_s = si_b2a(storage_index)
         log.msg("storage: slot_writev %r" % si_s)
@ -640,7 +646,7 @@ class StorageServer(service.MultiService, Referenceable):
         self._add_or_renew_leases(remaining_shares, lease_info)

         # all done
-        self.add_latency("writev", time.time() - start)
+        self.add_latency("writev", self._get_current_time() - start)
         return (testv_is_good, read_data)

     def remote_slot_testv_and_readv_and_writev(self, storage_index,
@ -666,7 +672,7 @@ class StorageServer(service.MultiService, Referenceable):
         return share

     def remote_slot_readv(self, storage_index, shares, readv):
-        start = time.time()
+        start = self._get_current_time()
         self.count("readv")
         si_s = si_b2a(storage_index)
         lp = log.msg("storage: slot_readv %r %r" % (si_s, shares),
@ -675,7 +681,7 @@ class StorageServer(service.MultiService, Referenceable):
         # shares exist if there is a file for them
         bucketdir = os.path.join(self.sharedir, si_dir)
         if not os.path.isdir(bucketdir):
-            self.add_latency("readv", time.time() - start)
+            self.add_latency("readv", self._get_current_time() - start)
             return {}
         datavs = {}
         for sharenum_s in os.listdir(bucketdir):
@ -689,7 +695,7 @@ class StorageServer(service.MultiService, Referenceable):
             datavs[sharenum] = msf.readv(readv)
             log.msg("returning shares %s" % (list(datavs.keys()),),
                     facility="tahoe.storage", level=log.NOISY, parent=lp)
-            self.add_latency("readv", time.time() - start)
+            self.add_latency("readv", self._get_current_time() - start)
             return datavs

     def remote_advise_corrupt_share(self, share_type, storage_index, shnum,
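
Aside: the point of the injected ``get_current_time`` parameter above is that tests can substitute a deterministic clock for the wall clock. A minimal usage sketch, mirroring the new tests further down (``workdir`` is a placeholder here)::

    from twisted.internet.task import Clock

    clock = Clock()
    clock.advance(123)
    ss = StorageServer(workdir, b"\x00" * 20, get_current_time=clock.seconds)
    # A lease created now gets expiration_time == 123 + DEFAULT_RENEWAL_TIME,
    # and advancing `clock` makes renewal behavior fully deterministic.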
@ -965,17 +965,6 @@ class _StorageServer(object):
             cancel_secret,
         )

-    def renew_lease(
-        self,
-        storage_index,
-        renew_secret,
-    ):
-        return self._rref.callRemote(
-            "renew_lease",
-            storage_index,
-            renew_secret,
-        )
-
     def get_buckets(
         self,
         storage_index,
@ -6,7 +6,7 @@ from __future__ import division
 from __future__ import print_function
 from __future__ import unicode_literals

-from future.utils import PY2, native_str
+from future.utils import PY2
 if PY2:
     from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401

@ -530,6 +530,14 @@ def _stub_allocate_tcp_port():
     """
     return 999

+def _stub_none():
+    """
+    A function like ``_stub_allocate_tcp_port`` or ``_stub_get_local_addresses_sync``
+    but that returns an empty list, since ``allmydata.node._tub_portlocation``
+    requires callables for parameters 1 and 2 (counting from 0).
+    """
+    return []
+

 class TestMissingPorts(unittest.TestCase):
     """
@ -550,7 +558,7 @@ class TestMissingPorts(unittest.TestCase):
         )
         config = config_from_string(self.basedir, "portnum", config_data)
         with self.assertRaises(PortAssignmentRequired):
-            _tub_portlocation(config, None, None)
+            _tub_portlocation(config, _stub_none, _stub_none)

     def test_listen_on_zero_with_host(self):
         """
@ -563,10 +571,7 @@ class TestMissingPorts(unittest.TestCase):
         )
         config = config_from_string(self.basedir, "portnum", config_data)
         with self.assertRaises(PortAssignmentRequired):
-            _tub_portlocation(config, None, None)
-        test_listen_on_zero_with_host.todo = native_str(  # type: ignore
-            "https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3563"
-        )
+            _tub_portlocation(config, _stub_none, _stub_none)

     def test_parsing_tcp(self):
         """
@ -24,11 +24,12 @@ import gc

 from twisted.trial import unittest

 from twisted.internet import defer
+from twisted.internet.task import Clock

 import itertools
 from allmydata import interfaces
 from allmydata.util import fileutil, hashutil, base32
-from allmydata.storage.server import StorageServer
+from allmydata.storage.server import StorageServer, DEFAULT_RENEWAL_TIME
 from allmydata.storage.shares import get_share_file
 from allmydata.storage.mutable import MutableShareFile
 from allmydata.storage.immutable import BucketWriter, BucketReader, ShareFile
@ -168,7 +169,7 @@ class Bucket(unittest.TestCase):
         assert len(renewsecret) == 32
         cancelsecret = b'THIS LETS ME KILL YOUR FILE HAHA'
         assert len(cancelsecret) == 32
-        expirationtime = struct.pack('>L', 60*60*24*31) # 31 days in seconds
+        expirationtime = struct.pack('>L', DEFAULT_RENEWAL_TIME) # 31 days in seconds

         lease_data = ownernumber + renewsecret + cancelsecret + expirationtime
@ -354,10 +355,11 @@ class Server(unittest.TestCase):
         basedir = os.path.join("storage", "Server", name)
         return basedir

-    def create(self, name, reserved_space=0, klass=StorageServer):
+    def create(self, name, reserved_space=0, klass=StorageServer, get_current_time=time.time):
         workdir = self.workdir(name)
         ss = klass(workdir, b"\x00" * 20, reserved_space=reserved_space,
-                   stats_provider=FakeStatsProvider())
+                   stats_provider=FakeStatsProvider(),
+                   get_current_time=get_current_time)
         ss.setServiceParent(self.sparent)
         return ss

@ -384,8 +386,8 @@ class Server(unittest.TestCase):
         self.failUnlessIn(b'available-space', sv1)

     def allocate(self, ss, storage_index, sharenums, size, canary=None):
-        renew_secret = hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret))
-        cancel_secret = hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret))
+        renew_secret = hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret))
+        cancel_secret = hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))
         if not canary:
             canary = FakeCanary()
         return ss.remote_allocate_buckets(storage_index,
@ -646,6 +648,27 @@ class Server(unittest.TestCase):
         f2 = open(filename, "rb")
         self.failUnlessEqual(f2.read(5), b"start")

+    def create_bucket_5_shares(
+            self, ss, storage_index, expected_already=0, expected_writers=5
+    ):
+        """
+        Given a StorageServer, create a bucket with 5 shares and return renewal
+        and cancellation secrets.
+        """
+        canary = FakeCanary()
+        sharenums = list(range(5))
+        size = 100
+
+        # Creating a bucket also creates a lease:
+        rs, cs = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)),
+                  hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret)))
+        already, writers = ss.remote_allocate_buckets(storage_index, rs, cs,
+                                                      sharenums, size, canary)
+        self.failUnlessEqual(len(already), expected_already)
+        self.failUnlessEqual(len(writers), expected_writers)
+        for wb in writers.values():
+            wb.remote_close()
+        return rs, cs
+
     def test_leases(self):
         ss = self.create("test_leases")
@ -653,41 +676,23 @@ class Server(unittest.TestCase):
         sharenums = list(range(5))
         size = 100

-        rs0,cs0 = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
-                   hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
-        already,writers = ss.remote_allocate_buckets(b"si0", rs0, cs0,
-                                                     sharenums, size, canary)
-        self.failUnlessEqual(len(already), 0)
-        self.failUnlessEqual(len(writers), 5)
-        for wb in writers.values():
-            wb.remote_close()
-
+        # Create a bucket:
+        rs0, cs0 = self.create_bucket_5_shares(ss, b"si0")
         leases = list(ss.get_leases(b"si0"))
         self.failUnlessEqual(len(leases), 1)
         self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs0]))

-        rs1,cs1 = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
-                   hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
-        already,writers = ss.remote_allocate_buckets(b"si1", rs1, cs1,
-                                                     sharenums, size, canary)
-        for wb in writers.values():
-            wb.remote_close()
+        rs1, cs1 = self.create_bucket_5_shares(ss, b"si1")

         # take out a second lease on si1
-        rs2,cs2 = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
-                   hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
-        already,writers = ss.remote_allocate_buckets(b"si1", rs2, cs2,
-                                                     sharenums, size, canary)
-        self.failUnlessEqual(len(already), 5)
-        self.failUnlessEqual(len(writers), 0)
-
+        rs2, cs2 = self.create_bucket_5_shares(ss, b"si1", 5, 0)
         leases = list(ss.get_leases(b"si1"))
         self.failUnlessEqual(len(leases), 2)
         self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs1, rs2]))

         # and a third lease, using add-lease
-        rs2a,cs2a = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
-                     hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
+        rs2a,cs2a = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)),
+                     hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret)))
         ss.remote_add_lease(b"si1", rs2a, cs2a)
         leases = list(ss.get_leases(b"si1"))
         self.failUnlessEqual(len(leases), 3)
@ -715,10 +720,10 @@ class Server(unittest.TestCase):
                             "ss should not have a 'remote_cancel_lease' method/attribute")

         # test overlapping uploads
-        rs3,cs3 = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
-                   hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
-        rs4,cs4 = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
-                   hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
+        rs3,cs3 = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)),
+                   hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret)))
+        rs4,cs4 = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)),
+                   hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret)))
         already,writers = ss.remote_allocate_buckets(b"si3", rs3, cs3,
                                                      sharenums, size, canary)
         self.failUnlessEqual(len(already), 0)
@ -741,6 +746,28 @@ class Server(unittest.TestCase):
         leases = list(ss.get_leases(b"si3"))
         self.failUnlessEqual(len(leases), 2)

+    def test_immutable_add_lease_renews(self):
+        """
+        Adding a lease on an already leased immutable with the same secret just
+        renews it.
+        """
+        clock = Clock()
+        clock.advance(123)
+        ss = self.create("test_immutable_add_lease_renews", get_current_time=clock.seconds)
+
+        # Start out with single lease created with bucket:
+        renewal_secret, cancel_secret = self.create_bucket_5_shares(ss, b"si0")
+        [lease] = ss.get_leases(b"si0")
+        self.assertEqual(lease.expiration_time, 123 + DEFAULT_RENEWAL_TIME)
+
+        # Time passes:
+        clock.advance(123456)
+
+        # Adding a lease with matching renewal secret just renews it:
+        ss.remote_add_lease(b"si0", renewal_secret, cancel_secret)
+        [lease] = ss.get_leases(b"si0")
+        self.assertEqual(lease.expiration_time, 123 + 123456 + DEFAULT_RENEWAL_TIME)
+
     def test_have_shares(self):
         """By default the StorageServer has no shares."""
         workdir = self.workdir("test_have_shares")
@ -840,9 +867,10 @@ class MutableServer(unittest.TestCase):
         basedir = os.path.join("storage", "MutableServer", name)
         return basedir

-    def create(self, name):
+    def create(self, name, get_current_time=time.time):
         workdir = self.workdir(name)
-        ss = StorageServer(workdir, b"\x00" * 20)
+        ss = StorageServer(workdir, b"\x00" * 20,
+                           get_current_time=get_current_time)
         ss.setServiceParent(self.sparent)
         return ss

@ -1379,6 +1407,41 @@ class MutableServer(unittest.TestCase):
                          {0: ([], [(500, b"make me really bigger")], None)}, [])
         self.compare_leases_without_timestamps(all_leases, list(s0.get_leases()))

+    def test_mutable_add_lease_renews(self):
+        """
+        Adding a lease on an already leased mutable with the same secret just
+        renews it.
+        """
+        clock = Clock()
+        clock.advance(235)
+        ss = self.create("test_mutable_add_lease_renews",
+                         get_current_time=clock.seconds)
+        def secrets(n):
+            return ( self.write_enabler(b"we1"),
+                     self.renew_secret(b"we1-%d" % n),
+                     self.cancel_secret(b"we1-%d" % n) )
+        data = b"".join([ (b"%d" % i) * 10 for i in range(10) ])
+        write = ss.remote_slot_testv_and_readv_and_writev
+        write_enabler, renew_secret, cancel_secret = secrets(0)
+        rc = write(b"si1", (write_enabler, renew_secret, cancel_secret),
+                   {0: ([], [(0,data)], None)}, [])
+        self.failUnlessEqual(rc, (True, {}))
+
+        bucket_dir = os.path.join(self.workdir("test_mutable_add_lease_renews"),
+                                  "shares", storage_index_to_dir(b"si1"))
+        s0 = MutableShareFile(os.path.join(bucket_dir, "0"))
+        [lease] = s0.get_leases()
+        self.assertEqual(lease.expiration_time, 235 + DEFAULT_RENEWAL_TIME)
+
+        # Time passes...
+        clock.advance(835)
+
+        # Adding a lease renews it:
+        ss.remote_add_lease(b"si1", renew_secret, cancel_secret)
+        [lease] = s0.get_leases()
+        self.assertEqual(lease.expiration_time,
+                         235 + 835 + DEFAULT_RENEWAL_TIME)
+
     def test_remove(self):
         ss = self.create("test_remove")
         self.allocate(ss, b"si1", b"we1", next(self._lease_secret),
@ -15,9 +15,14 @@ from os.path import join

 from bs4 import BeautifulSoup

 from twisted.trial import unittest
 from twisted.internet import reactor
 from twisted.internet import defer
+from testtools.twistedsupport import succeeded
+
+from ..common import (
+    SyncTestCase,
+    AsyncTestCase,
+)

 from foolscap.api import (
     fireEventually,
@ -53,6 +58,11 @@ from ..common_web import (
     render,
 )

+from testtools.matchers import (
+    Equals,
+    AfterPreprocessing,
+)
+

 @defer.inlineCallbacks
 def create_introducer_webish(reactor, port_assigner, basedir):
@ -86,11 +96,10 @@ def create_introducer_webish(reactor, port_assigner, basedir):

     yield fireEventually(None)
     intro_node.startService()

     defer.returnValue((intro_node, ws))


-class IntroducerWeb(unittest.TestCase):
+class IntroducerWeb(AsyncTestCase):
     """
     Tests for web-facing functionality of an introducer node.
     """
@ -102,6 +111,7 @@ class IntroducerWeb(unittest.TestCase):
         # Anything using Foolscap leaves some timer trash in the reactor that
         # we have to arrange to have cleaned up.
         self.addCleanup(lambda: flushEventualQueue(None))
+        return super(IntroducerWeb, self).setUp()

     @defer.inlineCallbacks
     def test_welcome(self):
@ -187,7 +197,7 @@ class IntroducerWeb(unittest.TestCase):
         self.assertEqual(data["announcement_summary"], {})


-class IntroducerRootTests(unittest.TestCase):
+class IntroducerRootTests(SyncTestCase):
     """
     Tests for ``IntroducerRoot``.
     """
@ -223,15 +233,11 @@ class IntroducerRootTests(unittest.TestCase):
         )

         resource = IntroducerRoot(introducer_node)
-        response = json.loads(
-            self.successResultOf(
-                render(resource, {b"t": [b"json"]}),
-            ),
-        )
-        self.assertEqual(
-            response,
-            {
-                u"subscription_summary": {"arbitrary": 2},
-                u"announcement_summary": {"arbitrary": 1},
-            },
-        )
+        response = render(resource, {b"t": [b"json"]})
+        expected = {
+            u"subscription_summary": {"arbitrary": 2},
+            u"announcement_summary": {"arbitrary": 1},
+        }
+        self.assertThat(
+            response,
+            succeeded(AfterPreprocessing(json.loads, Equals(expected))))
tox.ini (9 changed lines)
@ -114,13 +114,6 @@ commands =
 [testenv:codechecks]
 basepython = python2.7
 setenv =
-    # Workaround an error when towncrier is run under the VCS hook,
-    # https://stackoverflow.com/a/4027726/624787:
-    #   File "/home/rpatterson/src/work/sfu/tahoe-lafs/.tox/codechecks/lib/python2.7/site-packages/towncrier/check.py", line 44, in __main
-    #     .decode(getattr(sys.stdout, "encoding", "utf8"))
-    #   `TypeError: decode() argument 1 must be string, not None`
-    PYTHONIOENCODING=utf_8
-
     # If no positional arguments are given, try to run the checks on the
     # entire codebase, including various pieces of supporting code.
     DEFAULT_FILES=src integration static misc setup.py
@ -190,7 +183,7 @@ passenv = TAHOE_LAFS_* PIP_* SUBUNITREPORTER_* USERPROFILE HOMEDRIVE HOMEPATH HO
 whitelist_externals =
     git
 deps =
-    # see comment in [testenv] about "certifi"
+    # see comment in [testenv] about "certifi"
     certifi
     towncrier==21.3.0
 commands =