Merge remote-tracking branch 'origin/master' into 3636.doc-toc-reorg

This commit is contained in:
Sajith Sasidharan 2021-10-19 12:14:49 -04:00
commit 40f4c0ea85
No known key found for this signature in database
GPG Key ID: 0C6DA6A29D5F02BA
92 changed files with 3853 additions and 2018 deletions

View File

@ -15,82 +15,69 @@ workflows:
ci:
jobs:
# Start with jobs testing various platforms.
# Every job that pulls a Docker image from Docker Hub needs to provide
# credentials for that pull operation to avoid being subjected to
# unauthenticated pull limits shared across all of CircleCI. Use this
# first job to define a yaml anchor that can be used to supply a
# CircleCI job context which makes Docker Hub credentials available in
# the environment.
#
# Contexts are managed in the CircleCI web interface:
#
# https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts
- "debian-9": &DOCKERHUB_CONTEXT
context: "dockerhub-auth"
- "debian-9":
{}
- "debian-10":
<<: *DOCKERHUB_CONTEXT
requires:
- "debian-9"
- "ubuntu-20-04":
<<: *DOCKERHUB_CONTEXT
{}
- "ubuntu-18-04":
<<: *DOCKERHUB_CONTEXT
requires:
- "ubuntu-20-04"
- "ubuntu-16-04":
<<: *DOCKERHUB_CONTEXT
requires:
- "ubuntu-20-04"
- "fedora-29":
<<: *DOCKERHUB_CONTEXT
{}
- "fedora-28":
<<: *DOCKERHUB_CONTEXT
requires:
- "fedora-29"
- "centos-8":
<<: *DOCKERHUB_CONTEXT
{}
- "nixos-19-09":
<<: *DOCKERHUB_CONTEXT
{}
- "nixos-21-05":
{}
# Test against PyPy 2.7
- "pypy27-buster":
<<: *DOCKERHUB_CONTEXT
{}
# Just one Python 3.6 configuration while the port is in-progress.
- "python36":
<<: *DOCKERHUB_CONTEXT
{}
# Other assorted tasks and configurations
- "lint":
<<: *DOCKERHUB_CONTEXT
{}
- "codechecks3":
{}
- "pyinstaller":
<<: *DOCKERHUB_CONTEXT
{}
- "deprecations":
<<: *DOCKERHUB_CONTEXT
{}
- "c-locale":
<<: *DOCKERHUB_CONTEXT
{}
# Any locale other than C or UTF-8.
- "another-locale":
<<: *DOCKERHUB_CONTEXT
{}
- "integration":
<<: *DOCKERHUB_CONTEXT
requires:
# If the unit test suite doesn't pass, don't bother running the
# integration tests.
- "debian-9"
- "typechecks":
<<: *DOCKERHUB_CONTEXT
{}
- "docs":
<<: *DOCKERHUB_CONTEXT
{}
images:
# Build the Docker images used by the ci jobs. This makes the ci jobs
@ -105,8 +92,16 @@ workflows:
- "master"
jobs:
- "build-image-debian-10":
<<: *DOCKERHUB_CONTEXT
# Every job that pushes a Docker image from Docker Hub needs to provide
# credentials. Use this first job to define a yaml anchor that can be
# used to supply a CircleCI job context which makes Docker Hub
# credentials available in the environment.
#
# Contexts are managed in the CircleCI web interface:
#
# https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts
- "build-image-debian-10": &DOCKERHUB_CONTEXT
context: "dockerhub-auth"
- "build-image-debian-9":
<<: *DOCKERHUB_CONTEXT
- "build-image-ubuntu-16-04":
@ -168,6 +163,24 @@ jobs:
command: |
~/.local/bin/tox -e codechecks
codechecks3:
docker:
- <<: *DOCKERHUB_AUTH
image: "circleci/python:3"
steps:
- "checkout"
- run:
name: "Install tox"
command: |
pip install --user tox
- run:
name: "Static-ish code checks"
command: |
~/.local/bin/tox -e codechecks3
pyinstaller:
docker:
- <<: *DOCKERHUB_AUTH
@ -261,6 +274,11 @@ jobs:
# in the project source checkout.
path: "/tmp/project/_trial_temp/test.log"
- store_artifacts: &STORE_ELIOT_LOG
# Despite passing --workdir /tmp to tox above, it still runs trial
# in the project source checkout.
path: "/tmp/project/eliot.log"
- store_artifacts: &STORE_OTHER_ARTIFACTS
# Store any other artifacts, too. This is handy to allow other jobs
# sharing most of the definition of this one to be able to
@ -403,6 +421,7 @@ jobs:
- run: *RUN_TESTS
- store_test_results: *STORE_TEST_RESULTS
- store_artifacts: *STORE_TEST_LOG
- store_artifacts: *STORE_ELIOT_LOG
- store_artifacts: *STORE_OTHER_ARTIFACTS
- run: *SUBMIT_COVERAGE
@ -422,8 +441,7 @@ jobs:
image: "tahoelafsci/fedora:29-py"
user: "nobody"
nixos-19-09:
nixos-19-09: &NIXOS
docker:
# Run in a highly Nix-capable environment.
- <<: *DOCKERHUB_AUTH
@ -431,6 +449,7 @@ jobs:
environment:
NIX_PATH: "nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.09-small.tar.gz"
SOURCE: "nix/"
steps:
- "checkout"
@ -447,7 +466,17 @@ jobs:
# build a couple simple little dependencies that don't take
# advantage of multiple cores and we get a little speedup by doing
# them in parallel.
nix-build --cores 3 --max-jobs 2 nix/
nix-build --cores 3 --max-jobs 2 "$SOURCE"
nixos-21-05:
<<: *NIXOS
environment:
# Note this doesn't look more similar to the 19.09 NIX_PATH URL because
# there was some internal shuffling by the NixOS project about how they
# publish stable revisions.
NIX_PATH: "nixpkgs=https://github.com/NixOS/nixpkgs/archive/d32b07e6df276d78e3640eb43882b80c9b2b3459.tar.gz"
SOURCE: "nix/py3.nix"
typechecks:
docker:

View File

@ -28,7 +28,7 @@ jobs:
- 3.9
include:
# On macOS don't bother with 3.6-3.8, just to get faster builds.
- os: macos-latest
- os: macos-10.15
python-version: 2.7
- os: macos-latest
python-version: 3.9
@ -76,13 +76,18 @@ jobs:
- name: Run tox for corresponding Python version
run: python -m tox
- name: Upload eliot.log in case of failure
- name: Upload eliot.log
uses: actions/upload-artifact@v1
if: failure()
with:
name: eliot.log
path: eliot.log
- name: Upload trial log
uses: actions/upload-artifact@v1
with:
name: test.log
path: _trial_temp/test.log
# Upload this job's coverage data to Coveralls. While there is a GitHub
# Action for this, as of Jan 2021 it does not support Python coverage
# files - only lcov files. Therefore, we use coveralls-python, the
@ -136,7 +141,7 @@ jobs:
# See notes about parallel builds on GitHub Actions at
# https://coveralls-python.readthedocs.io/en/latest/usage/configuration.html
finish-coverage-report:
needs:
needs:
- "coverage"
runs-on: "ubuntu-latest"
container: "python:3-slim"
@ -163,7 +168,7 @@ jobs:
- 3.9
include:
# On macOS don't bother with 3.6, just to get faster builds.
- os: macos-latest
- os: macos-10.15
python-version: 2.7
- os: macos-latest
python-version: 3.9
@ -173,12 +178,12 @@ jobs:
- name: Install Tor [Ubuntu]
if: matrix.os == 'ubuntu-latest'
run: sudo apt install tor
# TODO: See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3744.
# We have to use an older version of Tor for running integration
# tests on macOS.
- name: Install Tor [macOS, ${{ matrix.python-version }} ]
if: ${{ matrix.os == 'macos-latest' }}
if: ${{ contains(matrix.os, 'macos') }}
run: |
brew extract --version 0.4.5.8 tor homebrew/cask
brew install tor@0.4.5.8
@ -242,7 +247,7 @@ jobs:
fail-fast: false
matrix:
os:
- macos-latest
- macos-10.15
- windows-latest
- ubuntu-latest
python-version:

View File

@ -1188,7 +1188,7 @@ Precautions when Upgrading
.. _`#1915`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1915
.. _`#1926`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1926
.. _`message to the tahoe-dev mailing list`:
https://tahoe-lafs.org/pipermail/tahoe-dev/2013-March/008096.html
https://lists.tahoe-lafs.org/pipermail/tahoe-dev/2013-March/008079.html
Release 1.9.2 (2012-07-03)

View File

@ -1,5 +1,5 @@
======================================
Free and Open decentralized data store
Free and Open Decentralized Data Store
======================================
|image0|
@ -48,13 +48,17 @@ Please read more about Tahoe-LAFS architecture `here <docs/architecture.rst>`__.
✅ Installation
---------------
For more detailed instructions, read `docs/INSTALL.rst <docs/INSTALL.rst>`__ .
For more detailed instructions, read `Installing Tahoe-LAFS <docs/Installation/install-tahoe.rst>`__.
- `Building Tahoe-LAFS on Windows <docs/windows.rst>`__
- `OS-X Packaging <docs/OS-X.rst>`__
Once ``tahoe --version`` works, see `How to Run Tahoe-LAFS <docs/running.rst>`__ to learn how to set up your first Tahoe-LAFS node.
Once tahoe --version works, see `docs/running.rst <docs/running.rst>`__ to learn how to set up your first Tahoe-LAFS node.
🐍 Python 3 Support
--------------------
Python 3 support has been introduced starting with Tahoe-LAFS 1.16.0, alongside Python 2.
System administrators are advised to start running Tahoe on Python 3 and should expect Python 2 support to be dropped in a future version.
Please, feel free to file issues if you run into bugs while running Tahoe on Python 3.
🤖 Issues
@ -76,7 +80,7 @@ Get involved with the Tahoe-LAFS community:
- Join our `weekly conference calls <https://www.tahoe-lafs.org/trac/tahoe-lafs/wiki/WeeklyMeeting>`__ with core developers and interested community members.
- Subscribe to `the tahoe-dev mailing list <https://www.tahoe-lafs.org/cgi-bin/mailman/listinfo/tahoe-dev>`__, the community forum for discussion of Tahoe-LAFS design, implementation, and usage.
- Subscribe to `the tahoe-dev mailing list <https://lists.tahoe-lafs.org/mailman/listinfo/tahoe-dev>`__, the community forum for discussion of Tahoe-LAFS design, implementation, and usage.
🤗 Contributing
---------------
@ -98,6 +102,8 @@ Before authoring or reviewing a patch, please familiarize yourself with the `Cod
We would like to thank `Fosshost <https://fosshost.org>`__ for supporting us with hosting services. If your open source project needs help, you can apply for their support.
We are grateful to `Oregon State University Open Source Lab <https://osuosl.org/>`__ for hosting tahoe-dev mailing list.
❓ FAQ
------

View File

@ -65,4 +65,4 @@ If you are working on MacOS or a Linux distribution which does not have Tahoe-LA
If you are looking to hack on the source code or run pre-release code, we recommend you install Tahoe-LAFS on a `virtualenv` instance. To learn more, see :doc:`install-on-linux`.
You can always write to the `tahoe-dev mailing list <https://tahoe-lafs.org/cgi-bin/mailman/listinfo/tahoe-dev>`_ or chat on the `Libera.chat IRC <irc://irc.libera.chat/%23tahoe-lafs>`_ if you are not able to get Tahoe-LAFS up and running on your deployment.
You can always write to the `tahoe-dev mailing list <https://lists.tahoe-lafs.org/mailman/listinfo/tahoe-dev>`_ or chat on the `Libera.chat IRC <irc://irc.libera.chat/%23tahoe-lafs>`_ if you are not able to get Tahoe-LAFS up and running on your deployment.

View File

@ -177,7 +177,7 @@ mutable files, you may be able to avoid the potential for "rollback"
failure.
A future version of Tahoe will include a fix for this issue. Here is
[https://tahoe-lafs.org/pipermail/tahoe-dev/2008-May/000630.html the
[https://lists.tahoe-lafs.org/pipermail/tahoe-dev/2008-May/000628.html the
mailing list discussion] about how that future version will work.

View File

@ -268,7 +268,7 @@ For known security issues see
.PP
Tahoe-LAFS home page: <https://tahoe-lafs.org/>
.PP
tahoe-dev mailing list: <https://tahoe-lafs.org/cgi-bin/mailman/listinfo/tahoe-dev>
tahoe-dev mailing list: <https://lists.tahoe-lafs.org/mailman/listinfo/tahoe-dev>
.SH COPYRIGHT
.PP
Copyright \@ 2006\[en]2013 The Tahoe-LAFS Software Foundation

View File

@ -13,6 +13,57 @@ Specifically, it should be possible to implement a Tahoe-LAFS storage server wit
The Tahoe-LAFS client will also need to change but it is not expected that it will be noticeably simplified by this change
(though this may be the first step towards simplifying it).
Glossary
--------
.. glossary::
`Foolscap <https://github.com/warner/foolscap/>`_
an RPC/RMI (Remote Procedure Call / Remote Method Invocation) protocol for use with Twisted
storage server
a Tahoe-LAFS process configured to offer storage and reachable over the network for store and retrieve operations
storage service
a Python object held in memory in the storage server which provides the implementation of the storage protocol
introducer
a Tahoe-LAFS process at a known location configured to re-publish announcements about the location of storage servers
fURL
a self-authenticating URL-like string which can be used to locate a remote object using the Foolscap protocol
(the storage service is an example of such an object)
NURL
a self-authenticating URL-like string almost exactly like a fURL but without being tied to Foolscap
swissnum
a short random string which is part of a fURL and which acts as a shared secret to authorize clients to use a storage service
lease
state associated with a share informing a storage server of the duration of storage desired by a client
share
a single unit of client-provided arbitrary data to be stored by a storage server
(in practice, one of the outputs of applying ZFEC encoding to some ciphertext with some additional metadata attached)
bucket
a group of one or more immutable shares held by a storage server and having a common storage index
slot
a group of one or more mutable shares held by a storage server and having a common storage index
(sometimes "slot" is considered a synonym for "storage index of a slot")
storage index
a 16 byte string which can address a slot or a bucket
(in practice, derived by hashing the encryption key associated with contents of that slot or bucket)
write enabler
a short secret string which storage servers require to be presented before allowing mutation of any mutable share
lease renew secret
a short secret string which storage servers require to be presented before allowing a particular lease to be renewed
Motivation
----------
@ -87,6 +138,8 @@ The Foolscap-based protocol offers:
* A careful configuration of the TLS connection parameters *may* also offer **forward secrecy**.
However, Tahoe-LAFS' use of Foolscap takes no steps to ensure this is the case.
* **Storage authorization** by way of a capability contained in the fURL addressing a storage service.
Discussion
!!!!!!!!!!
@ -117,6 +170,10 @@ there is no way to write data which appears legitimate to a legitimate client).
Therefore, **message confidentiality** is necessary when exchanging these secrets.
**Forward secrecy** is preferred so that an attacker recording an exchange today cannot launch this attack at some future point after compromising the necessary keys.
A storage service offers service only to some clients.
A client proves their authorization to use the storage service by presenting a shared secret taken from the fURL.
In this way **storage authorization** is performed to prevent disallowed parties from consuming any storage resources.
Functionality
-------------
@ -173,6 +230,10 @@ Additionally,
by continuing to interact using TLS,
Bob's client and Alice's storage node are assured of both **message authentication** and **message confidentiality**.
Bob's client further inspects the fURL for the *swissnum*.
When Bob's client issues HTTP requests to Alice's storage node it includes the *swissnum* in its requests.
**Storage authorization** has been achieved.
.. note::
Foolscap TubIDs are 20 bytes (SHA1 digest of the certificate).
@ -302,6 +363,12 @@ one branch contains all of the share data;
another branch contains all of the lease data;
etc.
Authorization is required for all endpoints.
The standard HTTP authorization protocol is used.
The authentication *type* used is ``Tahoe-LAFS``.
The swissnum from the NURL used to locate the storage service is used as the *credentials*.
If credentials are not presented or the swissnum is not associated with a storage service then no storage processing is performed and the request receives an ``UNAUTHORIZED`` response.
General
~~~~~~~
@ -328,19 +395,30 @@ For example::
``PUT /v1/lease/:storage_index``
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Create a new lease that applies to all shares for the given storage index.
Either renew or create a new lease on the bucket addressed by ``storage_index``.
The details of the lease are encoded in the request body.
For example::
{"renew-secret": "abcd", "cancel-secret": "efgh"}
If there are no shares for the given ``storage_index``
then do nothing and return ``NO CONTENT``.
If the ``renew-secret`` value matches an existing lease
then that lease will be renewed instead.
then the expiration time of that lease will be changed to 31 days after the time of this operation.
If it does not match an existing lease
then a new lease will be created with this ``renew-secret`` which expires 31 days after the time of this operation.
``renew-secret`` and ``cancel-secret`` values must be 32 bytes long.
The server treats them as opaque values.
:ref:`Share Leases` gives details about how the Tahoe-LAFS storage client constructs these values.
In these cases the response is ``NO CONTENT`` with an empty body.
It is possible that the storage server will have no shares for the given ``storage_index`` because:
* no such shares have ever been uploaded.
* a previous lease expired and the storage server reclaimed the storage by deleting the shares.
In these cases the server takes no action and returns ``NOT FOUND``.
The lease expires after 31 days.
Discussion
``````````
@ -350,40 +428,13 @@ We chose to put these values into the request body to make the URL simpler.
Several behaviors here are blindly copied from the Foolscap-based storage server protocol.
* There is a cancel secret but there is no API to use it to cancel a lease.
* There is a cancel secret but there is no API to use it to cancel a lease (see ticket:3768).
* The lease period is hard-coded at 31 days.
* There is no way to differentiate between success and an unknown **storage index**.
* There are separate **add** and **renew** lease APIs.
These are not necessarily ideal behaviors
but they are adopted to avoid any *semantic* changes between the Foolscap- and HTTP-based protocols.
It is expected that some or all of these behaviors may change in a future revision of the HTTP-based protocol.
``POST /v1/lease/:storage_index``
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Renew an existing lease for all shares for the given storage index.
The details of the lease are encoded in the request body.
For example::
{"renew-secret": "abcd"}
If there are no shares for the given ``storage_index``
then ``NOT FOUND`` is returned.
If there is no lease with a matching ``renew-secret`` value on the given storage index
then ``NOT FOUND`` is returned.
In this case,
if the storage index refers to mutable data
then the response also includes a list of nodeids where the lease can be renewed.
For example::
{"nodeids": ["aaa...", "bbb..."]}
Otherwise,
the matching lease's expiration time is changed to be 31 days from the time of this operation
and ``NO CONTENT`` is returned.
Immutable
---------
@ -422,23 +473,81 @@ However, we decided this does not matter because:
therefore no proxy servers can perform any extra logging.
* Tahoe-LAFS itself does not currently log HTTP request URLs.
``PUT /v1/immutable/:storage_index/:share_number``
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
The response includes ``already-have`` and ``allocated`` for two reasons:
* If an upload is interrupted and the client loses its local state that lets it know it already uploaded some shares
then this allows it to discover this fact (by inspecting ``already-have``) and only upload the missing shares (indicated by ``allocated``).
* If an upload has completed a client may still choose to re-balance storage by moving shares between servers.
This might be because a server has become unavailable and a remaining server needs to store more shares for the upload.
It could also just be that the client's preferred servers have changed.
``PATCH /v1/immutable/:storage_index/:share_number``
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Write data for the indicated share.
The share number must belong to the storage index.
The request body is the raw share data (i.e., ``application/octet-stream``).
*Content-Range* requests are encouraged for large transfers.
*Content-Range* requests are encouraged for large transfers to allow partially complete uploads to be resumed.
For example,
for a 1MiB share the data can be broken in to 8 128KiB chunks.
Each chunk can be *PUT* separately with the appropriate *Content-Range* header.
a 1MiB share can be divided in to eight separate 128KiB chunks.
Each chunk can be uploaded in a separate request.
Each request can include a *Content-Range* value indicating its placement within the complete share.
If any one of these requests fails then at most 128KiB of upload work needs to be retried.
The server must recognize when all of the data has been received and mark the share as complete
(which it can do because it was informed of the size when the storage index was initialized).
Clients should upload chunks in re-assembly order.
Servers may reject out-of-order chunks for implementation simplicity.
If an individual *PUT* fails then only a limited amount of effort is wasted on the necessary retry.
.. think about copying https://developers.google.com/drive/api/v2/resumable-upload
* When a chunk that does not complete the share is successfully uploaded the response is ``OK``.
The response body indicates the range of share data that has yet to be uploaded.
That is::
{ "required":
[ { "begin": <byte position, inclusive>
, "end": <byte position, exclusive>
}
,
...
]
}
* When the chunk that completes the share is successfully uploaded the response is ``CREATED``.
* If the *Content-Range* for a request covers part of the share that has already been written,
and the data does not match the already written data,
the response is ``CONFLICT``.
At this point the only thing to do is abort the upload and start from scratch (see below).
``PUT /v1/immutable/:storage_index/:share_number/abort``
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
This cancels an *in-progress* upload.
The response code:
* When the upload is still in progress and therefore the abort has succeeded,
the response is ``OK``.
Future uploads can start from scratch with no pre-existing upload state stored on the server.
* If the upload has already finished, the response is 405 (Method Not Allowed)
and no change is made.
Discussion
``````````
``PUT`` verbs are only supposed to be used to replace the whole resource,
thus the use of ``PATCH``.
From RFC 7231::
An origin server that allows PUT on a given target resource MUST send
a 400 (Bad Request) response to a PUT request that contains a
Content-Range header field (Section 4.2 of [RFC7233]), since the
payload is likely to be partial content that has been mistakenly PUT
as a full representation. Partial content updates are possible by
targeting a separately identified resource with state that overlaps a
portion of the larger resource, or by using a different method that
has been specifically defined for partial updates (for example, the
PATCH method defined in [RFC5789]).
``POST /v1/immutable/:storage_index/:share_number/corrupt``
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@ -464,31 +573,20 @@ For example::
[1, 5]
``GET /v1/immutable/:storage_index?share=:s0&share=:sN&offset=o1&size=z0&offset=oN&size=zN``
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
``GET /v1/immutable/:storage_index/:share_number``
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Read data from the indicated immutable shares.
If ``share`` query parameters are given, select only those shares for reading.
Otherwise, select all shares present.
If ``size`` and ``offset`` query parameters are given,
only the portions thus identified of the selected shares are returned.
Otherwise, all data from the selected shares is returned.
The response body contains a mapping giving the read data.
For example::
{
3: ["foo", "bar"],
7: ["baz", "quux"]
}
Read a contiguous sequence of bytes from one share in one bucket.
The response body is the raw share data (i.e., ``application/octet-stream``).
The ``Range`` header may be used to request exactly one ``bytes`` range.
Interpretation and response behavior is as specified in RFC 7233 § 4.1.
Multiple ranges in a single request are *not* supported.
Discussion
``````````
Offset and size of the requested data are specified here as query arguments.
Instead, this information could be present in a ``Range`` header in the request.
This is the more obvious choice and leverages an HTTP feature built for exactly this use-case.
However, HTTP requires that the ``Content-Type`` of the response to "range requests" be ``multipart/...``.
Multiple ``bytes`` ranges are not supported.
HTTP requires that the ``Content-Type`` of the response in that case be ``multipart/...``.
The ``multipart`` major type brings along string sentinel delimiting as a means to frame the different response parts.
There are many drawbacks to this framing technique:
@ -496,6 +594,15 @@ There are many drawbacks to this framing technique:
2. It is resource-intensive to parse.
3. It is complex to parse safely [#]_ [#]_ [#]_ [#]_.
A previous revision of this specification allowed requesting one or more contiguous sequences from one or more shares.
This *superficially* mirrored the Foolscap based interface somewhat closely.
The interface was simplified to this version because this version is all that is required to let clients retrieve any desired information.
It only requires that the client issue multiple requests.
This can be done with pipelining or parallel requests to avoid an additional latency penalty.
In the future,
if there are performance goals,
benchmarks can demonstrate whether they are achieved by a more complicated interface or some other change.
Mutable
-------
@ -527,7 +634,6 @@ For example::
"test": [{
"offset": 3,
"size": 5,
"operator": "eq",
"specimen": "hello"
}, ...],
"write": [{
@ -553,6 +659,9 @@ For example::
}
}
A test vector or read vector that reads beyond the boundaries of existing data will return nothing for any bytes past the end.
As a result, if there is no data at all, an empty bytestring is returned no matter what the offset or length.
Reading
~~~~~~~
@ -576,6 +685,137 @@ Just like ``GET /v1/mutable/:storage_index``.
Advise the server the data read from the indicated share was corrupt.
Just like the immutable version.
Sample Interactions
-------------------
Immutable Data
~~~~~~~~~~~~~~
1. Create a bucket for storage index ``AAAAAAAAAAAAAAAA`` to hold two immutable shares, discovering that share ``1`` was already uploaded::
POST /v1/immutable/AAAAAAAAAAAAAAAA
{"renew-secret": "efgh", "cancel-secret": "ijkl",
"share-numbers": [1, 7], "allocated-size": 48}
200 OK
{"already-have": [1], "allocated": [7]}
#. Upload the content for immutable share ``7``::
PATCH /v1/immutable/AAAAAAAAAAAAAAAA/7
Content-Range: bytes 0-15/48
<first 16 bytes of share data>
200 OK
PATCH /v1/immutable/AAAAAAAAAAAAAAAA/7
Content-Range: bytes 16-31/48
<second 16 bytes of share data>
200 OK
PATCH /v1/immutable/AAAAAAAAAAAAAAAA/7
Content-Range: bytes 32-47/48
<final 16 bytes of share data>
201 CREATED
#. Download the content of the previously uploaded immutable share ``7``::
GET /v1/immutable/AAAAAAAAAAAAAAAA?share=7&offset=0&size=48
200 OK
<complete 48 bytes of previously uploaded data>
#. Renew the lease on all immutable shares in bucket ``AAAAAAAAAAAAAAAA``::
PUT /v1/lease/AAAAAAAAAAAAAAAA
{"renew-secret": "efgh", "cancel-secret": "ijkl"}
204 NO CONTENT
Mutable Data
~~~~~~~~~~~~
1. Create mutable share number ``3`` with ``10`` bytes of data in slot ``BBBBBBBBBBBBBBBB``.
The special test vector of size 1 but empty bytes will only pass
if there is no existing share,
otherwise it will read a byte which won't match `b""`::
POST /v1/mutable/BBBBBBBBBBBBBBBB/read-test-write
{
"secrets": {
"write-enabler": "abcd",
"lease-renew": "efgh",
"lease-cancel": "ijkl"
},
"test-write-vectors": {
3: {
"test": [{
"offset": 0,
"size": 1,
"specimen": ""
}],
"write": [{
"offset": 0,
"data": "xxxxxxxxxx"
}],
"new-length": 10
}
},
"read-vector": []
}
200 OK
{
"success": true,
"data": []
}
#. Safely rewrite the contents of a known version of mutable share number ``3`` (or fail)::
POST /v1/mutable/BBBBBBBBBBBBBBBB/read-test-write
{
"secrets": {
"write-enabler": "abcd",
"lease-renew": "efgh",
"lease-cancel": "ijkl"
},
"test-write-vectors": {
3: {
"test": [{
"offset": 0,
"size": <length of checkstring>,
"specimen": "<checkstring>"
}],
"write": [{
"offset": 0,
"data": "yyyyyyyyyy"
}],
"new-length": 10
}
},
"read-vector": []
}
200 OK
{
"success": true,
"data": []
}
#. Download the contents of share number ``3``::
GET /v1/mutable/BBBBBBBBBBBBBBBB?share=3&offset=0&size=10
<complete 10 bytes of previously uploaded data>
#. Renew the lease on previously uploaded mutable share in slot ``BBBBBBBBBBBBBBBB``::
PUT /v1/lease/BBBBBBBBBBBBBBBB
{"renew-secret": "efgh", "cancel-secret": "ijkl"}
204 NO CONTENT
.. _RFC 7469: https://tools.ietf.org/html/rfc7469#section-2.4
.. _RFC 7049: https://tools.ietf.org/html/rfc7049#section-4

View File

@ -178,8 +178,8 @@ Announcing the Release Candidate
````````````````````````````````
The release-candidate should be announced by posting to the
mailing-list (tahoe-dev@tahoe-lafs.org). For example:
https://tahoe-lafs.org/pipermail/tahoe-dev/2020-October/009995.html
mailing-list (tahoe-dev@lists.tahoe-lafs.org). For example:
https://lists.tahoe-lafs.org/pipermail/tahoe-dev/2020-October/009978.html
Is The Release Done Yet?

View File

@ -238,7 +238,7 @@ You can chat with other users of and hackers of this software on the
#tahoe-lafs IRC channel at ``irc.libera.chat``, or on the `tahoe-dev mailing
list`_.
.. _tahoe-dev mailing list: https://tahoe-lafs.org/cgi-bin/mailman/listinfo/tahoe-dev
.. _tahoe-dev mailing list: https://lists.tahoe-lafs.org/mailman/listinfo/tahoe-dev
Complain

View File

@ -0,0 +1,87 @@
"""
This is a reference implementation of the lease renewal secret derivation
protocol in use by Tahoe-LAFS clients as of 1.16.0.
"""
from allmydata.util.base32 import (
a2b as b32decode,
b2a as b32encode,
)
from allmydata.util.hashutil import (
tagged_hash,
tagged_pair_hash,
)
def derive_renewal_secret(lease_secret: bytes, storage_index: bytes, tubid: bytes) -> bytes:
    """
    Derive the per-(client, file, server) lease renewal secret.

    The derivation is a three-stage chain of tagged hashes:
    lease secret -> client renewal secret -> file renewal secret ->
    bucket renewal secret (the returned value).

    :param lease_secret: the client's 32-byte master lease secret
    :param storage_index: the 16-byte storage index of the file
    :param tubid: the 20-byte Foolscap tub id of the storage server
    :return: the renewal secret presented to that server for that file
    """
    assert len(lease_secret) == 32
    assert len(storage_index) == 16
    assert len(tubid) == 20

    # Stage 1: bind the derivation to this client.
    client_renewal_secret = tagged_hash(
        lease_secret,
        b"allmydata_client_renewal_secret_v1",
    )
    # Stage 2: bind it to this particular file.
    file_renewal_secret = tagged_pair_hash(
        b"allmydata_file_renewal_secret_v1",
        client_renewal_secret,
        storage_index,
    )
    # Stage 3: bind it to this particular server (identified by tub id).
    return tagged_pair_hash(
        b"allmydata_bucket_renewal_secret_v1",
        file_renewal_secret,
        tubid,
    )
def demo():
    """
    Print one example renewal secret, derived from fixed dummy inputs of
    the required lengths (32, 16, and 20 bytes respectively).
    """
    raw = derive_renewal_secret(
        b"lease secretxxxxxxxxxxxxxxxxxxxx",
        b"storage indexxxx",
        b"tub idxxxxxxxxxxxxxx",
    )
    secret = b32encode(raw).decode("ascii")
    print("An example renewal secret: {}".format(secret))
def test():
    """
    Validate ``derive_renewal_secret`` against captured test vectors and
    print a summary line on success; raises ``AssertionError`` on mismatch.
    """
    # These test vectors created by instrumenting Tahoe-LAFS
    # bb57fcfb50d4e01bbc4de2e23dbbf7a60c004031 to emit `self.renew_secret` in
    # allmydata.immutable.upload.ServerTracker.query and then uploading a
    # couple files to a couple different storage servers.
    test_vector = [
        {
            "lease_secret": b"boity2cdh7jvl3ltaeebuiobbspjmbuopnwbde2yeh4k6x7jioga",
            "storage_index": b"vrttmwlicrzbt7gh5qsooogr7u",
            "tubid": b"v67jiisoty6ooyxlql5fuucitqiok2ic",
            "expected": b"osd6wmc5vz4g3ukg64sitmzlfiaaordutrez7oxdp5kkze7zp5zq",
        },
        {
            "lease_secret": b"boity2cdh7jvl3ltaeebuiobbspjmbuopnwbde2yeh4k6x7jioga",
            "storage_index": b"75gmmfts772ww4beiewc234o5e",
            "tubid": b"v67jiisoty6ooyxlql5fuucitqiok2ic",
            "expected": b"35itmusj7qm2pfimh62snbyxp3imreofhx4djr7i2fweta75szda",
        },
        {
            "lease_secret": b"boity2cdh7jvl3ltaeebuiobbspjmbuopnwbde2yeh4k6x7jioga",
            "storage_index": b"75gmmfts772ww4beiewc234o5e",
            "tubid": b"lh5fhobkjrmkqjmkxhy3yaonoociggpz",
            "expected": b"srrlruge47ws3lm53vgdxprgqb6bz7cdblnuovdgtfkqrygrjm4q",
        },
        {
            "lease_secret": b"vacviff4xfqxsbp64tdr3frg3xnkcsuwt5jpyat2qxcm44bwu75a",
            "storage_index": b"75gmmfts772ww4beiewc234o5e",
            "tubid": b"lh5fhobkjrmkqjmkxhy3yaonoociggpz",
            "expected": b"b4jledjiqjqekbm2erekzqumqzblegxi23i5ojva7g7xmqqnl5pq",
        },
    ]
    for n, item in enumerate(test_vector):
        # Inputs are stored base32-encoded; decode before deriving, then
        # re-encode the derived secret for comparison with the expected value.
        derived = b32encode(derive_renewal_secret(
            b32decode(item["lease_secret"]),
            b32decode(item["storage_index"]),
            b32decode(item["tubid"]),
        ))
        assert derived == item["expected"] , \
            "Test vector {} failed: {} (expected) != {} (derived)".format(
                n,
                item["expected"],
                derived,
            )
    print("{} test vectors validated".format(len(test_vector)))

test()
demo()

View File

@ -14,5 +14,6 @@ the data formats used by Tahoe.
URI-extension
mutable
dirnodes
lease
servers-of-happiness
backends/raic

View File

@ -0,0 +1,69 @@
.. -*- coding: utf-8 -*-
.. _share leases:
Share Leases
============
A lease is a marker attached to a share indicating that some client has asked for that share to be retained for some amount of time.
The intent is to allow clients and servers to collaborate to determine which data should still be retained and which can be discarded to reclaim storage space.
Zero or more leases may be attached to any particular share.
Renewal Secrets
---------------
Each lease is uniquely identified by its **renewal secret**.
This is a 32 byte string which can be used to extend the validity period of that lease.
To a storage server a renewal secret is an opaque value which is only ever compared to other renewal secrets to determine equality.
Storage clients will typically want to follow a scheme to deterministically derive the renewal secret for a particular share from information the client already holds about that share.
This allows a client to maintain and renew a single long-lived lease without maintaining additional local state.
The scheme in use in Tahoe-LAFS as of 1.16.0 is as follows.
* The **netstring encoding** of a byte string is the concatenation of:
* the ascii encoding of the base 10 representation of the length of the string
* ``":"``
* the string itself
* ``","``
* The **sha256d digest** is the **sha256 digest** of the **sha256 digest** of a string.
* The **sha256d tagged digest** is the **sha256d digest** of the concatenation of the **netstring encoding** of one string with one other unmodified string.
* The **sha256d tagged pair digest** is the **sha256d digest** of the concatenation of the **netstring encodings** of each of three strings.
* The **bucket renewal tag** is ``"allmydata_bucket_renewal_secret_v1"``.
* The **file renewal tag** is ``"allmydata_file_renewal_secret_v1"``.
* The **client renewal tag** is ``"allmydata_client_renewal_secret_v1"``.
* The **lease secret** is a 32 byte string, typically randomly generated once and then persisted for all future uses.
* The **client renewal secret** is the **sha256d tagged digest** of (**lease secret**, **client renewal tag**).
* The **storage index** is constructed using a capability-type-specific scheme.
See ``storage_index_hash`` and ``ssk_storage_index_hash`` calls in ``src/allmydata/uri.py``.
* The **file renewal secret** is the **sha256d tagged pair digest** of (**file renewal tag**, **client renewal secret**, **storage index**).
* The **base32 encoding** is ``base64.b32encode`` lowercased and with trailing ``=`` stripped.
* The **peer id** is the **base32 encoding** of the SHA1 digest of the server's x509 certificate.
* The **renewal secret** is the **sha256d tagged pair digest** of (**bucket renewal tag**, **file renewal secret**, **peer id**).
A reference implementation is available.
.. literalinclude:: derive_renewal_secret.py
:language: python
:linenos:
Cancel Secrets
--------------
Lease cancellation is unimplemented.
Nevertheless,
a cancel secret is sent by storage clients to storage servers and stored in lease records.
The scheme for deriving **cancel secret** in use in Tahoe-LAFS as of 1.16.0 is similar to that used to derive the **renewal secret**.
The differences are:
* Use of **client renewal tag** is replaced by use of **client cancel tag**.
* Use of **file renewal tag** is replaced by use of **file cancel tag**.
* Use of **bucket renewal tag** is replaced by use of **bucket cancel tag**.
* **client cancel tag** is ``"allmydata_client_cancel_secret_v1"``.
* **file cancel tag** is ``"allmydata_file_cancel_secret_v1"``.
* **bucket cancel tag** is ``"allmydata_bucket_cancel_secret_v1"``.

244
integration/test_i2p.py Normal file
View File

@ -0,0 +1,244 @@
"""
Integration tests for I2P support.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
import sys
from os.path import join, exists
from os import mkdir
from time import sleep
if PY2:
    # shutil.which is unavailable on Python 2; this stub always reports the
    # executable as missing, so the module-level Docker check below skips
    # the whole module.
    def which(path):
        # This will result in skipping I2P tests on Python 2. Oh well.
        return None
else:
    from shutil import which
from eliot import log_call
import pytest
import pytest_twisted
from . import util
from twisted.python.filepath import (
FilePath,
)
from twisted.internet.error import ProcessExitedAlready
from allmydata.test.common import (
write_introducer,
)
# The i2pd router used by these tests runs in a Docker container (see the
# i2p_network fixture), so Docker must be available.
if which("docker") is None:
    pytest.skip('Skipping I2P tests since Docker is unavailable', allow_module_level=True)

# Docker on Windows machines sometimes expects Windows-y Docker images, so just
# don't bother.
if sys.platform.startswith('win'):
    pytest.skip('Skipping I2P tests on Windows', allow_module_level=True)
@pytest.fixture
def i2p_network(reactor, temp_dir, request):
    """Fixture to start up local i2pd."""
    # i2pd prints "ephemeral keys" once its router identity is generated;
    # watch for that text to know the router is up.
    proto = util._MagicTextProtocol("ephemeral keys")
    reactor.spawnProcess(
        proto,
        which("docker"),
        (
            "docker", "run", "-p", "7656:7656", "purplei2p/i2pd",
            # Bad URL for reseeds, so it can't talk to other routers.
            "--reseed.urls", "http://localhost:1/",
        ),
    )

    def cleanup():
        # Kill the container's process at teardown; tolerate it having
        # already exited.
        try:
            proto.transport.signalProcess("KILL")
            util.block_with_timeout(proto.exited, reactor)
        except ProcessExitedAlready:
            pass
    request.addfinalizer(cleanup)

    # Block until the magic text is seen (or time out after 30 seconds).
    util.block_with_timeout(proto.magic_seen, reactor, timeout=30)
@pytest.fixture
@log_call(
    action_type=u"integration:i2p:introducer",
    include_args=["temp_dir", "flog_gatherer"],
    include_result=False,
)
def i2p_introducer(reactor, temp_dir, flog_gatherer, request):
    """
    Fixture that creates (if needed) and launches a Tahoe-LAFS introducer
    node listening on I2P, returning the running process transport.
    """
    config = '''
[node]
nickname = introducer_i2p
web.port = 4561
log_gatherer.furl = {log_furl}
'''.format(log_furl=flog_gatherer)

    intro_dir = join(temp_dir, 'introducer_i2p')
    print("making introducer", intro_dir)

    # Only create the introducer node if its directory doesn't already exist
    # (e.g. from an earlier run sharing the same temp_dir).
    if not exists(intro_dir):
        mkdir(intro_dir)
        done_proto = util._ProcessExitedProtocol()
        util._tahoe_runner_optional_coverage(
            done_proto,
            reactor,
            request,
            (
                'create-introducer',
                '--listen=i2p',
                intro_dir,
            ),
        )
        pytest_twisted.blockon(done_proto.done)

        # over-write the config file with our stuff
        with open(join(intro_dir, 'tahoe.cfg'), 'w') as f:
            f.write(config)

    # "tahoe run" is consistent across Linux/macOS/Windows, unlike the old
    # "start" command.
    protocol = util._MagicTextProtocol('introducer running')
    transport = util._tahoe_runner_optional_coverage(
        protocol,
        reactor,
        request,
        (
            'run',
            intro_dir,
        ),
    )

    def cleanup():
        # Terminate the introducer at teardown; tolerate it having already
        # exited.
        try:
            transport.signalProcess('TERM')
            util.block_with_timeout(protocol.exited, reactor)
        except ProcessExitedAlready:
            pass
    request.addfinalizer(cleanup)

    # Wait until the node announces it is running before handing it out.
    pytest_twisted.blockon(protocol.magic_seen)
    return transport
@pytest.fixture
def i2p_introducer_furl(i2p_introducer, temp_dir):
    """
    Wait for the running introducer (depended on via the ``i2p_introducer``
    fixture) to publish its furl file, then return the furl as a string.
    """
    furl_fname = join(temp_dir, 'introducer_i2p', 'private', 'introducer.furl')
    # Poll until the node writes the furl file.
    while not exists(furl_fname):
        print("Don't see {} yet".format(furl_fname))
        sleep(.1)
    # Use a context manager so the file handle is closed promptly rather
    # than leaked until garbage collection.
    with open(furl_fname, 'r') as f:
        return f.read()
@pytest_twisted.inlineCallbacks
def test_i2p_service_storage(reactor, request, temp_dir, flog_gatherer, i2p_network, i2p_introducer_furl):
    """
    Two I2P-only client nodes can form a grid: content uploaded through one
    node ("carol") can be retrieved through the other ("dave").
    """
    yield _create_anonymous_node(reactor, 'carol_i2p', 8008, request, temp_dir, flog_gatherer, i2p_network, i2p_introducer_furl)
    yield _create_anonymous_node(reactor, 'dave_i2p', 8009, request, temp_dir, flog_gatherer, i2p_network, i2p_introducer_furl)
    # ensure both nodes are connected to "a grid" by uploading
    # something via carol, and retrieve it using dave.
    gold_path = join(temp_dir, "gold")
    with open(gold_path, "w") as f:
        f.write(
            "The object-capability model is a computer security model. A "
            "capability describes a transferable right to perform one (or "
            "more) operations on a given object."
        )
    # XXX could use treq or similar to POST these to their respective
    # WUIs instead ...

    # Upload via carol's node using the CLI.
    proto = util._CollectOutputProtocol()
    reactor.spawnProcess(
        proto,
        sys.executable,
        (
            sys.executable, '-b', '-m', 'allmydata.scripts.runner',
            '-d', join(temp_dir, 'carol_i2p'),
            'put', gold_path,
        )
    )
    yield proto.done
    # The capability is the last whitespace-separated token of the output.
    cap = proto.output.getvalue().strip().split()[-1]
    print("TEH CAP!", cap)

    # Retrieve the same capability via dave's node.
    proto = util._CollectOutputProtocol(capture_stderr=False)
    reactor.spawnProcess(
        proto,
        sys.executable,
        (
            sys.executable, '-b', '-m', 'allmydata.scripts.runner',
            '-d', join(temp_dir, 'dave_i2p'),
            'get', cap,
        )
    )
    yield proto.done
    dave_got = proto.output.getvalue().strip()
    # Read the original bytes back with a context manager instead of leaking
    # the file handle, then confirm dave received exactly what carol stored.
    with open(gold_path, 'rb') as f:
        assert dave_got == f.read().strip()
@pytest_twisted.inlineCallbacks
def _create_anonymous_node(reactor, name, control_port, request, temp_dir, flog_gatherer, i2p_network, introducer_furl):
    """
    Create and launch a Tahoe-LAFS client node that hides its IP and
    listens/connects only over I2P.

    :param name: Nickname and directory name for the node.
    :param control_port: Base port number; the web port is derived from it.
    :param introducer_furl: The introducer to connect the node to.
    """
    node_dir = FilePath(temp_dir).child(name)
    # Derive the web port from the control port by a fixed offset,
    # presumably to avoid port collisions between nodes — TODO confirm.
    web_port = "tcp:{}:interface=localhost".format(control_port + 2000)

    print("creating", node_dir.path)
    node_dir.makedirs()
    proto = util._DumpOutputProtocol(None)
    # Create the node with "tahoe create-node" in a child process.
    reactor.spawnProcess(
        proto,
        sys.executable,
        (
            sys.executable, '-b', '-m', 'allmydata.scripts.runner',
            'create-node',
            '--nickname', name,
            '--introducer', introducer_furl,
            '--hide-ip',
            '--listen', 'i2p',
            node_dir.path,
        )
    )
    yield proto.done

    # Which services should this client connect to?
    write_introducer(node_dir, "default", introducer_furl)
    # Overwrite tahoe.cfg with our own configuration.  FilePath.open
    # returns a binary-mode file, hence the utf-8 encode before writing.
    with node_dir.child('tahoe.cfg').open('w') as f:
        node_config = '''
[node]
nickname = %(name)s
web.port = %(web_port)s
web.static = public_html
log_gatherer.furl = %(log_furl)s
[i2p]
enabled = true
[client]
shares.needed = 1
shares.happy = 1
shares.total = 2
''' % {
            'name': name,
            'web_port': web_port,
            'log_furl': flog_gatherer,
        }
        node_config = node_config.encode("utf-8")
        f.write(node_config)

    print("running")
    yield util._run_node(reactor, node_dir.path, request, None)
    print("okay, launched")

View File

@ -16,7 +16,7 @@ if git diff-index --quiet HEAD; then
fi
git config user.name 'Build Automation'
git config user.email 'tahoe-dev@tahoe-lafs.org'
git config user.email 'tahoe-dev@lists.tahoe-lafs.org'
git add tahoe-deps.json tahoe-ported.json
git commit -m "\

0
newsfragments/3525.minor Normal file
View File

0
newsfragments/3528.minor Normal file
View File

0
newsfragments/3563.minor Normal file
View File

0
newsfragments/3743.minor Normal file
View File

View File

@ -0,0 +1 @@
Documentation and installation links in the README have been fixed.

0
newsfragments/3751.minor Normal file
View File

1
newsfragments/3757.other Normal file
View File

@ -0,0 +1 @@
Refactored test_introducer in web tests to use custom base test cases

0
newsfragments/3759.minor Normal file
View File

0
newsfragments/3760.minor Normal file
View File

0
newsfragments/3763.minor Normal file
View File

View File

@ -0,0 +1 @@
The Great Black Swamp proposed specification now includes sample interactions to demonstrate expected usage patterns.

View File

@ -0,0 +1 @@
The Great Black Swamp proposed specification now includes a glossary.

View File

@ -0,0 +1 @@
The Great Black Swamp specification now allows parallel upload of immutable share data.

0
newsfragments/3773.minor Normal file
View File

View File

@ -0,0 +1 @@
There is now a specification for the scheme which Tahoe-LAFS storage clients use to derive their lease renewal secrets.

View File

@ -0,0 +1 @@
The Great Black Swamp proposed specification now has a simplified interface for reading data from immutable shares.

View File

@ -0,0 +1 @@
Fixed bug where share corruption events were not logged on storage servers running on Windows.

0
newsfragments/3781.minor Normal file
View File

View File

@ -0,0 +1 @@
tahoe-dev mailing list is now at tahoe-dev@lists.tahoe-lafs.org.

0
newsfragments/3784.minor Normal file
View File

View File

@ -0,0 +1 @@
The Great Black Swamp specification now describes the required authorization scheme.

View File

@ -0,0 +1 @@
tahoe-lafs now provides its statistics also in OpenMetrics format (for Prometheus et. al.) at `/statistics?t=openmetrics`.

0
newsfragments/3792.minor Normal file
View File

0
newsfragments/3793.minor Normal file
View File

0
newsfragments/3795.minor Normal file
View File

0
newsfragments/3797.minor Normal file
View File

0
newsfragments/3798.minor Normal file
View File

0
newsfragments/3799.minor Normal file
View File

View File

@ -0,0 +1 @@
When uploading an immutable, overlapping writes that include conflicting data are rejected. In practice, this likely didn't happen in real-world usage.

0
newsfragments/3805.minor Normal file
View File

0
newsfragments/3806.minor Normal file
View File

View File

@ -0,0 +1 @@
Tahoe-LAFS now supports running on NixOS 21.05 with Python 3.

0
newsfragments/3810.minor Normal file
View File

0
newsfragments/3812.minor Normal file
View File

View File

@ -0,0 +1 @@
The news file for future releases will include a section for changes with a security impact.

View File

@ -0,0 +1,19 @@
# Nix expression packaging collections-extended (not in nixpkgs at this time).
{ lib, buildPythonPackage, fetchPypi }:
buildPythonPackage rec {
  pname = "collections-extended";
  version = "1.0.3";

  src = fetchPypi {
    inherit pname version;
    sha256 = "0lb69x23asd68n0dgw6lzxfclavrp2764xsnh45jm97njdplznkw";
  };

  # Tests aren't in tarball, for 1.0.3 at least.
  doCheck = false;

  meta = with lib; {
    # Quoted: bare unquoted URLs are deprecated Nix syntax (RFC 45) and
    # rejected by newer tooling.
    homepage = "https://github.com/mlenzen/collections-extended";
    description = "Extra Python Collections - bags (multisets), setlists (unique list / indexed set), RangeMap and IndexedDict";
    license = licenses.asl20;
  };
}

View File

@ -2,22 +2,32 @@ self: super: {
python27 = super.python27.override {
packageOverrides = python-self: python-super: {
# eliot is not part of nixpkgs at all at this time.
eliot = python-self.callPackage ./eliot.nix { };
eliot = python-self.pythonPackages.callPackage ./eliot.nix { };
# NixOS autobahn package has trollius as a dependency, although
# it is optional. Trollius is unmaintained and fails on CI.
autobahn = python-super.callPackage ./autobahn.nix { };
autobahn = python-super.pythonPackages.callPackage ./autobahn.nix { };
# Porting to Python 3 is greatly aided by the future package. A
# slightly newer version than appears in nixos 19.09 is helpful.
future = python-super.callPackage ./future.nix { };
future = python-super.pythonPackages.callPackage ./future.nix { };
# Need version of pyutil that supports Python 3. The version in 19.09
# is too old.
pyutil = python-super.callPackage ./pyutil.nix { };
pyutil = python-super.pythonPackages.callPackage ./pyutil.nix { };
# Need a newer version of Twisted, too.
twisted = python-super.callPackage ./twisted.nix { };
twisted = python-super.pythonPackages.callPackage ./twisted.nix { };
# collections-extended is not part of nixpkgs at this time.
collections-extended = python-super.pythonPackages.callPackage ./collections-extended.nix { };
};
};
python39 = super.python39.override {
packageOverrides = python-self: python-super: {
# collections-extended is not part of nixpkgs at this time.
collections-extended = python-super.pythonPackages.callPackage ./collections-extended.nix { };
};
};
}

7
nix/py3.nix Normal file
View File

@ -0,0 +1,7 @@
# This is the main entrypoint for the Tahoe-LAFS derivation.
{ pkgs ? import <nixpkgs> { } }:
let
  # Add our Python packages to nixpkgs to simplify the expression for the
  # Tahoe-LAFS derivation.
  nixpkgsWithOverlays = pkgs.extend (import ./overlays.nix);
in
  # Evaluate the expression for our Tahoe-LAFS derivation.
  nixpkgsWithOverlays.python39.pkgs.callPackage ./tahoe-lafs.nix { }

View File

@ -72,10 +72,6 @@ python.pkgs.buildPythonPackage rec {
rm src/allmydata/test/test_connections.py
rm src/allmydata/test/cli/test_create.py
# Since we're deleting files, this complains they're missing. For now Nix
# is Python 2-only, anyway, so these tests don't add anything yet.
rm src/allmydata/test/test_python3.py
# Generate _version.py ourselves since we can't rely on the Python code
# extracting the information from the .git directory we excluded.
cat > src/allmydata/_version.py <<EOF
@ -101,7 +97,7 @@ EOF
setuptoolsTrial pyasn1 zope_interface
service-identity pyyaml magic-wormhole treq
eliot autobahn cryptography netifaces setuptools
future pyutil distro configparser
future pyutil distro configparser collections-extended
];
checkInputs = with python.pkgs; [
@ -111,6 +107,7 @@ EOF
beautifulsoup4
html5lib
tenacity
prometheus_client
];
checkPhase = ''

View File

@ -155,7 +155,7 @@ Planet Earth
[4] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.15.1/COPYING.GPL
[5] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.15.1/COPYING.TGPPL.rst
[6] https://tahoe-lafs.readthedocs.org/en/tahoe-lafs-1.15.1/INSTALL.html
[7] https://tahoe-lafs.org/cgi-bin/mailman/listinfo/tahoe-dev
[7] https://lists.tahoe-lafs.org/mailman/listinfo/tahoe-dev
[8] https://tahoe-lafs.org/trac/tahoe-lafs/roadmap
[9] https://github.com/tahoe-lafs/tahoe-lafs/blob/master/CREDITS
[10] https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Dev

View File

@ -137,6 +137,9 @@ install_requires = [
# Backported configparser for Python 2:
"configparser ; python_version < '3.0'",
# For the RangeMap datastructure.
"collections-extended",
]
setup_requires = [
@ -359,7 +362,7 @@ setup(name="tahoe-lafs", # also set in __init__.py
description='secure, decentralized, fault-tolerant file store',
long_description=open('README.rst', 'r', encoding='utf-8').read(),
author='the Tahoe-LAFS project',
author_email='tahoe-dev@tahoe-lafs.org',
author_email='tahoe-dev@lists.tahoe-lafs.org',
url='https://tahoe-lafs.org/',
license='GNU GPL', # see README.rst -- there is an alternative licence
cmdclass={"update_version": UpdateVersion,
@ -404,6 +407,8 @@ setup(name="tahoe-lafs", # also set in __init__.py
"tenacity",
"paramiko",
"pytest-timeout",
# Does our OpenMetrics endpoint adhere to the spec:
"prometheus-client == 0.11.0",
] + tor_requires + i2p_requires,
"tor": tor_requires,
"i2p": i2p_requires,

View File

@ -475,7 +475,9 @@ class Share(object):
# there was corruption somewhere in the given range
reason = "corruption in share[%d-%d): %s" % (start, start+offset,
str(f.value))
self._rref.callRemoteOnly("advise_corrupt_share", reason.encode("utf-8"))
return self._rref.callRemote(
"advise_corrupt_share", reason.encode("utf-8")
).addErrback(log.err, "Error from remote call to advise_corrupt_share")
def _satisfy_block_hash_tree(self, needed_hashes):
o_bh = self.actual_offsets["block_hashes"]

View File

@ -15,7 +15,7 @@ from zope.interface import implementer
from twisted.internet import defer
from allmydata.interfaces import IStorageBucketWriter, IStorageBucketReader, \
FileTooLargeError, HASH_SIZE
from allmydata.util import mathutil, observer, pipeline
from allmydata.util import mathutil, observer, pipeline, log
from allmydata.util.assertutil import precondition
from allmydata.storage.server import si_b2a
@ -254,8 +254,7 @@ class WriteBucketProxy(object):
return d
def abort(self):
return self._rref.callRemoteOnly("abort")
return self._rref.callRemote("abort").addErrback(log.err, "Error from remote call to abort an immutable write bucket")
def get_servername(self):
return self._server.get_name()

View File

@ -53,6 +53,14 @@ LeaseRenewSecret = Hash # used to protect lease renewal requests
LeaseCancelSecret = Hash # was used to protect lease cancellation requests
class DataTooLargeError(Exception):
"""The write went past the expected size of the bucket."""
class ConflictingWriteError(Exception):
"""Two writes happened to same immutable with different data."""
class RIBucketWriter(RemoteInterface):
""" Objects of this kind live on the server side. """
def write(offset=Offset, data=ShareData):
@ -91,9 +99,9 @@ class RIBucketReader(RemoteInterface):
TestVector = ListOf(TupleOf(Offset, ReadSize, bytes, bytes))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
# operator must be b"eq", typically length==len(specimen), but one can ensure
# writes don't happen to empty shares by setting length to 1 and specimen to
# b"". The operator is still used for wire compatibility with old versions.
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
@ -154,25 +162,9 @@ class RIStorageServer(RemoteInterface):
"""
return Any() # returns None now, but future versions might change
def renew_lease(storage_index=StorageIndex, renew_secret=LeaseRenewSecret):
"""
Renew the lease on a given bucket, resetting the timer to 31 days.
Some networks will use this, some will not. If there is no bucket for
the given storage_index, IndexError will be raised.
For mutable shares, if the given renew_secret does not match an
existing lease, IndexError will be raised with a note listing the
server-nodeids on the existing leases, so leases on migrated shares
can be renewed. For immutable shares, IndexError (without the note)
will be raised.
"""
return Any()
def get_buckets(storage_index=StorageIndex):
return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)
def slot_readv(storage_index=StorageIndex,
shares=ListOf(int), readv=ReadVector):
"""Read a vector from the numbered shares associated with the given
@ -343,14 +335,6 @@ class IStorageServer(Interface):
:see: ``RIStorageServer.add_lease``
"""
def renew_lease(
storage_index,
renew_secret,
):
"""
:see: ``RIStorageServer.renew_lease``
"""
def get_buckets(
storage_index,
):
@ -375,6 +359,12 @@ class IStorageServer(Interface):
):
"""
:see: ``RIStorageServer.slot_testv_readv_and_writev``
While the interface mostly matches, test vectors are simplified.
Instead of a tuple ``(offset, read_size, operator, expected_data)`` in
the original, for this method you need only pass in
``(offset, read_size, expected_data)``, with the operator implicitly
being ``b"eq"``.
"""
def advise_corrupt_share(

View File

@ -309,7 +309,7 @@ class SDMFSlotWriteProxy(object):
salt)
else:
checkstring = checkstring_or_seqnum
self._testvs = [(0, len(checkstring), b"eq", checkstring)]
self._testvs = [(0, len(checkstring), checkstring)]
def get_checkstring(self):
@ -318,7 +318,7 @@ class SDMFSlotWriteProxy(object):
server.
"""
if self._testvs:
return self._testvs[0][3]
return self._testvs[0][2]
return b""
@ -548,9 +548,9 @@ class SDMFSlotWriteProxy(object):
if not self._testvs:
# Our caller has not provided us with another checkstring
# yet, so we assume that we are writing a new share, and set
# a test vector that will allow a new share to be written.
# a test vector that will only allow a new share to be written.
self._testvs = []
self._testvs.append(tuple([0, 1, b"eq", b""]))
self._testvs.append(tuple([0, 1, b""]))
tw_vectors = {}
tw_vectors[self.shnum] = (self._testvs, datavs, None)
@ -889,7 +889,7 @@ class MDMFSlotWriteProxy(object):
self._testvs = []
else:
self._testvs = []
self._testvs.append((0, len(checkstring), b"eq", checkstring))
self._testvs.append((0, len(checkstring), checkstring))
def __repr__(self):
@ -1161,8 +1161,10 @@ class MDMFSlotWriteProxy(object):
"""I write the data vectors in datavs to the remote slot."""
tw_vectors = {}
if not self._testvs:
# Make sure we will only successfully write if the share didn't
# previously exist.
self._testvs = []
self._testvs.append(tuple([0, 1, b"eq", b""]))
self._testvs.append(tuple([0, 1, b""]))
if not self._written:
# Write a new checkstring to the share when we write it, so
# that we have something to check later.
@ -1170,7 +1172,7 @@ class MDMFSlotWriteProxy(object):
datavs.append((0, new_checkstring))
def _first_write():
self._written = True
self._testvs = [(0, len(new_checkstring), b"eq", new_checkstring)]
self._testvs = [(0, len(new_checkstring), new_checkstring)]
on_success = _first_write
tw_vectors[self.shnum] = (self._testvs, datavs, None)
d = self._storage_server.slot_testv_and_readv_and_writev(

View File

@ -607,13 +607,14 @@ class ServermapUpdater(object):
return d
def _do_read(self, server, storage_index, shnums, readv):
"""
If self._add_lease is true, a lease is added, and the result only fires
once the least has also been added.
"""
ss = server.get_storage_server()
if self._add_lease:
# send an add-lease message in parallel. The results are handled
# separately. This is sent before the slot_readv() so that we can
# be sure the add_lease is retired by the time slot_readv comes
# back (this relies upon our knowledge that the server code for
# add_lease is synchronous).
# separately.
renew_secret = self._node.get_renewal_secret(server)
cancel_secret = self._node.get_cancel_secret(server)
d2 = ss.add_lease(
@ -623,7 +624,16 @@ class ServermapUpdater(object):
)
# we ignore success
d2.addErrback(self._add_lease_failed, server, storage_index)
else:
d2 = defer.succeed(None)
d = ss.slot_readv(storage_index, shnums, readv)
def passthrough(result):
# Wait for d2, but fire with result of slot_readv() regardless of
# result of d2.
return d2.addBoth(lambda _: result)
d.addCallback(passthrough)
return d

View File

@ -793,7 +793,7 @@ def _tub_portlocation(config, get_local_addresses_sync, allocate_tcp_port):
tubport = _convert_tub_port(cfg_tubport)
for port in tubport.split(","):
if port in ("0", "tcp:0"):
if port in ("0", "tcp:0", "tcp:port=0", "tcp:0:interface=127.0.0.1"):
raise PortAssignmentRequired()
if cfg_location is None:

View File

@ -9,6 +9,7 @@ if PY2:
import os, sys
from six.moves import StringIO
from past.builtins import unicode
import six
try:
@ -115,23 +116,75 @@ for module in (create_node,):
def parse_options(argv, config=None):
if not config:
config = Options()
config.parseOptions(argv) # may raise usage.error
try:
config.parseOptions(argv)
except usage.error as e:
if six.PY2:
# On Python 2 the exception may hold non-ascii in a byte string.
# This makes it impossible to convert the exception to any kind of
# string using str() or unicode(). It could also hold non-ascii
# in a unicode string which still makes it difficult to convert it
# to a byte string later.
#
# So, reach inside and turn it into some entirely safe ascii byte
# strings that will survive being written to stdout without
# causing too much damage in the process.
#
# As a result, non-ascii will not be rendered correctly but
# instead as escape sequences. At least this can go away when
# we're done with Python 2 support.
raise usage.error(*(
arg.encode("ascii", errors="backslashreplace")
if isinstance(arg, unicode)
else arg.decode("utf-8").encode("ascii", errors="backslashreplace")
for arg
in e.args
))
raise
return config
def parse_or_exit(config, argv, stdout, stderr):
"""
Parse Tahoe-LAFS CLI arguments and return a configuration object if they
are valid.
def parse_or_exit_with_explanation(argv, stdout=sys.stdout):
config = Options()
If they are invalid, write an explanation to ``stdout`` and exit.
:param allmydata.scripts.runner.Options config: An instance of the
argument-parsing class to use.
:param [unicode] argv: The argument list to parse, including the name of the
program being run as ``argv[0]``.
:param stdout: The file-like object to use as stdout.
:param stderr: The file-like object to use as stderr.
:raise SystemExit: If there is an argument-parsing problem.
:return: ``config``, after using it to parse the argument list.
"""
try:
parse_options(argv, config=config)
parse_options(argv[1:], config=config)
except usage.error as e:
# `parse_options` may have the side-effect of initializing a
# "sub-option" of the given configuration, even if it ultimately
# raises an exception. For example, `tahoe run --invalid-option` will
# set `config.subOptions` to an instance of
# `allmydata.scripts.tahoe_run.RunOptions` and then raise a
# `usage.error` because `RunOptions` does not recognize
# `--invalid-option`. If `run` itself had a sub-options then the same
# thing could happen but with another layer of nesting. We can
# present the user with the most precise information about their usage
# error possible by finding the most "sub" of the sub-options and then
# showing that to the user along with the usage error.
c = config
while hasattr(c, 'subOptions'):
c = c.subOptions
print(str(c), file=stdout)
# On Python 2 the string may turn into a unicode string, e.g. the error
# may be unicode, in which case it will print funny. Once we're on
# Python 3 we can just drop the ensure_str().
print(six.ensure_str("%s: %s\n" % (sys.argv[0], e)), file=stdout)
exc_str = str(e)
exc_bytes = six.ensure_binary(exc_str, "utf-8")
msg_bytes = b"%s: %s\n" % (six.ensure_binary(argv[0]), exc_bytes)
print(six.ensure_text(msg_bytes, "utf-8"), file=stdout)
sys.exit(1)
return config
@ -183,31 +236,65 @@ def _maybe_enable_eliot_logging(options, reactor):
# Pass on the options so we can dispatch the subcommand.
return options
PYTHON_3_WARNING = ("Support for Python 3 is an incomplete work-in-progress."
" Use at your own risk.")
def run():
if six.PY3:
print(PYTHON_3_WARNING, file=sys.stderr)
def run(configFactory=Options, argv=sys.argv, stdout=sys.stdout, stderr=sys.stderr):
"""
Run a Tahoe-LAFS node.
:param configFactory: A zero-argument callable which creates the config
object to use to parse the argument list.
:param [str] argv: The argument list to use to configure the run.
:param stdout: The file-like object to use for stdout.
:param stderr: The file-like object to use for stderr.
:raise SystemExit: Always raised after the run is complete.
"""
if sys.platform == "win32":
from allmydata.windows.fixups import initialize
initialize()
# doesn't return: calls sys.exit(rc)
task.react(_run_with_reactor)
task.react(
lambda reactor: _run_with_reactor(
reactor,
configFactory(),
argv,
stdout,
stderr,
),
)
def _setup_coverage(reactor):
def _setup_coverage(reactor, argv):
"""
Arrange for coverage to be collected if the 'coverage' package is
installed
If coverage measurement was requested, start collecting coverage
measurements and arrange to record those measurements when the process is
done.
Coverage measurement is considered requested if ``"--coverage"`` is in
``argv`` (and it will be removed from ``argv`` if it is found). There
should be a ``.coveragerc`` file in the working directory if coverage
measurement is requested.
This is only necessary to support multi-process coverage measurement,
typically when the test suite is running, and with the pytest-based
*integration* test suite (at ``integration/`` in the root of the source
tree) foremost in mind. The idea is that if you are running Tahoe-LAFS in
a configuration where multiple processes are involved - for example, a
test process and a client node process, if you only measure coverage from
the test process then you will fail to observe most Tahoe-LAFS code that
is being run.
This function arranges to have any Tahoe-LAFS process (such as that
client node process) collect and report coverage measurements as well.
"""
# can we put this _setup_coverage call after we hit
# argument-parsing?
# ensure_str() only necessary on Python 2.
if six.ensure_str('--coverage') not in sys.argv:
return
sys.argv.remove('--coverage')
argv.remove('--coverage')
try:
import coverage
@ -238,14 +325,37 @@ def _setup_coverage(reactor):
reactor.addSystemEventTrigger('after', 'shutdown', write_coverage_data)
def _run_with_reactor(reactor):
def _run_with_reactor(reactor, config, argv, stdout, stderr):
"""
Run a Tahoe-LAFS node using the given reactor.
_setup_coverage(reactor)
:param reactor: The reactor to use. This implementation largely ignores
this and lets the rest of the implementation pick its own reactor.
Oops.
argv = list(map(argv_to_unicode, sys.argv[1:]))
d = defer.maybeDeferred(parse_or_exit_with_explanation, argv)
:param twisted.python.usage.Options config: The config object to use to
parse the argument list.
:param [str] argv: The argument list to parse, *excluding* the name of the
program being run.
:param stdout: See ``run``.
:param stderr: See ``run``.
:return: A ``Deferred`` that fires when the run is complete.
"""
_setup_coverage(reactor, argv)
argv = list(map(argv_to_unicode, argv))
d = defer.maybeDeferred(
parse_or_exit,
config,
argv,
stdout,
stderr,
)
d.addCallback(_maybe_enable_eliot_logging, reactor)
d.addCallback(dispatch)
d.addCallback(dispatch, stdout=stdout, stderr=stderr)
def _show_exception(f):
# when task.react() notices a non-SystemExit exception, it does
# log.err() with the failure and then exits with rc=1. We want this
@ -253,7 +363,7 @@ def _run_with_reactor(reactor):
# weren't using react().
if f.check(SystemExit):
return f # dispatch function handled it
f.printTraceback(file=sys.stderr)
f.printTraceback(file=stderr)
sys.exit(1)
d.addErrback(_show_exception)
return d

View File

@ -192,7 +192,7 @@ class DaemonizeTahoeNodePlugin(object):
return DaemonizeTheRealService(self.nodetype, self.basedir, so)
def run(config):
def run(config, runApp=twistd.runApp):
"""
Runs a Tahoe-LAFS node in the foreground.
@ -212,10 +212,11 @@ def run(config):
if not nodetype:
print("%s is not a recognizable node directory" % quoted_basedir, file=err)
return 1
# Now prepare to turn into a twistd process. This os.chdir is the point
# of no return.
os.chdir(basedir)
twistd_args = ["--nodaemon"]
twistd_args = ["--nodaemon", "--rundir", basedir]
if sys.platform != "win32":
pidfile = get_pidfile(basedir)
twistd_args.extend(["--pidfile", pidfile])
twistd_args.extend(config.twistd_args)
twistd_args.append("DaemonizeTahoeNode") # point at our DaemonizeTahoeNodePlugin
@ -232,12 +233,11 @@ def run(config):
twistd_config.loadedPlugins = {"DaemonizeTahoeNode": DaemonizeTahoeNodePlugin(nodetype, basedir)}
# handle invalid PID file (twistd might not start otherwise)
pidfile = get_pidfile(basedir)
if get_pid_from_pidfile(pidfile) == -1:
if sys.platform != "win32" and get_pid_from_pidfile(pidfile) == -1:
print("found invalid PID file in %s - deleting it" % basedir, file=err)
os.remove(pidfile)
# We always pass --nodaemon so twistd.runApp does not daemonize.
print("running node in %s" % (quoted_basedir,), file=out)
twistd.runApp(twistd_config)
runApp(twistd_config)
return 0

View File

@ -11,21 +11,48 @@ if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
import os
from urllib.parse import urlencode, quote as url_quote
from sys import stdout as _sys_stdout
from urllib.parse import urlencode
import json
from .common import BaseOptions
from allmydata.scripts.common import get_default_nodedir
from allmydata.scripts.common_http import do_http, BadResponse
from allmydata.scripts.common_http import BadResponse
from allmydata.util.abbreviate import abbreviate_space, abbreviate_time
from allmydata.util.encodingutil import argv_to_abspath
def _get_json_for_fragment(options, fragment, method='GET', post_args=None):
_print = print
def print(*args, **kwargs):
"""
returns the JSON for a particular URI-fragment (to which is
pre-pended the node's URL)
Builtin ``print``-alike that will even write unicode which cannot be
encoded using the specified output file's encoding.
This differs from the builtin print in that it will use the "replace"
encoding error handler and then write the result whereas builtin print
uses the "strict" encoding error handler.
"""
from past.builtins import unicode
out = kwargs.pop("file", None)
if out is None:
out = _sys_stdout
encoding = out.encoding or "ascii"
def ensafe(o):
if isinstance(o, unicode):
return o.encode(encoding, errors="replace").decode(encoding)
return o
return _print(
*(ensafe(a) for a in args),
file=out,
**kwargs
)
def _get_request_parameters_for_fragment(options, fragment, method, post_args):
"""
Get parameters for ``do_http`` for requesting the given fragment.
:return dict: A dictionary suitable for use as keyword arguments to
``do_http``.
"""
nodeurl = options['node-url']
if nodeurl.endswith('/'):
@ -40,7 +67,17 @@ def _get_json_for_fragment(options, fragment, method='GET', post_args=None):
body = ''
if post_args is not None:
raise ValueError("post_args= only valid for POST method")
resp = do_http(method, url, body=body.encode("utf-8"))
return dict(
method=method,
url=url,
body=body.encode("utf-8"),
)
def _handle_response_for_fragment(resp, nodeurl):
"""
Inspect an HTTP response and return the parsed payload, if possible.
"""
if isinstance(resp, BadResponse):
# specifically NOT using format_http_error() here because the
# URL is pretty sensitive (we're doing /uri/<key>).
@ -55,12 +92,6 @@ def _get_json_for_fragment(options, fragment, method='GET', post_args=None):
return parsed
def _get_json_for_cap(options, cap):
return _get_json_for_fragment(
options,
'uri/%s?t=json' % url_quote(cap),
)
def pretty_progress(percent, size=10, output_ascii=False):
"""
Displays a unicode or ascii based progress bar of a certain
@ -251,7 +282,10 @@ def render_recent(verbose, stdout, status_data):
print(u" Skipped {} non-upload/download operations; use --verbose to see".format(skipped), file=stdout)
def do_status(options):
def do_status(options, do_http=None):
if do_http is None:
from allmydata.scripts.common_http import do_http
nodedir = options["node-directory"]
with open(os.path.join(nodedir, u'private', u'api_auth_token'), 'r') as f:
token = f.read().strip()
@ -260,25 +294,30 @@ def do_status(options):
# do *all* our data-retrievals first in case there's an error
try:
status_data = _get_json_for_fragment(
options,
'status?t=json',
method='POST',
post_args=dict(
t='json',
token=token,
)
status_data = _handle_response_for_fragment(
do_http(**_get_request_parameters_for_fragment(
options,
'status?t=json',
method='POST',
post_args=dict(
t='json',
token=token,
),
)),
options['node-url'],
)
statistics_data = _get_json_for_fragment(
options,
'statistics?t=json',
method='POST',
post_args=dict(
t='json',
token=token,
)
statistics_data = _handle_response_for_fragment(
do_http(**_get_request_parameters_for_fragment(
options,
'statistics?t=json',
method='POST',
post_args=dict(
t='json',
token=token,
),
)),
options['node-url'],
)
except Exception as e:
print(u"failed to retrieve data: %s" % str(e), file=options.stderr)
return 2

View File

@ -13,8 +13,9 @@ if PY2:
import os.path
from allmydata.util import base32
class DataTooLargeError(Exception):
pass
# Backwards compatibility.
from allmydata.interfaces import DataTooLargeError # noqa: F401
class UnknownMutableContainerVersionError(Exception):
pass
class UnknownImmutableContainerVersionError(Exception):

View File

@ -13,16 +13,20 @@ if PY2:
import os, stat, struct, time
from collections_extended import RangeMap
from foolscap.api import Referenceable
from zope.interface import implementer
from allmydata.interfaces import RIBucketWriter, RIBucketReader
from allmydata.interfaces import (
RIBucketWriter, RIBucketReader, ConflictingWriteError,
DataTooLargeError,
)
from allmydata.util import base32, fileutil, log
from allmydata.util.assertutil import precondition
from allmydata.util.hashutil import timing_safe_compare
from allmydata.storage.lease import LeaseInfo
from allmydata.storage.common import UnknownImmutableContainerVersionError, \
DataTooLargeError
from allmydata.storage.common import UnknownImmutableContainerVersionError
# each share file (in storage/shares/$SI/$SHNUM) contains lease information
# and share data. The share data is accessed by RIBucketWriter.write and
@ -204,19 +208,18 @@ class ShareFile(object):
@implementer(RIBucketWriter)
class BucketWriter(Referenceable): # type: ignore # warner/foolscap#78
def __init__(self, ss, incominghome, finalhome, max_size, lease_info, canary):
def __init__(self, ss, incominghome, finalhome, max_size, lease_info):
self.ss = ss
self.incominghome = incominghome
self.finalhome = finalhome
self._max_size = max_size # don't allow the client to write more than this
self._canary = canary
self._disconnect_marker = canary.notifyOnDisconnect(self._disconnected)
self.closed = False
self.throw_out_all_data = False
self._sharefile = ShareFile(incominghome, create=True, max_size=max_size)
# also, add our lease to the file now, so that other ones can be
# added by simultaneous uploaders
self._sharefile.add_lease(lease_info)
self._already_written = RangeMap()
def allocated_size(self):
return self._max_size
@ -226,7 +229,20 @@ class BucketWriter(Referenceable): # type: ignore # warner/foolscap#78
precondition(not self.closed)
if self.throw_out_all_data:
return
# Make sure we're not conflicting with existing data:
end = offset + len(data)
for (chunk_start, chunk_stop, _) in self._already_written.ranges(offset, end):
chunk_len = chunk_stop - chunk_start
actual_chunk = self._sharefile.read_share_data(chunk_start, chunk_len)
writing_chunk = data[chunk_start - offset:chunk_stop - offset]
if actual_chunk != writing_chunk:
raise ConflictingWriteError(
"Chunk {}-{} doesn't match already written data.".format(chunk_start, chunk_stop)
)
self._sharefile.write_share_data(offset, data)
self._already_written.set(True, offset, end)
self.ss.add_latency("write", time.time() - start)
self.ss.count("write")
@ -262,22 +278,19 @@ class BucketWriter(Referenceable): # type: ignore # warner/foolscap#78
pass
self._sharefile = None
self.closed = True
self._canary.dontNotifyOnDisconnect(self._disconnect_marker)
filelen = os.stat(self.finalhome)[stat.ST_SIZE]
self.ss.bucket_writer_closed(self, filelen)
self.ss.add_latency("close", time.time() - start)
self.ss.count("close")
def _disconnected(self):
def disconnected(self):
if not self.closed:
self._abort()
def remote_abort(self):
log.msg("storage: aborting sharefile %s" % self.incominghome,
facility="tahoe.storage", level=log.UNUSUAL)
if not self.closed:
self._canary.dontNotifyOnDisconnect(self._disconnect_marker)
self._abort()
self.ss.count("abort")

View File

@ -434,20 +434,9 @@ class MutableShareFile(object):
# self._change_container_size() here.
def testv_compare(a, op, b):
assert op in (b"lt", b"le", b"eq", b"ne", b"ge", b"gt")
if op == b"lt":
return a < b
if op == b"le":
return a <= b
if op == b"eq":
return a == b
if op == b"ne":
return a != b
if op == b"ge":
return a >= b
if op == b"gt":
return a > b
# never reached
assert op == b"eq"
return a == b
class EmptyShare(object):

View File

@ -11,13 +11,14 @@ if PY2:
# Omit open() to get native behavior where open("w") always accepts native
# strings. Omit bytes so we don't leak future's custom bytes.
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, pow, round, super, dict, list, object, range, str, max, min # noqa: F401
else:
from typing import Dict
import os, re, struct, time
import weakref
import six
from foolscap.api import Referenceable
from foolscap.ipb import IRemoteReference
from twisted.application import service
from zope.interface import implementer
@ -49,6 +50,10 @@ from allmydata.storage.expirer import LeaseCheckingCrawler
NUM_RE=re.compile("^[0-9]+$")
# Number of seconds to add to expiration time on lease renewal.
# For now it's not actually configurable, but maybe someday.
DEFAULT_RENEWAL_TIME = 31 * 24 * 60 * 60
@implementer(RIStorageServer, IStatsProducer)
class StorageServer(service.MultiService, Referenceable):
@ -62,7 +67,8 @@ class StorageServer(service.MultiService, Referenceable):
expiration_mode="age",
expiration_override_lease_duration=None,
expiration_cutoff_date=None,
expiration_sharetypes=("mutable", "immutable")):
expiration_sharetypes=("mutable", "immutable"),
get_current_time=time.time):
service.MultiService.__init__(self)
assert isinstance(nodeid, bytes)
assert len(nodeid) == 20
@ -84,7 +90,6 @@ class StorageServer(service.MultiService, Referenceable):
self.incomingdir = os.path.join(sharedir, 'incoming')
self._clean_incomplete()
fileutil.make_dirs(self.incomingdir)
self._active_writers = weakref.WeakKeyDictionary()
log.msg("StorageServer created", facility="tahoe.storage")
if reserved_space:
@ -114,6 +119,18 @@ class StorageServer(service.MultiService, Referenceable):
expiration_cutoff_date,
expiration_sharetypes)
self.lease_checker.setServiceParent(self)
self._get_current_time = get_current_time
# Currently being-written Bucketwriters. For Foolscap, lifetime is tied
# to connection: when disconnection happens, the BucketWriters are
# removed. For HTTP, this makes no sense, so there will be
# timeout-based cleanup; see
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3807.
# Map in-progress filesystem path -> BucketWriter:
self._bucket_writers = {} # type: Dict[str,BucketWriter]
# Canaries and disconnect markers for BucketWriters created via Foolscap:
self._bucket_writer_disconnect_markers = {} # type: Dict[BucketWriter,(IRemoteReference, object)]
def __repr__(self):
return "<StorageServer %s>" % (idlib.shortnodeid_b2a(self.my_nodeid),)
@ -232,7 +249,7 @@ class StorageServer(service.MultiService, Referenceable):
def allocated_size(self):
space = 0
for bw in self._active_writers:
for bw in self._bucket_writers.values():
space += bw.allocated_size()
return space
@ -257,14 +274,17 @@ class StorageServer(service.MultiService, Referenceable):
}
return version
def remote_allocate_buckets(self, storage_index,
renew_secret, cancel_secret,
sharenums, allocated_size,
canary, owner_num=0):
def _allocate_buckets(self, storage_index,
renew_secret, cancel_secret,
sharenums, allocated_size,
owner_num=0):
"""
Generic bucket allocation API.
"""
# owner_num is not for clients to set, but rather it should be
# curried into the PersonalStorageServer instance that is dedicated
# to a particular owner.
start = time.time()
start = self._get_current_time()
self.count("allocate")
alreadygot = set()
bucketwriters = {} # k: shnum, v: BucketWriter
@ -277,7 +297,7 @@ class StorageServer(service.MultiService, Referenceable):
# goes into the share files themselves. It could also be put into a
# separate database. Note that the lease should not be added until
# the BucketWriter has been closed.
expire_time = time.time() + 31*24*60*60
expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME
lease_info = LeaseInfo(owner_num,
renew_secret, cancel_secret,
expire_time, self.my_nodeid)
@ -309,7 +329,7 @@ class StorageServer(service.MultiService, Referenceable):
# great! we already have it. easy.
pass
elif os.path.exists(incominghome):
# Note that we don't create BucketWriters for shnums that
# For Foolscap we don't create BucketWriters for shnums that
# have a partial share (in incoming/), so if a second upload
# occurs while the first is still in progress, the second
# uploader will use different storage servers.
@ -317,11 +337,11 @@ class StorageServer(service.MultiService, Referenceable):
elif (not limited) or (remaining_space >= max_space_per_bucket):
# ok! we need to create the new share file.
bw = BucketWriter(self, incominghome, finalhome,
max_space_per_bucket, lease_info, canary)
max_space_per_bucket, lease_info)
if self.no_storage:
bw.throw_out_all_data = True
bucketwriters[shnum] = bw
self._active_writers[bw] = 1
self._bucket_writers[incominghome] = bw
if limited:
remaining_space -= max_space_per_bucket
else:
@ -331,7 +351,22 @@ class StorageServer(service.MultiService, Referenceable):
if bucketwriters:
fileutil.make_dirs(os.path.join(self.sharedir, si_dir))
self.add_latency("allocate", time.time() - start)
self.add_latency("allocate", self._get_current_time() - start)
return alreadygot, bucketwriters
def remote_allocate_buckets(self, storage_index,
renew_secret, cancel_secret,
sharenums, allocated_size,
canary, owner_num=0):
"""Foolscap-specific ``allocate_buckets()`` API."""
alreadygot, bucketwriters = self._allocate_buckets(
storage_index, renew_secret, cancel_secret, sharenums, allocated_size,
owner_num=owner_num,
)
# Abort BucketWriters if disconnection happens.
for bw in bucketwriters.values():
disconnect_marker = canary.notifyOnDisconnect(bw.disconnected)
self._bucket_writer_disconnect_markers[bw] = (canary, disconnect_marker)
return alreadygot, bucketwriters
def _iter_share_files(self, storage_index):
@ -351,33 +386,36 @@ class StorageServer(service.MultiService, Referenceable):
def remote_add_lease(self, storage_index, renew_secret, cancel_secret,
owner_num=1):
start = time.time()
start = self._get_current_time()
self.count("add-lease")
new_expire_time = time.time() + 31*24*60*60
new_expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME
lease_info = LeaseInfo(owner_num,
renew_secret, cancel_secret,
new_expire_time, self.my_nodeid)
for sf in self._iter_share_files(storage_index):
sf.add_or_renew_lease(lease_info)
self.add_latency("add-lease", time.time() - start)
self.add_latency("add-lease", self._get_current_time() - start)
return None
def remote_renew_lease(self, storage_index, renew_secret):
start = time.time()
start = self._get_current_time()
self.count("renew")
new_expire_time = time.time() + 31*24*60*60
new_expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME
found_buckets = False
for sf in self._iter_share_files(storage_index):
found_buckets = True
sf.renew_lease(renew_secret, new_expire_time)
self.add_latency("renew", time.time() - start)
self.add_latency("renew", self._get_current_time() - start)
if not found_buckets:
raise IndexError("no such lease to renew")
def bucket_writer_closed(self, bw, consumed_size):
if self.stats_provider:
self.stats_provider.count('storage_server.bytes_added', consumed_size)
del self._active_writers[bw]
del self._bucket_writers[bw.incominghome]
if bw in self._bucket_writer_disconnect_markers:
canary, disconnect_marker = self._bucket_writer_disconnect_markers.pop(bw)
canary.dontNotifyOnDisconnect(disconnect_marker)
def _get_bucket_shares(self, storage_index):
"""Return a list of (shnum, pathname) tuples for files that hold
@ -394,7 +432,7 @@ class StorageServer(service.MultiService, Referenceable):
pass
def remote_get_buckets(self, storage_index):
start = time.time()
start = self._get_current_time()
self.count("get")
si_s = si_b2a(storage_index)
log.msg("storage: get_buckets %r" % si_s)
@ -402,7 +440,7 @@ class StorageServer(service.MultiService, Referenceable):
for shnum, filename in self._get_bucket_shares(storage_index):
bucketreaders[shnum] = BucketReader(self, filename,
storage_index, shnum)
self.add_latency("get", time.time() - start)
self.add_latency("get", self._get_current_time() - start)
return bucketreaders
def get_leases(self, storage_index):
@ -563,7 +601,7 @@ class StorageServer(service.MultiService, Referenceable):
:return LeaseInfo: Information for a new lease for a share.
"""
ownerid = 1 # TODO
expire_time = time.time() + 31*24*60*60 # one month
expire_time = self._get_current_time() + DEFAULT_RENEWAL_TIME
lease_info = LeaseInfo(ownerid,
renew_secret, cancel_secret,
expire_time, self.my_nodeid)
@ -599,7 +637,7 @@ class StorageServer(service.MultiService, Referenceable):
See ``allmydata.interfaces.RIStorageServer`` for details about other
parameters and return value.
"""
start = time.time()
start = self._get_current_time()
self.count("writev")
si_s = si_b2a(storage_index)
log.msg("storage: slot_writev %r" % si_s)
@ -640,7 +678,7 @@ class StorageServer(service.MultiService, Referenceable):
self._add_or_renew_leases(remaining_shares, lease_info)
# all done
self.add_latency("writev", time.time() - start)
self.add_latency("writev", self._get_current_time() - start)
return (testv_is_good, read_data)
def remote_slot_testv_and_readv_and_writev(self, storage_index,
@ -666,7 +704,7 @@ class StorageServer(service.MultiService, Referenceable):
return share
def remote_slot_readv(self, storage_index, shares, readv):
start = time.time()
start = self._get_current_time()
self.count("readv")
si_s = si_b2a(storage_index)
lp = log.msg("storage: slot_readv %r %r" % (si_s, shares),
@ -675,7 +713,7 @@ class StorageServer(service.MultiService, Referenceable):
# shares exist if there is a file for them
bucketdir = os.path.join(self.sharedir, si_dir)
if not os.path.isdir(bucketdir):
self.add_latency("readv", time.time() - start)
self.add_latency("readv", self._get_current_time() - start)
return {}
datavs = {}
for sharenum_s in os.listdir(bucketdir):
@ -689,7 +727,7 @@ class StorageServer(service.MultiService, Referenceable):
datavs[sharenum] = msf.readv(readv)
log.msg("returning shares %s" % (list(datavs.keys()),),
facility="tahoe.storage", level=log.NOISY, parent=lp)
self.add_latency("readv", time.time() - start)
self.add_latency("readv", self._get_current_time() - start)
return datavs
def remote_advise_corrupt_share(self, share_type, storage_index, shnum,
@ -702,8 +740,10 @@ class StorageServer(service.MultiService, Referenceable):
now = time_format.iso_utc(sep="T")
si_s = si_b2a(storage_index)
# windows can't handle colons in the filename
fn = os.path.join(self.corruption_advisory_dir,
"%s--%s-%d" % (now, str(si_s, "utf-8"), shnum)).replace(":","")
fn = os.path.join(
self.corruption_advisory_dir,
("%s--%s-%d" % (now, str(si_s, "utf-8"), shnum)).replace(":","")
)
with open(fn, "w") as f:
f.write("report: Share Corruption\n")
f.write("type: %s\n" % bytes_to_native_str(share_type))

View File

@ -965,17 +965,6 @@ class _StorageServer(object):
cancel_secret,
)
def renew_lease(
self,
storage_index,
renew_secret,
):
return self._rref.callRemote(
"renew_lease",
storage_index,
renew_secret,
)
def get_buckets(
self,
storage_index,
@ -1005,11 +994,19 @@ class _StorageServer(object):
tw_vectors,
r_vector,
):
# Match the wire protocol, which requires 4-tuples for test vectors.
wire_format_tw_vectors = {
key: (
[(start, length, b"eq", data) for (start, length, data) in value[0]],
value[1],
value[2],
) for (key, value) in tw_vectors.items()
}
return self._rref.callRemote(
"slot_testv_and_readv_and_writev",
storage_index,
secrets,
tw_vectors,
wire_format_tw_vectors,
r_vector,
)
@ -1020,10 +1017,10 @@ class _StorageServer(object):
shnum,
reason,
):
return self._rref.callRemoteOnly(
return self._rref.callRemote(
"advise_corrupt_share",
share_type,
storage_index,
shnum,
reason,
)
).addErrback(log.err, "Error from remote call to advise_corrupt_share")

View File

@ -11,23 +11,22 @@ if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
from six.moves import cStringIO as StringIO
from six import ensure_text, ensure_str
import re
from six import ensure_text
import os.path
import sys
import re
from mock import patch, Mock
from urllib.parse import quote as url_quote
from twisted.trial import unittest
from twisted.python.monkey import MonkeyPatcher
from twisted.internet import task
from twisted.python.filepath import FilePath
from twisted.internet.testing import (
MemoryReactor,
)
from twisted.internet.test.modulehelpers import (
AlternateReactor,
)
import allmydata
from allmydata.crypto import ed25519
from allmydata.util import fileutil, hashutil, base32
from allmydata.util.namespace import Namespace
from allmydata import uri
from allmydata.immutable import upload
from allmydata.dirnode import normalize
@ -524,42 +523,34 @@ class CLI(CLITestMixin, unittest.TestCase):
self.failUnlessIn(normalize(file), filenames)
def test_exception_catcher(self):
"""
An exception that is otherwise unhandled during argument dispatch is
written to stderr and causes the process to exit with code 1.
"""
self.basedir = "cli/exception_catcher"
stderr = StringIO()
exc = Exception("canary")
ns = Namespace()
class BrokenOptions(object):
def parseOptions(self, argv):
raise exc
ns.parse_called = False
def call_parse_or_exit(args):
ns.parse_called = True
raise exc
stderr = StringIO()
ns.sys_exit_called = False
def call_sys_exit(exitcode):
ns.sys_exit_called = True
self.failUnlessEqual(exitcode, 1)
reactor = MemoryReactor()
def fake_react(f):
reactor = Mock()
d = f(reactor)
# normally this Deferred would be errbacked with SystemExit, but
# since we mocked out sys.exit, it will be fired with None. So
# it's safe to drop it on the floor.
del d
with AlternateReactor(reactor):
with self.assertRaises(SystemExit) as ctx:
runner.run(
configFactory=BrokenOptions,
argv=["tahoe"],
stderr=stderr,
)
patcher = MonkeyPatcher((runner, 'parse_or_exit_with_explanation',
call_parse_or_exit),
(sys, 'argv', ["tahoe"]),
(sys, 'exit', call_sys_exit),
(sys, 'stderr', stderr),
(task, 'react', fake_react),
)
patcher.runWithPatches(runner.run)
self.assertTrue(reactor.hasRun)
self.assertFalse(reactor.running)
self.failUnless(ns.parse_called)
self.failUnless(ns.sys_exit_called)
self.failUnlessIn(str(exc), stderr.getvalue())
self.assertEqual(1, ctx.exception.code)
class Help(unittest.TestCase):
@ -1331,30 +1322,3 @@ class Options(ReallyEqualMixin, unittest.TestCase):
["--node-directory=there", "run", some_twistd_option])
self.failUnlessRaises(usage.UsageError, self.parse,
["run", "--basedir=here", some_twistd_option])
class Run(unittest.TestCase):
@patch('allmydata.scripts.tahoe_run.os.chdir')
@patch('allmydata.scripts.tahoe_run.twistd')
def test_non_numeric_pid(self, mock_twistd, chdir):
"""
If the pidfile exists but does not contain a numeric value, a complaint to
this effect is written to stderr.
"""
basedir = FilePath(ensure_str(self.mktemp()))
basedir.makedirs()
basedir.child(u"twistd.pid").setContent(b"foo")
basedir.child(u"tahoe-client.tac").setContent(b"")
config = tahoe_run.RunOptions()
config.stdout = StringIO()
config.stderr = StringIO()
config['basedir'] = ensure_text(basedir.path)
config.twistd_args = []
result_code = tahoe_run.run(config)
self.assertIn("invalid PID file", config.stderr.getvalue())
self.assertTrue(len(mock_twistd.mock_calls), 1)
self.assertEqual(mock_twistd.mock_calls[0][0], 'runApp')
self.assertEqual(0, result_code)

View File

@ -16,11 +16,19 @@ from six.moves import (
StringIO,
)
from testtools import (
skipIf,
)
from testtools.matchers import (
Contains,
Equals,
HasLength,
)
from twisted.python.runtime import (
platform,
)
from twisted.python.filepath import (
FilePath,
)
@ -33,6 +41,8 @@ from twisted.internet.test.modulehelpers import (
from ...scripts.tahoe_run import (
DaemonizeTheRealService,
RunOptions,
run,
)
from ...scripts.runner import (
@ -135,3 +145,40 @@ class DaemonizeTheRealServiceTests(SyncTestCase):
""",
"Privacy requested",
)
class RunTests(SyncTestCase):
"""
Tests for ``run``.
"""
@skipIf(platform.isWindows(), "There are no PID files on Windows.")
def test_non_numeric_pid(self):
"""
If the pidfile exists but does not contain a numeric value, a complaint to
this effect is written to stderr.
"""
basedir = FilePath(self.mktemp()).asTextMode()
basedir.makedirs()
basedir.child(u"twistd.pid").setContent(b"foo")
basedir.child(u"tahoe-client.tac").setContent(b"")
config = RunOptions()
config.stdout = StringIO()
config.stderr = StringIO()
config['basedir'] = basedir.path
config.twistd_args = []
runs = []
result_code = run(config, runApp=runs.append)
self.assertThat(
config.stderr.getvalue(),
Contains("found invalid PID file in"),
)
self.assertThat(
runs,
HasLength(1),
)
self.assertThat(
result_code,
Equals(0),
)

View File

@ -12,7 +12,6 @@ if PY2:
from six import ensure_text
import os
import mock
import tempfile
from io import BytesIO, StringIO
from os.path import join
@ -22,8 +21,8 @@ from twisted.internet import defer
from allmydata.mutable.publish import MutableData
from allmydata.scripts.common_http import BadResponse
from allmydata.scripts.tahoe_status import _get_json_for_fragment
from allmydata.scripts.tahoe_status import _get_json_for_cap
from allmydata.scripts.tahoe_status import _handle_response_for_fragment
from allmydata.scripts.tahoe_status import _get_request_parameters_for_fragment
from allmydata.scripts.tahoe_status import pretty_progress
from allmydata.scripts.tahoe_status import do_status
from allmydata.web.status import marshal_json
@ -140,17 +139,12 @@ class CommandStatus(unittest.TestCase):
"""
These tests just exercise the renderers and ensure they don't
catastrophically fail.
They could be enhanced to look for "some" magic strings in the
results and assert they're in the output.
"""
def setUp(self):
self.options = _FakeOptions()
@mock.patch('allmydata.scripts.tahoe_status.do_http')
@mock.patch('sys.stdout', StringIO())
def test_no_operations(self, http):
def test_no_operations(self):
values = [
StringIO(ensure_text(json.dumps({
"active": [],
@ -165,12 +159,11 @@ class CommandStatus(unittest.TestCase):
}
}))),
]
http.side_effect = lambda *args, **kw: values.pop(0)
do_status(self.options)
def do_http(*args, **kw):
return values.pop(0)
do_status(self.options, do_http)
@mock.patch('allmydata.scripts.tahoe_status.do_http')
@mock.patch('sys.stdout', StringIO())
def test_simple(self, http):
def test_simple(self):
recent_items = active_items = [
UploadStatus(),
DownloadStatus(b"abcd", 12345),
@ -201,80 +194,72 @@ class CommandStatus(unittest.TestCase):
}
}).encode("utf-8")),
]
http.side_effect = lambda *args, **kw: values.pop(0)
do_status(self.options)
def do_http(*args, **kw):
return values.pop(0)
do_status(self.options, do_http)
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_fetch_error(self, http):
def boom(*args, **kw):
def test_fetch_error(self):
def do_http(*args, **kw):
raise RuntimeError("boom")
http.side_effect = boom
do_status(self.options)
do_status(self.options, do_http)
class JsonHelpers(unittest.TestCase):
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_bad_response(self, http):
http.return_value = BadResponse('the url', 'some err')
def test_bad_response(self):
def do_http(*args, **kw):
return
with self.assertRaises(RuntimeError) as ctx:
_get_json_for_fragment({'node-url': 'http://localhost:1234'}, '/fragment')
self.assertTrue(
"Failed to get" in str(ctx.exception)
_handle_response_for_fragment(
BadResponse('the url', 'some err'),
'http://localhost:1234',
)
self.assertIn(
"Failed to get",
str(ctx.exception),
)
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_happy_path(self, http):
http.return_value = StringIO('{"some": "json"}')
resp = _get_json_for_fragment({'node-url': 'http://localhost:1234/'}, '/fragment/')
self.assertEqual(resp, dict(some='json'))
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_happy_path_post(self, http):
http.return_value = StringIO('{"some": "json"}')
resp = _get_json_for_fragment(
{'node-url': 'http://localhost:1234/'},
'/fragment/',
method='POST',
post_args={'foo': 'bar'}
def test_happy_path(self):
resp = _handle_response_for_fragment(
StringIO('{"some": "json"}'),
'http://localhost:1234/',
)
self.assertEqual(resp, dict(some='json'))
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_happy_path_for_cap(self, http):
http.return_value = StringIO('{"some": "json"}')
resp = _get_json_for_cap({'node-url': 'http://localhost:1234'}, 'fake cap')
def test_happy_path_post(self):
resp = _handle_response_for_fragment(
StringIO('{"some": "json"}'),
'http://localhost:1234/',
)
self.assertEqual(resp, dict(some='json'))
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_no_data_returned(self, http):
http.return_value = StringIO('null')
def test_no_data_returned(self):
with self.assertRaises(RuntimeError) as ctx:
_get_json_for_cap({'node-url': 'http://localhost:1234'}, 'fake cap')
self.assertTrue('No data from' in str(ctx.exception))
_handle_response_for_fragment(StringIO('null'), 'http://localhost:1234')
self.assertIn('No data from', str(ctx.exception))
def test_no_post_args(self):
with self.assertRaises(ValueError) as ctx:
_get_json_for_fragment(
_get_request_parameters_for_fragment(
{'node-url': 'http://localhost:1234'},
'/fragment',
method='POST',
post_args=None,
)
self.assertTrue(
"Must pass post_args" in str(ctx.exception)
self.assertIn(
"Must pass post_args",
str(ctx.exception),
)
def test_post_args_for_get(self):
with self.assertRaises(ValueError) as ctx:
_get_json_for_fragment(
_get_request_parameters_for_fragment(
{'node-url': 'http://localhost:1234'},
'/fragment',
method='GET',
post_args={'foo': 'bar'}
)
self.assertTrue(
"only valid for POST" in str(ctx.exception)
self.assertIn(
"only valid for POST",
str(ctx.exception),
)

View File

@ -35,6 +35,9 @@ from twisted.internet.error import (
from twisted.internet.interfaces import (
IProcessProtocol,
)
from twisted.python.log import (
msg,
)
from twisted.python.filepath import (
FilePath,
)
@ -99,7 +102,10 @@ class _ProcessProtocolAdapter(ProcessProtocol, object):
try:
proto = self._fds[childFD]
except KeyError:
pass
msg(format="Received unhandled output on %(fd)s: %(output)s",
fd=childFD,
output=data,
)
else:
proto.dataReceived(data)
@ -158,6 +164,9 @@ class CLINodeAPI(object):
u"-m",
u"allmydata.scripts.runner",
] + argv
msg(format="Executing %(argv)s",
argv=argv,
)
return self.reactor.spawnProcess(
processProtocol=process_protocol,
executable=exe,

View File

@ -0,0 +1,915 @@
"""
Test infrastructure for integration-y tests that run actual nodes, like those
in ``allmydata.test.test_system``.
Ported to Python 3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.utils import PY2
if PY2:
# Don't import bytes since it causes issues on (so far unported) modules on Python 2.
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, dict, list, object, range, max, min, str # noqa: F401
import os
from functools import partial
from twisted.internet import reactor
from twisted.internet import defer
from twisted.internet.defer import inlineCallbacks
from twisted.application import service
from foolscap.api import flushEventualQueue
from allmydata import client
from allmydata.introducer.server import create_introducer
from allmydata.util import fileutil, log, pollmixin
from twisted.python.filepath import (
FilePath,
)
from .common import (
TEST_RSA_KEY_SIZE,
SameProcessStreamEndpointAssigner,
)
from . import common_util as testutil
from ..scripts.common import (
write_introducer,
)
# our system test uses the same Tub certificates each time, to avoid the
# overhead of key generation
SYSTEM_TEST_CERTS = [
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1iNV
z07PYwZwucl87QlL2TFZvDxD4flZ/p3BZE3DCT5Efn9w2NT4sHXL1e+R/qsDFuNG
bw1y1TRM0DGK6Wr0XRT2mLQULNgB8y/HrhcSdONsYRyWdj+LimyECKjwh0iSkApv
Yj/7IOuq6dOoh67YXPdf75OHLShm4+8q8fuwhBL+nuuO4NhZDJKupYHcnuCkcF88
LN77HKrrgbpyVmeghUkwJMLeJCewvYVlambgWRiuGGexFgAm6laS3rWetOcdm9eg
FoA9PKNN6xvPatbj99MPoLpBbzsI64M0yT/wTSw1pj/Nom3rwfMa2OH8Kk7c8R/r
U3xj4ZY1DTlGERvejQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAwyQjQ3ZgtJ3JW
r3/EPdqSUBamTfXIpOh9rXmRjPpbe+MvenqIzl4q+GnkL5mdEb1e1hdKQZgFQ5Q5
tbcNIz6h5C07KaNtbqhZCx5c/RUEH87VeXuAuOqZHbZWJ18q0tnk+YgWER2TOkgE
RI2AslcsJBt88UUOjHX6/7J3KjPFaAjW1QV3TTsHxk14aYDYJwPdz+ijchgbOPQ0
i+ilhzcB+qQnOC1s4xQSFo+zblTO7EgqM9KpupYfOVFh46P1Mak2W8EDvhz0livl
OROXJ6nR/13lmQdfVX6T45d+ITBwtmW2nGAh3oI3JlArGKHaW+7qnuHR72q9FSES
cEYA/wmk
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDWI1XPTs9jBnC5
yXztCUvZMVm8PEPh+Vn+ncFkTcMJPkR+f3DY1PiwdcvV75H+qwMW40ZvDXLVNEzQ
MYrpavRdFPaYtBQs2AHzL8euFxJ042xhHJZ2P4uKbIQIqPCHSJKQCm9iP/sg66rp
06iHrthc91/vk4ctKGbj7yrx+7CEEv6e647g2FkMkq6lgdye4KRwXzws3vscquuB
unJWZ6CFSTAkwt4kJ7C9hWVqZuBZGK4YZ7EWACbqVpLetZ605x2b16AWgD08o03r
G89q1uP30w+gukFvOwjrgzTJP/BNLDWmP82ibevB8xrY4fwqTtzxH+tTfGPhljUN
OUYRG96NAgMBAAECggEAJ5xztBx0+nFnisZ9yG8uy6d4XPyc5gE1J4dRDdfgmyYc
j3XNjx6ePi4cHZ/qVryVnrc+AS7wrgW1q9FuS81QFKPbFdZB4SW3/p85BbgY3uxu
0Ovz3T3V9y4polx12eCP0/tKLVd+gdF2VTik9Sxfs5rC8VNN7wmJNuK4A/k15sgy
BIu/R8NlMNGQySNhtccp+dzB8uTyKx5zFZhVvnAK/3YX9BC2V4QBW9JxO4S8N0/9
48e9Sw/fGCfQ/EFPKGCvTvfuRqJ+4t5k10FygXJ+s+y70ifYi+aSsjJBuranbLJp
g5TwhuKnTWs8Nth3YRLbcJL4VBIOehjAWy8pDMMtlQKBgQD0O8cHb8cOTGW0BijC
NDofhA2GooQUUR3WL324PXWZq0DXuBDQhJVBKWO3AYonivhhd/qWO8lea9MEmU41
nKZ7maS4B8AJLJC08P8GL1uCIE/ezEXEi9JwC1zJiyl595Ap4lSAozH0DwjNvmGL
5mIdYg0BliqFXbloNJkNlb7INwKBgQDgdGEIWXc5Y1ncWNs6iDIV/t2MlL8vLrP0
hpkl/QiMndOQyD6JBo0+ZqvOQTSS4NTSxBROjPxvFbEJ3eH8Pmn8gHOf46fzP1OJ
wlYv0gYzkN4FE/tN6JnO2u9pN0euyyZLM1fnEcrMWColMN8JlWjtA7Gbxm8lkfa4
3vicaJtlWwKBgQCQYL4ZgVR0+Wit8W4qz+EEPHYafvwBXqp6sXxqa7qXawtb+q3F
9nqdGLCfwMNA+QA37ksugI1byfXmpBH902r/aiZbvAkj4zpwHH9F0r0PwbY1iSA9
PkLahX0Gj8OnHFgWynsVyGOBWVnk9oSHxVt+7zWtGG5uhKdUGLPZugocJQKBgB61
7bzduOFiRZ5PjhdxISE/UQL2Kz6Cbl7rt7Kp72yF/7eUnnHTMqoyFBnRdCcQmi4I
ZBrnUXbFigamlFAWHhxNWwSqeoVeychUjcRXQT/291nMhRsA02KpNA66YJV6+E9b
xBA6r/vLqGCUUkAWcFfVpIyC1xxV32MmJvAHpBN3AoGAPF3MUFiO0iKNZfst6Tm3
rzrldLawDo98DRZ7Yb2kWlWZYqUk/Nvryvo2cns75WGSMDYVbbRp+BY7kZmNYa9K
iQzKDL54ZRu6V+getJdeAO8yXoCmnZKxt5OHvOSrQMfAmFKSwLwxBbZBfXEyuune
yfusXLtCgajpreoVIa0xWdQ=
-----END PRIVATE KEY-----
""", # 0
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApDzW
4ZBeK9w4xpRaed6lXzeCO0Xmr3f0ynbueSdiZ89FWoAMgK+SiBIOViYV6hfm0Wah
lemSNzFGx5LvDSg2uwSqEP23DeM9O/SQPgIAiLeeEsYZJcgg2jz92YfFEaahsGdI
6qSP4XI2/5dgKRpPOYDGyw6R5PQR6w22Xq1WD1jBvImk/k09I9jHRn40pYbaJzbg
U2aIjvOruo2kqe4f6iDqE0piYimAZJUvemu1UoyV5NG590hGkDuWsMD77+d2FxCj
9Nzb+iuuG3ksnanHPyXi1hQmzp5OmzVWaevCHinNjWgsuSuLGO9H2SLf3wwp2UCs
EpKtzoKrnZdEg/anNwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQChxtr67o1aZZMJ
A6gESPtFjZLw6wG0j50JsrWKLvoXVts1ToJ9u2nx01aFKjBwb4Yg+vdJfDgIIAEm
jS56h6H2DfJlkTWHmi8Vx1wuusWnrNwYMI53tdlRIpD2+Ne7yeoLQZcVN2wuPmxD
Mbksg4AI4csmbkU/NPX5DtMy4EzM/pFvIcxNIVRUMVTFzn5zxhKfhyPqrMI4fxw1
UhUbEKO+QgIqTNp/dZ0lTbFs5HJQn6yirWyyvQKBPmaaK+pKd0RST/T38OU2oJ/J
LojRs7ugCJ+bxJqegmQrdcVqZZGbpYeK4O/5eIn8KOlgh0nUza1MyjJJemgBBWf7
HoXB8Fge
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCkPNbhkF4r3DjG
lFp53qVfN4I7Reavd/TKdu55J2Jnz0VagAyAr5KIEg5WJhXqF+bRZqGV6ZI3MUbH
ku8NKDa7BKoQ/bcN4z079JA+AgCIt54SxhklyCDaPP3Zh8URpqGwZ0jqpI/hcjb/
l2ApGk85gMbLDpHk9BHrDbZerVYPWMG8iaT+TT0j2MdGfjSlhtonNuBTZoiO86u6
jaSp7h/qIOoTSmJiKYBklS96a7VSjJXk0bn3SEaQO5awwPvv53YXEKP03Nv6K64b
eSydqcc/JeLWFCbOnk6bNVZp68IeKc2NaCy5K4sY70fZIt/fDCnZQKwSkq3Ogqud
l0SD9qc3AgMBAAECggEBAIu55uaIOFYASZ1IYaEFNpRHWVisI5Js76nAfSo9w46l
3E8eWYSx2mxBUEkipco/A3RraFVuHaMvHRR1gUMkT0vUsAs8jxwVk+cKLh1S/rlR
3f4C4yotlSWWdjE3PQXDShQWCwb1ciNPVFMmqfzOEVDOqlHe12h97TCYverWdT0f
3LZICLQsZd1WPKnPNXrsRRDCBuRLapdg+M0oJ+y6IiCdm+qM7Qvaoef6hlvm5ECz
LCM92db5BKTuPOQXMx2J8mjaBgU3aHxRV08IFgs7mI6q0t0FM7LlytIAJq1Hg5QU
36zDKo8tblkPijWZWlqlZCnlarrd3Ar/BiLEiuOGDMECgYEA1GOp0KHy0mbJeN13
+TDsgP7zhmqFcuJREu2xziNJPK2S06NfGYE8vuVqBGzBroLTQ3dK7rOJs9C6IjCE
mH7ZeHzfcKohpZnl443vHMSpgdh/bXTEO1aQZNbJ2hLYs8ie/VqqHR0u6YtpUqZL
LgaUA0U8GnlsO55B8kyCelckmDkCgYEAxfYQMPEEzg1tg2neqEfyoeY0qQTEJTeh
CPMztowSJpIyF1rQH6TaG0ZchkiAkw3W58RVDfvK72TuVlC5Kz00C2/uPnrqm0dX
iMPeML5rFlG3VGCrSTnAPI+az6P65q8zodqcTtA8xoxgPOlc/lINOxiTEMxLyeGF
8GyP+sCM2u8CgYEAvMBR05OJnEky9hJEpBZBqSZrQGL8dCwDh0HtCdi8JovPd/yx
8JW1aaWywXnx6uhjXoru8hJm54IxWV8rB+d716OKY7MfMfACqWejQDratgW0wY7L
MjztGGD2hLLJGYXLHjfsBPHBllaKZKRbHe1Er19hWdndQWKVEwPB1X4KjKkCgYEA
nWHmN3K2djbYtRyLR1CEBtDlVuaSJmCWp23q1BuCJqYeKtEpG69NM1f6IUws5Dyh
eXtuf4KKMU8V6QueW1D6OomPaJ8CO9c5MWM/F5ObwY/P58Y/ByVhvwQQeToONC5g
JzKNCF+nodZigKqrIwoKuMvtx/IT4vloKd+1jA5fLYMCgYBoT3HLCyATVdDSt1TZ
SbEDoLSYt23KRjQV93+INP949dYCagtgh/kTzxBopw5FljISLfdYizIRo2AzhhfP
WWpILlnt19kD+sNirJVqxJacfEZsu5baWTedI/yrCuVsAs/s3/EEY6q0Qywknxtp
Fwh1/8y5t14ib5fxOVhi8X1nEA==
-----END PRIVATE KEY-----
""", # 1
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwMTn
hXnpKHGAir3WYbOxefVrMA07OZNAsNa29nBwLA+NVIJNUFgquibMj7QYo8+M45oY
6LKr4yRcBryZVvyxfdr92xp8+kLeVApk2WLjkdBTRagHh9qdrY0hQmagCBN6/hLG
Xug8VksQUdhX3vu6ZyMvTLfKRkDOMRVkRGRGg/dOcvom7zpqMCGYenMG2FStr6UV
3s3dlCSZZTdTX5Uoq6yfUUJE3nITGKjpnpJKqIs3PWCIxdj7INIcjJKvIdUcavIV
2hEhh60A8ltmtdpQAXVBE+U7aZgS1fGAWS2A0a3UwuP2pkQp6OyKCUVHpZatbl9F
ahDN2QBzegv/rdJ1zwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAl4OQZ+FB9ZSUv
FL/KwLNt+ONU8Sve/xiX+8vKAvgKm2FrjyK+AZPwibnu+FSt2G4ndZBx4Wvpe5V+
gCsbzSXlh9cDn2SRXyprt2l/8Fj4eUMaThmLKOK200/N/s2SpmBtnuflBrhNaJpw
DEi2KEPuXsgvkuVzXN06j75cUHwn5LeWDAh0RalkVuGbEWBoFx9Hq8WECdlCy0YS
y09+yO01qz70y88C2rPThKw8kP4bX8aFZbvsnRHsLu/8nEQNlrELcfBarPVHjJ/9
imxOdymJkV152V58voiXP/PwXhynctQbF7e+0UZ+XEGdbAbZA0BMl7z+b09Z+jF2
afm4mVox
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDAxOeFeekocYCK
vdZhs7F59WswDTs5k0Cw1rb2cHAsD41Ugk1QWCq6JsyPtBijz4zjmhjosqvjJFwG
vJlW/LF92v3bGnz6Qt5UCmTZYuOR0FNFqAeH2p2tjSFCZqAIE3r+EsZe6DxWSxBR
2Ffe+7pnIy9Mt8pGQM4xFWREZEaD905y+ibvOmowIZh6cwbYVK2vpRXezd2UJJll
N1NflSirrJ9RQkTechMYqOmekkqoizc9YIjF2Psg0hyMkq8h1Rxq8hXaESGHrQDy
W2a12lABdUET5TtpmBLV8YBZLYDRrdTC4/amRCno7IoJRUellq1uX0VqEM3ZAHN6
C/+t0nXPAgMBAAECggEAF+2ZK4lZdsq4AQDVhqUuh4v+NSW/T0NHCWxto6OLWPzJ
N09BV5LKIvdD9yaM1HCj9XCgXOooyfYuciuhARo20f+H+VWNY+c+/8GWiSFsTCJG
4+Oao7NwVSWqljp07Ou2Hamo9AjxzGhe6znmlmg62CiW63f45MWQkqksHA0yb5jg
/onJ2//I+OI+aTKNfjt1G6h2x7oxeGTU1jJ0Hb2xSh+Mpqx9NDfb/KZyOndhSG5N
xRVosQ6uV+9mqHxTTwTZurTG31uhZzarkMuqxhcHS94ub7berEc/OlqvbyMKNZ3A
lzuvq0NBZhEUhAVgORAIS17r/q2BvyG4u5LFbG2p0QKBgQDeyyOl+A7xc4lPE2OL
Z3KHJPP4RuUnHnWFC+bNdr5Ag8K7jcjZIcasyUom9rOR0Fpuw9wmXpp3+6fyp9bJ
y6Bi5VioR0ZFP5X+nXxIN3yvgypu6AZvkhHrEFer+heGHxPlbwNKCKMbPzDZPBTZ
vlC7g7xUUcpNmGhrOKr3Qq5FlwKBgQDdgCmRvsHUyzicn8TI3IJBAOcaQG0Yr/R2
FzBqNfHHx7fUZlJfKJsnu9R9VRZmBi4B7MA2xcvz4QrdZWEtY8uoYp8TAGILfW1u
CP4ZHrzfDo/67Uzk2uTMTd0+JOqSm/HiVNguRPvC8EWBoFls+h129GKThMvKR1hP
1oarfAGIiQKBgQCIMAq5gHm59JMhqEt4QqMKo3cS9FtNX1wdGRpbzFMd4q0dstzs
ha4Jnv3Z9YHtBzzQap9fQQMRht6yARDVx8hhy6o3K2J0IBtTSfdXubtZGkfNBb4x
Y0vaseG1uam5jbO+0u5iygbSN/1nPUfNln2JMkzkCh8s8ZYavMgdX0BiPwKBgChR
QL/Hog5yoy5XIoGRKaBdYrNzkKgStwObuvNKOGUt5DckHNA3Wu6DkOzzRO1zKIKv
LlmJ7VLJ3qln36VcaeCPevcBddczkGyb9GxsHOLZCroY4YsykLzjW2cJXy0qd3/E
A8mAQvc7ttsebciZSi2x1BOX82QxUlDN8ptaKglJAoGBAMnLN1TQB0xtWYDPGcGV
2IvgX7OTRRlMVrTvIOvP5Julux9z1r0x0cesl/jaXupsBUlLLicPyBMSBJrXlr24
mrgkodk4TdqO1VtBCZBqak97DHVezstMrbpCGlUD5jBnsHVRLERvS09QlGhqMeNL
jpNQbWH9VhutzbvpYquKrhvK
-----END PRIVATE KEY-----
""", # 2
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAypqi
YTni3s60Uo8vgGcFvjWWkB5CD9Fx9pW/2KcxRJ/u137Y+BG8qWMA4lgII3ZIuvo4
6rLDiXnAnDZqUtrvZ90O/gH6RyQqX3AI4EwPvCnRIIe0okRcxnxYBL/LfBY54xuv
46JRYZP4c9IImqQH9QVo2/egtEzcpbmT/mfhpf6NGQWC3Xps2BqDT2SV/DrX/wPA
8P1atE1AxNp8ENxK/cjFAteEyDZOsDSa757ZHKAdM7L8rZ1Fd2xAA1Dq7IyYpTNE
IX72xytWxllcNvSUPLT+oicsSZBadc/p3moc3tR/rNdgrHKybedadru/f9Gwpa+v
0sllZlEcVPSYddAzWwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCmk60Nj5FPvemx
DSSQjJPyJoIDpTxQ4luSzIq4hPwlUXw7dqrvHyCWgn2YVe9xZsGrT/+n376ecmgu
sw4s4qVhR9bzKkTMewjC2wUooTA5v9HYsNWZy3Ah7hHPbDHlMADYobjB5/XolNUP
bCM9xALEdM9DxpC4vjUZexlRKmjww9QKE22jIM+bqsK0zqDSq+zHpfHNGGcS3vva
OvI6FPc1fAr3pZpVzevMSN2zufIJwjL4FT5/uzwOCaSCwgR1ztD5CSbQLTLlwIsX
S7h2WF9078XumeRjKejdjEjyH4abKRq8+5LVLcjKEpg7OvktuRpPoGPCEToaAzuv
h+RSQwwY
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKmqJhOeLezrRS
jy+AZwW+NZaQHkIP0XH2lb/YpzFEn+7Xftj4EbypYwDiWAgjdki6+jjqssOJecCc
NmpS2u9n3Q7+AfpHJCpfcAjgTA+8KdEgh7SiRFzGfFgEv8t8FjnjG6/jolFhk/hz
0giapAf1BWjb96C0TNyluZP+Z+Gl/o0ZBYLdemzYGoNPZJX8Otf/A8Dw/Vq0TUDE
2nwQ3Er9yMUC14TINk6wNJrvntkcoB0zsvytnUV3bEADUOrsjJilM0QhfvbHK1bG
WVw29JQ8tP6iJyxJkFp1z+neahze1H+s12CscrJt51p2u79/0bClr6/SyWVmURxU
9Jh10DNbAgMBAAECggEBALv7Q+Rf+C7wrQDZF6LUc9CrGfq4CGVy2IGJKgqT/jOF
DO9nI1rv4hNr55sbQNneWtcZaYvht2mrzNlj57zepDjDM7DcFuLBHIuWgLXT/NmC
FyZOo3vXYBlNr8EgT2XfnXAp9UWJCmc2CtUzsIYC4dsmXMeTd8kyc5tUl4r5ybTf
1g+RTck/IGgqdfzpuTsNl79FW2rP9z111Py6dbqgQzhuSAune9dnLFvZst8dyL8j
FStETMxBM6jrCF1UcKXzG7trDHiCdzJ8WUhx6opN/8OasQGndwpXto6FZuBy/AVP
4kVQNpUXImYcLEpva0MqGRHg+YN+c84C71CMchnF4aECgYEA7J2go4CkCcZNKCy5
R5XVCqNFYRHjekR+UwH8cnCa7pMKKfP+lTCiBrO2q8zwWwknRMyuycS5g/xbSpg1
L6hi92CV1YQy1/JhlQedekjejNTTuLOPKf78AFNSfc7axDnes2v4Bvcdp9gsbUIO
10cXh0tOSLE7P9y+yC86KQkFAPECgYEA2zO0M2nvbPHv2jjtymY3pflYm0HzhM/T
kPtue3GxOgbEPsHffBGssShBTE3yCOX3aAONXJucMrSAPL9iwUfgfGx6ADdkwBsA
OjDlkxvTbP/9trE6/lsSPtGpWRdJNHqXN4Hx7gXJizRwG7Ym+oHvIIh53aIjdFoE
HLQLpxObuQsCgYAuMQ99G83qQpYpc6GwAeYXL4yJyK453kky9z5LMQRt8rKXQhS/
F0FqQYc1vsplW0IZQkQVC5yT0Z4Yz+ICLcM0O9zEVAyA78ZxC42Io9UedSXn9tXK
Awc7IQkHmmxGxm1dZYSEB5X4gFEb+zted3h2ZxMfScohS3zLI70c6a/aYQKBgQCU
phRuxUkrTUpFZ1PCbN0R/ezbpLbaewFTEV7T8b6oxgvxLxI6FdZRcSYO89DNvf2w
GLCVe6VKMWPBTlxPDEostndpjCcTq3vU+nHE+BrBkTvh14BVGzddSFsaYpMvNm8z
ojiJHH2XnCDmefkm6lRacJKL/Tcj4SNmv6YjUEXLDwKBgF8WV9lzez3d/X5dphLy
2S7osRegH99iFanw0v5VK2HqDcYO9A7AD31D9nwX46QVYfgEwa6cHtVCZbpLeJpw
qXnYXe/hUU3yn5ipdNJ0Dm/ZhJPDD8TeqhnRRhxbZmsXs8EzfwB2tcUbASvjb3qA
vAaPlOSU1wXqhAsG9aVs8gtL
-----END PRIVATE KEY-----
""", # 3
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzNFoXDTIxMDEwMTAxNDAzNFowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzUqQ
M08E7F2ZE99bFHvpsR6LmgIJOOoGMXacTcEUhRF63E6+730FjxER2a30synv9GGS
3G9FstUmfhyimufkbTumri8Novw5CWZQLiE1rmMBI5nPcR2wAzy9z2odR6bfAwms
yyc3IPYg1BEDBPZl0LCQrQRRU/rVOrbCf7IMq+ATazmBg01gXMzq2M953ieorkQX
MsHVR/kyW0Q0yzhYF1OtIqbXxrdiZ+laTLWNqivj/FdegiWPCf8OcqpcpbgEjlDW
gBcC/vre+0E+16nfUV8xHL5jseJMJqfT508OtHxAzp+2D7b54NvYNIvbOAP+F9gj
aXy5mOvjXclK+hNmDwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAjZzTFKG7uoXxm
BPHfQvsKHIB/Cx9zMKj6pLwJzCPHQBzKOMoUen09oq+fb77RM7WvdX0pvFgEXaJW
q/ImooRMo+paf8GOZAuPwdafb2/OGdHZGZ2Cbo/ICGo1wGDCdMvbxTxrDNq1Yae+
m+2epN2pXAO1rlc7ktRkojM/qi3zXtbLjTs3IoPDXWhYPHdI1ThkneRmvxpzB1rW
2SBqj2snvyI+/3k3RHmldcdOrTlgWQ9hq05jWR8IVtRUFFVn9A+yQC3gnnLIUhwP
HJWwTIPuYW25TuxFxYZXIbnAiluZL0UIjd3IAwxaafvB6uhI7v0K789DKj2vRUkY
E8ptxZH4
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDNSpAzTwTsXZkT
31sUe+mxHouaAgk46gYxdpxNwRSFEXrcTr7vfQWPERHZrfSzKe/0YZLcb0Wy1SZ+
HKKa5+RtO6auLw2i/DkJZlAuITWuYwEjmc9xHbADPL3Pah1Hpt8DCazLJzcg9iDU
EQME9mXQsJCtBFFT+tU6tsJ/sgyr4BNrOYGDTWBczOrYz3neJ6iuRBcywdVH+TJb
RDTLOFgXU60iptfGt2Jn6VpMtY2qK+P8V16CJY8J/w5yqlyluASOUNaAFwL++t77
QT7Xqd9RXzEcvmOx4kwmp9PnTw60fEDOn7YPtvng29g0i9s4A/4X2CNpfLmY6+Nd
yUr6E2YPAgMBAAECggEBAIiL6uQl0AmDrBj6vHMghGzp+0MBza6MgngOA6L4JTTp
ToYQ3pEe4D6rxOq7+QHeiBtNd0ilvn9XpVXGqCVOzrIVNiWvaGubRjjJU9WLA1Ct
y4kpekAr1fIhScMXOsh45ub3XXZ27AVBkM5dTlvTpB8uAd0C/TFVqtR10WLsQ99h
Zm9Jczgs/6InYTssnAaqdeCLAf1LbmO4zwFsJfJOeSGGT6WBwlpHwMAgPhg8OLEu
kVWG7BEJ0hxcODk/es/vce9SN7BSyIzNY+qHcGtsrx/o0eO2Av/Z7ltV4Sz6UN1K
0y0OTiDyT/l62U2OugSN3wQ4xPTwlrWl7ZUHJmvpEaECgYEA+w2JoB2i1OV2JTPl
Y0TKSKcZYdwn7Nwh4fxMAJNJ8UbpPqrZEo37nxqlWNJrY/jKX3wHVk4ESSTaxXgF
UY7yKT0gRuD9+vE0gCbUmJQJTwbceNJUu4XrJ6SBtf72WgmphL+MtyKdwV8XltVl
Yp0hkswGmxl+5+Js6Crh7WznPl8CgYEA0VYtKs2YaSmT1zraY6Fv3AIQZq012vdA
7nVxmQ6jKDdc401OWARmiv0PrZaVNiEJ1YV8KxaPrKTfwhWqxNegmEBgA1FZ66NN
SAm8P9OCbt8alEaVkcATveXTeOCvfpZUO3sqZdDOiYLiLCsokHblkcenK85n0yT6
CzhTbvzDllECgYEAu9mfVy2Vv5OK2b+BLsw0SDSwa2cegL8eo0fzXqLXOzCCKqAQ
GTAgTSbU/idEr+NjGhtmKg/qaQioogVyhVpenLjeQ+rqYDDHxfRIM3rhlD5gDg/j
0wUbtegEHrgOgcSlEW16zzWZsS2EKxq16BoHGx6K+tcS/FOShg5ASzWnuiUCgYEA
sMz+0tLX8aG7CqHbRyBW8FMR9RY/kRMY1Q1+Bw40wMeZfSSSkYYN8T9wWWT/2rqm
qp7V0zJ34BFUJoDUPPH84fok3Uh9EKZYpAoM4z9JP0jREwBWXMYEJnOQWtwxfFGN
DLumgF2Nwtg3G6TL2s+AbtJYH4hxagQl5woIdYmnyzECgYEAsLASpou16A3uXG5J
+5ZgF2appS9Yfrqfh6TKywMsGG/JuiH3djdYhbJFIRGeHIIDb4XEXOHrg/SFflas
If0IjFRh9WCvQxnoRha3/pKRSc3OEka1MR/ZREK/d/LQEPmsRJVzY6ABKqmPAMDD
5CnG6Hz/rP87BiEKd1+3PGp8GCw=
-----END PRIVATE KEY-----
""", # 4
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzNFoXDTIxMDEwMTAxNDAzNFowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0sap
75YbbkEL85LFava3FrO1jpgVteQ4NGxxy1Nu9w2hPfMMeCPWjB8UfAwFk+LVPyvW
LAXd1zWL5rGpQ2ytIVQlTraR5EnALA1sMcQYbFz1ISPTYB031bEN/Ch8JWYwCG5A
X2H4D6BC7NgT6YyWDt8vxQnqAisPHQ/OK4ABD15CwkTyPimek2/ufYN2dapg1xhG
IUD96gqetJv9bu0r869s688kADIComsYG+8KKfFN67S3rSHMIpZPuGTtoHGnVO89
XBm0vNe0UxQkJEGJzZPn0tdec0LTC4GNtTaz5JuCjx/VsJBqrnTnHHjx0wFz8pff
afCimRwA+LCopxPE1QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBOkAnpBb3nY+dG
mKCjiLqSsuEPqpNiBYR+ue/8aVDnOKLKqAyQuyRZttQ7bPpKHaw7pwyCZH8iHnt6
pMCLCftNSlV2Fa8msRmuf5AiGjUvR1M8VtHWNYE8pedWrJqUgBhF/405B99yd8CT
kQJXKF18LObj7YKNsWRoMkVgqlQzWDMEqbfmy9MhuLx2EZPsTB1L0BHNGGDVBd9o
cpPLUixcc12u+RPMKq8x3KgwsnUf5vX/pCnoGcCy4JahWdDgcZlf0hUKGT7PUem5
CWW8SMeqSWQX9XpE5Qlm1+W/QXdDXLbbHqDtvBeUy3iFQe3C9RSkp0qdutxkAlFk
f5QHXfJ7
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDSxqnvlhtuQQvz
ksVq9rcWs7WOmBW15Dg0bHHLU273DaE98wx4I9aMHxR8DAWT4tU/K9YsBd3XNYvm
salDbK0hVCVOtpHkScAsDWwxxBhsXPUhI9NgHTfVsQ38KHwlZjAIbkBfYfgPoELs
2BPpjJYO3y/FCeoCKw8dD84rgAEPXkLCRPI+KZ6Tb+59g3Z1qmDXGEYhQP3qCp60
m/1u7Svzr2zrzyQAMgKiaxgb7wop8U3rtLetIcwilk+4ZO2gcadU7z1cGbS817RT
FCQkQYnNk+fS115zQtMLgY21NrPkm4KPH9WwkGqudOccePHTAXPyl99p8KKZHAD4
sKinE8TVAgMBAAECggEALU5EotoqJUXYEtAenUJQ0pFoWjE4oXNf3Wzd/O1/MZ19
ZjqDGKPjbxUTKyLOZB5i5gQ/MhFEwQiifMD9eB+5CyvyJPw7Wc28f/uWoQ/cjBZj
Hm979PHy2X0IW4Y8QTG462b/cUE2t+0j1ZMQnKf6bVHuC7V41mR5CC8oitMl5y5g
34yJmWXlIA0ep/WotLMqvil6DnSM/2V8Ch4SxjnzPpjbe4Kj+woucGNr4UKstZER
8iuHTsR64LjoGktRnnMwZxGZQI7EC428zsliInuWMdXe//w2chLdkirqpSrIQwSZ
3jNWStqBXGYaRg5Z1ilBvHtXxkzDzbAlzRBzqfEwwQKBgQDqYdMRrzHJaXWLdsyU
6jAuNX9tLh7PcicjP93SbPujS6mWcNb+D/au+VhWD+dZQDPRZttXck7wvKY1lw1V
MK0TYI7ydf8h3DFx3Mi6ZD4JVSU1MH233C3fv/FHenDoOvMXXRjUZxaRmuzFJvzt
6QlKIfSvwT+1wrOACNfteXfZUQKBgQDmN3Uuk01qvsETPwtWBp5RNcYhS/zGEQ7o
Q4K+teU453r1v8BGsQrCqulIZ3clMkDru2UroeKn1pzyVAS2AgajgXzfXh3VeZh1
vHTLP91BBYZTTWggalEN4aAkf9bxX/hA+9Bw/dzZcQW2aNV7WrYuCSvp3SDCMina
anQq/PaSRQKBgHjw23HfnegZI89AENaydQQTFNqolrtiYvGcbgC7vakITMzVEwrr
/9VP0pYuBKmYKGTgF0RrNnKgVX+HnxibUmOSSpCv9GNrdJQVYfpT6XL1XYqxp91s
nrs7FuxUMNiUOoWOw1Yuj4W4lH4y3QaCXgnDtbfPFunaOrdRWOIv8HjRAoGAV3NT
mSitbNIfR69YIAqNky3JIJbb42VRc1tJzCYOd+o+pCF96ZyRCNehnDZpZQDM9n8N
9GAfWEBHCCpwS69DVFL422TGEnSJPJglCZwt8OgnWXd7CW05cvt1OMgzHyekhxLg
4Dse7J5pXBxAlAYmVCB5xPGR4xLpISX1EOtcwr0CgYEA5rA2IUfjZYb4mvFHMKyM
xWZuV9mnl3kg0ULttPeOl3ppwjgRbWpyNgOXl8nVMYzxwT/A+xCPA18P0EcgNAWc
frJqQYg3NMf+f0K1wSaswUSLEVrQOj25OZJNpb21JEiNfEd5DinVVj4BtVc6KSpS
kvjbn2WhEUatc3lPL3V0Fkw=
-----END PRIVATE KEY-----
""", # 5
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExM1oXDTIxMDEwMTAxNTExM1owFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1c5y
S9IZHF9MIuwdafzhMkgP37I3RVpHEbpnPwnLFqSWelS5m2eDkwWd5SkfGjrmQ5q0
PEpqLlh3zHGw9yQjnHS3CCS1PwQ1kmwvpIK3HM5y8GM7ry1zkam8ZR4iX6Y7VG9g
9mhiVVFoVhe1gHeiC/3Mp6XeNuEiD0buM+8qZx9B21I+iwzy4wva7Gw0fJeq9G1c
lq2rhpD1LlIEodimWOi7lOEkNmUiO1SvpdrGdxUDpTgbdg6r5pCGjOXLd74tAQHP
P/LuqRNJDXtwvHtLIVQnW6wjjy4oiWZ8DXOdc9SkepwQLIF5Wh8O7MzF5hrd6Cvw
SOD3EEsJbyycAob6RwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBDNcbKVUyGOAVm
k3iVuzkkkymlTAMm/gsIs6loLJrkSqNg160FdVKJoZFjQtqoqLgLrntdCJ377nZ9
1i+yzbZsA4DA7nxj0IEdnd7rRYgGLspGqWeKSTROATeT4faLTXenecm0v2Rpxqc7
dSyeZJXOd2OoUu+Q64hzXCDXC6LNM+xZufxV9qv+8d+CipV6idSQZaUWSVuqFCwD
PT0R4eWfkMMaM8QqtNot/hVCEaKT+9rG0mbpRe/b/qBy5SR0u+XgGEEIV+33L59T
FXY+DpI1Dpt/bJFoUrfj6XohxdTdqYVCn1F8in98TsRcFHyH1xlkS3Y0RIiznc1C
BwAoGZ4B
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDVznJL0hkcX0wi
7B1p/OEySA/fsjdFWkcRumc/CcsWpJZ6VLmbZ4OTBZ3lKR8aOuZDmrQ8SmouWHfM
cbD3JCOcdLcIJLU/BDWSbC+kgrccznLwYzuvLXORqbxlHiJfpjtUb2D2aGJVUWhW
F7WAd6IL/cynpd424SIPRu4z7ypnH0HbUj6LDPLjC9rsbDR8l6r0bVyWrauGkPUu
UgSh2KZY6LuU4SQ2ZSI7VK+l2sZ3FQOlOBt2DqvmkIaM5ct3vi0BAc8/8u6pE0kN
e3C8e0shVCdbrCOPLiiJZnwNc51z1KR6nBAsgXlaHw7szMXmGt3oK/BI4PcQSwlv
LJwChvpHAgMBAAECggEBAK0KLeUBgIM++Y7WDCRInzYjrn08bpE5tIU7mO4jDfQg
dw1A3wtQZuOpyxW6B0siWlRis/aLv44M2cBkT3ZmEFBDAhOcKfh7fqQn3RNHG847
pDi8B4UKwxskBa7NCcLh9eirUA19hABLJ6dt/t6fdE5CNc2FZ+iAoyE8JfNwYKAd
6Fa3HqUBPNWt8ryj4ftgpMNBdfmLugEM4N20SXJA28hOq2lUcwNKQQ1xQrovl0ig
iMbMWytV4gUPKC9Wra66OYIkk/K8teiUNIYA4JwAUVTs1NEWoyfwUTz1onutCkMl
5vY7JAqRoDWoSUX6FI+IHUdyqPAMdOMhC37gjrxoo2ECgYEA7trDMu6xsOwEckDh
iz148kejMlnTTuCNetOFBw3njFgxISx0PrDLWmJmnHMxPv9AAjXYb2+UCCm3fj6Q
OB8o4ZJm0n504qbFHcb2aI22U5hZ99ERvqx8WBnJ2RarIBmg06y0ktxq8gFR2qxF
0hWAOcDn1DWQ8QI0XBiFFcJTGtcCgYEA5SdlIXRnVZDKi5YufMAORG9i74dXUi0Y
02UoVxJ+q8VFu+TT8wrC5UQehG3gX+79Cz7hthhDqOSCv6zTyE4Evb6vf9OLgnVe
E5iLF033zCxLSS9MgiZ+jTO+wK3RsapXDtGcSEk2P82Pj5seNf4Ei1GNCRlm1DbX
71wlikprHhECgYABqmLcExAIJM0vIsav2uDiB5/atQelMCmsZpcx4mXv85l8GrxA
x6jTW4ZNpvv77Xm7yjZVKJkGqYvPBI6q5YS6dfPjmeAkyHbtazrCpeJUmOZftQSD
qN5BGwTuT5sn4SXe9ABaWdEhGONCPBtMiLvZK0AymaEGHTbSQZWD/lPoBwKBgGhk
qg2zmd/BNoSgxkzOsbE7jTbR0VX+dXDYhKgmJM7b8AjJFkWCgYcwoTZzV+RcW6rj
2q+6HhizAV2QvmpiIIbQd+Mj3EpybYk/1R2ox1qcUy/j/FbOcpihGiVtCjqF/2Mg
2rGTqMMoQl6JrBmsvyU44adjixTiZz0EHZYCkQoBAoGBAMRdmoR4mgIIWFPgSNDM
ISLJxKvSFPYDLyAepLfo38NzKfPB/XuZrcOoMEWRBnLl6dNN0msuzXnPRcn1gc1t
TG7db+hivAyUoRkIW3dB8pRj9dDUqO9OohjKsJxJaQCyH5vPkQFSLbTIgWrHhU+3
oSPiK/YngDV1AOmPDH7i62po
-----END PRIVATE KEY-----
""", #6
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExMloXDTIxMDEwMTAxNTExMlowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAojGu
fQaTVT9DJWJ/zogGfrryEJXYVy9c441O5MrLlRx7nCIWIUs2NEhHDJdqJjYOTdmk
K98VhdMpDPZwxjgvvZrh43lStBRIW3zZxv747rSl2VtpSqD/6UNWJe5u4SR7oga4
JfITOKHg/+ASxnOxp/iu6oT6jBL6T7KSPh6Rf2+it2rsjhktRreFDJ2hyroNq1w4
ZVNCcNPgUIyos8u9RQKAWRNchFh0p0FCS9xNrn3e+yHnt+p6mOOF2gMzfXT/M2hq
KQNmc5D3yNoH2smWoz7F3XsRjIB1Ie4VWoRRaGEy7RwcwiDfcaemD0rQug6iqH7N
oomF6f3R4DyvVVLUkQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB/8SX6qyKsOyex
v3wubgN3FPyU9PqMfEzrFM6X5sax0VMVbSnekZrrXpdnXYV+3FBu2GLLQc900ojj
vKD+409JIriTcwdFGdLrQPTCRWkEOae8TlXpTxuNqJfCPVNxFN0znoat1bSRsX1U
K0mfEETQ3ARwlTkrF9CM+jkU3k/pnc9MoCLif8P7OAF38AmIbuTUG6Gpzy8RytJn
m5AiA3sds5R0rpGUu8mFeBpT6jIA1QF2g+QNHKOQcfJdCdfqTjKw5y34hjFqbWG9
RxWGeGNZkhC/jADCt+m+R6+hlyboLuIcVp8NJw6CGbr1+k136z/Dj+Fdhm6FzF7B
qULeRQJ+
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCiMa59BpNVP0Ml
Yn/OiAZ+uvIQldhXL1zjjU7kysuVHHucIhYhSzY0SEcMl2omNg5N2aQr3xWF0ykM
9nDGOC+9muHjeVK0FEhbfNnG/vjutKXZW2lKoP/pQ1Yl7m7hJHuiBrgl8hM4oeD/
4BLGc7Gn+K7qhPqMEvpPspI+HpF/b6K3auyOGS1Gt4UMnaHKug2rXDhlU0Jw0+BQ
jKizy71FAoBZE1yEWHSnQUJL3E2ufd77Iee36nqY44XaAzN9dP8zaGopA2ZzkPfI
2gfayZajPsXdexGMgHUh7hVahFFoYTLtHBzCIN9xp6YPStC6DqKofs2iiYXp/dHg
PK9VUtSRAgMBAAECggEANjn0A3rqUUr4UQxwfIV/3mj0O1VN4kBEhxOcd+PRUsYW
EapXycPSmII9ttj8tU/HUoHcYIqSMI7bn6jZJXxtga/BrALJAsnxMx031k8yvOQK
uvPT7Q6M4NkReVcRHRbMeuxSLuWTRZDhn8qznEPb9rOvD1tsRN6nb3PdbwVbUcZh
2F6JDrTyI/Df6nrYQAWOEe2ay7tzgrNYE4vh+DW7oVmyHRgFYA+DIG5Q+7OVWeW5
bwYYPKlo4/B0L+GfMKfMVZ+5TvFWAK0YD1e/CW1Gv+i/8dWm4O7UNGg5mTnrIcy1
g5wkKbyea02/np2B/XBsSWXDl6rTDHL7ay0rH2hjEQKBgQDMKSm3miQTIcL/F2kG
ieapmRtSc7cedP967IwUfjz4+pxPa4LiU47OCGp1bmUTuJAItyQyu/5O3uLpAriD
PTU+oVlhqt+lI6+SJ4SIYw01/iWI3EF2STwXVnohWG1EgzuFM/EqoB+mrodNONfG
UmP58vI9Is8fdugXgpTz4Yq9pQKBgQDLYJoyMVrYTvUn5oWft8ptsWZn6JZXt5Bd
aXh+YhNmtCrSORL3XjcH4yjlcn7X8Op33WQTbPo7QAJ1CumJzAI88BZ/8za638xb
nLueviZApCt0bNMEEdxDffxHFc5TyHE+obMKFfApbCnD0ggO6lrZ8jK9prArLOCp
mRU9SSRffQKBgAjoBszeqZI4F9SfBdLmMyzU5A89wxBOFFMdfKLsOua1sBn627PZ
51Hvpg1HaptoosfujWK1NsvkB0wY9UmsYuU/jrGnDaibnO4oUSzN/WaMlsCYszZg
zYFLIXrQ67tgajlOYcf1Qkw4MujYgPlC4N+njI/EM/rwagGUjcDx5uaNAoGASyqz
EuYG63eTSGH89SEaohw0+yaNmnHv23aF4EAjZ4wjX3tUtTSPJk0g6ly84Nbb8d1T
hZJ7kbaAsf2Mfy91jEw4JKYhjkP05c8x0OP6g12p6efmvdRUEmXX/fXjQjgNEtb0
sz+UedrOPN+9trWLSo4njsyyw+JcTpKTtQj5dokCgYEAg9Y3msg+GvR5t/rPVlKd
keZkrAp0xBJZgqG7CTPXWS1FjwbAPo7x4ZOwtsgjCuE52lar4j+r2Il+CDYeLfxN
h/Jfn6S9ThUh+B1PMvKMMnJUahg8cVL8uQuBcbAy8HPRK78WO2BTnje44wFAJwTc
0liuYqVxZIRlFLRl8nGqog8=
-----END PRIVATE KEY-----
""", #7
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExMloXDTIxMDEwMTAxNTExMlowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu9oO
cFlNukUcLfFrfkEaUiilcHLmn5OokQbj95CGd2ehQCCVwrkunYLBisthRaancFFb
/yM998B0IUsKTsoLi5DAN3/SkSm6GiQIGO05E4eBPljwJ61QQMxh8+1TwQ9HTun1
ZE1lhVN1aRmI9VsbyTQLjXh9OFNLSJEKb29mXsgzYwYwNOvo+idzXpy4bMyNoGxY
Y+s2FIKehNHHCv4ravDn8rf6DtDOvyN4d0/QyNws9FpAZMXmLwtBJ9exOqKFW43w
97NxgdNiTFyttrTKTi0b+9v3GVdcEZw5b2RMIKi6ZzPof6/0OlThK6C3xzFK3Bp4
PMjTfXw5yyRGVBnZZwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA4Ms6LqzMu757z
bxISiErRls6fcnq0fpSmiPNHNKM7YwG9KHYwPT6A0UMt30zDwNOXCQBI19caGeeO
MLPWa7Gcqm2XZB2jQwvLRPeFSy9fm6RzJFeyhrh/uFEwUetwYmi/cqeIFDRDBQKn
bOaXkBk0AaSmI5nRYfuqpMMjaKOFIFcoADw4l9wWhv6DmnrqANzIdsvoSXi5m8RL
FcZQDZyHFlHh3P3tLkmQ7ErM2/JDwWWPEEJMlDm/q47FTOQSXZksTI3WRqbbKVv3
iQlJjpgi9yAuxZwoM3M4975iWH4LCZVMCSqmKCBt1h9wv4LxqX/3kfZhRdy1gG+j
41NOSwJ/
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC72g5wWU26RRwt
8Wt+QRpSKKVwcuafk6iRBuP3kIZ3Z6FAIJXCuS6dgsGKy2FFpqdwUVv/Iz33wHQh
SwpOyguLkMA3f9KRKboaJAgY7TkTh4E+WPAnrVBAzGHz7VPBD0dO6fVkTWWFU3Vp
GYj1WxvJNAuNeH04U0tIkQpvb2ZeyDNjBjA06+j6J3NenLhszI2gbFhj6zYUgp6E
0ccK/itq8Ofyt/oO0M6/I3h3T9DI3Cz0WkBkxeYvC0En17E6ooVbjfD3s3GB02JM
XK22tMpOLRv72/cZV1wRnDlvZEwgqLpnM+h/r/Q6VOEroLfHMUrcGng8yNN9fDnL
JEZUGdlnAgMBAAECggEALlZdlW0R9U6y4spYf65Dddy84n4VUWu0+wE+HoUyBiYz
6oOfLYdMbmIgp8H/XpT7XINVNBxXXtPEUaoXAtRoAKdWItqO8Gvgki4tKSjrGVwl
j2GU69SepT1FNExoiojgSCEB/RnyXu71WVWJKSyuL/V8nAsKqGgze9T7Q/2wvNQt
SQqLxZlrWF0P8WqaAiSrHV4GnDrdeF+k1KBo2+pSaDNv6cNwOyVG8EII9tqhF8kj
6nD6846ish6OqmlSisaSGopJZL1DCQzszFMxKd2+iBDY7Kn6hVIhRaNnaZUFhpKM
dNh6hBqOycMepAp0sz5pdo+fxpifkoR/cPWgyC3QkQKBgQDixe9VsiZ7u2joxF/9
JcAExKhqE28OUmIwt6/j+uzYShxN6Oo9FUo3ICtAPCCFsjhvb3Qum7FspmxrqtNy
fzclibZJPO8ey2PzqaiOfiVfgJmNSvoCOdgM4OqFLtRO6eSTzhJeI4VPrPcq/5la
0FuOi1WZs/Au9llqLqGSDH3UAwKBgQDUD/bSJbOk5SvNjFtFm0ClVJr66mJ5e4uN
4VGv8KGFAJ+ExIxujAukfKdwLjS1wEy2RePcshfT8Y9FVh/Q1KzzrQi3Gwmfq1G6
Dpu2HlJpaZl+9T81x2KS8GP3QNczWMe2nh7Lj+6st+b4F+6FYbVTFnHaae27sXrD
XPX15+uxzQKBgGy+pBWBF4kwBo/QU4NuTdU7hNNRPGkuwl1ASH1Xv6m8aDRII8Nk
6TDkITltW98g5oUxehI7oOpMKCO9SCZYsNY0YpBeQwCOYgDfc6/Y+A0C+x9RO/BD
UsJiPLPfD/pDmNPz9sTj3bKma+RXq29sCOujD0pkiiHLCnerotkJWnGHAoGAAkCJ
JoIv/jhQ1sX+0iZr8VWMr819bjzZppAWBgBQNtFi4E4WD7Z9CSopvQ9AkA2SwvzL
BrT9e8q88sePXvBjRdM4nHk1CPUQ0SEGllCMH4J3ltmT6kZLzbOv3BhcMLdop4/W
U+MbbcomMcxPRCtdeZxraR5m3+9qlliOZCYqYqECgYA5eLdxgyHxCS33QGFHRvXI
TLAHIrr7wK1xwgkmZlLzYSQ8Oqh1UEbgoMt4ulRczP2g7TCfvANw2Sw0H2Q5a6Fj
cnwVcXJ38DLg0GCPMwzE8dK7d8tKtV6kGiKy+KFvoKChPjE6uxhKKmCJaSwtQEPS
vsjX3iiIgUQPsSz8RrNFfQ==
-----END PRIVATE KEY-----
""", #8
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExMloXDTIxMDEwMTAxNTExMlowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5DNu
CKhhl6wCbgoCkFemwJh3ATbAjhInHpvQWIFDfSK1USElCKxqosIxiBQCx3Zs2d/U
GeIA7QAM2atNdXaateacEaKMmGE9LEtO0Dg5lmT43WzmGkG9NmCwK3JjAekc5S9d
HKNtEQo7o8RKfj81zlDSq2kzliy98cimk24VBBGkS2Cn7Vy/mxMCqWjQazTXbpoS
lXw6LiY5wFXQmXOB5GTSHvqyCtBQbOSSbJB77z/fm7bufTDObufTbJIq53WPt00Y
f+JNnzkX1X0MaBCUztoZwoMaExWucMe/7xsQ46hDn6KB4b0lZk+gsK45QHxvPE1R
72+ZkkIrGS/ljIKahQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQDib1653CneSmy2
gYzGeMlrI05Jqo3JuHNMQHzAjIrb4ee57VA4PTQa1ygGol/hVv6eTvZr3p2ospDS
5Kfwj1HLO4jSMX1Bnm1FG0naQogz2CD3xfYjbYOVRhAxpld1MNyRveIOhDRARY7N
XNAaNPZ1ALrwbENSYArr18xDzgGWe/dgyRCEpCFIsztiA+7jGvrmAZgceIE8K3h3
fkvNmXBH58ZHAGTiyRriBZqS+DXrBrQOztXSJwFnOZnRt6/efeBupt8j5hxVpBLW
vtjpBc23uUcbbHOY2AW2Bf+vIr4/LmJ/MheKV+maa2990vmC93tvWlFfc74mgUkW
HJfXDmR6
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDkM24IqGGXrAJu
CgKQV6bAmHcBNsCOEicem9BYgUN9IrVRISUIrGqiwjGIFALHdmzZ39QZ4gDtAAzZ
q011dpq15pwRooyYYT0sS07QODmWZPjdbOYaQb02YLArcmMB6RzlL10co20RCjuj
xEp+PzXOUNKraTOWLL3xyKaTbhUEEaRLYKftXL+bEwKpaNBrNNdumhKVfDouJjnA
VdCZc4HkZNIe+rIK0FBs5JJskHvvP9+btu59MM5u59NskirndY+3TRh/4k2fORfV
fQxoEJTO2hnCgxoTFa5wx7/vGxDjqEOfooHhvSVmT6CwrjlAfG88TVHvb5mSQisZ
L+WMgpqFAgMBAAECggEABTdPuo7uvCLIY2+DI319aEWT4sk3mYe8sSxqlLtPqZqT
fmk9iXc3cMTzkOK0NY71af19waGy17f6kzchLCAr5SCCTLzkbc87MLn/8S530oI4
VgdZMxxxkL6hCD0zGiYT7QEqJa9unMcZGeMwuLYFKtQaHKTo8vPO26n0dMY9YLxj
cNUxsKLcKk8dbfKKt4B4fZgB7nU0BG9YbKYZ3iZ7/3mG+6jA6u+VYc/WHYQjTmpL
oLFN7NOe3R7jIx/kJ1OqNWqsFoLpyiiWd1Mr0l3EdD1kCudptMgD8hd++nx2Yk2w
K4+CpOVIN/eCxDDaAOJgYjCtOayVwUkDAxRRt9VnAQKBgQD5s1j6RJtBNTlChVxS
W3WpcG4q8933AiUY/Chx0YTyopOiTi7AGUaA8AOLFBcO2npa+vzC+lvuOyrgOtVW
sD10H2v5jNKlbeBp+Q9rux2LAyp4TvzdXWKhVyZrdtITF0hn6vEYNp7MtyWRFb1O
3Ie5HQBPHtzllFOMynacjOdjpQKBgQDp9TrbfOmwGWmwPKmaFKuy8BKxjJM+ct0X
4Xs1uSy9Z9Y8QlDNbNaooI8DA1NY0jDVHwemiGC4bYsBNKNRcbI0s2nr0hQMft42
P/NpugHv0YXiVz+5bfim4woTiHHbfREqchlIGo3ryClAiDU9fYZwTOtb9jPIhX3G
9v+OsoMlYQKBgQDJUQW90S5zJlwh+69xXvfAQjswOimNCpeqSzK4gTn0/YqV4v7i
Nf6X2eqhaPMmMJNRYuYCtSMFMYLiAc0a9UC2rNa6/gSfB7VU+06phtTMzSKimNxa
BP6OIduB7Ox2I+Fmlw8GfJMPbeHF1YcpW7e5UV58a9+g4TNzYZC7qwarWQKBgQCA
FFaCbmHonCD18F/REFvm+/Lf7Ft3pp5PQouXH6bUkhIArzVZIKpramqgdaOdToSZ
SAGCM8rvbFja8hwurBWpMEdeaIW9SX8RJ/Vz/fateYDYJnemZgPoKQcNJnded5t8
Jzab+J2VZODgiTDMVvnQZOu8To6OyjXPRM0nK6cMQQKBgQDyX44PHRRhEXDgJFLU
qp2ODL54Qadc/thp2m+JmAvqmCCLwuYlGpRKVkLLuZW9W6RlVqarOC3VD3wX5PRZ
IsyCGLi+Jbrv9JIrYUXE80xNeQVNhrrf02OW0KHbqGxRaNOmp1THPw98VUGR2J/q
YAp6XUXU7LEBUrowye+Ty2o7Lg==
-----END PRIVATE KEY-----
""", #9
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExMVoXDTIxMDEwMTAxNTExMVowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1k2R
PWYihftppo3CoxeseFwgg7guxZVkP7aAur5uBzSeAB7sBG1G2bRrwMX71S4xPwot
zYiEoxUrTStUqEKjL2aozfHsXnHZ7kwwUgZFDZUg+ve2tZDA3HCUr4tLYKlyFqpx
2nCouc45MjQ4wAxRl4rQxIUG2uSTzvP+xXtjoJYMIEEyCpcsRXfqfVkEUe9nrPsF
0Ibzk7Cyt75HDI4uEzBuHux0DYuGy6R02jz/vf/dIZ4WepjSY06xpblTHZgieDRX
fU2+YOcvb0eDHyA8Q5p8ropK71MNIP5+kffFd90SVr4EkCA8S+cd6FdKQasRr+jF
9MUhMS4ObvlrYTG+hwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCy62MZ3+59/VpX
c9Hsmb4/BMWt0irLJit4w4SkuYKGFLCMKZI4LN4pEkXaiE1eqF2DNS1qOvl5luty
Zz4oggrqilwuFeH98o9Zeg9SYnouuypORVP/3DPbJF/jiQg5J8kJb1sy+BjRiT8I
5X6/cCBYT+MljFz5tpqWOtWTgA30e1BV8JFj8F4dgUcWsAVT/I4l9zgMLUnhcO6E
wLtEE0I6aT1RHJB28ndwJzj4La98Oirw7LAEAWbExWYB90ypLaGY+JVJe3f5fijC
fJpQ2mbs4syXDmb5bU2C2pGPTKZPcyx15iQrq1uHInD0facOw+pmllAFxuG96lA1
+o2VzKwP
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDWTZE9ZiKF+2mm
jcKjF6x4XCCDuC7FlWQ/toC6vm4HNJ4AHuwEbUbZtGvAxfvVLjE/Ci3NiISjFStN
K1SoQqMvZqjN8execdnuTDBSBkUNlSD697a1kMDccJSvi0tgqXIWqnHacKi5zjky
NDjADFGXitDEhQba5JPO8/7Fe2OglgwgQTIKlyxFd+p9WQRR72es+wXQhvOTsLK3
vkcMji4TMG4e7HQNi4bLpHTaPP+9/90hnhZ6mNJjTrGluVMdmCJ4NFd9Tb5g5y9v
R4MfIDxDmnyuikrvUw0g/n6R98V33RJWvgSQIDxL5x3oV0pBqxGv6MX0xSExLg5u
+WthMb6HAgMBAAECggEAeCyRSNwQeg/NZD/UqP6qkegft52+ZMBssinWsGH/c3z3
KVwtwCHDfGvnjPe5TAeWSCKeIsbukkFZwfGNjLmppvgrqymCAkhYDICfDDBF4uMA
1pu40sJ01Gkxh+tV/sOmnb1BEVzh0Sgq/NM6C8ActR18CugKOw+5L3G2KeoSqUbT
2hcPUsnik10KwqW737GQW4LtEQEr/iRmQkxI3+HBzvPWjFZzjOcpUph+FW5TXtaU
T26mt1j+FjbdvvhCuRMY/VZBJ5h1RKU95r57F1AjW/C0RRJ8FxR1CeSy4IlmQBrh
6wAa3Tdm0k/n4ZspC9bF5eVTJEtb0AohiYZrIa8MuQKBgQD8yjCLYa41H304odCx
NwPRJcmlIk5YGxPrhHAT9GEgU6n/no7YMVx1L7fNLcMjAyx54jauEU7J19Aki7eV
SIdU9TwqmkOAFfM6TOEJZiOi66gABOxeK2yDyfmR6Apaw3caku4O058t4KVwHSCB
DanYCMzxCBqS9jUTTyAh0fMg6wKBgQDZBkIukg3FKPor5LzkUXIKnNHYPfHbERHw
piWS6GZwqhuWNlOCWxiBR4rEUU/RbFQZw/FCi5OuAk2lBC0LBmC0/Sz4/+xDdCbv
uNhMOTRcy9nFVpmpIWCx4N/KmXHEuFxli/JNXux7iki74AVC9VPrAt/kCvwf06Df
oDb8ljdR1QKBgQChVOD6c5Lc8IXYeN1Z3IShHH6+11AsxstFyjZFZff+y6Z5L1Z2
/7nESHoDhqs9Uy81cnv3R7CC/Ssnx8uYiLtmK0UE44Mk4d1jXeFZQEiKF+AWcw3v
Y8NTsLmItxC0sH75BMDN0Z2LiA3Nqaku8+trpuI1Cjj7hgqFkkAtlXKXlQKBgBMb
c/Q5s7CqHOyEZQUNDqdUiz0opwSMijHPzvsSLwK4V1lwSwXtE0k+jT8fkZF0oirq
j3E2bLgjR8bBiV2xIA6PQ8hgb+K4dT0h3xlG6A9Le07egwTbBXJjxBBIVjXlrWzb
V2fsdZGi6ShxXsU4aD0GscOYG/6JWV6W8oBmkVRJAoGAepIZ+OYmFjb7uxdh4EtP
hluEtx5bLOLuo6c0S149omUXUhbsuyzTZS6Ip9ySDMnK3954c4Q4WJ4yQKixQNVq
78aDfy4hP/8TE/Q9CRddUof2P33PJMhVNqzTRYMpqV+zxifvtw3hoDTLKHTQxCR2
M1+O4VvokU5pBqUpGXiMDfs=
-----END PRIVATE KEY-----
""", #10
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExMVoXDTIxMDEwMTAxNTExMVowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnbCU
M37hG7zrCyyJEI6pZmOomnI+CozbP5KAhWSV5y7R5H6lcAEG2UDV+lCUxHT2ufOa
i1H16bXyBt7VoMTHIH50S58NUCUEXcuRWVR16tr8CzcTHQAkfIrmhY2XffPilX7h
aw35UkoVmXcqSDNNJD6jmvWexvmbhzVWW8Vt5Pivet2/leVuqPXB54/alSbkC74m
x6X5XKQc6eyPsb1xvNBuiSpFzdqbEn7lUwj6jFTkh9tlixgmgx+J0XoQXbawyrAg
rcIQcse/Ww+KBA1KSccFze+XBTbIull4boYhbJqkb6DW5bY7/me2nNxE9DRGwq+S
kBsKq3YKeCf8LEhfqQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAD+tWGFhINYsWT
ibKWlCGgBc5uB7611cLCevx1yAL6SaOECVCQXzaaXIaETSbyY03UO2yBy3Pl10FV
GYXLrAWTFZsNVJm55XIibTNw1UBPNwdIoCSzAYuOgMF0GHhTTQU0hNYWstOnnE2T
6lSAZQZFkaW4ZKs6sUp42Em9Bu99PehyIgnw14qb9NPg5qKdi2GAvkImZCrGpMdK
OF31U7Ob0XQ0lxykcNgG4LlUACd+QxLfNpmLBZUGfikexYa1VqBFm3oAvTt8ybNQ
qr7AKXDFnW75aCBaMpQWzrstA7yYZ3D9XCd5ZNf6d08lGM/oerDAIGnZOZPJgs5U
FaWPHdS9
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCdsJQzfuEbvOsL
LIkQjqlmY6iacj4KjNs/koCFZJXnLtHkfqVwAQbZQNX6UJTEdPa585qLUfXptfIG
3tWgxMcgfnRLnw1QJQRdy5FZVHXq2vwLNxMdACR8iuaFjZd98+KVfuFrDflSShWZ
dypIM00kPqOa9Z7G+ZuHNVZbxW3k+K963b+V5W6o9cHnj9qVJuQLvibHpflcpBzp
7I+xvXG80G6JKkXN2psSfuVTCPqMVOSH22WLGCaDH4nRehBdtrDKsCCtwhByx79b
D4oEDUpJxwXN75cFNsi6WXhuhiFsmqRvoNbltjv+Z7ac3ET0NEbCr5KQGwqrdgp4
J/wsSF+pAgMBAAECggEAPSu1ofBTRN5ZU4FYPlsJLdX1Hsy4coFHv/aF8rkdSYwp
EflrFfLgBEEZgLvnqfoxh9sPFYKa4amaFL42ouIS2PEVDgzKLk/dzMDeRof0IkIG
yhb4TCS1ArcjS6WsociNGi8ZJN1L3Xctv9WxSkbUYv4Fm2Qyzr8fbSjssjb5NXwD
K11fsj6Pfy/mQrI0TSTlzWC7ARIlCMTWQ8G8zEU6bMFIG6DMjt2J4VgYVXUKetZA
VPuS+pwkH2obQe6FLRuiNxH4GitVAASYPea6foER4AggBMRp8q8F6+WssjoyEORb
0sJxmnxhznoTRMCuTsUj6XMgmOTOnA3lQXsIB0DYcQKBgQDO6mMRVVFWzgkE9Q5/
36n06KvGYF9TCRDL9vRC8kCqcGd1Hy6jRj0D8049KUHaN74pfWg6gsQjPkKzwKnC
vxNl72tVvLqm7Fo531BGfKK/46ZvxeWMMraNW4+9LhwMPu2LN5OEdwwCgyaURpxh
ktCp+RrGjz08Kn82X1jJPdwxDQKBgQDDGMvZ7ZUDGq5+RJkmHJ58lQtiaMZclmYV
R9YwOxJV6ino3EYrGOtUkqiemgAACdMWE/JMJlB1/JINawJwUsZ2XDp/9jNLPgLc
gphCmagaO34U/YMaJbJIK2gkCX7p8EcD+x45qWa0bEMPW38QfN/qQdUPjNmpuIiI
Zleyl1TqDQKBgQCvIoat0ighsAzETGN0aqzhJdrW8xVcJA06hpFi5MdFPBTldno0
KqxUXqj3badWe94SIhqJg8teBUHSAZ3uv2o82nRgQnk99km8OD8rGi1q+9YRP1C2
5OnNJhW4y4FkABNxxZ2v/k+FBNsvn8CXefvyEm3OaMks1s+MBxIQa7KnNQKBgFwX
HUo+GiN/+bPCf6P8yFa4J8qI+HEF0SPkZ9cWWx5QzP2M1FZNie++1nce7DcYbBo0
yh9lyn8W/H328AzDFckS2c5DEY1HtSQPRP3S+AWB5Y7U54h1GMV2L88q6ExWzb60
T10aeE9b9v+NydmniC5UatTPQIMbht8Tp/u18TAVAoGBAJphAfiqWWV2M5aBCSXq
WxLZ71AJ0PZBmRa/9iwtccwXQpMcW6wHK3YSQxci+sB97TElRa3/onlVSpohrUtg
VCvCwfSHX1LmrfWNSkoJZwCQt+YYuMqW86K0tzLzI1EMjIH9LgQvB6RR26PZQs+E
jr1ZvRc+wPTq6sxCF1h9ZAfN
-----END PRIVATE KEY-----
""", #11
]
# To disable the pre-computed tub certs, uncomment this line.
# SYSTEM_TEST_CERTS = []
def flush_but_dont_ignore(res):
    """
    Flush the foolscap eventual-send queue, then pass ``res`` through
    unchanged so the original result (success or failure) stays on the
    callback chain.
    """
    flushing = flushEventualQueue()
    flushing.addCallback(lambda ignored: res)
    return flushing
def _render_config(config):
    """
    Convert a ``dict`` of ``dict`` of ``unicode`` to an ini-format string.

    The outer keys become section headings; sections are separated by a
    blank line.
    """
    sections = [
        _render_config_section(heading, body)
        for (heading, body) in config.items()
    ]
    return u"\n\n".join(sections)
def _render_config_section(heading, values):
    """
    Convert a ``unicode`` heading and a ``dict`` of ``unicode`` values to one
    ini-format section as ``unicode``.
    """
    body = _render_section_values(values)
    return u"[{}]\n{}\n".format(heading, body)
def _render_section_values(values):
"""
Convert a ``dict`` of ``unicode`` to the body of an ini-format section as
``unicode``.
"""
return u"\n".join(list(
u"{} = {}".format(k, v)
for (k, v)
in sorted(values.items())
))
class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
    """
    Mixin that stands up a complete in-process "grid" -- one introducer plus
    several client nodes -- for end-to-end system tests.
    """

    def setUp(self):
        # Hand out distinct local ports so nodes created in this process
        # cannot collide with each other.
        self.port_assigner = SameProcessStreamEndpointAssigner()
        self.port_assigner.setUp()
        self.addCleanup(self.port_assigner.tearDown)
        # Parent service for every node started by this mixin; stopping it
        # in tearDown stops them all.
        self.sparent = service.MultiService()
        self.sparent.startService()

    def tearDown(self):
        log.msg("shutting down SystemTest services")
        d = self.sparent.stopService()
        # Flush the foolscap eventual-send queue but keep the original
        # result (success or failure) on the callback chain.
        d.addBoth(flush_but_dont_ignore)
        return d

    def getdir(self, subdir):
        # Path of a node directory beneath this test's base directory.
        return os.path.join(self.basedir, subdir)

    def add_service(self, s):
        # Parent ``s`` under self.sparent so it starts now and is stopped in
        # tearDown; returns ``s`` for call-site convenience.
        s.setServiceParent(self.sparent)
        return s

    def _create_introducer(self):
        """
        :returns: (via Deferred) an Introducer instance
        """
        iv_dir = self.getdir("introducer")
        if not os.path.isdir(iv_dir):
            _, port_endpoint = self.port_assigner.assign(reactor)
            # Non-ASCII nickname deliberately exercises UTF-8 config handling.
            introducer_config = (
                u"[node]\n"
                u"nickname = introducer \N{BLACK SMILING FACE}\n" +
                u"web.port = {}\n".format(port_endpoint)
            ).encode("utf-8")
            fileutil.make_dirs(iv_dir)
            fileutil.write(
                os.path.join(iv_dir, 'tahoe.cfg'),
                introducer_config,
            )
            if SYSTEM_TEST_CERTS:
                # Use a pre-computed tub certificate to avoid the cost of
                # generating a fresh one for every test run.
                os.mkdir(os.path.join(iv_dir, "private"))
                f = open(os.path.join(iv_dir, "private", "node.pem"), "w")
                f.write(SYSTEM_TEST_CERTS[0])
                f.close()
        return create_introducer(basedir=iv_dir)

    def _get_introducer_web(self):
        # The introducer wrote its web API root URL to ``node.url`` when it
        # started up.
        with open(os.path.join(self.getdir("introducer"), "node.url"), "r") as f:
            return f.read().strip()

    @inlineCallbacks
    def set_up_nodes(self, NUMCLIENTS=5):
        """
        Create an introducer and ``NUMCLIENTS`` client nodes pointed at it.  All
        of the nodes are running in this process.

        As a side-effect, set:

        * ``numclients`` to ``NUMCLIENTS``
        * ``introducer`` to the ``_IntroducerNode`` instance
        * ``introweb_url`` to the introducer's HTTP API endpoint.

        :param int NUMCLIENTS: The number of client nodes to create.

        :return: A ``Deferred`` that fires when the nodes have connected to
            each other.
        """
        self.numclients = NUMCLIENTS
        self.introducer = yield self._create_introducer()
        self.add_service(self.introducer)
        self.introweb_url = self._get_introducer_web()
        yield self._set_up_client_nodes()

    @inlineCallbacks
    def _set_up_client_nodes(self):
        q = self.introducer
        self.introducer_furl = q.introducer_url
        self.clients = []
        basedirs = []
        for i in range(self.numclients):
            basedirs.append((yield self._set_up_client_node(i)))
        # start clients[0], wait for its tub to be ready (at which point it
        # will have registered the helper furl).
        c = yield client.create_client(basedirs[0])
        c.setServiceParent(self.sparent)
        self.clients.append(c)
        c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
        with open(os.path.join(basedirs[0],"private","helper.furl"), "r") as f:
            helper_furl = f.read()
        self.helper_furl = helper_furl
        if self.numclients >= 4:
            # Point client 3 at client 0's helper so helper-mediated uploads
            # can be exercised.
            with open(os.path.join(basedirs[3], 'tahoe.cfg'), 'a+') as f:
                f.write(
                    "[client]\n"
                    "helper.furl = {}\n".format(helper_furl)
                )
        # this starts the rest of the clients
        for i in range(1, self.numclients):
            c = yield client.create_client(basedirs[i])
            c.setServiceParent(self.sparent)
            self.clients.append(c)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
        log.msg("STARTING")
        yield self.wait_for_connections()
        log.msg("CONNECTED")
        # now find out where the web port was
        self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
        if self.numclients >=4:
            # and the helper-using webport
            self.helper_webish_url = self.clients[3].getServiceNamed("webish").getURL()

    def _generate_config(self, which, basedir):
        # Build the tahoe.cfg contents for client number ``which``.  Clients
        # get deliberately different configurations so one grid covers
        # several configuration combinations.
        config = {}
        except1 = set(range(self.numclients)) - {1}
        feature_matrix = {
            ("client", "nickname"): except1,
            # client 1 has to auto-assign an address.
            ("node", "tub.port"): except1,
            ("node", "tub.location"): except1,
            # client 0 runs a webserver and a helper
            # client 3 runs a webserver but no helper
            ("node", "web.port"): {0, 3},
            ("node", "timeout.keepalive"): {0},
            ("node", "timeout.disconnect"): {3},
            ("helper", "enabled"): {0},
        }

        def setconf(config, which, section, feature, value):
            # Only apply the option when the feature matrix includes this
            # client; absent entries default to "applies to everyone".
            if which in feature_matrix.get((section, feature), {which}):
                config.setdefault(section, {})[feature] = value

        setnode = partial(setconf, config, which, "node")
        sethelper = partial(setconf, config, which, "helper")
        setnode("nickname", u"client %d \N{BLACK SMILING FACE}" % (which,))
        tub_location_hint, tub_port_endpoint = self.port_assigner.assign(reactor)
        setnode("tub.port", tub_port_endpoint)
        setnode("tub.location", tub_location_hint)
        _, web_port_endpoint = self.port_assigner.assign(reactor)
        setnode("web.port", web_port_endpoint)
        setnode("timeout.keepalive", "600")
        setnode("timeout.disconnect", "1800")
        sethelper("enabled", "True")
        # The introducer goes into private/introducers.yaml rather than the
        # main config.  NOTE(review): the nested-indent below was collapsed by
        # the rendering this was recovered from; "furl" must be indented under
        # "petname2" for valid YAML.
        iyaml = ("introducers:\n"
                 " petname2:\n"
                 "  furl: %s\n") % self.introducer_furl
        iyaml_fn = os.path.join(basedir, "private", "introducers.yaml")
        fileutil.write(iyaml_fn, iyaml)
        return _render_config(config)

    def _set_up_client_node(self, which):
        # Create (but do not start) the on-disk node directory for client
        # ``which``; returns its base directory path.
        basedir = self.getdir("client%d" % (which,))
        fileutil.make_dirs(os.path.join(basedir, "private"))
        if len(SYSTEM_TEST_CERTS) > (which + 1):
            # Reuse a pre-computed tub certificate (index 0 belongs to the
            # introducer) instead of generating a new one.
            f = open(os.path.join(basedir, "private", "node.pem"), "w")
            f.write(SYSTEM_TEST_CERTS[which + 1])
            f.close()
        config = self._generate_config(which, basedir)
        fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
        return basedir

    def bounce_client(self, num):
        # Stop client ``num`` and replace it with a freshly-created node
        # using the same base directory; fires once the grid is reconnected.
        c = self.clients[num]
        d = c.disownServiceParent()
        # I think windows requires a moment to let the connection really stop
        # and the port number made available for re-use. TODO: examine the
        # behavior, see if this is really the problem, see if we can do
        # better than blindly waiting for a second.
        d.addCallback(self.stall, 1.0)
        @defer.inlineCallbacks
        def _stopped(res):
            new_c = yield client.create_client(self.getdir("client%d" % num))
            self.clients[num] = new_c
            new_c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
            new_c.setServiceParent(self.sparent)
        d.addCallback(_stopped)
        d.addCallback(lambda res: self.wait_for_connections())
        def _maybe_get_webport(res):
            if num == 0:
                # now find out where the web port was
                self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
        d.addCallback(_maybe_get_webport)
        return d

    @defer.inlineCallbacks
    def add_extra_node(self, client_num, helper_furl=None,
                       add_to_sparent=False):
        # usually this node is *not* parented to our self.sparent, so we can
        # shut it down separately from the rest, to exercise the
        # connection-lost code
        basedir = FilePath(self.getdir("client%d" % client_num))
        basedir.makedirs()
        config = "[client]\n"
        if helper_furl:
            config += "helper.furl = %s\n" % helper_furl
        basedir.child("tahoe.cfg").setContent(config.encode("utf-8"))
        private = basedir.child("private")
        private.makedirs()
        write_introducer(
            basedir,
            "default",
            self.introducer_furl,
        )
        c = yield client.create_client(basedir.path)
        self.clients.append(c)
        c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
        self.numclients += 1
        if add_to_sparent:
            c.setServiceParent(self.sparent)
        else:
            c.startService()
        yield self.wait_for_connections()
        defer.returnValue(c)

    def _check_connections(self):
        # Poll predicate: True when every client is connected to the
        # introducer, to all storage servers, and (if configured) to its
        # helper.
        for i, c in enumerate(self.clients):
            if not c.connected_to_introducer():
                log.msg("%s not connected to introducer yet" % (i,))
                return False
            sb = c.get_storage_broker()
            connected_servers = sb.get_connected_servers()
            connected_names = sorted(list(
                connected.get_nickname()
                for connected
                in sb.get_known_servers()
                if connected.is_connected()
            ))
            if len(connected_servers) != self.numclients:
                wanted = sorted(list(
                    client.nickname
                    for client
                    in self.clients
                ))
                log.msg(
                    "client %s storage broker connected to %s, missing %s" % (
                        i,
                        connected_names,
                        set(wanted) - set(connected_names),
                    )
                )
                return False
            log.msg("client %s storage broker connected to %s, happy" % (
                i, connected_names,
            ))
            up = c.getServiceNamed("uploader")
            if up._helper_furl and not up._helper:
                log.msg("Helper fURL but no helper")
                return False
        return True

    def wait_for_connections(self, ignored=None):
        # Poll until the whole grid is fully connected; timeout is in seconds.
        return self.poll(self._check_connections, timeout=200)

View File

@ -15,6 +15,9 @@ import os
import sys
import time
import signal
from functools import (
partial,
)
from random import randrange
if PY2:
from StringIO import StringIO
@ -98,7 +101,7 @@ def run_cli_native(verb, *args, **kwargs):
args=args,
nodeargs=nodeargs,
)
argv = nodeargs + [verb] + list(args)
argv = ["tahoe"] + nodeargs + [verb] + list(args)
stdin = kwargs.get("stdin", "")
if PY2:
# The original behavior, the Python 2 behavior, is to accept either
@ -128,10 +131,20 @@ def run_cli_native(verb, *args, **kwargs):
stdout = TextIOWrapper(BytesIO(), encoding)
stderr = TextIOWrapper(BytesIO(), encoding)
d = defer.succeed(argv)
d.addCallback(runner.parse_or_exit_with_explanation, stdout=stdout)
d.addCallback(runner.dispatch,
stdin=stdin,
stdout=stdout, stderr=stderr)
d.addCallback(
partial(
runner.parse_or_exit,
runner.Options(),
),
stdout=stdout,
stderr=stderr,
)
d.addCallback(
runner.dispatch,
stdin=stdin,
stdout=stdout,
stderr=stderr,
)
def _done(rc, stdout=stdout, stderr=stderr):
if return_bytes and PY3:
stdout = stdout.buffer
@ -301,6 +314,16 @@ class FakeCanary(object):
def getPeer(self):
    """Return a placeholder peer identifier for this fake canary."""
    return "<fake>"
def disconnected(self):
    """Disconnect the canary, to be called by test code.

    Can only happen once; later calls are no-ops.
    """
    if self.disconnectors is None:
        return
    # Fire every registered disconnect notification, then mark the canary
    # as disconnected so this cannot run a second time.
    for (callback, args, kwargs) in list(self.disconnectors.values()):
        callback(*args, **kwargs)
    self.disconnectors = None
class ShouldFailMixin(object):

View File

@ -96,8 +96,14 @@ class FakeStorage(object):
shares[shnum] = f.getvalue()
# This doesn't actually implement the whole interface, but adding a commented
# interface implementation annotation for grepping purposes.
#@implementer(RIStorageServer)
class FakeStorageServer(object):
"""
A fake Foolscap remote object, implemented by overriding callRemote() to
call local methods.
"""
def __init__(self, peerid, storage):
    # peerid: identifier for this fake server (presumably a peer ID byte
    # string -- confirm against callers).
    # storage: the FakeStorage-style backend this fake server reads/writes.
    self.peerid = peerid
    self.storage = storage
@ -143,7 +149,7 @@ class FakeStorageServer(object):
readv = {}
for shnum, (testv, writev, new_length) in list(tw_vectors.items()):
for (offset, length, op, specimen) in testv:
assert op in (b"le", b"eq", b"ge")
assert op == b"eq"
# TODO: this isn't right, the read is controlled by read_vector,
# not by testv
readv[shnum] = [ specimen

View File

@ -0,0 +1,870 @@
"""
Tests for the ``IStorageServer`` interface.
Note that for performance, in the future we might want the same node to be
reused across tests, so each test should be careful to generate unique storage
indexes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.utils import PY2, bchr
if PY2:
# fmt: off
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
# fmt: on
from random import Random
from twisted.internet.defer import inlineCallbacks, returnValue
from foolscap.api import Referenceable, RemoteException
from allmydata.interfaces import IStorageServer
from .common_system import SystemTestMixin
from .common import AsyncTestCase
# Use random generator with known seed, so results are reproducible if tests
# are run in the same order.
_RANDOM = Random(0)
def _randbytes(length):
    # type: (int) -> bytes
    """Return a random ``bytes`` string of the given length, drawn from the
    module's seeded RNG so results are reproducible."""
    pieces = [bchr(_RANDOM.randrange(0, 256)) for _ in range(length)]
    return b"".join(pieces)
def new_storage_index():
    # type: () -> bytes
    """Return a new random 16-byte storage index (unique per call with
    overwhelming probability, so tests don't collide)."""
    return _randbytes(16)
def new_secret():
    # type: () -> bytes
    """Return a new random 32-byte secret (for lease renewal or
    cancellation)."""
    return _randbytes(32)
class IStorageServerSharedAPIsTestsMixin(object):
    """
    Tests for ``IStorageServer``'s shared APIs.

    ``self.storage_server`` is expected to provide ``IStorageServer``.
    """

    @inlineCallbacks
    def test_version(self):
        """
        ``IStorageServer`` returns a dictionary where the key is an expected
        protocol version.
        """
        result = yield self.storage_server.get_version()
        self.assertIsInstance(result, dict)
        # The v1 storage protocol key must always be present.
        self.assertIn(b"http://allmydata.org/tahoe/protocols/storage/v1", result)
class IStorageServerImmutableAPIsTestsMixin(object):
    """
    Tests for ``IStorageServer``'s immutable APIs.

    ``self.storage_server`` is expected to provide ``IStorageServer``.

    ``self.disconnect()`` should disconnect and then reconnect, creating a new
    ``self.storage_server``.  Some implementations may wish to skip tests using
    this; HTTP has no notion of disconnection.
    """

    @inlineCallbacks
    def test_allocate_buckets_new(self):
        """
        allocate_buckets() with a new storage index returns the matching
        shares.
        """
        (already_got, allocated) = yield self.storage_server.allocate_buckets(
            new_storage_index(),
            renew_secret=new_secret(),
            cancel_secret=new_secret(),
            sharenums=set(range(5)),
            allocated_size=1024,
            canary=Referenceable(),
        )
        self.assertEqual(already_got, set())
        self.assertEqual(set(allocated.keys()), set(range(5)))
        # We validate the bucket objects' interface in a later test.

    @inlineCallbacks
    def test_allocate_buckets_repeat(self):
        """
        ``IStorageServer.allocate_buckets()`` with the same storage index does not return
        work-in-progress buckets, but will add any newly added buckets.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (already_got, allocated) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums=set(range(4)),
            allocated_size=1024,
            canary=Referenceable(),
        )
        # Second call asks for shares 0-4; only share 4 is new.
        (already_got2, allocated2) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            set(range(5)),
            1024,
            Referenceable(),
        )
        self.assertEqual(already_got, already_got2)
        self.assertEqual(set(allocated2.keys()), {4})

    @inlineCallbacks
    def abort_or_disconnect_half_way(self, abort_or_disconnect):
        """
        If we disconnect/abort in the middle of writing to a bucket, all data
        is wiped, and it's even possible to write different data to the bucket.

        (In the real world one shouldn't do that, but writing different data is
        a good way to test that the original data really was wiped.)

        ``abort_or_disconnect`` is a callback that takes a bucket and aborts
        the upload, or perhaps disconnects the whole connection.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=1024,
            canary=Referenceable(),
        )
        # Bucket 1 is fully written in one go.
        yield allocated[0].callRemote("write", 0, b"1" * 1024)
        # Disconnect or abort, depending on the test:
        yield abort_or_disconnect(allocated[0])
        # Write different data with no complaint:
        (_, allocated) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=1024,
            canary=Referenceable(),
        )
        yield allocated[0].callRemote("write", 0, b"2" * 1024)

    def test_disconnection(self):
        """
        If we disconnect in the middle of writing to a bucket, all data is
        wiped, and it's even possible to write different data to the bucket.

        (In the real world one shouldn't do that, but writing different data is
        a good way to test that the original data really was wiped.)

        The HTTP protocol should skip this test, since disconnection is a
        meaningless concept there; this is more about testing that the
        implicit contract the Foolscap implementation depends on doesn't
        change as we refactor things.
        """
        return self.abort_or_disconnect_half_way(lambda _: self.disconnect())

    @inlineCallbacks
    def test_written_shares_are_allocated(self):
        """
        Shares that are fully written to show up as allocated in result from
        ``IStorageServer.allocate_buckets()``.  Partially-written or empty
        shares don't.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums=set(range(5)),
            allocated_size=1024,
            canary=Referenceable(),
        )
        # Bucket 1 is fully written in one go.
        yield allocated[1].callRemote("write", 0, b"1" * 1024)
        yield allocated[1].callRemote("close")
        # Bucket 2 is fully written in two steps.
        yield allocated[2].callRemote("write", 0, b"1" * 512)
        yield allocated[2].callRemote("write", 512, b"2" * 512)
        yield allocated[2].callRemote("close")
        # Bucket 0 has partial write.
        yield allocated[0].callRemote("write", 0, b"1" * 512)
        (already_got, _) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums=set(range(5)),
            allocated_size=1024,
            canary=Referenceable(),
        )
        self.assertEqual(already_got, {1, 2})

    @inlineCallbacks
    def test_written_shares_are_readable(self):
        """
        Shares that are fully written to can be read.

        The result is not affected by the order in which writes
        happened, only by their offsets.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums=set(range(5)),
            allocated_size=1024,
            canary=Referenceable(),
        )
        # Bucket 1 is fully written in order
        yield allocated[1].callRemote("write", 0, b"1" * 512)
        yield allocated[1].callRemote("write", 512, b"2" * 512)
        yield allocated[1].callRemote("close")
        # Bucket 2 is fully written in reverse.
        yield allocated[2].callRemote("write", 512, b"4" * 512)
        yield allocated[2].callRemote("write", 0, b"3" * 512)
        yield allocated[2].callRemote("close")
        buckets = yield self.storage_server.get_buckets(storage_index)
        self.assertEqual(set(buckets.keys()), {1, 2})
        self.assertEqual(
            (yield buckets[1].callRemote("read", 0, 1024)), b"1" * 512 + b"2" * 512
        )
        self.assertEqual(
            (yield buckets[2].callRemote("read", 0, 1024)), b"3" * 512 + b"4" * 512
        )

    @inlineCallbacks
    def test_non_matching_overlapping_writes(self):
        """
        When doing overlapping writes in immutable uploads, non-matching writes
        fail.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=30,
            canary=Referenceable(),
        )
        yield allocated[0].callRemote("write", 0, b"1" * 25)
        # Overlapping write that doesn't match:
        with self.assertRaises(RemoteException):
            yield allocated[0].callRemote("write", 20, b"2" * 10)

    @inlineCallbacks
    def test_matching_overlapping_writes(self):
        """
        When doing overlapping writes in immutable uploads, matching writes
        succeed.
        """
        storage_index, renew_secret, cancel_secret = (
            new_storage_index(),
            new_secret(),
            new_secret(),
        )
        (_, allocated) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret,
            cancel_secret,
            sharenums={0},
            allocated_size=25,
            canary=Referenceable(),
        )
        yield allocated[0].callRemote("write", 0, b"1" * 10)
        # Overlapping write that matches:
        yield allocated[0].callRemote("write", 5, b"1" * 20)
        yield allocated[0].callRemote("close")
        buckets = yield self.storage_server.get_buckets(storage_index)
        self.assertEqual(set(buckets.keys()), {0})
        self.assertEqual((yield buckets[0].callRemote("read", 0, 25)), b"1" * 25)

    def test_abort(self):
        """
        If we call ``abort`` on the ``RIBucketWriter`` to disconnect in the
        middle of writing to a bucket, all data is wiped, and it's even
        possible to write different data to the bucket.

        (In the real world one probably wouldn't do that, but writing different
        data is a good way to test that the original data really was wiped.)
        """
        return self.abort_or_disconnect_half_way(
            lambda bucket: bucket.callRemote("abort")
        )

    @inlineCallbacks
    def test_get_buckets_skips_unfinished_buckets(self):
        """
        Buckets that are not fully written are not returned by
        ``IStorageServer.get_buckets()`` implementations.
        """
        storage_index = new_storage_index()
        (_, allocated) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret=new_secret(),
            cancel_secret=new_secret(),
            sharenums=set(range(5)),
            allocated_size=10,
            canary=Referenceable(),
        )
        # Bucket 1 is fully written
        yield allocated[1].callRemote("write", 0, b"1" * 10)
        yield allocated[1].callRemote("close")
        # Bucket 2 is partially written
        yield allocated[2].callRemote("write", 0, b"1" * 5)
        buckets = yield self.storage_server.get_buckets(storage_index)
        self.assertEqual(set(buckets.keys()), {1})

    @inlineCallbacks
    def test_read_bucket_at_offset(self):
        """
        Given a read bucket returned from ``IStorageServer.get_buckets()``, it
        is possible to read at different offsets and lengths, with reads past
        the end resulting in empty bytes.
        """
        length = 256 * 17
        storage_index = new_storage_index()
        (_, allocated) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret=new_secret(),
            cancel_secret=new_secret(),
            sharenums=set(range(1)),
            allocated_size=length,
            canary=Referenceable(),
        )
        total_data = _randbytes(256 * 17)
        yield allocated[0].callRemote("write", 0, total_data)
        yield allocated[0].callRemote("close")
        buckets = yield self.storage_server.get_buckets(storage_index)
        bucket = buckets[0]
        for start, to_read in [
            (0, 250),  # fraction
            (0, length),  # whole thing
            (100, 1024),  # offset fraction
            (length + 1, 100),  # completely out of bounds
            (length - 100, 200),  # partially out of bounds
        ]:
            data = yield bucket.callRemote("read", start, to_read)
            self.assertEqual(
                data,
                total_data[start : start + to_read],
                "Didn't match for start {}, length {}".format(start, to_read),
            )

    @inlineCallbacks
    def create_share(self):
        """Create a 10-byte share; return (via Deferred) its storage index."""
        storage_index = new_storage_index()
        (_, allocated) = yield self.storage_server.allocate_buckets(
            storage_index,
            renew_secret=new_secret(),
            cancel_secret=new_secret(),
            sharenums=set(range(1)),
            allocated_size=10,
            canary=Referenceable(),
        )
        yield allocated[0].callRemote("write", 0, b"0123456789")
        yield allocated[0].callRemote("close")
        returnValue(storage_index)

    @inlineCallbacks
    def test_bucket_advise_corrupt_share(self):
        """
        Calling ``advise_corrupt_share()`` on a bucket returned by
        ``IStorageServer.get_buckets()`` does not result in error (other
        behavior is opaque at this level of abstraction).
        """
        storage_index = yield self.create_share()
        buckets = yield self.storage_server.get_buckets(storage_index)
        yield buckets[0].callRemote("advise_corrupt_share", b"OH NO")

    @inlineCallbacks
    def test_advise_corrupt_share(self):
        """
        Calling ``advise_corrupt_share()`` on an immutable share does not
        result in error (other behavior is opaque at this level of
        abstraction).
        """
        storage_index = yield self.create_share()
        yield self.storage_server.advise_corrupt_share(
            b"immutable", storage_index, 0, b"ono"
        )
class IStorageServerMutableAPIsTestsMixin(object):
    """
    Tests for ``IStorageServer``'s mutable APIs.

    ``self.storage_server`` is expected to provide ``IStorageServer``.

    ``STARAW`` is short for ``slot_testv_and_readv_and_writev``.

    NOTE(review): some test names below spell the abbreviation ``SATRAW``
    rather than ``STARAW`` -- presumably a transposition; kept as-is so test
    identifiers don't change.
    """

    def new_secrets(self):
        """Return a 3-tuple of secrets for STARAW calls."""
        return (new_secret(), new_secret(), new_secret())

    def staraw(self, *args, **kwargs):
        """Like ``slot_testv_and_readv_and_writev``, but less typing."""
        return self.storage_server.slot_testv_and_readv_and_writev(*args, **kwargs)

    @inlineCallbacks
    def test_STARAW_reads_after_write(self):
        """
        When data is written with
        ``IStorageServer.slot_testv_and_readv_and_writev``, it can then be read
        by a separate call using that API.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"abcdefg")], 7),
                1: ([], [(0, b"0123"), (4, b"456")], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            # Whole thing, partial, going beyond the edge, completely outside
            # range:
            r_vector=[(0, 7), (2, 3), (6, 8), (100, 10)],
        )
        self.assertEqual(
            reads,
            {0: [b"abcdefg", b"cde", b"g", b""], 1: [b"0123456", b"234", b"6", b""]},
        )

    @inlineCallbacks
    def test_SATRAW_reads_happen_before_writes_in_single_query(self):
        """
        If a ``IStorageServer.slot_testv_and_readv_and_writev`` command
        contains both reads and writes, the read returns results that precede
        the write.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"abcdefg")], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        # Read and write in same command; read happens before write:
        (written, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"X" * 7)], 7),
            },
            r_vector=[(0, 7)],
        )
        self.assertEqual(written, True)
        self.assertEqual(reads, {0: [b"abcdefg"]})
        # The write is available in next read:
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 7)],
        )
        self.assertEqual(reads, {0: [b"X" * 7]})

    @inlineCallbacks
    def test_SATRAW_writes_happens_only_if_test_matches(self):
        """
        If a ``IStorageServer.slot_testv_and_readv_and_writev`` includes both a
        test and a write, the write succeeds if the test matches, and fails if
        the test does not match.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"1" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        # Test matches, so write happens:
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: (
                    [(0, 3, b"1" * 3), (3, 4, b"1" * 4)],
                    [(0, b"2" * 7)],
                    7,
                ),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 7)],
        )
        self.assertEqual(reads, {0: [b"2" * 7]})
        # Test does not match, so write does not happen:
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([(0, 7, b"1" * 7)], [(0, b"3" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, False)
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 7)],
        )
        # Still the previous contents; the failed write changed nothing.
        self.assertEqual(reads, {0: [b"2" * 7]})

    @inlineCallbacks
    def test_SATRAW_tests_past_end_of_data(self):
        """
        If a ``IStorageServer.slot_testv_and_readv_and_writev`` includes a test
        vector that reads past the end of the data, the result is limited to
        actual available data.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        # Since there is no data on server, the test vector will return empty
        # string, which matches expected result, so write will succeed.
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([(0, 10, b"")], [(0, b"1" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        # Now the test vector is a 10-read off of a 7-byte value, but expected
        # value is still 7 bytes, so the write will again succeed.
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([(0, 10, b"1" * 7)], [(0, b"2" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)

    @inlineCallbacks
    def test_SATRAW_reads_past_end_of_data(self):
        """
        If a ``IStorageServer.slot_testv_and_readv_and_writev`` reads past the
        end of the data, the result is limited to actual available data.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        # Write some data
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"12345")], 5),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        # Reads past end.
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 100), (2, 50)],
        )
        self.assertEqual(reads, {0: [b"12345", b"345"]})

    @inlineCallbacks
    def test_STARAW_write_enabler_must_match(self):
        """
        If the write enabler secret passed to
        ``IStorageServer.slot_testv_and_readv_and_writev`` doesn't match
        previous writes, the write fails.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"1" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        # Write enabler secret does not match, so write does not happen:
        bad_secrets = (new_secret(),) + secrets[1:]
        with self.assertRaises(RemoteException):
            yield self.staraw(
                storage_index,
                bad_secrets,
                tw_vectors={
                    0: ([], [(0, b"2" * 7)], 7),
                },
                r_vector=[],
            )
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 7)],
        )
        self.assertEqual(reads, {0: [b"1" * 7]})

    @inlineCallbacks
    def test_STARAW_zero_new_length_deletes(self):
        """
        A zero new length passed to
        ``IStorageServer.slot_testv_and_readv_and_writev`` deletes the share.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"1" * 7)], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        # Write with new length of 0:
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"1" * 7)], 0),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        # It's gone!
        (_, reads) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={},
            r_vector=[(0, 7)],
        )
        self.assertEqual(reads, {})

    @inlineCallbacks
    def test_slot_readv(self):
        """
        Data written with ``IStorageServer.slot_testv_and_readv_and_writev()``
        can be read using ``IStorageServer.slot_readv()``.  Reads can't go past
        the end of the data.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"abcdefg")], 7),
                1: ([], [(0, b"0123"), (4, b"456")], 7),
                # This will never get read from, just here to show we only read
                # from shares explicitly requested by slot_readv:
                2: ([], [(0, b"XYZW")], 4),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        reads = yield self.storage_server.slot_readv(
            storage_index,
            shares=[0, 1],
            # Whole thing, partial, going beyond the edge, completely outside
            # range:
            readv=[(0, 7), (2, 3), (6, 8), (100, 10)],
        )
        self.assertEqual(
            reads,
            {0: [b"abcdefg", b"cde", b"g", b""], 1: [b"0123456", b"234", b"6", b""]},
        )

    @inlineCallbacks
    def test_slot_readv_no_shares(self):
        """
        With no shares given, ``IStorageServer.slot_readv()`` reads from all shares.
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"abcdefg")], 7),
                1: ([], [(0, b"0123456")], 7),
                2: ([], [(0, b"9876543")], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        reads = yield self.storage_server.slot_readv(
            storage_index,
            shares=[],
            readv=[(0, 7)],
        )
        self.assertEqual(
            reads,
            {0: [b"abcdefg"], 1: [b"0123456"], 2: [b"9876543"]},
        )

    @inlineCallbacks
    def test_advise_corrupt_share(self):
        """
        Calling ``advise_corrupt_share()`` on a mutable share does not
        result in error (other behavior is opaque at this level of
        abstraction).
        """
        secrets = self.new_secrets()
        storage_index = new_storage_index()
        (written, _) = yield self.staraw(
            storage_index,
            secrets,
            tw_vectors={
                0: ([], [(0, b"abcdefg")], 7),
            },
            r_vector=[],
        )
        self.assertEqual(written, True)
        yield self.storage_server.advise_corrupt_share(
            b"mutable", storage_index, 0, b"ono"
        )
class _FoolscapMixin(SystemTestMixin):
    """Run tests on the Foolscap version of ``IStorageServer``."""

    def _get_native_server(self):
        # The grid created in setUp has a single storage server, so the first
        # known server is the one we want.
        return next(iter(self.clients[0].storage_broker.get_known_servers()))

    @inlineCallbacks
    def setUp(self):
        AsyncTestCase.setUp(self)
        self.basedir = "test_istorageserver/" + self.id()
        yield SystemTestMixin.setUp(self)
        # One client node is enough; it also acts as the storage server.
        yield self.set_up_nodes(1)
        self.storage_server = self._get_native_server().get_storage_server()
        self.assertTrue(IStorageServer.providedBy(self.storage_server))

    @inlineCallbacks
    def tearDown(self):
        AsyncTestCase.tearDown(self)
        yield SystemTestMixin.tearDown(self)

    @inlineCallbacks
    def disconnect(self):
        """
        Disconnect and then reconnect with a new ``IStorageServer``.
        """
        current = self.storage_server
        yield self.bounce_client(0)
        self.storage_server = self._get_native_server().get_storage_server()
        # Sanity check: bouncing really produced a fresh server reference.
        assert self.storage_server is not current
class FoolscapSharedAPIsTests(
    _FoolscapMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase
):
    """Foolscap-specific tests for shared ``IStorageServer`` APIs."""
class FoolscapImmutableAPIsTests(
    _FoolscapMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase
):
    """Foolscap-specific tests for immutable ``IStorageServer`` APIs."""
class FoolscapMutableAPIsTests(
    _FoolscapMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase
):
    """Foolscap-specific tests for mutable ``IStorageServer`` APIs."""

View File

@ -6,7 +6,7 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.utils import PY2, native_str
from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
@ -530,6 +530,14 @@ def _stub_allocate_tcp_port():
"""
return 999
def _stub_none():
"""
A function like ``_stub_allocate_tcp`` or ``_stub_get_local_addresses_sync``
but that return an empty list since ``allmydata.node._tub_portlocation`` requires a
callable for paramter 1 and 2 counting from 0.
"""
return []
class TestMissingPorts(unittest.TestCase):
"""
@ -550,7 +558,7 @@ class TestMissingPorts(unittest.TestCase):
)
config = config_from_string(self.basedir, "portnum", config_data)
with self.assertRaises(PortAssignmentRequired):
_tub_portlocation(config, None, None)
_tub_portlocation(config, _stub_none, _stub_none)
def test_listen_on_zero_with_host(self):
"""
@ -563,10 +571,7 @@ class TestMissingPorts(unittest.TestCase):
)
config = config_from_string(self.basedir, "portnum", config_data)
with self.assertRaises(PortAssignmentRequired):
_tub_portlocation(config, None, None)
test_listen_on_zero_with_host.todo = native_str( # type: ignore
"https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3563"
)
_tub_portlocation(config, _stub_none, _stub_none)
def test_parsing_tcp(self):
"""

View File

@ -0,0 +1,273 @@
"""
Tests for ``/statistics?t=openmetrics``.
Ported to Python 3.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future.utils import PY2
if PY2:
# fmt: off
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
# fmt: on
from prometheus_client.openmetrics import parser
from treq.testing import RequestTraversalAgent
from twisted.web.http import OK
from twisted.web.client import readBody
from twisted.web.resource import Resource
from testtools.twistedsupport import succeeded
from testtools.matchers import (
AfterPreprocessing,
Equals,
MatchesAll,
MatchesStructure,
MatchesPredicate,
)
from testtools.content import text_content
from allmydata.web.status import Statistics
from allmydata.test.common import SyncTestCase
class FakeStatsProvider(object):
    """
    A stats provider that hands back a canned collection of performance
    statistics.
    """

    def get_stats(self):
        """
        Return a fixed stats dict shaped like a live node's output.

        :return dict: a mapping with "stats" and "counters" sub-dicts.
            Some percentile values are ``None`` (insufficient samples on the
            node the snapshot was taken from).
        """
        # Parsed into a dict from a running tahoe's /statistics?t=json
        stats = {
            "stats": {
                "storage_server.latencies.get.99_9_percentile": None,
                "storage_server.latencies.close.10_0_percentile": 0.00021910667419433594,
                "storage_server.latencies.read.01_0_percentile": 2.8848648071289062e-05,
                "storage_server.latencies.writev.99_9_percentile": None,
                "storage_server.latencies.read.99_9_percentile": None,
                "storage_server.latencies.allocate.99_0_percentile": 0.000988006591796875,
                "storage_server.latencies.writev.mean": 0.00045332245070571654,
                "storage_server.latencies.close.99_9_percentile": None,
                "cpu_monitor.15min_avg": 0.00017592000079223033,
                "storage_server.disk_free_for_root": 103289454592,
                "storage_server.latencies.get.99_0_percentile": 0.000347137451171875,
                "storage_server.latencies.get.mean": 0.00021158285060171353,
                "storage_server.latencies.read.90_0_percentile": 8.893013000488281e-05,
                "storage_server.latencies.write.01_0_percentile": 3.600120544433594e-05,
                "storage_server.latencies.write.99_9_percentile": 0.00017690658569335938,
                "storage_server.latencies.close.90_0_percentile": 0.00033211708068847656,
                "storage_server.disk_total": 103497859072,
                "storage_server.latencies.close.95_0_percentile": 0.0003509521484375,
                "storage_server.latencies.readv.samplesize": 1000,
                "storage_server.disk_free_for_nonroot": 103289454592,
                "storage_server.latencies.close.mean": 0.0002715024480059103,
                "storage_server.latencies.writev.95_0_percentile": 0.0007410049438476562,
                "storage_server.latencies.readv.90_0_percentile": 0.0003781318664550781,
                "storage_server.latencies.readv.99_0_percentile": 0.0004050731658935547,
                "storage_server.latencies.allocate.mean": 0.0007128627429454784,
                "storage_server.latencies.close.samplesize": 326,
                "storage_server.latencies.get.50_0_percentile": 0.0001819133758544922,
                "storage_server.latencies.write.50_0_percentile": 4.482269287109375e-05,
                "storage_server.latencies.readv.01_0_percentile": 0.0002970695495605469,
                "storage_server.latencies.get.10_0_percentile": 0.00015687942504882812,
                "storage_server.latencies.allocate.90_0_percentile": 0.0008189678192138672,
                "storage_server.latencies.get.samplesize": 472,
                "storage_server.total_bucket_count": 393,
                "storage_server.latencies.read.mean": 5.936201880959903e-05,
                "storage_server.latencies.allocate.01_0_percentile": 0.0004208087921142578,
                "storage_server.latencies.allocate.99_9_percentile": None,
                "storage_server.latencies.readv.mean": 0.00034061360359191893,
                "storage_server.disk_used": 208404480,
                "storage_server.latencies.allocate.50_0_percentile": 0.0007410049438476562,
                "storage_server.latencies.read.99_0_percentile": 0.00011992454528808594,
                "node.uptime": 3805759.8545179367,
                "storage_server.latencies.writev.10_0_percentile": 0.00035190582275390625,
                "storage_server.latencies.writev.90_0_percentile": 0.0006821155548095703,
                "storage_server.latencies.close.01_0_percentile": 0.00021505355834960938,
                "storage_server.latencies.close.50_0_percentile": 0.0002579689025878906,
                "cpu_monitor.1min_avg": 0.0002130000000003444,
                "storage_server.latencies.writev.50_0_percentile": 0.0004138946533203125,
                "storage_server.latencies.read.95_0_percentile": 9.107589721679688e-05,
                "storage_server.latencies.readv.95_0_percentile": 0.0003859996795654297,
                "storage_server.latencies.write.10_0_percentile": 3.719329833984375e-05,
                "storage_server.accepting_immutable_shares": 1,
                "storage_server.latencies.writev.samplesize": 309,
                "storage_server.latencies.get.95_0_percentile": 0.0003190040588378906,
                "storage_server.latencies.readv.10_0_percentile": 0.00032210350036621094,
                "storage_server.latencies.get.90_0_percentile": 0.0002999305725097656,
                "storage_server.latencies.get.01_0_percentile": 0.0001239776611328125,
                "cpu_monitor.total": 641.4941180000001,
                "storage_server.latencies.write.samplesize": 1000,
                "storage_server.latencies.write.95_0_percentile": 9.489059448242188e-05,
                "storage_server.latencies.read.50_0_percentile": 6.890296936035156e-05,
                "storage_server.latencies.writev.01_0_percentile": 0.00033211708068847656,
                "storage_server.latencies.read.10_0_percentile": 3.0994415283203125e-05,
                "storage_server.latencies.allocate.10_0_percentile": 0.0004949569702148438,
                "storage_server.reserved_space": 0,
                "storage_server.disk_avail": 103289454592,
                "storage_server.latencies.write.99_0_percentile": 0.00011301040649414062,
                "storage_server.latencies.write.90_0_percentile": 9.083747863769531e-05,
                "cpu_monitor.5min_avg": 0.0002370666691157502,
                "storage_server.latencies.write.mean": 5.8008909225463864e-05,
                "storage_server.latencies.readv.50_0_percentile": 0.00033020973205566406,
                "storage_server.latencies.close.99_0_percentile": 0.0004038810729980469,
                "storage_server.allocated": 0,
                "storage_server.latencies.writev.99_0_percentile": 0.0007710456848144531,
                "storage_server.latencies.readv.99_9_percentile": 0.0004780292510986328,
                "storage_server.latencies.read.samplesize": 170,
                "storage_server.latencies.allocate.samplesize": 406,
                "storage_server.latencies.allocate.95_0_percentile": 0.0008411407470703125,
            },
            "counters": {
                "storage_server.writev": 309,
                "storage_server.bytes_added": 197836146,
                "storage_server.close": 326,
                "storage_server.readv": 14299,
                "storage_server.allocate": 406,
                "storage_server.read": 170,
                "storage_server.write": 3775,
                "storage_server.get": 472,
            },
        }
        return stats
class HackItResource(Resource, object):
    """
    A bridge between ``RequestTraversalAgent`` and ``MultiFormatResource``
    (used by ``Statistics``). ``MultiFormatResource`` expects the request
    object to have a ``fields`` attribute but Twisted's ``IRequest`` has no
    such attribute. Create it here.
    """

    def getChildWithDefault(self, path, request):
        # Patch the request on its way through, then defer to the ordinary
        # Twisted child-resource lookup.
        request.fields = None
        return Resource.getChildWithDefault(self, path, request)
class OpenMetrics(SyncTestCase):
    """
    Tests for ``/statistics?t=openmetrics``.
    """

    def test_spec_compliance(self):
        """
        Does our output adhere to the `OpenMetrics <https://openmetrics.io/>` spec?
        https://github.com/OpenObservability/OpenMetrics/
        https://prometheus.io/docs/instrumenting/exposition_formats/
        """
        root = HackItResource()
        # Serve the Statistics resource, fed by canned stats, at the root URL.
        root.putChild(b"", Statistics(FakeStatsProvider()))
        rta = RequestTraversalAgent(root)
        d = rta.request(b"GET", b"http://localhost/?t=openmetrics")
        # matches_stats bundles all the spec-compliance assertions (status,
        # content type, parseability, ordering).
        self.assertThat(d, succeeded(matches_stats(self)))
def matches_stats(testcase):
    """
    Create a matcher that matches a response that conforms to the OpenMetrics
    specification.

    * The ``Content-Type`` is **application/openmetrics-text; version=1.0.0; charset=utf-8**.
    * The status is **OK**.
    * The body can be parsed by an OpenMetrics parser.
    * The metric families in the body are grouped and sorted.
    * At least one of the expected families appears in the body.

    :param testtools.TestCase testcase: The case to which to add detail about the matching process.

    :return: A matcher.
    """
    return MatchesAll(
        MatchesStructure(
            code=Equals(OK),
            # "The content type MUST be..."
            headers=has_header(
                "content-type",
                "application/openmetrics-text; version=1.0.0; charset=utf-8",
            ),
        ),
        AfterPreprocessing(
            readBodyText,
            succeeded(
                MatchesAll(
                    # Record the body as test detail for debugging; this
                    # predicate itself always matches.
                    MatchesPredicate(add_detail(testcase, "response body"), "%s dummy"),
                    parses_as_openmetrics(),
                )
            ),
        ),
    )
def add_detail(testcase, name):
    """
    Create a matcher predicate that always matches and, as a side-effect,
    records the matched value as a named detail on the test case.

    :param testtools.TestCase testcase: The case to which to add the detail.
    :param str name: The name under which the detail is recorded.

    :return: A one-argument predicate suitable for ``MatchesPredicate``.
    """
    def always_match(value):
        testcase.addDetail(name, text_content(value))
        return True
    return always_match
def readBodyText(response):
    """
    Read the response body and decode it using UTF-8.

    :param twisted.web.iweb.IResponse response: The response from which to
        read the body.

    :return: A ``Deferred`` that fires with the ``str`` body.
    """
    body_deferred = readBody(response)
    body_deferred.addCallback(lambda raw: raw.decode("utf-8"))
    return body_deferred
def has_header(name, value):
    """
    Create a matcher that matches a response object that includes the given
    name / value pair.

    :param str name: The name of the item in the HTTP header to match.
    :param str value: The value of the item in the HTTP header to match by equality.

    :return: A matcher.
    """
    return AfterPreprocessing(
        # getRawHeaders yields the list of all values set for the named
        # header, hence the comparison against a one-element list.
        lambda headers: headers.getRawHeaders(name),
        Equals([value]),
    )
def parses_as_openmetrics():
    """
    Create a matcher that matches a ``str`` string that can be parsed as an
    OpenMetrics response and includes a certain well-known value expected by
    the tests.

    :return: A matcher.
    """
    # The parser throws if it does not like its input.
    # Wrapped in a list() to drain the generator.
    return AfterPreprocessing(
        lambda body: list(parser.text_string_to_metric_families(body)),
        # The families are expected sorted; the bucket-count family sorts
        # last, so checking families[-1] also pins the ordering.
        AfterPreprocessing(
            lambda families: families[-1].name,
            Equals("tahoe_stats_storage_server_total_bucket_count"),
        ),
    )

View File

@ -1,122 +0,0 @@
"""
Tests related to the Python 3 porting effort itself.
This module has been ported to Python 3.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from future.utils import PY2, native_str
if PY2:
from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
from twisted.python.modules import (
getModule,
)
from twisted.trial.unittest import (
SynchronousTestCase,
)
from allmydata.util._python3 import PORTED_MODULES, PORTED_TEST_MODULES
class Python3PortingEffortTests(SynchronousTestCase):
    # Consistency checks on the hand-maintained lists of ported modules.

    def test_finished_porting(self):
        """
        Tahoe-LAFS has been ported to Python 3.

        Once
        https://tahoe-lafs.org/trac/tahoe-lafs/milestone/Support%20Python%203
        is completed this test should pass (and can be deleted!).
        """
        tahoe_lafs_module_names = set(all_module_names("allmydata"))
        ported_names = set(ported_module_names())
        self.assertEqual(
            tahoe_lafs_module_names - ported_names,
            set(),
            "Some unported modules remain: {}".format(
                unported_report(
                    tahoe_lafs_module_names,
                    ported_names,
                ),
            ),
        )
    # Marked "todo" so Trial reports the expected failure as pending work
    # rather than as an error, until the porting milestone completes.
    test_finished_porting.todo = native_str(  # type: ignore
        "https://tahoe-lafs.org/trac/tahoe-lafs/milestone/Support%20Python%203 should be completed",
    )

    def test_ported_modules_exist(self):
        """
        All modules listed as ported exist and belong to Tahoe-LAFS.
        """
        tahoe_lafs_module_names = set(all_module_names("allmydata"))
        ported_names = set(ported_module_names())
        # Anything "ported" that isn't a real allmydata module is a typo or
        # a stale entry in the list.
        unknown = ported_names - tahoe_lafs_module_names
        self.assertEqual(
            unknown,
            set(),
            "Some supposedly-ported modules weren't found: {}.".format(sorted(unknown)),
        )

    def test_ported_modules_distinct(self):
        """
        The ported modules list doesn't contain duplicates.
        """
        ported_names_list = ported_module_names()
        ported_names_list.sort()
        ported_names_set = set(ported_names_list)
        ported_names_unique_list = list(ported_names_set)
        ported_names_unique_list.sort()
        # If deduplicating changed the sorted list, there was a duplicate.
        self.assertEqual(
            ported_names_list,
            ported_names_unique_list,
        )
def all_module_names(toplevel):
    """
    Enumerate every module inside a top-level package.

    :param unicode toplevel: The name of a top-level Python package.

    :return iterator[unicode]: An iterator of ``unicode`` giving the names of
        all modules within the given top-level Python package.
    """
    allmydata = getModule(toplevel)
    for module in allmydata.walkModules():
        name = module.name
        if PY2:
            # Normalize to text: the decode implies names arrive as UTF-8
            # bytes under Python 2.
            name = name.decode("utf-8")
        yield name
def ported_module_names():
    """
    :return list[unicode]: A ``list`` of ``unicode`` giving the names of
        Tahoe-LAFS modules which have been ported to Python 3.
    """
    # Concatenation of the two hand-maintained lists from
    # allmydata.util._python3 (a fresh list each call).
    return PORTED_MODULES + PORTED_TEST_MODULES
def unported_report(tahoe_lafs_module_names, ported_names):
    # Human-readable porting-progress summary used in test failure messages.
    # Counts both files and (non-blank) lines via count_lines.
    return """
Ported files: {} / {}
Ported lines: {} / {}
""".format(
        len(ported_names),
        len(tahoe_lafs_module_names),
        sum(map(count_lines, ported_names)),
        sum(map(count_lines, tahoe_lafs_module_names)),
    )
def count_lines(module_name):
    """
    Count the non-blank source lines of a module.

    :param unicode module_name: The importable name of the module to measure.

    :return int: The number of non-blank lines, or 0 when the source cannot
        be read (the failure is printed, not raised).
    """
    module = getModule(module_name)
    try:
        source = module.filePath.getContent()
    except Exception as e:
        # Best-effort: report the unreadable module and keep going.
        print((module_name, e))
        return 0
    return sum(1 for line in source.splitlines() if line)

View File

@ -19,6 +19,21 @@ import os.path, re, sys
from os import linesep
import locale
import six
from testtools import (
skipUnless,
)
from testtools.matchers import (
MatchesListwise,
MatchesAny,
Contains,
Equals,
Always,
)
from testtools.twistedsupport import (
succeeded,
)
from eliot import (
log_call,
)
@ -39,6 +54,10 @@ from allmydata.util import fileutil, pollmixin
from allmydata.util.encodingutil import unicode_to_argv
from allmydata.test import common_util
import allmydata
from allmydata.scripts.runner import (
parse_options,
)
from .common import (
PIPE,
Popen,
@ -46,6 +65,7 @@ from .common import (
from .common_util import (
parse_cli,
run_cli,
run_cli_unicode,
)
from .cli_node_api import (
CLINodeAPI,
@ -56,6 +76,9 @@ from .cli_node_api import (
from ..util.eliotutil import (
inline_callbacks,
)
from .common import (
SyncTestCase,
)
def get_root_from_file(src):
srcdir = os.path.dirname(os.path.dirname(os.path.normcase(os.path.realpath(src))))
@ -74,6 +97,56 @@ srcfile = allmydata.__file__
rootdir = get_root_from_file(srcfile)
class ParseOptionsTests(SyncTestCase):
    """
    Tests for ``parse_options``.
    """

    @skipUnless(six.PY2, "Only Python 2 exceptions must stringify to bytes.")
    def test_nonascii_unknown_subcommand_python2(self):
        """
        When ``parse_options`` is called with an argv indicating a subcommand that
        does not exist and which also contains non-ascii characters, the
        exception it raises includes the subcommand encoded as UTF-8.
        """
        tricky = u"\u00F6"
        try:
            parse_options([tricky])
        except usage.error as e:
            self.assertEqual(
                b"Unknown command: \\xf6",
                b"{}".format(e),
            )
        else:
            # Previously the test passed vacuously when no exception was
            # raised; an unknown subcommand must raise usage.error.
            self.fail("parse_options() did not raise usage.error")
class ParseOrExitTests(SyncTestCase):
    """
    Tests for ``parse_or_exit``.
    """

    def test_nonascii_error_content(self):
        """
        ``parse_or_exit`` can report errors that include non-ascii content.
        """
        tricky = u"\u00F6"
        # run_cli_unicode drives the CLI with the bogus subcommand; the
        # result is matched positionally: (returncode, stdout, stderr).
        self.assertThat(
            run_cli_unicode(tricky, [], encoding="utf-8"),
            succeeded(
                MatchesListwise([
                    # returncode
                    Equals(1),
                    # stdout
                    MatchesAny(
                        # Python 2
                        Contains(u"Unknown command: \\xf6"),
                        # Python 3
                        Contains(u"Unknown command: \xf6"),
                    ),
                    # stderr,
                    Always()
                ]),
            ),
        )
@log_call(action_type="run-bin-tahoe")
def run_bintahoe(extra_argv, python_options=None):
"""
@ -110,8 +183,16 @@ class BinTahoe(common_util.SignalMixin, unittest.TestCase):
"""
tricky = u"\u00F6"
out, err, returncode = run_bintahoe([tricky])
if PY2:
expected = u"Unknown command: \\xf6"
else:
expected = u"Unknown command: \xf6"
self.assertEqual(returncode, 1)
self.assertIn(u"Unknown command: " + tricky, out)
self.assertIn(
expected,
out,
"expected {!r} not found in {!r}\nstderr: {!r}".format(expected, out, err),
)
def test_with_python_options(self):
"""
@ -305,7 +386,12 @@ class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin):
u"--hostname", u"127.0.0.1",
])
self.assertEqual(returncode, 0)
self.assertEqual(
returncode,
0,
"stdout: {!r}\n"
"stderr: {!r}\n",
)
# This makes sure that node.url is written, which allows us to
# detect when the introducer restarts in _node_has_restarted below.

View File

@ -8,7 +8,7 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.utils import native_str, PY2, bytes_to_native_str
from future.utils import native_str, PY2, bytes_to_native_str, bchr
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
from six import ensure_str
@ -19,20 +19,23 @@ import platform
import stat
import struct
import shutil
import gc
from uuid import uuid4
from twisted.trial import unittest
from twisted.internet import defer
from twisted.internet.task import Clock
from hypothesis import given, strategies
import itertools
from allmydata import interfaces
from allmydata.util import fileutil, hashutil, base32
from allmydata.storage.server import StorageServer
from allmydata.storage.server import StorageServer, DEFAULT_RENEWAL_TIME
from allmydata.storage.shares import get_share_file
from allmydata.storage.mutable import MutableShareFile
from allmydata.storage.immutable import BucketWriter, BucketReader, ShareFile
from allmydata.storage.common import DataTooLargeError, storage_index_to_dir, \
from allmydata.storage.common import storage_index_to_dir, \
UnknownMutableContainerVersionError, UnknownImmutableContainerVersionError, \
si_b2a, si_a2b
from allmydata.storage.lease import LeaseInfo
@ -46,7 +49,9 @@ from allmydata.mutable.layout import MDMFSlotWriteProxy, MDMFSlotReadProxy, \
SIGNATURE_SIZE, \
VERIFICATION_KEY_SIZE, \
SHARE_HASH_CHAIN_SIZE
from allmydata.interfaces import BadWriteEnablerError
from allmydata.interfaces import (
BadWriteEnablerError, DataTooLargeError, ConflictingWriteError,
)
from allmydata.test.no_network import NoNetworkServer
from allmydata.storage_client import (
_StorageServer,
@ -123,8 +128,7 @@ class Bucket(unittest.TestCase):
def test_create(self):
incoming, final = self.make_workdir("test_create")
bw = BucketWriter(self, incoming, final, 200, self.make_lease(),
FakeCanary())
bw = BucketWriter(self, incoming, final, 200, self.make_lease())
bw.remote_write(0, b"a"*25)
bw.remote_write(25, b"b"*25)
bw.remote_write(50, b"c"*25)
@ -133,8 +137,7 @@ class Bucket(unittest.TestCase):
def test_readwrite(self):
incoming, final = self.make_workdir("test_readwrite")
bw = BucketWriter(self, incoming, final, 200, self.make_lease(),
FakeCanary())
bw = BucketWriter(self, incoming, final, 200, self.make_lease())
bw.remote_write(0, b"a"*25)
bw.remote_write(25, b"b"*25)
bw.remote_write(50, b"c"*7) # last block may be short
@ -146,6 +149,88 @@ class Bucket(unittest.TestCase):
self.failUnlessEqual(br.remote_read(25, 25), b"b"*25)
self.failUnlessEqual(br.remote_read(50, 7), b"c"*7)
def test_write_past_size_errors(self):
    """Writing beyond the size of the bucket throws an exception."""
    # Three ways of exceeding the 200-byte bucket: from offset 0, from a
    # mid-bucket offset, and starting wholly past the end.
    for (i, (offset, length)) in enumerate([(0, 201), (10, 191), (202, 34)]):
        incoming, final = self.make_workdir(
            "test_write_past_size_errors-{}".format(i)
        )
        bw = BucketWriter(self, incoming, final, 200, self.make_lease())
        with self.assertRaises(DataTooLargeError):
            bw.remote_write(offset, b"a" * length)
@given(
    maybe_overlapping_offset=strategies.integers(min_value=0, max_value=98),
    maybe_overlapping_length=strategies.integers(min_value=1, max_value=100),
)
def test_overlapping_writes_ok_if_matching(
    self, maybe_overlapping_offset, maybe_overlapping_length
):
    """
    Writes that overlap with previous writes are OK when the content is the
    same.
    """
    length = 100
    expected_data = b"".join(bchr(i) for i in range(100))
    # uuid4() keeps hypothesis' repeated executions in distinct workdirs.
    incoming, final = self.make_workdir("overlapping_writes_{}".format(uuid4()))
    bw = BucketWriter(
        self, incoming, final, length, self.make_lease(),
    )
    # Three writes: 10-19, 30-39, 50-59. This allows for a bunch of holes.
    bw.remote_write(10, expected_data[10:20])
    bw.remote_write(30, expected_data[30:40])
    bw.remote_write(50, expected_data[50:60])

    # Then, an overlapping write but with matching data:
    bw.remote_write(
        maybe_overlapping_offset,
        expected_data[
            maybe_overlapping_offset:maybe_overlapping_offset + maybe_overlapping_length
        ]
    )
    # Now fill in the holes:
    bw.remote_write(0, expected_data[0:10])
    bw.remote_write(20, expected_data[20:30])
    bw.remote_write(40, expected_data[40:50])
    bw.remote_write(60, expected_data[60:])
    bw.remote_close()

    # Reading the whole bucket back must give exactly the expected bytes.
    br = BucketReader(self, bw.finalhome)
    self.assertEqual(br.remote_read(0, length), expected_data)
@given(
    maybe_overlapping_offset=strategies.integers(min_value=0, max_value=98),
    maybe_overlapping_length=strategies.integers(min_value=1, max_value=100),
)
def test_overlapping_writes_not_ok_if_different(
    self, maybe_overlapping_offset, maybe_overlapping_length
):
    """
    Writes that overlap with previous writes fail with an exception if the
    contents don't match.
    """
    length = 100
    # uuid4() keeps hypothesis' repeated executions in distinct workdirs.
    incoming, final = self.make_workdir("overlapping_writes_{}".format(uuid4()))
    bw = BucketWriter(
        self, incoming, final, length, self.make_lease(),
    )
    # Three writes: 10-19, 30-39, 50-59. This allows for a bunch of holes.
    bw.remote_write(10, b"1" * 10)
    bw.remote_write(30, b"1" * 10)
    bw.remote_write(50, b"1" * 10)

    # Then, write something that might overlap with some of them, but
    # conflicts. Then fill in holes left by first three writes. Conflict is
    # inevitable.
    with self.assertRaises(ConflictingWriteError):
        bw.remote_write(
            maybe_overlapping_offset,
            b'X' * min(maybe_overlapping_length, length - maybe_overlapping_offset),
        )
        bw.remote_write(0, b"1" * 10)
        bw.remote_write(20, b"1" * 10)
        bw.remote_write(40, b"1" * 10)
        bw.remote_write(60, b"1" * 40)
def test_read_past_end_of_share_data(self):
# test vector for immutable files (hard-coded contents of an immutable share
# file):
@ -168,7 +253,7 @@ class Bucket(unittest.TestCase):
assert len(renewsecret) == 32
cancelsecret = b'THIS LETS ME KILL YOUR FILE HAHA'
assert len(cancelsecret) == 32
expirationtime = struct.pack('>L', 60*60*24*31) # 31 days in seconds
expirationtime = struct.pack('>L', DEFAULT_RENEWAL_TIME) # 31 days in seconds
lease_data = ownernumber + renewsecret + cancelsecret + expirationtime
@ -227,8 +312,7 @@ class BucketProxy(unittest.TestCase):
final = os.path.join(basedir, "bucket")
fileutil.make_dirs(basedir)
fileutil.make_dirs(os.path.join(basedir, "tmp"))
bw = BucketWriter(self, incoming, final, size, self.make_lease(),
FakeCanary())
bw = BucketWriter(self, incoming, final, size, self.make_lease())
rb = RemoteBucket(bw)
return bw, rb, final
@ -354,10 +438,11 @@ class Server(unittest.TestCase):
basedir = os.path.join("storage", "Server", name)
return basedir
def create(self, name, reserved_space=0, klass=StorageServer):
def create(self, name, reserved_space=0, klass=StorageServer, get_current_time=time.time):
workdir = self.workdir(name)
ss = klass(workdir, b"\x00" * 20, reserved_space=reserved_space,
stats_provider=FakeStatsProvider())
stats_provider=FakeStatsProvider(),
get_current_time=get_current_time)
ss.setServiceParent(self.sparent)
return ss
@ -384,8 +469,8 @@ class Server(unittest.TestCase):
self.failUnlessIn(b'available-space', sv1)
def allocate(self, ss, storage_index, sharenums, size, canary=None):
renew_secret = hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret))
cancel_secret = hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret))
renew_secret = hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret))
cancel_secret = hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))
if not canary:
canary = FakeCanary()
return ss.remote_allocate_buckets(storage_index,
@ -577,26 +662,24 @@ class Server(unittest.TestCase):
# the size we request.
OVERHEAD = 3*4
LEASE_SIZE = 4+32+32+4
canary = FakeCanary(True)
canary = FakeCanary()
already, writers = self.allocate(ss, b"vid1", [0,1,2], 1000, canary)
self.failUnlessEqual(len(writers), 3)
# now the StorageServer should have 3000 bytes provisionally
# allocated, allowing only 2000 more to be claimed
self.failUnlessEqual(len(ss._active_writers), 3)
self.failUnlessEqual(len(ss._bucket_writers), 3)
# allocating 1001-byte shares only leaves room for one
already2, writers2 = self.allocate(ss, b"vid2", [0,1,2], 1001, canary)
canary2 = FakeCanary()
already2, writers2 = self.allocate(ss, b"vid2", [0,1,2], 1001, canary2)
self.failUnlessEqual(len(writers2), 1)
self.failUnlessEqual(len(ss._active_writers), 4)
self.failUnlessEqual(len(ss._bucket_writers), 4)
# we abandon the first set, so their provisional allocation should be
# returned
canary.disconnected()
del already
del writers
gc.collect()
self.failUnlessEqual(len(ss._active_writers), 1)
self.failUnlessEqual(len(ss._bucket_writers), 1)
# now we have a provisional allocation of 1001 bytes
# and we close the second set, so their provisional allocation should
@ -605,25 +688,21 @@ class Server(unittest.TestCase):
for bw in writers2.values():
bw.remote_write(0, b"a"*25)
bw.remote_close()
del already2
del writers2
del bw
self.failUnlessEqual(len(ss._active_writers), 0)
self.failUnlessEqual(len(ss._bucket_writers), 0)
# this also changes the amount reported as available by call_get_disk_stats
allocated = 1001 + OVERHEAD + LEASE_SIZE
# now there should be ALLOCATED=1001+12+72=1085 bytes allocated, and
# 5000-1085=3915 free, therefore we can fit 39 100byte shares
already3, writers3 = self.allocate(ss, b"vid3", list(range(100)), 100, canary)
canary3 = FakeCanary()
already3, writers3 = self.allocate(ss, b"vid3", list(range(100)), 100, canary3)
self.failUnlessEqual(len(writers3), 39)
self.failUnlessEqual(len(ss._active_writers), 39)
self.failUnlessEqual(len(ss._bucket_writers), 39)
del already3
del writers3
gc.collect()
canary3.disconnected()
self.failUnlessEqual(len(ss._active_writers), 0)
self.failUnlessEqual(len(ss._bucket_writers), 0)
ss.disownServiceParent()
del ss
@ -646,6 +725,27 @@ class Server(unittest.TestCase):
f2 = open(filename, "rb")
self.failUnlessEqual(f2.read(5), b"start")
def create_bucket_5_shares(
    self, ss, storage_index, expected_already=0, expected_writers=5
):
    """
    Given a StorageServer, create a bucket with 5 shares and return renewal
    and cancellation secrets.

    :param ss: The StorageServer to allocate on.
    :param bytes storage_index: The index of the bucket to create.
    :param int expected_already: Shares expected to already exist.
    :param int expected_writers: New share writers expected to be handed out.

    :return: A ``(renew_secret, cancel_secret)`` tuple for the lease.
    """
    canary = FakeCanary()
    sharenums = list(range(5))
    size = 100
    # Creating a bucket also creates a lease:
    rs, cs = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)),
              hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret)))
    already, writers = ss.remote_allocate_buckets(storage_index, rs, cs,
                                                  sharenums, size, canary)
    self.failUnlessEqual(len(already), expected_already)
    self.failUnlessEqual(len(writers), expected_writers)
    # Close every writer so the shares become real (and leased).
    for wb in writers.values():
        wb.remote_close()
    return rs, cs
def test_leases(self):
ss = self.create("test_leases")
@ -653,41 +753,23 @@ class Server(unittest.TestCase):
sharenums = list(range(5))
size = 100
rs0,cs0 = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
already,writers = ss.remote_allocate_buckets(b"si0", rs0, cs0,
sharenums, size, canary)
self.failUnlessEqual(len(already), 0)
self.failUnlessEqual(len(writers), 5)
for wb in writers.values():
wb.remote_close()
# Create a bucket:
rs0, cs0 = self.create_bucket_5_shares(ss, b"si0")
leases = list(ss.get_leases(b"si0"))
self.failUnlessEqual(len(leases), 1)
self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs0]))
rs1,cs1 = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
already,writers = ss.remote_allocate_buckets(b"si1", rs1, cs1,
sharenums, size, canary)
for wb in writers.values():
wb.remote_close()
rs1, cs1 = self.create_bucket_5_shares(ss, b"si1")
# take out a second lease on si1
rs2,cs2 = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
already,writers = ss.remote_allocate_buckets(b"si1", rs2, cs2,
sharenums, size, canary)
self.failUnlessEqual(len(already), 5)
self.failUnlessEqual(len(writers), 0)
rs2, cs2 = self.create_bucket_5_shares(ss, b"si1", 5, 0)
leases = list(ss.get_leases(b"si1"))
self.failUnlessEqual(len(leases), 2)
self.failUnlessEqual(set([l.renew_secret for l in leases]), set([rs1, rs2]))
# and a third lease, using add-lease
rs2a,cs2a = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
rs2a,cs2a = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)),
hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret)))
ss.remote_add_lease(b"si1", rs2a, cs2a)
leases = list(ss.get_leases(b"si1"))
self.failUnlessEqual(len(leases), 3)
@ -715,10 +797,10 @@ class Server(unittest.TestCase):
"ss should not have a 'remote_cancel_lease' method/attribute")
# test overlapping uploads
rs3,cs3 = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
rs4,cs4 = (hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)),
hashutil.tagged_hash(b"blah", b"%d" % next(self._lease_secret)))
rs3,cs3 = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)),
hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret)))
rs4,cs4 = (hashutil.my_renewal_secret_hash(b"%d" % next(self._lease_secret)),
hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret)))
already,writers = ss.remote_allocate_buckets(b"si3", rs3, cs3,
sharenums, size, canary)
self.failUnlessEqual(len(already), 0)
@ -741,6 +823,28 @@ class Server(unittest.TestCase):
leases = list(ss.get_leases(b"si3"))
self.failUnlessEqual(len(leases), 2)
def test_immutable_add_lease_renews(self):
"""
Adding a lease on an already leased immutable with the same secret just
renews it.
"""
clock = Clock()
clock.advance(123)
ss = self.create("test_immutable_add_lease_renews", get_current_time=clock.seconds)
# Start out with single lease created with bucket:
renewal_secret, cancel_secret = self.create_bucket_5_shares(ss, b"si0")
[lease] = ss.get_leases(b"si0")
self.assertEqual(lease.expiration_time, 123 + DEFAULT_RENEWAL_TIME)
# Time passes:
clock.advance(123456)
# Adding a lease with matching renewal secret just renews it:
ss.remote_add_lease(b"si0", renewal_secret, cancel_secret)
[lease] = ss.get_leases(b"si0")
self.assertEqual(lease.expiration_time, 123 + 123456 + DEFAULT_RENEWAL_TIME)
def test_have_shares(self):
"""By default the StorageServer has no shares."""
workdir = self.workdir("test_have_shares")
@ -840,9 +944,10 @@ class MutableServer(unittest.TestCase):
basedir = os.path.join("storage", "MutableServer", name)
return basedir
def create(self, name):
def create(self, name, get_current_time=time.time):
workdir = self.workdir(name)
ss = StorageServer(workdir, b"\x00" * 20)
ss = StorageServer(workdir, b"\x00" * 20,
get_current_time=get_current_time)
ss.setServiceParent(self.sparent)
return ss
@ -1046,23 +1151,6 @@ class MutableServer(unittest.TestCase):
}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
# as should this one
answer = write(b"si1", secrets,
{0: ([(10, 5, b"lt", b"11111"),
],
[(0, b"x"*100)],
None),
},
[(10,5)],
)
self.failUnlessEqual(answer, (False,
{0: [b"11111"],
1: [b""],
2: [b""]},
))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
def test_operators(self):
# test operators, the data we're comparing is '11111' in all cases.
# test both fail+pass, reset data after each one.
@ -1082,63 +1170,6 @@ class MutableServer(unittest.TestCase):
reset()
# lt
answer = write(b"si1", secrets, {0: ([(10, 5, b"lt", b"11110"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
self.failUnlessEqual(read(b"si1", [], [(0,100)]), {0: [data]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"lt", b"11111"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"lt", b"11112"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
# le
answer = write(b"si1", secrets, {0: ([(10, 5, b"le", b"11110"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"le", b"11111"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"le", b"11112"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
# eq
answer = write(b"si1", secrets, {0: ([(10, 5, b"eq", b"11112"),
],
@ -1158,81 +1189,6 @@ class MutableServer(unittest.TestCase):
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
# ne
answer = write(b"si1", secrets, {0: ([(10, 5, b"ne", b"11111"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"ne", b"11112"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
# ge
answer = write(b"si1", secrets, {0: ([(10, 5, b"ge", b"11110"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"ge", b"11111"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"ge", b"11112"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
# gt
answer = write(b"si1", secrets, {0: ([(10, 5, b"gt", b"11110"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"gt", b"11111"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"gt", b"11112"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
# finally, test some operators against empty shares
answer = write(b"si1", secrets, {1: ([(10, 5, b"eq", b"11112"),
],
@ -1379,6 +1335,41 @@ class MutableServer(unittest.TestCase):
{0: ([], [(500, b"make me really bigger")], None)}, [])
self.compare_leases_without_timestamps(all_leases, list(s0.get_leases()))
def test_mutable_add_lease_renews(self):
"""
Adding a lease on an already leased mutable with the same secret just
renews it.
"""
clock = Clock()
clock.advance(235)
ss = self.create("test_mutable_add_lease_renews",
get_current_time=clock.seconds)
def secrets(n):
return ( self.write_enabler(b"we1"),
self.renew_secret(b"we1-%d" % n),
self.cancel_secret(b"we1-%d" % n) )
data = b"".join([ (b"%d" % i) * 10 for i in range(10) ])
write = ss.remote_slot_testv_and_readv_and_writev
write_enabler, renew_secret, cancel_secret = secrets(0)
rc = write(b"si1", (write_enabler, renew_secret, cancel_secret),
{0: ([], [(0,data)], None)}, [])
self.failUnlessEqual(rc, (True, {}))
bucket_dir = os.path.join(self.workdir("test_mutable_add_lease_renews"),
"shares", storage_index_to_dir(b"si1"))
s0 = MutableShareFile(os.path.join(bucket_dir, "0"))
[lease] = s0.get_leases()
self.assertEqual(lease.expiration_time, 235 + DEFAULT_RENEWAL_TIME)
# Time passes...
clock.advance(835)
# Adding a lease renews it:
ss.remote_add_lease(b"si1", renew_secret, cancel_secret)
[lease] = s0.get_leases()
self.assertEqual(lease.expiration_time,
235 + 835 + DEFAULT_RENEWAL_TIME)
def test_remove(self):
ss = self.create("test_remove")
self.allocate(ss, b"si1", b"we1", next(self._lease_secret),

View File

@ -15,24 +15,19 @@ from past.builtins import chr as byteschr, long
from six import ensure_text, ensure_str
import os, re, sys, time, json
from functools import partial
from bs4 import BeautifulSoup
from twisted.internet import reactor
from twisted.trial import unittest
from twisted.internet import defer
from twisted.internet.defer import inlineCallbacks
from twisted.application import service
from allmydata import client, uri
from allmydata.introducer.server import create_introducer
from allmydata import uri
from allmydata.storage.mutable import MutableShareFile
from allmydata.storage.server import si_a2b
from allmydata.immutable import offloaded, upload
from allmydata.immutable.literal import LiteralFileNode
from allmydata.immutable.filenode import ImmutableFileNode
from allmydata.util import idlib, mathutil, pollmixin, fileutil
from allmydata.util import idlib, mathutil
from allmydata.util import log, base32
from allmydata.util.encodingutil import quote_output, unicode_to_argv
from allmydata.util.fileutil import abspath_expanduser_unicode
@ -43,32 +38,20 @@ from allmydata.monitor import Monitor
from allmydata.mutable.common import NotWriteableError
from allmydata.mutable import layout as mutable_layout
from allmydata.mutable.publish import MutableData
from allmydata.scripts.runner import PYTHON_3_WARNING
from foolscap.api import DeadReferenceError, fireEventually, flushEventualQueue
from foolscap.api import DeadReferenceError, fireEventually
from twisted.python.failure import Failure
from twisted.python.filepath import (
FilePath,
)
from twisted.internet.utils import (
getProcessOutputAndValue,
)
from .common import (
TEST_RSA_KEY_SIZE,
SameProcessStreamEndpointAssigner,
)
from .common_web import do_http as do_http_bytes, Error
from .web.common import (
assert_soup_has_tag_with_attributes
)
# TODO: move this to common or common_util
from . import common_util as testutil
from .common_system import SystemTestMixin
from .common_util import run_cli_unicode
from ..scripts.common import (
write_introducer,
)
class RunBinTahoeMixin(object):
def run_bintahoe(self, args, stdin=None, python_options=[], env=None):
@ -117,874 +100,6 @@ This is some data to publish to the remote grid.., which needs to be large
enough to not fit inside a LIT uri.
"""
# our system test uses the same Tub certificates each time, to avoid the
# overhead of key generation
SYSTEM_TEST_CERTS = [
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1iNV
z07PYwZwucl87QlL2TFZvDxD4flZ/p3BZE3DCT5Efn9w2NT4sHXL1e+R/qsDFuNG
bw1y1TRM0DGK6Wr0XRT2mLQULNgB8y/HrhcSdONsYRyWdj+LimyECKjwh0iSkApv
Yj/7IOuq6dOoh67YXPdf75OHLShm4+8q8fuwhBL+nuuO4NhZDJKupYHcnuCkcF88
LN77HKrrgbpyVmeghUkwJMLeJCewvYVlambgWRiuGGexFgAm6laS3rWetOcdm9eg
FoA9PKNN6xvPatbj99MPoLpBbzsI64M0yT/wTSw1pj/Nom3rwfMa2OH8Kk7c8R/r
U3xj4ZY1DTlGERvejQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAwyQjQ3ZgtJ3JW
r3/EPdqSUBamTfXIpOh9rXmRjPpbe+MvenqIzl4q+GnkL5mdEb1e1hdKQZgFQ5Q5
tbcNIz6h5C07KaNtbqhZCx5c/RUEH87VeXuAuOqZHbZWJ18q0tnk+YgWER2TOkgE
RI2AslcsJBt88UUOjHX6/7J3KjPFaAjW1QV3TTsHxk14aYDYJwPdz+ijchgbOPQ0
i+ilhzcB+qQnOC1s4xQSFo+zblTO7EgqM9KpupYfOVFh46P1Mak2W8EDvhz0livl
OROXJ6nR/13lmQdfVX6T45d+ITBwtmW2nGAh3oI3JlArGKHaW+7qnuHR72q9FSES
cEYA/wmk
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDWI1XPTs9jBnC5
yXztCUvZMVm8PEPh+Vn+ncFkTcMJPkR+f3DY1PiwdcvV75H+qwMW40ZvDXLVNEzQ
MYrpavRdFPaYtBQs2AHzL8euFxJ042xhHJZ2P4uKbIQIqPCHSJKQCm9iP/sg66rp
06iHrthc91/vk4ctKGbj7yrx+7CEEv6e647g2FkMkq6lgdye4KRwXzws3vscquuB
unJWZ6CFSTAkwt4kJ7C9hWVqZuBZGK4YZ7EWACbqVpLetZ605x2b16AWgD08o03r
G89q1uP30w+gukFvOwjrgzTJP/BNLDWmP82ibevB8xrY4fwqTtzxH+tTfGPhljUN
OUYRG96NAgMBAAECggEAJ5xztBx0+nFnisZ9yG8uy6d4XPyc5gE1J4dRDdfgmyYc
j3XNjx6ePi4cHZ/qVryVnrc+AS7wrgW1q9FuS81QFKPbFdZB4SW3/p85BbgY3uxu
0Ovz3T3V9y4polx12eCP0/tKLVd+gdF2VTik9Sxfs5rC8VNN7wmJNuK4A/k15sgy
BIu/R8NlMNGQySNhtccp+dzB8uTyKx5zFZhVvnAK/3YX9BC2V4QBW9JxO4S8N0/9
48e9Sw/fGCfQ/EFPKGCvTvfuRqJ+4t5k10FygXJ+s+y70ifYi+aSsjJBuranbLJp
g5TwhuKnTWs8Nth3YRLbcJL4VBIOehjAWy8pDMMtlQKBgQD0O8cHb8cOTGW0BijC
NDofhA2GooQUUR3WL324PXWZq0DXuBDQhJVBKWO3AYonivhhd/qWO8lea9MEmU41
nKZ7maS4B8AJLJC08P8GL1uCIE/ezEXEi9JwC1zJiyl595Ap4lSAozH0DwjNvmGL
5mIdYg0BliqFXbloNJkNlb7INwKBgQDgdGEIWXc5Y1ncWNs6iDIV/t2MlL8vLrP0
hpkl/QiMndOQyD6JBo0+ZqvOQTSS4NTSxBROjPxvFbEJ3eH8Pmn8gHOf46fzP1OJ
wlYv0gYzkN4FE/tN6JnO2u9pN0euyyZLM1fnEcrMWColMN8JlWjtA7Gbxm8lkfa4
3vicaJtlWwKBgQCQYL4ZgVR0+Wit8W4qz+EEPHYafvwBXqp6sXxqa7qXawtb+q3F
9nqdGLCfwMNA+QA37ksugI1byfXmpBH902r/aiZbvAkj4zpwHH9F0r0PwbY1iSA9
PkLahX0Gj8OnHFgWynsVyGOBWVnk9oSHxVt+7zWtGG5uhKdUGLPZugocJQKBgB61
7bzduOFiRZ5PjhdxISE/UQL2Kz6Cbl7rt7Kp72yF/7eUnnHTMqoyFBnRdCcQmi4I
ZBrnUXbFigamlFAWHhxNWwSqeoVeychUjcRXQT/291nMhRsA02KpNA66YJV6+E9b
xBA6r/vLqGCUUkAWcFfVpIyC1xxV32MmJvAHpBN3AoGAPF3MUFiO0iKNZfst6Tm3
rzrldLawDo98DRZ7Yb2kWlWZYqUk/Nvryvo2cns75WGSMDYVbbRp+BY7kZmNYa9K
iQzKDL54ZRu6V+getJdeAO8yXoCmnZKxt5OHvOSrQMfAmFKSwLwxBbZBfXEyuune
yfusXLtCgajpreoVIa0xWdQ=
-----END PRIVATE KEY-----
""", # 0
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApDzW
4ZBeK9w4xpRaed6lXzeCO0Xmr3f0ynbueSdiZ89FWoAMgK+SiBIOViYV6hfm0Wah
lemSNzFGx5LvDSg2uwSqEP23DeM9O/SQPgIAiLeeEsYZJcgg2jz92YfFEaahsGdI
6qSP4XI2/5dgKRpPOYDGyw6R5PQR6w22Xq1WD1jBvImk/k09I9jHRn40pYbaJzbg
U2aIjvOruo2kqe4f6iDqE0piYimAZJUvemu1UoyV5NG590hGkDuWsMD77+d2FxCj
9Nzb+iuuG3ksnanHPyXi1hQmzp5OmzVWaevCHinNjWgsuSuLGO9H2SLf3wwp2UCs
EpKtzoKrnZdEg/anNwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQChxtr67o1aZZMJ
A6gESPtFjZLw6wG0j50JsrWKLvoXVts1ToJ9u2nx01aFKjBwb4Yg+vdJfDgIIAEm
jS56h6H2DfJlkTWHmi8Vx1wuusWnrNwYMI53tdlRIpD2+Ne7yeoLQZcVN2wuPmxD
Mbksg4AI4csmbkU/NPX5DtMy4EzM/pFvIcxNIVRUMVTFzn5zxhKfhyPqrMI4fxw1
UhUbEKO+QgIqTNp/dZ0lTbFs5HJQn6yirWyyvQKBPmaaK+pKd0RST/T38OU2oJ/J
LojRs7ugCJ+bxJqegmQrdcVqZZGbpYeK4O/5eIn8KOlgh0nUza1MyjJJemgBBWf7
HoXB8Fge
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCkPNbhkF4r3DjG
lFp53qVfN4I7Reavd/TKdu55J2Jnz0VagAyAr5KIEg5WJhXqF+bRZqGV6ZI3MUbH
ku8NKDa7BKoQ/bcN4z079JA+AgCIt54SxhklyCDaPP3Zh8URpqGwZ0jqpI/hcjb/
l2ApGk85gMbLDpHk9BHrDbZerVYPWMG8iaT+TT0j2MdGfjSlhtonNuBTZoiO86u6
jaSp7h/qIOoTSmJiKYBklS96a7VSjJXk0bn3SEaQO5awwPvv53YXEKP03Nv6K64b
eSydqcc/JeLWFCbOnk6bNVZp68IeKc2NaCy5K4sY70fZIt/fDCnZQKwSkq3Ogqud
l0SD9qc3AgMBAAECggEBAIu55uaIOFYASZ1IYaEFNpRHWVisI5Js76nAfSo9w46l
3E8eWYSx2mxBUEkipco/A3RraFVuHaMvHRR1gUMkT0vUsAs8jxwVk+cKLh1S/rlR
3f4C4yotlSWWdjE3PQXDShQWCwb1ciNPVFMmqfzOEVDOqlHe12h97TCYverWdT0f
3LZICLQsZd1WPKnPNXrsRRDCBuRLapdg+M0oJ+y6IiCdm+qM7Qvaoef6hlvm5ECz
LCM92db5BKTuPOQXMx2J8mjaBgU3aHxRV08IFgs7mI6q0t0FM7LlytIAJq1Hg5QU
36zDKo8tblkPijWZWlqlZCnlarrd3Ar/BiLEiuOGDMECgYEA1GOp0KHy0mbJeN13
+TDsgP7zhmqFcuJREu2xziNJPK2S06NfGYE8vuVqBGzBroLTQ3dK7rOJs9C6IjCE
mH7ZeHzfcKohpZnl443vHMSpgdh/bXTEO1aQZNbJ2hLYs8ie/VqqHR0u6YtpUqZL
LgaUA0U8GnlsO55B8kyCelckmDkCgYEAxfYQMPEEzg1tg2neqEfyoeY0qQTEJTeh
CPMztowSJpIyF1rQH6TaG0ZchkiAkw3W58RVDfvK72TuVlC5Kz00C2/uPnrqm0dX
iMPeML5rFlG3VGCrSTnAPI+az6P65q8zodqcTtA8xoxgPOlc/lINOxiTEMxLyeGF
8GyP+sCM2u8CgYEAvMBR05OJnEky9hJEpBZBqSZrQGL8dCwDh0HtCdi8JovPd/yx
8JW1aaWywXnx6uhjXoru8hJm54IxWV8rB+d716OKY7MfMfACqWejQDratgW0wY7L
MjztGGD2hLLJGYXLHjfsBPHBllaKZKRbHe1Er19hWdndQWKVEwPB1X4KjKkCgYEA
nWHmN3K2djbYtRyLR1CEBtDlVuaSJmCWp23q1BuCJqYeKtEpG69NM1f6IUws5Dyh
eXtuf4KKMU8V6QueW1D6OomPaJ8CO9c5MWM/F5ObwY/P58Y/ByVhvwQQeToONC5g
JzKNCF+nodZigKqrIwoKuMvtx/IT4vloKd+1jA5fLYMCgYBoT3HLCyATVdDSt1TZ
SbEDoLSYt23KRjQV93+INP949dYCagtgh/kTzxBopw5FljISLfdYizIRo2AzhhfP
WWpILlnt19kD+sNirJVqxJacfEZsu5baWTedI/yrCuVsAs/s3/EEY6q0Qywknxtp
Fwh1/8y5t14ib5fxOVhi8X1nEA==
-----END PRIVATE KEY-----
""", # 1
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwMTn
hXnpKHGAir3WYbOxefVrMA07OZNAsNa29nBwLA+NVIJNUFgquibMj7QYo8+M45oY
6LKr4yRcBryZVvyxfdr92xp8+kLeVApk2WLjkdBTRagHh9qdrY0hQmagCBN6/hLG
Xug8VksQUdhX3vu6ZyMvTLfKRkDOMRVkRGRGg/dOcvom7zpqMCGYenMG2FStr6UV
3s3dlCSZZTdTX5Uoq6yfUUJE3nITGKjpnpJKqIs3PWCIxdj7INIcjJKvIdUcavIV
2hEhh60A8ltmtdpQAXVBE+U7aZgS1fGAWS2A0a3UwuP2pkQp6OyKCUVHpZatbl9F
ahDN2QBzegv/rdJ1zwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAl4OQZ+FB9ZSUv
FL/KwLNt+ONU8Sve/xiX+8vKAvgKm2FrjyK+AZPwibnu+FSt2G4ndZBx4Wvpe5V+
gCsbzSXlh9cDn2SRXyprt2l/8Fj4eUMaThmLKOK200/N/s2SpmBtnuflBrhNaJpw
DEi2KEPuXsgvkuVzXN06j75cUHwn5LeWDAh0RalkVuGbEWBoFx9Hq8WECdlCy0YS
y09+yO01qz70y88C2rPThKw8kP4bX8aFZbvsnRHsLu/8nEQNlrELcfBarPVHjJ/9
imxOdymJkV152V58voiXP/PwXhynctQbF7e+0UZ+XEGdbAbZA0BMl7z+b09Z+jF2
afm4mVox
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDAxOeFeekocYCK
vdZhs7F59WswDTs5k0Cw1rb2cHAsD41Ugk1QWCq6JsyPtBijz4zjmhjosqvjJFwG
vJlW/LF92v3bGnz6Qt5UCmTZYuOR0FNFqAeH2p2tjSFCZqAIE3r+EsZe6DxWSxBR
2Ffe+7pnIy9Mt8pGQM4xFWREZEaD905y+ibvOmowIZh6cwbYVK2vpRXezd2UJJll
N1NflSirrJ9RQkTechMYqOmekkqoizc9YIjF2Psg0hyMkq8h1Rxq8hXaESGHrQDy
W2a12lABdUET5TtpmBLV8YBZLYDRrdTC4/amRCno7IoJRUellq1uX0VqEM3ZAHN6
C/+t0nXPAgMBAAECggEAF+2ZK4lZdsq4AQDVhqUuh4v+NSW/T0NHCWxto6OLWPzJ
N09BV5LKIvdD9yaM1HCj9XCgXOooyfYuciuhARo20f+H+VWNY+c+/8GWiSFsTCJG
4+Oao7NwVSWqljp07Ou2Hamo9AjxzGhe6znmlmg62CiW63f45MWQkqksHA0yb5jg
/onJ2//I+OI+aTKNfjt1G6h2x7oxeGTU1jJ0Hb2xSh+Mpqx9NDfb/KZyOndhSG5N
xRVosQ6uV+9mqHxTTwTZurTG31uhZzarkMuqxhcHS94ub7berEc/OlqvbyMKNZ3A
lzuvq0NBZhEUhAVgORAIS17r/q2BvyG4u5LFbG2p0QKBgQDeyyOl+A7xc4lPE2OL
Z3KHJPP4RuUnHnWFC+bNdr5Ag8K7jcjZIcasyUom9rOR0Fpuw9wmXpp3+6fyp9bJ
y6Bi5VioR0ZFP5X+nXxIN3yvgypu6AZvkhHrEFer+heGHxPlbwNKCKMbPzDZPBTZ
vlC7g7xUUcpNmGhrOKr3Qq5FlwKBgQDdgCmRvsHUyzicn8TI3IJBAOcaQG0Yr/R2
FzBqNfHHx7fUZlJfKJsnu9R9VRZmBi4B7MA2xcvz4QrdZWEtY8uoYp8TAGILfW1u
CP4ZHrzfDo/67Uzk2uTMTd0+JOqSm/HiVNguRPvC8EWBoFls+h129GKThMvKR1hP
1oarfAGIiQKBgQCIMAq5gHm59JMhqEt4QqMKo3cS9FtNX1wdGRpbzFMd4q0dstzs
ha4Jnv3Z9YHtBzzQap9fQQMRht6yARDVx8hhy6o3K2J0IBtTSfdXubtZGkfNBb4x
Y0vaseG1uam5jbO+0u5iygbSN/1nPUfNln2JMkzkCh8s8ZYavMgdX0BiPwKBgChR
QL/Hog5yoy5XIoGRKaBdYrNzkKgStwObuvNKOGUt5DckHNA3Wu6DkOzzRO1zKIKv
LlmJ7VLJ3qln36VcaeCPevcBddczkGyb9GxsHOLZCroY4YsykLzjW2cJXy0qd3/E
A8mAQvc7ttsebciZSi2x1BOX82QxUlDN8ptaKglJAoGBAMnLN1TQB0xtWYDPGcGV
2IvgX7OTRRlMVrTvIOvP5Julux9z1r0x0cesl/jaXupsBUlLLicPyBMSBJrXlr24
mrgkodk4TdqO1VtBCZBqak97DHVezstMrbpCGlUD5jBnsHVRLERvS09QlGhqMeNL
jpNQbWH9VhutzbvpYquKrhvK
-----END PRIVATE KEY-----
""", # 2
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzM1oXDTIxMDEwMTAxNDAzM1owFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAypqi
YTni3s60Uo8vgGcFvjWWkB5CD9Fx9pW/2KcxRJ/u137Y+BG8qWMA4lgII3ZIuvo4
6rLDiXnAnDZqUtrvZ90O/gH6RyQqX3AI4EwPvCnRIIe0okRcxnxYBL/LfBY54xuv
46JRYZP4c9IImqQH9QVo2/egtEzcpbmT/mfhpf6NGQWC3Xps2BqDT2SV/DrX/wPA
8P1atE1AxNp8ENxK/cjFAteEyDZOsDSa757ZHKAdM7L8rZ1Fd2xAA1Dq7IyYpTNE
IX72xytWxllcNvSUPLT+oicsSZBadc/p3moc3tR/rNdgrHKybedadru/f9Gwpa+v
0sllZlEcVPSYddAzWwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCmk60Nj5FPvemx
DSSQjJPyJoIDpTxQ4luSzIq4hPwlUXw7dqrvHyCWgn2YVe9xZsGrT/+n376ecmgu
sw4s4qVhR9bzKkTMewjC2wUooTA5v9HYsNWZy3Ah7hHPbDHlMADYobjB5/XolNUP
bCM9xALEdM9DxpC4vjUZexlRKmjww9QKE22jIM+bqsK0zqDSq+zHpfHNGGcS3vva
OvI6FPc1fAr3pZpVzevMSN2zufIJwjL4FT5/uzwOCaSCwgR1ztD5CSbQLTLlwIsX
S7h2WF9078XumeRjKejdjEjyH4abKRq8+5LVLcjKEpg7OvktuRpPoGPCEToaAzuv
h+RSQwwY
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKmqJhOeLezrRS
jy+AZwW+NZaQHkIP0XH2lb/YpzFEn+7Xftj4EbypYwDiWAgjdki6+jjqssOJecCc
NmpS2u9n3Q7+AfpHJCpfcAjgTA+8KdEgh7SiRFzGfFgEv8t8FjnjG6/jolFhk/hz
0giapAf1BWjb96C0TNyluZP+Z+Gl/o0ZBYLdemzYGoNPZJX8Otf/A8Dw/Vq0TUDE
2nwQ3Er9yMUC14TINk6wNJrvntkcoB0zsvytnUV3bEADUOrsjJilM0QhfvbHK1bG
WVw29JQ8tP6iJyxJkFp1z+neahze1H+s12CscrJt51p2u79/0bClr6/SyWVmURxU
9Jh10DNbAgMBAAECggEBALv7Q+Rf+C7wrQDZF6LUc9CrGfq4CGVy2IGJKgqT/jOF
DO9nI1rv4hNr55sbQNneWtcZaYvht2mrzNlj57zepDjDM7DcFuLBHIuWgLXT/NmC
FyZOo3vXYBlNr8EgT2XfnXAp9UWJCmc2CtUzsIYC4dsmXMeTd8kyc5tUl4r5ybTf
1g+RTck/IGgqdfzpuTsNl79FW2rP9z111Py6dbqgQzhuSAune9dnLFvZst8dyL8j
FStETMxBM6jrCF1UcKXzG7trDHiCdzJ8WUhx6opN/8OasQGndwpXto6FZuBy/AVP
4kVQNpUXImYcLEpva0MqGRHg+YN+c84C71CMchnF4aECgYEA7J2go4CkCcZNKCy5
R5XVCqNFYRHjekR+UwH8cnCa7pMKKfP+lTCiBrO2q8zwWwknRMyuycS5g/xbSpg1
L6hi92CV1YQy1/JhlQedekjejNTTuLOPKf78AFNSfc7axDnes2v4Bvcdp9gsbUIO
10cXh0tOSLE7P9y+yC86KQkFAPECgYEA2zO0M2nvbPHv2jjtymY3pflYm0HzhM/T
kPtue3GxOgbEPsHffBGssShBTE3yCOX3aAONXJucMrSAPL9iwUfgfGx6ADdkwBsA
OjDlkxvTbP/9trE6/lsSPtGpWRdJNHqXN4Hx7gXJizRwG7Ym+oHvIIh53aIjdFoE
HLQLpxObuQsCgYAuMQ99G83qQpYpc6GwAeYXL4yJyK453kky9z5LMQRt8rKXQhS/
F0FqQYc1vsplW0IZQkQVC5yT0Z4Yz+ICLcM0O9zEVAyA78ZxC42Io9UedSXn9tXK
Awc7IQkHmmxGxm1dZYSEB5X4gFEb+zted3h2ZxMfScohS3zLI70c6a/aYQKBgQCU
phRuxUkrTUpFZ1PCbN0R/ezbpLbaewFTEV7T8b6oxgvxLxI6FdZRcSYO89DNvf2w
GLCVe6VKMWPBTlxPDEostndpjCcTq3vU+nHE+BrBkTvh14BVGzddSFsaYpMvNm8z
ojiJHH2XnCDmefkm6lRacJKL/Tcj4SNmv6YjUEXLDwKBgF8WV9lzez3d/X5dphLy
2S7osRegH99iFanw0v5VK2HqDcYO9A7AD31D9nwX46QVYfgEwa6cHtVCZbpLeJpw
qXnYXe/hUU3yn5ipdNJ0Dm/ZhJPDD8TeqhnRRhxbZmsXs8EzfwB2tcUbASvjb3qA
vAaPlOSU1wXqhAsG9aVs8gtL
-----END PRIVATE KEY-----
""", # 3
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzNFoXDTIxMDEwMTAxNDAzNFowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzUqQ
M08E7F2ZE99bFHvpsR6LmgIJOOoGMXacTcEUhRF63E6+730FjxER2a30synv9GGS
3G9FstUmfhyimufkbTumri8Novw5CWZQLiE1rmMBI5nPcR2wAzy9z2odR6bfAwms
yyc3IPYg1BEDBPZl0LCQrQRRU/rVOrbCf7IMq+ATazmBg01gXMzq2M953ieorkQX
MsHVR/kyW0Q0yzhYF1OtIqbXxrdiZ+laTLWNqivj/FdegiWPCf8OcqpcpbgEjlDW
gBcC/vre+0E+16nfUV8xHL5jseJMJqfT508OtHxAzp+2D7b54NvYNIvbOAP+F9gj
aXy5mOvjXclK+hNmDwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAjZzTFKG7uoXxm
BPHfQvsKHIB/Cx9zMKj6pLwJzCPHQBzKOMoUen09oq+fb77RM7WvdX0pvFgEXaJW
q/ImooRMo+paf8GOZAuPwdafb2/OGdHZGZ2Cbo/ICGo1wGDCdMvbxTxrDNq1Yae+
m+2epN2pXAO1rlc7ktRkojM/qi3zXtbLjTs3IoPDXWhYPHdI1ThkneRmvxpzB1rW
2SBqj2snvyI+/3k3RHmldcdOrTlgWQ9hq05jWR8IVtRUFFVn9A+yQC3gnnLIUhwP
HJWwTIPuYW25TuxFxYZXIbnAiluZL0UIjd3IAwxaafvB6uhI7v0K789DKj2vRUkY
E8ptxZH4
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDNSpAzTwTsXZkT
31sUe+mxHouaAgk46gYxdpxNwRSFEXrcTr7vfQWPERHZrfSzKe/0YZLcb0Wy1SZ+
HKKa5+RtO6auLw2i/DkJZlAuITWuYwEjmc9xHbADPL3Pah1Hpt8DCazLJzcg9iDU
EQME9mXQsJCtBFFT+tU6tsJ/sgyr4BNrOYGDTWBczOrYz3neJ6iuRBcywdVH+TJb
RDTLOFgXU60iptfGt2Jn6VpMtY2qK+P8V16CJY8J/w5yqlyluASOUNaAFwL++t77
QT7Xqd9RXzEcvmOx4kwmp9PnTw60fEDOn7YPtvng29g0i9s4A/4X2CNpfLmY6+Nd
yUr6E2YPAgMBAAECggEBAIiL6uQl0AmDrBj6vHMghGzp+0MBza6MgngOA6L4JTTp
ToYQ3pEe4D6rxOq7+QHeiBtNd0ilvn9XpVXGqCVOzrIVNiWvaGubRjjJU9WLA1Ct
y4kpekAr1fIhScMXOsh45ub3XXZ27AVBkM5dTlvTpB8uAd0C/TFVqtR10WLsQ99h
Zm9Jczgs/6InYTssnAaqdeCLAf1LbmO4zwFsJfJOeSGGT6WBwlpHwMAgPhg8OLEu
kVWG7BEJ0hxcODk/es/vce9SN7BSyIzNY+qHcGtsrx/o0eO2Av/Z7ltV4Sz6UN1K
0y0OTiDyT/l62U2OugSN3wQ4xPTwlrWl7ZUHJmvpEaECgYEA+w2JoB2i1OV2JTPl
Y0TKSKcZYdwn7Nwh4fxMAJNJ8UbpPqrZEo37nxqlWNJrY/jKX3wHVk4ESSTaxXgF
UY7yKT0gRuD9+vE0gCbUmJQJTwbceNJUu4XrJ6SBtf72WgmphL+MtyKdwV8XltVl
Yp0hkswGmxl+5+Js6Crh7WznPl8CgYEA0VYtKs2YaSmT1zraY6Fv3AIQZq012vdA
7nVxmQ6jKDdc401OWARmiv0PrZaVNiEJ1YV8KxaPrKTfwhWqxNegmEBgA1FZ66NN
SAm8P9OCbt8alEaVkcATveXTeOCvfpZUO3sqZdDOiYLiLCsokHblkcenK85n0yT6
CzhTbvzDllECgYEAu9mfVy2Vv5OK2b+BLsw0SDSwa2cegL8eo0fzXqLXOzCCKqAQ
GTAgTSbU/idEr+NjGhtmKg/qaQioogVyhVpenLjeQ+rqYDDHxfRIM3rhlD5gDg/j
0wUbtegEHrgOgcSlEW16zzWZsS2EKxq16BoHGx6K+tcS/FOShg5ASzWnuiUCgYEA
sMz+0tLX8aG7CqHbRyBW8FMR9RY/kRMY1Q1+Bw40wMeZfSSSkYYN8T9wWWT/2rqm
qp7V0zJ34BFUJoDUPPH84fok3Uh9EKZYpAoM4z9JP0jREwBWXMYEJnOQWtwxfFGN
DLumgF2Nwtg3G6TL2s+AbtJYH4hxagQl5woIdYmnyzECgYEAsLASpou16A3uXG5J
+5ZgF2appS9Yfrqfh6TKywMsGG/JuiH3djdYhbJFIRGeHIIDb4XEXOHrg/SFflas
If0IjFRh9WCvQxnoRha3/pKRSc3OEka1MR/ZREK/d/LQEPmsRJVzY6ABKqmPAMDD
5CnG6Hz/rP87BiEKd1+3PGp8GCw=
-----END PRIVATE KEY-----
""", # 4
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNDAzNFoXDTIxMDEwMTAxNDAzNFowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0sap
75YbbkEL85LFava3FrO1jpgVteQ4NGxxy1Nu9w2hPfMMeCPWjB8UfAwFk+LVPyvW
LAXd1zWL5rGpQ2ytIVQlTraR5EnALA1sMcQYbFz1ISPTYB031bEN/Ch8JWYwCG5A
X2H4D6BC7NgT6YyWDt8vxQnqAisPHQ/OK4ABD15CwkTyPimek2/ufYN2dapg1xhG
IUD96gqetJv9bu0r869s688kADIComsYG+8KKfFN67S3rSHMIpZPuGTtoHGnVO89
XBm0vNe0UxQkJEGJzZPn0tdec0LTC4GNtTaz5JuCjx/VsJBqrnTnHHjx0wFz8pff
afCimRwA+LCopxPE1QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBOkAnpBb3nY+dG
mKCjiLqSsuEPqpNiBYR+ue/8aVDnOKLKqAyQuyRZttQ7bPpKHaw7pwyCZH8iHnt6
pMCLCftNSlV2Fa8msRmuf5AiGjUvR1M8VtHWNYE8pedWrJqUgBhF/405B99yd8CT
kQJXKF18LObj7YKNsWRoMkVgqlQzWDMEqbfmy9MhuLx2EZPsTB1L0BHNGGDVBd9o
cpPLUixcc12u+RPMKq8x3KgwsnUf5vX/pCnoGcCy4JahWdDgcZlf0hUKGT7PUem5
CWW8SMeqSWQX9XpE5Qlm1+W/QXdDXLbbHqDtvBeUy3iFQe3C9RSkp0qdutxkAlFk
f5QHXfJ7
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDSxqnvlhtuQQvz
ksVq9rcWs7WOmBW15Dg0bHHLU273DaE98wx4I9aMHxR8DAWT4tU/K9YsBd3XNYvm
salDbK0hVCVOtpHkScAsDWwxxBhsXPUhI9NgHTfVsQ38KHwlZjAIbkBfYfgPoELs
2BPpjJYO3y/FCeoCKw8dD84rgAEPXkLCRPI+KZ6Tb+59g3Z1qmDXGEYhQP3qCp60
m/1u7Svzr2zrzyQAMgKiaxgb7wop8U3rtLetIcwilk+4ZO2gcadU7z1cGbS817RT
FCQkQYnNk+fS115zQtMLgY21NrPkm4KPH9WwkGqudOccePHTAXPyl99p8KKZHAD4
sKinE8TVAgMBAAECggEALU5EotoqJUXYEtAenUJQ0pFoWjE4oXNf3Wzd/O1/MZ19
ZjqDGKPjbxUTKyLOZB5i5gQ/MhFEwQiifMD9eB+5CyvyJPw7Wc28f/uWoQ/cjBZj
Hm979PHy2X0IW4Y8QTG462b/cUE2t+0j1ZMQnKf6bVHuC7V41mR5CC8oitMl5y5g
34yJmWXlIA0ep/WotLMqvil6DnSM/2V8Ch4SxjnzPpjbe4Kj+woucGNr4UKstZER
8iuHTsR64LjoGktRnnMwZxGZQI7EC428zsliInuWMdXe//w2chLdkirqpSrIQwSZ
3jNWStqBXGYaRg5Z1ilBvHtXxkzDzbAlzRBzqfEwwQKBgQDqYdMRrzHJaXWLdsyU
6jAuNX9tLh7PcicjP93SbPujS6mWcNb+D/au+VhWD+dZQDPRZttXck7wvKY1lw1V
MK0TYI7ydf8h3DFx3Mi6ZD4JVSU1MH233C3fv/FHenDoOvMXXRjUZxaRmuzFJvzt
6QlKIfSvwT+1wrOACNfteXfZUQKBgQDmN3Uuk01qvsETPwtWBp5RNcYhS/zGEQ7o
Q4K+teU453r1v8BGsQrCqulIZ3clMkDru2UroeKn1pzyVAS2AgajgXzfXh3VeZh1
vHTLP91BBYZTTWggalEN4aAkf9bxX/hA+9Bw/dzZcQW2aNV7WrYuCSvp3SDCMina
anQq/PaSRQKBgHjw23HfnegZI89AENaydQQTFNqolrtiYvGcbgC7vakITMzVEwrr
/9VP0pYuBKmYKGTgF0RrNnKgVX+HnxibUmOSSpCv9GNrdJQVYfpT6XL1XYqxp91s
nrs7FuxUMNiUOoWOw1Yuj4W4lH4y3QaCXgnDtbfPFunaOrdRWOIv8HjRAoGAV3NT
mSitbNIfR69YIAqNky3JIJbb42VRc1tJzCYOd+o+pCF96ZyRCNehnDZpZQDM9n8N
9GAfWEBHCCpwS69DVFL422TGEnSJPJglCZwt8OgnWXd7CW05cvt1OMgzHyekhxLg
4Dse7J5pXBxAlAYmVCB5xPGR4xLpISX1EOtcwr0CgYEA5rA2IUfjZYb4mvFHMKyM
xWZuV9mnl3kg0ULttPeOl3ppwjgRbWpyNgOXl8nVMYzxwT/A+xCPA18P0EcgNAWc
frJqQYg3NMf+f0K1wSaswUSLEVrQOj25OZJNpb21JEiNfEd5DinVVj4BtVc6KSpS
kvjbn2WhEUatc3lPL3V0Fkw=
-----END PRIVATE KEY-----
""", # 5
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExM1oXDTIxMDEwMTAxNTExM1owFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1c5y
S9IZHF9MIuwdafzhMkgP37I3RVpHEbpnPwnLFqSWelS5m2eDkwWd5SkfGjrmQ5q0
PEpqLlh3zHGw9yQjnHS3CCS1PwQ1kmwvpIK3HM5y8GM7ry1zkam8ZR4iX6Y7VG9g
9mhiVVFoVhe1gHeiC/3Mp6XeNuEiD0buM+8qZx9B21I+iwzy4wva7Gw0fJeq9G1c
lq2rhpD1LlIEodimWOi7lOEkNmUiO1SvpdrGdxUDpTgbdg6r5pCGjOXLd74tAQHP
P/LuqRNJDXtwvHtLIVQnW6wjjy4oiWZ8DXOdc9SkepwQLIF5Wh8O7MzF5hrd6Cvw
SOD3EEsJbyycAob6RwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQBDNcbKVUyGOAVm
k3iVuzkkkymlTAMm/gsIs6loLJrkSqNg160FdVKJoZFjQtqoqLgLrntdCJ377nZ9
1i+yzbZsA4DA7nxj0IEdnd7rRYgGLspGqWeKSTROATeT4faLTXenecm0v2Rpxqc7
dSyeZJXOd2OoUu+Q64hzXCDXC6LNM+xZufxV9qv+8d+CipV6idSQZaUWSVuqFCwD
PT0R4eWfkMMaM8QqtNot/hVCEaKT+9rG0mbpRe/b/qBy5SR0u+XgGEEIV+33L59T
FXY+DpI1Dpt/bJFoUrfj6XohxdTdqYVCn1F8in98TsRcFHyH1xlkS3Y0RIiznc1C
BwAoGZ4B
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDVznJL0hkcX0wi
7B1p/OEySA/fsjdFWkcRumc/CcsWpJZ6VLmbZ4OTBZ3lKR8aOuZDmrQ8SmouWHfM
cbD3JCOcdLcIJLU/BDWSbC+kgrccznLwYzuvLXORqbxlHiJfpjtUb2D2aGJVUWhW
F7WAd6IL/cynpd424SIPRu4z7ypnH0HbUj6LDPLjC9rsbDR8l6r0bVyWrauGkPUu
UgSh2KZY6LuU4SQ2ZSI7VK+l2sZ3FQOlOBt2DqvmkIaM5ct3vi0BAc8/8u6pE0kN
e3C8e0shVCdbrCOPLiiJZnwNc51z1KR6nBAsgXlaHw7szMXmGt3oK/BI4PcQSwlv
LJwChvpHAgMBAAECggEBAK0KLeUBgIM++Y7WDCRInzYjrn08bpE5tIU7mO4jDfQg
dw1A3wtQZuOpyxW6B0siWlRis/aLv44M2cBkT3ZmEFBDAhOcKfh7fqQn3RNHG847
pDi8B4UKwxskBa7NCcLh9eirUA19hABLJ6dt/t6fdE5CNc2FZ+iAoyE8JfNwYKAd
6Fa3HqUBPNWt8ryj4ftgpMNBdfmLugEM4N20SXJA28hOq2lUcwNKQQ1xQrovl0ig
iMbMWytV4gUPKC9Wra66OYIkk/K8teiUNIYA4JwAUVTs1NEWoyfwUTz1onutCkMl
5vY7JAqRoDWoSUX6FI+IHUdyqPAMdOMhC37gjrxoo2ECgYEA7trDMu6xsOwEckDh
iz148kejMlnTTuCNetOFBw3njFgxISx0PrDLWmJmnHMxPv9AAjXYb2+UCCm3fj6Q
OB8o4ZJm0n504qbFHcb2aI22U5hZ99ERvqx8WBnJ2RarIBmg06y0ktxq8gFR2qxF
0hWAOcDn1DWQ8QI0XBiFFcJTGtcCgYEA5SdlIXRnVZDKi5YufMAORG9i74dXUi0Y
02UoVxJ+q8VFu+TT8wrC5UQehG3gX+79Cz7hthhDqOSCv6zTyE4Evb6vf9OLgnVe
E5iLF033zCxLSS9MgiZ+jTO+wK3RsapXDtGcSEk2P82Pj5seNf4Ei1GNCRlm1DbX
71wlikprHhECgYABqmLcExAIJM0vIsav2uDiB5/atQelMCmsZpcx4mXv85l8GrxA
x6jTW4ZNpvv77Xm7yjZVKJkGqYvPBI6q5YS6dfPjmeAkyHbtazrCpeJUmOZftQSD
qN5BGwTuT5sn4SXe9ABaWdEhGONCPBtMiLvZK0AymaEGHTbSQZWD/lPoBwKBgGhk
qg2zmd/BNoSgxkzOsbE7jTbR0VX+dXDYhKgmJM7b8AjJFkWCgYcwoTZzV+RcW6rj
2q+6HhizAV2QvmpiIIbQd+Mj3EpybYk/1R2ox1qcUy/j/FbOcpihGiVtCjqF/2Mg
2rGTqMMoQl6JrBmsvyU44adjixTiZz0EHZYCkQoBAoGBAMRdmoR4mgIIWFPgSNDM
ISLJxKvSFPYDLyAepLfo38NzKfPB/XuZrcOoMEWRBnLl6dNN0msuzXnPRcn1gc1t
TG7db+hivAyUoRkIW3dB8pRj9dDUqO9OohjKsJxJaQCyH5vPkQFSLbTIgWrHhU+3
oSPiK/YngDV1AOmPDH7i62po
-----END PRIVATE KEY-----
""", #6
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExMloXDTIxMDEwMTAxNTExMlowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAojGu
fQaTVT9DJWJ/zogGfrryEJXYVy9c441O5MrLlRx7nCIWIUs2NEhHDJdqJjYOTdmk
K98VhdMpDPZwxjgvvZrh43lStBRIW3zZxv747rSl2VtpSqD/6UNWJe5u4SR7oga4
JfITOKHg/+ASxnOxp/iu6oT6jBL6T7KSPh6Rf2+it2rsjhktRreFDJ2hyroNq1w4
ZVNCcNPgUIyos8u9RQKAWRNchFh0p0FCS9xNrn3e+yHnt+p6mOOF2gMzfXT/M2hq
KQNmc5D3yNoH2smWoz7F3XsRjIB1Ie4VWoRRaGEy7RwcwiDfcaemD0rQug6iqH7N
oomF6f3R4DyvVVLUkQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQB/8SX6qyKsOyex
v3wubgN3FPyU9PqMfEzrFM6X5sax0VMVbSnekZrrXpdnXYV+3FBu2GLLQc900ojj
vKD+409JIriTcwdFGdLrQPTCRWkEOae8TlXpTxuNqJfCPVNxFN0znoat1bSRsX1U
K0mfEETQ3ARwlTkrF9CM+jkU3k/pnc9MoCLif8P7OAF38AmIbuTUG6Gpzy8RytJn
m5AiA3sds5R0rpGUu8mFeBpT6jIA1QF2g+QNHKOQcfJdCdfqTjKw5y34hjFqbWG9
RxWGeGNZkhC/jADCt+m+R6+hlyboLuIcVp8NJw6CGbr1+k136z/Dj+Fdhm6FzF7B
qULeRQJ+
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCiMa59BpNVP0Ml
Yn/OiAZ+uvIQldhXL1zjjU7kysuVHHucIhYhSzY0SEcMl2omNg5N2aQr3xWF0ykM
9nDGOC+9muHjeVK0FEhbfNnG/vjutKXZW2lKoP/pQ1Yl7m7hJHuiBrgl8hM4oeD/
4BLGc7Gn+K7qhPqMEvpPspI+HpF/b6K3auyOGS1Gt4UMnaHKug2rXDhlU0Jw0+BQ
jKizy71FAoBZE1yEWHSnQUJL3E2ufd77Iee36nqY44XaAzN9dP8zaGopA2ZzkPfI
2gfayZajPsXdexGMgHUh7hVahFFoYTLtHBzCIN9xp6YPStC6DqKofs2iiYXp/dHg
PK9VUtSRAgMBAAECggEANjn0A3rqUUr4UQxwfIV/3mj0O1VN4kBEhxOcd+PRUsYW
EapXycPSmII9ttj8tU/HUoHcYIqSMI7bn6jZJXxtga/BrALJAsnxMx031k8yvOQK
uvPT7Q6M4NkReVcRHRbMeuxSLuWTRZDhn8qznEPb9rOvD1tsRN6nb3PdbwVbUcZh
2F6JDrTyI/Df6nrYQAWOEe2ay7tzgrNYE4vh+DW7oVmyHRgFYA+DIG5Q+7OVWeW5
bwYYPKlo4/B0L+GfMKfMVZ+5TvFWAK0YD1e/CW1Gv+i/8dWm4O7UNGg5mTnrIcy1
g5wkKbyea02/np2B/XBsSWXDl6rTDHL7ay0rH2hjEQKBgQDMKSm3miQTIcL/F2kG
ieapmRtSc7cedP967IwUfjz4+pxPa4LiU47OCGp1bmUTuJAItyQyu/5O3uLpAriD
PTU+oVlhqt+lI6+SJ4SIYw01/iWI3EF2STwXVnohWG1EgzuFM/EqoB+mrodNONfG
UmP58vI9Is8fdugXgpTz4Yq9pQKBgQDLYJoyMVrYTvUn5oWft8ptsWZn6JZXt5Bd
aXh+YhNmtCrSORL3XjcH4yjlcn7X8Op33WQTbPo7QAJ1CumJzAI88BZ/8za638xb
nLueviZApCt0bNMEEdxDffxHFc5TyHE+obMKFfApbCnD0ggO6lrZ8jK9prArLOCp
mRU9SSRffQKBgAjoBszeqZI4F9SfBdLmMyzU5A89wxBOFFMdfKLsOua1sBn627PZ
51Hvpg1HaptoosfujWK1NsvkB0wY9UmsYuU/jrGnDaibnO4oUSzN/WaMlsCYszZg
zYFLIXrQ67tgajlOYcf1Qkw4MujYgPlC4N+njI/EM/rwagGUjcDx5uaNAoGASyqz
EuYG63eTSGH89SEaohw0+yaNmnHv23aF4EAjZ4wjX3tUtTSPJk0g6ly84Nbb8d1T
hZJ7kbaAsf2Mfy91jEw4JKYhjkP05c8x0OP6g12p6efmvdRUEmXX/fXjQjgNEtb0
sz+UedrOPN+9trWLSo4njsyyw+JcTpKTtQj5dokCgYEAg9Y3msg+GvR5t/rPVlKd
keZkrAp0xBJZgqG7CTPXWS1FjwbAPo7x4ZOwtsgjCuE52lar4j+r2Il+CDYeLfxN
h/Jfn6S9ThUh+B1PMvKMMnJUahg8cVL8uQuBcbAy8HPRK78WO2BTnje44wFAJwTc
0liuYqVxZIRlFLRl8nGqog8=
-----END PRIVATE KEY-----
""", #7
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExMloXDTIxMDEwMTAxNTExMlowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu9oO
cFlNukUcLfFrfkEaUiilcHLmn5OokQbj95CGd2ehQCCVwrkunYLBisthRaancFFb
/yM998B0IUsKTsoLi5DAN3/SkSm6GiQIGO05E4eBPljwJ61QQMxh8+1TwQ9HTun1
ZE1lhVN1aRmI9VsbyTQLjXh9OFNLSJEKb29mXsgzYwYwNOvo+idzXpy4bMyNoGxY
Y+s2FIKehNHHCv4ravDn8rf6DtDOvyN4d0/QyNws9FpAZMXmLwtBJ9exOqKFW43w
97NxgdNiTFyttrTKTi0b+9v3GVdcEZw5b2RMIKi6ZzPof6/0OlThK6C3xzFK3Bp4
PMjTfXw5yyRGVBnZZwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA4Ms6LqzMu757z
bxISiErRls6fcnq0fpSmiPNHNKM7YwG9KHYwPT6A0UMt30zDwNOXCQBI19caGeeO
MLPWa7Gcqm2XZB2jQwvLRPeFSy9fm6RzJFeyhrh/uFEwUetwYmi/cqeIFDRDBQKn
bOaXkBk0AaSmI5nRYfuqpMMjaKOFIFcoADw4l9wWhv6DmnrqANzIdsvoSXi5m8RL
FcZQDZyHFlHh3P3tLkmQ7ErM2/JDwWWPEEJMlDm/q47FTOQSXZksTI3WRqbbKVv3
iQlJjpgi9yAuxZwoM3M4975iWH4LCZVMCSqmKCBt1h9wv4LxqX/3kfZhRdy1gG+j
41NOSwJ/
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC72g5wWU26RRwt
8Wt+QRpSKKVwcuafk6iRBuP3kIZ3Z6FAIJXCuS6dgsGKy2FFpqdwUVv/Iz33wHQh
SwpOyguLkMA3f9KRKboaJAgY7TkTh4E+WPAnrVBAzGHz7VPBD0dO6fVkTWWFU3Vp
GYj1WxvJNAuNeH04U0tIkQpvb2ZeyDNjBjA06+j6J3NenLhszI2gbFhj6zYUgp6E
0ccK/itq8Ofyt/oO0M6/I3h3T9DI3Cz0WkBkxeYvC0En17E6ooVbjfD3s3GB02JM
XK22tMpOLRv72/cZV1wRnDlvZEwgqLpnM+h/r/Q6VOEroLfHMUrcGng8yNN9fDnL
JEZUGdlnAgMBAAECggEALlZdlW0R9U6y4spYf65Dddy84n4VUWu0+wE+HoUyBiYz
6oOfLYdMbmIgp8H/XpT7XINVNBxXXtPEUaoXAtRoAKdWItqO8Gvgki4tKSjrGVwl
j2GU69SepT1FNExoiojgSCEB/RnyXu71WVWJKSyuL/V8nAsKqGgze9T7Q/2wvNQt
SQqLxZlrWF0P8WqaAiSrHV4GnDrdeF+k1KBo2+pSaDNv6cNwOyVG8EII9tqhF8kj
6nD6846ish6OqmlSisaSGopJZL1DCQzszFMxKd2+iBDY7Kn6hVIhRaNnaZUFhpKM
dNh6hBqOycMepAp0sz5pdo+fxpifkoR/cPWgyC3QkQKBgQDixe9VsiZ7u2joxF/9
JcAExKhqE28OUmIwt6/j+uzYShxN6Oo9FUo3ICtAPCCFsjhvb3Qum7FspmxrqtNy
fzclibZJPO8ey2PzqaiOfiVfgJmNSvoCOdgM4OqFLtRO6eSTzhJeI4VPrPcq/5la
0FuOi1WZs/Au9llqLqGSDH3UAwKBgQDUD/bSJbOk5SvNjFtFm0ClVJr66mJ5e4uN
4VGv8KGFAJ+ExIxujAukfKdwLjS1wEy2RePcshfT8Y9FVh/Q1KzzrQi3Gwmfq1G6
Dpu2HlJpaZl+9T81x2KS8GP3QNczWMe2nh7Lj+6st+b4F+6FYbVTFnHaae27sXrD
XPX15+uxzQKBgGy+pBWBF4kwBo/QU4NuTdU7hNNRPGkuwl1ASH1Xv6m8aDRII8Nk
6TDkITltW98g5oUxehI7oOpMKCO9SCZYsNY0YpBeQwCOYgDfc6/Y+A0C+x9RO/BD
UsJiPLPfD/pDmNPz9sTj3bKma+RXq29sCOujD0pkiiHLCnerotkJWnGHAoGAAkCJ
JoIv/jhQ1sX+0iZr8VWMr819bjzZppAWBgBQNtFi4E4WD7Z9CSopvQ9AkA2SwvzL
BrT9e8q88sePXvBjRdM4nHk1CPUQ0SEGllCMH4J3ltmT6kZLzbOv3BhcMLdop4/W
U+MbbcomMcxPRCtdeZxraR5m3+9qlliOZCYqYqECgYA5eLdxgyHxCS33QGFHRvXI
TLAHIrr7wK1xwgkmZlLzYSQ8Oqh1UEbgoMt4ulRczP2g7TCfvANw2Sw0H2Q5a6Fj
cnwVcXJ38DLg0GCPMwzE8dK7d8tKtV6kGiKy+KFvoKChPjE6uxhKKmCJaSwtQEPS
vsjX3iiIgUQPsSz8RrNFfQ==
-----END PRIVATE KEY-----
""", #8
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExMloXDTIxMDEwMTAxNTExMlowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5DNu
CKhhl6wCbgoCkFemwJh3ATbAjhInHpvQWIFDfSK1USElCKxqosIxiBQCx3Zs2d/U
GeIA7QAM2atNdXaateacEaKMmGE9LEtO0Dg5lmT43WzmGkG9NmCwK3JjAekc5S9d
HKNtEQo7o8RKfj81zlDSq2kzliy98cimk24VBBGkS2Cn7Vy/mxMCqWjQazTXbpoS
lXw6LiY5wFXQmXOB5GTSHvqyCtBQbOSSbJB77z/fm7bufTDObufTbJIq53WPt00Y
f+JNnzkX1X0MaBCUztoZwoMaExWucMe/7xsQ46hDn6KB4b0lZk+gsK45QHxvPE1R
72+ZkkIrGS/ljIKahQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQDib1653CneSmy2
gYzGeMlrI05Jqo3JuHNMQHzAjIrb4ee57VA4PTQa1ygGol/hVv6eTvZr3p2ospDS
5Kfwj1HLO4jSMX1Bnm1FG0naQogz2CD3xfYjbYOVRhAxpld1MNyRveIOhDRARY7N
XNAaNPZ1ALrwbENSYArr18xDzgGWe/dgyRCEpCFIsztiA+7jGvrmAZgceIE8K3h3
fkvNmXBH58ZHAGTiyRriBZqS+DXrBrQOztXSJwFnOZnRt6/efeBupt8j5hxVpBLW
vtjpBc23uUcbbHOY2AW2Bf+vIr4/LmJ/MheKV+maa2990vmC93tvWlFfc74mgUkW
HJfXDmR6
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDkM24IqGGXrAJu
CgKQV6bAmHcBNsCOEicem9BYgUN9IrVRISUIrGqiwjGIFALHdmzZ39QZ4gDtAAzZ
q011dpq15pwRooyYYT0sS07QODmWZPjdbOYaQb02YLArcmMB6RzlL10co20RCjuj
xEp+PzXOUNKraTOWLL3xyKaTbhUEEaRLYKftXL+bEwKpaNBrNNdumhKVfDouJjnA
VdCZc4HkZNIe+rIK0FBs5JJskHvvP9+btu59MM5u59NskirndY+3TRh/4k2fORfV
fQxoEJTO2hnCgxoTFa5wx7/vGxDjqEOfooHhvSVmT6CwrjlAfG88TVHvb5mSQisZ
L+WMgpqFAgMBAAECggEABTdPuo7uvCLIY2+DI319aEWT4sk3mYe8sSxqlLtPqZqT
fmk9iXc3cMTzkOK0NY71af19waGy17f6kzchLCAr5SCCTLzkbc87MLn/8S530oI4
VgdZMxxxkL6hCD0zGiYT7QEqJa9unMcZGeMwuLYFKtQaHKTo8vPO26n0dMY9YLxj
cNUxsKLcKk8dbfKKt4B4fZgB7nU0BG9YbKYZ3iZ7/3mG+6jA6u+VYc/WHYQjTmpL
oLFN7NOe3R7jIx/kJ1OqNWqsFoLpyiiWd1Mr0l3EdD1kCudptMgD8hd++nx2Yk2w
K4+CpOVIN/eCxDDaAOJgYjCtOayVwUkDAxRRt9VnAQKBgQD5s1j6RJtBNTlChVxS
W3WpcG4q8933AiUY/Chx0YTyopOiTi7AGUaA8AOLFBcO2npa+vzC+lvuOyrgOtVW
sD10H2v5jNKlbeBp+Q9rux2LAyp4TvzdXWKhVyZrdtITF0hn6vEYNp7MtyWRFb1O
3Ie5HQBPHtzllFOMynacjOdjpQKBgQDp9TrbfOmwGWmwPKmaFKuy8BKxjJM+ct0X
4Xs1uSy9Z9Y8QlDNbNaooI8DA1NY0jDVHwemiGC4bYsBNKNRcbI0s2nr0hQMft42
P/NpugHv0YXiVz+5bfim4woTiHHbfREqchlIGo3ryClAiDU9fYZwTOtb9jPIhX3G
9v+OsoMlYQKBgQDJUQW90S5zJlwh+69xXvfAQjswOimNCpeqSzK4gTn0/YqV4v7i
Nf6X2eqhaPMmMJNRYuYCtSMFMYLiAc0a9UC2rNa6/gSfB7VU+06phtTMzSKimNxa
BP6OIduB7Ox2I+Fmlw8GfJMPbeHF1YcpW7e5UV58a9+g4TNzYZC7qwarWQKBgQCA
FFaCbmHonCD18F/REFvm+/Lf7Ft3pp5PQouXH6bUkhIArzVZIKpramqgdaOdToSZ
SAGCM8rvbFja8hwurBWpMEdeaIW9SX8RJ/Vz/fateYDYJnemZgPoKQcNJnded5t8
Jzab+J2VZODgiTDMVvnQZOu8To6OyjXPRM0nK6cMQQKBgQDyX44PHRRhEXDgJFLU
qp2ODL54Qadc/thp2m+JmAvqmCCLwuYlGpRKVkLLuZW9W6RlVqarOC3VD3wX5PRZ
IsyCGLi+Jbrv9JIrYUXE80xNeQVNhrrf02OW0KHbqGxRaNOmp1THPw98VUGR2J/q
YAp6XUXU7LEBUrowye+Ty2o7Lg==
-----END PRIVATE KEY-----
""", #9
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExMVoXDTIxMDEwMTAxNTExMVowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1k2R
PWYihftppo3CoxeseFwgg7guxZVkP7aAur5uBzSeAB7sBG1G2bRrwMX71S4xPwot
zYiEoxUrTStUqEKjL2aozfHsXnHZ7kwwUgZFDZUg+ve2tZDA3HCUr4tLYKlyFqpx
2nCouc45MjQ4wAxRl4rQxIUG2uSTzvP+xXtjoJYMIEEyCpcsRXfqfVkEUe9nrPsF
0Ibzk7Cyt75HDI4uEzBuHux0DYuGy6R02jz/vf/dIZ4WepjSY06xpblTHZgieDRX
fU2+YOcvb0eDHyA8Q5p8ropK71MNIP5+kffFd90SVr4EkCA8S+cd6FdKQasRr+jF
9MUhMS4ObvlrYTG+hwIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQCy62MZ3+59/VpX
c9Hsmb4/BMWt0irLJit4w4SkuYKGFLCMKZI4LN4pEkXaiE1eqF2DNS1qOvl5luty
Zz4oggrqilwuFeH98o9Zeg9SYnouuypORVP/3DPbJF/jiQg5J8kJb1sy+BjRiT8I
5X6/cCBYT+MljFz5tpqWOtWTgA30e1BV8JFj8F4dgUcWsAVT/I4l9zgMLUnhcO6E
wLtEE0I6aT1RHJB28ndwJzj4La98Oirw7LAEAWbExWYB90ypLaGY+JVJe3f5fijC
fJpQ2mbs4syXDmb5bU2C2pGPTKZPcyx15iQrq1uHInD0facOw+pmllAFxuG96lA1
+o2VzKwP
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDWTZE9ZiKF+2mm
jcKjF6x4XCCDuC7FlWQ/toC6vm4HNJ4AHuwEbUbZtGvAxfvVLjE/Ci3NiISjFStN
K1SoQqMvZqjN8execdnuTDBSBkUNlSD697a1kMDccJSvi0tgqXIWqnHacKi5zjky
NDjADFGXitDEhQba5JPO8/7Fe2OglgwgQTIKlyxFd+p9WQRR72es+wXQhvOTsLK3
vkcMji4TMG4e7HQNi4bLpHTaPP+9/90hnhZ6mNJjTrGluVMdmCJ4NFd9Tb5g5y9v
R4MfIDxDmnyuikrvUw0g/n6R98V33RJWvgSQIDxL5x3oV0pBqxGv6MX0xSExLg5u
+WthMb6HAgMBAAECggEAeCyRSNwQeg/NZD/UqP6qkegft52+ZMBssinWsGH/c3z3
KVwtwCHDfGvnjPe5TAeWSCKeIsbukkFZwfGNjLmppvgrqymCAkhYDICfDDBF4uMA
1pu40sJ01Gkxh+tV/sOmnb1BEVzh0Sgq/NM6C8ActR18CugKOw+5L3G2KeoSqUbT
2hcPUsnik10KwqW737GQW4LtEQEr/iRmQkxI3+HBzvPWjFZzjOcpUph+FW5TXtaU
T26mt1j+FjbdvvhCuRMY/VZBJ5h1RKU95r57F1AjW/C0RRJ8FxR1CeSy4IlmQBrh
6wAa3Tdm0k/n4ZspC9bF5eVTJEtb0AohiYZrIa8MuQKBgQD8yjCLYa41H304odCx
NwPRJcmlIk5YGxPrhHAT9GEgU6n/no7YMVx1L7fNLcMjAyx54jauEU7J19Aki7eV
SIdU9TwqmkOAFfM6TOEJZiOi66gABOxeK2yDyfmR6Apaw3caku4O058t4KVwHSCB
DanYCMzxCBqS9jUTTyAh0fMg6wKBgQDZBkIukg3FKPor5LzkUXIKnNHYPfHbERHw
piWS6GZwqhuWNlOCWxiBR4rEUU/RbFQZw/FCi5OuAk2lBC0LBmC0/Sz4/+xDdCbv
uNhMOTRcy9nFVpmpIWCx4N/KmXHEuFxli/JNXux7iki74AVC9VPrAt/kCvwf06Df
oDb8ljdR1QKBgQChVOD6c5Lc8IXYeN1Z3IShHH6+11AsxstFyjZFZff+y6Z5L1Z2
/7nESHoDhqs9Uy81cnv3R7CC/Ssnx8uYiLtmK0UE44Mk4d1jXeFZQEiKF+AWcw3v
Y8NTsLmItxC0sH75BMDN0Z2LiA3Nqaku8+trpuI1Cjj7hgqFkkAtlXKXlQKBgBMb
c/Q5s7CqHOyEZQUNDqdUiz0opwSMijHPzvsSLwK4V1lwSwXtE0k+jT8fkZF0oirq
j3E2bLgjR8bBiV2xIA6PQ8hgb+K4dT0h3xlG6A9Le07egwTbBXJjxBBIVjXlrWzb
V2fsdZGi6ShxXsU4aD0GscOYG/6JWV6W8oBmkVRJAoGAepIZ+OYmFjb7uxdh4EtP
hluEtx5bLOLuo6c0S149omUXUhbsuyzTZS6Ip9ySDMnK3954c4Q4WJ4yQKixQNVq
78aDfy4hP/8TE/Q9CRddUof2P33PJMhVNqzTRYMpqV+zxifvtw3hoDTLKHTQxCR2
M1+O4VvokU5pBqUpGXiMDfs=
-----END PRIVATE KEY-----
""", #10
"""-----BEGIN CERTIFICATE-----
MIICojCCAYoCAQEwDQYJKoZIhvcNAQELBQAwFzEVMBMGA1UEAwwMbmV3cGJfdGhp
bmd5MB4XDTIwMDEwMjAxNTExMVoXDTIxMDEwMTAxNTExMVowFzEVMBMGA1UEAwwM
bmV3cGJfdGhpbmd5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAnbCU
M37hG7zrCyyJEI6pZmOomnI+CozbP5KAhWSV5y7R5H6lcAEG2UDV+lCUxHT2ufOa
i1H16bXyBt7VoMTHIH50S58NUCUEXcuRWVR16tr8CzcTHQAkfIrmhY2XffPilX7h
aw35UkoVmXcqSDNNJD6jmvWexvmbhzVWW8Vt5Pivet2/leVuqPXB54/alSbkC74m
x6X5XKQc6eyPsb1xvNBuiSpFzdqbEn7lUwj6jFTkh9tlixgmgx+J0XoQXbawyrAg
rcIQcse/Ww+KBA1KSccFze+XBTbIull4boYhbJqkb6DW5bY7/me2nNxE9DRGwq+S
kBsKq3YKeCf8LEhfqQIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQAD+tWGFhINYsWT
ibKWlCGgBc5uB7611cLCevx1yAL6SaOECVCQXzaaXIaETSbyY03UO2yBy3Pl10FV
GYXLrAWTFZsNVJm55XIibTNw1UBPNwdIoCSzAYuOgMF0GHhTTQU0hNYWstOnnE2T
6lSAZQZFkaW4ZKs6sUp42Em9Bu99PehyIgnw14qb9NPg5qKdi2GAvkImZCrGpMdK
OF31U7Ob0XQ0lxykcNgG4LlUACd+QxLfNpmLBZUGfikexYa1VqBFm3oAvTt8ybNQ
qr7AKXDFnW75aCBaMpQWzrstA7yYZ3D9XCd5ZNf6d08lGM/oerDAIGnZOZPJgs5U
FaWPHdS9
-----END CERTIFICATE-----
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCdsJQzfuEbvOsL
LIkQjqlmY6iacj4KjNs/koCFZJXnLtHkfqVwAQbZQNX6UJTEdPa585qLUfXptfIG
3tWgxMcgfnRLnw1QJQRdy5FZVHXq2vwLNxMdACR8iuaFjZd98+KVfuFrDflSShWZ
dypIM00kPqOa9Z7G+ZuHNVZbxW3k+K963b+V5W6o9cHnj9qVJuQLvibHpflcpBzp
7I+xvXG80G6JKkXN2psSfuVTCPqMVOSH22WLGCaDH4nRehBdtrDKsCCtwhByx79b
D4oEDUpJxwXN75cFNsi6WXhuhiFsmqRvoNbltjv+Z7ac3ET0NEbCr5KQGwqrdgp4
J/wsSF+pAgMBAAECggEAPSu1ofBTRN5ZU4FYPlsJLdX1Hsy4coFHv/aF8rkdSYwp
EflrFfLgBEEZgLvnqfoxh9sPFYKa4amaFL42ouIS2PEVDgzKLk/dzMDeRof0IkIG
yhb4TCS1ArcjS6WsociNGi8ZJN1L3Xctv9WxSkbUYv4Fm2Qyzr8fbSjssjb5NXwD
K11fsj6Pfy/mQrI0TSTlzWC7ARIlCMTWQ8G8zEU6bMFIG6DMjt2J4VgYVXUKetZA
VPuS+pwkH2obQe6FLRuiNxH4GitVAASYPea6foER4AggBMRp8q8F6+WssjoyEORb
0sJxmnxhznoTRMCuTsUj6XMgmOTOnA3lQXsIB0DYcQKBgQDO6mMRVVFWzgkE9Q5/
36n06KvGYF9TCRDL9vRC8kCqcGd1Hy6jRj0D8049KUHaN74pfWg6gsQjPkKzwKnC
vxNl72tVvLqm7Fo531BGfKK/46ZvxeWMMraNW4+9LhwMPu2LN5OEdwwCgyaURpxh
ktCp+RrGjz08Kn82X1jJPdwxDQKBgQDDGMvZ7ZUDGq5+RJkmHJ58lQtiaMZclmYV
R9YwOxJV6ino3EYrGOtUkqiemgAACdMWE/JMJlB1/JINawJwUsZ2XDp/9jNLPgLc
gphCmagaO34U/YMaJbJIK2gkCX7p8EcD+x45qWa0bEMPW38QfN/qQdUPjNmpuIiI
Zleyl1TqDQKBgQCvIoat0ighsAzETGN0aqzhJdrW8xVcJA06hpFi5MdFPBTldno0
KqxUXqj3badWe94SIhqJg8teBUHSAZ3uv2o82nRgQnk99km8OD8rGi1q+9YRP1C2
5OnNJhW4y4FkABNxxZ2v/k+FBNsvn8CXefvyEm3OaMks1s+MBxIQa7KnNQKBgFwX
HUo+GiN/+bPCf6P8yFa4J8qI+HEF0SPkZ9cWWx5QzP2M1FZNie++1nce7DcYbBo0
yh9lyn8W/H328AzDFckS2c5DEY1HtSQPRP3S+AWB5Y7U54h1GMV2L88q6ExWzb60
T10aeE9b9v+NydmniC5UatTPQIMbht8Tp/u18TAVAoGBAJphAfiqWWV2M5aBCSXq
WxLZ71AJ0PZBmRa/9iwtccwXQpMcW6wHK3YSQxci+sB97TElRa3/onlVSpohrUtg
VCvCwfSHX1LmrfWNSkoJZwCQt+YYuMqW86K0tzLzI1EMjIH9LgQvB6RR26PZQs+E
jr1ZvRc+wPTq6sxCF1h9ZAfN
-----END PRIVATE KEY-----
""", #11
]
# To disable the pre-computed tub certs, uncomment this line.
# SYSTEM_TEST_CERTS = []
def flush_but_dont_ignore(res):
    """
    Flush foolscap's eventual-send queue, then hand ``res`` back unchanged
    so the value stays on the Deferred's callback chain.
    """
    d = flushEventualQueue()
    d.addCallback(lambda ignored: res)
    return d
def _render_config(config):
"""
Convert a ``dict`` of ``dict`` of ``unicode`` to an ini-format string.
"""
return u"\n\n".join(list(
_render_config_section(k, v)
for (k, v)
in config.items()
))
def _render_config_section(heading, values):
"""
Convert a ``unicode`` heading and a ``dict`` of ``unicode`` to an ini-format
section as ``unicode``.
"""
return u"[{}]\n{}\n".format(
heading, _render_section_values(values)
)
def _render_section_values(values):
"""
Convert a ``dict`` of ``unicode`` to the body of an ini-format section as
``unicode``.
"""
return u"\n".join(list(
u"{} = {}".format(k, v)
for (k, v)
in sorted(values.items())
))
class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
    """
    Test mixin that stands up a complete in-process grid: one introducer
    node plus several client nodes, all parented to a single
    ``MultiService`` so they start and stop together.
    """
    def setUp(self):
        # Hands out distinct local listening endpoints so each node created
        # by this mixin gets its own port.
        self.port_assigner = SameProcessStreamEndpointAssigner()
        self.port_assigner.setUp()
        self.addCleanup(self.port_assigner.tearDown)
        # Parent service for every node; stopping it in tearDown stops them all.
        self.sparent = service.MultiService()
        self.sparent.startService()
    def tearDown(self):
        """Stop all node services and drain foolscap's eventual-send queue."""
        log.msg("shutting down SystemTest services")
        d = self.sparent.stopService()
        # flush_but_dont_ignore preserves the result (success or failure)
        # while making sure no eventual-sends run after teardown.
        d.addBoth(flush_but_dont_ignore)
        return d
    def getdir(self, subdir):
        """Return the path of ``subdir`` inside this test's base directory."""
        # NOTE(review): self.basedir is expected to be set by the concrete
        # test case before this is called -- it is not set in setUp here.
        return os.path.join(self.basedir, subdir)
    def add_service(self, s):
        """Parent service ``s`` to ``self.sparent`` and return it."""
        s.setServiceParent(self.sparent)
        return s
    def _create_introducer(self):
        """
        :returns: (via Deferred) an Introducer instance
        """
        iv_dir = self.getdir("introducer")
        if not os.path.isdir(iv_dir):
            # First run: write a minimal tahoe.cfg for the introducer node.
            _, port_endpoint = self.port_assigner.assign(reactor)
            introducer_config = (
                u"[node]\n"
                u"nickname = introducer \N{BLACK SMILING FACE}\n" +
                u"web.port = {}\n".format(port_endpoint)
            ).encode("utf-8")
            fileutil.make_dirs(iv_dir)
            fileutil.write(
                os.path.join(iv_dir, 'tahoe.cfg'),
                introducer_config,
            )
            if SYSTEM_TEST_CERTS:
                # Install the first pre-computed tub certificate so the
                # introducer's identity is stable across test runs.
                os.mkdir(os.path.join(iv_dir, "private"))
                f = open(os.path.join(iv_dir, "private", "node.pem"), "w")
                f.write(SYSTEM_TEST_CERTS[0])
                f.close()
        return create_introducer(basedir=iv_dir)
    def _get_introducer_web(self):
        """Read the introducer's HTTP API root URL from its node.url file."""
        with open(os.path.join(self.getdir("introducer"), "node.url"), "r") as f:
            return f.read().strip()
    @inlineCallbacks
    def set_up_nodes(self, NUMCLIENTS=5):
        """
        Create an introducer and ``NUMCLIENTS`` client nodes pointed at it. All
        of the nodes are running in this process.
        As a side-effect, set:
        * ``numclients`` to ``NUMCLIENTS``
        * ``introducer`` to the ``_IntroducerNode`` instance
        * ``introweb_url`` to the introducer's HTTP API endpoint.
        :param int NUMCLIENTS: The number of client nodes to create.
        :return: A ``Deferred`` that fires when the nodes have connected to
            each other.
        """
        self.numclients = NUMCLIENTS
        self.introducer = yield self._create_introducer()
        self.add_service(self.introducer)
        self.introweb_url = self._get_introducer_web()
        yield self._set_up_client_nodes()
    @inlineCallbacks
    def _set_up_client_nodes(self):
        """Write config for and start ``self.numclients`` client nodes."""
        q = self.introducer
        self.introducer_furl = q.introducer_url
        self.clients = []
        basedirs = []
        # Write every client's config first, before starting any of them.
        for i in range(self.numclients):
            basedirs.append((yield self._set_up_client_node(i)))
        # start clients[0], wait for its tub to be ready (at which point it
        # will have registered the helper furl).
        c = yield client.create_client(basedirs[0])
        c.setServiceParent(self.sparent)
        self.clients.append(c)
        c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
        with open(os.path.join(basedirs[0],"private","helper.furl"), "r") as f:
            helper_furl = f.read()
        self.helper_furl = helper_furl
        if self.numclients >= 4:
            # Point client 3 at client 0's upload helper (see the
            # feature_matrix in _generate_config: 3 has a webserver but no
            # helper of its own).
            with open(os.path.join(basedirs[3], 'tahoe.cfg'), 'a+') as f:
                f.write(
                    "[client]\n"
                    "helper.furl = {}\n".format(helper_furl)
                )
        # this starts the rest of the clients
        for i in range(1, self.numclients):
            c = yield client.create_client(basedirs[i])
            c.setServiceParent(self.sparent)
            self.clients.append(c)
            c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
        log.msg("STARTING")
        yield self.wait_for_connections()
        log.msg("CONNECTED")
        # now find out where the web port was
        self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
        if self.numclients >=4:
            # and the helper-using webport
            self.helper_webish_url = self.clients[3].getServiceNamed("webish").getURL()
    def _generate_config(self, which, basedir):
        """
        Build the ini-format tahoe.cfg text for client number ``which``,
        and write its private/introducers.yaml as a side effect.
        """
        config = {}
        except1 = set(range(self.numclients)) - {1}
        # Maps (section, feature) -> the set of client numbers that get that
        # feature.  A feature absent from this table applies to every client
        # (see the {which} default in setconf below).
        feature_matrix = {
            ("client", "nickname"): except1,
            # client 1 has to auto-assign an address.
            ("node", "tub.port"): except1,
            ("node", "tub.location"): except1,
            # client 0 runs a webserver and a helper
            # client 3 runs a webserver but no helper
            ("node", "web.port"): {0, 3},
            ("node", "timeout.keepalive"): {0},
            ("node", "timeout.disconnect"): {3},
            ("helper", "enabled"): {0},
        }
        def setconf(config, which, section, feature, value):
            # Only set the option if this client is in the feature's set.
            if which in feature_matrix.get((section, feature), {which}):
                config.setdefault(section, {})[feature] = value
        setnode = partial(setconf, config, which, "node")
        sethelper = partial(setconf, config, which, "helper")
        setnode("nickname", u"client %d \N{BLACK SMILING FACE}" % (which,))
        tub_location_hint, tub_port_endpoint = self.port_assigner.assign(reactor)
        setnode("tub.port", tub_port_endpoint)
        setnode("tub.location", tub_location_hint)
        _, web_port_endpoint = self.port_assigner.assign(reactor)
        setnode("web.port", web_port_endpoint)
        setnode("timeout.keepalive", "600")
        setnode("timeout.disconnect", "1800")
        sethelper("enabled", "True")
        # Every client learns about the introducer via introducers.yaml.
        iyaml = ("introducers:\n"
                 " petname2:\n"
                 " furl: %s\n") % self.introducer_furl
        iyaml_fn = os.path.join(basedir, "private", "introducers.yaml")
        fileutil.write(iyaml_fn, iyaml)
        return _render_config(config)
    def _set_up_client_node(self, which):
        """Create the base directory and tahoe.cfg for client ``which``."""
        basedir = self.getdir("client%d" % (which,))
        fileutil.make_dirs(os.path.join(basedir, "private"))
        if len(SYSTEM_TEST_CERTS) > (which + 1):
            # Cert 0 belongs to the introducer; client N uses cert N+1.
            f = open(os.path.join(basedir, "private", "node.pem"), "w")
            f.write(SYSTEM_TEST_CERTS[which + 1])
            f.close()
        config = self._generate_config(which, basedir)
        fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
        return basedir
    def bounce_client(self, num):
        """
        Stop client ``num`` and start a fresh client instance from the same
        base directory, waiting for the grid to reconnect.
        """
        c = self.clients[num]
        d = c.disownServiceParent()
        # I think windows requires a moment to let the connection really stop
        # and the port number made available for re-use. TODO: examine the
        # behavior, see if this is really the problem, see if we can do
        # better than blindly waiting for a second.
        d.addCallback(self.stall, 1.0)
        @defer.inlineCallbacks
        def _stopped(res):
            new_c = yield client.create_client(self.getdir("client%d" % num))
            self.clients[num] = new_c
            new_c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
            new_c.setServiceParent(self.sparent)
        d.addCallback(_stopped)
        d.addCallback(lambda res: self.wait_for_connections())
        def _maybe_get_webport(res):
            if num == 0:
                # now find out where the web port was
                self.webish_url = self.clients[0].getServiceNamed("webish").getURL()
        d.addCallback(_maybe_get_webport)
        return d
    @defer.inlineCallbacks
    def add_extra_node(self, client_num, helper_furl=None,
                       add_to_sparent=False):
        """
        Create and start one additional client node, append it to
        ``self.clients``, and fire with the new client once connected.
        """
        # usually this node is *not* parented to our self.sparent, so we can
        # shut it down separately from the rest, to exercise the
        # connection-lost code
        basedir = FilePath(self.getdir("client%d" % client_num))
        basedir.makedirs()
        config = "[client]\n"
        if helper_furl:
            config += "helper.furl = %s\n" % helper_furl
        basedir.child("tahoe.cfg").setContent(config.encode("utf-8"))
        private = basedir.child("private")
        private.makedirs()
        write_introducer(
            basedir,
            "default",
            self.introducer_furl,
        )
        c = yield client.create_client(basedir.path)
        self.clients.append(c)
        c.set_default_mutable_keysize(TEST_RSA_KEY_SIZE)
        self.numclients += 1
        if add_to_sparent:
            c.setServiceParent(self.sparent)
        else:
            c.startService()
        yield self.wait_for_connections()
        defer.returnValue(c)
    def _check_connections(self):
        """
        Poll predicate: True once every client is connected to the
        introducer, sees all ``numclients`` storage servers, and (if
        configured with a helper furl) has connected to its helper.
        """
        for i, c in enumerate(self.clients):
            if not c.connected_to_introducer():
                log.msg("%s not connected to introducer yet" % (i,))
                return False
            sb = c.get_storage_broker()
            connected_servers = sb.get_connected_servers()
            connected_names = sorted(list(
                connected.get_nickname()
                for connected
                in sb.get_known_servers()
                if connected.is_connected()
            ))
            if len(connected_servers) != self.numclients:
                wanted = sorted(list(
                    client.nickname
                    for client
                    in self.clients
                ))
                log.msg(
                    "client %s storage broker connected to %s, missing %s" % (
                        i,
                        connected_names,
                        set(wanted) - set(connected_names),
                    )
                )
                return False
            log.msg("client %s storage broker connected to %s, happy" % (
                i, connected_names,
            ))
            up = c.getServiceNamed("uploader")
            if up._helper_furl and not up._helper:
                log.msg("Helper fURL but no helper")
                return False
        return True
    def wait_for_connections(self, ignored=None):
        """Poll until _check_connections passes (up to 200 seconds)."""
        return self.poll(self._check_connections, timeout=200)
class CountingDataUploadable(upload.Data):
bytes_read = 0
@ -999,6 +114,7 @@ class CountingDataUploadable(upload.Data):
self.interrupt_after_d.callback(self)
return upload.Data.read(self, length)
class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
timeout = 180
@ -2632,18 +1748,16 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
newargs = ["--node-directory", self.getdir("client0"), verb] + list(args)
return self.run_bintahoe(newargs, stdin=stdin, env=env)
def _check_succeeded(res, check_stderr=True):
def _check_succeeded(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0, str(res))
if check_stderr:
self.assertIn(err.strip(), (b"", PYTHON_3_WARNING.encode("ascii")))
d.addCallback(_run_in_subprocess, "create-alias", "newalias")
d.addCallback(_check_succeeded)
STDIN_DATA = b"This is the file to upload from stdin."
d.addCallback(_run_in_subprocess, "put", "-", "newalias:tahoe-file", stdin=STDIN_DATA)
d.addCallback(_check_succeeded, check_stderr=False)
d.addCallback(_check_succeeded)
def _mv_with_http_proxy(ign):
env = os.environ
@ -2656,7 +1770,6 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
def _check_ls(res):
out, err, rc_or_sig = res
self.failUnlessEqual(rc_or_sig, 0, str(res))
self.assertIn(err.strip(), (b"", PYTHON_3_WARNING.encode("ascii")))
self.failUnlessIn(b"tahoe-moved", out)
self.failIfIn(b"tahoe-file", out)
d.addCallback(_check_ls)

View File

@ -122,7 +122,15 @@ class SetDEPMixin(object):
}
self.node.encoding_params = p
# This doesn't actually implement the whole interface, but adding a commented
# interface implementation annotation for grepping purposes.
#@implementer(RIStorageServer)
class FakeStorageServer(object):
"""
A fake Foolscap remote object, implemented by overriding callRemote() to
call local methods.
"""
def __init__(self, mode, reactor=None):
self.mode = mode
self.allocated = []

View File

@ -15,9 +15,14 @@ from os.path import join
from bs4 import BeautifulSoup
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.internet import defer
from testtools.twistedsupport import succeeded
from ..common import (
SyncTestCase,
AsyncTestCase,
)
from foolscap.api import (
fireEventually,
@ -53,6 +58,11 @@ from ..common_web import (
render,
)
from testtools.matchers import (
Equals,
AfterPreprocessing,
)
@defer.inlineCallbacks
def create_introducer_webish(reactor, port_assigner, basedir):
@ -86,11 +96,10 @@ def create_introducer_webish(reactor, port_assigner, basedir):
yield fireEventually(None)
intro_node.startService()
defer.returnValue((intro_node, ws))
class IntroducerWeb(unittest.TestCase):
class IntroducerWeb(AsyncTestCase):
"""
Tests for web-facing functionality of an introducer node.
"""
@ -102,6 +111,7 @@ class IntroducerWeb(unittest.TestCase):
# Anything using Foolscap leaves some timer trash in the reactor that
# we have to arrange to have cleaned up.
self.addCleanup(lambda: flushEventualQueue(None))
return super(IntroducerWeb, self).setUp()
@defer.inlineCallbacks
def test_welcome(self):
@ -187,7 +197,7 @@ class IntroducerWeb(unittest.TestCase):
self.assertEqual(data["announcement_summary"], {})
class IntroducerRootTests(unittest.TestCase):
class IntroducerRootTests(SyncTestCase):
"""
Tests for ``IntroducerRoot``.
"""
@ -223,15 +233,11 @@ class IntroducerRootTests(unittest.TestCase):
)
resource = IntroducerRoot(introducer_node)
response = json.loads(
self.successResultOf(
render(resource, {b"t": [b"json"]}),
),
)
self.assertEqual(
response = render(resource, {b"t": [b"json"]})
expected = {
u"subscription_summary": {"arbitrary": 2},
u"announcement_summary": {"arbitrary": 1},
}
self.assertThat(
response,
{
u"subscription_summary": {"arbitrary": 2},
u"announcement_summary": {"arbitrary": 1},
},
)
succeeded(AfterPreprocessing(json.loads, Equals(expected))))

View File

@ -1,311 +0,0 @@
"""
Track the port to Python 3.
At this point all unit tests have been ported to Python 3, so you can just run
them normally.
This module has been ported to Python 3.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
PORTED_INTEGRATION_TESTS = [
"integration.test_aaa_aardvark",
"integration.test_servers_of_happiness",
"integration.test_sftp",
"integration.test_streaming_logs",
"integration.test_tor",
"integration.test_web",
]
PORTED_INTEGRATION_MODULES = [
"integration",
"integration.conftest",
"integration.util",
]
# Keep these sorted alphabetically, to reduce merge conflicts:
PORTED_MODULES = [
"allmydata",
"allmydata.__main__",
"allmydata._auto_deps",
"allmydata._monkeypatch",
"allmydata.blacklist",
"allmydata.check_results",
"allmydata.client",
"allmydata.codec",
"allmydata.control",
"allmydata.crypto",
"allmydata.crypto.aes",
"allmydata.crypto.ed25519",
"allmydata.crypto.error",
"allmydata.crypto.rsa",
"allmydata.crypto.util",
"allmydata.deep_stats",
"allmydata.dirnode",
"allmydata.frontends",
"allmydata.frontends.auth",
"allmydata.frontends.sftpd",
"allmydata.hashtree",
"allmydata.history",
"allmydata.immutable",
"allmydata.immutable.checker",
"allmydata.immutable.downloader",
"allmydata.immutable.downloader.common",
"allmydata.immutable.downloader.fetcher",
"allmydata.immutable.downloader.finder",
"allmydata.immutable.downloader.node",
"allmydata.immutable.downloader.segmentation",
"allmydata.immutable.downloader.share",
"allmydata.immutable.downloader.status",
"allmydata.immutable.encode",
"allmydata.immutable.filenode",
"allmydata.immutable.happiness_upload",
"allmydata.immutable.layout",
"allmydata.immutable.literal",
"allmydata.immutable.offloaded",
"allmydata.immutable.repairer",
"allmydata.immutable.upload",
"allmydata.interfaces",
"allmydata.introducer",
"allmydata.introducer.client",
"allmydata.introducer.common",
"allmydata.introducer.interfaces",
"allmydata.introducer.server",
"allmydata.monitor",
"allmydata.mutable",
"allmydata.mutable.checker",
"allmydata.mutable.common",
"allmydata.mutable.filenode",
"allmydata.mutable.layout",
"allmydata.mutable.publish",
"allmydata.mutable.repairer",
"allmydata.mutable.retrieve",
"allmydata.mutable.servermap",
"allmydata.node",
"allmydata.nodemaker",
"allmydata.scripts",
"allmydata.scripts.admin",
"allmydata.scripts.backupdb",
"allmydata.scripts.cli",
"allmydata.scripts.common_http",
"allmydata.scripts.common",
"allmydata.scripts.create_node",
"allmydata.scripts.debug",
"allmydata.scripts.default_nodedir",
"allmydata.scripts.runner",
"allmydata.scripts.slow_operation",
"allmydata.scripts.tahoe_add_alias",
"allmydata.scripts.tahoe_backup",
"allmydata.scripts.tahoe_check",
"allmydata.scripts.tahoe_cp",
"allmydata.scripts.tahoe_get",
"allmydata.scripts.tahoe_invite",
"allmydata.scripts.tahoe_ls",
"allmydata.scripts.tahoe_manifest",
"allmydata.scripts.tahoe_mkdir",
"allmydata.scripts.tahoe_mv",
"allmydata.scripts.tahoe_put",
"allmydata.scripts.tahoe_run",
"allmydata.scripts.tahoe_status",
"allmydata.scripts.tahoe_unlink",
"allmydata.scripts.tahoe_webopen",
"allmydata.scripts.types_",
"allmydata.stats",
"allmydata.storage_client",
"allmydata.storage",
"allmydata.storage.common",
"allmydata.storage.crawler",
"allmydata.storage.expirer",
"allmydata.storage.immutable",
"allmydata.storage.lease",
"allmydata.storage.mutable",
"allmydata.storage.server",
"allmydata.storage.shares",
"allmydata.test",
"allmydata.test.cli",
"allmydata.test.cli.common",
"allmydata.test.cli_node_api",
"allmydata.test.common",
"allmydata.test.common_util",
"allmydata.test.common_web",
"allmydata.test.eliotutil",
"allmydata.test.no_network",
"allmydata.test.matchers",
"allmydata.test.mutable",
"allmydata.test.mutable.util",
"allmydata.test.storage_plugin",
"allmydata.test.strategies",
"allmydata.test.web",
"allmydata.test.web.common",
"allmydata.test.web.matchers",
"allmydata.testing",
"allmydata.testing.web",
"allmydata.unknown",
"allmydata.uri",
"allmydata.util",
"allmydata.util._python3",
"allmydata.util.abbreviate",
"allmydata.util.assertutil",
"allmydata.util.base32",
"allmydata.util.base62",
"allmydata.util.configutil",
"allmydata.util.connection_status",
"allmydata.util.consumer",
"allmydata.util.dbutil",
"allmydata.util.deferredutil",
"allmydata.util.dictutil",
"allmydata.util.eliotutil",
"allmydata.util.encodingutil",
"allmydata.util.fileutil",
"allmydata.util.gcutil",
"allmydata.util.happinessutil",
"allmydata.util.hashutil",
"allmydata.util.humanreadable",
"allmydata.util.i2p_provider",
"allmydata.util.idlib",
"allmydata.util.iputil",
"allmydata.util.jsonbytes",
"allmydata.util.log",
"allmydata.util.mathutil",
"allmydata.util.namespace",
"allmydata.util.netstring",
"allmydata.util.observer",
"allmydata.util.pipeline",
"allmydata.util.pollmixin",
"allmydata.util.rrefutil",
"allmydata.util.spans",
"allmydata.util.statistics",
"allmydata.util.time_format",
"allmydata.util.tor_provider",
"allmydata.util.yamlutil",
"allmydata.web",
"allmydata.web.check_results",
"allmydata.web.common",
"allmydata.web.directory",
"allmydata.web.filenode",
"allmydata.web.info",
"allmydata.web.introweb",
"allmydata.web.logs",
"allmydata.web.operations",
"allmydata.web.private",
"allmydata.web.root",
"allmydata.web.status",
"allmydata.web.storage",
"allmydata.web.storage_plugins",
"allmydata.web.unlinked",
"allmydata.webish",
"allmydata.windows",
]
PORTED_TEST_MODULES = [
"allmydata.test.cli.test_alias",
"allmydata.test.cli.test_backup",
"allmydata.test.cli.test_backupdb",
"allmydata.test.cli.test_check",
"allmydata.test.cli.test_cli",
"allmydata.test.cli.test_cp",
"allmydata.test.cli.test_create",
"allmydata.test.cli.test_create_alias",
"allmydata.test.cli.test_invite",
"allmydata.test.cli.test_list",
"allmydata.test.cli.test_mv",
"allmydata.test.cli.test_put",
"allmydata.test.cli.test_run",
"allmydata.test.cli.test_status",
"allmydata.test.mutable.test_checker",
"allmydata.test.mutable.test_datahandle",
"allmydata.test.mutable.test_different_encoding",
"allmydata.test.mutable.test_exceptions",
"allmydata.test.mutable.test_filehandle",
"allmydata.test.mutable.test_filenode",
"allmydata.test.mutable.test_interoperability",
"allmydata.test.mutable.test_multiple_encodings",
"allmydata.test.mutable.test_multiple_versions",
"allmydata.test.mutable.test_problems",
"allmydata.test.mutable.test_repair",
"allmydata.test.mutable.test_roundtrip",
"allmydata.test.mutable.test_servermap",
"allmydata.test.mutable.test_update",
"allmydata.test.mutable.test_version",
"allmydata.test.test_abbreviate",
"allmydata.test.test_auth",
"allmydata.test.test_base32",
"allmydata.test.test_base62",
"allmydata.test.test_checker",
"allmydata.test.test_client",
"allmydata.test.test_codec",
"allmydata.test.test_common_util",
"allmydata.test.test_configutil",
"allmydata.test.test_connections",
"allmydata.test.test_connection_status",
"allmydata.test.test_consumer",
"allmydata.test.test_crawler",
"allmydata.test.test_crypto",
"allmydata.test.test_deepcheck",
"allmydata.test.test_deferredutil",
"allmydata.test.test_dictutil",
"allmydata.test.test_dirnode",
"allmydata.test.test_download",
"allmydata.test.test_eliotutil",
"allmydata.test.test_encode",
"allmydata.test.test_encodingutil",
"allmydata.test.test_filenode",
"allmydata.test.test_happiness",
"allmydata.test.test_hashtree",
"allmydata.test.test_hashutil",
"allmydata.test.test_helper",
"allmydata.test.test_humanreadable",
"allmydata.test.test_hung_server",
"allmydata.test.test_i2p_provider",
"allmydata.test.test_immutable",
"allmydata.test.test_introducer",
"allmydata.test.test_iputil",
"allmydata.test.test_json_metadata",
"allmydata.test.test_log",
"allmydata.test.test_monitor",
"allmydata.test.test_multi_introducers",
"allmydata.test.test_netstring",
"allmydata.test.test_no_network",
"allmydata.test.test_node",
"allmydata.test.test_observer",
"allmydata.test.test_pipeline",
"allmydata.test.test_python2_regressions",
"allmydata.test.test_python3",
"allmydata.test.test_repairer",
"allmydata.test.test_runner",
"allmydata.test.test_sftp",
"allmydata.test.test_spans",
"allmydata.test.test_statistics",
"allmydata.test.test_stats",
"allmydata.test.test_storage",
"allmydata.test.test_storage_client",
"allmydata.test.test_storage_web",
"allmydata.test.test_system",
"allmydata.test.test_testing",
"allmydata.test.test_time_format",
"allmydata.test.test_tor_provider",
"allmydata.test.test_upload",
"allmydata.test.test_uri",
"allmydata.test.test_util",
"allmydata.test.web.test_common",
"allmydata.test.web.test_grid",
"allmydata.test.web.test_introducer",
"allmydata.test.web.test_logs",
"allmydata.test.web.test_private",
"allmydata.test.web.test_root",
"allmydata.test.web.test_status",
"allmydata.test.web.test_util",
"allmydata.test.web.test_web",
"allmydata.test.web.test_webish",
"allmydata.test.test_windows",
]

View File

@ -127,7 +127,7 @@ def argv_to_abspath(s, **kwargs):
return abspath_expanduser_unicode(decoded, **kwargs)
def unicode_to_argv(s, mangle=False):
def unicode_to_argv(s):
"""
Make the given unicode string suitable for use in an argv list.

View File

@ -14,6 +14,7 @@ from past.builtins import long
import itertools
import hashlib
import re
from twisted.internet import defer
from twisted.python.filepath import FilePath
from twisted.web.resource import Resource
@ -1551,6 +1552,37 @@ class Statistics(MultiFormatResource):
req.setHeader("content-type", "text/plain")
return json.dumps(stats, indent=1) + "\n"
@render_exception
def render_OPENMETRICS(self, req):
    """
    Render our stats in `OpenMetrics <https://openmetrics.io/>`_ format.
    For example Prometheus and Victoriametrics can parse this.
    Point the scraper to ``/statistics?t=openmetrics`` (instead of the
    default ``/metrics``).

    :param req: the Twisted web request; the content-type header is set
        to the OpenMetrics media type on it.
    :return: a unicode string, one ``name value`` sample per line,
        terminated by the mandatory ``# EOF`` marker.
    """
    req.setHeader("content-type", "application/openmetrics-text; version=1.0.0; charset=utf-8")
    stats = self._provider.get_stats()
    ret = []

    def mangle_name(name):
        # Make a Tahoe stat name legal for OpenMetrics: dots become
        # underscores, and a percentile suffix such as "_01_0_percentile"
        # becomes a quantile label, e.g. '{quantile="0.010"}'.
        # NOTE: raw strings are required here -- "\d" and "\g" in plain
        # string literals are invalid escape sequences (DeprecationWarning
        # on Python 3.6+, destined to become a SyntaxError).
        return re.sub(
            r"_(\d\d)_(\d)_percentile",
            r'{quantile="0.\g<1>\g<2>"}',
            name.replace(u".", u"_")
        )

    def mangle_value(val):
        # OpenMetrics samples must carry a value; represent missing
        # measurements as NaN rather than omitting the sample.
        return str(val) if val is not None else u"NaN"

    for (k, v) in sorted(stats['counters'].items()):
        ret.append(u"tahoe_counters_%s %s" % (mangle_name(k), mangle_value(v)))
    for (k, v) in sorted(stats['stats'].items()):
        ret.append(u"tahoe_stats_%s %s" % (mangle_name(k), mangle_value(v)))

    # The exposition format requires the stream to end with "# EOF".
    ret.append(u"# EOF\n")

    return u"\n".join(ret)
class StatisticsElement(Element):
loader = XMLFile(FilePath(__file__).sibling("statistics.xhtml"))

View File

@ -188,7 +188,17 @@ def initialize():
# for example, the Python interpreter or any options passed to it, or runner
# scripts such as 'coverage run'. It works even if there are no such arguments,
# as in the case of a frozen executable created by bb-freeze or similar.
sys.argv = argv[-len(sys.argv):]
#
# Also, modify sys.argv in place. If any code has already taken a
# reference to the original argument list object then this ensures that
# code sees the new values. This reliance on mutation of shared state is,
# of course, awful. Why does this function even modify sys.argv? Why not
# have a function that *returns* the properly initialized argv as a new
# list? I don't know.
#
# At least Python 3 gets sys.argv correct so before very much longer we
# should be able to fix this bad design by deleting it.
sys.argv[:] = argv[-len(sys.argv):]
def a_console(handle):

View File

@ -12,6 +12,11 @@
"~",
]
[[tool.towncrier.type]]
directory = "security"
name = "Security-related Changes"
showcontent = true
[[tool.towncrier.type]]
directory = "incompat"
name = "Backwards Incompatible Changes"

11
tox.ini
View File

@ -74,6 +74,8 @@ commands =
tahoe --version
python -c "import sys; print('sys.stdout.encoding:', sys.stdout.encoding)"
# Run tests with -b to catch bugs like `"%s" % (some_bytes,)`. -b makes
# Python emit BytesWarnings, and warnings configuration in
# src/allmydata/tests/__init__.py turns allmydata's BytesWarnings into
@ -114,13 +116,6 @@ commands =
[testenv:codechecks]
basepython = python2.7
setenv =
# Workaround an error when towncrier is run under the VCS hook,
# https://stackoverflow.com/a/4027726/624787:
# File "/home/rpatterson/src/work/sfu/tahoe-lafs/.tox/codechecks/lib/python2.7/site-packages/towncrier/check.py", line 44, in __main
# .decode(getattr(sys.stdout, "encoding", "utf8"))
# `TypeError: decode() argument 1 must be string, not None`
PYTHONIOENCODING=utf_8
# If no positional arguments are given, try to run the checks on the
# entire codebase, including various pieces of supporting code.
DEFAULT_FILES=src integration static misc setup.py
@ -190,7 +185,7 @@ passenv = TAHOE_LAFS_* PIP_* SUBUNITREPORTER_* USERPROFILE HOMEDRIVE HOMEPATH HO
whitelist_externals =
git
deps =
# see comment in [testenv] about "certifi"
# see comment in [testenv] about "certifi"
certifi
towncrier==21.3.0
commands =