Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git, synced 2025-01-18 02:40:07 +00:00.

Merge remote-tracking branch 'origin/master' into 3956-mutable-uploads

Commit: 7c506057b5
@@ -15,16 +15,11 @@ workflows:
ci:
jobs:
# Start with jobs testing various platforms.
- "debian-10":
{}
- "debian-11":
{}

- "ubuntu-20-04":
{}
- "ubuntu-18-04":
requires:
- "ubuntu-20-04"

# Equivalent to RHEL 8; CentOS 8 is dead.
- "oraclelinux-8":
@@ -85,11 +80,7 @@ workflows:
# Contexts are managed in the CircleCI web interface:
#
# https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts
- "build-image-debian-10": &DOCKERHUB_CONTEXT
context: "dockerhub-auth"
- "build-image-debian-11":
<<: *DOCKERHUB_CONTEXT
- "build-image-ubuntu-18-04":
- "build-image-debian-11": &DOCKERHUB_CONTEXT
<<: *DOCKERHUB_CONTEXT
- "build-image-ubuntu-20-04":
<<: *DOCKERHUB_CONTEXT
@@ -167,12 +158,7 @@ jobs:
command: |
dist/Tahoe-LAFS/tahoe --version

debian-10: &DEBIAN
docker:
- <<: *DOCKERHUB_AUTH
image: "tahoelafsci/debian:10-py3.7"
user: "nobody"

debian-11: &DEBIAN
environment: &UTF_8_ENVIRONMENT
# In general, the test suite is not allowed to fail while the job
# succeeds. But you can set this to "yes" if you want it to be
@@ -184,7 +170,7 @@ jobs:
# filenames and argv).
LANG: "en_US.UTF-8"
# Select a tox environment to run for this job.
TAHOE_LAFS_TOX_ENVIRONMENT: "py37"
TAHOE_LAFS_TOX_ENVIRONMENT: "py39"
# Additional arguments to pass to tox.
TAHOE_LAFS_TOX_ARGS: ""
# The path in which test artifacts will be placed.
@@ -252,15 +238,11 @@ jobs:
/tmp/venv/bin/codecov
fi

debian-11:
<<: *DEBIAN
docker:
- <<: *DOCKERHUB_AUTH
image: "tahoelafsci/debian:11-py3.9"
user: "nobody"
environment:
<<: *UTF_8_ENVIRONMENT
TAHOE_LAFS_TOX_ENVIRONMENT: "py39"

# Restore later using PyPy3.8
# pypy27-buster:
@@ -312,22 +294,6 @@ jobs:
- run: *SETUP_VIRTUALENV
- run: *RUN_TESTS

ubuntu-18-04: &UBUNTU_18_04
<<: *DEBIAN
docker:
- <<: *DOCKERHUB_AUTH
image: "tahoelafsci/ubuntu:18.04-py3.7"
user: "nobody"

environment:
<<: *UTF_8_ENVIRONMENT
# The default trial args include --rterrors which is incompatible with
# this reporter on Python 3. So drop that and just specify the
# reporter.
TAHOE_LAFS_TRIAL_ARGS: "--reporter=subunitv2-file"
TAHOE_LAFS_TOX_ENVIRONMENT: "py37"

ubuntu-20-04:
<<: *DEBIAN
docker:
@@ -380,7 +346,7 @@ jobs:
docker:
# Run in a highly Nix-capable environment.
- <<: *DOCKERHUB_AUTH
image: "nixos/nix:2.3.16"
image: "nixos/nix:2.10.3"

environment:
# CACHIX_AUTH_TOKEN is manually set in the CircleCI web UI and
@@ -390,27 +356,21 @@ jobs:

steps:
- "run":
# The nixos/nix image does not include ssh. Install it so the
# `checkout` step will succeed. We also want cachix for
# Nix-friendly caching.
# Get cachix for Nix-friendly caching.
name: "Install Basic Dependencies"
command: |
NIXPKGS="https://github.com/nixos/nixpkgs/archive/nixos-<<parameters.nixpkgs>>.tar.gz"
nix-env \
--file https://github.com/nixos/nixpkgs/archive/nixos-<<parameters.nixpkgs>>.tar.gz \
--file $NIXPKGS \
--install \
-A openssh cachix bash
-A cachix bash
# Activate it for "binary substitution". This sets up
# configuration tht lets Nix download something from the cache
# instead of building it locally, if possible.
cachix use "${CACHIX_NAME}"

- "checkout"

- run:
name: "Cachix setup"
# Record the store paths that exist before we did much. There's no
# reason to cache these, they're either in the image or have to be
# retrieved before we can use cachix to restore from cache.
command: |
cachix use "${CACHIX_NAME}"
nix path-info --all > /tmp/store-path-pre-build

- "run":
# The Nix package doesn't know how to do this part, unfortunately.
name: "Generate version"
@@ -432,55 +392,26 @@ jobs:
# build a couple simple little dependencies that don't take
# advantage of multiple cores and we get a little speedup by doing
# them in parallel.
nix-build --cores 3 --max-jobs 2 --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>"
source .circleci/lib.sh
cache_if_able nix-build \
--cores 3 \
--max-jobs 2 \
--argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>"

- "run":
name: "Test"
command: |
# Let it go somewhat wild for the test suite itself
nix-build --cores 8 --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" tests.nix

- run:
# Send any new store objects to cachix.
name: "Push to Cachix"
when: "always"
command: |
# Cribbed from
# https://circleci.com/blog/managing-secrets-when-you-have-pull-requests-from-outside-contributors/
if [ -n "$CIRCLE_PR_NUMBER" ]; then
# I'm sure you're thinking "CIRCLE_PR_NUMBER must just be the
# number of the PR being built". Sorry, dear reader, you have
# guessed poorly. It is also conditionally set based on whether
# this is a PR from a fork or not.
#
# https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables
echo "Skipping Cachix push for forked PR."
else
# If this *isn't* a build from a fork then we have the Cachix
# write key in our environment and we can push any new objects
# to Cachix.
#
# To decide what to push, we inspect the list of store objects
# that existed before and after we did most of our work. Any
# that are new after the work is probably a useful thing to have
# around so push it to the cache. We exclude all derivation
# objects (.drv files) because they're cheap to reconstruct and
# by the time you know their cache key you've already done all
# the work anyway.
#
# This shell expression for finding the objects and pushing them
# was from the Cachix docs:
#
# https://docs.cachix.org/continuous-integration-setup/circleci.html
#
# but they seem to have removed it now.
bash -c "comm -13 <(sort /tmp/store-path-pre-build | grep -v '\.drv$') <(nix path-info --all | grep -v '\.drv$' | sort) | cachix push $CACHIX_NAME"
fi
source .circleci/lib.sh
cache_if_able nix-build \
--cores 8 \
--argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
tests.nix

typechecks:
docker:
- <<: *DOCKERHUB_AUTH
image: "tahoelafsci/ubuntu:18.04-py3.7"
image: "tahoelafsci/ubuntu:20.04-py3.9"

steps:
- "checkout"
@@ -492,7 +423,7 @@ jobs:
docs:
docker:
- <<: *DOCKERHUB_AUTH
image: "tahoelafsci/ubuntu:18.04-py3.7"
image: "tahoelafsci/ubuntu:20.04-py3.9"

steps:
- "checkout"
@@ -543,15 +474,6 @@ jobs:
docker push tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION}

build-image-debian-10:
<<: *BUILD_IMAGE

environment:
DISTRO: "debian"
TAG: "10"
PYTHON_VERSION: "3.7"

build-image-debian-11:
<<: *BUILD_IMAGE

@@ -560,14 +482,6 @@ jobs:
TAG: "11"
PYTHON_VERSION: "3.9"

build-image-ubuntu-18-04:
<<: *BUILD_IMAGE

environment:
DISTRO: "ubuntu"
TAG: "18.04"
PYTHON_VERSION: "3.7"

build-image-ubuntu-20-04:
<<: *BUILD_IMAGE
.circleci/lib.sh (new file, 26 lines)
@@ -0,0 +1,26 @@
# Run a command, enabling cache writes to cachix if possible. The command is
# accepted as a variable number of positional arguments (like argv).
function cache_if_able() {
# The `cachix watch-exec ...` does our cache population. When it sees
# something added to the store (I guess) it pushes it to the named cache.
#
# We can only *push* to it if we have a CACHIX_AUTH_TOKEN, though.
# in-repo jobs will get this from CircleCI configuration but jobs from
# forks may not.
echo "Building PR from user/org: ${CIRCLE_PROJECT_USERNAME}"
if [ -v CACHIX_AUTH_TOKEN ]; then
echo "Cachix credentials present; will attempt to write to cache."
cachix watch-exec "${CACHIX_NAME}" -- "$@"
else
# If we're building a from a forked repository then we're allowed to
# not have the credentials (but it's also fine if the owner of the
# fork supplied their own).
if [ "${CIRCLE_PROJECT_USERNAME}" == "tahoe-lafs" ]; then
echo "Required credentials (CACHIX_AUTH_TOKEN) are missing."
return 1
else
echo "Cachix credentials missing; will not attempt cache writes."
"$@"
fi
fi
}
.github/workflows/ci.yml (vendored; 42 lines changed)
@@ -48,21 +48,20 @@ jobs:
- windows-latest
- ubuntu-latest
python-version:
- "3.7"
- "3.8"
- "3.9"
- "3.10"
include:
# On macOS don't bother with 3.7-3.8, just to get faster builds.
# On macOS don't bother with 3.8, just to get faster builds.
- os: macos-latest
python-version: "3.9"
- os: macos-latest
python-version: "3.10"
# We only support PyPy on Linux at the moment.
- os: ubuntu-latest
python-version: "pypy-3.7"
- os: ubuntu-latest
python-version: "pypy-3.8"
- os: ubuntu-latest
python-version: "pypy-3.9"

steps:
# See https://github.com/actions/checkout. A fetch-depth of 0
@@ -153,19 +152,18 @@ jobs:
strategy:
fail-fast: false
matrix:
os:
- windows-latest
include:
- os: macos-latest
python-version: "3.9"
force-foolscap: false
- os: windows-latest
python-version: "3.9"
force-foolscap: false
# 22.04 has some issue with Tor at the moment:
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3943
- ubuntu-20.04
python-version:
- 3.7
- 3.9
include:
# On macOS don't bother with 3.7, just to get faster builds.
- os: macos-latest
python-version: 3.9

- os: ubuntu-20.04
python-version: "3.9"
force-foolscap: false
steps:

- name: Install Tor [Ubuntu]
@@ -206,12 +204,24 @@ jobs:
run: python misc/build_helpers/show-tool-versions.py

- name: Run "Python 3 integration tests"
if: "${{ !matrix.force-foolscap }}"
env:
# On macOS this is necessary to ensure unix socket paths for tor
# aren't too long. On Windows tox won't pass it through so it has no
# effect. On Linux it doesn't make a difference one way or another.
TMPDIR: "/tmp"
run: tox -e integration
run: |
tox -e integration

- name: Run "Python 3 integration tests (force Foolscap)"
if: "${{ matrix.force-foolscap }}"
env:
# On macOS this is necessary to ensure unix socket paths for tor
# aren't too long. On Windows tox won't pass it through so it has no
# effect. On Linux it doesn't make a difference one way or another.
TMPDIR: "/tmp"
run: |
tox -e integration -- --force-foolscap integration/

- name: Upload eliot.log in case of failure
uses: actions/upload-artifact@v3
@@ -56,7 +56,7 @@ Once ``tahoe --version`` works, see `How to Run Tahoe-LAFS <docs/running.rst>`__
🐍 Python 2
-----------

Python 3.7 or later is now required.
Python 3.8 or later is required.
If you are still using Python 2.7, use Tahoe-LAFS version 1.17.1.
@@ -29,7 +29,7 @@ in
, pypiData ? sources.pypi-deps-db # the pypi package database snapshot to use
# for dependency resolution

, pythonVersion ? "python37" # a string choosing the python derivation from
, pythonVersion ? "python39" # a string choosing the python derivation from
# nixpkgs to target

, extras ? [ "tor" "i2p" ] # a list of strings identifying tahoe-lafs extras,
@@ -1,15 +1,6 @@
"""
Ported to Python 3.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401

import sys
import shutil
from time import sleep
@@ -66,6 +57,13 @@ def pytest_addoption(parser):
"--coverage", action="store_true", dest="coverage",
help="Collect coverage statistics",
)
parser.addoption(
"--force-foolscap", action="store_true", default=False,
dest="force_foolscap",
help=("If set, force Foolscap only for the storage protocol. " +
"Otherwise HTTP will be used.")
)

@pytest.fixture(autouse=True, scope='session')
def eliot_logging():
@@ -1,14 +1,6 @@
"""
Ported to Python 3.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401

import sys
import time
@@ -300,6 +292,14 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
u'log_gatherer.furl',
flog_gatherer,
)
force_foolscap = request.config.getoption("force_foolscap")
assert force_foolscap in (True, False)
set_config(
config,
'storage',
'force_foolscap',
str(force_foolscap),
)
write_config(FilePath(config_path), config)
created_d.addCallback(created)
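The set_config calls above mirror the new pytest option into the node's configuration file. As a rough illustration only (using the standard library rather than the project's own config helpers, which are not shown here), the written tahoe.cfg ends up with a [storage] section along these lines:

    # Sketch: approximates what set_config/write_config produce for the
    # storage section; the real helpers live in allmydata, not configparser.
    from configparser import ConfigParser

    config = ConfigParser()
    config["storage"] = {"force_foolscap": str(True)}
    with open("tahoe.cfg", "w") as f:
        config.write(f)
    # tahoe.cfg now contains:
    #   [storage]
    #   force_foolscap = True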
newsfragments/3870.minor (new empty file)
newsfragments/3914.minor (new empty file)
newsfragments/3937.minor (new empty file)
newsfragments/3942.minor (new file, 1 line)
@@ -0,0 +1 @@
newsfragments/3947.minor (new empty file)
newsfragments/3953.minor (new empty file)
newsfragments/3954.minor (new empty file)
newsfragments/3964.removed (new file, 1 line)
@@ -0,0 +1 @@
Python 3.7 is no longer supported, and Debian 10 and Ubuntu 18.04 are no longer tested.
setup.py (13 lines changed)
@@ -96,7 +96,9 @@ install_requires = [
# an sftp extra in Tahoe-LAFS, there is no point in having one.
# * Twisted 19.10 introduces Site.getContentFile which we use to get
# temporary upload files placed into a per-node temporary directory.
"Twisted[tls,conch] >= 19.10.0",
# * Twisted 22.8.0 added support for coroutine-returning functions in many
# places (mainly via `maybeDeferred`)
"Twisted[tls,conch] >= 22.8.0",

"PyYAML >= 3.11",

@@ -225,7 +227,7 @@ def run_command(args, cwd=None):
use_shell = sys.platform == "win32"
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd, shell=use_shell)
except EnvironmentError as e: # if this gives a SyntaxError, note that Tahoe-LAFS requires Python 3.7+
except EnvironmentError as e: # if this gives a SyntaxError, note that Tahoe-LAFS requires Python 3.8+
print("Warning: unable to run %r." % (" ".join(args),))
print(e)
return None
@@ -376,8 +378,8 @@ setup(name="tahoe-lafs", # also set in __init__.py
package_dir = {'':'src'},
packages=find_packages('src') + ['allmydata.test.plugins'],
classifiers=trove_classifiers,
# We support Python 3.7 or later. 3.11 is not supported yet.
python_requires=">=3.7, <3.11",
# We support Python 3.8 or later. 3.11 is not supported yet.
python_requires=">=3.8, <3.11",
install_requires=install_requires,
extras_require={
# Duplicate the Twisted pywin32 dependency here. See
@@ -390,9 +392,6 @@ setup(name="tahoe-lafs", # also set in __init__.py
],
"test": [
"flake8",
# On Python 3.7, importlib_metadata v5 breaks flake8.
# https://github.com/python/importlib_metadata/issues/407
"importlib_metadata<5; python_version < '3.8'",
# Pin a specific pyflakes so we don't have different folks
# disagreeing on what is or is not a lint issue. We can bump
# this version from time to time, but we will do it
@@ -12,14 +12,9 @@ on any of their methods.

Ported to Python 3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import annotations

from future.utils import PY2
if PY2:
from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
from functools import partial

from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
@@ -72,20 +67,39 @@ def create_signing_keypair_from_string(private_key_der):

:returns: 2-tuple of (private_key, public_key)
"""
priv_key = load_der_private_key(
load = partial(
load_der_private_key,
private_key_der,
password=None,
backend=default_backend(),
)
if not isinstance(priv_key, rsa.RSAPrivateKey):

try:
# Load it once without the potentially expensive OpenSSL validation
# checks. These have superlinear complexity. We *will* run them just
# below - but first we'll apply our own constant-time checks.
unsafe_priv_key = load(unsafe_skip_rsa_key_validation=True)
except TypeError:
# cryptography<39 does not support this parameter, so just load the
# key with validation...
unsafe_priv_key = load()
# But avoid *reloading* it since that will run the expensive
# validation *again*.
load = lambda: unsafe_priv_key

if not isinstance(unsafe_priv_key, rsa.RSAPrivateKey):
raise ValueError(
"Private Key did not decode to an RSA key"
)
if priv_key.key_size != 2048:
if unsafe_priv_key.key_size != 2048:
raise ValueError(
"Private Key must be 2048 bits"
)
return priv_key, priv_key.public_key()

# Now re-load it with OpenSSL's validation applied.
safe_priv_key = load()

return safe_priv_key, safe_priv_key.public_key()

def der_string_from_signing_key(private_key):
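The key-loading change above relies on a feature-detection idiom: call load_der_private_key with the new unsafe_skip_rsa_key_validation keyword first, and fall back when the installed cryptography release (older than 39) rejects it. A minimal standalone sketch of that same idiom, with hypothetical names, not the project's code:

    def load_key_with_optional_fast_path(loader, der_bytes):
        # Newer cryptography accepts the keyword and skips the costly RSA
        # validation; older versions raise TypeError for the unknown argument,
        # in which case the key is simply loaded (and validated) once.
        try:
            return loader(der_bytes, password=None,
                          unsafe_skip_rsa_key_validation=True)
        except TypeError:
            return loader(der_bytes, password=None)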
@@ -323,6 +323,7 @@ class StorageClient(object):
swissnum = nurl.path[0].encode("ascii")
certificate_hash = nurl.user.encode("ascii")
pool = HTTPConnectionPool(reactor)
pool.maxPersistentPerHost = 20

if cls.TEST_MODE_REGISTER_HTTP_POOL is not None:
cls.TEST_MODE_REGISTER_HTTP_POOL(pool)
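For context on the one-line change above: Twisted's HTTPConnectionPool defaults to two persistent connections per host, so raising maxPersistentPerHost lets the storage client issue more concurrent requests to a single server. A minimal sketch of the same knob outside the project (Agent is shown only to indicate where the pool plugs in; it is not part of this diff):

    from twisted.internet import reactor
    from twisted.web.client import Agent, HTTPConnectionPool

    pool = HTTPConnectionPool(reactor, persistent=True)
    pool.maxPersistentPerHost = 20  # default is 2
    agent = Agent(reactor, pool=pool)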
@@ -117,7 +117,7 @@ def _authorization_decorator(required_secrets):
@wraps(f)
def route(self, request, *args, **kwargs):
if not timing_safe_compare(
request.requestHeaders.getRawHeaders("Authorization", [None])[0].encode(
request.requestHeaders.getRawHeaders("Authorization", [""])[0].encode(
"utf-8"
),
swissnum_auth_header(self._swissnum),
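The only change in this hunk is the default used when the Authorization header is absent: [None] becomes [""]. The practical difference, sketched here with plain dict lookups rather than the Twisted request API: encoding None raises before any comparison happens, while encoding the empty string keeps the request on the normal path and lets the swissnum comparison fail cleanly (the new test_missing_authentication test further below exercises exactly that).

    # Illustration only, not the server's code.
    def first_value(headers, name, default):
        return headers.get(name, default)[0]

    request_headers = {}  # no Authorization header sent

    try:
        first_value(request_headers, "Authorization", [None]).encode("utf-8")
    except AttributeError:
        print("old default: AttributeError instead of a clean 401")

    value = first_value(request_headers, "Authorization", [""]).encode("utf-8")
    print("new default compares as", value)  # b'' never matches the swissnum digest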
@@ -1,19 +1,12 @@
"""
Ported to Python 3.
Tests related to the way ``allmydata.mutable`` handles different versions
of data for an object.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401

from io import StringIO
import os
from six.moves import cStringIO as StringIO
from typing import Optional

from twisted.internet import defer
from ..common import AsyncTestCase
from testtools.matchers import (
Equals,
@@ -47,343 +40,268 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
self.small_data = b"test data" * 10 # 90 B; SDMF

def do_upload_mdmf(self, data=None):
async def do_upload_mdmf(self, data: Optional[bytes] = None) -> MutableFileNode:
if data is None:
data = self.data
d = self.nm.create_mutable_file(MutableData(data),
version=MDMF_VERSION)
def _then(n):
self.assertThat(n, IsInstance(MutableFileNode))
self.assertThat(n._protocol_version, Equals(MDMF_VERSION))
self.mdmf_node = n
return n
d.addCallback(_then)
return d
n = await self.nm.create_mutable_file(MutableData(data),
version=MDMF_VERSION)
self.assertThat(n, IsInstance(MutableFileNode))
self.assertThat(n._protocol_version, Equals(MDMF_VERSION))
self.mdmf_node = n
return n

def do_upload_sdmf(self, data=None):
async def do_upload_sdmf(self, data: Optional[bytes] = None) -> MutableFileNode:
if data is None:
data = self.small_data
d = self.nm.create_mutable_file(MutableData(data))
def _then(n):
self.assertThat(n, IsInstance(MutableFileNode))
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
self.sdmf_node = n
return n
d.addCallback(_then)
return d
n = await self.nm.create_mutable_file(MutableData(data))
self.assertThat(n, IsInstance(MutableFileNode))
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
self.sdmf_node = n
return n

def do_upload_empty_sdmf(self):
d = self.nm.create_mutable_file(MutableData(b""))
def _then(n):
self.assertThat(n, IsInstance(MutableFileNode))
self.sdmf_zero_length_node = n
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
return n
d.addCallback(_then)
return d
async def do_upload_empty_sdmf(self) -> MutableFileNode:
n = await self.nm.create_mutable_file(MutableData(b""))
self.assertThat(n, IsInstance(MutableFileNode))
self.sdmf_zero_length_node = n
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
return n

def do_upload(self):
d = self.do_upload_mdmf()
d.addCallback(lambda ign: self.do_upload_sdmf())
return d
async def do_upload(self) -> MutableFileNode:
await self.do_upload_mdmf()
return await self.do_upload_sdmf()
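The rest of this file follows the same mechanical translation seen in the do_upload helpers above: a Deferred callback chain becomes straight-line awaits inside an async test method. A minimal side-by-side sketch of that pattern (hypothetical node object, not the test class itself):

    # Old style: build a Deferred chain and return it.
    def old_get_seqnum(node):
        d = node.get_best_readable_version()
        d.addCallback(lambda bv: bv.get_sequence_number())
        return d

    # New style: await each step directly.
    async def new_get_seqnum(node):
        bv = await node.get_best_readable_version()
        return bv.get_sequence_number()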
def test_debug(self):
d = self.do_upload_mdmf()
def _debug(n):
fso = debug.FindSharesOptions()
storage_index = base32.b2a(n.get_storage_index())
fso.si_s = str(storage_index, "utf-8") # command-line options are unicode on Python 3
fso.nodedirs = [os.path.dirname(abspath_expanduser_unicode(str(storedir)))
for (i,ss,storedir)
in self.iterate_servers()]
fso.stdout = StringIO()
fso.stderr = StringIO()
debug.find_shares(fso)
sharefiles = fso.stdout.getvalue().splitlines()
expected = self.nm.default_encoding_parameters["n"]
self.assertThat(sharefiles, HasLength(expected))
async def test_debug(self) -> None:
n = await self.do_upload_mdmf()
fso = debug.FindSharesOptions()
storage_index = base32.b2a(n.get_storage_index())
fso.si_s = str(storage_index, "utf-8") # command-line options are unicode on Python 3
fso.nodedirs = [os.path.dirname(abspath_expanduser_unicode(str(storedir)))
for (i,ss,storedir)
in self.iterate_servers()]
fso.stdout = StringIO()
fso.stderr = StringIO()
debug.find_shares(fso)
sharefiles = fso.stdout.getvalue().splitlines()
expected = self.nm.default_encoding_parameters["n"]
self.assertThat(sharefiles, HasLength(expected))

do = debug.DumpOptions()
do["filename"] = sharefiles[0]
do.stdout = StringIO()
debug.dump_share(do)
output = do.stdout.getvalue()
lines = set(output.splitlines())
self.assertTrue("Mutable slot found:" in lines, output)
self.assertTrue(" share_type: MDMF" in lines, output)
self.assertTrue(" num_extra_leases: 0" in lines, output)
self.assertTrue(" MDMF contents:" in lines, output)
self.assertTrue(" seqnum: 1" in lines, output)
self.assertTrue(" required_shares: 3" in lines, output)
self.assertTrue(" total_shares: 10" in lines, output)
self.assertTrue(" segsize: 131073" in lines, output)
self.assertTrue(" datalen: %d" % len(self.data) in lines, output)
vcap = str(n.get_verify_cap().to_string(), "utf-8")
self.assertTrue(" verify-cap: %s" % vcap in lines, output)
cso = debug.CatalogSharesOptions()
cso.nodedirs = fso.nodedirs
cso.stdout = StringIO()
cso.stderr = StringIO()
debug.catalog_shares(cso)
shares = cso.stdout.getvalue().splitlines()
oneshare = shares[0] # all shares should be MDMF
self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
self.assertTrue(oneshare.startswith("MDMF"), oneshare)
fields = oneshare.split()
self.assertThat(fields[0], Equals("MDMF"))
self.assertThat(fields[1].encode("ascii"), Equals(storage_index))
self.assertThat(fields[2], Equals("3/10"))
self.assertThat(fields[3], Equals("%d" % len(self.data)))
self.assertTrue(fields[4].startswith("#1:"), fields[3])
# the rest of fields[4] is the roothash, which depends upon
# encryption salts and is not constant. fields[5] is the
# remaining time on the longest lease, which is timing dependent.
# The rest of the line is the quoted pathname to the share.
d.addCallback(_debug)
return d
do = debug.DumpOptions()
do["filename"] = sharefiles[0]
do.stdout = StringIO()
debug.dump_share(do)
output = do.stdout.getvalue()
lines = set(output.splitlines())
self.assertTrue("Mutable slot found:" in lines, output)
self.assertTrue(" share_type: MDMF" in lines, output)
self.assertTrue(" num_extra_leases: 0" in lines, output)
self.assertTrue(" MDMF contents:" in lines, output)
self.assertTrue(" seqnum: 1" in lines, output)
self.assertTrue(" required_shares: 3" in lines, output)
self.assertTrue(" total_shares: 10" in lines, output)
self.assertTrue(" segsize: 131073" in lines, output)
self.assertTrue(" datalen: %d" % len(self.data) in lines, output)
vcap = str(n.get_verify_cap().to_string(), "utf-8")
self.assertTrue(" verify-cap: %s" % vcap in lines, output)
cso = debug.CatalogSharesOptions()
cso.nodedirs = fso.nodedirs
cso.stdout = StringIO()
cso.stderr = StringIO()
debug.catalog_shares(cso)
shares = cso.stdout.getvalue().splitlines()
oneshare = shares[0] # all shares should be MDMF
self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
self.assertTrue(oneshare.startswith("MDMF"), oneshare)
fields = oneshare.split()
self.assertThat(fields[0], Equals("MDMF"))
self.assertThat(fields[1].encode("ascii"), Equals(storage_index))
self.assertThat(fields[2], Equals("3/10"))
self.assertThat(fields[3], Equals("%d" % len(self.data)))
self.assertTrue(fields[4].startswith("#1:"), fields[3])
# the rest of fields[4] is the roothash, which depends upon
# encryption salts and is not constant. fields[5] is the
# remaining time on the longest lease, which is timing dependent.
# The rest of the line is the quoted pathname to the share.
async def test_get_sequence_number(self) -> None:
await self.do_upload()
bv = await self.mdmf_node.get_best_readable_version()
self.assertThat(bv.get_sequence_number(), Equals(1))
bv = await self.sdmf_node.get_best_readable_version()
self.assertThat(bv.get_sequence_number(), Equals(1))

def test_get_sequence_number(self):
d = self.do_upload()
d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
d.addCallback(lambda bv:
self.assertThat(bv.get_sequence_number(), Equals(1)))
d.addCallback(lambda ignored:
self.sdmf_node.get_best_readable_version())
d.addCallback(lambda bv:
self.assertThat(bv.get_sequence_number(), Equals(1)))
# Now update. The sequence number in both cases should be 1 in
# both cases.
def _do_update(ignored):
new_data = MutableData(b"foo bar baz" * 100000)
new_small_data = MutableData(b"foo bar baz" * 10)
d1 = self.mdmf_node.overwrite(new_data)
d2 = self.sdmf_node.overwrite(new_small_data)
dl = gatherResults([d1, d2])
return dl
d.addCallback(_do_update)
d.addCallback(lambda ignored:
self.mdmf_node.get_best_readable_version())
d.addCallback(lambda bv:
self.assertThat(bv.get_sequence_number(), Equals(2)))
d.addCallback(lambda ignored:
self.sdmf_node.get_best_readable_version())
d.addCallback(lambda bv:
self.assertThat(bv.get_sequence_number(), Equals(2)))
return d
new_data = MutableData(b"foo bar baz" * 100000)
new_small_data = MutableData(b"foo bar baz" * 10)
d1 = self.mdmf_node.overwrite(new_data)
d2 = self.sdmf_node.overwrite(new_small_data)
await gatherResults([d1, d2])
bv = await self.mdmf_node.get_best_readable_version()
self.assertThat(bv.get_sequence_number(), Equals(2))
bv = await self.sdmf_node.get_best_readable_version()
self.assertThat(bv.get_sequence_number(), Equals(2))
def test_cap_after_upload(self):
async def test_cap_after_upload(self) -> None:
# If we create a new mutable file and upload things to it, and
# it's an MDMF file, we should get an MDMF cap back from that
# file and should be able to use that.
# That's essentially what MDMF node is, so just check that.
d = self.do_upload_mdmf()
def _then(ign):
mdmf_uri = self.mdmf_node.get_uri()
cap = uri.from_string(mdmf_uri)
self.assertTrue(isinstance(cap, uri.WriteableMDMFFileURI))
readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
cap = uri.from_string(readonly_mdmf_uri)
self.assertTrue(isinstance(cap, uri.ReadonlyMDMFFileURI))
d.addCallback(_then)
return d
await self.do_upload_mdmf()
mdmf_uri = self.mdmf_node.get_uri()
cap = uri.from_string(mdmf_uri)
self.assertTrue(isinstance(cap, uri.WriteableMDMFFileURI))
readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
cap = uri.from_string(readonly_mdmf_uri)
self.assertTrue(isinstance(cap, uri.ReadonlyMDMFFileURI))

def test_mutable_version(self):
async def test_mutable_version(self) -> None:
# assert that getting parameters from the IMutableVersion object
# gives us the same data as getting them from the filenode itself
d = self.do_upload()
d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
def _check_mdmf(bv):
n = self.mdmf_node
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
self.assertFalse(bv.is_readonly())
d.addCallback(_check_mdmf)
d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
def _check_sdmf(bv):
n = self.sdmf_node
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
self.assertFalse(bv.is_readonly())
d.addCallback(_check_sdmf)
return d
await self.do_upload()
bv = await self.mdmf_node.get_best_mutable_version()
n = self.mdmf_node
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
self.assertFalse(bv.is_readonly())

bv = await self.sdmf_node.get_best_mutable_version()
n = self.sdmf_node
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
self.assertFalse(bv.is_readonly())

def test_get_readonly_version(self):
d = self.do_upload()
d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
d.addCallback(lambda bv: self.assertTrue(bv.is_readonly()))
async def test_get_readonly_version(self) -> None:
await self.do_upload()
bv = await self.mdmf_node.get_best_readable_version()
self.assertTrue(bv.is_readonly())

# Attempting to get a mutable version of a mutable file from a
# filenode initialized with a readcap should return a readonly
# version of that same node.
d.addCallback(lambda ign: self.mdmf_node.get_readonly())
d.addCallback(lambda ro: ro.get_best_mutable_version())
d.addCallback(lambda v: self.assertTrue(v.is_readonly()))
ro = self.mdmf_node.get_readonly()
v = await ro.get_best_mutable_version()
self.assertTrue(v.is_readonly())

d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
d.addCallback(lambda bv: self.assertTrue(bv.is_readonly()))
bv = await self.sdmf_node.get_best_readable_version()
self.assertTrue(bv.is_readonly())

d.addCallback(lambda ign: self.sdmf_node.get_readonly())
d.addCallback(lambda ro: ro.get_best_mutable_version())
d.addCallback(lambda v: self.assertTrue(v.is_readonly()))
return d
ro = self.sdmf_node.get_readonly()
v = await ro.get_best_mutable_version()
self.assertTrue(v.is_readonly())
def test_toplevel_overwrite(self):
async def test_toplevel_overwrite(self) -> None:
new_data = MutableData(b"foo bar baz" * 100000)
new_small_data = MutableData(b"foo bar baz" * 10)
d = self.do_upload()
d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
d.addCallback(lambda ignored:
self.mdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Equals(b"foo bar baz" * 100000)))
d.addCallback(lambda ignored:
self.sdmf_node.overwrite(new_small_data))
d.addCallback(lambda ignored:
self.sdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Equals(b"foo bar baz" * 10)))
return d
await self.do_upload()
await self.mdmf_node.overwrite(new_data)
data = await self.mdmf_node.download_best_version()
self.assertThat(data, Equals(b"foo bar baz" * 100000))
await self.sdmf_node.overwrite(new_small_data)
data = await self.sdmf_node.download_best_version()
self.assertThat(data, Equals(b"foo bar baz" * 10))

def test_toplevel_modify(self):
d = self.do_upload()
async def test_toplevel_modify(self) -> None:
await self.do_upload()
def modifier(old_contents, servermap, first_time):
return old_contents + b"modified"
d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
d.addCallback(lambda ignored:
self.mdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Contains(b"modified")))
d.addCallback(lambda ignored:
self.sdmf_node.modify(modifier))
d.addCallback(lambda ignored:
self.sdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Contains(b"modified")))
return d
await self.mdmf_node.modify(modifier)
data = await self.mdmf_node.download_best_version()
self.assertThat(data, Contains(b"modified"))
await self.sdmf_node.modify(modifier)
data = await self.sdmf_node.download_best_version()
self.assertThat(data, Contains(b"modified"))
def test_version_modify(self):
async def test_version_modify(self) -> None:
# TODO: When we can publish multiple versions, alter this test
# to modify a version other than the best usable version, then
# test to see that the best recoverable version is that.
d = self.do_upload()
await self.do_upload()
def modifier(old_contents, servermap, first_time):
return old_contents + b"modified"
d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
d.addCallback(lambda ignored:
self.mdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Contains(b"modified")))
d.addCallback(lambda ignored:
self.sdmf_node.modify(modifier))
d.addCallback(lambda ignored:
self.sdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Contains(b"modified")))
return d
await self.mdmf_node.modify(modifier)
data = await self.mdmf_node.download_best_version()
self.assertThat(data, Contains(b"modified"))
await self.sdmf_node.modify(modifier)
data = await self.sdmf_node.download_best_version()
self.assertThat(data, Contains(b"modified"))
def test_download_version(self):
d = self.publish_multiple()
async def test_download_version(self) -> None:
await self.publish_multiple()
# We want to have two recoverable versions on the grid.
d.addCallback(lambda res:
self._set_versions({0:0,2:0,4:0,6:0,8:0,
1:1,3:1,5:1,7:1,9:1}))
self._set_versions({0:0,2:0,4:0,6:0,8:0,
1:1,3:1,5:1,7:1,9:1})
# Now try to download each version. We should get the plaintext
# associated with that version.
d.addCallback(lambda ignored:
self._fn.get_servermap(mode=MODE_READ))
def _got_servermap(smap):
versions = smap.recoverable_versions()
assert len(versions) == 2
smap = await self._fn.get_servermap(mode=MODE_READ)
versions = smap.recoverable_versions()
assert len(versions) == 2

self.servermap = smap
self.version1, self.version2 = versions
assert self.version1 != self.version2
self.servermap = smap
self.version1, self.version2 = versions
assert self.version1 != self.version2

self.version1_seqnum = self.version1[0]
self.version2_seqnum = self.version2[0]
self.version1_index = self.version1_seqnum - 1
self.version2_index = self.version2_seqnum - 1
self.version1_seqnum = self.version1[0]
self.version2_seqnum = self.version2[0]
self.version1_index = self.version1_seqnum - 1
self.version2_index = self.version2_seqnum - 1

d.addCallback(_got_servermap)
d.addCallback(lambda ignored:
self._fn.download_version(self.servermap, self.version1))
d.addCallback(lambda results:
self.assertThat(self.CONTENTS[self.version1_index],
Equals(results)))
d.addCallback(lambda ignored:
self._fn.download_version(self.servermap, self.version2))
d.addCallback(lambda results:
self.assertThat(self.CONTENTS[self.version2_index],
Equals(results)))
return d
results = await self._fn.download_version(self.servermap, self.version1)
self.assertThat(self.CONTENTS[self.version1_index],
Equals(results))
results = await self._fn.download_version(self.servermap, self.version2)
self.assertThat(self.CONTENTS[self.version2_index],
Equals(results))
def test_download_nonexistent_version(self):
d = self.do_upload_mdmf()
d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
def _set_servermap(servermap):
self.servermap = servermap
d.addCallback(_set_servermap)
d.addCallback(lambda ignored:
self.shouldFail(UnrecoverableFileError, "nonexistent version",
None,
self.mdmf_node.download_version, self.servermap,
"not a version"))
return d
async def test_download_nonexistent_version(self) -> None:
await self.do_upload_mdmf()
servermap = await self.mdmf_node.get_servermap(mode=MODE_WRITE)
await self.shouldFail(UnrecoverableFileError, "nonexistent version",
None,
self.mdmf_node.download_version, servermap,
"not a version")
def _test_partial_read(self, node, expected, modes, step):
d = node.get_best_readable_version()
async def _test_partial_read(self, node, expected, modes, step) -> None:
version = await node.get_best_readable_version()
for (name, offset, length) in modes:
d.addCallback(self._do_partial_read, name, expected, offset, length)
await self._do_partial_read(version, name, expected, offset, length)
# then read the whole thing, but only a few bytes at a time, and see
# that the results are what we expect.
def _read_data(version):
c = consumer.MemoryConsumer()
d2 = defer.succeed(None)
for i in range(0, len(expected), step):
d2.addCallback(lambda ignored, i=i: version.read(c, i, step))
d2.addCallback(lambda ignored:
self.assertThat(expected, Equals(b"".join(c.chunks))))
return d2
d.addCallback(_read_data)
return d

def _do_partial_read(self, version, name, expected, offset, length):
c = consumer.MemoryConsumer()
d = version.read(c, offset, length)
for i in range(0, len(expected), step):
await version.read(c, i, step)
self.assertThat(expected, Equals(b"".join(c.chunks)))

async def _do_partial_read(self, version, name, expected, offset, length) -> None:
c = consumer.MemoryConsumer()
await version.read(c, offset, length)
if length is None:
expected_range = expected[offset:]
else:
expected_range = expected[offset:offset+length]
d.addCallback(lambda ignored: b"".join(c.chunks))
def _check(results):
if results != expected_range:
print("read([%d]+%s) got %d bytes, not %d" % \
(offset, length, len(results), len(expected_range)))
print("got: %s ... %s" % (results[:20], results[-20:]))
print("exp: %s ... %s" % (expected_range[:20], expected_range[-20:]))
self.fail("results[%s] != expected_range" % name)
return version # daisy-chained to next call
d.addCallback(_check)
return d
results = b"".join(c.chunks)
if results != expected_range:
print("read([%d]+%s) got %d bytes, not %d" % \
(offset, length, len(results), len(expected_range)))
print("got: %r ... %r" % (results[:20], results[-20:]))
print("exp: %r ... %r" % (expected_range[:20], expected_range[-20:]))
self.fail("results[%s] != expected_range" % name)
def test_partial_read_mdmf_0(self):
async def test_partial_read_mdmf_0(self) -> None:
data = b""
d = self.do_upload_mdmf(data=data)
result = await self.do_upload_mdmf(data=data)
modes = [("all1", 0,0),
("all2", 0,None),
]
d.addCallback(self._test_partial_read, data, modes, 1)
return d
await self._test_partial_read(result, data, modes, 1)

def test_partial_read_mdmf_large(self):
async def test_partial_read_mdmf_large(self) -> None:
segment_boundary = mathutil.next_multiple(128 * 1024, 3)
modes = [("start_on_segment_boundary", segment_boundary, 50),
("ending_one_byte_after_segment_boundary", segment_boundary-50, 51),
@@ -393,20 +311,18 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
("complete_file1", 0, len(self.data)),
("complete_file2", 0, None),
]
d = self.do_upload_mdmf()
d.addCallback(self._test_partial_read, self.data, modes, 10000)
return d
result = await self.do_upload_mdmf()
await self._test_partial_read(result, self.data, modes, 10000)

def test_partial_read_sdmf_0(self):
async def test_partial_read_sdmf_0(self) -> None:
data = b""
modes = [("all1", 0,0),
("all2", 0,None),
]
d = self.do_upload_sdmf(data=data)
d.addCallback(self._test_partial_read, data, modes, 1)
return d
result = await self.do_upload_sdmf(data=data)
await self._test_partial_read(result, data, modes, 1)

def test_partial_read_sdmf_2(self):
async def test_partial_read_sdmf_2(self) -> None:
data = b"hi"
modes = [("one_byte", 0, 1),
("last_byte", 1, 1),
@@ -414,11 +330,10 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
("complete_file", 0, 2),
("complete_file2", 0, None),
]
d = self.do_upload_sdmf(data=data)
d.addCallback(self._test_partial_read, data, modes, 1)
return d
result = await self.do_upload_sdmf(data=data)
await self._test_partial_read(result, data, modes, 1)

def test_partial_read_sdmf_90(self):
async def test_partial_read_sdmf_90(self) -> None:
modes = [("start_at_middle", 50, 40),
("start_at_middle2", 50, None),
("zero_length_at_start", 0, 0),
@@ -427,11 +342,10 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
("complete_file1", 0, None),
("complete_file2", 0, 90),
]
d = self.do_upload_sdmf()
d.addCallback(self._test_partial_read, self.small_data, modes, 10)
return d
result = await self.do_upload_sdmf()
await self._test_partial_read(result, self.small_data, modes, 10)

def test_partial_read_sdmf_100(self):
async def test_partial_read_sdmf_100(self) -> None:
data = b"test data "*10
modes = [("start_at_middle", 50, 50),
("start_at_middle2", 50, None),
@@ -440,42 +354,30 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
("complete_file1", 0, 100),
("complete_file2", 0, None),
]
d = self.do_upload_sdmf(data=data)
d.addCallback(self._test_partial_read, data, modes, 10)
return d
result = await self.do_upload_sdmf(data=data)
await self._test_partial_read(result, data, modes, 10)
async def _test_read_and_download(self, node, expected) -> None:
version = await node.get_best_readable_version()
c = consumer.MemoryConsumer()
await version.read(c)
self.assertThat(expected, Equals(b"".join(c.chunks)))

def _test_read_and_download(self, node, expected):
d = node.get_best_readable_version()
def _read_data(version):
c = consumer.MemoryConsumer()
c2 = consumer.MemoryConsumer()
d2 = defer.succeed(None)
d2.addCallback(lambda ignored: version.read(c))
d2.addCallback(lambda ignored:
self.assertThat(expected, Equals(b"".join(c.chunks))))
c2 = consumer.MemoryConsumer()
await version.read(c2, offset=0, size=len(expected))
self.assertThat(expected, Equals(b"".join(c2.chunks)))

d2.addCallback(lambda ignored: version.read(c2, offset=0,
size=len(expected)))
d2.addCallback(lambda ignored:
self.assertThat(expected, Equals(b"".join(c2.chunks))))
return d2
d.addCallback(_read_data)
d.addCallback(lambda ignored: node.download_best_version())
d.addCallback(lambda data: self.assertThat(expected, Equals(data)))
return d
data = await node.download_best_version()
self.assertThat(expected, Equals(data))

def test_read_and_download_mdmf(self):
d = self.do_upload_mdmf()
d.addCallback(self._test_read_and_download, self.data)
return d
async def test_read_and_download_mdmf(self) -> None:
result = await self.do_upload_mdmf()
await self._test_read_and_download(result, self.data)

def test_read_and_download_sdmf(self):
d = self.do_upload_sdmf()
d.addCallback(self._test_read_and_download, self.small_data)
return d
async def test_read_and_download_sdmf(self) -> None:
result = await self.do_upload_sdmf()
await self._test_read_and_download(result, self.small_data)

def test_read_and_download_sdmf_zero_length(self):
d = self.do_upload_empty_sdmf()
d.addCallback(self._test_read_and_download, b"")
return d
async def test_read_and_download_sdmf_zero_length(self) -> None:
result = await self.do_upload_empty_sdmf()
await self._test_read_and_download(result, b"")
@@ -37,6 +37,7 @@ from twisted.web import http
from twisted.web.http_headers import Headers
from werkzeug import routing
from werkzeug.exceptions import NotFound as WNotFound
from testtools.matchers import Equals

from .common import SyncTestCase
from ..storage.http_common import get_content_type, CBOR_MIME_TYPE
@@ -555,6 +556,20 @@ class GenericHTTPAPITests(SyncTestCase):
super(GenericHTTPAPITests, self).setUp()
self.http = self.useFixture(HttpTestFixture())

def test_missing_authentication(self) -> None:
"""
If nothing is given in the ``Authorization`` header at all an
``Unauthorized`` response is returned.
"""
client = StubTreq(self.http.http_server.get_resource())
response = self.http.result_of_with_flush(
client.request(
"GET",
"http://127.0.0.1/storage/v1/version",
),
)
self.assertThat(response.code, Equals(http.UNAUTHORIZED))

def test_bad_authentication(self):
"""
If the wrong swissnum is used, an ``Unauthorized`` response code is
@@ -5,7 +5,7 @@ in
{ pkgsVersion ? "nixpkgs-21.11"
, pkgs ? import sources.${pkgsVersion} { }
, pypiData ? sources.pypi-deps-db
, pythonVersion ? "python37"
, pythonVersion ? "python39"
, mach-nix ? import sources.mach-nix {
inherit pkgs pypiData;
python = pythonVersion;
@@ -21,7 +21,7 @@ let
inherit pkgs;
lib = pkgs.lib;
};
tests_require = (mach-lib.extract "python37" ./. "extras_require" ).extras_require.test;
tests_require = (mach-lib.extract "python39" ./. "extras_require" ).extras_require.test;

# Get the Tahoe-LAFS package itself. This does not include test
# requirements and we don't ask for test requirements so that we can just
tox.ini (6 lines changed)
@@ -7,11 +7,9 @@
# the tox-gh-actions package.
[gh-actions]
python =
3.7: py37-coverage,typechecks,codechecks
3.8: py38-coverage
3.9: py39-coverage
3.10: py310-coverage
pypy-3.7: pypy37
pypy-3.8: pypy38
pypy-3.9: pypy39

@@ -19,7 +17,7 @@ python =
twisted = 1

[tox]
envlist = typechecks,codechecks,py{37,38,39,310}-{coverage},pypy27,pypy37,pypy38,pypy39,integration
envlist = typechecks,codechecks,py{38,39,310}-{coverage},pypy27,pypy38,pypy39,integration
minversion = 2.4

[testenv]
@@ -49,8 +47,6 @@ deps =
# regressions in new releases of this package that cause us the kind of
# suffering we're trying to avoid with the above pins.
certifi
# VCS hooks support
py37,!coverage: pre-commit

# We add usedevelop=False because testing against a true installation gives
# more useful results.