Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2025-06-18 23:38:18 +00:00)

Merge branch 'master' into 2916.grid-manager-proposal.6
78  .circleci/circleci.txt  Normal file

@@ -0,0 +1,78 @@
# A master build looks like this:

# BASH_ENV=/tmp/.bash_env-63d018969ca480003a031e62-0-build
# CI=true
# CIRCLECI=true
# CIRCLE_BRANCH=master
# CIRCLE_BUILD_NUM=76545
# CIRCLE_BUILD_URL=https://circleci.com/gh/tahoe-lafs/tahoe-lafs/76545
# CIRCLE_JOB=NixOS 21.11
# CIRCLE_NODE_INDEX=0
# CIRCLE_NODE_TOTAL=1
# CIRCLE_PROJECT_REPONAME=tahoe-lafs
# CIRCLE_PROJECT_USERNAME=tahoe-lafs
# CIRCLE_REPOSITORY_URL=git@github.com:tahoe-lafs/tahoe-lafs.git
# CIRCLE_SHA1=ed0bda2d7456f4a2cd60870072e1fe79864a49a1
# CIRCLE_SHELL_ENV=/tmp/.bash_env-63d018969ca480003a031e62-0-build
# CIRCLE_USERNAME=alice
# CIRCLE_WORKFLOW_ID=6d9bb71c-be3a-4659-bf27-60954180619b
# CIRCLE_WORKFLOW_JOB_ID=0793c975-7b9f-489f-909b-8349b72d2785
# CIRCLE_WORKFLOW_WORKSPACE_ID=6d9bb71c-be3a-4659-bf27-60954180619b
# CIRCLE_WORKING_DIRECTORY=~/project

# A build of an in-repo PR looks like this:

# BASH_ENV=/tmp/.bash_env-63d1971a0298086d8841287e-0-build
# CI=true
# CIRCLECI=true
# CIRCLE_BRANCH=3946-less-chatty-downloads
# CIRCLE_BUILD_NUM=76612
# CIRCLE_BUILD_URL=https://circleci.com/gh/tahoe-lafs/tahoe-lafs/76612
# CIRCLE_JOB=NixOS 21.11
# CIRCLE_NODE_INDEX=0
# CIRCLE_NODE_TOTAL=1
# CIRCLE_PROJECT_REPONAME=tahoe-lafs
# CIRCLE_PROJECT_USERNAME=tahoe-lafs
# CIRCLE_PULL_REQUEST=https://github.com/tahoe-lafs/tahoe-lafs/pull/1251
# CIRCLE_PULL_REQUESTS=https://github.com/tahoe-lafs/tahoe-lafs/pull/1251
# CIRCLE_REPOSITORY_URL=git@github.com:tahoe-lafs/tahoe-lafs.git
# CIRCLE_SHA1=921a2083dcefdb5f431cdac195fc9ac510605349
# CIRCLE_SHELL_ENV=/tmp/.bash_env-63d1971a0298086d8841287e-0-build
# CIRCLE_USERNAME=bob
# CIRCLE_WORKFLOW_ID=5e32c12e-be37-4868-9fa8-6a6929fec2f1
# CIRCLE_WORKFLOW_JOB_ID=316ca408-81b4-4c96-bbdd-644e4c3e01e5
# CIRCLE_WORKFLOW_WORKSPACE_ID=5e32c12e-be37-4868-9fa8-6a6929fec2f1
# CIRCLE_WORKING_DIRECTORY=~/project
# CI_PULL_REQUEST=https://github.com/tahoe-lafs/tahoe-lafs/pull/1251

# A build of a PR from a fork looks like this:

# BASH_ENV=/tmp/.bash_env-63d40f7b2e89cd3de10e0db9-0-build
# CI=true
# CIRCLECI=true
# CIRCLE_BRANCH=pull/1252
# CIRCLE_BUILD_NUM=76678
# CIRCLE_BUILD_URL=https://circleci.com/gh/tahoe-lafs/tahoe-lafs/76678
# CIRCLE_JOB=NixOS 21.05
# CIRCLE_NODE_INDEX=0
# CIRCLE_NODE_TOTAL=1
# CIRCLE_PROJECT_REPONAME=tahoe-lafs
# CIRCLE_PROJECT_USERNAME=tahoe-lafs
# CIRCLE_PR_NUMBER=1252
# CIRCLE_PR_REPONAME=tahoe-lafs
# CIRCLE_PR_USERNAME=carol
# CIRCLE_PULL_REQUEST=https://github.com/tahoe-lafs/tahoe-lafs/pull/1252
# CIRCLE_PULL_REQUESTS=https://github.com/tahoe-lafs/tahoe-lafs/pull/1252
# CIRCLE_REPOSITORY_URL=git@github.com:tahoe-lafs/tahoe-lafs.git
# CIRCLE_SHA1=15c7916e0812e6baa2a931cd54b18f3382a8456e
# CIRCLE_SHELL_ENV=/tmp/.bash_env-63d40f7b2e89cd3de10e0db9-0-build
# CIRCLE_USERNAME=
# CIRCLE_WORKFLOW_ID=19c917c8-3a38-4b20-ac10-3265259fa03e
# CIRCLE_WORKFLOW_JOB_ID=58e95215-eccf-4664-a231-1dba7fd2d323
# CIRCLE_WORKFLOW_WORKSPACE_ID=19c917c8-3a38-4b20-ac10-3265259fa03e
# CIRCLE_WORKING_DIRECTORY=~/project
# CI_PULL_REQUEST=https://github.com/tahoe-lafs/tahoe-lafs/pull/1252

# A build of a PR from a fork where the owner has enabled CircleCI looks
# the same as a build of an in-repo PR, except it runs on the owner's
# CircleCI namespace.
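These observations are what the cache logic in .circleci/lib.sh (added later in this diff) relies on. As a rough sketch only, not part of the committed file, the same variables can be combined to tell an upstream build from a fork PR:

    # Sketch: distinguish an upstream build from a fork PR using the variables above.
    # The committed implementation lives in .circleci/lib.sh (is_upstream).
    if [ "${CIRCLE_PROJECT_USERNAME}" = "tahoe-lafs" ] &&
       ! printf '%s' "${CIRCLE_PULL_REQUESTS:-}" | tr ',' '\n' | grep -q "/${CIRCLE_BRANCH}\$"
    then
        echo "upstream build: cache credentials are expected to be present"
    else
        echo "fork PR: no cache credentials expected"
    fi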
.circleci/config.yml

@@ -11,20 +11,60 @@
#
version: 2.1
+
+# A template that can be shared between the two different image-building
+# workflows.
+.images: &IMAGES
+  jobs:
+    # Every job that pushes a Docker image from Docker Hub needs to provide
+    # credentials.  Use this first job to define a yaml anchor that can be
+    # used to supply a CircleCI job context which makes Docker Hub credentials
+    # available in the environment.
+    #
+    # Contexts are managed in the CircleCI web interface:
+    #
+    #  https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts
+    - "build-image-debian-11": &DOCKERHUB_CONTEXT
+        <<: *DOCKERHUB_CONTEXT
+    - "build-image-ubuntu-20-04":
+        <<: *DOCKERHUB_CONTEXT
+    - "build-image-fedora-35":
+        <<: *DOCKERHUB_CONTEXT
+    - "build-image-oraclelinux-8":
+        <<: *DOCKERHUB_CONTEXT
+    # Restore later as PyPy38
+    #- "build-image-pypy27-buster":
+    #    <<: *DOCKERHUB_CONTEXT
+
+parameters:
+  # Control whether the image-building workflow runs as part of this pipeline.
+  # Generally we do not want this to run because we don't need our
+  # dependencies to move around all the time and because building the image
+  # takes a couple minutes.
+  #
+  # An easy way to trigger a pipeline with this set to true is with the
+  # rebuild-images.sh tool in this directory.  You can also do so via the
+  # CircleCI web UI.
+  build-images:
+    default: false
+    type: "boolean"
+
+  # Control whether the test-running workflow runs as part of this pipeline.
+  # Generally we do want this to run because running the tests is the primary
+  # purpose of this pipeline.
+  run-tests:
+    default: true
+    type: "boolean"
+
workflows:
  ci:
+    when: "<< pipeline.parameters.run-tests >>"
    jobs:
      # Start with jobs testing various platforms.
-      - "debian-10":
-          {}
      - "debian-11":
          {}

      - "ubuntu-20-04":
          {}
-      - "ubuntu-18-04":
-          requires:
-            - "ubuntu-20-04"
-
      # Equivalent to RHEL 8; CentOS 8 is dead.
      - "oraclelinux-8":

@@ -54,6 +94,9 @@ workflows:
          {}

      - "integration":
+          # Run even the slow integration tests here.  We need the `--` to
+          # sneak past tox and get to pytest.
+          tox-args: "-- --runslow integration"
          requires:
            # If the unit test suite doesn't pass, don't bother running the
            # integration tests.

@@ -65,41 +108,10 @@ workflows:
          {}

  images:
-    # Build the Docker images used by the ci jobs.  This makes the ci jobs
-    # faster and takes various spurious failures out of the critical path.
-    triggers:
-      # Build once a day
-      - schedule:
-          cron: "0 0 * * *"
-          filters:
-            branches:
-              only:
-                - "master"
-
-    jobs:
-      # Every job that pushes a Docker image from Docker Hub needs to provide
-      # credentials.  Use this first job to define a yaml anchor that can be
-      # used to supply a CircleCI job context which makes Docker Hub
-      # credentials available in the environment.
-      #
-      # Contexts are managed in the CircleCI web interface:
-      #
-      #  https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts
-      - "build-image-debian-10": &DOCKERHUB_CONTEXT
-          context: "dockerhub-auth"
-      - "build-image-debian-11":
-          <<: *DOCKERHUB_CONTEXT
-      - "build-image-ubuntu-18-04":
-          <<: *DOCKERHUB_CONTEXT
-      - "build-image-ubuntu-20-04":
-          <<: *DOCKERHUB_CONTEXT
-      - "build-image-fedora-35":
-          <<: *DOCKERHUB_CONTEXT
-      - "build-image-oraclelinux-8":
-          <<: *DOCKERHUB_CONTEXT
-      # Restore later as PyPy38
-      #- "build-image-pypy27-buster":
-      #    <<: *DOCKERHUB_CONTEXT
-
+    <<: *IMAGES
+
+    # Build as part of the workflow but only if requested.
+    when: "<< pipeline.parameters.build-images >>"

jobs:

@@ -133,10 +145,10 @@ jobs:
    steps:
      - "checkout"

-      - run:
+      - run: &INSTALL_TOX
          name: "Install tox"
          command: |
-            pip install --user tox
+            pip install --user 'tox~=3.0'

      - run:
          name: "Static-ish code checks"

@@ -152,9 +164,7 @@ jobs:
      - "checkout"

      - run:
-          name: "Install tox"
-          command: |
-            pip install --user tox
+          <<: *INSTALL_TOX

      - run:
          name: "Make PyInstaller executable"

@@ -169,12 +179,7 @@ jobs:
          command: |
            dist/Tahoe-LAFS/tahoe --version

-  debian-10: &DEBIAN
-    docker:
-      - <<: *DOCKERHUB_AUTH
-        image: "tahoelafsci/debian:10-py3.7"
-        user: "nobody"
-
+  debian-11: &DEBIAN
    environment: &UTF_8_ENVIRONMENT
      # In general, the test suite is not allowed to fail while the job
      # succeeds.  But you can set this to "yes" if you want it to be

@@ -186,7 +191,7 @@ jobs:
      # filenames and argv).
      LANG: "en_US.UTF-8"
      # Select a tox environment to run for this job.
-      TAHOE_LAFS_TOX_ENVIRONMENT: "py37"
+      TAHOE_LAFS_TOX_ENVIRONMENT: "py39"
      # Additional arguments to pass to tox.
      TAHOE_LAFS_TOX_ARGS: ""
      # The path in which test artifacts will be placed.

@@ -254,15 +259,11 @@ jobs:
            /tmp/venv/bin/codecov
          fi

-  debian-11:
-    <<: *DEBIAN
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/debian:11-py3.9"
        user: "nobody"
-    environment:
-      <<: *UTF_8_ENVIRONMENT
-      TAHOE_LAFS_TOX_ENVIRONMENT: "py39"

  # Restore later using PyPy3.8
  # pypy27-buster:

@@ -296,6 +297,14 @@ jobs:

  integration:
    <<: *DEBIAN

+    parameters:
+      tox-args:
+        description: >-
+          Additional arguments to pass to the tox command.
+        type: "string"
+        default: ""
+
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/debian:11-py3.9"

@@ -308,28 +317,15 @@ jobs:
      # Disable artifact collection because py.test can't produce any.
      ARTIFACTS_OUTPUT_PATH: ""

+      # Pass on anything we got in our parameters.
+      TAHOE_LAFS_TOX_ARGS: "<< parameters.tox-args >>"
+
    steps:
      - "checkout"
      # DRY, YAML-style.  See the debian-9 steps.
      - run: *SETUP_VIRTUALENV
      - run: *RUN_TESTS

-  ubuntu-18-04: &UBUNTU_18_04
-    <<: *DEBIAN
-    docker:
-      - <<: *DOCKERHUB_AUTH
-        image: "tahoelafsci/ubuntu:18.04-py3.7"
-        user: "nobody"
-
-    environment:
-      <<: *UTF_8_ENVIRONMENT
-      # The default trial args include --rterrors which is incompatible with
-      # this reporter on Python 3.  So drop that and just specify the
-      # reporter.
-      TAHOE_LAFS_TRIAL_ARGS: "--reporter=subunitv2-file"
-      TAHOE_LAFS_TOX_ENVIRONMENT: "py37"
-
  ubuntu-20-04:
    <<: *DEBIAN
    docker:

@@ -382,7 +378,7 @@ jobs:
    docker:
      # Run in a highly Nix-capable environment.
      - <<: *DOCKERHUB_AUTH
-        image: "nixos/nix:2.3.16"
+        image: "nixos/nix:2.10.3"

    environment:
      # CACHIX_AUTH_TOKEN is manually set in the CircleCI web UI and

@@ -392,27 +388,21 @@ jobs:

    steps:
      - "run":
-          # The nixos/nix image does not include ssh.  Install it so the
-          # `checkout` step will succeed.  We also want cachix for
-          # Nix-friendly caching.
+          # Get cachix for Nix-friendly caching.
          name: "Install Basic Dependencies"
          command: |
+            NIXPKGS="https://github.com/nixos/nixpkgs/archive/nixos-<<parameters.nixpkgs>>.tar.gz"
            nix-env \
-              --file https://github.com/nixos/nixpkgs/archive/nixos-<<parameters.nixpkgs>>.tar.gz \
+              --file $NIXPKGS \
              --install \
-              -A openssh cachix bash
+              -A cachix bash
+            # Activate it for "binary substitution".  This sets up
+            # configuration that lets Nix download something from the cache
+            # instead of building it locally, if possible.
+            cachix use "${CACHIX_NAME}"

      - "checkout"

-      - run:
-          name: "Cachix setup"
-          # Record the store paths that exist before we did much.  There's no
-          # reason to cache these, they're either in the image or have to be
-          # retrieved before we can use cachix to restore from cache.
-          command: |
-            cachix use "${CACHIX_NAME}"
-            nix path-info --all > /tmp/store-path-pre-build
-
      - "run":
          # The Nix package doesn't know how to do this part, unfortunately.
          name: "Generate version"

@@ -434,55 +424,26 @@ jobs:
            # build a couple simple little dependencies that don't take
            # advantage of multiple cores and we get a little speedup by doing
            # them in parallel.
-            nix-build --cores 3 --max-jobs 2 --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>"
+            source .circleci/lib.sh
+            cache_if_able nix-build \
+              --cores 3 \
+              --max-jobs 2 \
+              --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>"

      - "run":
          name: "Test"
          command: |
            # Let it go somewhat wild for the test suite itself
-            nix-build --cores 8 --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" tests.nix
-
-      - run:
-          # Send any new store objects to cachix.
-          name: "Push to Cachix"
-          when: "always"
-          command: |
-            # Cribbed from
-            # https://circleci.com/blog/managing-secrets-when-you-have-pull-requests-from-outside-contributors/
-            if [ -n "$CIRCLE_PR_NUMBER" ]; then
-              # I'm sure you're thinking "CIRCLE_PR_NUMBER must just be the
-              # number of the PR being built".  Sorry, dear reader, you have
-              # guessed poorly.  It is also conditionally set based on whether
-              # this is a PR from a fork or not.
-              #
-              # https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables
-              echo "Skipping Cachix push for forked PR."
-            else
-              # If this *isn't* a build from a fork then we have the Cachix
-              # write key in our environment and we can push any new objects
-              # to Cachix.
-              #
-              # To decide what to push, we inspect the list of store objects
-              # that existed before and after we did most of our work.  Any
-              # that are new after the work is probably a useful thing to have
-              # around so push it to the cache.  We exclude all derivation
-              # objects (.drv files) because they're cheap to reconstruct and
-              # by the time you know their cache key you've already done all
-              # the work anyway.
-              #
-              # This shell expression for finding the objects and pushing them
-              # was from the Cachix docs:
-              #
-              # https://docs.cachix.org/continuous-integration-setup/circleci.html
-              #
-              # but they seem to have removed it now.
-              bash -c "comm -13 <(sort /tmp/store-path-pre-build | grep -v '\.drv$') <(nix path-info --all | grep -v '\.drv$' | sort) | cachix push $CACHIX_NAME"
-            fi
+            source .circleci/lib.sh
+            cache_if_able nix-build \
+              --cores 8 \
+              --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
+              tests.nix

  typechecks:
    docker:
      - <<: *DOCKERHUB_AUTH
-        image: "tahoelafsci/ubuntu:18.04-py3.7"
+        image: "tahoelafsci/ubuntu:20.04-py3.9"

    steps:
      - "checkout"

@@ -494,7 +455,7 @@ jobs:
  docs:
    docker:
      - <<: *DOCKERHUB_AUTH
-        image: "tahoelafsci/ubuntu:18.04-py3.7"
+        image: "tahoelafsci/ubuntu:20.04-py3.9"

    steps:
      - "checkout"

@@ -545,15 +506,6 @@ jobs:
            docker push tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION}


-  build-image-debian-10:
-    <<: *BUILD_IMAGE
-
-    environment:
-      DISTRO: "debian"
-      TAG: "10"
-      PYTHON_VERSION: "3.7"
-
-
  build-image-debian-11:
    <<: *BUILD_IMAGE

@@ -562,14 +514,6 @@ jobs:
      TAG: "11"
      PYTHON_VERSION: "3.9"

-  build-image-ubuntu-18-04:
-    <<: *BUILD_IMAGE
-
-    environment:
-      DISTRO: "ubuntu"
-      TAG: "18.04"
-      PYTHON_VERSION: "3.7"
-
-
  build-image-ubuntu-20-04:
    <<: *BUILD_IMAGE
119  .circleci/lib.sh  Normal file

@@ -0,0 +1,119 @@
# Run a command, enabling cache writes to cachix if possible.  The command is
# accepted as a variable number of positional arguments (like argv).
function cache_if_able() {
    # Dump some info about our build environment.
    describe_build

    if is_cache_writeable; then
        # If the cache is available we'll use it.  This lets fork owners set
        # up their own caching if they want.
        echo "Cachix credentials present; will attempt to write to cache."

        # The `cachix watch-exec ...` does our cache population.  When it sees
        # something added to the store (I guess) it pushes it to the named
        # cache.
        cachix watch-exec "${CACHIX_NAME}" -- "$@"
    else
        if is_cache_required; then
            echo "Required credentials (CACHIX_AUTH_TOKEN) are missing."
            return 1
        else
            echo "Cachix credentials missing; will not attempt cache writes."
            "$@"
        fi
    fi
}

function is_cache_writeable() {
    # We can only *push* to the cache if we have a CACHIX_AUTH_TOKEN.  in-repo
    # jobs will get this from CircleCI configuration but jobs from forks may
    # not.
    [ -v CACHIX_AUTH_TOKEN ]
}

function is_cache_required() {
    # If we're building in tahoe-lafs/tahoe-lafs then we must use the cache.
    # If we're building anything from a fork then we're allowed to not have
    # the credentials.
    is_upstream
}

# Return success if the origin of this build is the tahoe-lafs/tahoe-lafs
# repository itself (and so we expect to have cache credentials available),
# failure otherwise.
#
# See circleci.txt for notes about how this determination is made.
function is_upstream() {
    # CIRCLE_PROJECT_USERNAME is set to the org the build is happening for.
    # If a PR targets a fork of the repo then this is set to something other
    # than "tahoe-lafs".
    [ "$CIRCLE_PROJECT_USERNAME" == "tahoe-lafs" ] &&

        # CIRCLE_BRANCH is set to the real branch name for in-repo PRs and
        # "pull/NNNN" for pull requests from forks.
        #
        # CIRCLE_PULL_REQUESTS is set to a comma-separated list of the full
        # URLs of the PR pages which share an underlying branch, with one of
        # them ended with that same "pull/NNNN" for PRs from forks.
        ! any_element_endswith "/$CIRCLE_BRANCH" "," "$CIRCLE_PULL_REQUESTS"
}

# Return success if splitting $3 on $2 results in an array with any element
# that ends with $1, failure otherwise.
function any_element_endswith() {
    suffix=$1
    shift

    sep=$1
    shift

    haystack=$1
    shift

    IFS="${sep}" read -r -a elements <<< "$haystack"
    for elem in "${elements[@]}"; do
        if endswith "$suffix" "$elem"; then
            return 0
        fi
    done
    return 1
}

# Return success if $2 ends with $1, failure otherwise.
function endswith() {
    suffix=$1
    shift

    haystack=$1
    shift

    case "$haystack" in
        *${suffix})
            return 0
            ;;

        *)
            return 1
            ;;
    esac
}

function describe_build() {
    echo "Building PR for user/org: ${CIRCLE_PROJECT_USERNAME}"
    echo "Building branch: ${CIRCLE_BRANCH}"
    if is_upstream; then
        echo "Upstream build."
    else
        echo "Non-upstream build."
    fi
    if is_cache_required; then
        echo "Cache is required."
    else
        echo "Cache not required."
    fi
    if is_cache_writeable; then
        echo "Cache is writeable."
    else
        echo "Cache not writeable."
    fi
}
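For illustration, a minimal usage sketch of the helper above. The invocation mirrors how .circleci/config.yml calls it; CACHIX_NAME comes from the job environment and the nixpkgs version string here is only a placeholder:

    # Wrap an expensive build so new store objects are pushed to Cachix
    # whenever CACHIX_AUTH_TOKEN is available; otherwise just run the build.
    source .circleci/lib.sh
    cache_if_able nix-build \
        --cores 3 \
        --max-jobs 2 \
        --argstr pkgsVersion "nixpkgs-22.11"   # placeholder value for illustration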
@@ -9,7 +9,7 @@ BASIC_DEPS="pip wheel"

# Python packages we need to support the test infrastructure.  *Not* packages
# Tahoe-LAFS itself (implementation or test suite) need.
-TEST_DEPS="tox codecov"
+TEST_DEPS="tox~=3.0 codecov"

# Python packages we need to generate test reports for CI infrastructure.
# *Not* packages Tahoe-LAFS itself (implement or test suite) need.
20  .circleci/rebuild-images.sh  Executable file

@@ -0,0 +1,20 @@
#!/usr/bin/env bash

set -euo pipefail

# Get your API token here:
# https://app.circleci.com/settings/user/tokens
API_TOKEN=$1
shift

# Name the branch you want to trigger the build for
BRANCH=$1
shift

curl \
    --verbose \
    --request POST \
    --url https://circleci.com/api/v2/project/gh/tahoe-lafs/tahoe-lafs/pipeline \
    --header "Circle-Token: $API_TOKEN" \
    --header "content-type: application/json" \
    --data '{"branch":"'"$BRANCH"'","parameters":{"build-images":true,"run-tests":false}}'
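A hypothetical invocation, assuming a personal CircleCI API token is stored in MY_CIRCLECI_TOKEN (that variable name is illustrative, not something the script defines):

    # Trigger a pipeline on master that rebuilds the Docker images and skips the tests.
    ./.circleci/rebuild-images.sh "$MY_CIRCLECI_TOKEN" master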
@@ -45,14 +45,15 @@ fi

# A prefix for the test command that ensure it will exit after no more than a
# certain amount of time.  Ideally, we would only enforce a "silent" period
-# timeout but there isn't obviously a ready-made tool for that.  The test
-# suite only takes about 5 - 6 minutes on CircleCI right now.  15 minutes
-# seems like a moderately safe window.
+# timeout but there isn't obviously a ready-made tool for that.  The unit test
+# suite only takes about 5 - 6 minutes on CircleCI right now.  The integration
+# tests are a bit longer than that.  45 minutes seems like a moderately safe
+# window.
#
# This is primarily aimed at catching hangs on the PyPy job which runs for
# about 21 minutes and then gets killed by CircleCI in a way that fails the
# job and bypasses our "allowed failure" logic.
-TIMEOUT="timeout --kill-after 1m 15m"
+TIMEOUT="timeout --kill-after 1m 45m"

# Run the test suite as a non-root user.  This is the expected usage some
# small areas of the test suite assume non-root privileges (such as unreadable
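For reference, the prefix uses GNU coreutils timeout semantics: the wrapped command receives SIGTERM once the 45 minute limit expires and SIGKILL one minute later if it is still running. A hypothetical use of the prefix, with an example command that is not taken from this script:

    # Example only: cap a tox run at 45 minutes, hard-kill one minute after that.
    timeout --kill-after 1m 45m tox -e py39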
140  .github/workflows/ci.yml  vendored

@@ -6,6 +6,16 @@ on:
      - "master"
  pull_request:

+# At the start of each workflow run, GitHub creates a unique
+# GITHUB_TOKEN secret to use in the workflow. It is a good idea for
+# this GITHUB_TOKEN to have the minimum of permissions.  See:
+#
+# - https://docs.github.com/en/actions/security-guides/automatic-token-authentication
+# - https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#permissions
+#
+permissions:
+  contents: read
+
# Control to what degree jobs in this workflow will run concurrently with
# other instances of themselves.
#

@@ -38,73 +48,66 @@ jobs:
        - windows-latest
        - ubuntu-latest
      python-version:
-        - "3.7"
        - "3.8"
        - "3.9"
        - "3.10"
      include:
-        # On macOS don't bother with 3.7-3.8, just to get faster builds.
+        # On macOS don't bother with 3.8, just to get faster builds.
        - os: macos-latest
          python-version: "3.9"
        - os: macos-latest
          python-version: "3.10"
        # We only support PyPy on Linux at the moment.
-        - os: ubuntu-latest
-          python-version: "pypy-3.7"
        - os: ubuntu-latest
          python-version: "pypy-3.8"
+        - os: ubuntu-latest
+          python-version: "pypy-3.9"

    steps:
      # See https://github.com/actions/checkout. A fetch-depth of 0
      # fetches all tags and branches.
      - name: Check out Tahoe-LAFS sources
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
+          cache: 'pip' # caching pip dependencies
-        # To use pip caching with GitHub Actions in an OS-independent
-        # manner, we need `pip cache dir` command, which became
-        # available since pip v20.1+. At the time of writing this,
-        # GitHub Actions offers pip v20.3.3 for both ubuntu-latest and
-        # windows-latest, and pip v20.3.1 for macos-latest.
-      - name: Get pip cache directory
-        id: pip-cache
-        run: |
-          echo "::set-output name=dir::$(pip cache dir)"
-
-      # See https://github.com/actions/cache
-      - name: Use pip cache
-        uses: actions/cache@v2
-        with:
-          path: ${{ steps.pip-cache.outputs.dir }}
-          key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-

      - name: Install Python packages
        run: |
-          pip install --upgrade codecov tox tox-gh-actions setuptools
+          pip install --upgrade codecov "tox<4" tox-gh-actions setuptools
          pip list

      - name: Display tool versions
        run: python misc/build_helpers/show-tool-versions.py

      - name: Run tox for corresponding Python version
+        if: ${{ !contains(matrix.os, 'windows') }}
        run: python -m tox

+      # On Windows, a non-blocking pipe might respond (when emulating Unix-y
+      # API) with ENOSPC to indicate buffer full.  Trial doesn't handle this
+      # well, so it breaks test runs. To attempt to solve this, we pipe the
+      # output through passthrough.py that will hopefully be able to do the right
+      # thing by using Windows APIs.
+      - name: Run tox for corresponding Python version
+        if: ${{ contains(matrix.os, 'windows') }}
+        run: |
+          pip install twisted pywin32
+          python -m tox | python misc/windows-enospc/passthrough.py

      - name: Upload eliot.log
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v3
        with:
          name: eliot.log
          path: eliot.log

      - name: Upload trial log
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v3
        with:
          name: test.log
          path: _trial_temp/test.log

@@ -161,21 +164,22 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
-      os:
-        - windows-latest
-        - ubuntu-latest
-      python-version:
-        - 3.7
-        - 3.9
      include:
-        # On macOS don't bother with 3.7, just to get faster builds.
        - os: macos-latest
-          python-version: 3.9
+          python-version: "3.9"
+          force-foolscap: false
+        - os: windows-latest
+          python-version: "3.9"
+          force-foolscap: false
+        # 22.04 has some issue with Tor at the moment:
+        # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3943
+        - os: ubuntu-20.04
+          python-version: "3.9"
+          force-foolscap: false
    steps:

      - name: Install Tor [Ubuntu]
-        if: matrix.os == 'ubuntu-latest'
+        if: ${{ contains(matrix.os, 'ubuntu') }}
        run: sudo apt install tor

      # TODO: See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3744.

@@ -188,51 +192,51 @@ jobs:

      - name: Install Tor [Windows]
        if: matrix.os == 'windows-latest'
-        uses: crazy-max/ghaction-chocolatey@v1
+        uses: crazy-max/ghaction-chocolatey@v2
        with:
          args: install tor

      - name: Check out Tahoe-LAFS sources
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
+          cache: 'pip' # caching pip dependencies
-      - name: Get pip cache directory
-        id: pip-cache
-        run: |
-          echo "::set-output name=dir::$(pip cache dir)"
-
-      - name: Use pip cache
-        uses: actions/cache@v2
-        with:
-          path: ${{ steps.pip-cache.outputs.dir }}
-          key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-

      - name: Install Python packages
        run: |
-          pip install --upgrade tox
+          pip install --upgrade "tox<4"
          pip list

      - name: Display tool versions
        run: python misc/build_helpers/show-tool-versions.py

      - name: Run "Python 3 integration tests"
+        if: "${{ !matrix.force-foolscap }}"
        env:
          # On macOS this is necessary to ensure unix socket paths for tor
          # aren't too long. On Windows tox won't pass it through so it has no
          # effect. On Linux it doesn't make a difference one way or another.
          TMPDIR: "/tmp"
-        run: tox -e integration
+        run: |
+          tox -e integration
+
+      - name: Run "Python 3 integration tests (force Foolscap)"
+        if: "${{ matrix.force-foolscap }}"
+        env:
+          # On macOS this is necessary to ensure unix socket paths for tor
+          # aren't too long. On Windows tox won't pass it through so it has no
+          # effect. On Linux it doesn't make a difference one way or another.
+          TMPDIR: "/tmp"
+        run: |
+          tox -e integration -- --force-foolscap integration/

      - name: Upload eliot.log in case of failure
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v3
        if: failure()
        with:
          name: integration.eliot.json

@@ -253,31 +257,19 @@ jobs:
    steps:

      - name: Check out Tahoe-LAFS sources
-        uses: actions/checkout@v2
+        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
+          cache: 'pip' # caching pip dependencies
-      - name: Get pip cache directory
-        id: pip-cache
-        run: |
-          echo "::set-output name=dir::$(pip cache dir)"
-
-      - name: Use pip cache
-        uses: actions/cache@v2
-        with:
-          path: ${{ steps.pip-cache.outputs.dir }}
-          key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
-          restore-keys: |
-            ${{ runner.os }}-pip-

      - name: Install Python packages
        run: |
-          pip install --upgrade tox
+          pip install --upgrade "tox<4"
          pip list

      - name: Display tool versions

@@ -291,7 +283,7 @@ jobs:
        run: dist/Tahoe-LAFS/tahoe --version

      - name: Upload PyInstaller package
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
        with:
          name: Tahoe-LAFS-${{ matrix.os }}-Python-${{ matrix.python-version }}
          path: dist/Tahoe-LAFS-*-*.*
10  Dockerfile

@@ -1,10 +0,0 @@
FROM python:2.7

ADD . /tahoe-lafs
RUN \
  cd /tahoe-lafs && \
  git pull --depth=100 && \
  pip install . && \
  rm -rf ~/.cache/

WORKDIR /root
@@ -1,25 +0,0 @@
FROM debian:9
LABEL maintainer "gordon@leastauthority.com"
RUN apt-get update
RUN DEBIAN_FRONTEND=noninteractive apt-get -yq upgrade
RUN DEBIAN_FRONTEND=noninteractive apt-get -yq install build-essential python-dev libffi-dev libssl-dev python-virtualenv git
RUN \
  git clone https://github.com/tahoe-lafs/tahoe-lafs.git /root/tahoe-lafs; \
  cd /root/tahoe-lafs; \
  virtualenv --python=python2.7 venv; \
  ./venv/bin/pip install --upgrade setuptools; \
  ./venv/bin/pip install --editable .; \
  ./venv/bin/tahoe --version;
RUN \
  cd /root; \
  mkdir /root/.tahoe-client; \
  mkdir /root/.tahoe-introducer; \
  mkdir /root/.tahoe-server;
RUN /root/tahoe-lafs/venv/bin/tahoe create-introducer --location=tcp:introducer:3458 --port=tcp:3458 /root/.tahoe-introducer
RUN /root/tahoe-lafs/venv/bin/tahoe start /root/.tahoe-introducer
RUN /root/tahoe-lafs/venv/bin/tahoe create-node --location=tcp:server:3457 --port=tcp:3457 --introducer=$(cat /root/.tahoe-introducer/private/introducer.furl) /root/.tahoe-server
RUN /root/tahoe-lafs/venv/bin/tahoe create-client --webport=3456 --introducer=$(cat /root/.tahoe-introducer/private/introducer.furl) --basedir=/root/.tahoe-client --shares-needed=1 --shares-happy=1 --shares-total=1
VOLUME ["/root/.tahoe-client", "/root/.tahoe-server", "/root/.tahoe-introducer"]
EXPOSE 3456 3457 3458
ENTRYPOINT ["/root/tahoe-lafs/venv/bin/tahoe"]
CMD []
@@ -56,7 +56,7 @@ Once ``tahoe --version`` works, see `How to Run Tahoe-LAFS <docs/running.rst>`__
🐍 Python 2
-----------

-Python 3.7 or later is now required.
+Python 3.8 or later is required.
If you are still using Python 2.7, use Tahoe-LAFS version 1.17.1.

106  benchmarks/upload_download.py  Normal file

@@ -0,0 +1,106 @@
"""
First attempt at benchmarking uploads and downloads.

To run:

$ pytest benchmarks/upload_download.py -s -v -Wignore

TODO Parameterization (pytest?)

- Foolscap vs not foolscap

- Number of nodes

- Data size

- Number of needed/happy/total shares.

CAVEATS: The goal here isn't a realistic benchmark, or a benchmark that will be
measured over time, or is expected to be maintainable over time.  This is just
a quick and easy way to measure the speed of certain operations, compare HTTP
and Foolscap, and see the short-term impact of changes.

Eventually this will be replaced by a real benchmark suite that can be run over
time to measure something more meaningful.
"""

from time import time, process_time
from contextlib import contextmanager
from tempfile import mkdtemp
import os

from twisted.trial.unittest import TestCase

from allmydata.util.deferredutil import async_to_deferred
from allmydata.util.consumer import MemoryConsumer
from allmydata.test.common_system import SystemTestMixin
from allmydata.immutable.upload import Data as UData
from allmydata.mutable.publish import MutableData


@contextmanager
def timeit(name):
    start = time()
    start_cpu = process_time()
    try:
        yield
    finally:
        print(
            f"{name}: {time() - start:.3f} elapsed, {process_time() - start_cpu:.3f} CPU"
        )


class ImmutableBenchmarks(SystemTestMixin, TestCase):
    """Benchmarks for immutables."""

    # To use Foolscap, change to True:
    FORCE_FOOLSCAP_FOR_STORAGE = False

    @async_to_deferred
    async def setUp(self):
        SystemTestMixin.setUp(self)
        self.basedir = os.path.join(mkdtemp(), "nodes")

        # 2 nodes
        await self.set_up_nodes(2)

        # 1 share
        for c in self.clients:
            c.encoding_params["k"] = 1
            c.encoding_params["happy"] = 1
            c.encoding_params["n"] = 1

        print()

    @async_to_deferred
    async def test_upload_and_download_immutable(self):
        # To test larger files, change this:
        DATA = b"Some data to upload\n" * 10

        for i in range(5):
            # 1. Upload:
            with timeit(" upload"):
                uploader = self.clients[0].getServiceNamed("uploader")
                results = await uploader.upload(UData(DATA, convergence=None))

            # 2. Download:
            with timeit("download"):
                uri = results.get_uri()
                node = self.clients[1].create_node_from_uri(uri)
                mc = await node.read(MemoryConsumer(), 0, None)
                self.assertEqual(b"".join(mc.chunks), DATA)

    @async_to_deferred
    async def test_upload_and_download_mutable(self):
        # To test larger files, change this:
        DATA = b"Some data to upload\n" * 10

        for i in range(5):
            # 1. Upload:
            with timeit(" upload"):
                result = await self.clients[0].create_mutable_file(MutableData(DATA))

            # 2. Download:
            with timeit("download"):
                data = await result.download_best_version()
                self.assertEqual(data, DATA)
@@ -29,7 +29,7 @@ in
, pypiData ? sources.pypi-deps-db  # the pypi package database snapshot to use
                                   # for dependency resolution

-, pythonVersion ? "python37"  # a string choosing the python derivation from
+, pythonVersion ? "python39"  # a string choosing the python derivation from
                              # nixpkgs to target

, extras ? [ "tor" "i2p" ]  # a list of strings identifying tahoe-lafs extras,
@@ -1,49 +0,0 @@
version: '2'
services:
  client:
    build:
      context: .
      dockerfile: ./Dockerfile.dev
    volumes:
      - ./misc:/root/tahoe-lafs/misc
      - ./integration:/root/tahoe-lafs/integration
      - ./src:/root/tahoe-lafs/static
      - ./setup.cfg:/root/tahoe-lafs/setup.cfg
      - ./setup.py:/root/tahoe-lafs/setup.py
    ports:
      - "127.0.0.1:3456:3456"
    depends_on:
      - "introducer"
      - "server"
    entrypoint: /root/tahoe-lafs/venv/bin/tahoe
    command: ["run", "/root/.tahoe-client"]
  server:
    build:
      context: .
      dockerfile: ./Dockerfile.dev
    volumes:
      - ./misc:/root/tahoe-lafs/misc
      - ./integration:/root/tahoe-lafs/integration
      - ./src:/root/tahoe-lafs/static
      - ./setup.cfg:/root/tahoe-lafs/setup.cfg
      - ./setup.py:/root/tahoe-lafs/setup.py
    ports:
      - "127.0.0.1:3457:3457"
    depends_on:
      - "introducer"
    entrypoint: /root/tahoe-lafs/venv/bin/tahoe
    command: ["run", "/root/.tahoe-server"]
  introducer:
    build:
      context: .
      dockerfile: ./Dockerfile.dev
    volumes:
      - ./misc:/root/tahoe-lafs/misc
      - ./integration:/root/tahoe-lafs/integration
      - ./src:/root/tahoe-lafs/static
      - ./setup.cfg:/root/tahoe-lafs/setup.cfg
      - ./setup.py:/root/tahoe-lafs/setup.py
    ports:
      - "127.0.0.1:3458:3458"
    entrypoint: /root/tahoe-lafs/venv/bin/tahoe
    command: ["run", "/root/.tahoe-introducer"]
@@ -30,12 +30,12 @@ Glossary
   introducer
      a Tahoe-LAFS process at a known location configured to re-publish announcements about the location of storage servers

-   fURL
+   :ref:`fURLs <fURLs>`
      a self-authenticating URL-like string which can be used to locate a remote object using the Foolscap protocol
      (the storage service is an example of such an object)

-   NURL
-      a self-authenticating URL-like string almost exactly like a NURL but without being tied to Foolscap
+   :ref:`NURLs <NURLs>`
+      a self-authenticating URL-like string almost exactly like a fURL but without being tied to Foolscap

   swissnum
      a short random string which is part of a fURL/NURL and which acts as a shared secret to authorize clients to use a storage service
@@ -579,24 +579,6 @@ Responses:
  the response is ``CONFLICT``.
  At this point the only thing to do is abort the upload and start from scratch (see below).

-``PUT /storage/v1/immutable/:storage_index/:share_number/abort``
-!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-This cancels an *in-progress* upload.
-
-The request must include a ``X-Tahoe-Authorization`` header that includes the upload secret::
-
-    X-Tahoe-Authorization: upload-secret <base64-upload-secret>
-
-The response code:
-
-* When the upload is still in progress and therefore the abort has succeeded,
-  the response is ``OK``.
-  Future uploads can start from scratch with no pre-existing upload state stored on the server.
-* If the uploaded has already finished, the response is 405 (Method Not Allowed)
-  and no change is made.
-
Discussion
``````````

@@ -615,6 +597,25 @@ From RFC 7231::
    PATCH method defined in [RFC5789]).


+
+``PUT /storage/v1/immutable/:storage_index/:share_number/abort``
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+This cancels an *in-progress* upload.
+
+The request must include a ``X-Tahoe-Authorization`` header that includes the upload secret::
+
+    X-Tahoe-Authorization: upload-secret <base64-upload-secret>
+
+The response code:
+
+* When the upload is still in progress and therefore the abort has succeeded,
+  the response is ``OK``.
+  Future uploads can start from scratch with no pre-existing upload state stored on the server.
+* If the uploaded has already finished, the response is 405 (Method Not Allowed)
+  and no change is made.
+
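Purely as an illustration (not part of the specification text being moved above), an abort request for the share used in the later examples could be issued like this; the host, swissnum, and upload secret are placeholder values:

    # Hypothetical abort of an in-progress upload of share 7 under storage index AAAAAAAAAAAAAAAA.
    curl --request PUT \
         --header "Authorization: Tahoe-LAFS nurl-swissnum" \
         --header "X-Tahoe-Authorization: upload-secret dXBsb2FkLXNlY3JldA==" \
         "https://storage.example/storage/v1/immutable/AAAAAAAAAAAAAAAA/7/abort"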

``POST /storage/v1/immutable/:storage_index/:share_number/corrupt``
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!

@@ -624,7 +625,7 @@ corruption. It also includes potentially important details about the share.

For example::

-  {"reason": u"expected hash abcd, got hash efgh"}
+  {"reason": "expected hash abcd, got hash efgh"}

.. share-type, storage-index, and share-number are inferred from the URL

@@ -798,6 +799,7 @@ Immutable Data
     <first 16 bytes of share data>

     200 OK
+    { "required": [ {"begin": 16, "end": 48 } ] }

     PATCH /storage/v1/immutable/AAAAAAAAAAAAAAAA/7
     Authorization: Tahoe-LAFS nurl-swissnum

@@ -806,6 +808,7 @@ Immutable Data
     <second 16 bytes of share data>

     200 OK
+    { "required": [ {"begin": 32, "end": 48 } ] }

     PATCH /storage/v1/immutable/AAAAAAAAAAAAAAAA/7
     Authorization: Tahoe-LAFS nurl-swissnum

@@ -822,6 +825,7 @@ Immutable Data
     Range: bytes=0-47

     200 OK
+    Content-Range: bytes 0-47/48
     <complete 48 bytes of previously uploaded data>

#. Renew the lease on all immutable shares in bucket ``AAAAAAAAAAAAAAAA``::

@@ -905,9 +909,12 @@ otherwise it will read a byte which won't match `b""`::

#. Download the contents of share number ``3``::

-    GET /storage/v1/mutable/BBBBBBBBBBBBBBBB?share=3&offset=0&size=10
+    GET /storage/v1/mutable/BBBBBBBBBBBBBBBB?share=3
     Authorization: Tahoe-LAFS nurl-swissnum
+    Range: bytes=0-16

+    200 OK
+    Content-Range: bytes 0-15/16
     <complete 16 bytes of previously uploaded data>

#. Renew the lease on previously uploaded mutable share in slot ``BBBBBBBBBBBBBBBB``::
@@ -7,6 +7,8 @@ These are not to be confused with the URI-like capabilities Tahoe-LAFS uses to r
An attempt is also made to outline the rationale for certain choices about these URLs.
The intended audience for this document is Tahoe-LAFS maintainers and other developers interested in interoperating with Tahoe-LAFS or these URLs.

+.. _furls:
+
Background
----------

@@ -31,6 +33,8 @@ The client's use of the swissnum is what allows the server to authorize the clie

.. _`swiss number`: http://wiki.erights.org/wiki/Swiss_number

+.. _NURLs:
+
NURLs
-----

@@ -1,15 +1,6 @@
"""
Ported to Python 3.
"""
-from __future__ import unicode_literals
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from future.utils import PY2
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
-
import sys
import shutil
from time import sleep

@@ -49,7 +40,6 @@ from .util import (
    await_client_ready,
    TahoeProcess,
    cli,
-    _run_node,
    generate_ssh_key,
    block_with_timeout,
)

@@ -66,6 +56,29 @@ def pytest_addoption(parser):
        "--coverage", action="store_true", dest="coverage",
        help="Collect coverage statistics",
    )
+    parser.addoption(
+        "--force-foolscap", action="store_true", default=False,
+        dest="force_foolscap",
+        help=("If set, force Foolscap only for the storage protocol. " +
+              "Otherwise HTTP will be used.")
+    )
+    parser.addoption(
+        "--runslow", action="store_true", default=False,
+        dest="runslow",
+        help="If set, run tests marked as slow.",
+    )
+
+def pytest_collection_modifyitems(session, config, items):
+    if not config.option.runslow:
+        # The --runslow option was not given; keep only collected items not
+        # marked as slow.
+        items[:] = [
+            item
+            for item
+            in items
+            if item.get_closest_marker("slow") is None
+        ]
+

@pytest.fixture(autouse=True, scope='session')
def eliot_logging():

@@ -410,10 +423,9 @@ alice-key ssh-rsa {ssh_public_key} {rwcap}
""".format(rwcap=rwcap, ssh_public_key=ssh_public_key))

    # 4. Restart the node with new SFTP config.
-    process.kill()
-    pytest_twisted.blockon(_run_node(reactor, process.node_dir, request, None))
+    pytest_twisted.blockon(process.restart_async(reactor, request))

    await_client_ready(process)
+    print(f"Alice pid: {process.transport.pid}")
    return process

119
integration/test_vectors.py
Normal file
119
integration/test_vectors.py
Normal file
@ -0,0 +1,119 @@
|
|||||||
|
"""
|
||||||
|
Verify certain results against test vectors with well-known results.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from functools import partial
|
||||||
|
from typing import AsyncGenerator, Iterator
|
||||||
|
from itertools import starmap, product
|
||||||
|
|
||||||
|
from attrs import evolve
|
||||||
|
|
||||||
|
from pytest import mark
|
||||||
|
from pytest_twisted import ensureDeferred
|
||||||
|
|
||||||
|
from . import vectors
|
||||||
|
from .vectors import parameters
|
||||||
|
from .util import reconfigure, upload, TahoeProcess
|
||||||
|
|
||||||
|
@mark.parametrize('convergence', parameters.CONVERGENCE_SECRETS)
|
||||||
|
def test_convergence(convergence):
|
||||||
|
"""
|
||||||
|
Convergence secrets are 16 bytes.
|
||||||
|
"""
|
||||||
|
assert isinstance(convergence, bytes), "Convergence secret must be bytes"
|
||||||
|
assert len(convergence) == 16, "Convergence secret must be 16 bytes"
|
||||||
|
|
||||||
|
|
||||||
|
@mark.slow
|
||||||
|
@mark.parametrize('case,expected', vectors.capabilities.items())
|
||||||
|
@ensureDeferred
|
||||||
|
async def test_capability(reactor, request, alice, case, expected):
|
||||||
|
"""
|
||||||
|
The capability that results from uploading certain well-known data
|
||||||
|
with certain well-known parameters results in exactly the previously
|
||||||
|
computed value.
|
||||||
|
"""
|
||||||
|
# rewrite alice's config to match params and convergence
|
||||||
|
await reconfigure(reactor, request, alice, (1, case.params.required, case.params.total), case.convergence)
|
||||||
|
|
||||||
|
# upload data in the correct format
|
||||||
|
actual = upload(alice, case.fmt, case.data)
|
||||||
|
|
||||||
|
# compare the resulting cap to the expected result
|
||||||
|
assert actual == expected
|
||||||
|
|
||||||
|
|
||||||
|
@ensureDeferred
|
||||||
|
async def skiptest_generate(reactor, request, alice):
|
||||||
|
"""
|
||||||
|
This is a helper for generating the test vectors.
|
||||||
|
|
||||||
|
You can re-generate the test vectors by fixing the name of the test and
|
||||||
|
running it. Normally this test doesn't run because it ran once and we
|
||||||
|
captured its output. Other tests run against that output and we want them
|
||||||
|
to run against the results produced originally, not a possibly
|
||||||
|
ever-changing set of outputs.
|
||||||
|
"""
|
||||||
|
space = starmap(
|
||||||
|
# segment_size could be a parameter someday but it's not easy to vary
|
||||||
|
# using the Python implementation so it isn't one for now.
|
||||||
|
partial(vectors.Case, segment_size=parameters.SEGMENT_SIZE),
|
||||||
|
product(
|
||||||
|
parameters.ZFEC_PARAMS,
|
||||||
|
parameters.CONVERGENCE_SECRETS,
|
||||||
|
parameters.OBJECT_DESCRIPTIONS,
|
||||||
|
parameters.FORMATS,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
iterresults = generate(reactor, request, alice, space)
|
||||||
|
|
||||||
|
results = []
|
||||||
|
async for result in iterresults:
|
||||||
|
# Accumulate the new result
|
||||||
|
results.append(result)
|
||||||
|
# Then rewrite the whole output file with the new accumulator value.
|
||||||
|
# This means that if we fail partway through, we will still have
|
||||||
|
# recorded partial results -- instead of losing them all.
|
||||||
|
vectors.save_capabilities(results)
|
||||||
|
|
||||||
|
async def generate(
|
||||||
|
reactor,
|
||||||
|
request,
|
||||||
|
alice: TahoeProcess,
|
||||||
|
cases: Iterator[vectors.Case],
|
||||||
|
) -> AsyncGenerator[tuple[vectors.Case, str], None]:
|
||||||
|
"""
|
||||||
|
Generate all of the test vectors using the given node.
|
||||||
|
|
||||||
|
:param reactor: The reactor to use to restart the Tahoe-LAFS node when it
|
||||||
|
needs to be reconfigured.
|
||||||
|
|
||||||
|
:param request: The pytest request object to use to arrange process
|
||||||
|
cleanup.
|
||||||
|
|
||||||
|
:param format: The name of the encryption/data format to use.
|
||||||
|
|
||||||
|
:param alice: The Tahoe-LAFS node to use to generate the test vectors.
|
||||||
|
|
||||||
|
:param cases: The cases for which to generate values.
|
||||||
|
|
||||||
|
:return: An async generator that yields (case, capability) tuples.
|
||||||
|
"""
|
||||||
|
# Share placement doesn't affect the resulting capability. For maximum
|
||||||
|
# reliability of this generator, be happy if we can put shares anywhere
|
||||||
|
happy = 1
|
||||||
|
for case in cases:
|
||||||
|
await reconfigure(
|
||||||
|
reactor,
|
||||||
|
request,
|
||||||
|
alice,
|
||||||
|
(happy, case.params.required, case.params.total),
|
||||||
|
case.convergence
|
||||||
|
)
|
||||||
|
|
||||||
|
# Give the format a chance to make an RSA key if it needs it.
|
||||||
|
case = evolve(case, fmt=case.fmt.customize())
|
||||||
|
cap = upload(alice, case.fmt, case.data)
|
||||||
|
yield case, cap
|
@ -7,18 +7,9 @@ Most of the tests have cursory asserts and encode 'what the WebAPI did
|
|||||||
at the time of testing' -- not necessarily a cohesive idea of what the
|
at the time of testing' -- not necessarily a cohesive idea of what the
|
||||||
WebAPI *should* do in every situation. It's not clear the latter
|
WebAPI *should* do in every situation. It's not clear the latter
|
||||||
exists anywhere, however.
|
exists anywhere, however.
|
||||||
|
|
||||||
Ported to Python 3.
|
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import unicode_literals
|
from __future__ import annotations
|
||||||
from __future__ import absolute_import
|
|
||||||
from __future__ import division
|
|
||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
from future.utils import PY2
|
|
||||||
if PY2:
|
|
||||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
|
||||||
|
|
||||||
import time
|
import time
|
||||||
from urllib.parse import unquote as url_unquote, quote as url_quote
|
from urllib.parse import unquote as url_unquote, quote as url_quote
|
||||||
@ -32,6 +23,7 @@ import requests
|
|||||||
import html5lib
|
import html5lib
|
||||||
from bs4 import BeautifulSoup
|
from bs4 import BeautifulSoup
|
||||||
|
|
||||||
|
from pytest_twisted import ensureDeferred
|
||||||
|
|
||||||
def test_index(alice):
|
def test_index(alice):
|
||||||
"""
|
"""
|
||||||
@ -252,10 +244,18 @@ def test_status(alice):
|
|||||||
assert found_download, "Failed to find the file we downloaded in the status-page"
|
assert found_download, "Failed to find the file we downloaded in the status-page"
|
||||||
|
|
||||||
|
|
||||||
def test_directory_deep_check(alice):
|
@ensureDeferred
|
||||||
|
async def test_directory_deep_check(reactor, request, alice):
|
||||||
"""
|
"""
|
||||||
use deep-check and confirm the result pages work
|
use deep-check and confirm the result pages work
|
||||||
"""
|
"""
|
||||||
|
# Make sure the node is configured compatibly with expectations of this
|
||||||
|
# test.
|
||||||
|
happy = 3
|
||||||
|
required = 2
|
||||||
|
total = 4
|
||||||
|
|
||||||
|
await util.reconfigure(reactor, request, alice, (happy, required, total), convergence=None)
|
||||||
|
|
||||||
# create a directory
|
# create a directory
|
||||||
resp = requests.post(
|
resp = requests.post(
|
||||||
@ -313,7 +313,7 @@ def test_directory_deep_check(alice):
|
|||||||
)
|
)
|
||||||
|
|
||||||
def check_repair_data(checkdata):
|
def check_repair_data(checkdata):
|
||||||
assert checkdata["healthy"] is True
|
assert checkdata["healthy"]
|
||||||
assert checkdata["count-happiness"] == 4
|
assert checkdata["count-happiness"] == 4
|
||||||
assert checkdata["count-good-share-hosts"] == 4
|
assert checkdata["count-good-share-hosts"] == 4
|
||||||
assert checkdata["count-shares-good"] == 4
|
assert checkdata["count-shares-good"] == 4
|
||||||
|
@ -1,22 +1,19 @@
|
|||||||
"""
|
"""
|
||||||
Ported to Python 3.
|
General functionality useful for the implementation of integration tests.
|
||||||
"""
|
"""
|
||||||
from __future__ import unicode_literals
|
|
||||||
from __future__ import absolute_import
|
|
||||||
from __future__ import division
|
|
||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
from future.utils import PY2
|
from __future__ import annotations
|
||||||
if PY2:
|
|
||||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
|
||||||
|
|
||||||
|
from contextlib import contextmanager
|
||||||
|
from typing import Any
|
||||||
|
from typing_extensions import Literal
|
||||||
|
from tempfile import NamedTemporaryFile
|
||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
import json
|
import json
|
||||||
from os import mkdir, environ
|
from os import mkdir, environ
|
||||||
from os.path import exists, join
|
from os.path import exists, join
|
||||||
from io import StringIO, BytesIO
|
from io import StringIO, BytesIO
|
||||||
from functools import partial
|
|
||||||
from subprocess import check_output
|
from subprocess import check_output
|
||||||
|
|
||||||
from twisted.python.filepath import (
|
from twisted.python.filepath import (
|
||||||
@ -26,12 +23,23 @@ from twisted.internet.defer import Deferred, succeed
|
|||||||
from twisted.internet.protocol import ProcessProtocol
|
from twisted.internet.protocol import ProcessProtocol
|
||||||
from twisted.internet.error import ProcessExitedAlready, ProcessDone
|
from twisted.internet.error import ProcessExitedAlready, ProcessDone
|
||||||
from twisted.internet.threads import deferToThread
|
from twisted.internet.threads import deferToThread
|
||||||
|
from twisted.internet.interfaces import IProcessTransport, IReactorProcess
|
||||||
|
|
||||||
|
from attrs import frozen, evolve
|
||||||
import requests
|
import requests
|
||||||
|
|
||||||
|
from cryptography.hazmat.primitives.asymmetric import rsa
|
||||||
|
from cryptography.hazmat.backends import default_backend
|
||||||
|
from cryptography.hazmat.primitives.serialization import (
|
||||||
|
Encoding,
|
||||||
|
PrivateFormat,
|
||||||
|
NoEncryption,
|
||||||
|
)
|
||||||
|
|
||||||
from paramiko.rsakey import RSAKey
|
from paramiko.rsakey import RSAKey
|
||||||
from boltons.funcutils import wraps
|
from boltons.funcutils import wraps
|
||||||
|
|
||||||
|
from allmydata.util import base32
|
||||||
from allmydata.util.configutil import (
|
from allmydata.util.configutil import (
|
||||||
get_config,
|
get_config,
|
||||||
set_config,
|
set_config,
|
||||||
@ -142,9 +150,40 @@ class _MagicTextProtocol(ProcessProtocol):
|
|||||||
sys.stdout.write(data)
|
sys.stdout.write(data)
|
||||||
|
|
||||||
|
|
||||||
def _cleanup_tahoe_process(tahoe_transport, exited):
|
def _cleanup_process_async(transport: IProcessTransport, allow_missing: bool) -> None:
|
||||||
"""
|
"""
|
||||||
Terminate the given process with a kill signal (SIGKILL on POSIX,
|
If the given process transport seems to still be associated with a
|
||||||
|
running process, send a SIGTERM to that process.
|
||||||
|
|
||||||
|
:param transport: The transport to use.
|
||||||
|
|
||||||
|
:param allow_missing: If ``True`` then it is not an error for the
|
||||||
|
transport to have no associated process. Otherwise, an exception will
|
||||||
|
be raised in that case.
|
||||||
|
|
||||||
|
:raise: ``ValueError`` if ``allow_missing`` is ``False`` and the transport
|
||||||
|
has no process.
|
||||||
|
"""
|
||||||
|
if transport.pid is None:
|
||||||
|
if allow_missing:
|
||||||
|
print("Process already cleaned up and that's okay.")
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
raise ValueError("Process is not running")
|
||||||
|
print("signaling {} with TERM".format(transport.pid))
|
||||||
|
try:
|
||||||
|
transport.signalProcess('TERM')
|
||||||
|
except ProcessExitedAlready:
|
||||||
|
# The transport object thought it still had a process but the real OS
|
||||||
|
# process has already exited. That's fine. We accomplished what we
|
||||||
|
# wanted to. We don't care about ``allow_missing`` here because
|
||||||
|
# there's no way we could have known the real OS process already
|
||||||
|
# exited.
|
||||||
|
pass
|
||||||
|
|
||||||
|
def _cleanup_tahoe_process(tahoe_transport, exited, allow_missing=False):
|
||||||
|
"""
|
||||||
|
Terminate the given process with a kill signal (SIGTERM on POSIX,
|
||||||
TerminateProcess on Windows).
|
TerminateProcess on Windows).
|
||||||
|
|
||||||
:param tahoe_transport: The `IProcessTransport` representing the process.
|
:param tahoe_transport: The `IProcessTransport` representing the process.
|
||||||
@ -153,14 +192,10 @@ def _cleanup_tahoe_process(tahoe_transport, exited):
|
|||||||
:return: After the process has exited.
|
:return: After the process has exited.
|
||||||
"""
|
"""
|
||||||
from twisted.internet import reactor
|
from twisted.internet import reactor
|
||||||
try:
|
_cleanup_process_async(tahoe_transport, allow_missing=allow_missing)
|
||||||
print("signaling {} with TERM".format(tahoe_transport.pid))
|
|
||||||
tahoe_transport.signalProcess('TERM')
|
|
||||||
print("signaled, blocking on exit")
|
print("signaled, blocking on exit")
|
||||||
block_with_timeout(exited, reactor)
|
block_with_timeout(exited, reactor)
|
||||||
print("exited, goodbye")
|
print("exited, goodbye")
|
||||||
except ProcessExitedAlready:
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def _tahoe_runner_optional_coverage(proto, reactor, request, other_args):
|
def _tahoe_runner_optional_coverage(proto, reactor, request, other_args):
|
||||||
@ -207,8 +242,33 @@ class TahoeProcess(object):
|
|||||||
|
|
||||||
def kill(self):
|
def kill(self):
|
||||||
"""Kill the process, block until it's done."""
|
"""Kill the process, block until it's done."""
|
||||||
|
print(f"TahoeProcess.kill({self.transport.pid} / {self.node_dir})")
|
||||||
_cleanup_tahoe_process(self.transport, self.transport.exited)
|
_cleanup_tahoe_process(self.transport, self.transport.exited)
|
||||||
|
|
||||||
|
def kill_async(self):
|
||||||
|
"""
|
||||||
|
Kill the process, return a Deferred that fires when it's done.
|
||||||
|
"""
|
||||||
|
print(f"TahoeProcess.kill_async({self.transport.pid} / {self.node_dir})")
|
||||||
|
_cleanup_process_async(self.transport, allow_missing=False)
|
||||||
|
return self.transport.exited
|
||||||
|
|
||||||
|
def restart_async(self, reactor: IReactorProcess, request: Any) -> Deferred:
|
||||||
|
"""
|
||||||
|
Stop and then re-start the associated process.
|
||||||
|
|
||||||
|
:return: A Deferred that fires after the new process is ready to
|
||||||
|
handle requests.
|
||||||
|
"""
|
||||||
|
d = self.kill_async()
|
||||||
|
d.addCallback(lambda ignored: _run_node(reactor, self.node_dir, request, None, finalize=False))
|
||||||
|
def got_new_process(proc):
|
||||||
|
# Grab the new transport since the one we had before is no longer
|
||||||
|
# valid after the stop/start cycle.
|
||||||
|
self._process_transport = proc.transport
|
||||||
|
d.addCallback(got_new_process)
|
||||||
|
return d
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return "<TahoeProcess in '{}'>".format(self._node_dir)
|
return "<TahoeProcess in '{}'>".format(self._node_dir)
|
||||||
|
|
||||||
@ -237,19 +297,17 @@ def _run_node(reactor, node_dir, request, magic_text, finalize=True):
|
|||||||
)
|
)
|
||||||
transport.exited = protocol.exited
|
transport.exited = protocol.exited
|
||||||
|
|
||||||
if finalize:
|
tahoe_process = TahoeProcess(
|
||||||
request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))
|
|
||||||
|
|
||||||
# XXX abusing the Deferred; should use .when_magic_seen() pattern
|
|
||||||
|
|
||||||
def got_proto(proto):
|
|
||||||
transport._protocol = proto
|
|
||||||
return TahoeProcess(
|
|
||||||
transport,
|
transport,
|
||||||
node_dir,
|
node_dir,
|
||||||
)
|
)
|
||||||
protocol.magic_seen.addCallback(got_proto)
|
|
||||||
return protocol.magic_seen
|
if finalize:
|
||||||
|
request.addfinalizer(tahoe_process.kill)
|
||||||
|
|
||||||
|
d = protocol.magic_seen
|
||||||
|
d.addCallback(lambda ignored: tahoe_process)
|
||||||
|
return d
|
||||||
|
|
||||||
|
|
||||||
def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, name, web_port,
|
def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, name, web_port,
|
||||||
@ -300,6 +358,14 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
|
|||||||
u'log_gatherer.furl',
|
u'log_gatherer.furl',
|
||||||
flog_gatherer,
|
flog_gatherer,
|
||||||
)
|
)
|
||||||
|
force_foolscap = request.config.getoption("force_foolscap")
|
||||||
|
assert force_foolscap in (True, False)
|
||||||
|
set_config(
|
||||||
|
config,
|
||||||
|
'storage',
|
||||||
|
'force_foolscap',
|
||||||
|
str(force_foolscap),
|
||||||
|
)
|
||||||
write_config(FilePath(config_path), config)
|
write_config(FilePath(config_path), config)
|
||||||
created_d.addCallback(created)
|
created_d.addCallback(created)
|
||||||
|
|
||||||
@ -572,3 +638,158 @@ def run_in_thread(f):
|
|||||||
def test(*args, **kwargs):
|
def test(*args, **kwargs):
|
||||||
return deferToThread(lambda: f(*args, **kwargs))
|
return deferToThread(lambda: f(*args, **kwargs))
|
||||||
return test
|
return test
|
||||||
|
|
||||||
|
@frozen
|
||||||
|
class CHK:
|
||||||
|
"""
|
||||||
|
Represent the CHK encoding sufficiently to run a ``tahoe put`` command
|
||||||
|
using it.
|
||||||
|
"""
|
||||||
|
kind = "chk"
|
||||||
|
max_shares = 256
|
||||||
|
|
||||||
|
def customize(self) -> CHK:
|
||||||
|
# Nothing to do.
|
||||||
|
return self
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def load(cls, params: None) -> CHK:
|
||||||
|
assert params is None
|
||||||
|
return cls()
|
||||||
|
|
||||||
|
def to_json(self) -> None:
|
||||||
|
return None
|
||||||
|
|
||||||
|
@contextmanager
|
||||||
|
def to_argv(self) -> None:
|
||||||
|
yield []
|
||||||
|
|
||||||
|
@frozen
|
||||||
|
class SSK:
|
||||||
|
"""
|
||||||
|
Represent the SSK encodings (SDMF and MDMF) sufficiently to run a
|
||||||
|
``tahoe put`` command using one of them.
|
||||||
|
"""
|
||||||
|
kind = "ssk"
|
||||||
|
|
||||||
|
# SDMF and MDMF encode share counts (N and k) into the share itself as an
|
||||||
|
# unsigned byte. They could have encoded (share count - 1) to fit the
|
||||||
|
# full range supported by ZFEC into the unsigned byte - but they don't.
|
||||||
|
# So 256 is inaccessible to those formats and we set the upper bound at
|
||||||
|
# 255.
|
||||||
|
max_shares = 255
|
||||||
|
|
||||||
|
name: Literal["sdmf", "mdmf"]
|
||||||
|
key: None | bytes
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def load(cls, params: dict) -> SSK:
|
||||||
|
assert params.keys() == {"format", "mutable", "key"}
|
||||||
|
return cls(params["format"], params["key"].encode("ascii"))
|
||||||
|
|
||||||
|
def customize(self) -> SSK:
|
||||||
|
"""
|
||||||
|
Return an SSK with a newly generated random RSA key.
|
||||||
|
"""
|
||||||
|
return evolve(self, key=generate_rsa_key())
|
||||||
|
|
||||||
|
def to_json(self) -> dict[str, str]:
|
||||||
|
return {
|
||||||
|
"format": self.name,
|
||||||
|
"mutable": None,
|
||||||
|
"key": self.key.decode("ascii"),
|
||||||
|
}
|
||||||
|
|
||||||
|
@contextmanager
|
||||||
|
def to_argv(self) -> None:
|
||||||
|
with NamedTemporaryFile() as f:
|
||||||
|
f.write(self.key)
|
||||||
|
f.flush()
|
||||||
|
yield [f"--format={self.name}", "--mutable", f"--private-key-path={f.name}"]
|
||||||
|
|
||||||
|
|
||||||
|
def upload(alice: TahoeProcess, fmt: CHK | SSK, data: bytes) -> str:
|
||||||
|
"""
|
||||||
|
Upload the given data to the given node.
|
||||||
|
|
||||||
|
:param alice: The node to upload to.
|
||||||
|
|
||||||
|
:param fmt: The format for the upload: a ``CHK`` or ``SSK`` instance.
|
||||||
|
|
||||||
|
:param data: The data to upload.
|
||||||
|
|
||||||
|
:return: The capability for the uploaded data.
|
||||||
|
"""
|
||||||
|
|
||||||
|
with NamedTemporaryFile() as f:
|
||||||
|
f.write(data)
|
||||||
|
f.flush()
|
||||||
|
with fmt.to_argv() as fmt_argv:
|
||||||
|
argv = [alice, "put"] + fmt_argv + [f.name]
|
||||||
|
return cli(*argv).decode("utf-8").strip()
|
||||||
|
|
||||||
|
|
||||||
|
async def reconfigure(reactor, request, node: TahoeProcess, params: tuple[int, int, int], convergence: None | bytes) -> None:
|
||||||
|
"""
|
||||||
|
Reconfigure a Tahoe-LAFS node with different ZFEC parameters and
|
||||||
|
convergence secret.
|
||||||
|
|
||||||
|
If the current configuration is different from the specified
|
||||||
|
configuration, the node will be restarted so it takes effect.
|
||||||
|
|
||||||
|
:param reactor: A reactor to use to restart the process.
|
||||||
|
:param request: The pytest request object to use to arrange process
|
||||||
|
cleanup.
|
||||||
|
:param node: The Tahoe-LAFS node to reconfigure.
|
||||||
|
:param params: The ``happy``, ``needed``, and ``total`` ZFEC encoding
|
||||||
|
parameters.
|
||||||
|
:param convergence: If given, the convergence secret. If not given, the
|
||||||
|
existing convergence secret will be left alone.
|
||||||
|
|
||||||
|
:return: ``None`` after the node configuration has been rewritten, the
|
||||||
|
node has been restarted, and the node is ready to provide service.
|
||||||
|
"""
|
||||||
|
happy, needed, total = params
|
||||||
|
config = node.get_config()
|
||||||
|
|
||||||
|
changed = False
|
||||||
|
cur_happy = int(config.get_config("client", "shares.happy"))
|
||||||
|
cur_needed = int(config.get_config("client", "shares.needed"))
|
||||||
|
cur_total = int(config.get_config("client", "shares.total"))
|
||||||
|
|
||||||
|
if (happy, needed, total) != (cur_happy, cur_needed, cur_total):
|
||||||
|
changed = True
|
||||||
|
config.set_config("client", "shares.happy", str(happy))
|
||||||
|
config.set_config("client", "shares.needed", str(needed))
|
||||||
|
config.set_config("client", "shares.total", str(total))
|
||||||
|
|
||||||
|
if convergence is not None:
|
||||||
|
cur_convergence = config.get_private_config("convergence").encode("ascii")
|
||||||
|
if base32.a2b(cur_convergence) != convergence:
|
||||||
|
changed = True
|
||||||
|
config.write_private_config("convergence", base32.b2a(convergence))
|
||||||
|
|
||||||
|
if changed:
|
||||||
|
# restart the node
|
||||||
|
print(f"Restarting {node.node_dir} for ZFEC reconfiguration")
|
||||||
|
await node.restart_async(reactor, request)
|
||||||
|
print("Restarted. Waiting for ready state.")
|
||||||
|
await_client_ready(node)
|
||||||
|
print("Ready.")
|
||||||
|
else:
|
||||||
|
print("Config unchanged, not restarting.")
|
||||||
|
|
||||||
|
|
||||||
|
def generate_rsa_key() -> bytes:
|
||||||
|
"""
|
||||||
|
Generate a 2048 bit RSA key suitable for use with SSKs.
|
||||||
|
"""
|
||||||
|
return rsa.generate_private_key(
|
||||||
|
public_exponent=65537,
|
||||||
|
key_size=2048,
|
||||||
|
backend=default_backend()
|
||||||
|
).private_bytes(
|
||||||
|
encoding=Encoding.PEM,
|
||||||
|
format=PrivateFormat.TraditionalOpenSSL,
|
||||||
|
encryption_algorithm=NoEncryption(),
|
||||||
|
)
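As a quick sanity check, the returned PEM round-trips through ``cryptography`` (a sketch, not part of the module; it assumes a recent ``cryptography`` release where the ``backend`` argument is optional)::

    from cryptography.hazmat.primitives.serialization import load_pem_private_key

    pem = generate_rsa_key()
    key = load_pem_private_key(pem, password=None)
    assert key.key_size == 2048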
|
||||||
|
30
integration/vectors/__init__.py
Normal file
30
integration/vectors/__init__.py
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
__all__ = [
|
||||||
|
"DATA_PATH",
|
||||||
|
"CURRENT_VERSION",
|
||||||
|
"MAX_SHARES",
|
||||||
|
|
||||||
|
"Case",
|
||||||
|
"Sample",
|
||||||
|
"SeedParam",
|
||||||
|
"encode_bytes",
|
||||||
|
"save_capabilities",
|
||||||
|
|
||||||
|
"capabilities",
|
||||||
|
]
|
||||||
|
|
||||||
|
from .vectors import (
|
||||||
|
DATA_PATH,
|
||||||
|
CURRENT_VERSION,
|
||||||
|
|
||||||
|
Case,
|
||||||
|
Sample,
|
||||||
|
SeedParam,
|
||||||
|
encode_bytes,
|
||||||
|
save_capabilities,
|
||||||
|
|
||||||
|
capabilities,
|
||||||
|
)
|
||||||
|
|
||||||
|
from .parameters import (
|
||||||
|
MAX_SHARES,
|
||||||
|
)
|
58
integration/vectors/model.py
Normal file
58
integration/vectors/model.py
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
"""
|
||||||
|
Simple data type definitions useful in the definition/verification of test
|
||||||
|
vectors.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from attrs import frozen
|
||||||
|
|
||||||
|
# CHK has a max of 256 shares. SDMF / MDMF have a max of 255 shares!
|
||||||
|
# Represent max symbolically and resolve it when we know what format we're
|
||||||
|
# dealing with.
|
||||||
|
MAX_SHARES = "max"
|
||||||
|
|
||||||
|
@frozen
|
||||||
|
class Sample:
|
||||||
|
"""
|
||||||
|
Some instructions for building a long byte string.
|
||||||
|
|
||||||
|
:ivar seed: Some bytes to repeat some times to produce the string.
|
||||||
|
:ivar length: The length of the desired byte string.
|
||||||
|
"""
|
||||||
|
seed: bytes
|
||||||
|
length: int
|
||||||
|
|
||||||
|
@frozen
|
||||||
|
class Param:
|
||||||
|
"""
|
||||||
|
Some ZFEC parameters.
|
||||||
|
"""
|
||||||
|
required: int
|
||||||
|
total: int
|
||||||
|
|
||||||
|
@frozen
|
||||||
|
class SeedParam:
|
||||||
|
"""
|
||||||
|
Some ZFEC parameters, almost.
|
||||||
|
|
||||||
|
:ivar required: The number of required shares.
|
||||||
|
|
||||||
|
:ivar total: Either the number of total shares or the constant
|
||||||
|
``MAX_SHARES`` to indicate that the total number of shares should be
|
||||||
|
the maximum number supported by the object format.
|
||||||
|
"""
|
||||||
|
required: int
|
||||||
|
total: int | str
|
||||||
|
|
||||||
|
def realize(self, max_total: int) -> Param:
|
||||||
|
"""
|
||||||
|
Create a ``Param`` from this object's values, possibly
|
||||||
|
substituting the given real value for total if necessary.
|
||||||
|
|
||||||
|
:param max_total: The value to use to replace ``MAX_SHARES`` if
|
||||||
|
necessary.
|
||||||
|
"""
|
||||||
|
if self.total == MAX_SHARES:
|
||||||
|
return Param(self.required, max_total)
|
||||||
|
return Param(self.required, self.total)
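For example (an illustrative sketch using the classes defined above)::

    # The symbolic maximum is replaced by the format's real limit;
    # concrete totals pass through unchanged.
    assert SeedParam(101, MAX_SHARES).realize(255) == Param(required=101, total=255)
    assert SeedParam(2, 3).realize(255) == Param(required=2, total=3)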
|
93
integration/vectors/parameters.py
Normal file
93
integration/vectors/parameters.py
Normal file
@ -0,0 +1,93 @@
|
|||||||
|
"""
|
||||||
|
Define input parameters for test vector generation.
|
||||||
|
|
||||||
|
:ivar CONVERGENCE_SECRETS: Convergence secrets.
|
||||||
|
|
||||||
|
:ivar SEGMENT_SIZE: The single segment size that the Python implementation
|
||||||
|
currently supports without a lot of refactoring.
|
||||||
|
|
||||||
|
:ivar OBJECT_DESCRIPTIONS: Small objects with instructions which can be
|
||||||
|
expanded into a possibly large byte string. These are intended to be used
|
||||||
|
as plaintext inputs.
|
||||||
|
|
||||||
|
:ivar ZFEC_PARAMS: Input parameters to ZFEC.
|
||||||
|
|
||||||
|
:ivar FORMATS: Encoding/encryption formats.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from hashlib import sha256
|
||||||
|
|
||||||
|
from .model import MAX_SHARES
|
||||||
|
from .vectors import Sample, SeedParam
|
||||||
|
from ..util import CHK, SSK
|
||||||
|
|
||||||
|
def digest(bs: bytes) -> bytes:
|
||||||
|
"""
|
||||||
|
Digest bytes to bytes.
|
||||||
|
"""
|
||||||
|
return sha256(bs).digest()
|
||||||
|
|
||||||
|
|
||||||
|
def hexdigest(bs: bytes) -> str:
|
||||||
|
"""
|
||||||
|
Digest bytes to text.
|
||||||
|
"""
|
||||||
|
return sha256(bs).hexdigest()
|
||||||
|
|
||||||
|
# Just a couple convergence secrets. The only thing we do with this value is
|
||||||
|
# feed it into a tagged hash. It certainly makes a difference to the output
|
||||||
|
# but the hash should destroy any structure in the input so it doesn't seem
|
||||||
|
# like there's a reason to test a lot of different values.
|
||||||
|
CONVERGENCE_SECRETS: list[bytes] = [
|
||||||
|
b"aaaaaaaaaaaaaaaa",
|
||||||
|
digest(b"Hello world")[:16],
|
||||||
|
]
|
||||||
|
|
||||||
|
SEGMENT_SIZE: int = 128 * 1024
|
||||||
|
|
||||||
|
# Exercise at least a handful of different sizes, trying to cover:
|
||||||
|
#
|
||||||
|
# 1. Some cases smaller than one "segment" (128k).
|
||||||
|
# This covers shrinking of some parameters to match data size.
|
||||||
|
# This includes one case of the smallest possible CHK.
|
||||||
|
#
|
||||||
|
# 2. Some cases right on the edges of integer segment multiples.
|
||||||
|
# Because boundaries are tricky.
|
||||||
|
#
|
||||||
|
# 3. Some cases that involve quite a few segments.
|
||||||
|
# This exercises merkle tree construction more thoroughly.
|
||||||
|
#
|
||||||
|
# See ``stretch`` for construction of the actual test data.
|
||||||
|
OBJECT_DESCRIPTIONS: list[Sample] = [
|
||||||
|
# The smallest possible. 55 bytes and smaller are LIT.
|
||||||
|
Sample(b"a", 56),
|
||||||
|
Sample(b"a", 1024),
|
||||||
|
Sample(b"c", 4096),
|
||||||
|
Sample(digest(b"foo"), SEGMENT_SIZE - 1),
|
||||||
|
Sample(digest(b"bar"), SEGMENT_SIZE + 1),
|
||||||
|
Sample(digest(b"baz"), SEGMENT_SIZE * 16 - 1),
|
||||||
|
Sample(digest(b"quux"), SEGMENT_SIZE * 16 + 1),
|
||||||
|
Sample(digest(b"bazquux"), SEGMENT_SIZE * 32),
|
||||||
|
Sample(digest(b"foobar"), SEGMENT_SIZE * 64 - 1),
|
||||||
|
Sample(digest(b"barbaz"), SEGMENT_SIZE * 64 + 1),
|
||||||
|
]
|
||||||
|
|
||||||
|
ZFEC_PARAMS: list[SeedParam] = [
|
||||||
|
SeedParam(1, 1),
|
||||||
|
SeedParam(1, 3),
|
||||||
|
SeedParam(2, 3),
|
||||||
|
SeedParam(3, 10),
|
||||||
|
SeedParam(71, 255),
|
||||||
|
SeedParam(101, MAX_SHARES),
|
||||||
|
]
|
||||||
|
|
||||||
|
FORMATS: list[CHK | SSK] = [
|
||||||
|
CHK(),
|
||||||
|
|
||||||
|
# These start out unaware of a key but various keys will be supplied
|
||||||
|
# during generation.
|
||||||
|
SSK(name="sdmf", key=None),
|
||||||
|
SSK(name="mdmf", key=None),
|
||||||
|
]
|
18002
integration/vectors/test_vectors.yaml
Executable file
18002
integration/vectors/test_vectors.yaml
Executable file
File diff suppressed because it is too large
155
integration/vectors/vectors.py
Normal file
155
integration/vectors/vectors.py
Normal file
@ -0,0 +1,155 @@
|
|||||||
|
"""
|
||||||
|
A module that loads pre-generated test vectors.
|
||||||
|
|
||||||
|
:ivar DATA_PATH: The path of the file containing test vectors.
|
||||||
|
|
||||||
|
:ivar capabilities: The capability test vectors.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import TextIO
|
||||||
|
from attrs import frozen
|
||||||
|
from yaml import safe_load, safe_dump
|
||||||
|
from base64 import b64encode, b64decode
|
||||||
|
|
||||||
|
from twisted.python.filepath import FilePath
|
||||||
|
|
||||||
|
from .model import Param, Sample, SeedParam
|
||||||
|
from ..util import CHK, SSK
|
||||||
|
|
||||||
|
DATA_PATH: FilePath = FilePath(__file__).sibling("test_vectors.yaml")
|
||||||
|
|
||||||
|
# The version of the persisted test vector data this code can interpret.
|
||||||
|
CURRENT_VERSION: str = "2023-01-16.2"
|
||||||
|
|
||||||
|
@frozen
|
||||||
|
class Case:
|
||||||
|
"""
|
||||||
|
Represent one case for which we want/have a test vector.
|
||||||
|
"""
|
||||||
|
seed_params: Param
|
||||||
|
convergence: bytes
|
||||||
|
seed_data: Sample
|
||||||
|
fmt: CHK | SSK
|
||||||
|
segment_size: int
|
||||||
|
|
||||||
|
@property
|
||||||
|
def data(self):
|
||||||
|
return stretch(self.seed_data.seed, self.seed_data.length)
|
||||||
|
|
||||||
|
@property
|
||||||
|
def params(self):
|
||||||
|
return self.seed_params.realize(self.fmt.max_shares)
|
||||||
|
|
||||||
|
|
||||||
|
def encode_bytes(b: bytes) -> str:
|
||||||
|
"""
|
||||||
|
Base64 encode some bytes to text so they are representable in JSON.
|
||||||
|
"""
|
||||||
|
return b64encode(b).decode("ascii")
|
||||||
|
|
||||||
|
|
||||||
|
def decode_bytes(b: str) -> bytes:
|
||||||
|
"""
|
||||||
|
Base64 decode some text to bytes.
|
||||||
|
"""
|
||||||
|
return b64decode(b.encode("ascii"))
|
||||||
|
|
||||||
|
|
||||||
|
def stretch(seed: bytes, size: int) -> bytes:
|
||||||
|
"""
|
||||||
|
Given a simple description of a byte string, return the byte string
|
||||||
|
itself.
|
||||||
|
"""
|
||||||
|
assert isinstance(seed, bytes)
|
||||||
|
assert isinstance(size, int)
|
||||||
|
assert size > 0
|
||||||
|
assert len(seed) > 0
|
||||||
|
|
||||||
|
multiples = size // len(seed) + 1
|
||||||
|
return (seed * multiples)[:size]
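For instance (a doctest-style sketch, not part of the module)::

    assert stretch(b"ab", 5) == b"ababa"
    assert stretch(b"xyz", 3) == b"xyz"

which is how ``Case.data`` expands a ``Sample(seed, length)`` description into the full plaintext.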
|
||||||
|
|
||||||
|
|
||||||
|
def save_capabilities(results: list[tuple[Case, str]], path: FilePath = DATA_PATH) -> None:
|
||||||
|
"""
|
||||||
|
Save some test vector cases and their expected values.
|
||||||
|
|
||||||
|
This is logically the inverse of ``load_capabilities``.
|
||||||
|
"""
|
||||||
|
path.setContent(safe_dump({
|
||||||
|
"version": CURRENT_VERSION,
|
||||||
|
"vector": [
|
||||||
|
{
|
||||||
|
"convergence": encode_bytes(case.convergence),
|
||||||
|
"format": {
|
||||||
|
"kind": case.fmt.kind,
|
||||||
|
"params": case.fmt.to_json(),
|
||||||
|
},
|
||||||
|
"sample": {
|
||||||
|
"seed": encode_bytes(case.seed_data.seed),
|
||||||
|
"length": case.seed_data.length,
|
||||||
|
},
|
||||||
|
"zfec": {
|
||||||
|
"segmentSize": case.segment_size,
|
||||||
|
"required": case.params.required,
|
||||||
|
"total": case.params.total,
|
||||||
|
},
|
||||||
|
"expected": cap,
|
||||||
|
}
|
||||||
|
for (case, cap)
|
||||||
|
in results
|
||||||
|
],
|
||||||
|
}).encode("ascii"))
|
||||||
|
|
||||||
|
|
||||||
|
def load_format(serialized: dict) -> CHK | SSK:
|
||||||
|
"""
|
||||||
|
Load an encrypted object format from a simple description of it.
|
||||||
|
|
||||||
|
:param serialized: A ``dict`` describing either CHK or SSK, possibly with
|
||||||
|
some parameters.
|
||||||
|
"""
|
||||||
|
if serialized["kind"] == "chk":
|
||||||
|
return CHK.load(serialized["params"])
|
||||||
|
elif serialized["kind"] == "ssk":
|
||||||
|
return SSK.load(serialized["params"])
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unrecognized format: {serialized}")
|
||||||
|
|
||||||
|
|
||||||
|
def load_capabilities(f: TextIO) -> dict[Case, str]:
|
||||||
|
"""
|
||||||
|
Load some test vector cases and their expected results from the given
|
||||||
|
file.
|
||||||
|
|
||||||
|
This is logically the inverse of ``save_capabilities``.
|
||||||
|
"""
|
||||||
|
data = safe_load(f)
|
||||||
|
if data is None:
|
||||||
|
return {}
|
||||||
|
if data["version"] != CURRENT_VERSION:
|
||||||
|
print(
|
||||||
|
f"Current version is {CURRENT_VERSION}; "
|
||||||
|
f"cannot load version {data['version']} data."
|
||||||
|
)
|
||||||
|
return {}
|
||||||
|
|
||||||
|
return {
|
||||||
|
Case(
|
||||||
|
seed_params=SeedParam(case["zfec"]["required"], case["zfec"]["total"]),
|
||||||
|
segment_size=case["zfec"]["segmentSize"],
|
||||||
|
convergence=decode_bytes(case["convergence"]),
|
||||||
|
seed_data=Sample(decode_bytes(case["sample"]["seed"]), case["sample"]["length"]),
|
||||||
|
fmt=load_format(case["format"]),
|
||||||
|
): case["expected"]
|
||||||
|
for case
|
||||||
|
in data["vector"]
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
with DATA_PATH.open() as f:
|
||||||
|
capabilities: dict[Case, str] = load_capabilities(f)
|
||||||
|
except FileNotFoundError:
|
||||||
|
capabilities = {}
|
36
misc/windows-enospc/passthrough.py
Normal file
36
misc/windows-enospc/passthrough.py
Normal file
@ -0,0 +1,36 @@
|
|||||||
|
"""
|
||||||
|
Writing to a non-blocking pipe can result in ENOSPC when using Unix APIs on
|
||||||
|
Windows. So, this program passes through data from stdin to stdout, using
|
||||||
|
Windows APIs instead of Unix-y APIs.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from twisted.internet.stdio import StandardIO
|
||||||
|
from twisted.internet import reactor
|
||||||
|
from twisted.internet.protocol import Protocol
|
||||||
|
from twisted.internet.interfaces import IHalfCloseableProtocol
|
||||||
|
from twisted.internet.error import ReactorNotRunning
|
||||||
|
from zope.interface import implementer
|
||||||
|
|
||||||
|
@implementer(IHalfCloseableProtocol)
|
||||||
|
class Passthrough(Protocol):
|
||||||
|
def readConnectionLost(self):
|
||||||
|
self.transport.loseConnection()
|
||||||
|
|
||||||
|
def writeConnectionLost(self):
|
||||||
|
try:
|
||||||
|
reactor.stop()
|
||||||
|
except ReactorNotRunning:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def dataReceived(self, data):
|
||||||
|
self.transport.write(data)
|
||||||
|
|
||||||
|
def connectionLost(self, reason):
|
||||||
|
try:
|
||||||
|
reactor.stop()
|
||||||
|
except ReactorNotRunning:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
std = StandardIO(Passthrough())
|
||||||
|
reactor.run()
|
7
mypy.ini
7
mypy.ini
@ -1,3 +1,10 @@
|
|||||||
[mypy]
|
[mypy]
|
||||||
ignore_missing_imports = True
|
ignore_missing_imports = True
|
||||||
plugins=mypy_zope:plugin
|
plugins=mypy_zope:plugin
|
||||||
|
show_column_numbers = True
|
||||||
|
pretty = True
|
||||||
|
show_error_codes = True
|
||||||
|
warn_unused_configs = True
|
||||||
|
no_implicit_optional = True
|
||||||
|
warn_redundant_casts = True
|
||||||
|
strict_equality = True
|
0
newsfragments/3783.minor
Normal file
0
newsfragments/3783.minor
Normal file
0
newsfragments/3870.minor
Normal file
0
newsfragments/3870.minor
Normal file
0
newsfragments/3874.minor
Normal file
0
newsfragments/3874.minor
Normal file
0
newsfragments/3914.minor
Normal file
0
newsfragments/3914.minor
Normal file
5
newsfragments/3921.feature
Normal file
5
newsfragments/3921.feature
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
`tahoe run ...` will now exit when its stdin is closed.
|
||||||
|
|
||||||
|
This facilitates subprocess management, specifically cleanup.
|
||||||
|
When a parent process that is running tahoe exits without time to do "proper" cleanup, at least the stdin descriptor will be closed.
|
||||||
|
Subsequently "tahoe run" notices this and exits.
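A hedged sketch of how a supervising process might rely on this (the node directory path is illustrative)::

    import subprocess

    # Keep a pipe attached to tahoe's stdin. If this supervisor exits (or
    # explicitly closes the pipe), "tahoe run" sees EOF on stdin and exits.
    proc = subprocess.Popen(["tahoe", "run", "/path/to/node"], stdin=subprocess.PIPE)

    # Later, ask the node to shut down:
    proc.stdin.close()
    proc.wait()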
|
1
newsfragments/3922.documentation
Normal file
1
newsfragments/3922.documentation
Normal file
@ -0,0 +1 @@
|
|||||||
|
Several minor errors in the Great Black Swamp proposed specification document have been fixed.
|
0
newsfragments/3927.minor
Normal file
0
newsfragments/3927.minor
Normal file
0
newsfragments/3937.minor
Normal file
0
newsfragments/3937.minor
Normal file
1
newsfragments/3938.bugfix
Normal file
1
newsfragments/3938.bugfix
Normal file
@ -0,0 +1 @@
|
|||||||
|
Work with (and require) newer versions of pycddl.
|
1
newsfragments/3939.bugfix
Normal file
1
newsfragments/3939.bugfix
Normal file
@ -0,0 +1 @@
|
|||||||
|
Uploading immutables will now better use available bandwidth, which should allow for faster uploads in many cases.
|
0
newsfragments/3940.minor
Normal file
0
newsfragments/3940.minor
Normal file
1
newsfragments/3942.minor
Normal file
1
newsfragments/3942.minor
Normal file
@ -0,0 +1 @@
|
|||||||
|
|
0
newsfragments/3944.minor
Normal file
0
newsfragments/3944.minor
Normal file
0
newsfragments/3947.minor
Normal file
0
newsfragments/3947.minor
Normal file
0
newsfragments/3950.minor
Normal file
0
newsfragments/3950.minor
Normal file
0
newsfragments/3952.minor
Normal file
0
newsfragments/3952.minor
Normal file
0
newsfragments/3953.minor
Normal file
0
newsfragments/3953.minor
Normal file
0
newsfragments/3954.minor
Normal file
0
newsfragments/3954.minor
Normal file
0
newsfragments/3956.minor
Normal file
0
newsfragments/3956.minor
Normal file
0
newsfragments/3958.minor
Normal file
0
newsfragments/3958.minor
Normal file
0
newsfragments/3960.minor
Normal file
0
newsfragments/3960.minor
Normal file
1
newsfragments/3961.other
Normal file
1
newsfragments/3961.other
Normal file
@ -0,0 +1 @@
|
|||||||
|
The integration test suite now includes a set of capability test vectors (``integration/vectors/test_vectors.yaml``) which can be used to verify compatibility between Tahoe-LAFS and other implementations.
|
1
newsfragments/3962.feature
Normal file
1
newsfragments/3962.feature
Normal file
@ -0,0 +1 @@
|
|||||||
|
Mutable objects can now be created with a pre-determined "signature key" using the ``tahoe put`` CLI or the HTTP API. This enables deterministic creation of mutable capabilities. This feature must be used with care to preserve the normal security and reliability properties.
|
1
newsfragments/3964.removed
Normal file
1
newsfragments/3964.removed
Normal file
@ -0,0 +1 @@
|
|||||||
|
Python 3.7 is no longer supported, and Debian 10 and Ubuntu 18.04 are no longer tested.
|
1
newsfragments/3966.bugfix
Normal file
1
newsfragments/3966.bugfix
Normal file
@ -0,0 +1 @@
|
|||||||
|
Fix incompatibility with transitive dependency charset_normalizer >= 3 when using PyInstaller.
|
0
newsfragments/3967.minor
Normal file
0
newsfragments/3967.minor
Normal file
0
newsfragments/3969.minor
Normal file
0
newsfragments/3969.minor
Normal file
1
newsfragments/3971.minor
Normal file
1
newsfragments/3971.minor
Normal file
@ -0,0 +1 @@
|
|||||||
|
Changes made to mypy.ini to make mypy more 'strict' and prevent future regressions.
|
0
newsfragments/3974.minor
Normal file
0
newsfragments/3974.minor
Normal file
1
newsfragments/3975.minor
Normal file
1
newsfragments/3975.minor
Normal file
@ -0,0 +1 @@
|
|||||||
|
Fixes truthy conditional in status.py
|
1
newsfragments/3976.minor
Normal file
1
newsfragments/3976.minor
Normal file
@ -0,0 +1 @@
|
|||||||
|
Fixes a variable name that shadowed a built-in type.
|
@ -1,14 +1,14 @@
|
|||||||
{
|
{
|
||||||
"mach-nix": {
|
"mach-nix": {
|
||||||
"branch": "master",
|
"branch": "switch-to-nix-pypi-fetcher-2",
|
||||||
"description": "Create highly reproducible python environments",
|
"description": "Create highly reproducible python environments",
|
||||||
"homepage": "",
|
"homepage": "",
|
||||||
"owner": "davhau",
|
"owner": "PrivateStorageio",
|
||||||
"repo": "mach-nix",
|
"repo": "mach-nix",
|
||||||
"rev": "bdc97ba6b2ecd045a467b008cff4ae337b6a7a6b",
|
"rev": "f6d1a1841d8778c199326f95d0703c16bee2f8c4",
|
||||||
"sha256": "12b3jc0g0ak6s93g3ifvdpwxbyqx276k1kl66bpwz8a67qjbcbwf",
|
"sha256": "0krc4yhnpbzc4yhja9frnmym2vqm5zyacjnqb3fq9z9gav8vs9ls",
|
||||||
"type": "tarball",
|
"type": "tarball",
|
||||||
"url": "https://github.com/davhau/mach-nix/archive/bdc97ba6b2ecd045a467b008cff4ae337b6a7a6b.tar.gz",
|
"url": "https://github.com/PrivateStorageio/mach-nix/archive/f6d1a1841d8778c199326f95d0703c16bee2f8c4.tar.gz",
|
||||||
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
|
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
|
||||||
},
|
},
|
||||||
"niv": {
|
"niv": {
|
||||||
@ -53,10 +53,10 @@
|
|||||||
"homepage": "",
|
"homepage": "",
|
||||||
"owner": "DavHau",
|
"owner": "DavHau",
|
||||||
"repo": "pypi-deps-db",
|
"repo": "pypi-deps-db",
|
||||||
"rev": "76b8f1e44a8ec051b853494bcf3cc8453a294a6a",
|
"rev": "5440c9c76f6431f300fb6a1ecae762a5444de5f6",
|
||||||
"sha256": "18fgqyh4z578jjhk26n1xi2cw2l98vrqp962rgz9a6wa5yh1nm4x",
|
"sha256": "08r3iiaxzw9v2gq15y1m9bwajshyyz9280g6aia7mkgnjs9hnd1n",
|
||||||
"type": "tarball",
|
"type": "tarball",
|
||||||
"url": "https://github.com/DavHau/pypi-deps-db/archive/76b8f1e44a8ec051b853494bcf3cc8453a294a6a.tar.gz",
|
"url": "https://github.com/DavHau/pypi-deps-db/archive/5440c9c76f6431f300fb6a1ecae762a5444de5f6.tar.gz",
|
||||||
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
|
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -36,6 +36,7 @@ hidden_imports = [
|
|||||||
'allmydata.stats',
|
'allmydata.stats',
|
||||||
'base64',
|
'base64',
|
||||||
'cffi',
|
'cffi',
|
||||||
|
'charset_normalizer.md__mypyc',
|
||||||
'collections',
|
'collections',
|
||||||
'commands',
|
'commands',
|
||||||
'Crypto',
|
'Crypto',
|
||||||
|
3
pytest.ini
Normal file
3
pytest.ini
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
[pytest]
|
||||||
|
markers =
|
||||||
|
slow: marks tests as slow (not run by default; run them with '--runslow')
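For example (a sketch; the ``pytest_collection_modifyitems`` hook added to ``integration/conftest.py`` above drops such tests unless ``--runslow`` is given)::

    from pytest import mark

    @mark.slow
    def test_expensive_case():
        ...

Slow tests can then be included explicitly, e.g. ``pytest integration --runslow``.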
|
29
relnotes.txt
29
relnotes.txt
@ -1,6 +1,6 @@
|
|||||||
ANNOUNCING Tahoe, the Least-Authority File Store, v1.17.1
|
ANNOUNCING Tahoe, the Least-Authority File Store, v1.18.0
|
||||||
|
|
||||||
The Tahoe-LAFS team is pleased to announce version 1.17.1 of
|
The Tahoe-LAFS team is pleased to announce version 1.18.0 of
|
||||||
Tahoe-LAFS, an extremely reliable decentralized storage
|
Tahoe-LAFS, an extremely reliable decentralized storage
|
||||||
system. Get it with "pip install tahoe-lafs", or download a
|
system. Get it with "pip install tahoe-lafs", or download a
|
||||||
tarball here:
|
tarball here:
|
||||||
@ -15,10 +15,12 @@ unique security and fault-tolerance properties:
|
|||||||
|
|
||||||
https://tahoe-lafs.readthedocs.org/en/latest/about.html
|
https://tahoe-lafs.readthedocs.org/en/latest/about.html
|
||||||
|
|
||||||
The previous stable release of Tahoe-LAFS was v1.17.0, released on
|
The previous stable release of Tahoe-LAFS was v1.17.1, released on
|
||||||
December 6, 2021.
|
January 7, 2022.
|
||||||
|
|
||||||
This release fixes two Python3-releated regressions and 4 minor bugs.
|
This release drops support for Python 2 and for Python 3.6 and earlier.
|
||||||
|
twistd.pid is no longer used (in favour of a pidfile that records the pid plus the process creation time).
|
||||||
|
A collection of minor bugs and issues were also fixed.
|
||||||
|
|
||||||
Please see ``NEWS.rst`` [1] for a complete list of changes.
|
Please see ``NEWS.rst`` [1] for a complete list of changes.
|
||||||
|
|
||||||
@ -132,24 +134,23 @@ Of Fame" [13].
|
|||||||
|
|
||||||
ACKNOWLEDGEMENTS
|
ACKNOWLEDGEMENTS
|
||||||
|
|
||||||
This is the nineteenth release of Tahoe-LAFS to be created
|
This is the twentieth release of Tahoe-LAFS to be created solely as a
|
||||||
solely as a labor of love by volunteers. Thank you very much
|
labor of love by volunteers. Thank you very much to the team of
|
||||||
to the team of "hackers in the public interest" who make
|
"hackers in the public interest" who make Tahoe-LAFS possible.
|
||||||
Tahoe-LAFS possible.
|
|
||||||
|
|
||||||
meejah
|
meejah
|
||||||
on behalf of the Tahoe-LAFS team
|
on behalf of the Tahoe-LAFS team
|
||||||
|
|
||||||
January 7, 2022
|
October 1, 2022
|
||||||
Planet Earth
|
Planet Earth
|
||||||
|
|
||||||
|
|
||||||
[1] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.17.1/NEWS.rst
|
[1] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.18.0/NEWS.rst
|
||||||
[2] https://github.com/tahoe-lafs/tahoe-lafs/blob/master/docs/known_issues.rst
|
[2] https://github.com/tahoe-lafs/tahoe-lafs/blob/master/docs/known_issues.rst
|
||||||
[3] https://tahoe-lafs.org/trac/tahoe-lafs/wiki/RelatedProjects
|
[3] https://tahoe-lafs.org/trac/tahoe-lafs/wiki/RelatedProjects
|
||||||
[4] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.17.1/COPYING.GPL
|
[4] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.18.0/COPYING.GPL
|
||||||
[5] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.17.1/COPYING.TGPPL.rst
|
[5] https://github.com/tahoe-lafs/tahoe-lafs/blob/tahoe-lafs-1.18.0/COPYING.TGPPL.rst
|
||||||
[6] https://tahoe-lafs.readthedocs.org/en/tahoe-lafs-1.17.1/INSTALL.html
|
[6] https://tahoe-lafs.readthedocs.org/en/tahoe-lafs-1.18.0/INSTALL.html
|
||||||
[7] https://lists.tahoe-lafs.org/mailman/listinfo/tahoe-dev
|
[7] https://lists.tahoe-lafs.org/mailman/listinfo/tahoe-dev
|
||||||
[8] https://tahoe-lafs.org/trac/tahoe-lafs/roadmap
|
[8] https://tahoe-lafs.org/trac/tahoe-lafs/roadmap
|
||||||
[9] https://github.com/tahoe-lafs/tahoe-lafs/blob/master/CREDITS
|
[9] https://github.com/tahoe-lafs/tahoe-lafs/blob/master/CREDITS
|
||||||
|
20
setup.py
20
setup.py
@ -96,7 +96,9 @@ install_requires = [
|
|||||||
# an sftp extra in Tahoe-LAFS, there is no point in having one.
|
# an sftp extra in Tahoe-LAFS, there is no point in having one.
|
||||||
# * Twisted 19.10 introduces Site.getContentFile which we use to get
|
# * Twisted 19.10 introduces Site.getContentFile which we use to get
|
||||||
# temporary upload files placed into a per-node temporary directory.
|
# temporary upload files placed into a per-node temporary directory.
|
||||||
"Twisted[tls,conch] >= 19.10.0",
|
# * Twisted 22.8.0 added support for coroutine-returning functions in many
|
||||||
|
# places (mainly via `maybeDeferred`)
|
||||||
|
"Twisted[tls,conch] >= 22.8.0",
|
||||||
|
|
||||||
"PyYAML >= 3.11",
|
"PyYAML >= 3.11",
|
||||||
|
|
||||||
@ -137,7 +139,10 @@ install_requires = [
|
|||||||
"werkzeug != 2.2.0",
|
"werkzeug != 2.2.0",
|
||||||
"treq",
|
"treq",
|
||||||
"cbor2",
|
"cbor2",
|
||||||
"pycddl",
|
|
||||||
|
# 0.4 adds the ability to pass in mmap() values which greatly reduces the
|
||||||
|
# amount of copying involved.
|
||||||
|
"pycddl >= 0.4",
|
||||||
|
|
||||||
# Command-line parsing
|
# Command-line parsing
|
||||||
"click >= 7.0",
|
"click >= 7.0",
|
||||||
@ -224,7 +229,7 @@ def run_command(args, cwd=None):
|
|||||||
use_shell = sys.platform == "win32"
|
use_shell = sys.platform == "win32"
|
||||||
try:
|
try:
|
||||||
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd, shell=use_shell)
|
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd, shell=use_shell)
|
||||||
except EnvironmentError as e: # if this gives a SyntaxError, note that Tahoe-LAFS requires Python 3.7+
|
except EnvironmentError as e: # if this gives a SyntaxError, note that Tahoe-LAFS requires Python 3.8+
|
||||||
print("Warning: unable to run %r." % (" ".join(args),))
|
print("Warning: unable to run %r." % (" ".join(args),))
|
||||||
print(e)
|
print(e)
|
||||||
return None
|
return None
|
||||||
@ -375,8 +380,8 @@ setup(name="tahoe-lafs", # also set in __init__.py
|
|||||||
package_dir = {'':'src'},
|
package_dir = {'':'src'},
|
||||||
packages=find_packages('src') + ['allmydata.test.plugins'],
|
packages=find_packages('src') + ['allmydata.test.plugins'],
|
||||||
classifiers=trove_classifiers,
|
classifiers=trove_classifiers,
|
||||||
# We support Python 3.7 or later. 3.11 is not supported yet.
|
# We support Python 3.8 or later. 3.11 is not supported yet.
|
||||||
python_requires=">=3.7, <3.11",
|
python_requires=">=3.8, <3.11",
|
||||||
install_requires=install_requires,
|
install_requires=install_requires,
|
||||||
extras_require={
|
extras_require={
|
||||||
# Duplicate the Twisted pywin32 dependency here. See
|
# Duplicate the Twisted pywin32 dependency here. See
|
||||||
@ -389,9 +394,6 @@ setup(name="tahoe-lafs", # also set in __init__.py
|
|||||||
],
|
],
|
||||||
"test": [
|
"test": [
|
||||||
"flake8",
|
"flake8",
|
||||||
# On Python 3.7, importlib_metadata v5 breaks flake8.
|
|
||||||
# https://github.com/python/importlib_metadata/issues/407
|
|
||||||
"importlib_metadata<5; python_version < '3.8'",
|
|
||||||
# Pin a specific pyflakes so we don't have different folks
|
# Pin a specific pyflakes so we don't have different folks
|
||||||
# disagreeing on what is or is not a lint issue. We can bump
|
# disagreeing on what is or is not a lint issue. We can bump
|
||||||
# this version from time to time, but we will do it
|
# this version from time to time, but we will do it
|
||||||
@ -399,7 +401,7 @@ setup(name="tahoe-lafs", # also set in __init__.py
|
|||||||
"pyflakes == 2.2.0",
|
"pyflakes == 2.2.0",
|
||||||
"coverage ~= 5.0",
|
"coverage ~= 5.0",
|
||||||
"mock",
|
"mock",
|
||||||
"tox",
|
"tox ~= 3.0",
|
||||||
"pytest",
|
"pytest",
|
||||||
"pytest-twisted",
|
"pytest-twisted",
|
||||||
"hypothesis >= 3.6.1",
|
"hypothesis >= 3.6.1",
|
||||||
|
@ -36,6 +36,7 @@ from allmydata.storage.server import StorageServer, FoolscapStorageServer
|
|||||||
from allmydata import storage_client
|
from allmydata import storage_client
|
||||||
from allmydata.immutable.upload import Uploader
|
from allmydata.immutable.upload import Uploader
|
||||||
from allmydata.immutable.offloaded import Helper
|
from allmydata.immutable.offloaded import Helper
|
||||||
|
from allmydata.mutable.filenode import MutableFileNode
|
||||||
from allmydata.introducer.client import IntroducerClient
|
from allmydata.introducer.client import IntroducerClient
|
||||||
from allmydata.util import (
|
from allmydata.util import (
|
||||||
hashutil, base32, pollmixin, log, idlib,
|
hashutil, base32, pollmixin, log, idlib,
|
||||||
@ -110,6 +111,7 @@ _client_config = configutil.ValidConfiguration(
|
|||||||
"storage_dir",
|
"storage_dir",
|
||||||
"plugins",
|
"plugins",
|
||||||
"grid_management",
|
"grid_management",
|
||||||
|
"force_foolscap",
|
||||||
),
|
),
|
||||||
"sftpd": (
|
"sftpd": (
|
||||||
"accounts.file",
|
"accounts.file",
|
||||||
@ -832,9 +834,10 @@ class _Client(node.Node, pollmixin.PollMixin):
|
|||||||
furl_file = self.config.get_private_path("storage.furl").encode(get_filesystem_encoding())
|
furl_file = self.config.get_private_path("storage.furl").encode(get_filesystem_encoding())
|
||||||
furl = self.tub.registerReference(FoolscapStorageServer(ss), furlFile=furl_file)
|
furl = self.tub.registerReference(FoolscapStorageServer(ss), furlFile=furl_file)
|
||||||
(_, _, swissnum) = decode_furl(furl)
|
(_, _, swissnum) = decode_furl(furl)
|
||||||
self.storage_nurls = self.tub.negotiationClass.add_storage_server(
|
if hasattr(self.tub.negotiationClass, "add_storage_server"):
|
||||||
ss, swissnum.encode("ascii")
|
nurls = self.tub.negotiationClass.add_storage_server(ss, swissnum.encode("ascii"))
|
||||||
)
|
self.storage_nurls = nurls
|
||||||
|
announcement[storage_client.ANONYMOUS_STORAGE_NURLS] = [n.to_text() for n in nurls]
|
||||||
announcement["anonymous-storage-FURL"] = furl
|
announcement["anonymous-storage-FURL"] = furl
|
||||||
|
|
||||||
enabled_storage_servers = self._enable_storage_servers(
|
enabled_storage_servers = self._enable_storage_servers(
|
||||||
@@ -1103,9 +1106,40 @@ class _Client(node.Node, pollmixin.PollMixin):
     def create_immutable_dirnode(self, children, convergence=None):
         return self.nodemaker.create_immutable_directory(children, convergence)

-    def create_mutable_file(self, contents=None, version=None):
+    def create_mutable_file(
+            self,
+            contents: bytes | None = None,
+            version: int | None = None,
+            *,
+            unique_keypair: tuple[rsa.PublicKey, rsa.PrivateKey] | None = None,
+    ) -> MutableFileNode:
+        """
+        Create *and upload* a new mutable object.
+
+        :param contents: If given, the initial contents for the new object.
+
+        :param version: If given, the mutable file format for the new object
+            (otherwise a format will be chosen automatically).
+
+        :param unique_keypair: **Warning** This value independently determines
+            the identity of the mutable object to create.  There cannot be two
+            different mutable objects that share a keypair.  They will merge
+            into one object (with undefined contents).
+
+            It is common to pass a None value (or not pass a value) for this
+            parameter.  In these cases, a new random keypair will be
+            generated.
+
+            If non-None, the given public/private keypair will be used for the
+            new object.  The expected use-case is for implementing compliance
+            tests.
+
+        :return: A Deferred which will fire with a representation of the new
+            mutable object after it has been uploaded.
+        """
         return self.nodemaker.create_mutable_file(contents,
-                                                  version=version)
+                                                  version=version,
+                                                  keypair=unique_keypair)

     def upload(self, uploadable, reactor=None):
         uploader = self.getServiceNamed("uploader")
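Not part of the diff: a hypothetical usage sketch of the new keyword-only ``unique_keypair`` argument. The ``client`` object and the 2048-bit key size are assumptions; the keypair helper comes from ``allmydata.crypto.rsa``.

# Sketch: create one mutable the ordinary way and one with an explicit keypair.
from twisted.internet.defer import inlineCallbacks
from allmydata.crypto.rsa import create_signing_keypair

@inlineCallbacks
def make_mutables(client):
    # Ordinary case: no keypair given, a fresh random keypair is generated.
    node_a = yield client.create_mutable_file(b"initial contents")

    # Compliance-test case: supply an explicit (public, private) keypair.
    # Reusing this keypair for a second mutable would merge the two objects.
    priv, pub = create_signing_keypair(2048)
    node_b = yield client.create_mutable_file(
        b"other contents", unique_keypair=(pub, priv)
    )
    return node_a, node_b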
@@ -9,17 +9,14 @@ features of any objects that `cryptography` documents.

 That is, the public and private keys are opaque objects; DO NOT depend
 on any of their methods.

-Ported to Python 3.
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
+from __future__ import annotations
+
+from typing_extensions import TypeAlias
+from typing import Callable
+
+from functools import partial

 from cryptography.exceptions import InvalidSignature
 from cryptography.hazmat.backends import default_backend
@@ -30,6 +27,8 @@ from cryptography.hazmat.primitives.serialization import load_der_private_key, l

 from allmydata.crypto.error import BadSignature

+PublicKey: TypeAlias = rsa.RSAPublicKey
+PrivateKey: TypeAlias = rsa.RSAPrivateKey
+
 # This is the value that was used by `pycryptopp`, and we must continue to use it for
 # both backwards compatibility and interoperability.
@@ -46,12 +45,12 @@ RSA_PADDING = padding.PSS(


-def create_signing_keypair(key_size):
+def create_signing_keypair(key_size: int) -> tuple[PrivateKey, PublicKey]:
     """
     Create a new RSA signing (private) keypair from scratch. Can be used with
     `sign_data` function.

-    :param int key_size: length of key in bits
+    :param key_size: length of key in bits

     :returns: 2-tuple of (private_key, public_key)
     """
@@ -63,32 +62,62 @@ def create_signing_keypair(key_size):
     return priv_key, priv_key.public_key()


-def create_signing_keypair_from_string(private_key_der):
+def create_signing_keypair_from_string(private_key_der: bytes) -> tuple[PrivateKey, PublicKey]:
     """
     Create an RSA signing (private) key from previously serialized
     private key bytes.

-    :param bytes private_key_der: blob as returned from `der_string_from_signing_keypair`
+    :param private_key_der: blob as returned from `der_string_from_signing_keypair`

     :returns: 2-tuple of (private_key, public_key)
     """
-    priv_key = load_der_private_key(
+    _load = partial(
+        load_der_private_key,
         private_key_der,
         password=None,
         backend=default_backend(),
     )
-    if not isinstance(priv_key, rsa.RSAPrivateKey):
+
+    def load_with_validation() -> PrivateKey:
+        k = _load()
+        assert isinstance(k, PrivateKey)
+        return k
+
+    def load_without_validation() -> PrivateKey:
+        k = _load(unsafe_skip_rsa_key_validation=True)
+        assert isinstance(k, PrivateKey)
+        return k
+
+    # Load it once without the potentially expensive OpenSSL validation
+    # checks.  These have superlinear complexity.  We *will* run them just
+    # below - but first we'll apply our own constant-time checks.
+    load: Callable[[], PrivateKey] = load_without_validation
+    try:
+        unsafe_priv_key = load()
+    except TypeError:
+        # cryptography<39 does not support this parameter, so just load the
+        # key with validation...
+        unsafe_priv_key = load_with_validation()
+        # But avoid *reloading* it since that will run the expensive
+        # validation *again*.
+        load = lambda: unsafe_priv_key
+
+    if not isinstance(unsafe_priv_key, rsa.RSAPrivateKey):
         raise ValueError(
             "Private Key did not decode to an RSA key"
         )
-    if priv_key.key_size != 2048:
+    if unsafe_priv_key.key_size != 2048:
         raise ValueError(
             "Private Key must be 2048 bits"
         )
-    return priv_key, priv_key.public_key()
+
+    # Now re-load it with OpenSSL's validation applied.
+    safe_priv_key = load()
+
+    return safe_priv_key, safe_priv_key.public_key()


-def der_string_from_signing_key(private_key):
+def der_string_from_signing_key(private_key: PrivateKey) -> bytes:
     """
     Serializes a given RSA private key to a DER string

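Not part of the diff: a hedged round-trip sketch of these helpers, assuming the module is importable as ``allmydata.crypto.rsa``.

# Sketch: serialize, reload, sign and verify with the helpers above.
from allmydata.crypto import rsa

priv, pub = rsa.create_signing_keypair(2048)          # from_string() rejects non-2048-bit keys
blob = rsa.der_string_from_signing_key(priv)          # DER bytes, handle as a secret
priv2, pub2 = rsa.create_signing_keypair_from_string(blob)

sig = rsa.sign_data(priv2, b"hello world")
rsa.verify_signature(pub, sig, b"hello world")        # raises BadSignature on mismatch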
@@ -98,14 +127,14 @@ def der_string_from_signing_key(private_key):
     :returns: bytes representing `private_key`
     """
     _validate_private_key(private_key)
-    return private_key.private_bytes(
+    return private_key.private_bytes(  # type: ignore[attr-defined]
         encoding=Encoding.DER,
         format=PrivateFormat.PKCS8,
         encryption_algorithm=NoEncryption(),
     )


-def der_string_from_verifying_key(public_key):
+def der_string_from_verifying_key(public_key: PublicKey) -> bytes:
     """
     Serializes a given RSA public key to a DER string.

@@ -121,7 +150,7 @@ def der_string_from_verifying_key(public_key):
     )


-def create_verifying_key_from_string(public_key_der):
+def create_verifying_key_from_string(public_key_der: bytes) -> PublicKey:
     """
     Create an RSA verifying key from a previously serialized public key

|
|||||||
public_key_der,
|
public_key_der,
|
||||||
backend=default_backend(),
|
backend=default_backend(),
|
||||||
)
|
)
|
||||||
|
assert isinstance(pub_key, PublicKey)
|
||||||
return pub_key
|
return pub_key
|
||||||
|
|
||||||
|
|
||||||
def sign_data(private_key, data):
|
def sign_data(private_key: PrivateKey, data: bytes) -> bytes:
|
||||||
"""
|
"""
|
||||||
:param private_key: the private part of a keypair returned from
|
:param private_key: the private part of a keypair returned from
|
||||||
`create_signing_keypair_from_string` or `create_signing_keypair`
|
`create_signing_keypair_from_string` or `create_signing_keypair`
|
||||||
|
|
||||||
:param bytes data: the bytes to sign
|
:param data: the bytes to sign
|
||||||
|
|
||||||
:returns: bytes which are a signature of the bytes given as `data`.
|
:returns: bytes which are a signature of the bytes given as `data`.
|
||||||
"""
|
"""
|
||||||
@@ -153,7 +183,7 @@ def sign_data(private_key, data):
         hashes.SHA256(),
     )

-def verify_signature(public_key, alleged_signature, data):
+def verify_signature(public_key: PublicKey, alleged_signature: bytes, data: bytes) -> None:
     """
     :param public_key: a verifying key, returned from `create_verifying_key_from_string` or `create_verifying_key_from_private_key`

@@ -173,23 +203,23 @@ def verify_signature(public_key, alleged_signature, data):
         raise BadSignature()


-def _validate_public_key(public_key):
+def _validate_public_key(public_key: PublicKey) -> None:
     """
     Internal helper. Checks that `public_key` is a valid cryptography
     object
     """
     if not isinstance(public_key, rsa.RSAPublicKey):
         raise ValueError(
-            "public_key must be an RSAPublicKey"
+            f"public_key must be an RSAPublicKey not {type(public_key)}"
         )


-def _validate_private_key(private_key):
+def _validate_private_key(private_key: PrivateKey) -> None:
     """
     Internal helper. Checks that `public_key` is a valid cryptography
     object
     """
     if not isinstance(private_key, rsa.RSAPrivateKey):
         raise ValueError(
-            "private_key must be an RSAPrivateKey"
+            f"private_key must be an RSAPrivateKey not {type(private_key)}"
         )
@@ -20,7 +20,7 @@ class History(object):
     MAX_UPLOAD_STATUSES = 10
     MAX_MAPUPDATE_STATUSES = 20
     MAX_PUBLISH_STATUSES = 20
-    MAX_RETRIEVE_STATUSES = 20
+    MAX_RETRIEVE_STATUSES = 40

     def __init__(self, stats_provider=None):
         self.stats_provider = stats_provider
@@ -262,6 +262,8 @@ class Encoder(object):

         d.addCallback(lambda res: self.finish_hashing())

+        # These calls have to happen in order; layout.py now requires writes to
+        # be appended to the data written so far.
         d.addCallback(lambda res:
                       self.send_crypttext_hash_tree_to_all_shareholders())
         d.addCallback(lambda res: self.send_all_block_hash_trees())
@@ -1,21 +1,18 @@
 """
 Ported to Python 3.
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
+from __future__ import annotations

 import struct
+from io import BytesIO
+
+from attrs import define, field
 from zope.interface import implementer
 from twisted.internet import defer
 from allmydata.interfaces import IStorageBucketWriter, IStorageBucketReader, \
      FileTooLargeError, HASH_SIZE
-from allmydata.util import mathutil, observer, pipeline, log
+from allmydata.util import mathutil, observer, log
 from allmydata.util.assertutil import precondition
 from allmydata.storage.server import si_b2a

@@ -107,19 +104,58 @@ def make_write_bucket_proxy(rref, server,
                                  num_share_hashes, uri_extension_size)
     return wbp


+@define
+class _WriteBuffer:
+    """
+    Queue up small writes to be written in a single batched larger write.
+    """
+    _batch_size: int
+    _to_write : BytesIO = field(factory=BytesIO)
+    _written_bytes : int = field(default=0)
+
+    def queue_write(self, data: bytes) -> bool:
+        """
+        Queue a write.  If the result is ``False``, no further action is needed
+        for now.  If the result is some ``True``, it's time to call ``flush()``
+        and do a real write.
+        """
+        self._to_write.write(data)
+        return self.get_queued_bytes() >= self._batch_size
+
+    def flush(self) -> tuple[int, bytes]:
+        """Return offset and data to be written."""
+        offset = self._written_bytes
+        data = self._to_write.getvalue()
+        self._written_bytes += len(data)
+        self._to_write = BytesIO()
+        return (offset, data)
+
+    def get_queued_bytes(self) -> int:
+        """Return number of queued, unwritten bytes."""
+        return self._to_write.tell()
+
+    def get_total_bytes(self) -> int:
+        """Return how many bytes were written or queued in total."""
+        return self._written_bytes + self.get_queued_bytes()
+
+
 @implementer(IStorageBucketWriter)
 class WriteBucketProxy(object):
+    """
+    Note: The various ``put_`` methods need to be called in the order in which the
+    bytes will get written.
+    """
     fieldsize = 4
     fieldstruct = ">L"

     def __init__(self, rref, server, data_size, block_size, num_segments,
-                 num_share_hashes, uri_extension_size, pipeline_size=50000):
+                 num_share_hashes, uri_extension_size, batch_size=1_000_000):
         self._rref = rref
         self._server = server
         self._data_size = data_size
         self._block_size = block_size
         self._num_segments = num_segments
-        self._written_bytes = 0

         effective_segments = mathutil.next_power_of_k(num_segments,2)
         self._segment_hash_size = (2*effective_segments - 1) * HASH_SIZE
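Not part of the diff: a hypothetical sketch of how ``_WriteBuffer`` batches small writes; the 10-byte batch size is chosen only for illustration.

# Sketch: queue_write() reports when a batch is ready, flush() hands back the batch.
wb = _WriteBuffer(10)                        # batch size of 10 bytes, for illustration

assert wb.queue_write(b"abc") is False       # 3 bytes queued, below the batch size
assert wb.queue_write(b"defghij") is True    # 10 bytes queued, time to flush
offset, data = wb.flush()                    # (0, b"abcdefghij")
assert wb.get_queued_bytes() == 0
assert wb.get_total_bytes() == 10            # written plus queued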
@@ -130,11 +166,13 @@ class WriteBucketProxy(object):

         self._create_offsets(block_size, data_size)

-        # k=3, max_segment_size=128KiB gives us a typical segment of 43691
-        # bytes. Setting the default pipeline_size to 50KB lets us get two
-        # segments onto the wire but not a third, which would keep the pipe
-        # filled.
-        self._pipeline = pipeline.Pipeline(pipeline_size)
+        # With a ~1MB batch size, max upload speed is 1MB/(round-trip latency)
+        # assuming the writing code waits for writes to finish, so 20MB/sec if
+        # latency is 50ms. In the US many people only have 1MB/sec upload speed
+        # as of 2022 (standard Comcast). For further discussion of how one
+        # might set batch sizes see
+        # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3787#comment:1.
+        self._write_buffer = _WriteBuffer(batch_size)

     def get_allocated_size(self):
         return (self._offsets['uri_extension'] + self.fieldsize +
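Not part of the diff: the throughput arithmetic in the comment above, written out as a tiny illustrative helper.

# Illustration only: effective upload throughput when each batch must complete
# a full round trip before the next one starts.
def max_throughput_mb_per_s(batch_size_bytes: int, rtt_seconds: float) -> float:
    return batch_size_bytes / rtt_seconds / 1_000_000

print(max_throughput_mb_per_s(1_000_000, 0.050))   # 20.0 MB/s at 50 ms RTT
print(max_throughput_mb_per_s(1_000_000, 0.200))   #  5.0 MB/s at 200 ms RTT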
@@ -179,7 +217,7 @@ class WriteBucketProxy(object):
         return "<WriteBucketProxy for node %r>" % self._server.get_name()

     def put_header(self):
-        return self._write(0, self._offset_data)
+        return self._queue_write(0, self._offset_data)

     def put_block(self, segmentnum, data):
         offset = self._offsets['data'] + segmentnum * self._block_size
@@ -193,13 +231,13 @@ class WriteBucketProxy(object):
                                 (self._block_size *
                                  (self._num_segments - 1))),
                      len(data), self._block_size)
-        return self._write(offset, data)
+        return self._queue_write(offset, data)

     def put_crypttext_hashes(self, hashes):
         # plaintext_hash_tree precedes crypttext_hash_tree. It is not used, and
         # so is not explicitly written, but we need to write everything, so
         # fill it in with nulls.
-        d = self._write(self._offsets['plaintext_hash_tree'], b"\x00" * self._segment_hash_size)
+        d = self._queue_write(self._offsets['plaintext_hash_tree'], b"\x00" * self._segment_hash_size)
         d.addCallback(lambda _: self._really_put_crypttext_hashes(hashes))
         return d

@@ -212,7 +250,7 @@ class WriteBucketProxy(object):
         precondition(offset + len(data) <= self._offsets['block_hashes'],
                      offset, len(data), offset+len(data),
                      self._offsets['block_hashes'])
-        return self._write(offset, data)
+        return self._queue_write(offset, data)

     def put_block_hashes(self, blockhashes):
         offset = self._offsets['block_hashes']
@@ -223,7 +261,7 @@ class WriteBucketProxy(object):
         precondition(offset + len(data) <= self._offsets['share_hashes'],
                      offset, len(data), offset+len(data),
                      self._offsets['share_hashes'])
-        return self._write(offset, data)
+        return self._queue_write(offset, data)

     def put_share_hashes(self, sharehashes):
         # sharehashes is a list of (index, hash) tuples, so they get stored
@@ -237,29 +275,45 @@ class WriteBucketProxy(object):
         precondition(offset + len(data) <= self._offsets['uri_extension'],
                      offset, len(data), offset+len(data),
                      self._offsets['uri_extension'])
-        return self._write(offset, data)
+        return self._queue_write(offset, data)

     def put_uri_extension(self, data):
         offset = self._offsets['uri_extension']
         assert isinstance(data, bytes)
         precondition(len(data) == self._uri_extension_size)
         length = struct.pack(self.fieldstruct, len(data))
-        return self._write(offset, length+data)
+        return self._queue_write(offset, length+data)

-    def _write(self, offset, data):
-        # use a Pipeline to pipeline several writes together. TODO: another
-        # speedup would be to coalesce small writes into a single call: this
-        # would reduce the foolscap CPU overhead per share, but wouldn't
-        # reduce the number of round trips, so it might not be worth the
-        # effort.
-        self._written_bytes += len(data)
-        return self._pipeline.add(len(data),
-                                  self._rref.callRemote, "write", offset, data)
+    def _queue_write(self, offset, data):
+        """
+        This queues up small writes to be written in a single batched larger
+        write.
+
+        Callers of this function are expected to queue the data in order, with
+        no holes.  As such, the offset is technically unnecessary, but is used
+        to check the inputs.  Possibly we should get rid of it.
+        """
+        assert offset == self._write_buffer.get_total_bytes()
+        if self._write_buffer.queue_write(data):
+            return self._actually_write()
+        else:
+            return defer.succeed(False)
+
+    def _actually_write(self):
+        """Write data to the server."""
+        offset, data = self._write_buffer.flush()
+        return self._rref.callRemote("write", offset, data)

     def close(self):
-        assert self._written_bytes == self.get_allocated_size(), f"{self._written_bytes} != {self.get_allocated_size()}"
-        d = self._pipeline.add(0, self._rref.callRemote, "close")
-        d.addCallback(lambda ign: self._pipeline.flush())
+        assert self._write_buffer.get_total_bytes() == self.get_allocated_size(), (
+            f"{self._write_buffer.get_total_bytes()} != {self.get_allocated_size()}"
+        )
+        if self._write_buffer.get_queued_bytes() > 0:
+            d = self._actually_write()
+        else:
+            # No data queued, don't send empty string write.
+            d = defer.succeed(True)
+        d.addCallback(lambda _: self._rref.callRemote("close"))
         return d

     def abort(self):
|
|||||||
self._fieldsize = fieldsize
|
self._fieldsize = fieldsize
|
||||||
self._fieldstruct = fieldstruct
|
self._fieldstruct = fieldstruct
|
||||||
|
|
||||||
for field in ( 'data',
|
for field_name in ( 'data',
|
||||||
'plaintext_hash_tree', # UNUSED
|
'plaintext_hash_tree', # UNUSED
|
||||||
'crypttext_hash_tree',
|
'crypttext_hash_tree',
|
||||||
'block_hashes',
|
'block_hashes',
|
||||||
@ -380,7 +434,7 @@ class ReadBucketProxy(object):
|
|||||||
):
|
):
|
||||||
offset = struct.unpack(fieldstruct, data[x:x+fieldsize])[0]
|
offset = struct.unpack(fieldstruct, data[x:x+fieldsize])[0]
|
||||||
x += fieldsize
|
x += fieldsize
|
||||||
self._offsets[field] = offset
|
self._offsets[field_name] = offset
|
||||||
return self._offsets
|
return self._offsets
|
||||||
|
|
||||||
def _get_block_data(self, unused, blocknum, blocksize, thisblocksize):
|
def _get_block_data(self, unused, blocknum, blocksize, thisblocksize):
|
||||||
|
@ -1,14 +1,7 @@
|
|||||||
"""
|
"""
|
||||||
Ported to Python 3.
|
Ported to Python 3.
|
||||||
"""
|
"""
|
||||||
from __future__ import absolute_import
|
from __future__ import annotations
|
||||||
from __future__ import division
|
|
||||||
from __future__ import print_function
|
|
||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
from future.utils import PY2
|
|
||||||
if PY2:
|
|
||||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
|
||||||
|
|
||||||
MODE_CHECK = "MODE_CHECK" # query all peers
|
MODE_CHECK = "MODE_CHECK" # query all peers
|
||||||
MODE_ANYTHING = "MODE_ANYTHING" # one recoverable version
|
MODE_ANYTHING = "MODE_ANYTHING" # one recoverable version
|
||||||
@ -17,6 +10,9 @@ MODE_WRITE = "MODE_WRITE" # replace all shares, probably.. not for initial
|
|||||||
MODE_READ = "MODE_READ"
|
MODE_READ = "MODE_READ"
|
||||||
MODE_REPAIR = "MODE_REPAIR" # query all peers, get the privkey
|
MODE_REPAIR = "MODE_REPAIR" # query all peers, get the privkey
|
||||||
|
|
||||||
|
from allmydata.crypto import aes, rsa
|
||||||
|
from allmydata.util import hashutil
|
||||||
|
|
||||||
class NotWriteableError(Exception):
|
class NotWriteableError(Exception):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@ -68,3 +64,33 @@ class CorruptShareError(BadShareError):
|
|||||||
|
|
||||||
class UnknownVersionError(BadShareError):
|
class UnknownVersionError(BadShareError):
|
||||||
"""The share we received was of a version we don't recognize."""
|
"""The share we received was of a version we don't recognize."""
|
||||||
|
|
||||||
|
|
||||||
|
def encrypt_privkey(writekey: bytes, privkey: bytes) -> bytes:
|
||||||
|
"""
|
||||||
|
For SSK, encrypt a private ("signature") key using the writekey.
|
||||||
|
"""
|
||||||
|
encryptor = aes.create_encryptor(writekey)
|
||||||
|
crypttext = aes.encrypt_data(encryptor, privkey)
|
||||||
|
return crypttext
|
||||||
|
|
||||||
|
def decrypt_privkey(writekey: bytes, enc_privkey: bytes) -> rsa.PrivateKey:
|
||||||
|
"""
|
||||||
|
The inverse of ``encrypt_privkey``.
|
||||||
|
"""
|
||||||
|
decryptor = aes.create_decryptor(writekey)
|
||||||
|
privkey = aes.decrypt_data(decryptor, enc_privkey)
|
||||||
|
return privkey
|
||||||
|
|
||||||
|
def derive_mutable_keys(keypair: tuple[rsa.PublicKey, rsa.PrivateKey]) -> tuple[bytes, bytes, bytes]:
|
||||||
|
"""
|
||||||
|
Derive the SSK writekey, encrypted writekey, and fingerprint from the
|
||||||
|
public/private ("verification" / "signature") keypair.
|
||||||
|
"""
|
||||||
|
pubkey, privkey = keypair
|
||||||
|
pubkey_s = rsa.der_string_from_verifying_key(pubkey)
|
||||||
|
privkey_s = rsa.der_string_from_signing_key(privkey)
|
||||||
|
writekey = hashutil.ssk_writekey_hash(privkey_s)
|
||||||
|
encprivkey = encrypt_privkey(writekey, privkey_s)
|
||||||
|
fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s)
|
||||||
|
return writekey, encprivkey, fingerprint
|
||||||
|
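Not part of the diff: a hedged round-trip sketch for the new module-level helpers, assuming they live in ``allmydata.mutable.common`` as shown above.

# Sketch: derive the SSK secrets from a keypair and check the privkey round trip.
from allmydata.crypto import rsa
from allmydata.mutable.common import (
    encrypt_privkey, decrypt_privkey, derive_mutable_keys,
)

priv, pub = rsa.create_signing_keypair(2048)
writekey, encprivkey, fingerprint = derive_mutable_keys((pub, priv))

# decrypt_privkey undoes encrypt_privkey, recovering the DER-encoded signing key.
assert decrypt_privkey(writekey, encprivkey) == rsa.der_string_from_signing_key(priv)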
@@ -1,14 +1,7 @@
 """
 Ported to Python 3.
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
+from __future__ import annotations

 import random

@@ -16,8 +9,6 @@ from zope.interface import implementer
 from twisted.internet import defer, reactor
 from foolscap.api import eventually

-from allmydata.crypto import aes
-from allmydata.crypto import rsa
 from allmydata.interfaces import IMutableFileNode, ICheckable, ICheckResults, \
      NotEnoughSharesError, MDMF_VERSION, SDMF_VERSION, IMutableUploadable, \
      IMutableFileVersion, IWriteable
@@ -28,8 +19,14 @@ from allmydata.uri import WriteableSSKFileURI, ReadonlySSKFileURI, \
 from allmydata.monitor import Monitor
 from allmydata.mutable.publish import Publish, MutableData,\
                                       TransformingUploadable
-from allmydata.mutable.common import MODE_READ, MODE_WRITE, MODE_CHECK, UnrecoverableFileError, \
-     UncoordinatedWriteError
+from allmydata.mutable.common import (
+    MODE_READ,
+    MODE_WRITE,
+    MODE_CHECK,
+    UnrecoverableFileError,
+    UncoordinatedWriteError,
+    derive_mutable_keys,
+)
 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
 from allmydata.mutable.retrieve import Retrieve
 from allmydata.mutable.checker import MutableChecker, MutableCheckAndRepairer
@@ -139,13 +136,10 @@ class MutableFileNode(object):
         Deferred that fires (with the MutableFileNode instance you should
         use) when it completes.
         """
-        (pubkey, privkey) = keypair
-        self._pubkey, self._privkey = pubkey, privkey
-        pubkey_s = rsa.der_string_from_verifying_key(self._pubkey)
-        privkey_s = rsa.der_string_from_signing_key(self._privkey)
-        self._writekey = hashutil.ssk_writekey_hash(privkey_s)
-        self._encprivkey = self._encrypt_privkey(self._writekey, privkey_s)
-        self._fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s)
+        self._pubkey, self._privkey = keypair
+        self._writekey, self._encprivkey, self._fingerprint = derive_mutable_keys(
+            keypair,
+        )
         if version == MDMF_VERSION:
             self._uri = WriteableMDMFFileURI(self._writekey, self._fingerprint)
             self._protocol_version = version
@@ -171,16 +165,6 @@ class MutableFileNode(object):
                       (contents, type(contents))
         return contents(self)

-    def _encrypt_privkey(self, writekey, privkey):
-        encryptor = aes.create_encryptor(writekey)
-        crypttext = aes.encrypt_data(encryptor, privkey)
-        return crypttext
-
-    def _decrypt_privkey(self, enc_privkey):
-        decryptor = aes.create_decryptor(self._writekey)
-        privkey = aes.decrypt_data(decryptor, enc_privkey)
-        return privkey
-
     def _populate_pubkey(self, pubkey):
         self._pubkey = pubkey
     def _populate_required_shares(self, required_shares):
@@ -1,15 +1,7 @@
 """
 Ported to Python 3.
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    # Don't import bytes and str, to prevent API leakage
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, dict, list, object, range, max, min # noqa: F401
+from __future__ import annotations

 import time

@@ -32,7 +24,7 @@ from allmydata import hashtree, codec
 from allmydata.storage.server import si_b2a

 from allmydata.mutable.common import CorruptShareError, BadShareError, \
-     UncoordinatedWriteError
+     UncoordinatedWriteError, decrypt_privkey
 from allmydata.mutable.layout import MDMFSlotReadProxy

 @implementer(IRetrieveStatus)
@@ -931,9 +923,10 @@ class Retrieve(object):


     def _try_to_validate_privkey(self, enc_privkey, reader, server):
-        alleged_privkey_s = self._node._decrypt_privkey(enc_privkey)
+        node_writekey = self._node.get_writekey()
+        alleged_privkey_s = decrypt_privkey(node_writekey, enc_privkey)
         alleged_writekey = hashutil.ssk_writekey_hash(alleged_privkey_s)
-        if alleged_writekey != self._node.get_writekey():
+        if alleged_writekey != node_writekey:
             self.log("invalid privkey from %s shnum %d" %
                      (reader, reader.shnum),
                      level=log.WEIRD, umid="YIw4tA")
@@ -1,16 +1,8 @@
 """
 Ported to Python 3.
 """
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    # Doesn't import str to prevent API leakage on Python 2
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, max, min # noqa: F401
-from past.builtins import unicode
+from __future__ import annotations
 from six import ensure_str

 import sys, time, copy
@@ -29,7 +21,7 @@ from allmydata.storage.server import si_b2a
 from allmydata.interfaces import IServermapUpdaterStatus

 from allmydata.mutable.common import MODE_CHECK, MODE_ANYTHING, MODE_WRITE, \
-     MODE_READ, MODE_REPAIR, CorruptShareError
+     MODE_READ, MODE_REPAIR, CorruptShareError, decrypt_privkey
 from allmydata.mutable.layout import SIGNED_PREFIX_LENGTH, MDMFSlotReadProxy

 @implementer(IServermapUpdaterStatus)
@@ -203,8 +195,8 @@ class ServerMap(object):
             (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
              offsets_tuple) = verinfo
             print("[%s]: sh#%d seq%d-%s %d-of-%d len%d" %
-                  (unicode(server.get_name(), "utf-8"), shnum,
-                   seqnum, unicode(base32.b2a(root_hash)[:4], "utf-8"), k, N,
+                  (str(server.get_name(), "utf-8"), shnum,
+                   seqnum, str(base32.b2a(root_hash)[:4], "utf-8"), k, N,
                    datalength), file=out)
         if self._problems:
             print("%d PROBLEMS" % len(self._problems), file=out)
@@ -276,7 +268,7 @@ class ServerMap(object):
         """Take a versionid, return a string that describes it."""
         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
          offsets_tuple) = verinfo
-        return "seq%d-%s" % (seqnum, unicode(base32.b2a(root_hash)[:4], "utf-8"))
+        return "seq%d-%s" % (seqnum, str(base32.b2a(root_hash)[:4], "utf-8"))

     def summarize_versions(self):
         """Return a string describing which versions we know about."""
@@ -824,7 +816,7 @@ class ServermapUpdater(object):


     def notify_server_corruption(self, server, shnum, reason):
-        if isinstance(reason, unicode):
+        if isinstance(reason, str):
             reason = reason.encode("utf-8")
         ss = server.get_storage_server()
         ss.advise_corrupt_share(
@@ -879,7 +871,7 @@ class ServermapUpdater(object):
             # ok, it's a valid verinfo. Add it to the list of validated
             # versions.
             self.log(" found valid version %d-%s from %s-sh%d: %d-%d/%d/%d"
                      % (seqnum, str(base32.b2a(root_hash)[:4], "utf-8"),
-                     % (seqnum, unicode(base32.b2a(root_hash)[:4], "utf-8"),
                         ensure_str(server.get_name()), shnum,
                         k, n, segsize, datalen),
                      parent=lp)
@@ -951,9 +943,10 @@ class ServermapUpdater(object):
        writekey stored in my node. If it is valid, then I set the
        privkey and encprivkey properties of the node.
        """
-        alleged_privkey_s = self._node._decrypt_privkey(enc_privkey)
+        node_writekey = self._node.get_writekey()
+        alleged_privkey_s = decrypt_privkey(node_writekey, enc_privkey)
         alleged_writekey = hashutil.ssk_writekey_hash(alleged_privkey_s)
-        if alleged_writekey != self._node.get_writekey():
+        if alleged_writekey != node_writekey:
             self.log("invalid privkey from %r shnum %d" %
                      (server.get_name(), shnum),
                      parent=lp, level=log.WEIRD, umid="aJVccw")
@@ -752,7 +752,7 @@ def create_connection_handlers(config, i2p_provider, tor_provider):


 def create_tub(tub_options, default_connection_handlers, foolscap_connection_handlers,
-               handler_overrides={}, **kwargs):
+               handler_overrides={}, force_foolscap=False, **kwargs):
     """
     Create a Tub with the right options and handlers. It will be
     ephemeral unless the caller provides certFile= in kwargs
@@ -762,9 +762,15 @@ def create_tub(tub_options, default_connection_handlers, foolscap_connection_han

     :param dict tub_options: every key-value pair in here will be set in
         the new Tub via `Tub.setOption`
+
+    :param bool force_foolscap: If True, only allow Foolscap, not the HTTPS
+        storage protocol.
     """
-    # We listen simulataneously for both Foolscap and HTTPS on the same port,
+    # We listen simultaneously for both Foolscap and HTTPS on the same port,
     # so we have to create a special Foolscap Tub for that to work:
-    tub = create_tub_with_https_support(**kwargs)
+    if force_foolscap:
+        tub = Tub(**kwargs)
+    else:
+        tub = create_tub_with_https_support(**kwargs)

     for (name, value) in list(tub_options.items()):
@@ -956,14 +962,20 @@ def create_main_tub(config, tub_options,

     # FIXME? "node.pem" was the CERTFILE option/thing
     certfile = config.get_private_path("node.pem")

     tub = create_tub(
         tub_options,
         default_connection_handlers,
         foolscap_connection_handlers,
+        # TODO eventually we will want the default to be False, but for now we
+        # don't want to enable HTTP by default.
+        # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3934
+        force_foolscap=config.get_config(
+            "storage", "force_foolscap", default=True, boolean=True
+        ),
         handler_overrides=handler_overrides,
         certFile=certfile,
     )

     if portlocation is None:
         log.msg("Tub is not listening")
     else:
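Not part of the diff: in tahoe.cfg terms the new knob lives under the ``[storage]`` section as ``force_foolscap`` (defaulting to true, per the TODO above). A hedged sketch of how it flows through the node, using only names taken from the diff:

# Sketch: the node reads [storage]force_foolscap and passes it to create_tub().
force_foolscap = config.get_config(
    "storage", "force_foolscap", default=True, boolean=True
)
tub = create_tub(
    tub_options,
    default_connection_handlers,
    foolscap_connection_handlers,
    force_foolscap=force_foolscap,   # True -> plain foolscap Tub, no HTTPS listener
    certFile=certfile,
)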
@@ -1,17 +1,12 @@
 """
-Ported to Python 3.
+Create file nodes of various types.
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
+from __future__ import annotations

 import weakref
 from zope.interface import implementer
+from twisted.internet.defer import succeed
 from allmydata.util.assertutil import precondition
 from allmydata.interfaces import INodeMaker
 from allmydata.immutable.literal import LiteralFileNode
@@ -22,6 +17,7 @@ from allmydata.mutable.publish import MutableData
 from allmydata.dirnode import DirectoryNode, pack_children
 from allmydata.unknown import UnknownNode
 from allmydata.blacklist import ProhibitedNode
+from allmydata.crypto.rsa import PublicKey, PrivateKey
 from allmydata import uri


@@ -126,12 +122,15 @@ class NodeMaker(object):
             return self._create_dirnode(filenode)
         return None

-    def create_mutable_file(self, contents=None, version=None):
+    def create_mutable_file(self, contents=None, version=None, keypair: tuple[PublicKey, PrivateKey] | None = None):
         if version is None:
             version = self.mutable_file_default
         n = MutableFileNode(self.storage_broker, self.secret_holder,
                             self.default_encoding_parameters, self.history)
-        d = self.key_generator.generate()
+        if keypair is None:
+            d = self.key_generator.generate()
+        else:
+            d = succeed(keypair)
         d.addCallback(n.create_with_keys, contents, version=version)
         d.addCallback(lambda res: n)
         return d
@@ -181,8 +181,20 @@ class PutOptions(FileStoreOptions):
     optFlags = [
         ("mutable", "m", "Create a mutable file instead of an immutable one (like --format=SDMF)"),
     ]

     optParameters = [
         ("format", None, None, "Create a file with the given format: SDMF and MDMF for mutable, CHK (default) for immutable. (case-insensitive)"),
+
+        ("private-key-path", None, None,
+         "***Warning*** "
+         "It is possible to use this option to spoil the normal security properties of mutable objects. "
+         "It is also possible to corrupt or destroy data with this option. "
+         "Most users will not need this option and can ignore it. "
+         "For mutables only, "
+         "this gives a file containing a PEM-encoded 2048 bit RSA private key to use as the signature key for the mutable. "
+         "The private key must be handled at least as strictly as the resulting capability string. "
+         "A single private key must not be used for more than one mutable."
+        ),
     ]

     def parseArgs(self, arg1=None, arg2=None):
@@ -1,23 +1,32 @@
 """
-Ported to Python 3.
+Implement the ``tahoe put`` command.
 """
-from __future__ import unicode_literals
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from future.utils import PY2
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
+from __future__ import annotations

 from io import BytesIO
 from urllib.parse import quote as url_quote
+from base64 import urlsafe_b64encode
+
+from cryptography.hazmat.primitives.serialization import load_pem_private_key
+
+from twisted.python.filepath import FilePath
+
+from allmydata.crypto.rsa import PrivateKey, der_string_from_signing_key
 from allmydata.scripts.common_http import do_http, format_http_success, format_http_error
 from allmydata.scripts.common import get_alias, DEFAULT_ALIAS, escape_path, \
                                      UnknownAliasError
 from allmydata.util.encodingutil import quote_output

+def load_private_key(path: str) -> str:
+    """
+    Load a private key from a file and return it in a format appropriate
+    to include in the HTTP request.
+    """
+    privkey = load_pem_private_key(FilePath(path).getContent(), password=None)
+    assert isinstance(privkey, PrivateKey)
+    derbytes = der_string_from_signing_key(privkey)
+    return urlsafe_b64encode(derbytes).decode("ascii")
+
 def put(options):
     """
     @param verbosity: 0, 1, or 2, meaning quiet, verbose, or very verbose
@@ -29,6 +38,10 @@ def put(options):
     from_file = options.from_file
     to_file = options.to_file
     mutable = options['mutable']
+    if options["private-key-path"] is None:
+        private_key = None
+    else:
+        private_key = load_private_key(options["private-key-path"])
     format = options['format']
     if options['quiet']:
         verbosity = 0
@@ -79,6 +92,12 @@ def put(options):
     queryargs = []
     if mutable:
         queryargs.append("mutable=true")
+        if private_key is not None:
+            queryargs.append(f"private-key={private_key}")
+    else:
+        if private_key is not None:
+            raise Exception("Can only supply a private key for mutables.")
+
     if format:
         queryargs.append("format=%s" % format)
     if queryargs:
@@ -92,9 +111,6 @@ def put(options):
         if verbosity > 0:
             print("waiting for file data on stdin..", file=stderr)
         # We're uploading arbitrary files, so this had better be bytes:
-        if PY2:
-            stdinb = stdin
-        else:
-            stdinb = stdin.buffer
+        stdinb = stdin.buffer
         data = stdinb.read()
         infileobj = BytesIO(data)
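Not part of the diff: a hedged sketch of preparing a key file for the new ``--private-key-path`` option. The file name and the use of the ``cryptography`` API are assumptions; the 2048-bit size and unencrypted PKCS8 PEM encoding mirror what load_private_key() above expects.

# Sketch: write a PEM file usable with `tahoe put --mutable --private-key-path=...`.
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.serialization import (
    Encoding, PrivateFormat, NoEncryption,
)

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
with open("mutable-signature-key.pem", "wb") as f:
    f.write(key.private_bytes(Encoding.PEM, PrivateFormat.PKCS8, NoEncryption()))

# Then, for a mutable upload only (never reuse the key for a second mutable):
#   tahoe put --mutable --private-key-path=mutable-signature-key.pem localfile tahoe:remote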
@@ -21,7 +21,11 @@ from twisted.scripts import twistd
 from twisted.python import usage
 from twisted.python.filepath import FilePath
 from twisted.python.reflect import namedAny
-from twisted.internet.defer import maybeDeferred
+from twisted.python.failure import Failure
+from twisted.internet.defer import maybeDeferred, Deferred
+from twisted.internet.protocol import Protocol
+from twisted.internet.stdio import StandardIO
+from twisted.internet.error import ReactorNotRunning
 from twisted.application.service import Service

 from allmydata.scripts.default_nodedir import _default_nodedir
@@ -155,6 +159,8 @@ class DaemonizeTheRealService(Service, HookMixin):

     def startService(self):

+        from twisted.internet import reactor
+
         def start():
             node_to_instance = {
                 u"client": lambda: maybeDeferred(namedAny("allmydata.client.create_client"), self.basedir),
@@ -186,7 +192,7 @@ class DaemonizeTheRealService(Service, HookMixin):
                         )
                     )
                 else:
-                    self.stderr.write("\nUnknown error\n")
+                    self.stderr.write("\nUnknown error, here's the traceback:\n")
                     reason.printTraceback(self.stderr)
                 reactor.stop()

@@ -194,12 +200,14 @@ class DaemonizeTheRealService(Service, HookMixin):

             def created(srv):
                 srv.setServiceParent(self.parent)
+                # exiting on stdin-closed facilitates cleanup when run
+                # as a subprocess
+                on_stdin_close(reactor, reactor.stop)
             d.addCallback(created)
             d.addErrback(handle_config_error)
             d.addBoth(self._call_hook, 'running')
             return d

-        from twisted.internet import reactor
         reactor.callWhenRunning(start)


@@ -213,6 +221,46 @@ class DaemonizeTahoeNodePlugin(object):
         return DaemonizeTheRealService(self.nodetype, self.basedir, so)


+def on_stdin_close(reactor, fn):
+    """
+    Arrange for the function `fn` to run when our stdin closes
+    """
+    when_closed_d = Deferred()
+
+    class WhenClosed(Protocol):
+        """
+        Notify a Deferred when our connection is lost .. as this is passed
+        to twisted's StandardIO class, it is used to detect our parent
+        going away.
+        """
+
+        def connectionLost(self, reason):
+            when_closed_d.callback(None)
+
+    def on_close(arg):
+        try:
+            fn()
+        except ReactorNotRunning:
+            pass
+        except Exception:
+            # for our "exit" use-case failures will _mostly_ just be
+            # ReactorNotRunning (because we're already shutting down
+            # when our stdin closes) but no matter what "bad thing"
+            # happens we just want to ignore it .. although other
+            # errors might be interesting so we'll log those
+            print(Failure())
+        return arg
+
+    when_closed_d.addBoth(on_close)
+    # we don't need to do anything with this instance because it gets
+    # hooked into the reactor and thus remembered .. but we return it
+    # for Windows testing purposes.
+    return StandardIO(
+        proto=WhenClosed(),
+        reactor=reactor,
+    )
+
+
 def run(reactor, config, runApp=twistd.runApp):
     """
     Runs a Tahoe-LAFS node in the foreground.
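Not part of the diff: a hypothetical usage sketch of ``on_stdin_close``, outside the daemonization machinery; the ``shutdown`` callback and the standalone ``reactor.run()`` are assumptions.

# Sketch: stop the process when whoever spawned it closes our stdin.
from twisted.internet import reactor

def shutdown():
    print("parent went away; stopping")
    reactor.stop()

# The StandardIO wrapper is returned; keeping a reference is only needed in tests.
stdio = on_stdin_close(reactor, shutdown)
reactor.run()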
@@ -20,7 +20,11 @@ from twisted.web.http_headers import Headers
 from twisted.web import http
 from twisted.web.iweb import IPolicyForHTTPS
 from twisted.internet.defer import inlineCallbacks, returnValue, fail, Deferred, succeed
-from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
+from twisted.internet.interfaces import (
+    IOpenSSLClientConnectionCreator,
+    IReactorTime,
+    IDelayedCall,
+)
 from twisted.internet.ssl import CertificateOptions
 from twisted.web.client import Agent, HTTPConnectionPool
 from zope.interface import implementer
@@ -83,35 +87,35 @@ _SCHEMAS = {
     "allocate_buckets": Schema(
         """
         response = {
-          already-have: #6.258([* uint])
-          allocated: #6.258([* uint])
+          already-have: #6.258([0*256 uint])
+          allocated: #6.258([0*256 uint])
         }
         """
     ),
     "immutable_write_share_chunk": Schema(
         """
         response = {
-          required: [* {begin: uint, end: uint}]
+          required: [0* {begin: uint, end: uint}]
         }
         """
     ),
     "list_shares": Schema(
         """
-        response = #6.258([* uint])
+        response = #6.258([0*256 uint])
         """
     ),
     "mutable_read_test_write": Schema(
         """
         response = {
           "success": bool,
-          "data": {* share_number: [* bstr]}
+          "data": {0*256 share_number: [0* bstr]}
         }
         share_number = uint
         """
     ),
     "mutable_list_shares": Schema(
         """
-        response = #6.258([* uint])
+        response = #6.258([0*256 uint])
         """
     ),
 }
@ -124,16 +128,22 @@ class _LengthLimitedCollector:
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
remaining_length: int
|
remaining_length: int
|
||||||
|
timeout_on_silence: IDelayedCall
|
||||||
f: BytesIO = field(factory=BytesIO)
|
f: BytesIO = field(factory=BytesIO)
|
||||||
|
|
||||||
def __call__(self, data: bytes):
|
def __call__(self, data: bytes):
|
||||||
|
self.timeout_on_silence.reset(60)
|
||||||
self.remaining_length -= len(data)
|
self.remaining_length -= len(data)
|
||||||
if self.remaining_length < 0:
|
if self.remaining_length < 0:
|
||||||
raise ValueError("Response length was too long")
|
raise ValueError("Response length was too long")
|
||||||
self.f.write(data)
|
self.f.write(data)
|
||||||
|
|
||||||
|
|
||||||
def limited_content(response, max_length: int = 30 * 1024 * 1024) -> Deferred[BinaryIO]:
|
def limited_content(
|
||||||
|
response,
|
||||||
|
clock: IReactorTime,
|
||||||
|
max_length: int = 30 * 1024 * 1024,
|
||||||
|
) -> Deferred[BinaryIO]:
|
||||||
"""
|
"""
|
||||||
Like ``treq.content()``, but limit data read from the response to a set
|
Like ``treq.content()``, but limit data read from the response to a set
|
||||||
length. If the response is longer than the max allowed length, the result
|
length. If the response is longer than the max allowed length, the result
|
||||||
@ -142,39 +152,29 @@ def limited_content(response, max_length: int = 30 * 1024 * 1024) -> Deferred[Bi
|
|||||||
A potentially useful future improvement would be using a temporary file to
|
A potentially useful future improvement would be using a temporary file to
|
||||||
store the content; since filesystem buffering means that would use memory
|
store the content; since filesystem buffering means that would use memory
|
||||||
for small responses and disk for large responses.
|
for small responses and disk for large responses.
|
||||||
|
|
||||||
|
This will time out if no data is received for 60 seconds; so long as a
|
||||||
|
trickle of data continues to arrive, it will continue to run.
|
||||||
"""
|
"""
|
||||||
collector = _LengthLimitedCollector(max_length)
|
d = succeed(None)
|
||||||
|
timeout = clock.callLater(60, d.cancel)
|
||||||
|
collector = _LengthLimitedCollector(max_length, timeout)
|
||||||
|
|
||||||
# Make really sure everything gets called in Deferred context, treq might
|
# Make really sure everything gets called in Deferred context, treq might
|
||||||
# call collector directly...
|
# call collector directly...
|
||||||
d = succeed(None)
|
|
||||||
d.addCallback(lambda _: treq.collect(response, collector))
|
d.addCallback(lambda _: treq.collect(response, collector))
|
||||||
|
|
||||||
def done(_):
|
def done(_):
|
||||||
|
timeout.cancel()
|
||||||
collector.f.seek(0)
|
collector.f.seek(0)
|
||||||
return collector.f
|
return collector.f
|
||||||
|
|
||||||
d.addCallback(done)
|
def failed(f):
|
||||||
return d
|
if timeout.active():
|
||||||
|
timeout.cancel()
|
||||||
|
return f
|
||||||
|
|
||||||
|
return d.addCallbacks(done, failed)
|
||||||
def _decode_cbor(response, schema: Schema):
|
|
||||||
"""Given HTTP response, return decoded CBOR body."""
|
|
||||||
|
|
||||||
def got_content(f: BinaryIO):
|
|
||||||
data = f.read()
|
|
||||||
schema.validate_cbor(data)
|
|
||||||
return loads(data)
|
|
||||||
|
|
||||||
if response.code > 199 and response.code < 300:
|
|
||||||
content_type = get_content_type(response.headers)
|
|
||||||
if content_type == CBOR_MIME_TYPE:
|
|
||||||
return limited_content(response).addCallback(got_content)
|
|
||||||
else:
|
|
||||||
raise ClientException(-1, "Server didn't send CBOR")
|
|
||||||
else:
|
|
||||||
return treq.content(response).addCallback(
|
|
||||||
lambda data: fail(ClientException(response.code, response.phrase, data))
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@define
|
@define
|
||||||
@ -276,42 +276,68 @@ class _StorageClientHTTPSPolicy:
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@define
|
@define(hash=True)
|
||||||
class StorageClient(object):
|
class StorageClient(object):
|
||||||
"""
|
"""
|
||||||
Low-level HTTP client that talks to the HTTP storage server.
|
Low-level HTTP client that talks to the HTTP storage server.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
# If set, we're doing unit testing and we should call this with
|
||||||
|
# HTTPConnectionPool we create.
|
||||||
|
TEST_MODE_REGISTER_HTTP_POOL = None
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def start_test_mode(cls, callback):
|
||||||
|
"""Switch to testing mode.
|
||||||
|
|
||||||
|
In testing mode we register the pool with test system using the given
|
||||||
|
callback so it can Do Things, most notably killing off idle HTTP
|
||||||
|
connections at test shutdown and, in some tests, in the midddle of the
|
||||||
|
test.
|
||||||
|
"""
|
||||||
|
cls.TEST_MODE_REGISTER_HTTP_POOL = callback
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def stop_test_mode(cls):
|
||||||
|
"""Stop testing mode."""
|
||||||
|
cls.TEST_MODE_REGISTER_HTTP_POOL = None
|
||||||
|
|
||||||
# The URL is a HTTPS URL ("https://..."). To construct from a NURL, use
|
# The URL is a HTTPS URL ("https://..."). To construct from a NURL, use
|
||||||
# ``StorageClient.from_nurl()``.
|
# ``StorageClient.from_nurl()``.
|
||||||
_base_url: DecodedURL
|
_base_url: DecodedURL
|
||||||
_swissnum: bytes
|
_swissnum: bytes
|
||||||
_treq: Union[treq, StubTreq, HTTPClient]
|
_treq: Union[treq, StubTreq, HTTPClient]
|
||||||
|
_clock: IReactorTime
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def from_nurl(
|
def from_nurl(
|
||||||
cls, nurl: DecodedURL, reactor, persistent: bool = True
|
cls,
|
||||||
|
nurl: DecodedURL,
|
||||||
|
reactor,
|
||||||
) -> StorageClient:
|
) -> StorageClient:
|
||||||
"""
|
"""
|
||||||
Create a ``StorageClient`` for the given NURL.
|
Create a ``StorageClient`` for the given NURL.
|
||||||
|
|
||||||
``persistent`` indicates whether to use persistent HTTP connections.
|
|
||||||
"""
|
"""
|
||||||
assert nurl.fragment == "v=1"
|
assert nurl.fragment == "v=1"
|
||||||
assert nurl.scheme == "pb"
|
assert nurl.scheme == "pb"
|
||||||
swissnum = nurl.path[0].encode("ascii")
|
swissnum = nurl.path[0].encode("ascii")
|
||||||
certificate_hash = nurl.user.encode("ascii")
|
certificate_hash = nurl.user.encode("ascii")
|
||||||
|
pool = HTTPConnectionPool(reactor)
|
||||||
|
pool.maxPersistentPerHost = 20
|
||||||
|
|
||||||
|
if cls.TEST_MODE_REGISTER_HTTP_POOL is not None:
|
||||||
|
cls.TEST_MODE_REGISTER_HTTP_POOL(pool)
|
||||||
|
|
||||||
treq_client = HTTPClient(
|
treq_client = HTTPClient(
|
||||||
Agent(
|
Agent(
|
||||||
reactor,
|
reactor,
|
||||||
_StorageClientHTTPSPolicy(expected_spki_hash=certificate_hash),
|
_StorageClientHTTPSPolicy(expected_spki_hash=certificate_hash),
|
||||||
pool=HTTPConnectionPool(reactor, persistent=persistent),
|
pool=pool,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
https_url = DecodedURL().replace(scheme="https", host=nurl.host, port=nurl.port)
|
https_url = DecodedURL().replace(scheme="https", host=nurl.host, port=nurl.port)
|
||||||
return cls(https_url, swissnum, treq_client)
|
return cls(https_url, swissnum, treq_client, reactor)
|
||||||
|
|
||||||
def relative_url(self, path):
|
def relative_url(self, path):
|
||||||
"""Get a URL relative to the base URL."""
|
"""Get a URL relative to the base URL."""
|
||||||
@ -337,6 +363,7 @@ class StorageClient(object):
|
|||||||
write_enabler_secret=None,
|
write_enabler_secret=None,
|
||||||
headers=None,
|
headers=None,
|
||||||
message_to_serialize=None,
|
message_to_serialize=None,
|
||||||
|
timeout: float = 60,
|
||||||
**kwargs,
|
**kwargs,
|
||||||
):
|
):
|
||||||
"""
|
"""
|
||||||
@ -345,6 +372,8 @@ class StorageClient(object):
|
|||||||
|
|
||||||
If ``message_to_serialize`` is set, it will be serialized (by default
|
If ``message_to_serialize`` is set, it will be serialized (by default
|
||||||
with CBOR) and set as the request body.
|
with CBOR) and set as the request body.
|
||||||
|
|
||||||
|
Default timeout is 60 seconds.
|
||||||
"""
|
"""
|
||||||
headers = self._get_headers(headers)
|
headers = self._get_headers(headers)
|
||||||
|
|
||||||
@ -376,16 +405,37 @@ class StorageClient(object):
|
|||||||
kwargs["data"] = dumps(message_to_serialize)
|
kwargs["data"] = dumps(message_to_serialize)
|
||||||
headers.addRawHeader("Content-Type", CBOR_MIME_TYPE)
|
headers.addRawHeader("Content-Type", CBOR_MIME_TYPE)
|
||||||
|
|
||||||
return self._treq.request(method, url, headers=headers, **kwargs)
|
return self._treq.request(
|
||||||
|
method, url, headers=headers, timeout=timeout, **kwargs
|
||||||
|
)
|
||||||
|
|
||||||
|
def decode_cbor(self, response, schema: Schema):
|
||||||
|
"""Given HTTP response, return decoded CBOR body."""
|
||||||
|
|
||||||
|
def got_content(f: BinaryIO):
|
||||||
|
data = f.read()
|
||||||
|
schema.validate_cbor(data)
|
||||||
|
return loads(data)
|
||||||
|
|
||||||
|
if response.code > 199 and response.code < 300:
|
||||||
|
content_type = get_content_type(response.headers)
|
||||||
|
if content_type == CBOR_MIME_TYPE:
|
||||||
|
return limited_content(response, self._clock).addCallback(got_content)
|
||||||
|
else:
|
||||||
|
raise ClientException(-1, "Server didn't send CBOR")
|
||||||
|
else:
|
||||||
|
return treq.content(response).addCallback(
|
||||||
|
lambda data: fail(ClientException(response.code, response.phrase, data))
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@define(hash=True)
|
||||||
class StorageClientGeneral(object):
|
class StorageClientGeneral(object):
|
||||||
"""
|
"""
|
||||||
High-level HTTP APIs that aren't immutable- or mutable-specific.
|
High-level HTTP APIs that aren't immutable- or mutable-specific.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, client): # type: (StorageClient) -> None
|
_client: StorageClient
|
||||||
self._client = client
|
|
||||||
|
|
||||||
@inlineCallbacks
|
@inlineCallbacks
|
||||||
def get_version(self):
|
def get_version(self):
|
||||||
@ -394,7 +444,9 @@ class StorageClientGeneral(object):
|
|||||||
"""
|
"""
|
||||||
url = self._client.relative_url("/storage/v1/version")
|
url = self._client.relative_url("/storage/v1/version")
|
||||||
response = yield self._client.request("GET", url)
|
response = yield self._client.request("GET", url)
|
||||||
decoded_response = yield _decode_cbor(response, _SCHEMAS["get_version"])
|
decoded_response = yield self._client.decode_cbor(
|
||||||
|
response, _SCHEMAS["get_version"]
|
||||||
|
)
|
||||||
returnValue(decoded_response)
|
returnValue(decoded_response)
|
||||||
|
|
||||||
@inlineCallbacks
|
@inlineCallbacks
|
||||||
@ -461,6 +513,9 @@ def read_share_chunk(
|
|||||||
share_type, _encode_si(storage_index), share_number
|
share_type, _encode_si(storage_index), share_number
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
# The default 60 second timeout is for getting the response, so it doesn't
|
||||||
|
# include the time it takes to download the body... so we will will deal
|
||||||
|
# with that later, via limited_content().
|
||||||
response = yield client.request(
|
response = yield client.request(
|
||||||
"GET",
|
"GET",
|
||||||
url,
|
url,
|
||||||
@ -469,6 +524,7 @@ def read_share_chunk(
|
|||||||
# but Range constructor does that the conversion for us.
|
# but Range constructor does that the conversion for us.
|
||||||
{"range": [Range("bytes", [(offset, offset + length)]).to_header()]}
|
{"range": [Range("bytes", [(offset, offset + length)]).to_header()]}
|
||||||
),
|
),
|
||||||
|
unbuffered=True, # Don't buffer the response in memory.
|
||||||
)
|
)
|
||||||
|
|
||||||
if response.code == http.NO_CONTENT:
|
if response.code == http.NO_CONTENT:
|
||||||
@ -491,7 +547,7 @@ def read_share_chunk(
|
|||||||
raise ValueError("Server sent more than we asked for?!")
|
raise ValueError("Server sent more than we asked for?!")
|
||||||
# It might also send less than we asked for. That's (probably) OK, e.g.
|
# It might also send less than we asked for. That's (probably) OK, e.g.
|
||||||
# if we went past the end of the file.
|
# if we went past the end of the file.
|
||||||
body = yield limited_content(response, supposed_length)
|
body = yield limited_content(response, client._clock, supposed_length)
|
||||||
body.seek(0, SEEK_END)
|
body.seek(0, SEEK_END)
|
||||||
actual_length = body.tell()
|
actual_length = body.tell()
|
||||||
if actual_length != supposed_length:
|
if actual_length != supposed_length:
|
||||||
@ -534,7 +590,7 @@ async def advise_corrupt_share(
|
|||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@define
|
@define(hash=True)
|
||||||
class StorageClientImmutables(object):
|
class StorageClientImmutables(object):
|
||||||
"""
|
"""
|
||||||
APIs for interacting with immutables.
|
APIs for interacting with immutables.
|
||||||
@ -578,7 +634,9 @@ class StorageClientImmutables(object):
|
|||||||
upload_secret=upload_secret,
|
upload_secret=upload_secret,
|
||||||
message_to_serialize=message,
|
message_to_serialize=message,
|
||||||
)
|
)
|
||||||
decoded_response = yield _decode_cbor(response, _SCHEMAS["allocate_buckets"])
|
decoded_response = yield self._client.decode_cbor(
|
||||||
|
response, _SCHEMAS["allocate_buckets"]
|
||||||
|
)
|
||||||
returnValue(
|
returnValue(
|
||||||
ImmutableCreateResult(
|
ImmutableCreateResult(
|
||||||
already_have=decoded_response["already-have"],
|
already_have=decoded_response["already-have"],
|
||||||
@ -654,7 +712,9 @@ class StorageClientImmutables(object):
|
|||||||
raise ClientException(
|
raise ClientException(
|
||||||
response.code,
|
response.code,
|
||||||
)
|
)
|
||||||
body = yield _decode_cbor(response, _SCHEMAS["immutable_write_share_chunk"])
|
body = yield self._client.decode_cbor(
|
||||||
|
response, _SCHEMAS["immutable_write_share_chunk"]
|
||||||
|
)
|
||||||
remaining = RangeMap()
|
remaining = RangeMap()
|
||||||
for chunk in body["required"]:
|
for chunk in body["required"]:
|
||||||
remaining.set(True, chunk["begin"], chunk["end"])
|
remaining.set(True, chunk["begin"], chunk["end"])
|
||||||
@ -683,7 +743,7 @@ class StorageClientImmutables(object):
|
|||||||
url,
|
url,
|
||||||
)
|
)
|
||||||
if response.code == http.OK:
|
if response.code == http.OK:
|
||||||
body = yield _decode_cbor(response, _SCHEMAS["list_shares"])
|
body = yield self._client.decode_cbor(response, _SCHEMAS["list_shares"])
|
||||||
returnValue(set(body))
|
returnValue(set(body))
|
||||||
else:
|
else:
|
||||||
raise ClientException(response.code)
|
raise ClientException(response.code)
|
||||||
@ -800,7 +860,9 @@ class StorageClientMutables:
|
|||||||
message_to_serialize=message,
|
message_to_serialize=message,
|
||||||
)
|
)
|
||||||
if response.code == http.OK:
|
if response.code == http.OK:
|
||||||
result = await _decode_cbor(response, _SCHEMAS["mutable_read_test_write"])
|
result = await self._client.decode_cbor(
|
||||||
|
response, _SCHEMAS["mutable_read_test_write"]
|
||||||
|
)
|
||||||
return ReadTestWriteResult(success=result["success"], reads=result["data"])
|
return ReadTestWriteResult(success=result["success"], reads=result["data"])
|
||||||
else:
|
else:
|
||||||
raise ClientException(response.code, (await response.content()))
|
raise ClientException(response.code, (await response.content()))
|
||||||
@ -829,7 +891,9 @@ class StorageClientMutables:
|
|||||||
)
|
)
|
||||||
response = await self._client.request("GET", url)
|
response = await self._client.request("GET", url)
|
||||||
if response.code == http.OK:
|
if response.code == http.OK:
|
||||||
return await _decode_cbor(response, _SCHEMAS["mutable_list_shares"])
|
return await self._client.decode_cbor(
|
||||||
|
response, _SCHEMAS["mutable_list_shares"]
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
raise ClientException(response.code)
|
raise ClientException(response.code)
|
||||||
|
|
||||||
|
@@ -9,6 +9,8 @@ from functools import wraps
 from base64 import b64decode
 import binascii
 from tempfile import TemporaryFile
+from os import SEEK_END, SEEK_SET
+import mmap
 
 from cryptography.x509 import Certificate as CryptoCertificate
 from zope.interface import implementer
@@ -39,7 +41,7 @@ from cryptography.x509 import load_pem_x509_certificate
 
 
 # TODO Make sure to use pure Python versions?
-from cbor2 import dump, loads
+import cbor2
 from pycddl import Schema, ValidationError as CDDLValidationError
 from .server import StorageServer
 from .http_common import (
@@ -100,7 +102,7 @@ def _authorization_decorator(required_secrets):
         @wraps(f)
         def route(self, request, *args, **kwargs):
             if not timing_safe_compare(
-                request.requestHeaders.getRawHeaders("Authorization", [None])[0].encode(
+                request.requestHeaders.getRawHeaders("Authorization", [""])[0].encode(
                     "utf-8"
                 ),
                 swissnum_auth_header(self._swissnum),
@@ -260,7 +262,7 @@ _SCHEMAS = {
     "allocate_buckets": Schema(
         """
         request = {
-          share-numbers: #6.258([*256 uint])
+          share-numbers: #6.258([0*256 uint])
           allocated-size: uint
         }
         """
@@ -276,15 +278,13 @@ _SCHEMAS = {
         """
         request = {
             "test-write-vectors": {
-                ; TODO Add length limit here, after
-                ; https://github.com/anweiss/cddl/issues/128 is fixed
-                * share_number => {
-                    "test": [*30 {"offset": uint, "size": uint, "specimen": bstr}]
-                    "write": [*30 {"offset": uint, "data": bstr}]
+                0*256 share_number : {
+                    "test": [0*30 {"offset": uint, "size": uint, "specimen": bstr}]
+                    "write": [* {"offset": uint, "data": bstr}]
                     "new-length": uint / null
                 }
             }
-            "read-vector": [*30 {"offset": uint, "size": uint}]
+            "read-vector": [0*30 {"offset": uint, "size": uint}]
         }
         share_number = uint
         """
@@ -517,7 +517,7 @@ class HTTPServer(object):
         if accept.best == CBOR_MIME_TYPE:
             request.setHeader("Content-Type", CBOR_MIME_TYPE)
             f = TemporaryFile()
-            dump(data, f)
+            cbor2.dump(data, f)
 
             def read_data(offset: int, length: int) -> bytes:
                 f.seek(offset)
@@ -529,27 +529,47 @@ class HTTPServer(object):
             # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3861
             raise _HTTPError(http.NOT_ACCEPTABLE)
 
-    def _read_encoded(self, request, schema: Schema) -> Any:
+    def _read_encoded(
+        self, request, schema: Schema, max_size: int = 1024 * 1024
+    ) -> Any:
         """
         Read encoded request body data, decoding it with CBOR by default.
 
-        Somewhat arbitrarily, limit body size to 1MB; this may be too low, we
-        may want to customize per query type, but this is the starting point
-        for now.
+        Somewhat arbitrarily, limit body size to 1MiB by default.
         """
         content_type = get_content_type(request.requestHeaders)
-        if content_type == CBOR_MIME_TYPE:
-            # Read 1 byte more than 1MB. We expect length to be 1MB or
-            # less; if it's more assume it's not a legitimate message.
-            message = request.content.read(1024 * 1024 + 1)
-            if len(message) > 1024 * 1024:
-                raise _HTTPError(http.REQUEST_ENTITY_TOO_LARGE)
-            schema.validate_cbor(message)
-            result = loads(message)
-            return result
-        else:
+        if content_type != CBOR_MIME_TYPE:
             raise _HTTPError(http.UNSUPPORTED_MEDIA_TYPE)
+
+        # Make sure it's not too large:
+        request.content.seek(SEEK_END, 0)
+        if request.content.tell() > max_size:
+            raise _HTTPError(http.REQUEST_ENTITY_TOO_LARGE)
+        request.content.seek(SEEK_SET, 0)
+
+        # We don't want to load the whole message into memory, cause it might
+        # be quite large. The CDDL validator takes a read-only bytes-like
+        # thing. Luckily, for large request bodies twisted.web will buffer the
+        # data in a file, so we can use mmap() to get a memory view. The CDDL
+        # validator will not make a copy, so it won't increase memory usage
+        # beyond that.
+        try:
+            fd = request.content.fileno()
+        except (ValueError, OSError):
+            fd = -1
+        if fd >= 0:
+            # It's a file, so we can use mmap() to save memory.
+            message = mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
+        else:
+            message = request.content.read()
+        schema.validate_cbor(message)
+
+        # The CBOR parser will allocate more memory, but at least we can feed
+        # it the file-like object, so that if it's large it won't be make two
+        # copies.
+        request.content.seek(SEEK_SET, 0)
+        return cbor2.load(request.content)
 
     ##### Generic APIs #####
 
     @_authorized_route(_app, set(), "/storage/v1/version", methods=["GET"])
@@ -748,7 +768,9 @@ class HTTPServer(object):
     )
     def mutable_read_test_write(self, request, authorization, storage_index):
         """Read/test/write combined operation for mutables."""
-        rtw_request = self._read_encoded(request, _SCHEMAS["mutable_read_test_write"])
+        rtw_request = self._read_encoded(
+            request, _SCHEMAS["mutable_read_test_write"], max_size=2**48
+        )
         secrets = (
             authorization[Secrets.WRITE_ENABLER],
             authorization[Secrets.LEASE_RENEW],
@@ -30,6 +30,8 @@ Ported to Python 3.
 #
 # 6: implement other sorts of IStorageClient classes: S3, etc
 
+from __future__ import annotations
+
 from six import ensure_text
 
 from typing import Union
@@ -41,13 +43,16 @@ import hashlib
 from configparser import NoSectionError
 
 import attr
+from hyperlink import DecodedURL
 from zope.interface import (
     Attribute,
     Interface,
     implementer,
 )
+from twisted.python.failure import Failure
 from twisted.web import http
-from twisted.internet import defer
+from twisted.internet.task import LoopingCall
+from twisted.internet import defer, reactor
 from twisted.application import service
 from twisted.plugin import (
     getPlugins,
@@ -85,6 +90,8 @@ from allmydata.storage.http_client import (
     ReadVector, TestWriteVectors, WriteVector, TestVector, ClientException
 )
 
+ANONYMOUS_STORAGE_NURLS = "anonymous-storage-NURLs"
+
 
 # who is responsible for de-duplication?
 # both?
@@ -109,8 +116,8 @@ class StorageClientConfig(object):
 
     :ivar preferred_peers: An iterable of the server-ids (``bytes``) of the
         storage servers where share placement is preferred, in order of
-        decreasing preference. See the *[client]peers.preferred*
-        documentation for details.
+        decreasing preference. See the *[client]peers.preferred* documentation
+        for details.
 
     :ivar dict[unicode, dict[unicode, unicode]] storage_plugins: A mapping from
         names of ``IFoolscapStoragePlugin`` configured in *tahoe.cfg* to the
@@ -286,6 +293,10 @@ class StorageFarmBroker(service.MultiService):
         by the given announcement.
         """
         assert isinstance(server_id, bytes)
+        if len(server["ann"].get(ANONYMOUS_STORAGE_NURLS, [])) > 0:
+            s = HTTPNativeStorageServer(server_id, server["ann"])
+            s.on_status_changed(lambda _: self._got_connection())
+            return s
         handler_overrides = server.get("connections", {})
         gm_verifier = create_grid_manager_verifier(
             self.storage_client_config.grid_manager_keys,
@@ -569,6 +580,45 @@ class IFoolscapStorageServer(Interface):
     """
 
 
+def _parse_announcement(server_id: bytes, furl: bytes, ann: dict) -> tuple[str, bytes, bytes, bytes, bytes]:
+    """
+    Parse the furl and announcement, return:
+
+        (nickname, permutation_seed, tubid, short_description, long_description)
+    """
+    m = re.match(br'pb://(\w+)@', furl)
+    assert m, furl
+    tubid_s = m.group(1).lower()
+    tubid = base32.a2b(tubid_s)
+    if "permutation-seed-base32" in ann:
+        seed = ann["permutation-seed-base32"]
+        if isinstance(seed, str):
+            seed = seed.encode("utf-8")
+        ps = base32.a2b(seed)
+    elif re.search(br'^v0-[0-9a-zA-Z]{52}$', server_id):
+        ps = base32.a2b(server_id[3:])
+    else:
+        log.msg("unable to parse serverid '%(server_id)s as pubkey, "
+                "hashing it to get permutation-seed, "
+                "may not converge with other clients",
+                server_id=server_id,
+                facility="tahoe.storage_broker",
+                level=log.UNUSUAL, umid="qu86tw")
+        ps = hashlib.sha256(server_id).digest()
+    permutation_seed = ps
+
+    assert server_id
+    long_description = server_id
+    if server_id.startswith(b"v0-"):
+        # remove v0- prefix from abbreviated name
+        short_description = server_id[3:3+8]
+    else:
+        short_description = server_id[:8]
+    nickname = ann.get("nickname", "")
+
+    return (nickname, permutation_seed, tubid, short_description, long_description)
+
+
 @implementer(IFoolscapStorageServer)
 @attr.s(frozen=True)
 class _FoolscapStorage(object):
@@ -613,43 +663,13 @@ class _FoolscapStorage(object):
         The furl will be a Unicode string on Python 3; on Python 2 it will be
         either a native (bytes) string or a Unicode string.
         """
-        furl = furl.encode("utf-8")
-        m = re.match(br'pb://(\w+)@', furl)
-        assert m, furl
-        tubid_s = m.group(1).lower()
-        tubid = base32.a2b(tubid_s)
-        if "permutation-seed-base32" in ann:
-            seed = ann["permutation-seed-base32"]
-            if isinstance(seed, str):
-                seed = seed.encode("utf-8")
-            ps = base32.a2b(seed)
-        elif re.search(br'^v0-[0-9a-zA-Z]{52}$', server_id):
-            ps = base32.a2b(server_id[3:])
-        else:
-            log.msg("unable to parse serverid '%(server_id)s as pubkey, "
-                    "hashing it to get permutation-seed, "
-                    "may not converge with other clients",
-                    server_id=server_id,
-                    facility="tahoe.storage_broker",
-                    level=log.UNUSUAL, umid="qu86tw")
-            ps = hashlib.sha256(server_id).digest()
-        permutation_seed = ps
-
-        assert server_id
-        long_description = server_id
-        if server_id.startswith(b"v0-"):
-            # remove v0- prefix from abbreviated name
-            short_description = server_id[3:3+8]
-        else:
-            short_description = server_id[:8]
-        nickname = ann.get("nickname", "")
-
+        (nickname, permutation_seed, tubid, short_description, long_description) = _parse_announcement(server_id, furl.encode("utf-8"), ann)
         return cls(
             nickname=nickname,
             permutation_seed=permutation_seed,
             tubid=tubid,
             storage_server=storage_server,
-            furl=furl,
+            furl=furl.encode("utf-8"),
             short_description=short_description,
             long_description=long_description,
         )
@@ -731,6 +751,16 @@ def _storage_from_foolscap_plugin(node_config, config, announcement, get_rref):
     raise AnnouncementNotMatched()
 
 
+def _available_space_from_version(version):
+    if version is None:
+        return None
+    protocol_v1_version = version.get(b'http://allmydata.org/tahoe/protocols/storage/v1', BytesKeyDict())
+    available_space = protocol_v1_version.get(b'available-space')
+    if available_space is None:
+        available_space = protocol_v1_version.get(b'maximum-immutable-share-size', None)
+    return available_space
+
+
 @implementer(IServer)
 class NativeStorageServer(service.MultiService):
     """I hold information about a storage server that we want to connect to.
@@ -910,13 +940,7 @@ class NativeStorageServer(service.MultiService):
 
     def get_available_space(self):
         version = self.get_version()
-        if version is None:
-            return None
-        protocol_v1_version = version.get(b'http://allmydata.org/tahoe/protocols/storage/v1', BytesKeyDict())
-        available_space = protocol_v1_version.get(b'available-space')
-        if available_space is None:
-            available_space = protocol_v1_version.get(b'maximum-immutable-share-size', None)
-        return available_space
+        return _available_space_from_version(version)
 
     def start_connecting(self, trigger_cb):
         self._tub = self._tub_maker(self._handler_overrides)
@@ -978,6 +1002,164 @@ class NativeStorageServer(service.MultiService):
         # used when the broker wants us to hurry up
         self._reconnector.reset()
 
 
+@implementer(IServer)
+class HTTPNativeStorageServer(service.MultiService):
+    """
+    Like ``NativeStorageServer``, but for HTTP clients.
+
+    The notion of being "connected" is less meaningful for HTTP; we just poll
+    occasionally, and if we've succeeded at last poll, we assume we're
+    "connected".
+    """
+
+    def __init__(self, server_id: bytes, announcement, reactor=reactor):
+        service.MultiService.__init__(self)
+        assert isinstance(server_id, bytes)
+        self._server_id = server_id
+        self.announcement = announcement
+        self._on_status_changed = ObserverList()
+        self._reactor = reactor
+        furl = announcement["anonymous-storage-FURL"].encode("utf-8")
+        (
+            self._nickname,
+            self._permutation_seed,
+            self._tubid,
+            self._short_description,
+            self._long_description
+        ) = _parse_announcement(server_id, furl, announcement)
+        # TODO need some way to do equivalent of Happy Eyeballs for multiple NURLs?
+        # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3935
+        nurl = DecodedURL.from_text(announcement[ANONYMOUS_STORAGE_NURLS][0])
+        self._istorage_server = _HTTPStorageServer.from_http_client(
+            StorageClient.from_nurl(nurl, reactor)
+        )
+
+        self._connection_status = connection_status.ConnectionStatus.unstarted()
+        self._version = None
+        self._last_connect_time = None
+        self._connecting_deferred = None
+
+    def get_permutation_seed(self):
+        return self._permutation_seed
+
+    def get_name(self):
+        return self._short_description
+
+    def get_longname(self):
+        return self._long_description
+
+    def get_tubid(self):
+        return self._tubid
+
+    def get_lease_seed(self):
+        # Apparently this is what Foolscap version above does?!
+        return self._tubid
+
+    def get_foolscap_write_enabler_seed(self):
+        return self._tubid
+
+    def get_nickname(self):
+        return self._nickname
+
+    def on_status_changed(self, status_changed):
+        """
+        :param status_changed: a callable taking a single arg (the
+            NativeStorageServer) that is notified when we become connected
+        """
+        return self._on_status_changed.subscribe(status_changed)
+
+    # Special methods used by copy.copy() and copy.deepcopy(). When those are
+    # used in allmydata.immutable.filenode to copy CheckResults during
+    # repair, we want it to treat the IServer instances as singletons, and
+    # not attempt to duplicate them..
+    def __copy__(self):
+        return self
+
+    def __deepcopy__(self, memodict):
+        return self
+
+    def __repr__(self):
+        return "<HTTPNativeStorageServer for %r>" % self.get_name()
+
+    def get_serverid(self):
+        return self._server_id
+
+    def get_version(self):
+        return self._version
+
+    def get_announcement(self):
+        return self.announcement
+
+    def get_connection_status(self):
+        return self._connection_status
+
+    def is_connected(self):
+        return self._connection_status.connected
+
+    def get_available_space(self):
+        version = self.get_version()
+        return _available_space_from_version(version)
+
+    def start_connecting(self, trigger_cb):
+        self._lc = LoopingCall(self._connect)
+        self._lc.start(1, True)
+
+    def _got_version(self, version):
+        self._last_connect_time = time.time()
+        self._version = version
+        self._connection_status = connection_status.ConnectionStatus(
+            True, "connected", [], self._last_connect_time, self._last_connect_time
+        )
+        self._on_status_changed.notify(self)
+
+    def _failed_to_connect(self, reason):
+        self._connection_status = connection_status.ConnectionStatus(
+            False, f"failure: {reason}", [], self._last_connect_time, self._last_connect_time
+        )
+        self._on_status_changed.notify(self)
+
+    def get_storage_server(self):
+        """
+        See ``IServer.get_storage_server``.
+        """
+        if self._connection_status.summary == "unstarted":
+            return None
+        return self._istorage_server
+
+    def stop_connecting(self):
+        self._lc.stop()
+        if self._connecting_deferred is not None:
+            self._connecting_deferred.cancel()
+
+    def try_to_connect(self):
+        self._connect()
+
+    def _connect(self):
+        result = self._istorage_server.get_version()
+
+        def remove_connecting_deferred(result):
+            self._connecting_deferred = None
+            return result
+
+        # Set a short timeout since we're relying on this for server liveness.
+        self._connecting_deferred = result.addTimeout(5, self._reactor).addBoth(
+            remove_connecting_deferred).addCallbacks(
+                self._got_version,
+                self._failed_to_connect
+            )
+
+    def stopService(self):
+        if self._connecting_deferred is not None:
+            self._connecting_deferred.cancel()
+
+        result = service.MultiService.stopService(self)
+        if self._lc.running:
+            self._lc.stop()
+        self._failed_to_connect("shut down")
+        return result
+
+
 class UnknownServerTypeError(Exception):
     pass
 
@@ -1094,7 +1276,7 @@ class _StorageServer(object):
 
 
 
-@attr.s
+@attr.s(hash=True)
 class _FakeRemoteReference(object):
     """
     Emulate a Foolscap RemoteReference, calling a local object instead.
@@ -1119,7 +1301,7 @@ class _HTTPBucketWriter(object):
     storage_index = attr.ib(type=bytes)
     share_number = attr.ib(type=int)
     upload_secret = attr.ib(type=bytes)
-    finished = attr.ib(type=bool, default=False)
+    finished = attr.ib(type=defer.Deferred[bool], factory=defer.Deferred)
 
     def abort(self):
         return self.client.abort_upload(self.storage_index, self.share_number,
@@ -1131,18 +1313,27 @@ class _HTTPBucketWriter(object):
             self.storage_index, self.share_number, self.upload_secret, offset, data
         )
         if result.finished:
-            self.finished = True
+            self.finished.callback(True)
         defer.returnValue(None)
 
     def close(self):
-        # A no-op in HTTP protocol.
-        if not self.finished:
-            return defer.fail(RuntimeError("You didn't finish writing?!"))
-        return defer.succeed(None)
+        # We're not _really_ closed until all writes have succeeded and we
+        # finished writing all the data.
+        return self.finished
 
 
-@attr.s
+def _ignore_404(failure: Failure) -> Union[Failure, None]:
+    """
+    Useful for advise_corrupt_share(), since it swallows unknown share numbers
+    in Foolscap.
+    """
+    if failure.check(HTTPClientException) and failure.value.code == http.NOT_FOUND:
+        return None
+    else:
+        return failure
+
+
+@attr.s(hash=True)
 class _HTTPBucketReader(object):
     """
     Emulate a ``RIBucketReader``, but use HTTP protocol underneath.
@@ -1160,7 +1351,7 @@ class _HTTPBucketReader(object):
         return self.client.advise_corrupt_share(
             self.storage_index, self.share_number,
             str(reason, "utf-8", errors="backslashreplace")
-        )
+        ).addErrback(_ignore_404)
 
 
 # WORK IN PROGRESS, for now it doesn't actually implement whole thing.
@@ -1260,7 +1451,7 @@ class _HTTPStorageServer(object):
             raise ValueError("Unknown share type")
         return client.advise_corrupt_share(
             storage_index, shnum, str(reason, "utf-8", errors="backslashreplace")
-        )
+        ).addErrback(_ignore_404)
 
     @defer.inlineCallbacks
     def slot_readv(self, storage_index, shares, readv):
@@ -1,210 +0,0 @@
-"""
-This module is only necessary on Python 2. Once Python 2 code is dropped, it
-can be deleted.
-"""
-
-from future.utils import PY3
-if PY3:
-    raise RuntimeError("Just use subprocess.Popen")
-
-# This is necessary to pacify flake8 on Python 3, while we're still supporting
-# Python 2.
-from past.builtins import unicode
-
-# -*- coding: utf-8 -*-
-
-## Copyright (C) 2021 Valentin Lab
-##
-## Redistribution and use in source and binary forms, with or without
-## modification, are permitted provided that the following conditions
-## are met:
-##
-## 1. Redistributions of source code must retain the above copyright
-## notice, this list of conditions and the following disclaimer.
-##
-## 2. Redistributions in binary form must reproduce the above
-## copyright notice, this list of conditions and the following
-## disclaimer in the documentation and/or other materials provided
-## with the distribution.
-##
-## 3. Neither the name of the copyright holder nor the names of its
-## contributors may be used to endorse or promote products derived
-## from this software without specific prior written permission.
-##
-## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-## FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-## COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
-## INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-## STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
-## OF THE POSSIBILITY OF SUCH DAMAGE.
-##
-
-## issue: https://bugs.python.org/issue19264
-
-# See allmydata/windows/fixups.py
-import sys
-assert sys.platform == "win32"
-
-import os
-import ctypes
-import subprocess
-import _subprocess
-from ctypes import byref, windll, c_char_p, c_wchar_p, c_void_p, \
-     Structure, sizeof, c_wchar, WinError
-from ctypes.wintypes import BYTE, WORD, LPWSTR, BOOL, DWORD, LPVOID, \
-     HANDLE
-
-
-##
-## Types
-##
-
-CREATE_UNICODE_ENVIRONMENT = 0x00000400
-LPCTSTR = c_char_p
-LPTSTR = c_wchar_p
-LPSECURITY_ATTRIBUTES = c_void_p
-LPBYTE = ctypes.POINTER(BYTE)
-
-class STARTUPINFOW(Structure):
-    _fields_ = [
-        ("cb", DWORD), ("lpReserved", LPWSTR),
-        ("lpDesktop", LPWSTR), ("lpTitle", LPWSTR),
-        ("dwX", DWORD), ("dwY", DWORD),
-        ("dwXSize", DWORD), ("dwYSize", DWORD),
-        ("dwXCountChars", DWORD), ("dwYCountChars", DWORD),
-        ("dwFillAtrribute", DWORD), ("dwFlags", DWORD),
-        ("wShowWindow", WORD), ("cbReserved2", WORD),
-        ("lpReserved2", LPBYTE), ("hStdInput", HANDLE),
-        ("hStdOutput", HANDLE), ("hStdError", HANDLE),
-    ]
-
-LPSTARTUPINFOW = ctypes.POINTER(STARTUPINFOW)
-
-
-class PROCESS_INFORMATION(Structure):
-    _fields_ = [
-        ("hProcess", HANDLE), ("hThread", HANDLE),
-        ("dwProcessId", DWORD), ("dwThreadId", DWORD),
-    ]
-
-LPPROCESS_INFORMATION = ctypes.POINTER(PROCESS_INFORMATION)
-
-
-class DUMMY_HANDLE(ctypes.c_void_p):
-
-    def __init__(self, *a, **kw):
-        super(DUMMY_HANDLE, self).__init__(*a, **kw)
-        self.closed = False
-
-    def Close(self):
-        if not self.closed:
-            windll.kernel32.CloseHandle(self)
-            self.closed = True
-
-    def __int__(self):
-        return self.value
-
-
-CreateProcessW = windll.kernel32.CreateProcessW
-CreateProcessW.argtypes = [
-    LPCTSTR, LPTSTR, LPSECURITY_ATTRIBUTES,
-    LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCTSTR,
-    LPSTARTUPINFOW, LPPROCESS_INFORMATION,
-]
-CreateProcessW.restype = BOOL
-
-
-##
-## Patched functions/classes
-##
-
-def CreateProcess(executable, args, _p_attr, _t_attr,
-                  inherit_handles, creation_flags, env, cwd,
-                  startup_info):
-    """Create a process supporting unicode executable and args for win32
-
-    Python implementation of CreateProcess using CreateProcessW for Win32
-
-    """
-
-    si = STARTUPINFOW(
-        dwFlags=startup_info.dwFlags,
-        wShowWindow=startup_info.wShowWindow,
-        cb=sizeof(STARTUPINFOW),
-        ## XXXvlab: not sure of the casting here to ints.
-        hStdInput=int(startup_info.hStdInput),
-        hStdOutput=int(startup_info.hStdOutput),
-        hStdError=int(startup_info.hStdError),
-    )
-
-    wenv = None
-    if env is not None:
-        ## LPCWSTR seems to be c_wchar_p, so let's say CWSTR is c_wchar
-        env = (unicode("").join([
-            unicode("%s=%s\0") % (k, v)
-            for k, v in env.items()])) + unicode("\0")
-        wenv = (c_wchar * len(env))()
-        wenv.value = env
-
-    pi = PROCESS_INFORMATION()
-    creation_flags |= CREATE_UNICODE_ENVIRONMENT
-
-    if CreateProcessW(executable, args, None, None,
-                      inherit_handles, creation_flags,
-                      wenv, cwd, byref(si), byref(pi)):
-        return (DUMMY_HANDLE(pi.hProcess), DUMMY_HANDLE(pi.hThread),
-                pi.dwProcessId, pi.dwThreadId)
-    raise WinError()
-
-
-class Popen(subprocess.Popen):
-    """This superseeds Popen and corrects a bug in cPython 2.7 implem"""
-
-    def _execute_child(self, args, executable, preexec_fn, close_fds,
-                       cwd, env, universal_newlines,
-                       startupinfo, creationflags, shell, to_close,
-                       p2cread, p2cwrite,
-                       c2pread, c2pwrite,
-                       errread, errwrite):
-        """Code from part of _execute_child from Python 2.7 (9fbb65e)
-
-        There are only 2 little changes concerning the construction of
-        the the final string in shell mode: we preempt the creation of
-        the command string when shell is True, because original function
-        will try to encode unicode args which we want to avoid to be able to
-        sending it as-is to ``CreateProcess``.
-
-        """
-        if not isinstance(args, subprocess.types.StringTypes):
-            args = subprocess.list2cmdline(args)
-
-        if startupinfo is None:
-            startupinfo = subprocess.STARTUPINFO()
-        if shell:
-            startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
-            startupinfo.wShowWindow = _subprocess.SW_HIDE
-            comspec = os.environ.get("COMSPEC", unicode("cmd.exe"))
-            args = unicode('{} /c "{}"').format(comspec, args)
-            if (_subprocess.GetVersion() >= 0x80000000 or
-                    os.path.basename(comspec).lower() == "command.com"):
-                w9xpopen = self._find_w9xpopen()
-                args = unicode('"%s" %s') % (w9xpopen, args)
-                creationflags |= _subprocess.CREATE_NEW_CONSOLE
-
-        cp = _subprocess.CreateProcess
-        _subprocess.CreateProcess = CreateProcess
-        try:
-            super(Popen, self)._execute_child(
-                args, executable,
-                preexec_fn, close_fds, cwd, env, universal_newlines,
-                startupinfo, creationflags, False, to_close, p2cread,
-                p2cwrite, c2pread, c2pwrite, errread, errwrite,
-            )
-        finally:
-            _subprocess.CreateProcess = cp
@ -1,19 +1,18 @@
|
|||||||
"""
|
"""
|
||||||
Ported to Python 3.
|
Tests for the ``tahoe put`` CLI tool.
|
||||||
"""
|
"""
|
||||||
from __future__ import absolute_import
|
from __future__ import annotations
|
||||||
from __future__ import division
|
|
||||||
from __future__ import print_function
|
|
||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
from future.utils import PY2
|
|
||||||
if PY2:
|
|
||||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
|
||||||
|
|
||||||
|
from typing import Callable, Awaitable, TypeVar, Any
|
||||||
import os.path
|
import os.path
|
 from twisted.trial import unittest
 from twisted.python import usage
+from twisted.python.filepath import FilePath
+
+from cryptography.hazmat.primitives.serialization import load_pem_private_key
+
+from allmydata.crypto.rsa import PrivateKey
+from allmydata.uri import from_string
 from allmydata.util import fileutil
 from allmydata.scripts.common import get_aliases
 from allmydata.scripts import cli
@ -22,6 +21,9 @@ from ..common_util import skip_if_cannot_represent_filename
 from allmydata.util.encodingutil import get_io_encoding
 from allmydata.util.fileutil import abspath_expanduser_unicode
 from .common import CLITestMixin
+from allmydata.mutable.common import derive_mutable_keys
 
+T = TypeVar("T")
+
 class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
 
@ -215,6 +217,65 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase):
         return d
 
+    async def test_unlinked_mutable_specified_private_key(self) -> None:
+        """
+        A new unlinked mutable can be created using a specified private
+        key.
+        """
+        self.basedir = "cli/Put/unlinked-mutable-with-key"
+        await self._test_mutable_specified_key(
+            lambda do_cli, pempath, datapath: do_cli(
+                "put", "--mutable", "--private-key-path", pempath.path,
+                stdin=datapath.getContent(),
+            ),
+        )
+
+    async def test_linked_mutable_specified_private_key(self) -> None:
+        """
+        A new linked mutable can be created using a specified private key.
+        """
+        self.basedir = "cli/Put/linked-mutable-with-key"
+        await self._test_mutable_specified_key(
+            lambda do_cli, pempath, datapath: do_cli(
+                "put", "--mutable", "--private-key-path", pempath.path, datapath.path,
+            ),
+        )
+
+    async def _test_mutable_specified_key(
+        self,
+        run: Callable[[Any, FilePath, FilePath], Awaitable[tuple[int, bytes, bytes]]],
+    ) -> None:
+        """
+        A helper for testing mutable creation.
+
+        :param run: A function to do the creation.  It is called with
+            ``self.do_cli`` and the path to a private key PEM file and a data
+            file.  It returns whatever ``do_cli`` returns.
+        """
+        self.set_up_grid(oneshare=True)
+
+        pempath = FilePath(__file__).parent().sibling("data").child("openssl-rsa-2048.txt")
+        datapath = FilePath(self.basedir).child("data")
+        datapath.setContent(b"Hello world" * 1024)
+
+        (rc, out, err) = await run(self.do_cli, pempath, datapath)
+        self.assertEqual(rc, 0, (out, err))
+        cap = from_string(out.strip())
+        # The capability is derived from the key we specified.
+        privkey = load_pem_private_key(pempath.getContent(), password=None)
+        assert isinstance(privkey, PrivateKey)
+        pubkey = privkey.public_key()
+        writekey, _, fingerprint = derive_mutable_keys((pubkey, privkey))
+        self.assertEqual(
+            (writekey, fingerprint),
+            (cap.writekey, cap.fingerprint),
+        )
+        # Also the capability we were given actually refers to the data we
+        # uploaded.
+        (rc, out, err) = await self.do_cli("get", out.strip())
+        self.assertEqual(rc, 0, (out, err))
+        self.assertEqual(out, datapath.getContent().decode("ascii"))
+
     def test_mutable(self):
         # echo DATA1 | tahoe put --mutable - uploaded.txt
         # echo DATA2 | tahoe put - uploaded.txt # should modify-in-place
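For context (not part of the diff): a minimal sketch of producing a PEM file that the new ``tahoe put --mutable --private-key-path`` option could consume. The option name and the 2048-bit RSA fixture come from the hunks above; the assumption here is that an unencrypted PKCS#8 RSA PEM (which is what the checked-in ``openssl-rsa-2048.txt`` fixture looks like) is acceptable input.

# Sketch only; key size and encoding mirror the test fixture above.
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization

def write_rsa_pem(path: str, bits: int = 2048) -> None:
    # Generate a fresh RSA key (older cryptography releases also need a
    # backend argument here).
    key = rsa.generate_private_key(public_exponent=65537, key_size=bits)
    pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    )
    with open(path, "wb") as f:
        f.write(pem)

if __name__ == "__main__":
    write_rsa_pem("my-mutable-key.pem")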
@ -1,14 +1,8 @@
|
|||||||
"""
|
"""
|
||||||
Ported to Python 3.
|
Functionality related to a lot of the test suite.
|
||||||
"""
|
"""
|
||||||
from __future__ import print_function
|
from __future__ import annotations
|
||||||
from __future__ import absolute_import
|
|
||||||
from __future__ import division
|
|
||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
from future.utils import PY2, native_str
|
|
||||||
if PY2:
|
|
||||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
|
||||||
from past.builtins import chr as byteschr
|
from past.builtins import chr as byteschr
|
||||||
|
|
||||||
__all__ = [
|
__all__ = [
|
||||||
@ -111,25 +105,15 @@ from allmydata.scripts.common import (
|
|||||||
|
|
||||||
from ..crypto import (
|
from ..crypto import (
|
||||||
ed25519,
|
ed25519,
|
||||||
|
rsa,
|
||||||
)
|
)
|
||||||
from .eliotutil import (
|
from .eliotutil import (
|
||||||
EliotLoggedRunTest,
|
EliotLoggedRunTest,
|
||||||
)
|
)
|
||||||
from .common_util import ShouldFailMixin # noqa: F401
|
from .common_util import ShouldFailMixin # noqa: F401
|
||||||
|
|
||||||
if sys.platform == "win32" and PY2:
|
|
||||||
# Python 2.7 doesn't have good options for launching a process with
|
|
||||||
# non-ASCII in its command line. So use this alternative that does a
|
|
||||||
# better job. However, only use it on Windows because it doesn't work
|
|
||||||
# anywhere else.
|
|
||||||
from ._win_subprocess import (
|
|
||||||
Popen,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
from subprocess import (
|
from subprocess import (
|
||||||
Popen,
|
Popen,
|
||||||
)
|
|
||||||
from subprocess import (
|
|
||||||
PIPE,
|
PIPE,
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -298,7 +282,7 @@ class UseNode(object):
|
|||||||
plugin_config = attr.ib()
|
plugin_config = attr.ib()
|
||||||
storage_plugin = attr.ib()
|
storage_plugin = attr.ib()
|
||||||
basedir = attr.ib(validator=attr.validators.instance_of(FilePath))
|
basedir = attr.ib(validator=attr.validators.instance_of(FilePath))
|
||||||
introducer_furl = attr.ib(validator=attr.validators.instance_of(native_str),
|
introducer_furl = attr.ib(validator=attr.validators.instance_of(str),
|
||||||
converter=six.ensure_str)
|
converter=six.ensure_str)
|
||||||
node_config = attr.ib(default=attr.Factory(dict))
|
node_config = attr.ib(default=attr.Factory(dict))
|
||||||
|
|
||||||
@ -639,15 +623,28 @@ class FakeMutableFileNode(object): # type: ignore # incomplete implementation
 
     MUTABLE_SIZELIMIT = 10000
 
-    def __init__(self, storage_broker, secret_holder,
-                 default_encoding_parameters, history, all_contents):
+    _public_key: rsa.PublicKey | None
+    _private_key: rsa.PrivateKey | None
+
+    def __init__(self,
+                 storage_broker,
+                 secret_holder,
+                 default_encoding_parameters,
+                 history,
+                 all_contents,
+                 keypair: tuple[rsa.PublicKey, rsa.PrivateKey] | None
+                 ):
         self.all_contents = all_contents
-        self.file_types = {} # storage index => MDMF_VERSION or SDMF_VERSION
+        self.file_types: dict[bytes, int] = {} # storage index => MDMF_VERSION or SDMF_VERSION
-        self.init_from_cap(make_mutable_file_cap())
+        self.init_from_cap(make_mutable_file_cap(keypair))
         self._k = default_encoding_parameters['k']
         self._segsize = default_encoding_parameters['max_segment_size']
-    def create(self, contents, key_generator=None, keysize=None,
-               version=SDMF_VERSION):
+        if keypair is None:
+            self._public_key = self._private_key = None
+        else:
+            self._public_key, self._private_key = keypair
+
+    def create(self, contents, version=SDMF_VERSION):
         if version == MDMF_VERSION and \
             isinstance(self.my_uri, (uri.ReadonlySSKFileURI,
                                      uri.WriteableSSKFileURI)):
@ -843,9 +840,28 @@ class FakeMutableFileNode(object): # type: ignore # incomplete implementation
         return defer.succeed(consumer)
 
 
-def make_mutable_file_cap():
-    return uri.WriteableSSKFileURI(writekey=os.urandom(16),
-                                   fingerprint=os.urandom(32))
+def make_mutable_file_cap(
+        keypair: tuple[rsa.PublicKey, rsa.PrivateKey] | None = None,
+) -> uri.WriteableSSKFileURI:
+    """
+    Create a local representation of a mutable object.
+
+    :param keypair: If None, a random keypair will be generated for the new
+        object.  Otherwise, this is the keypair for that object.
+    """
+    if keypair is None:
+        writekey = os.urandom(16)
+        fingerprint = os.urandom(32)
+    else:
+        pubkey, privkey = keypair
+        pubkey_s = rsa.der_string_from_verifying_key(pubkey)
+        privkey_s = rsa.der_string_from_signing_key(privkey)
+        writekey = hashutil.ssk_writekey_hash(privkey_s)
+        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(pubkey_s)
+
+    return uri.WriteableSSKFileURI(
+        writekey=writekey, fingerprint=fingerprint,
+    )
+
 def make_mdmf_mutable_file_cap():
     return uri.WriteableMDMFFileURI(writekey=os.urandom(16),
@ -875,7 +891,7 @@ def create_mutable_filenode(contents, mdmf=False, all_contents=None):
         encoding_params['max_segment_size'] = 128*1024
 
     filenode = FakeMutableFileNode(None, None, encoding_params, None,
-                                   all_contents)
+                                   all_contents, None)
     filenode.init_from_cap(cap)
     if mdmf:
         filenode.create(MutableData(contents), version=MDMF_VERSION)
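A short usage sketch of the reworked ``make_mutable_file_cap``. Assumptions are flagged in the comments: that this helper module is importable as ``allmydata.test.common``, and that ``create_signing_keypair(2048)`` returns ``(private, public)``, as the filenode test later in this diff shows.

# Sketch only, not part of the diff.
from allmydata.crypto.rsa import create_signing_keypair
from allmydata.test.common import make_mutable_file_cap  # assumed module path

priv, pub = create_signing_keypair(2048)
# With an explicit keypair the writekey and fingerprint are derived from the
# key material, so the same keypair always produces the same capability.
cap_a = make_mutable_file_cap(keypair=(pub, priv))
cap_b = make_mutable_file_cap(keypair=(pub, priv))
assert (cap_a.writekey, cap_a.fingerprint) == (cap_b.writekey, cap_b.fingerprint)
# Without a keypair the cap is random, as before.
random_cap = make_mutable_file_cap()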
@ -5,22 +5,14 @@ in ``allmydata.test.test_system``.
|
|||||||
Ported to Python 3.
|
Ported to Python 3.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
from __future__ import absolute_import
|
from typing import Optional
|
||||||
from __future__ import division
|
|
||||||
from __future__ import print_function
|
|
||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
from future.utils import PY2
|
|
||||||
if PY2:
|
|
||||||
# Don't import bytes since it causes issues on (so far unported) modules on Python 2.
|
|
||||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, dict, list, object, range, max, min, str # noqa: F401
|
|
||||||
|
|
||||||
import os
|
import os
|
||||||
from functools import partial
|
from functools import partial
|
||||||
|
|
||||||
from twisted.internet import reactor
|
from twisted.internet import reactor
|
||||||
from twisted.internet import defer
|
from twisted.internet import defer
|
||||||
from twisted.internet.defer import inlineCallbacks
|
from twisted.internet.defer import inlineCallbacks
|
||||||
|
from twisted.internet.task import deferLater
|
||||||
from twisted.application import service
|
from twisted.application import service
|
||||||
|
|
||||||
from foolscap.api import flushEventualQueue
|
from foolscap.api import flushEventualQueue
|
||||||
@ -28,6 +20,12 @@ from foolscap.api import flushEventualQueue
|
|||||||
from allmydata import client
|
from allmydata import client
|
||||||
from allmydata.introducer.server import create_introducer
|
from allmydata.introducer.server import create_introducer
|
||||||
from allmydata.util import fileutil, log, pollmixin
|
from allmydata.util import fileutil, log, pollmixin
|
||||||
|
from allmydata.util.deferredutil import async_to_deferred
|
||||||
|
from allmydata.storage import http_client
|
||||||
|
from allmydata.storage_client import (
|
||||||
|
NativeStorageServer,
|
||||||
|
HTTPNativeStorageServer,
|
||||||
|
)
|
||||||
|
|
||||||
from twisted.python.filepath import (
|
from twisted.python.filepath import (
|
||||||
FilePath,
|
FilePath,
|
||||||
@ -642,9 +640,51 @@ def _render_section_values(values):
     ))
 
 
+@async_to_deferred
+async def spin_until_cleanup_done(value=None, timeout=10):
+    """
+    At the end of the test, spin until the reactor has no more DelayedCalls
+    and file descriptors (or equivalents) registered. This prevents dirty
+    reactor errors, while also not hard-coding a fixed amount of time, so it
+    can finish faster on faster computers.
+
+    There is also a timeout: if it takes more than 10 seconds (by default) for
+    the remaining reactor state to clean itself up, the presumption is that it
+    will never get cleaned up and the spinning stops.
+
+    Make sure to run as last thing in tearDown.
+    """
+    def num_fds():
+        if hasattr(reactor, "handles"):
+            # IOCP!
+            return len(reactor.handles)
+        else:
+            # Normal reactor; having internal readers still registered is fine,
+            # that's not our code.
+            return len(
+                set(reactor.getReaders()) - set(reactor._internalReaders)
+            ) + len(reactor.getWriters())
+
+    for i in range(timeout * 1000):
+        # There's a single DelayedCall for AsynchronousDeferredRunTest's
+        # timeout...
+        if (len(reactor.getDelayedCalls()) < 2 and num_fds() == 0):
+            break
+        await deferLater(reactor, 0.001)
+    return value
+
+
 class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
 
+    # If set to True, use Foolscap for storage protocol. If set to False, HTTP
+    # will be used when possible. If set to None, this suggests a bug in the
+    # test code.
+    FORCE_FOOLSCAP_FOR_STORAGE : Optional[bool] = None
+
     def setUp(self):
+        self._http_client_pools = []
+        http_client.StorageClient.start_test_mode(self._got_new_http_connection_pool)
+        self.addCleanup(http_client.StorageClient.stop_test_mode)
         self.port_assigner = SameProcessStreamEndpointAssigner()
         self.port_assigner.setUp()
         self.addCleanup(self.port_assigner.tearDown)
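The ``@async_to_deferred`` decorator (imported from ``allmydata.util.deferredutil`` earlier in this hunk) is what lets the async helper above be chained with ``addBoth`` in ``tearDown`` below. A minimal illustrative sketch, using a hypothetical helper name:

# Sketch only; wait_a_tick is an invented example, not part of the diff.
from twisted.internet import reactor
from twisted.internet.task import deferLater
from allmydata.util.deferredutil import async_to_deferred

@async_to_deferred
async def wait_a_tick(value=None):
    # await works as usual inside; the caller just sees a Deferred that
    # fires with ``value``, mirroring how spin_until_cleanup_done passes
    # its input through so it can sit on an addBoth chain.
    await deferLater(reactor, 0.001)
    return value

d = wait_a_tick("result")  # a plain Deferred, usable with addCallback/addBoth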
||||||
@ -652,10 +692,35 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
|
|||||||
self.sparent = service.MultiService()
|
self.sparent = service.MultiService()
|
||||||
self.sparent.startService()
|
self.sparent.startService()
|
||||||
|
|
||||||
|
def _got_new_http_connection_pool(self, pool):
|
||||||
|
# Register the pool for shutdown later:
|
||||||
|
self._http_client_pools.append(pool)
|
||||||
|
# Disable retries:
|
||||||
|
pool.retryAutomatically = False
|
||||||
|
# Make a much more aggressive timeout for connections, we're connecting
|
||||||
|
# locally after all... and also make sure it's lower than the delay we
|
||||||
|
# add in tearDown, to prevent dirty reactor issues.
|
||||||
|
getConnection = pool.getConnection
|
||||||
|
|
||||||
|
def getConnectionWithTimeout(*args, **kwargs):
|
||||||
|
d = getConnection(*args, **kwargs)
|
||||||
|
d.addTimeout(1, reactor)
|
||||||
|
return d
|
||||||
|
|
||||||
|
pool.getConnection = getConnectionWithTimeout
|
||||||
|
|
||||||
|
def close_idle_http_connections(self):
|
||||||
|
"""Close all HTTP client connections that are just hanging around."""
|
||||||
|
return defer.gatherResults(
|
||||||
|
[pool.closeCachedConnections() for pool in self._http_client_pools]
|
||||||
|
)
|
||||||
|
|
||||||
def tearDown(self):
|
def tearDown(self):
|
||||||
log.msg("shutting down SystemTest services")
|
log.msg("shutting down SystemTest services")
|
||||||
d = self.sparent.stopService()
|
d = self.sparent.stopService()
|
||||||
d.addBoth(flush_but_dont_ignore)
|
d.addBoth(flush_but_dont_ignore)
|
||||||
|
d.addBoth(lambda x: self.close_idle_http_connections().addCallback(lambda _: x))
|
||||||
|
d.addBoth(spin_until_cleanup_done)
|
||||||
return d
|
return d
|
||||||
|
|
||||||
def getdir(self, subdir):
|
def getdir(self, subdir):
|
||||||
@ -714,21 +779,31 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
|
|||||||
:return: A ``Deferred`` that fires when the nodes have connected to
|
:return: A ``Deferred`` that fires when the nodes have connected to
|
||||||
each other.
|
each other.
|
||||||
"""
|
"""
|
||||||
|
self.assertIn(
|
||||||
|
self.FORCE_FOOLSCAP_FOR_STORAGE, (True, False),
|
||||||
|
"You forgot to set FORCE_FOOLSCAP_FOR_STORAGE on {}".format(self.__class__)
|
||||||
|
)
|
||||||
self.numclients = NUMCLIENTS
|
self.numclients = NUMCLIENTS
|
||||||
|
|
||||||
self.introducer = yield self._create_introducer()
|
self.introducer = yield self._create_introducer()
|
||||||
self.add_service(self.introducer)
|
self.add_service(self.introducer)
|
||||||
self.introweb_url = self._get_introducer_web()
|
self.introweb_url = self._get_introducer_web()
|
||||||
yield self._set_up_client_nodes()
|
yield self._set_up_client_nodes(self.FORCE_FOOLSCAP_FOR_STORAGE)
|
||||||
|
native_server = next(iter(self.clients[0].storage_broker.get_known_servers()))
|
||||||
|
if self.FORCE_FOOLSCAP_FOR_STORAGE:
|
||||||
|
expected_storage_server_class = NativeStorageServer
|
||||||
|
else:
|
||||||
|
expected_storage_server_class = HTTPNativeStorageServer
|
||||||
|
self.assertIsInstance(native_server, expected_storage_server_class)
|
||||||
|
|
||||||
@inlineCallbacks
|
@inlineCallbacks
|
||||||
def _set_up_client_nodes(self):
|
def _set_up_client_nodes(self, force_foolscap):
|
||||||
q = self.introducer
|
q = self.introducer
|
||||||
self.introducer_furl = q.introducer_url
|
self.introducer_furl = q.introducer_url
|
||||||
self.clients = []
|
self.clients = []
|
||||||
basedirs = []
|
basedirs = []
|
||||||
for i in range(self.numclients):
|
for i in range(self.numclients):
|
||||||
basedirs.append((yield self._set_up_client_node(i)))
|
basedirs.append((yield self._set_up_client_node(i, force_foolscap)))
|
||||||
|
|
||||||
# start clients[0], wait for its tub to be ready (at which point it
|
# start clients[0], wait for its tub to be ready (at which point it
|
||||||
# will have registered the helper furl).
|
# will have registered the helper furl).
|
||||||
@ -761,7 +836,7 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
|
|||||||
# and the helper-using webport
|
# and the helper-using webport
|
||||||
self.helper_webish_url = self.clients[3].getServiceNamed("webish").getURL()
|
self.helper_webish_url = self.clients[3].getServiceNamed("webish").getURL()
|
||||||
|
|
||||||
def _generate_config(self, which, basedir):
|
def _generate_config(self, which, basedir, force_foolscap=False):
|
||||||
config = {}
|
config = {}
|
||||||
|
|
||||||
allclients = set(range(self.numclients))
|
allclients = set(range(self.numclients))
|
||||||
@ -791,6 +866,7 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
|
|||||||
sethelper = partial(setconf, config, which, "helper")
|
sethelper = partial(setconf, config, which, "helper")
|
||||||
|
|
||||||
setnode("nickname", u"client %d \N{BLACK SMILING FACE}" % (which,))
|
setnode("nickname", u"client %d \N{BLACK SMILING FACE}" % (which,))
|
||||||
|
setconf(config, which, "storage", "force_foolscap", str(force_foolscap))
|
||||||
|
|
||||||
tub_location_hint, tub_port_endpoint = self.port_assigner.assign(reactor)
|
tub_location_hint, tub_port_endpoint = self.port_assigner.assign(reactor)
|
||||||
setnode("tub.port", tub_port_endpoint)
|
setnode("tub.port", tub_port_endpoint)
|
||||||
@ -808,17 +884,16 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin):
|
|||||||
" furl: %s\n") % self.introducer_furl
|
" furl: %s\n") % self.introducer_furl
|
||||||
iyaml_fn = os.path.join(basedir, "private", "introducers.yaml")
|
iyaml_fn = os.path.join(basedir, "private", "introducers.yaml")
|
||||||
fileutil.write(iyaml_fn, iyaml)
|
fileutil.write(iyaml_fn, iyaml)
|
||||||
|
|
||||||
return _render_config(config)
|
return _render_config(config)
|
||||||
|
|
||||||
def _set_up_client_node(self, which):
|
def _set_up_client_node(self, which, force_foolscap):
|
||||||
basedir = self.getdir("client%d" % (which,))
|
basedir = self.getdir("client%d" % (which,))
|
||||||
fileutil.make_dirs(os.path.join(basedir, "private"))
|
fileutil.make_dirs(os.path.join(basedir, "private"))
|
||||||
if len(SYSTEM_TEST_CERTS) > (which + 1):
|
if len(SYSTEM_TEST_CERTS) > (which + 1):
|
||||||
f = open(os.path.join(basedir, "private", "node.pem"), "w")
|
f = open(os.path.join(basedir, "private", "node.pem"), "w")
|
||||||
f.write(SYSTEM_TEST_CERTS[which + 1])
|
f.write(SYSTEM_TEST_CERTS[which + 1])
|
||||||
f.close()
|
f.close()
|
||||||
config = self._generate_config(which, basedir)
|
config = self._generate_config(which, basedir, force_foolscap)
|
||||||
fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
|
fileutil.write(os.path.join(basedir, 'tahoe.cfg'), config)
|
||||||
return basedir
|
return basedir
|
||||||
|
|
||||||
|
28
src/allmydata/test/data/openssl-rsa-2048.txt
Normal file
@ -0,0 +1,28 @@
|
|||||||
|
-----BEGIN PRIVATE KEY-----
|
||||||
|
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDF1MeXulDWFO05
|
||||||
|
YXCh8aqNc1dS1ddJRzsti4BOWuDOepUc0oCaSIcC5aR7XJ+vhX7a02mTIwvLcuEH
|
||||||
|
8sxx0BJU4jCDpRI6aAqaKJxwZx1e6AcVFJDl7vzymhvWhqHuKh0jTvwM2zONWTwV
|
||||||
|
V8m2PbDdxu0Prwdx+Mt2sDT6xHEhJj5fI/GUDUEdkhLJF6DQSulFRqqd0qP7qcI9
|
||||||
|
fSHZbM7MywfzqFUe8J1+tk4fBh2v7gNzN1INpzh2mDtLPAtxr4ZPtEb/0D0U4PsP
|
||||||
|
CniOHP0U8sF3VY0+K5qoCQr92cLRJvT/vLpQGVNUTFdFrtbqDoFxUCyEH4FUqRDX
|
||||||
|
2mVrPo2xAgMBAAECggEAA0Ev1y5/1NTPbgytBeIIH3d+v9hwKDbHecVoMwnOVeFJ
|
||||||
|
BZpONrOToovhAc1NXH2wj4SvwYWfpJ1HR9piDAuLeKlnuUu4ffzfE0gQok4E+v4r
|
||||||
|
2yg9ZcYBs/NOetAYVwbq960tiv/adFRr71E0WqbfS3fBx8q2L3Ujkkhd98PudUhQ
|
||||||
|
izbrTvkT7q00OPCWGwgWepMlLEowUWwZehGI0MlbONg7SbRraZZmG586Iy0tpC3e
|
||||||
|
AM7wC1/ORzFqcRgTIxXizQ5RHL7S0OQPLhbEJbuwPonNjze3p0EP4wNBELZTaVOd
|
||||||
|
xeA22Py4Bh/d1q3aEgbwR7tLyA8YfEzshTaY6oV8AQKBgQD0uFo8pyWk0AWXfjzn
|
||||||
|
jV4yYyPWy8pJA6YfAJAST8m7B/JeYgGlfHxTlNZiB40DsJq08tOZv3HAubgMpFIa
|
||||||
|
reuDxPqo6/Quwdy4Syu+AFhY48KIuwuoegG/L+5qcQLE69r1w71ZV6wUvLmXYX2I
|
||||||
|
Y6nYz+OdpD1JrMIr6Js60XURsQKBgQDO8yWl7ufIDKMbQpbs0PgUQsH4FtzGcP4J
|
||||||
|
j/7/8GfhKYt6rPsrojPHUbAi1+25xBVOuhm0Zx2ku2t+xPIMJoS+15EcER1Z2iHZ
|
||||||
|
Zci9UGpJpUxGcUhG7ETF1HZv0xKHcEOl9eIIOcAP9Vd9DqnGk85gy6ti6MHe/5Tn
|
||||||
|
IMD36OQ8AQKBgQDwqE7NMM67KnslRNaeG47T3F0FQbm3XehCuqnz6BUJYcI+gQD/
|
||||||
|
fdFB3K+LDcPmKgmqAtaGbxdtoPXXMM0xQXHHTrH15rxmMu1dK0dj/TDkkW7gSZko
|
||||||
|
YHtRSdCbSnGfuBXG9GxD7QzkA8g7j3sE4oXIGoDLqRVAW61DwubMy+jlsQKBgGNB
|
||||||
|
+Zepi1/Gt+BWQt8YpzPIhRIBnShMf3uEphCJdLlo3K4dE2btKBp8UpeTq0CDDJky
|
||||||
|
5ytAndYp0jf+K/2p59dEuyOUDdjPp5aGnA446JGkB35tzPW/Uoj0C049FVEChl+u
|
||||||
|
HBhH4peE285uXv2QXNbOOMh6zKmxOfDVI9iDyhwBAoGBAIXq2Ar0zDXXaL3ncEKo
|
||||||
|
pXt9BZ8OpJo2pvB1t2VPePOwEQ0wdT+H62fKNY47NiF9+LyS541/ps5Qhv6AmiKJ
|
||||||
|
Z7I0Vb6+sxQljYH/LNW+wc2T/pIAi/7sNcmnlBtZfoVwt99bk2CyoRALPLWHYCkh
|
||||||
|
c7Tty2bZzDZy6aCX+FGRt5N/
|
||||||
|
-----END PRIVATE KEY-----
|
@ -30,6 +30,7 @@ from allmydata.mutable.publish import MutableData
 from ..test_download import PausingConsumer, PausingAndStoppingConsumer, \
      StoppingConsumer, ImmediatelyStoppingConsumer
 from .. import common_util as testutil
+from ...crypto.rsa import create_signing_keypair
 from .util import (
     FakeStorage,
     make_nodemaker_with_peers,
@ -65,6 +66,16 @@ class Filenode(AsyncBrokenTestCase, testutil.ShouldFailMixin):
         d.addCallback(_created)
         return d
 
+    async def test_create_with_keypair(self):
+        """
+        An SDMF can be created using a given keypair.
+        """
+        (priv, pub) = create_signing_keypair(2048)
+        node = await self.nodemaker.create_mutable_file(keypair=(pub, priv))
+        self.assertThat(
+            (node.get_privkey(), node.get_pubkey()),
+            Equals((priv, pub)),
+        )
+
     def test_create_mdmf(self):
         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
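A hedged usage note on the ``keypair`` argument exercised above: ``create_signing_keypair`` returns ``(private, public)`` while ``create_mutable_file`` takes the pair as ``(public, private)``, so callers flip the order. Illustrative fragment only; the ``nodemaker`` attribute name is taken from the test above.

# Sketch of a helper a test class could define; not part of the diff.
async def make_node_with_known_key(self):
    # Order matters: (priv, pub) from create_signing_keypair, but the
    # keypair= argument wants (public, private), as in the test above.
    (priv, pub) = create_signing_keypair(2048)
    node = await self.nodemaker.create_mutable_file(keypair=(pub, priv))
    assert node.get_pubkey() == pub and node.get_privkey() == priv
    return node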
@ -1,19 +1,12 @@
|
|||||||
"""
|
"""
|
||||||
Ported to Python 3.
|
Tests related to the way ``allmydata.mutable`` handles different versions
|
||||||
|
of data for an object.
|
||||||
"""
|
"""
|
||||||
from __future__ import print_function
|
|
||||||
from __future__ import absolute_import
|
|
||||||
from __future__ import division
|
|
||||||
from __future__ import unicode_literals
|
|
||||||
|
|
||||||
from future.utils import PY2
|
|
||||||
if PY2:
|
|
||||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
|
||||||
|
|
||||||
|
from io import StringIO
|
||||||
import os
|
import os
|
||||||
from six.moves import cStringIO as StringIO
|
from typing import Optional
|
||||||
|
|
||||||
from twisted.internet import defer
|
|
||||||
from ..common import AsyncTestCase
|
from ..common import AsyncTestCase
|
||||||
from testtools.matchers import (
|
from testtools.matchers import (
|
||||||
Equals,
|
Equals,
|
||||||
@ -47,49 +40,38 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
|
|||||||
self.small_data = b"test data" * 10 # 90 B; SDMF
|
self.small_data = b"test data" * 10 # 90 B; SDMF
|
||||||
|
|
||||||
|
|
||||||
def do_upload_mdmf(self, data=None):
|
async def do_upload_mdmf(self, data: Optional[bytes] = None) -> MutableFileNode:
|
||||||
if data is None:
|
if data is None:
|
||||||
data = self.data
|
data = self.data
|
||||||
d = self.nm.create_mutable_file(MutableData(data),
|
n = await self.nm.create_mutable_file(MutableData(data),
|
||||||
version=MDMF_VERSION)
|
version=MDMF_VERSION)
|
||||||
def _then(n):
|
|
||||||
self.assertThat(n, IsInstance(MutableFileNode))
|
self.assertThat(n, IsInstance(MutableFileNode))
|
||||||
self.assertThat(n._protocol_version, Equals(MDMF_VERSION))
|
self.assertThat(n._protocol_version, Equals(MDMF_VERSION))
|
||||||
self.mdmf_node = n
|
self.mdmf_node = n
|
||||||
return n
|
return n
|
||||||
d.addCallback(_then)
|
|
||||||
return d
|
|
||||||
|
|
||||||
def do_upload_sdmf(self, data=None):
|
async def do_upload_sdmf(self, data: Optional[bytes] = None) -> MutableFileNode:
|
||||||
if data is None:
|
if data is None:
|
||||||
data = self.small_data
|
data = self.small_data
|
||||||
d = self.nm.create_mutable_file(MutableData(data))
|
n = await self.nm.create_mutable_file(MutableData(data))
|
||||||
def _then(n):
|
|
||||||
self.assertThat(n, IsInstance(MutableFileNode))
|
self.assertThat(n, IsInstance(MutableFileNode))
|
||||||
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
|
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
|
||||||
self.sdmf_node = n
|
self.sdmf_node = n
|
||||||
return n
|
return n
|
||||||
d.addCallback(_then)
|
|
||||||
return d
|
|
||||||
|
|
||||||
def do_upload_empty_sdmf(self):
|
async def do_upload_empty_sdmf(self) -> MutableFileNode:
|
||||||
d = self.nm.create_mutable_file(MutableData(b""))
|
n = await self.nm.create_mutable_file(MutableData(b""))
|
||||||
def _then(n):
|
|
||||||
self.assertThat(n, IsInstance(MutableFileNode))
|
self.assertThat(n, IsInstance(MutableFileNode))
|
||||||
self.sdmf_zero_length_node = n
|
self.sdmf_zero_length_node = n
|
||||||
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
|
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
|
||||||
return n
|
return n
|
||||||
d.addCallback(_then)
|
|
||||||
return d
|
|
||||||
|
|
||||||
def do_upload(self):
|
async def do_upload(self) -> MutableFileNode:
|
||||||
d = self.do_upload_mdmf()
|
await self.do_upload_mdmf()
|
||||||
d.addCallback(lambda ign: self.do_upload_sdmf())
|
return await self.do_upload_sdmf()
|
||||||
return d
|
|
||||||
|
|
||||||
def test_debug(self):
|
async def test_debug(self) -> None:
|
||||||
d = self.do_upload_mdmf()
|
n = await self.do_upload_mdmf()
|
||||||
def _debug(n):
|
|
||||||
fso = debug.FindSharesOptions()
|
fso = debug.FindSharesOptions()
|
||||||
storage_index = base32.b2a(n.get_storage_index())
|
storage_index = base32.b2a(n.get_storage_index())
|
||||||
fso.si_s = str(storage_index, "utf-8") # command-line options are unicode on Python 3
|
fso.si_s = str(storage_index, "utf-8") # command-line options are unicode on Python 3
|
||||||
@ -139,165 +121,123 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
|
|||||||
# encryption salts and is not constant. fields[5] is the
|
# encryption salts and is not constant. fields[5] is the
|
||||||
# remaining time on the longest lease, which is timing dependent.
|
# remaining time on the longest lease, which is timing dependent.
|
||||||
# The rest of the line is the quoted pathname to the share.
|
# The rest of the line is the quoted pathname to the share.
|
||||||
d.addCallback(_debug)
|
|
||||||
return d
|
|
||||||
|
|
||||||
def test_get_sequence_number(self):
|
async def test_get_sequence_number(self) -> None:
|
||||||
d = self.do_upload()
|
await self.do_upload()
|
||||||
d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
|
bv = await self.mdmf_node.get_best_readable_version()
|
||||||
d.addCallback(lambda bv:
|
self.assertThat(bv.get_sequence_number(), Equals(1))
|
||||||
self.assertThat(bv.get_sequence_number(), Equals(1)))
|
bv = await self.sdmf_node.get_best_readable_version()
|
||||||
d.addCallback(lambda ignored:
|
self.assertThat(bv.get_sequence_number(), Equals(1))
|
||||||
self.sdmf_node.get_best_readable_version())
|
|
||||||
d.addCallback(lambda bv:
|
|
||||||
self.assertThat(bv.get_sequence_number(), Equals(1)))
|
|
||||||
# Now update. The sequence number in both cases should be 1 in
|
# Now update. The sequence number in both cases should be 1 in
|
||||||
# both cases.
|
# both cases.
|
||||||
def _do_update(ignored):
|
|
||||||
new_data = MutableData(b"foo bar baz" * 100000)
|
new_data = MutableData(b"foo bar baz" * 100000)
|
||||||
new_small_data = MutableData(b"foo bar baz" * 10)
|
new_small_data = MutableData(b"foo bar baz" * 10)
|
||||||
d1 = self.mdmf_node.overwrite(new_data)
|
d1 = self.mdmf_node.overwrite(new_data)
|
||||||
d2 = self.sdmf_node.overwrite(new_small_data)
|
d2 = self.sdmf_node.overwrite(new_small_data)
|
||||||
dl = gatherResults([d1, d2])
|
await gatherResults([d1, d2])
|
||||||
return dl
|
bv = await self.mdmf_node.get_best_readable_version()
|
||||||
d.addCallback(_do_update)
|
self.assertThat(bv.get_sequence_number(), Equals(2))
|
||||||
d.addCallback(lambda ignored:
|
bv = await self.sdmf_node.get_best_readable_version()
|
||||||
self.mdmf_node.get_best_readable_version())
|
self.assertThat(bv.get_sequence_number(), Equals(2))
|
||||||
d.addCallback(lambda bv:
|
|
||||||
self.assertThat(bv.get_sequence_number(), Equals(2)))
|
|
||||||
d.addCallback(lambda ignored:
|
|
||||||
self.sdmf_node.get_best_readable_version())
|
|
||||||
d.addCallback(lambda bv:
|
|
||||||
self.assertThat(bv.get_sequence_number(), Equals(2)))
|
|
||||||
return d
|
|
||||||
|
|
||||||
|
async def test_cap_after_upload(self) -> None:
|
||||||
def test_cap_after_upload(self):
|
|
||||||
# If we create a new mutable file and upload things to it, and
|
# If we create a new mutable file and upload things to it, and
|
||||||
# it's an MDMF file, we should get an MDMF cap back from that
|
# it's an MDMF file, we should get an MDMF cap back from that
|
||||||
# file and should be able to use that.
|
# file and should be able to use that.
|
||||||
# That's essentially what MDMF node is, so just check that.
|
# That's essentially what MDMF node is, so just check that.
|
||||||
d = self.do_upload_mdmf()
|
await self.do_upload_mdmf()
|
||||||
def _then(ign):
|
|
||||||
mdmf_uri = self.mdmf_node.get_uri()
|
mdmf_uri = self.mdmf_node.get_uri()
|
||||||
cap = uri.from_string(mdmf_uri)
|
cap = uri.from_string(mdmf_uri)
|
||||||
self.assertTrue(isinstance(cap, uri.WriteableMDMFFileURI))
|
self.assertTrue(isinstance(cap, uri.WriteableMDMFFileURI))
|
||||||
readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
|
readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
|
||||||
cap = uri.from_string(readonly_mdmf_uri)
|
cap = uri.from_string(readonly_mdmf_uri)
|
||||||
self.assertTrue(isinstance(cap, uri.ReadonlyMDMFFileURI))
|
self.assertTrue(isinstance(cap, uri.ReadonlyMDMFFileURI))
|
||||||
d.addCallback(_then)
|
|
||||||
return d
|
|
||||||
|
|
||||||
def test_mutable_version(self):
|
async def test_mutable_version(self) -> None:
|
||||||
# assert that getting parameters from the IMutableVersion object
|
# assert that getting parameters from the IMutableVersion object
|
||||||
# gives us the same data as getting them from the filenode itself
|
# gives us the same data as getting them from the filenode itself
|
||||||
d = self.do_upload()
|
await self.do_upload()
|
||||||
d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
|
bv = await self.mdmf_node.get_best_mutable_version()
|
||||||
def _check_mdmf(bv):
|
|
||||||
n = self.mdmf_node
|
n = self.mdmf_node
|
||||||
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
|
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
|
||||||
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
|
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
|
||||||
self.assertFalse(bv.is_readonly())
|
self.assertFalse(bv.is_readonly())
|
||||||
d.addCallback(_check_mdmf)
|
|
||||||
d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
|
bv = await self.sdmf_node.get_best_mutable_version()
|
||||||
def _check_sdmf(bv):
|
|
||||||
n = self.sdmf_node
|
n = self.sdmf_node
|
||||||
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
|
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
|
||||||
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
|
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
|
||||||
self.assertFalse(bv.is_readonly())
|
self.assertFalse(bv.is_readonly())
|
||||||
d.addCallback(_check_sdmf)
|
|
||||||
return d
|
|
||||||
|
|
||||||
|
|
||||||
def test_get_readonly_version(self):
|
async def test_get_readonly_version(self) -> None:
|
||||||
d = self.do_upload()
|
await self.do_upload()
|
||||||
d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
|
bv = await self.mdmf_node.get_best_readable_version()
|
||||||
d.addCallback(lambda bv: self.assertTrue(bv.is_readonly()))
|
self.assertTrue(bv.is_readonly())
|
||||||
|
|
||||||
# Attempting to get a mutable version of a mutable file from a
|
# Attempting to get a mutable version of a mutable file from a
|
||||||
# filenode initialized with a readcap should return a readonly
|
# filenode initialized with a readcap should return a readonly
|
||||||
# version of that same node.
|
# version of that same node.
|
||||||
d.addCallback(lambda ign: self.mdmf_node.get_readonly())
|
ro = self.mdmf_node.get_readonly()
|
||||||
d.addCallback(lambda ro: ro.get_best_mutable_version())
|
v = await ro.get_best_mutable_version()
|
||||||
d.addCallback(lambda v: self.assertTrue(v.is_readonly()))
|
self.assertTrue(v.is_readonly())
|
||||||
|
|
||||||
d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
|
bv = await self.sdmf_node.get_best_readable_version()
|
||||||
d.addCallback(lambda bv: self.assertTrue(bv.is_readonly()))
|
self.assertTrue(bv.is_readonly())
|
||||||
|
|
||||||
d.addCallback(lambda ign: self.sdmf_node.get_readonly())
|
ro = self.sdmf_node.get_readonly()
|
||||||
d.addCallback(lambda ro: ro.get_best_mutable_version())
|
v = await ro.get_best_mutable_version()
|
||||||
d.addCallback(lambda v: self.assertTrue(v.is_readonly()))
|
self.assertTrue(v.is_readonly())
|
||||||
return d
|
|
||||||
|
|
||||||
|
|
||||||
def test_toplevel_overwrite(self):
|
async def test_toplevel_overwrite(self) -> None:
|
||||||
new_data = MutableData(b"foo bar baz" * 100000)
|
new_data = MutableData(b"foo bar baz" * 100000)
|
||||||
new_small_data = MutableData(b"foo bar baz" * 10)
|
new_small_data = MutableData(b"foo bar baz" * 10)
|
||||||
d = self.do_upload()
|
await self.do_upload()
|
||||||
d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
|
await self.mdmf_node.overwrite(new_data)
|
||||||
d.addCallback(lambda ignored:
|
data = await self.mdmf_node.download_best_version()
|
||||||
self.mdmf_node.download_best_version())
|
self.assertThat(data, Equals(b"foo bar baz" * 100000))
|
||||||
d.addCallback(lambda data:
|
await self.sdmf_node.overwrite(new_small_data)
|
||||||
self.assertThat(data, Equals(b"foo bar baz" * 100000)))
|
data = await self.sdmf_node.download_best_version()
|
||||||
d.addCallback(lambda ignored:
|
self.assertThat(data, Equals(b"foo bar baz" * 10))
|
||||||
self.sdmf_node.overwrite(new_small_data))
|
|
||||||
d.addCallback(lambda ignored:
|
|
||||||
self.sdmf_node.download_best_version())
|
|
||||||
d.addCallback(lambda data:
|
|
||||||
self.assertThat(data, Equals(b"foo bar baz" * 10)))
|
|
||||||
return d
|
|
||||||
|
|
||||||
|
|
||||||
def test_toplevel_modify(self):
|
async def test_toplevel_modify(self) -> None:
|
||||||
d = self.do_upload()
|
await self.do_upload()
|
||||||
def modifier(old_contents, servermap, first_time):
|
def modifier(old_contents, servermap, first_time):
|
||||||
return old_contents + b"modified"
|
return old_contents + b"modified"
|
||||||
d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
|
await self.mdmf_node.modify(modifier)
|
||||||
d.addCallback(lambda ignored:
|
data = await self.mdmf_node.download_best_version()
|
||||||
self.mdmf_node.download_best_version())
|
self.assertThat(data, Contains(b"modified"))
|
||||||
d.addCallback(lambda data:
|
await self.sdmf_node.modify(modifier)
|
||||||
self.assertThat(data, Contains(b"modified")))
|
data = await self.sdmf_node.download_best_version()
|
||||||
d.addCallback(lambda ignored:
|
self.assertThat(data, Contains(b"modified"))
|
||||||
self.sdmf_node.modify(modifier))
|
|
||||||
d.addCallback(lambda ignored:
|
|
||||||
self.sdmf_node.download_best_version())
|
|
||||||
d.addCallback(lambda data:
|
|
||||||
self.assertThat(data, Contains(b"modified")))
|
|
||||||
return d
|
|
||||||
|
|
||||||
|
|
||||||
def test_version_modify(self):
|
async def test_version_modify(self) -> None:
|
||||||
# TODO: When we can publish multiple versions, alter this test
|
# TODO: When we can publish multiple versions, alter this test
|
||||||
# to modify a version other than the best usable version, then
|
# to modify a version other than the best usable version, then
|
||||||
# test to see that the best recoverable version is that.
|
# test to see that the best recoverable version is that.
|
||||||
d = self.do_upload()
|
await self.do_upload()
|
||||||
def modifier(old_contents, servermap, first_time):
|
def modifier(old_contents, servermap, first_time):
|
||||||
return old_contents + b"modified"
|
return old_contents + b"modified"
|
||||||
d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
|
await self.mdmf_node.modify(modifier)
|
||||||
d.addCallback(lambda ignored:
|
data = await self.mdmf_node.download_best_version()
|
||||||
self.mdmf_node.download_best_version())
|
self.assertThat(data, Contains(b"modified"))
|
||||||
d.addCallback(lambda data:
|
await self.sdmf_node.modify(modifier)
|
||||||
self.assertThat(data, Contains(b"modified")))
|
data = await self.sdmf_node.download_best_version()
|
||||||
d.addCallback(lambda ignored:
|
self.assertThat(data, Contains(b"modified"))
|
||||||
self.sdmf_node.modify(modifier))
|
|
||||||
d.addCallback(lambda ignored:
|
|
||||||
self.sdmf_node.download_best_version())
|
|
||||||
d.addCallback(lambda data:
|
|
||||||
self.assertThat(data, Contains(b"modified")))
|
|
||||||
return d
|
|
||||||
|
|
||||||
|
|
||||||
def test_download_version(self):
|
async def test_download_version(self) -> None:
|
||||||
d = self.publish_multiple()
|
await self.publish_multiple()
|
||||||
# We want to have two recoverable versions on the grid.
|
# We want to have two recoverable versions on the grid.
|
||||||
d.addCallback(lambda res:
|
|
||||||
self._set_versions({0:0,2:0,4:0,6:0,8:0,
|
self._set_versions({0:0,2:0,4:0,6:0,8:0,
|
||||||
1:1,3:1,5:1,7:1,9:1}))
|
1:1,3:1,5:1,7:1,9:1})
|
||||||
# Now try to download each version. We should get the plaintext
|
# Now try to download each version. We should get the plaintext
|
||||||
# associated with that version.
|
# associated with that version.
|
||||||
d.addCallback(lambda ignored:
|
smap = await self._fn.get_servermap(mode=MODE_READ)
|
||||||
self._fn.get_servermap(mode=MODE_READ))
|
|
||||||
def _got_servermap(smap):
|
|
||||||
versions = smap.recoverable_versions()
|
versions = smap.recoverable_versions()
|
||||||
assert len(versions) == 2
|
assert len(versions) == 2
|
||||||
|
|
||||||
@ -310,80 +250,58 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
|
|||||||
self.version1_index = self.version1_seqnum - 1
|
self.version1_index = self.version1_seqnum - 1
|
||||||
self.version2_index = self.version2_seqnum - 1
|
self.version2_index = self.version2_seqnum - 1
|
||||||
|
|
||||||
d.addCallback(_got_servermap)
|
results = await self._fn.download_version(self.servermap, self.version1)
|
||||||
d.addCallback(lambda ignored:
|
|
||||||
self._fn.download_version(self.servermap, self.version1))
|
|
||||||
d.addCallback(lambda results:
|
|
||||||
self.assertThat(self.CONTENTS[self.version1_index],
|
self.assertThat(self.CONTENTS[self.version1_index],
|
||||||
Equals(results)))
|
Equals(results))
|
||||||
d.addCallback(lambda ignored:
|
results = await self._fn.download_version(self.servermap, self.version2)
|
||||||
self._fn.download_version(self.servermap, self.version2))
|
|
||||||
d.addCallback(lambda results:
|
|
||||||
self.assertThat(self.CONTENTS[self.version2_index],
|
self.assertThat(self.CONTENTS[self.version2_index],
|
||||||
Equals(results)))
|
Equals(results))
|
||||||
return d
|
|
||||||
|
|
||||||
|
|
||||||
def test_download_nonexistent_version(self):
|
async def test_download_nonexistent_version(self) -> None:
|
||||||
d = self.do_upload_mdmf()
|
await self.do_upload_mdmf()
|
||||||
d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
|
servermap = await self.mdmf_node.get_servermap(mode=MODE_WRITE)
|
||||||
def _set_servermap(servermap):
|
await self.shouldFail(UnrecoverableFileError, "nonexistent version",
|
||||||
self.servermap = servermap
|
|
||||||
d.addCallback(_set_servermap)
|
|
||||||
d.addCallback(lambda ignored:
|
|
||||||
self.shouldFail(UnrecoverableFileError, "nonexistent version",
|
|
||||||
None,
|
None,
|
||||||
self.mdmf_node.download_version, self.servermap,
|
self.mdmf_node.download_version, servermap,
|
||||||
"not a version"))
|
"not a version")
|
||||||
return d
|
|
||||||
|
|
||||||
|
|
||||||
def _test_partial_read(self, node, expected, modes, step):
|
async def _test_partial_read(self, node, expected, modes, step) -> None:
|
||||||
d = node.get_best_readable_version()
|
version = await node.get_best_readable_version()
|
||||||
for (name, offset, length) in modes:
|
for (name, offset, length) in modes:
|
||||||
d.addCallback(self._do_partial_read, name, expected, offset, length)
|
await self._do_partial_read(version, name, expected, offset, length)
|
||||||
# then read the whole thing, but only a few bytes at a time, and see
|
# then read the whole thing, but only a few bytes at a time, and see
|
||||||
# that the results are what we expect.
|
# that the results are what we expect.
|
||||||
def _read_data(version):
|
|
||||||
c = consumer.MemoryConsumer()
|
c = consumer.MemoryConsumer()
|
||||||
d2 = defer.succeed(None)
|
|
||||||
for i in range(0, len(expected), step):
|
for i in range(0, len(expected), step):
|
||||||
d2.addCallback(lambda ignored, i=i: version.read(c, i, step))
|
await version.read(c, i, step)
|
||||||
d2.addCallback(lambda ignored:
|
self.assertThat(expected, Equals(b"".join(c.chunks)))
|
||||||
self.assertThat(expected, Equals(b"".join(c.chunks))))
|
|
||||||
return d2
|
|
||||||
d.addCallback(_read_data)
|
|
||||||
return d
|
|
||||||
|
|
||||||
def _do_partial_read(self, version, name, expected, offset, length):
|
async def _do_partial_read(self, version, name, expected, offset, length) -> None:
|
||||||
c = consumer.MemoryConsumer()
|
c = consumer.MemoryConsumer()
|
||||||
d = version.read(c, offset, length)
|
await version.read(c, offset, length)
|
||||||
if length is None:
|
if length is None:
|
||||||
expected_range = expected[offset:]
|
expected_range = expected[offset:]
|
||||||
else:
|
else:
|
||||||
expected_range = expected[offset:offset+length]
|
expected_range = expected[offset:offset+length]
|
||||||
d.addCallback(lambda ignored: b"".join(c.chunks))
|
results = b"".join(c.chunks)
|
||||||
def _check(results):
|
|
||||||
if results != expected_range:
|
if results != expected_range:
|
||||||
print("read([%d]+%s) got %d bytes, not %d" % \
|
print("read([%d]+%s) got %d bytes, not %d" % \
|
||||||
(offset, length, len(results), len(expected_range)))
|
(offset, length, len(results), len(expected_range)))
|
||||||
print("got: %s ... %s" % (results[:20], results[-20:]))
|
print("got: %r ... %r" % (results[:20], results[-20:]))
|
||||||
print("exp: %s ... %s" % (expected_range[:20], expected_range[-20:]))
|
print("exp: %r ... %r" % (expected_range[:20], expected_range[-20:]))
|
||||||
self.fail("results[%s] != expected_range" % name)
|
self.fail("results[%s] != expected_range" % name)
|
||||||
return version # daisy-chained to next call
|
|
||||||
d.addCallback(_check)
|
|
||||||
return d
|
|
||||||
|
|
||||||
def test_partial_read_mdmf_0(self):
|
async def test_partial_read_mdmf_0(self) -> None:
|
||||||
data = b""
|
data = b""
|
||||||
d = self.do_upload_mdmf(data=data)
|
result = await self.do_upload_mdmf(data=data)
|
||||||
modes = [("all1", 0,0),
|
modes = [("all1", 0,0),
|
||||||
("all2", 0,None),
|
("all2", 0,None),
|
||||||
]
|
]
|
||||||
d.addCallback(self._test_partial_read, data, modes, 1)
|
await self._test_partial_read(result, data, modes, 1)
|
||||||
return d
|
|
||||||
|
|
||||||
def test_partial_read_mdmf_large(self):
|
async def test_partial_read_mdmf_large(self) -> None:
|
||||||
segment_boundary = mathutil.next_multiple(128 * 1024, 3)
|
segment_boundary = mathutil.next_multiple(128 * 1024, 3)
|
||||||
modes = [("start_on_segment_boundary", segment_boundary, 50),
|
modes = [("start_on_segment_boundary", segment_boundary, 50),
|
||||||
("ending_one_byte_after_segment_boundary", segment_boundary-50, 51),
|
("ending_one_byte_after_segment_boundary", segment_boundary-50, 51),
|
||||||
@ -393,20 +311,18 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
|
|||||||
("complete_file1", 0, len(self.data)),
|
("complete_file1", 0, len(self.data)),
|
||||||
("complete_file2", 0, None),
|
("complete_file2", 0, None),
|
||||||
]
|
]
|
||||||
d = self.do_upload_mdmf()
|
result = await self.do_upload_mdmf()
|
||||||
d.addCallback(self._test_partial_read, self.data, modes, 10000)
|
await self._test_partial_read(result, self.data, modes, 10000)
|
||||||
return d
|
|
||||||
|
|
||||||
def test_partial_read_sdmf_0(self):
|
async def test_partial_read_sdmf_0(self) -> None:
|
||||||
data = b""
|
data = b""
|
||||||
modes = [("all1", 0,0),
|
modes = [("all1", 0,0),
|
||||||
("all2", 0,None),
|
("all2", 0,None),
|
||||||
]
|
]
|
||||||
d = self.do_upload_sdmf(data=data)
|
result = await self.do_upload_sdmf(data=data)
|
||||||
d.addCallback(self._test_partial_read, data, modes, 1)
|
await self._test_partial_read(result, data, modes, 1)
|
||||||
return d
|
|
||||||
|
|
||||||
def test_partial_read_sdmf_2(self):
|
async def test_partial_read_sdmf_2(self) -> None:
|
||||||
data = b"hi"
|
data = b"hi"
|
||||||
modes = [("one_byte", 0, 1),
|
modes = [("one_byte", 0, 1),
|
||||||
("last_byte", 1, 1),
|
("last_byte", 1, 1),
|
||||||
@ -414,11 +330,10 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
|
|||||||
("complete_file", 0, 2),
|
("complete_file", 0, 2),
|
||||||
("complete_file2", 0, None),
|
("complete_file2", 0, None),
|
||||||
]
|
]
|
||||||
d = self.do_upload_sdmf(data=data)
|
result = await self.do_upload_sdmf(data=data)
|
||||||
d.addCallback(self._test_partial_read, data, modes, 1)
|
await self._test_partial_read(result, data, modes, 1)
|
||||||
return d
|
|
||||||
|
|
||||||
def test_partial_read_sdmf_90(self):
|
async def test_partial_read_sdmf_90(self) -> None:
|
||||||
modes = [("start_at_middle", 50, 40),
|
modes = [("start_at_middle", 50, 40),
|
||||||
("start_at_middle2", 50, None),
|
("start_at_middle2", 50, None),
|
||||||
("zero_length_at_start", 0, 0),
|
("zero_length_at_start", 0, 0),
|
||||||
@ -427,11 +342,10 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
|
|||||||
("complete_file1", 0, None),
|
("complete_file1", 0, None),
|
||||||
("complete_file2", 0, 90),
|
("complete_file2", 0, 90),
|
||||||
]
|
]
|
||||||
d = self.do_upload_sdmf()
|
result = await self.do_upload_sdmf()
|
||||||
d.addCallback(self._test_partial_read, self.small_data, modes, 10)
|
await self._test_partial_read(result, self.small_data, modes, 10)
|
||||||
return d
|
|
||||||
|
|
||||||
def test_partial_read_sdmf_100(self):
|
async def test_partial_read_sdmf_100(self) -> None:
|
||||||
data = b"test data "*10
|
data = b"test data "*10
|
||||||
modes = [("start_at_middle", 50, 50),
|
modes = [("start_at_middle", 50, 50),
|
||||||
("start_at_middle2", 50, None),
|
("start_at_middle2", 50, None),
|
||||||
@ -440,42 +354,30 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
|
|||||||
("complete_file1", 0, 100),
|
("complete_file1", 0, 100),
|
||||||
("complete_file2", 0, None),
|
("complete_file2", 0, None),
|
||||||
]
|
]
|
||||||
d = self.do_upload_sdmf(data=data)
|
result = await self.do_upload_sdmf(data=data)
|
||||||
d.addCallback(self._test_partial_read, data, modes, 10)
|
await self._test_partial_read(result, data, modes, 10)
|
||||||
return d
|
|
||||||
|
|
||||||
|
async def _test_read_and_download(self, node, expected) -> None:
|
||||||
def _test_read_and_download(self, node, expected):
|
version = await node.get_best_readable_version()
|
||||||
d = node.get_best_readable_version()
|
|
||||||
def _read_data(version):
|
|
||||||
c = consumer.MemoryConsumer()
|
c = consumer.MemoryConsumer()
|
||||||
|
await version.read(c)
|
||||||
|
self.assertThat(expected, Equals(b"".join(c.chunks)))
|
||||||
|
|
||||||
c2 = consumer.MemoryConsumer()
|
c2 = consumer.MemoryConsumer()
|
||||||
d2 = defer.succeed(None)
|
await version.read(c2, offset=0, size=len(expected))
|
||||||
d2.addCallback(lambda ignored: version.read(c))
|
self.assertThat(expected, Equals(b"".join(c2.chunks)))
|
||||||
d2.addCallback(lambda ignored:
|
|
||||||
self.assertThat(expected, Equals(b"".join(c.chunks))))
|
|
||||||
|
|
||||||
d2.addCallback(lambda ignored: version.read(c2, offset=0,
|
data = await node.download_best_version()
|
||||||
size=len(expected)))
|
self.assertThat(expected, Equals(data))
|
||||||
d2.addCallback(lambda ignored:
|
|
||||||
self.assertThat(expected, Equals(b"".join(c2.chunks))))
|
|
||||||
return d2
|
|
||||||
d.addCallback(_read_data)
|
|
||||||
d.addCallback(lambda ignored: node.download_best_version())
|
|
||||||
d.addCallback(lambda data: self.assertThat(expected, Equals(data)))
|
|
||||||
return d
|
|
||||||
|
|
||||||
def test_read_and_download_mdmf(self):
|
async def test_read_and_download_mdmf(self) -> None:
|
||||||
d = self.do_upload_mdmf()
|
result = await self.do_upload_mdmf()
|
||||||
d.addCallback(self._test_read_and_download, self.data)
|
await self._test_read_and_download(result, self.data)
|
||||||
return d
|
|
||||||
|
|
||||||
def test_read_and_download_sdmf(self):
|
async def test_read_and_download_sdmf(self) -> None:
|
||||||
d = self.do_upload_sdmf()
|
result = await self.do_upload_sdmf()
|
||||||
d.addCallback(self._test_read_and_download, self.small_data)
|
await self._test_read_and_download(result, self.small_data)
|
||||||
return d
|
|
||||||
|
|
||||||
def test_read_and_download_sdmf_zero_length(self):
|
async def test_read_and_download_sdmf_zero_length(self) -> None:
|
||||||
d = self.do_upload_empty_sdmf()
|
result = await self.do_upload_empty_sdmf()
|
||||||
d.addCallback(self._test_read_and_download, b"")
|
await self._test_read_and_download(result, b"")
|
||||||
return d
|
|
||||||
|
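The preceding test_version changes all follow the same mechanical conversion from Deferred callback chains to async test methods. A condensed before/after sketch of that pattern (method names here are illustrative; the steps mirror ``test_get_sequence_number`` above):

# Before: explicit Deferred chaining.
def test_sequence_number_old_style(self):
    d = self.do_upload()
    d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
    d.addCallback(lambda bv: self.assertThat(bv.get_sequence_number(), Equals(1)))
    return d

# After: the same steps as straight-line awaits; the AsyncTestCase machinery
# this suite already uses runs coroutine test methods directly.
async def test_sequence_number_new_style(self):
    await self.do_upload()
    bv = await self.mdmf_node.get_best_readable_version()
    self.assertThat(bv.get_sequence_number(), Equals(1))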
@ -1619,7 +1619,8 @@ class FakeMutableFile(object): # type: ignore # incomplete implementation
         return defer.succeed(None)
 
 class FakeNodeMaker(NodeMaker):
-    def create_mutable_file(self, contents=b"", keysize=None, version=None):
+    def create_mutable_file(self, contents=b"", keysize=None, version=None, keypair=None):
+        assert keypair is None, "FakeNodeMaker does not support externally supplied keypairs"
         return defer.succeed(FakeMutableFile(contents))
 
 class FakeClient2(_Client): # type: ignore # tahoe-lafs/ticket/3573
@ -15,9 +15,8 @@ from typing import Set
|
|||||||
from random import Random
|
from random import Random
|
||||||
from unittest import SkipTest
|
from unittest import SkipTest
|
||||||
|
|
||||||
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
|
from twisted.internet.defer import inlineCallbacks, returnValue
|
||||||
from twisted.internet.task import Clock
|
from twisted.internet.task import Clock
|
||||||
from twisted.internet import reactor
|
|
||||||
from foolscap.api import Referenceable, RemoteException
|
from foolscap.api import Referenceable, RemoteException
|
||||||
|
|
||||||
# A better name for this would be IStorageClient...
|
# A better name for this would be IStorageClient...
|
||||||
@ -26,8 +25,6 @@ from allmydata.interfaces import IStorageServer
|
|||||||
from .common_system import SystemTestMixin
|
from .common_system import SystemTestMixin
|
||||||
from .common import AsyncTestCase
|
from .common import AsyncTestCase
|
||||||
from allmydata.storage.server import StorageServer # not a IStorageServer!!
|
from allmydata.storage.server import StorageServer # not a IStorageServer!!
|
||||||
from allmydata.storage.http_client import StorageClient
|
|
||||||
from allmydata.storage_client import _HTTPStorageServer
|
|
||||||
|
|
||||||
|
|
||||||
# Use random generator with known seed, so results are reproducible if tests
|
# Use random generator with known seed, so results are reproducible if tests
|
||||||
@ -439,6 +436,17 @@ class IStorageServerImmutableAPIsTestsMixin(object):
|
|||||||
b"immutable", storage_index, 0, b"ono"
|
b"immutable", storage_index, 0, b"ono"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@inlineCallbacks
|
||||||
|
def test_advise_corrupt_share_unknown_share_number(self):
|
||||||
|
"""
|
||||||
|
+        Calling ``advise_corrupt_share()`` on an immutable share, with an
+        unknown share number, does not result in error.
+        """
+        storage_index, _, _ = yield self.create_share()
+        yield self.storage_client.advise_corrupt_share(
+            b"immutable", storage_index, 999, b"ono"
+        )
+
     @inlineCallbacks
     def test_allocate_buckets_creates_lease(self):
         """
@@ -908,6 +916,19 @@ class IStorageServerMutableAPIsTestsMixin(object):
             b"mutable", storage_index, 0, b"ono"
         )

+    @inlineCallbacks
+    def test_advise_corrupt_share_unknown_share_number(self):
+        """
+        Calling ``advise_corrupt_share()`` on a mutable share with an unknown
+        share number does not result in error (other behavior is opaque at this
+        level of abstraction).
+        """
+        secrets, storage_index = yield self.create_slot()
+
+        yield self.storage_client.advise_corrupt_share(
+            b"mutable", storage_index, 999, b"ono"
+        )
+
     @inlineCallbacks
     def test_STARAW_create_lease(self):
         """
@@ -1023,7 +1044,10 @@ class _SharedMixin(SystemTestMixin):
     SKIP_TESTS = set()  # type: Set[str]

     def _get_istorage_server(self):
-        raise NotImplementedError("implement in subclass")
+        native_server = next(iter(self.clients[0].storage_broker.get_known_servers()))
+        client = native_server.get_storage_server()
+        self.assertTrue(IStorageServer.providedBy(client))
+        return client

     @inlineCallbacks
     def setUp(self):
@@ -1046,7 +1070,7 @@ class _SharedMixin(SystemTestMixin):
         self._clock = Clock()
         self._clock.advance(123456)
         self.server._clock = self._clock
-        self.storage_client = yield self._get_istorage_server()
+        self.storage_client = self._get_istorage_server()

     def fake_time(self):
         """Return the current fake, test-controlled, time."""
@@ -1062,51 +1086,29 @@ class _SharedMixin(SystemTestMixin):
         yield SystemTestMixin.tearDown(self)


-class _FoolscapMixin(_SharedMixin):
-    """Run tests on Foolscap version of ``IStorageServer``."""
-
-    def _get_native_server(self):
-        return next(iter(self.clients[0].storage_broker.get_known_servers()))
-
-    def _get_istorage_server(self):
-        client = self._get_native_server().get_storage_server()
-        self.assertTrue(IStorageServer.providedBy(client))
-        return succeed(client)
-
-
-class _HTTPMixin(_SharedMixin):
-    """Run tests on the HTTP version of ``IStorageServer``."""
-
-    def _get_istorage_server(self):
-        nurl = list(self.clients[0].storage_nurls)[0]
-
-        # Create HTTP client with non-persistent connections, so we don't leak
-        # state across tests:
-        client: IStorageServer = _HTTPStorageServer.from_http_client(
-            StorageClient.from_nurl(nurl, reactor, persistent=False)
-        )
-        self.assertTrue(IStorageServer.providedBy(client))
-
-        return succeed(client)
-
-
 class FoolscapSharedAPIsTests(
-    _FoolscapMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase
+    _SharedMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase
 ):
     """Foolscap-specific tests for shared ``IStorageServer`` APIs."""

+    FORCE_FOOLSCAP_FOR_STORAGE = True
+

 class HTTPSharedAPIsTests(
-    _HTTPMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase
+    _SharedMixin, IStorageServerSharedAPIsTestsMixin, AsyncTestCase
 ):
     """HTTP-specific tests for shared ``IStorageServer`` APIs."""

+    FORCE_FOOLSCAP_FOR_STORAGE = False
+

 class FoolscapImmutableAPIsTests(
-    _FoolscapMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase
+    _SharedMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase
 ):
     """Foolscap-specific tests for immutable ``IStorageServer`` APIs."""

+    FORCE_FOOLSCAP_FOR_STORAGE = True
+
     def test_disconnection(self):
         """
         If we disconnect in the middle of writing to a bucket, all data is
@@ -1129,23 +1131,29 @@ class FoolscapImmutableAPIsTests(
         """
         current = self.storage_client
         yield self.bounce_client(0)
-        self.storage_client = self._get_native_server().get_storage_server()
+        self.storage_client = self._get_istorage_server()
         assert self.storage_client is not current


 class HTTPImmutableAPIsTests(
-    _HTTPMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase
+    _SharedMixin, IStorageServerImmutableAPIsTestsMixin, AsyncTestCase
 ):
     """HTTP-specific tests for immutable ``IStorageServer`` APIs."""

+    FORCE_FOOLSCAP_FOR_STORAGE = False
+

 class FoolscapMutableAPIsTests(
-    _FoolscapMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase
+    _SharedMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase
 ):
     """Foolscap-specific tests for mutable ``IStorageServer`` APIs."""

+    FORCE_FOOLSCAP_FOR_STORAGE = True
+

 class HTTPMutableAPIsTests(
-    _HTTPMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase
+    _SharedMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase
 ):
     """HTTP-specific tests for mutable ``IStorageServer`` APIs."""

+    FORCE_FOOLSCAP_FOR_STORAGE = False
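The hunks above move all transport setup into ``_SharedMixin`` and leave each concrete test class with nothing but a ``FORCE_FOOLSCAP_FOR_STORAGE`` flag. One consequence worth noting: another protocol variant would now be a trivial subclass rather than a new mixin. A hypothetical sketch (not part of the patch, assuming the same mixins are importable):

    class AnotherMutableAPIsTests(
        _SharedMixin, IStorageServerMutableAPIsTestsMixin, AsyncTestCase
    ):
        """Hypothetical extra variant; only the transport flag differs."""
        FORCE_FOOLSCAP_FOR_STORAGE = False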
@@ -1,198 +0,0 @@
-"""
-Tests for allmydata.util.pipeline.
-
-Ported to Python 3.
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
-
-import gc
-
-from twisted.internet import defer
-from twisted.trial import unittest
-from twisted.python import log
-from twisted.python.failure import Failure
-
-from allmydata.util import pipeline
-
-
-class Pipeline(unittest.TestCase):
-    def pause(self, *args, **kwargs):
-        d = defer.Deferred()
-        self.calls.append( (d, args, kwargs) )
-        return d
-
-    def failUnlessCallsAre(self, expected):
-        #print(self.calls)
-        #print(expected)
-        self.failUnlessEqual(len(self.calls), len(expected), self.calls)
-        for i,c in enumerate(self.calls):
-            self.failUnlessEqual(c[1:], expected[i], str(i))
-
-    def test_basic(self):
-        self.calls = []
-        finished = []
-        p = pipeline.Pipeline(100)
-
-        d = p.flush() # fires immediately
-        d.addCallbacks(finished.append, log.err)
-        self.failUnlessEqual(len(finished), 1)
-        finished = []
-
-        d = p.add(10, self.pause, "one")
-        # the call should start right away, and our return Deferred should
-        # fire right away
-        d.addCallbacks(finished.append, log.err)
-        self.failUnlessEqual(len(finished), 1)
-        self.failUnlessEqual(finished[0], None)
-        self.failUnlessCallsAre([ ( ("one",) , {} ) ])
-        self.failUnlessEqual(p.gauge, 10)
-
-        # pipeline: [one]
-
-        finished = []
-        d = p.add(20, self.pause, "two", kw=2)
-        # pipeline: [one, two]
-
-        # the call and the Deferred should fire right away
-        d.addCallbacks(finished.append, log.err)
-        self.failUnlessEqual(len(finished), 1)
-        self.failUnlessEqual(finished[0], None)
-        self.failUnlessCallsAre([ ( ("one",) , {} ),
-                                  ( ("two",) , {"kw": 2} ),
-                                  ])
-        self.failUnlessEqual(p.gauge, 30)
-
-        self.calls[0][0].callback("one-result")
-        # pipeline: [two]
-        self.failUnlessEqual(p.gauge, 20)
-
-        finished = []
-        d = p.add(90, self.pause, "three", "posarg1")
-        # pipeline: [two, three]
-        flushed = []
-        fd = p.flush()
-        fd.addCallbacks(flushed.append, log.err)
-        self.failUnlessEqual(flushed, [])
-
-        # the call will be made right away, but the return Deferred will not,
-        # because the pipeline is now full.
-        d.addCallbacks(finished.append, log.err)
-        self.failUnlessEqual(len(finished), 0)
-        self.failUnlessCallsAre([ ( ("one",) , {} ),
-                                  ( ("two",) , {"kw": 2} ),
-                                  ( ("three", "posarg1"), {} ),
-                                  ])
-        self.failUnlessEqual(p.gauge, 110)
-
-        self.failUnlessRaises(pipeline.SingleFileError, p.add, 10, self.pause)
-
-        # retiring either call will unblock the pipeline, causing the #3
-        # Deferred to fire
-        self.calls[2][0].callback("three-result")
-        # pipeline: [two]
-
-        self.failUnlessEqual(len(finished), 1)
-        self.failUnlessEqual(finished[0], None)
-        self.failUnlessEqual(flushed, [])
-
-        # retiring call#2 will finally allow the flush() Deferred to fire
-        self.calls[1][0].callback("two-result")
-        self.failUnlessEqual(len(flushed), 1)
-
-    def test_errors(self):
-        self.calls = []
-        p = pipeline.Pipeline(100)
-
-        d1 = p.add(200, self.pause, "one")
-        d2 = p.flush()
-
-        finished = []
-        d1.addBoth(finished.append)
-        self.failUnlessEqual(finished, [])
-
-        flushed = []
-        d2.addBoth(flushed.append)
-        self.failUnlessEqual(flushed, [])
-
-        self.calls[0][0].errback(ValueError("oops"))
-
-        self.failUnlessEqual(len(finished), 1)
-        f = finished[0]
-        self.failUnless(isinstance(f, Failure))
-        self.failUnless(f.check(pipeline.PipelineError))
-        self.failUnlessIn("PipelineError", str(f.value))
-        self.failUnlessIn("ValueError", str(f.value))
-        r = repr(f.value)
-        self.failUnless("ValueError" in r, r)
-        f2 = f.value.error
-        self.failUnless(f2.check(ValueError))
-
-        self.failUnlessEqual(len(flushed), 1)
-        f = flushed[0]
-        self.failUnless(isinstance(f, Failure))
-        self.failUnless(f.check(pipeline.PipelineError))
-        f2 = f.value.error
-        self.failUnless(f2.check(ValueError))
-
-        # now that the pipeline is in the failed state, any new calls will
-        # fail immediately
-
-        d3 = p.add(20, self.pause, "two")
-
-        finished = []
-        d3.addBoth(finished.append)
-        self.failUnlessEqual(len(finished), 1)
-        f = finished[0]
-        self.failUnless(isinstance(f, Failure))
-        self.failUnless(f.check(pipeline.PipelineError))
-        r = repr(f.value)
-        self.failUnless("ValueError" in r, r)
-        f2 = f.value.error
-        self.failUnless(f2.check(ValueError))
-
-        d4 = p.flush()
-        flushed = []
-        d4.addBoth(flushed.append)
-        self.failUnlessEqual(len(flushed), 1)
-        f = flushed[0]
-        self.failUnless(isinstance(f, Failure))
-        self.failUnless(f.check(pipeline.PipelineError))
-        f2 = f.value.error
-        self.failUnless(f2.check(ValueError))
-
-    def test_errors2(self):
-        self.calls = []
-        p = pipeline.Pipeline(100)
-
-        d1 = p.add(10, self.pause, "one")
-        d2 = p.add(20, self.pause, "two")
-        d3 = p.add(30, self.pause, "three")
-        d4 = p.flush()
-
-        # one call fails, then the second one succeeds: make sure
-        # ExpandableDeferredList tolerates the second one
-
-        flushed = []
-        d4.addBoth(flushed.append)
-        self.failUnlessEqual(flushed, [])
-
-        self.calls[0][0].errback(ValueError("oops"))
-        self.failUnlessEqual(len(flushed), 1)
-        f = flushed[0]
-        self.failUnless(isinstance(f, Failure))
-        self.failUnless(f.check(pipeline.PipelineError))
-        f2 = f.value.error
-        self.failUnless(f2.check(ValueError))
-
-        self.calls[1][0].callback("two-result")
-        self.calls[2][0].errback(ValueError("three-error"))
-
-        del d1,d2,d3,d4
-        gc.collect() # for PyPy
@@ -47,6 +47,9 @@ from twisted.internet.defer import (
     inlineCallbacks,
     DeferredList,
 )
+from twisted.internet.testing import (
+    MemoryReactorClock,
+)
 from twisted.python.filepath import FilePath
 from allmydata.util import fileutil, pollmixin
 from allmydata.util.encodingutil import unicode_to_argv
@@ -60,6 +63,9 @@ import allmydata
 from allmydata.scripts.runner import (
     parse_options,
 )
+from allmydata.scripts.tahoe_run import (
+    on_stdin_close,
+)

 from .common import (
     PIPE,
@@ -624,6 +630,64 @@ class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin):
         yield client_running


+def _simulate_windows_stdin_close(stdio):
+    """
+    on Unix we can just close all the readers, correctly "simulating"
+    a stdin close .. of course, Windows has to be difficult
+    """
+    stdio.writeConnectionLost()
+    stdio.readConnectionLost()
+
+
+class OnStdinCloseTests(SyncTestCase):
+    """
+    Tests for on_stdin_close
+    """
+
+    def test_close_called(self):
+        """
+        our on-close method is called when stdin closes
+        """
+        reactor = MemoryReactorClock()
+        called = []
+
+        def onclose():
+            called.append(True)
+        transport = on_stdin_close(reactor, onclose)
+        self.assertEqual(called, [])
+
+        if platform.isWindows():
+            _simulate_windows_stdin_close(transport)
+        else:
+            for reader in reactor.getReaders():
+                reader.loseConnection()
+            reactor.advance(1)  # ProcessReader does a callLater(0, ..)
+
+        self.assertEqual(called, [True])
+
+    def test_exception_ignored(self):
+        """
+        An exception from our on-close function is discarded.
+        """
+        reactor = MemoryReactorClock()
+        called = []
+
+        def onclose():
+            called.append(True)
+            raise RuntimeError("unexpected error")
+        transport = on_stdin_close(reactor, onclose)
+        self.assertEqual(called, [])
+
+        if platform.isWindows():
+            _simulate_windows_stdin_close(transport)
+        else:
+            for reader in reactor.getReaders():
+                reader.loseConnection()
+            reactor.advance(1)  # ProcessReader does a callLater(0, ..)
+
+        self.assertEqual(called, [True])
+
+
 class PidFileLocking(SyncTestCase):
     """
     Direct tests for allmydata.util.pid functions
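The OnStdinCloseTests added above pin down the behaviour of ``on_stdin_close``: the callback fires once when stdin's read side goes away, and an exception raised by the callback is swallowed. A rough sketch of that idea using plain Twisted primitives (illustrative only; the real helper is ``allmydata.scripts.tahoe_run.on_stdin_close`` and the names below are assumptions):

    from twisted.internet import stdio
    from twisted.internet.protocol import Protocol

    def watch_stdin_close(reactor, on_close):
        """Invoke on_close() once when stdin is closed; ignore its exceptions."""
        class _WhenClosed(Protocol):
            def connectionLost(self, reason):
                try:
                    on_close()
                except Exception:
                    # mirror the tested behaviour: errors are discarded
                    pass
        # StandardIO hooks the protocol up to this process's stdin/stdout
        return stdio.StandardIO(_WhenClosed(), reactor=reactor)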
@@ -3,14 +3,9 @@ Tests for allmydata.storage.

 Ported to Python 3.
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import native_str, PY2, bytes_to_native_str, bchr
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+from __future__ import annotations
+from future.utils import native_str, bytes_to_native_str, bchr
 from six import ensure_str

 from io import (
@@ -59,7 +54,7 @@ from allmydata.storage.common import storage_index_to_dir, \
      si_b2a, si_a2b
 from allmydata.storage.lease import LeaseInfo
 from allmydata.immutable.layout import WriteBucketProxy, WriteBucketProxy_v2, \
-     ReadBucketProxy
+     ReadBucketProxy, _WriteBuffer
 from allmydata.mutable.layout import MDMFSlotWriteProxy, MDMFSlotReadProxy, \
      LayoutInvalid, MDMFSIGNABLEHEADER, \
      SIGNED_PREFIX, MDMFHEADER, \
@@ -3746,3 +3741,39 @@ class LeaseInfoTests(SyncTestCase):
             info.to_mutable_data(),
             HasLength(info.mutable_size()),
         )
+
+
+class WriteBufferTests(SyncTestCase):
+    """Tests for ``_WriteBuffer``."""
+
+    @given(
+        small_writes=strategies.lists(
+            strategies.binary(min_size=1, max_size=20),
+            min_size=10, max_size=20),
+        batch_size=strategies.integers(min_value=5, max_value=10)
+    )
+    def test_write_buffer(self, small_writes: list[bytes], batch_size: int):
+        """
+        ``_WriteBuffer`` coalesces small writes into bigger writes based on
+        the batch size.
+        """
+        wb = _WriteBuffer(batch_size)
+        result = b""
+        for data in small_writes:
+            should_flush = wb.queue_write(data)
+            if should_flush:
+                flushed_offset, flushed_data = wb.flush()
+                self.assertEqual(flushed_offset, len(result))
+                # The flushed data is in batch sizes, or closest approximation
+                # given queued inputs:
+                self.assertTrue(batch_size <= len(flushed_data) < batch_size + len(data))
+                result += flushed_data
+
+        # Final flush:
+        remaining_length = wb.get_queued_bytes()
+        flushed_offset, flushed_data = wb.flush()
+        self.assertEqual(remaining_length, len(flushed_data))
+        self.assertEqual(flushed_offset, len(result))
+        result += flushed_data
+
+        self.assertEqual(result, b"".join(small_writes))
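The ``WriteBufferTests`` added above describe the contract of ``_WriteBuffer`` only through its assertions: ``queue_write()`` reports when at least ``batch_size`` bytes are pending, and ``flush()`` returns an ``(offset, data)`` pair for the next contiguous write. A minimal sketch consistent with those assertions (an assumption for illustration, not the real ``allmydata.immutable.layout._WriteBuffer``):

    class SketchWriteBuffer:
        """Coalesce small writes into batch_size-or-larger contiguous writes."""
        def __init__(self, batch_size: int):
            self._batch_size = batch_size
            self._queued = bytearray()
            self._offset = 0  # where the next flushed chunk starts

        def queue_write(self, data: bytes) -> bool:
            # True means "a full batch is waiting, call flush() now"
            self._queued += data
            return len(self._queued) >= self._batch_size

        def get_queued_bytes(self) -> int:
            return len(self._queued)

        def flush(self) -> tuple[int, bytes]:
            data = bytes(self._queued)
            offset = self._offset
            self._offset += len(data)
            self._queued.clear()
            return offset, data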
@@ -31,10 +31,13 @@ from klein import Klein
 from hyperlink import DecodedURL
 from collections_extended import RangeMap
 from twisted.internet.task import Clock, Cooperator
+from twisted.internet.interfaces import IReactorTime
+from twisted.internet.defer import CancelledError, Deferred
 from twisted.web import http
 from twisted.web.http_headers import Headers
 from werkzeug import routing
 from werkzeug.exceptions import NotFound as WNotFound
+from testtools.matchers import Equals

 from .common import SyncTestCase
 from ..storage.http_common import get_content_type, CBOR_MIME_TYPE
@@ -245,6 +248,7 @@ def gen_bytes(length: int) -> bytes:
 class TestApp(object):
     """HTTP API for testing purposes."""

+    clock: IReactorTime
     _app = Klein()
     _swissnum = SWISSNUM_FOR_TEST  # Match what the test client is using

@@ -266,6 +270,25 @@ class TestApp(object):
         """Return bytes to the given length using ``gen_bytes()``."""
         return gen_bytes(length)

+    @_authorized_route(_app, set(), "/slowly_never_finish_result", methods=["GET"])
+    def slowly_never_finish_result(self, request, authorization):
+        """
+        Send data immediately, after 59 seconds, after another 59 seconds, and then
+        never again, without finishing the response.
+        """
+        request.write(b"a")
+        self.clock.callLater(59, request.write, b"b")
+        self.clock.callLater(59 + 59, request.write, b"c")
+        return Deferred()
+
+    @_authorized_route(_app, set(), "/die_unfinished", methods=["GET"])
+    def die(self, request, authorization):
+        """
+        Dies half-way.
+        """
+        request.transport.loseConnection()
+        return Deferred()
+

 def result_of(d):
     """
@@ -291,14 +314,25 @@ class CustomHTTPServerTests(SyncTestCase):

     def setUp(self):
         super(CustomHTTPServerTests, self).setUp()
+        StorageClient.start_test_mode(
+            lambda pool: self.addCleanup(pool.closeCachedConnections)
+        )
+        self.addCleanup(StorageClient.stop_test_mode)
         # Could be a fixture, but will only be used in this test class so not
         # going to bother:
         self._http_server = TestApp()
+        treq = StubTreq(self._http_server._app.resource())
         self.client = StorageClient(
             DecodedURL.from_text("http://127.0.0.1"),
             SWISSNUM_FOR_TEST,
-            treq=StubTreq(self._http_server._app.resource()),
+            treq=treq,
+            # We're using a Treq private API to get the reactor, alas, but only
+            # in a test, so not going to worry about it too much. This would be
+            # fixed if https://github.com/twisted/treq/issues/226 were ever
+            # fixed.
+            clock=treq._agent._memoryReactor,
         )
+        self._http_server.clock = self.client._clock

     def test_authorization_enforcement(self):
         """
@@ -346,7 +380,9 @@ class CustomHTTPServerTests(SyncTestCase):
         )

         self.assertEqual(
-            result_of(limited_content(response, at_least_length)).read(),
+            result_of(
+                limited_content(response, self._http_server.clock, at_least_length)
+            ).read(),
             gen_bytes(length),
         )

@@ -365,7 +401,52 @@ class CustomHTTPServerTests(SyncTestCase):
         )

         with self.assertRaises(ValueError):
-            result_of(limited_content(response, too_short))
+            result_of(limited_content(response, self._http_server.clock, too_short))
+
+    def test_limited_content_silence_causes_timeout(self):
+        """
+        ``http_client.limited_content() times out if it receives no data for 60
+        seconds.
+        """
+        response = result_of(
+            self.client.request(
+                "GET",
+                "http://127.0.0.1/slowly_never_finish_result",
+            )
+        )
+
+        body_deferred = limited_content(response, self._http_server.clock, 4)
+        result = []
+        error = []
+        body_deferred.addCallbacks(result.append, error.append)
+
+        for i in range(59 + 59 + 60):
+            self.assertEqual((result, error), ([], []))
+            self._http_server.clock.advance(1)
+            # Push data between in-memory client and in-memory server:
+            self.client._treq._agent.flush()
+
+        # After 59 (second write) + 59 (third write) + 60 seconds (quiescent
+        # timeout) the limited_content() response times out.
+        self.assertTrue(error)
+        with self.assertRaises(CancelledError):
+            error[0].raiseException()
+
+    def test_limited_content_cancels_timeout_on_failed_response(self):
+        """
+        If the response fails somehow, the timeout is still cancelled.
+        """
+        response = result_of(
+            self.client.request(
+                "GET",
+                "http://127.0.0.1/die",
+            )
+        )
+
+        d = limited_content(response, self._http_server.clock, 4)
+        with self.assertRaises(ValueError):
+            result_of(d)
+        self.assertEqual(len(self._http_server.clock.getDelayedCalls()), 0)


 class HttpTestFixture(Fixture):
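The two new tests above fix the semantics of the clock argument that ``limited_content()`` now takes: the body download is cancelled after 60 seconds of silence, and the pending timer is cleaned up when the response finishes or fails (no stray ``getDelayedCalls()`` left behind). A generic sketch of that quiescent-timeout pattern with plain Twisted, under the assumption that a ``bump()`` hook is invoked for every received chunk (illustrative only; the project's logic lives inside ``limited_content()``):

    def with_quiescent_timeout(clock, deferred, timeout=60):
        """Cancel `deferred` if bump() is not called for `timeout` seconds."""
        delayed = clock.callLater(timeout, deferred.cancel)

        def bump():
            # call this whenever a chunk of data arrives
            if delayed.active():
                delayed.reset(timeout)

        def cleanup(result):
            # success or failure both cancel the pending timer
            if delayed.active():
                delayed.cancel()
            return result

        deferred.addBoth(cleanup)
        return bump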
@@ -375,6 +456,10 @@ class HttpTestFixture(Fixture):
     """

     def _setUp(self):
+        StorageClient.start_test_mode(
+            lambda pool: self.addCleanup(pool.closeCachedConnections)
+        )
+        self.addCleanup(StorageClient.stop_test_mode)
         self.clock = Clock()
         self.tempdir = self.useFixture(TempDir())
         # The global Cooperator used by Twisted (a) used by pull producers in
@@ -396,6 +481,7 @@ class HttpTestFixture(Fixture):
             DecodedURL.from_text("http://127.0.0.1"),
             SWISSNUM_FOR_TEST,
             treq=self.treq,
+            clock=self.clock,
         )

     def result_of_with_flush(self, d):
@@ -470,6 +556,20 @@ class GenericHTTPAPITests(SyncTestCase):
         super(GenericHTTPAPITests, self).setUp()
         self.http = self.useFixture(HttpTestFixture())

+    def test_missing_authentication(self) -> None:
+        """
+        If nothing is given in the ``Authorization`` header at all an
+        ``Unauthorized`` response is returned.
+        """
+        client = StubTreq(self.http.http_server.get_resource())
+        response = self.http.result_of_with_flush(
+            client.request(
+                "GET",
+                "http://127.0.0.1/storage/v1/version",
+            ),
+        )
+        self.assertThat(response.code, Equals(http.UNAUTHORIZED))
+
     def test_bad_authentication(self):
         """
         If the wrong swissnum is used, an ``Unauthorized`` response code is
@@ -480,6 +580,7 @@ class GenericHTTPAPITests(SyncTestCase):
                 DecodedURL.from_text("http://127.0.0.1"),
                 b"something wrong",
                 treq=StubTreq(self.http.http_server.get_resource()),
+                clock=self.http.clock,
             )
         )
         with assert_fails_with_http_code(self, http.UNAUTHORIZED):
@@ -1100,18 +1201,42 @@ class MutableHTTPAPIsTests(SyncTestCase):
         )
         return storage_index, write_secret, lease_secret

-    def test_write_can_be_read(self):
+    def test_write_can_be_read_small_data(self):
+        """
+        Small written data can be read using ``read_share_chunk``.
+        """
+        self.write_can_be_read(b"abcdef")
+
+    def test_write_can_be_read_large_data(self):
+        """
+        Large written data (50MB) can be read using ``read_share_chunk``.
+        """
+        self.write_can_be_read(b"abcdefghij" * 5 * 1024 * 1024)
+
+    def write_can_be_read(self, data):
         """
         Written data can be read using ``read_share_chunk``.
         """
-        storage_index, _, _ = self.create_upload()
-        data0 = self.http.result_of_with_flush(
-            self.mut_client.read_share_chunk(storage_index, 0, 1, 7)
-        )
-        data1 = self.http.result_of_with_flush(
-            self.mut_client.read_share_chunk(storage_index, 1, 0, 8)
-        )
-        self.assertEqual((data0, data1), (b"bcdef-0", b"abcdef-1"))
+        lease_secret = urandom(32)
+        storage_index = urandom(16)
+        self.http.result_of_with_flush(
+            self.mut_client.read_test_write_chunks(
+                storage_index,
+                urandom(32),
+                lease_secret,
+                lease_secret,
+                {
+                    0: TestWriteVectors(
+                        write_vectors=[WriteVector(offset=0, data=data)]
+                    ),
+                },
+                [],
+            )
+        )
+        read_data = self.http.result_of_with_flush(
+            self.mut_client.read_share_chunk(storage_index, 0, 0, len(data))
+        )
+        self.assertEqual(read_data, data)

     def test_read_before_write(self):
         """In combo read/test/write operation, reads happen before writes."""
@@ -1190,15 +1315,6 @@ class MutableHTTPAPIsTests(SyncTestCase):
             b"aXYZef-0",
         )

-    def test_too_large_write(self):
-        """
-        Writing too large of a chunk results in a REQUEST ENTITY TOO LARGE http
-        error.
-        """
-        with self.assertRaises(ClientException) as e:
-            self.create_upload(b"0123456789" * 1024 * 1024)
-        self.assertEqual(e.exception.code, http.REQUEST_ENTITY_TOO_LARGE)
-
     def test_list_shares(self):
         """``list_shares()`` returns the shares for a given storage index."""
         storage_index, _, _ = self.create_upload()
@@ -1441,7 +1557,9 @@ class SharedImmutableMutableTestsMixin:
             self.http.client.request(
                 "GET",
                 self.http.client.relative_url(
-                    "/storage/v1/{}/{}/1".format(self.KIND, _encode_si(storage_index))
+                    "/storage/v1/{}/{}/1".format(
+                        self.KIND, _encode_si(storage_index)
+                    )
                 ),
                 headers=headers,
             )
@@ -12,7 +12,7 @@ from cryptography import x509

 from twisted.internet.endpoints import serverFromString
 from twisted.internet import reactor
-from twisted.internet.task import deferLater
+from twisted.internet.defer import maybeDeferred
 from twisted.web.server import Site
 from twisted.web.static import Data
 from twisted.web.client import Agent, HTTPConnectionPool, ResponseNeverReceived
@@ -30,6 +30,7 @@ from ..storage.http_common import get_spki_hash
 from ..storage.http_client import _StorageClientHTTPSPolicy
 from ..storage.http_server import _TLSEndpointWrapper
 from ..util.deferredutil import async_to_deferred
+from .common_system import spin_until_cleanup_done


 class HTTPSNurlTests(SyncTestCase):
@@ -87,6 +88,10 @@ class PinningHTTPSValidation(AsyncTestCase):
         self.addCleanup(self._port_assigner.tearDown)
         return AsyncTestCase.setUp(self)

+    def tearDown(self):
+        d = maybeDeferred(AsyncTestCase.tearDown, self)
+        return d.addCallback(lambda _: spin_until_cleanup_done())
+
     @asynccontextmanager
     async def listen(self, private_key_path: FilePath, cert_path: FilePath):
         """
@@ -107,9 +112,6 @@ class PinningHTTPSValidation(AsyncTestCase):
             yield f"https://127.0.0.1:{listening_port.getHost().port}/"
         finally:
             await listening_port.stopListening()
-            # Make sure all server connections are closed :( No idea why this
-            # is necessary when it's not for IStorageServer HTTPS tests.
-            await deferLater(reactor, 0.01)

     def request(self, url: str, expected_certificate: x509.Certificate):
         """
@@ -198,10 +200,6 @@ class PinningHTTPSValidation(AsyncTestCase):
             response = await self.request(url, certificate)
             self.assertEqual(await response.content(), b"YOYODYNE")

-        # We keep getting TLSMemoryBIOProtocol being left around, so try harder
-        # to wait for it to finish.
-        await deferLater(reactor, 0.001)
-
     # A potential attack to test is a private key that doesn't match the
     # certificate... but OpenSSL (quite rightly) won't let you listen with that
     # so I don't know how to test that! See
@@ -34,7 +34,7 @@ from allmydata.util.encodingutil import quote_output, unicode_to_argv
 from allmydata.util.fileutil import abspath_expanduser_unicode
 from allmydata.util.consumer import MemoryConsumer, download_to_data
 from allmydata.interfaces import IDirectoryNode, IFileNode, \
-     NoSuchChildError, NoSharesError
+     NoSuchChildError, NoSharesError, SDMF_VERSION, MDMF_VERSION
 from allmydata.monitor import Monitor
 from allmydata.mutable.common import NotWriteableError
 from allmydata.mutable import layout as mutable_layout
@@ -117,11 +117,17 @@ class CountingDataUploadable(upload.Data):


 class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
+    """Foolscap integration-y tests."""
+    FORCE_FOOLSCAP_FOR_STORAGE = True
     timeout = 180

+    @property
+    def basedir(self):
+        return "system/SystemTest/{}-foolscap-{}".format(
+            self.id().split(".")[-1], self.FORCE_FOOLSCAP_FOR_STORAGE
+        )
+
     def test_connections(self):
-        self.basedir = "system/SystemTest/test_connections"
         d = self.set_up_nodes()
         self.extra_node = None
         d.addCallback(lambda res: self.add_extra_node(self.numclients))
@@ -149,11 +155,9 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
     del test_connections

     def test_upload_and_download_random_key(self):
-        self.basedir = "system/SystemTest/test_upload_and_download_random_key"
         return self._test_upload_and_download(convergence=None)

     def test_upload_and_download_convergent(self):
-        self.basedir = "system/SystemTest/test_upload_and_download_convergent"
         return self._test_upload_and_download(convergence=b"some convergence string")

     def _test_upload_and_download(self, convergence):
@@ -473,9 +477,10 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):

     def _corrupt_mutable_share(self, filename, which):
         msf = MutableShareFile(filename)
-        datav = msf.readv([ (0, 1000000) ])
+        # Read more than share length:
+        datav = msf.readv([ (0, 10_000_000) ])
         final_share = datav[0]
-        assert len(final_share) < 1000000 # ought to be truncated
+        assert len(final_share) < 10_000_000 # ought to be truncated
         pieces = mutable_layout.unpack_share(final_share)
         (seqnum, root_hash, IV, k, N, segsize, datalen,
          verification_key, signature, share_hash_chain, block_hash_tree,
@@ -515,13 +520,20 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
         msf.writev( [(0, final_share)], None)


-    def test_mutable(self):
-        self.basedir = "system/SystemTest/test_mutable"
+    def test_mutable_sdmf(self):
+        """SDMF mutables can be uploaded, downloaded, and many other things."""
+        return self._test_mutable(SDMF_VERSION)
+
+    def test_mutable_mdmf(self):
+        """MDMF mutables can be uploaded, downloaded, and many other things."""
+        return self._test_mutable(MDMF_VERSION)
+
+    def _test_mutable(self, mutable_version):
         DATA = b"initial contents go here."  # 25 bytes % 3 != 0
         DATA_uploadable = MutableData(DATA)
         NEWDATA = b"new contents yay"
         NEWDATA_uploadable = MutableData(NEWDATA)
-        NEWERDATA = b"this is getting old"
+        NEWERDATA = b"this is getting old" * 1_000_000
         NEWERDATA_uploadable = MutableData(NEWERDATA)

         d = self.set_up_nodes()
@@ -529,7 +541,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
         def _create_mutable(res):
             c = self.clients[0]
             log.msg("starting create_mutable_file")
-            d1 = c.create_mutable_file(DATA_uploadable)
+            d1 = c.create_mutable_file(DATA_uploadable, mutable_version)
             def _done(res):
                 log.msg("DONE: %s" % (res,))
                 self._mutable_node_1 = res
@@ -551,17 +563,19 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
                                           filename)
             self.failUnlessEqual(rc, 0)
             try:
+                share_type = 'SDMF' if mutable_version == SDMF_VERSION else 'MDMF'
                 self.failUnless("Mutable slot found:\n" in output)
-                self.failUnless("share_type: SDMF\n" in output)
+                self.assertIn(f"share_type: {share_type}\n", output)
                 peerid = idlib.nodeid_b2a(self.clients[client_num].nodeid)
                 self.failUnless(" WE for nodeid: %s\n" % peerid in output)
                 self.failUnless(" num_extra_leases: 0\n" in output)
                 self.failUnless(" secrets are for nodeid: %s\n" % peerid
                                 in output)
-                self.failUnless(" SDMF contents:\n" in output)
+                self.failUnless(f" {share_type} contents:\n" in output)
                 self.failUnless("  seqnum: 1\n" in output)
                 self.failUnless("  required_shares: 3\n" in output)
                 self.failUnless("  total_shares: 10\n" in output)
+                if mutable_version == SDMF_VERSION:
                     self.failUnless("  segsize: 27\n" in output, (output, filename))
                     self.failUnless("  datalen: 25\n" in output)
                 # the exact share_hash_chain nodes depends upon the sharenum,
@@ -569,9 +583,13 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
                 # now
                 self.failUnless("  share_hash_chain: " in output)
                 self.failUnless("  block_hash_tree: 1 nodes\n" in output)
+                if mutable_version == SDMF_VERSION:
                     expected = ("  verify-cap: URI:SSK-Verifier:%s:" %
                                 str(base32.b2a(storage_index), "ascii"))
-                self.failUnless(expected in output)
+                    self.failUnless(expected in output)
+                else:
+                    expected = ("  verify-cap: URI:MDMF-Verifier:%s" %
+                                str(base32.b2a(storage_index), "ascii"))
+                    self.assertIn(expected, output)
             except unittest.FailTest:
                 print()
                 print("dump-share output was:")
@@ -691,6 +709,9 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
         # when we retrieve this, we should get three signature
         # failures (where we've mangled seqnum, R, and segsize). The
         # pubkey mangling

+        if mutable_version == SDMF_VERSION:
+            # TODO Corrupting shares in test_systm doesn't work for MDMF right now
             d.addCallback(_corrupt_shares)

         d.addCallback(lambda res: self._newnode3.download_best_version())
@@ -699,7 +720,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
         def _check_empty_file(res):
             # make sure we can create empty files, this usually screws up the
             # segsize math
-            d1 = self.clients[2].create_mutable_file(MutableData(b""))
+            d1 = self.clients[2].create_mutable_file(MutableData(b""), mutable_version)
             d1.addCallback(lambda newnode: newnode.download_best_version())
             d1.addCallback(lambda res: self.failUnlessEqual(b"", res))
             return d1
@@ -746,7 +767,6 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
         # plaintext_hash check.

     def test_filesystem(self):
-        self.basedir = "system/SystemTest/test_filesystem"
         self.data = LARGE_DATA
         d = self.set_up_nodes()
         def _new_happy_semantics(ign):
@@ -1713,7 +1733,6 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
     def test_filesystem_with_cli_in_subprocess(self):
         # We do this in a separate test so that test_filesystem doesn't skip if we can't run bin/tahoe.

-        self.basedir = "system/SystemTest/test_filesystem_with_cli_in_subprocess"
         d = self.set_up_nodes()
         def _new_happy_semantics(ign):
             for c in self.clients:
@@ -1794,9 +1813,21 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):


 class Connections(SystemTestMixin, unittest.TestCase):
+    FORCE_FOOLSCAP_FOR_STORAGE = True
+
     def test_rref(self):
-        self.basedir = "system/Connections/rref"
+        # The way the listening port is created is via
+        # SameProcessStreamEndpointAssigner (allmydata.test.common), which then
+        # makes an endpoint string parsed by AdoptedServerPort. The latter does
+        # dup(fd), which results in the filedescriptor staying alive _until the
+        # test ends_. That means that when we disown the service, we still have
+        # the listening port there on the OS level! Just the resulting
+        # connections aren't handled. So this test relies on aggressive
+        # timeouts in the HTTP client and presumably some equivalent in
+        # Foolscap, since connection refused does _not_ happen.
+        self.basedir = "system/Connections/rref-foolscap-{}".format(
+            self.FORCE_FOOLSCAP_FOR_STORAGE
+        )
         d = self.set_up_nodes(2)
         def _start(ign):
             self.c0 = self.clients[0]
@@ -1812,9 +1843,13 @@ class Connections(SystemTestMixin, unittest.TestCase):

         # now shut down the server
         d.addCallback(lambda ign: self.clients[1].disownServiceParent())
+
+        # kill any persistent http connections that might continue to work
+        d.addCallback(lambda ign: self.close_idle_http_connections())
+
         # and wait for the client to notice
         def _poll():
-            return len(self.c0.storage_broker.get_connected_servers()) < 2
+            return len(self.c0.storage_broker.get_connected_servers()) == 1
         d.addCallback(lambda ign: self.poll(_poll))

         def _down(ign):
@@ -1824,3 +1859,16 @@ class Connections(SystemTestMixin, unittest.TestCase):
             self.assertEqual(storage_server, self.s1_storage_server)
         d.addCallback(_down)
         return d
+
+
+class HTTPSystemTest(SystemTest):
+    """HTTP storage protocol variant of the system tests."""
+
+    FORCE_FOOLSCAP_FOR_STORAGE = False
+
+
+class HTTPConnections(Connections):
+    """HTTP storage protocol variant of the connections tests."""
+    FORCE_FOOLSCAP_FOR_STORAGE = False
@@ -9,18 +9,7 @@

 """
 Tests for the allmydata.testing helpers

-Ported to Python 3.
-
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401

 from twisted.internet.defer import (
     inlineCallbacks,
@@ -56,10 +45,12 @@ from testtools.matchers import (
     IsInstance,
     MatchesStructure,
     AfterPreprocessing,
+    Contains,
 )
 from testtools.twistedsupport import (
     succeeded,
 )
+from twisted.web.http import GONE


 class FakeWebTest(SyncTestCase):
@@ -144,7 +135,8 @@ class FakeWebTest(SyncTestCase):

     def test_download_missing(self):
         """
-        Error if we download a capability that doesn't exist
+        The response to a request to download a capability that doesn't exist
+        is 410 (GONE).
         """

         http_client = create_tahoe_treq_client()
@@ -157,7 +149,11 @@ class FakeWebTest(SyncTestCase):
                 resp,
                 succeeded(
                     MatchesStructure(
-                        code=Equals(500)
+                        code=Equals(GONE),
+                        content=AfterPreprocessing(
+                            lambda m: m(),
+                            succeeded(Contains(b"No data for")),
+                        ),
                     )
                 )
             )
@@ -1,19 +1,14 @@
 """
-Ported to Python 3.
+Tests for a bunch of web-related APIs.
 """
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+from __future__ import annotations
 from six import ensure_binary

 import os.path, re, time
 import treq
 from urllib.parse import quote as urlquote, unquote as urlunquote
+from base64 import urlsafe_b64encode

 from bs4 import BeautifulSoup

@@ -38,6 +33,7 @@ from allmydata.util import fileutil, base32, hashutil, jsonbytes as json
 from allmydata.util.consumer import download_to_data
 from allmydata.util.encodingutil import to_bytes
 from ...util.connection_status import ConnectionStatus
+from ...crypto.rsa import PublicKey, PrivateKey, create_signing_keypair, der_string_from_signing_key
 from ..common import (
     EMPTY_CLIENT_CONFIG,
     FakeCHKFileNode,
@@ -65,6 +61,7 @@ from allmydata.interfaces import (
     MustBeReadonlyError,
 )
 from allmydata.mutable import servermap, publish, retrieve
+from allmydata.mutable.common import derive_mutable_keys
 from .. import common_util as testutil
 from ..common_util import TimezoneMixin
 from ..common_web import (
@@ -93,6 +90,7 @@ class FakeNodeMaker(NodeMaker):
                        'happy': 7,
                        'max_segment_size':128*1024 # 1024=KiB
                        }
+    all_contents: dict[bytes, object]
     def _create_lit(self, cap):
         return FakeCHKFileNode(cap, self.all_contents)
     def _create_immutable(self, cap):
@@ -100,11 +98,19 @@ class FakeNodeMaker(NodeMaker):
     def _create_mutable(self, cap):
         return FakeMutableFileNode(None, None,
                                    self.encoding_params, None,
-                                   self.all_contents).init_from_cap(cap)
-    def create_mutable_file(self, contents=b"", keysize=None,
-                            version=SDMF_VERSION):
+                                   self.all_contents, None).init_from_cap(cap)
+    def create_mutable_file(self,
+                            contents=None,
+                            version=None,
+                            keypair: tuple[PublicKey, PrivateKey] | None=None,
+                            ):
+        if contents is None:
+            contents = b""
+        if version is None:
+            version = SDMF_VERSION
+
         n = FakeMutableFileNode(None, None, self.encoding_params, None,
-                                self.all_contents)
+                                self.all_contents, keypair)
         return n.create(contents, version=version)

 class FakeUploader(service.Service):
@@ -2868,6 +2874,41 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi
                              "Unknown format: foo",
                              method="post", data=body, headers=headers)

+    async def test_POST_upload_keypair(self) -> None:
+        """
+        A *POST* creating a new mutable object may include a *private-key*
+        query argument giving a urlsafe-base64-encoded RSA private key to use
+        as the "signature key". The given signature key is used, rather than
+        a new one being generated.
+        """
+        format = "sdmf"
+        priv, pub = create_signing_keypair(2048)
+        encoded_privkey = urlsafe_b64encode(der_string_from_signing_key(priv)).decode("ascii")
+        filename = "predetermined-sdmf"
+        expected_content = self.NEWFILE_CONTENTS * 100
+        actual_cap = uri.from_string(await self.POST(
+            self.public_url +
+            f"/foo?t=upload&format={format}&private-key={encoded_privkey}",
+            file=(filename, expected_content),
+        ))
+        # Ideally we would inspect the private ("signature") and public
+        # ("verification") keys but they are not made easily accessible here
+        # (ostensibly because we have a FakeMutableFileNode instead of a real
+        # one).
+        #
+        # So, instead, re-compute the writekey and fingerprint and compare
+        # those against the capability string.
+        expected_writekey, _, expected_fingerprint = derive_mutable_keys((pub, priv))
+        self.assertEqual(
+            (expected_writekey, expected_fingerprint),
+            (actual_cap.writekey, actual_cap.fingerprint),
+        )
+
+        # And the capability we got can be used to download the data we
+        # uploaded.
+        downloaded_content = await self.GET(f"/uri/{actual_cap.to_string().decode('ascii')}")
+        self.assertEqual(expected_content, downloaded_content)
+
     def test_POST_upload_format(self):
         def _check_upload(ign, format, uri_prefix, fn=None):
             filename = format + ".txt"
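``test_POST_upload_keypair`` above also documents the wire format: the signature key travels as a urlsafe-base64 encoding of the DER-serialized private key in the ``private-key`` query argument of the upload POST. A small sketch of building that argument outside the test (the ``base_url`` parameter is a placeholder assumption; the key helpers are the ones the test itself imports):

    from base64 import urlsafe_b64encode
    from allmydata.crypto.rsa import create_signing_keypair, der_string_from_signing_key

    def upload_url_with_keypair(base_url: str, priv) -> str:
        """Build an upload URL carrying a pre-generated signature key."""
        encoded = urlsafe_b64encode(der_string_from_signing_key(priv)).decode("ascii")
        return base_url + "?t=upload&format=sdmf&private-key=" + encoded

    # e.g. priv, pub = create_signing_keypair(2048)
    #      url = upload_url_with_keypair("<directory URL>/child-name", priv)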
@@ -202,6 +202,16 @@ class TahoeLAFSSiteTests(SyncTestCase):
             ),
         )

+    def test_private_key_censoring(self):
+        """
+        The log event for a request including a **private-key** query
+        argument has the private key value censored.
+        """
+        self._test_censoring(
+            b"/uri?uri=URI:CHK:aaa:bbb&private-key=AAAAaaaabbbb==",
+            b"/uri?uri=[CENSORED]&private-key=[CENSORED]",
+        )
+
     def test_uri_censoring(self):
         """
         The log event for a request for **/uri/<CAP>** has the capability value
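The censoring rule the new test pins down (the values of sensitive query arguments are replaced wholesale with "[CENSORED]" before the request is logged) can be pictured with a small standalone sketch; this is illustrative only, not the TahoeLAFSSite implementation:

    # Illustrative sketch: replace the values of sensitive query arguments with
    # "[CENSORED]" so that capabilities and private keys never reach the logs.
    from urllib.parse import parse_qsl, urlsplit

    SENSITIVE_ARGS = {b"uri", b"private-key"}

    def censor_request_uri(uri: bytes) -> bytes:
        parts = urlsplit(uri)
        if not parts.query:
            return uri
        censored = []
        for name, value in parse_qsl(parts.query, keep_blank_values=True):
            if name in SENSITIVE_ARGS:
                value = b"[CENSORED]"
            censored.append(name + b"=" + value)
        return parts.path + b"?" + b"&".join(censored)

    assert censor_request_uri(
        b"/uri?uri=URI:CHK:aaa:bbb&private-key=AAAAaaaabbbb=="
    ) == b"/uri?uri=[CENSORED]&private-key=[CENSORED]"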
@@ -6,20 +6,12 @@
 # This file is part of Tahoe-LAFS.
 #
 # See the docs/about.rst file for licensing information.
-"""Test-helpers for clients that use the WebUI.

-Ported to Python 3.
+"""
+Test-helpers for clients that use the WebUI.
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals

-from future.utils import PY2
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
+from __future__ import annotations

 import hashlib
@@ -54,6 +46,7 @@ import allmydata.uri
 from allmydata.util import (
     base32,
 )
+from ..util.dictutil import BytesKeyDict


 __all__ = (
@@ -147,7 +140,7 @@ class _FakeTahoeUriHandler(Resource, object):

     isLeaf = True

-    data = attr.ib(default=attr.Factory(dict))
+    data: BytesKeyDict = attr.ib(default=attr.Factory(BytesKeyDict))
     capability_generators = attr.ib(default=attr.Factory(dict))

     def _generate_capability(self, kind):
@@ -209,7 +202,7 @@ class _FakeTahoeUriHandler(Resource, object):
         capability = None
         for arg, value in uri.query:
             if arg == u"uri":
-                capability = value
+                capability = value.encode("utf-8")
         # it's legal to use the form "/uri/<capability>"
         if capability is None and request.postpath and request.postpath[0]:
             capability = request.postpath[0]
@@ -221,10 +214,9 @@ class _FakeTahoeUriHandler(Resource, object):

         # the user gave us a capability; if our Grid doesn't have any
         # data for it, that's an error.
-        capability = capability.encode('ascii')
         if capability not in self.data:
-            request.setResponseCode(http.BAD_REQUEST)
-            return u"No data for '{}'".format(capability.decode('ascii'))
+            request.setResponseCode(http.GONE)
+            return u"No data for '{}'".format(capability.decode('ascii')).encode("utf-8")

         return self.data[capability]

@@ -1,21 +1,6 @@
 """
 Tools to mess with dicts.
-
-Ported to Python 3.
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    # IMPORTANT: We deliberately don't import dict. The issue is that we're
-    # subclassing dict, so we'd end up exposing Python 3 dict APIs to lots of
-    # code that doesn't support it.
-    from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, list, object, range, str, max, min # noqa: F401
-from six import ensure_str
-

 class DictOfSets(dict):
     def add(self, key, value):
@@ -104,7 +89,7 @@ def _make_enforcing_override(K, method_name):
             raise TypeError("{} must be of type {}".format(
                 repr(key), self.KEY_TYPE))
         return getattr(dict, method_name)(self, key, *args, **kwargs)
-    f.__name__ = ensure_str(method_name)
+    f.__name__ = method_name
     setattr(K, method_name, f)

 for _method_name in ["__setitem__", "__getitem__", "setdefault", "get",
@@ -113,11 +98,6 @@ for _method_name in ["__setitem__", "__getitem__", "setdefault", "get",
 del _method_name


-if PY2:
-    # No need for enforcement, can use either bytes or unicode as keys and it's
-    # fine.
-    BytesKeyDict = UnicodeKeyDict = dict
-else:
 class BytesKeyDict(_TypedKeyDict):
     """Keys should be bytes."""

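With the Python 2 branch gone, BytesKeyDict is always the type-enforcing _TypedKeyDict subclass. A quick usage sketch (assuming BytesKeyDict keeps a KEY_TYPE of bytes, which is what the enforcing overrides above check against):

    # Sketch: keys must be bytes; anything else raises TypeError via the
    # overrides installed by _make_enforcing_override().
    from allmydata.util.dictutil import BytesKeyDict

    d = BytesKeyDict()
    d[b"cap"] = "some value"      # bytes key: accepted
    try:
        d["cap"] = "some value"   # str key: rejected
    except TypeError as e:
        print(e)                  # e.g. "'cap' must be of type <class 'bytes'>"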
@@ -1,149 +0,0 @@
-"""
-A pipeline of Deferreds.
-
-Ported to Python 3.
-"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
-
-from twisted.internet import defer
-from twisted.python.failure import Failure
-from twisted.python import log
-from allmydata.util.assertutil import precondition
-
-
-class PipelineError(Exception):
-    """One of the pipelined messages returned an error. The received Failure
-    object is stored in my .error attribute."""
-    def __init__(self, error):
-        self.error = error
-
-    def __repr__(self):
-        return "<PipelineError error=(%r)>" % (self.error,)
-    def __str__(self):
-        return "<PipelineError error=(%s)>" % (self.error,)
-
-class SingleFileError(Exception):
-    """You are not permitted to add a job to a full pipeline."""
-
-
-class ExpandableDeferredList(defer.Deferred, object):
-    # like DeferredList(fireOnOneErrback=True) with a built-in
-    # gatherResults(), but you can add new Deferreds until you close it. This
-    # gives you a chance to add don't-complain-about-unhandled-error errbacks
-    # immediately after attachment, regardless of whether you actually end up
-    # wanting the list or not.
-    def __init__(self):
-        defer.Deferred.__init__(self)
-        self.resultsReceived = 0
-        self.resultList = []
-        self.failure = None
-        self.closed = False
-
-    def addDeferred(self, d):
-        precondition(not self.closed, "don't call addDeferred() on a closed ExpandableDeferredList")
-        index = len(self.resultList)
-        self.resultList.append(None)
-        d.addCallbacks(self._cbDeferred, self._ebDeferred,
-                       callbackArgs=(index,))
-        return d
-
-    def close(self):
-        self.closed = True
-        self.checkForFinished()
-
-    def checkForFinished(self):
-        if not self.closed:
-            return
-        if self.called:
-            return
-        if self.failure:
-            self.errback(self.failure)
-        elif self.resultsReceived == len(self.resultList):
-            self.callback(self.resultList)
-
-    def _cbDeferred(self, res, index):
-        self.resultList[index] = res
-        self.resultsReceived += 1
-        self.checkForFinished()
-        return res
-
-    def _ebDeferred(self, f):
-        self.failure = f
-        self.checkForFinished()
-        return f
-
-
-class Pipeline(object):
-    """I manage a size-limited pipeline of Deferred operations, usually
-    callRemote() messages."""
-
-    def __init__(self, capacity):
-        self.capacity = capacity # how full we can be
-        self.gauge = 0 # how full we are
-        self.failure = None
-        self.waiting = [] # callers of add() who are blocked
-        self.unflushed = ExpandableDeferredList()
-
-    def add(self, _size, _func, *args, **kwargs):
-        # We promise that all the Deferreds we return will fire in the order
-        # they were returned. To make it easier to keep this promise, we
-        # prohibit multiple outstanding calls to add() .
-        if self.waiting:
-            raise SingleFileError
-        if self.failure:
-            return defer.fail(self.failure)
-        self.gauge += _size
-        fd = defer.maybeDeferred(_func, *args, **kwargs)
-        fd.addBoth(self._call_finished, _size)
-        self.unflushed.addDeferred(fd)
-        fd.addErrback(self._eat_pipeline_errors)
-        fd.addErrback(log.err, "_eat_pipeline_errors didn't eat it")
-        if self.gauge < self.capacity:
-            return defer.succeed(None)
-        d = defer.Deferred()
-        self.waiting.append(d)
-        return d
-
-    def flush(self):
-        if self.failure:
-            return defer.fail(self.failure)
-        d, self.unflushed = self.unflushed, ExpandableDeferredList()
-        d.close()
-        d.addErrback(self._flushed_error)
-        return d
-
-    def _flushed_error(self, f):
-        precondition(self.failure) # should have been set by _call_finished
-        return self.failure
-
-    def _call_finished(self, res, size):
-        self.gauge -= size
-        if isinstance(res, Failure):
-            res = Failure(PipelineError(res))
-            if not self.failure:
-                self.failure = res
-        if self.failure:
-            while self.waiting:
-                d = self.waiting.pop(0)
-                d.errback(self.failure)
-        else:
-            while self.waiting and (self.gauge < self.capacity):
-                d = self.waiting.pop(0)
-                d.callback(None)
-                # the d.callback() might trigger a new call to add(), which
-                # will raise our gauge and might cause the pipeline to be
-                # filled. So the while() loop gets a chance to tell the
-                # caller to stop.
-        return res
-
-    def _eat_pipeline_errors(self, f):
-        f.trap(PipelineError)
-        return None
(Some files were not shown because too many files have changed in this diff.)