Merge branch 'master' into 2916.grid-manager-integration-tests.2

meejah 2023-08-02 13:23:52 -06:00
commit 5c45cb44bf
27 changed files with 993 additions and 547 deletions


@ -89,20 +89,12 @@ workflows:
- "nixos":
name: "<<matrix.pythonVersion>>"
nixpkgs: "22.11"
nixpkgs: "nixpkgs-unstable"
matrix:
parameters:
pythonVersion:
- "python38"
- "python39"
- "python310"
- "nixos":
name: "<<matrix.pythonVersion>>"
nixpkgs: "unstable"
matrix:
parameters:
pythonVersion:
- "python311"
# Eventually, test against PyPy 3.8
@ -385,8 +377,8 @@ jobs:
parameters:
nixpkgs:
description: >-
Reference the name of a niv-managed nixpkgs source (see `niv show`
and nix/sources.json)
Reference the name of a flake-managed nixpkgs input (see `nix flake
metadata` and flake.nix)
type: "string"
pythonVersion:
description: >-
@ -403,14 +395,17 @@ jobs:
- "run":
name: "Unit Test"
command: |
# The dependencies are all built so we can allow more
# parallelism here.
source .circleci/lib.sh
cache_if_able nix-build \
--cores 8 \
--argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
--argstr pythonVersion "<<parameters.pythonVersion>>" \
nix/tests.nix
# Translate the nixpkgs selection into a flake reference we
# can use to override the default nixpkgs input.
NIXPKGS=$(nixpkgs_flake_reference <<parameters.nixpkgs>>)
cache_if_able nix run \
--override-input nixpkgs "$NIXPKGS" \
.#<<parameters.pythonVersion>>-unittest -- \
--jobs $UNITTEST_CORES \
allmydata
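The same suite can be reproduced locally against the flake (a sketch; substitute any Python version from the matrix above for `python311` and pick a job count to taste):

    nix run .#python311-unittest -- --jobs 4 allmydata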
typechecks:
docker:
@ -536,20 +531,23 @@ executors:
docker:
# Run in a highly Nix-capable environment.
- <<: *DOCKERHUB_AUTH
image: "nixos/nix:2.10.3"
image: "nixos/nix:2.16.1"
environment:
# CACHIX_AUTH_TOKEN is manually set in the CircleCI web UI and allows us
# to push to CACHIX_NAME. CACHIX_NAME tells cachix which cache to push
# to.
CACHIX_NAME: "tahoe-lafs-opensource"
# Let us use features marked "experimental". For example, most/all of
# the `nix <subcommand>` forms.
NIX_CONFIG: "experimental-features = nix-command flakes"
commands:
nix-build:
parameters:
nixpkgs:
description: >-
Reference the name of a niv-managed nixpkgs source (see `niv show`
and nix/sources.json)
Reference the name of a flake-managed nixpkgs input (see `nix flake
metadata` and flake.nix)
type: "string"
pythonVersion:
description: >-
@ -565,15 +563,17 @@ commands:
# Get cachix for Nix-friendly caching.
name: "Install Basic Dependencies"
command: |
NIXPKGS="https://github.com/nixos/nixpkgs/archive/nixos-<<parameters.nixpkgs>>.tar.gz"
nix-env \
--file $NIXPKGS \
--install \
-A cachix bash
# Activate it for "binary substitution". This sets up
# configuration that lets Nix download something from the cache
# instead of building it locally, if possible.
cachix use "${CACHIX_NAME}"
# Get some build environment dependencies and let them float on a
# certain release branch. These aren't involved in the actual
# package build (only in CI environment setup) so the fact that
# they float shouldn't hurt reproducibility.
NIXPKGS="nixpkgs/nixos-23.05"
nix profile install $NIXPKGS#cachix $NIXPKGS#bash $NIXPKGS#jp
# Activate our cachix cache for "binary substitution". This sets
# up configuration that lets Nix download something from the cache
# instead of building it locally, if possible.
cachix use "${CACHIX_NAME}"
- "checkout"
@ -585,32 +585,16 @@ commands:
-p 'python3.withPackages (ps: [ ps.setuptools ])' \
--run 'python setup.py update_version'
- "run":
name: "Build Dependencies"
command: |
# CircleCI build environment looks like it has a zillion and a
# half cores. Don't let Nix autodetect this high core count
# because it blows up memory usage and fails the test run. Pick a
# number of cores that suits the build environment we're paying
# for (the free one!).
source .circleci/lib.sh
# nix-shell will build all of the dependencies of the target but
# not the target itself.
cache_if_able nix-shell \
--run "" \
--cores 3 \
--argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
--argstr pythonVersion "<<parameters.pythonVersion>>" \
./default.nix
- "run":
name: "Build Package"
command: |
source .circleci/lib.sh
cache_if_able nix-build \
--cores 4 \
--argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
--argstr pythonVersion "<<parameters.pythonVersion>>" \
./default.nix
NIXPKGS=$(nixpkgs_flake_reference <<parameters.nixpkgs>>)
cache_if_able nix build \
--verbose \
--print-build-logs \
--cores "$DEPENDENCY_CORES" \
--override-input nixpkgs "$NIXPKGS" \
.#<<parameters.pythonVersion>>-tahoe-lafs
- steps: "<<parameters.buildSteps>>"


@ -1,3 +1,13 @@
# CircleCI build environment looks like it has a zillion and a half cores.
# Don't let Nix autodetect this high core count because it blows up memory
# usage and fails the test run. Pick a number of cores that suits the build
# environment we're paying for (the free one!).
DEPENDENCY_CORES=3
# Once dependencies are built, we can allow some more concurrency for our own
# test suite.
UNITTEST_CORES=8
# Run a command, enabling cache writes to cachix if possible. The command is
# accepted as a variable number of positional arguments (like argv).
function cache_if_able() {
@ -117,3 +127,22 @@ function describe_build() {
echo "Cache not writeable."
fi
}
# Inspect the flake input metadata for an input of a given name and return the
# revision at which that input is pinned. If the input does not exist then
# return garbage (probably "null").
read_input_revision() {
input_name=$1
shift
nix flake metadata --json | jp --unquoted 'locks.nodes."'"$input_name"'".locked.rev'
}
# Return a flake reference that refers to a specific revision of nixpkgs: the
# revision to which the named flake input is pinned.
nixpkgs_flake_reference() {
input_name=$1
shift
echo "github:NixOS/nixpkgs?rev=$(read_input_revision $input_name)"
}
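As an illustration (an assumed transcript, using the pinned revision from the flake.lock added in this commit), resolving the "nixpkgs-unstable" input yields:

    $ nixpkgs_flake_reference nixpkgs-unstable
    github:NixOS/nixpkgs?rev=439ba0789ff84dddea64eb2d47a4a0d4887dbb1f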


@ -1,49 +1,13 @@
let
# sources.nix contains information about which versions of some of our
# dependencies we should use. since we use it to pin nixpkgs, all the rest
# of our dependencies are *also* pinned - indirectly.
#
# sources.nix is managed using a tool called `niv`. as an example, to
# update to the most recent version of nixpkgs from the 21.11 maintenance
# release, in the top-level tahoe-lafs checkout directory you run:
#
# niv update nixpkgs-21.11
#
# niv also supports choosing a specific revision, following a different
# branch, etc. find complete documentation for the tool at
# https://github.com/nmattia/niv
sources = import nix/sources.nix;
in
{
pkgsVersion ? "nixpkgs-22.11" # a string which chooses a nixpkgs from the
# niv-managed sources data
, pkgs ? import sources.${pkgsVersion} { } # nixpkgs itself
, pythonVersion ? "python310" # a string choosing the python derivation from
# nixpkgs to target
, extrasNames ? [ "tor" "i2p" ] # a list of strings identifying tahoe-lafs extras,
# the dependencies of which the resulting
# package will also depend on. Include all of the
# runtime extras by default because the incremental
# cost of including them is a lot smaller than the
# cost of re-building the whole thing to add them.
}:
with (pkgs.${pythonVersion}.override {
packageOverrides = import ./nix/python-overrides.nix;
}).pkgs;
callPackage ./nix/tahoe-lafs.nix {
# Select whichever package extras were requested.
inherit extrasNames;
# Define the location of the Tahoe-LAFS source to be packaged (the same
# directory as contains this file). Clean up as many of the non-source
# files (eg the `.git` directory, `~` backup files, nix's own `result`
# symlink, etc) as possible to avoid needing to re-build when files that
# make no difference to the package have changed.
tahoe-lafs-src = pkgs.lib.cleanSource ./.;
doCheck = false;
}
# This is the flake-compat glue code. It loads the flake and gives us its
# outputs. This gives us backwards compatibility with pre-flake consumers.
# All of the real action is in flake.nix.
(import
(
let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
fetchTarball {
url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
sha256 = lock.nodes.flake-compat.locked.narHash;
}
)
{ src = ./.; }
).defaultNix.default
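In other words, pre-flake consumers should be able to keep calling nix-build in a checkout as before; flake-compat pins itself via flake.lock and then evaluates the flake (an illustrative invocation):

    $ nix-build    # should produce ./result pointing at the flake's default package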


@ -57,6 +57,20 @@ The key-value store is implemented by a grid of Tahoe-LAFS storage servers --
user-space processes. Tahoe-LAFS storage clients communicate with the storage
servers over TCP.
There are two supported protocols:
* Foolscap, the only supported protocol in releases before v1.19.
* HTTPS, new in v1.19.
By default HTTPS is disabled (this will change in
https://tahoe-lafs.org/trac/tahoe-lafs/ticket/4041). When HTTPS is enabled on
the server, the server transparently listens for both Foolscap and HTTPS on the
same port. Clients can use either; by default they will only use Foolscap, but
when configured appropriately they will use HTTPS when possible (this will
change in https://tahoe-lafs.org/trac/tahoe-lafs/ticket/4041). At this time the
only limitation of HTTPS is that I2P is not supported, so any usage of I2P
uses only Foolscap.
Storage servers hold data in the form of "shares". Shares are encoded pieces
of files. There are a configurable number of shares for each file, 10 by
default. Normally, each share is stored on a separate server, but in some


@ -679,6 +679,14 @@ Client Configuration
location to prefer their local servers so that they can maintain access to
all of their uploads without using the internet.
``force_foolscap = (boolean, optional)``
If this is ``True``, the client will only connect to storage servers via
Foolscap, regardless of whether they support HTTPS. If this is ``False``,
the client will prefer HTTPS when it is available on the server. The default
value is ``True`` (this will change in
https://tahoe-lafs.org/trac/tahoe-lafs/ticket/4041).
In addition,
see :doc:`accepting-donations` for a convention for donating to storage server operators.
@ -796,6 +804,14 @@ Storage Server Configuration
(i.e. ``BASEDIR/storage``), but it can be placed elsewhere. Relative paths
will be interpreted relative to the node's base directory.
``force_foolscap = (boolean, optional)``
If this is ``True``, the node will expose the storage server via Foolscap
only, with no support for HTTPS. If this is ``False``, the server will
support both Foolscap and HTTPS on the same port. The default value is
``True`` (this will change in
https://tahoe-lafs.org/trac/tahoe-lafs/ticket/4041).
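For example, to opt in to HTTPS on both a client and a storage server (a sketch based on the options documented above; the ``tahoe.cfg`` section names ``[client]`` and ``[storage]`` are assumed from the surrounding document), one would set::

    [client]
    force_foolscap = False

    [storage]
    force_foolscap = False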
In addition,
see :doc:`accepting-donations` for a convention encouraging donations to storage server operators.


@ -278,8 +278,8 @@ This NURL will be announced alongside their existing Foolscap-based server's fUR
Such an announcement will resemble this::
{
"anonymous-storage-FURL": "pb://...", # The old key
"gbs-anonymous-storage-url": "pb://...#v=1" # The new key
"anonymous-storage-FURL": "pb://...", # The old entry
"anonymous-storage-NURLs": ["pb://...#v=1"] # The new, additional entry
}
The transition process will proceed in three stages:
@ -320,12 +320,7 @@ The follow sequence of events is likely:
Ideally,
the client would not rely on an update from the introducer to give it the GBS NURL for the updated storage server.
Therefore,
when an updated client connects to a storage server using Foolscap,
it should request the server's version information.
If this information indicates that GBS is supported then the client should cache this GBS information.
On subsequent connection attempts,
it should make use of this GBS information.
In practice, we have decided not to implement this functionality.
Server Details
--------------

flake.lock generated Normal file

@ -0,0 +1,115 @@
{
"nodes": {
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1687709756,
"narHash": "sha256-Y5wKlQSkgEK2weWdOu4J3riRd+kV/VCgHsqLNTTWQ/0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "dbabf0ca0c0c4bce6ea5eaf65af5cb694d2082c7",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs-22_11": {
"locked": {
"lastModified": 1688392541,
"narHash": "sha256-lHrKvEkCPTUO+7tPfjIcb7Trk6k31rz18vkyqmkeJfY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ea4c80b39be4c09702b0cb3b42eab59e2ba4f24b",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-22.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-23_05": {
"locked": {
"lastModified": 1689885880,
"narHash": "sha256-2ikAcvHKkKh8J/eUrwMA+wy1poscC+oL1RkN1V3RmT8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "fa793b06f56896b7d1909e4b69977c7bf842b2f0",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-unstable": {
"locked": {
"lastModified": 1689791806,
"narHash": "sha256-QpXjfiyBFwa7MV/J6nM5FoBreks9O7j9cAZxV22MR8A=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "439ba0789ff84dddea64eb2d47a4a0d4887dbb1f",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "pull/244135/head",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-compat": "flake-compat",
"flake-utils": "flake-utils",
"nixpkgs": [
"nixpkgs-unstable"
],
"nixpkgs-22_11": "nixpkgs-22_11",
"nixpkgs-23_05": "nixpkgs-23_05",
"nixpkgs-unstable": "nixpkgs-unstable"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

flake.nix Normal file

@ -0,0 +1,240 @@
{
description = "Tahoe-LAFS, free and open decentralized data store";
nixConfig = {
# Supply configuration for the build cache updated by our CI system. This
# should allow most users to avoid having to build a large number of
# packages (otherwise necessary due to our Python package overrides).
substituters = ["https://tahoe-lafs-opensource.cachix.org"];
trusted-public-keys = ["tahoe-lafs-opensource.cachix.org-1:eIKCHOPJYceJ2gb74l6e0mayuSdXqiavxYeAio0LFGo="];
};
inputs = {
# A couple possible nixpkgs pins. Ideally these could be selected easily
# from the command line but there seems to be no syntax/support for that.
# However, these at least cause certain revisions to be pinned in our lock
# file where you *can* dig them out - and the CI configuration does.
#
# These are really just examples for the time being since neither of these
# releases contains a package set that is completely compatible with our
# requirements. We could decide in the future that supporting multiple
# releases of NixOS at a time is worthwhile and then pins like these will
# help us test each of those releases.
"nixpkgs-22_11" = {
url = github:NixOS/nixpkgs?ref=nixos-22.11;
};
"nixpkgs-23_05" = {
url = github:NixOS/nixpkgs?ref=nixos-23.05;
};
# We depend on a very new python-cryptography which is not yet available
# from any release branch of nixpkgs. However, it is contained in a PR
# currently up for review. Point our nixpkgs at that for now.
"nixpkgs-unstable" = {
url = github:NixOS/nixpkgs?ref=pull/244135/head;
};
# Point the default nixpkgs at one of those. This avoids getting a
# _third_ package set involved and gives a way to provide what should be a
# working experience by default (that is, if nixpkgs doesn't get
# overridden).
nixpkgs.follows = "nixpkgs-unstable";
# Also get flake-utils for simplified multi-system definitions.
flake-utils = {
url = github:numtide/flake-utils;
};
# And get a helper that lets us easily continue to provide a default.nix.
flake-compat = {
url = "github:edolstra/flake-compat";
flake = false;
};
};
outputs = { self, nixpkgs, flake-utils, ... }:
{
# Expose an overlay which adds our version of Tahoe-LAFS to the Python
# package sets we specify, as well as all of the correct versions of its
# dependencies.
#
# We will also use this to define some other outputs since it gives us
# the most succinct way to get a working Tahoe-LAFS package.
overlays.default = import ./nix/overlay.nix;
} // (flake-utils.lib.eachDefaultSystem (system: let
# The package set for this system architecture.
pkgs = import nixpkgs {
inherit system;
# And include our Tahoe-LAFS package in that package set.
overlays = [ self.overlays.default ];
};
# pythonVersions :: [string]
#
# The version strings for the Python runtimes we'll work with.
pythonVersions =
let
# Match attribute names that look like a Python derivation - CPython
# or PyPy. We take care to avoid things like "python-foo" and
# "python3Full-unittest" though. We only want things like "pypy38"
# or "python311".
nameMatches = name: null != builtins.match "(python|pypy)3[[:digit:]]{0,2}" name;
# Sometimes an old version is left in the package set as an error
# saying something like "we remove this". Make sure that whatever we
# found by name evaluates without error, too.
notError = drv: (builtins.tryEval drv).success;
in
# Discover all of the Python runtime derivations by inspecting names
# and filtering out derivations with errors.
builtins.attrNames (
pkgs.lib.attrsets.filterAttrs
(name: drv: nameMatches name && notError drv)
pkgs
);
# defaultPyVersion :: string
#
# An element of pythonVersions which we'll use for the default package.
defaultPyVersion = "python3";
# pythons :: [derivation]
#
# Retrieve the actual Python package for each configured version. We
# already applied our overlay to pkgs so our packages will already be
# available.
pythons = builtins.map (pyVer: pkgs.${pyVer}) pythonVersions;
# packageName :: string -> string
#
# Construct the Tahoe-LAFS package name for the given Python runtime.
packageName = pyVersion: "${pyVersion}-tahoe-lafs";
# string -> string
#
# Construct the unit test application name for the given Python runtime.
unitTestName = pyVersion: "${pyVersion}-unittest";
# (string -> a) -> (string -> b) -> string -> attrset a b
#
# Make a singleton attribute set from the result of two functions.
singletonOf = f: g: x: { ${f x} = g x; };
# [attrset] -> attrset
#
# Merge a list of attrset into a single attrset with overlap preferring
# rightmost values.
mergeAttrs = pkgs.lib.foldr pkgs.lib.mergeAttrs {};
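# For example (illustrative, using packageName and makeRuntimeEnv' defined
# below):
#   singletonOf packageName makeRuntimeEnv' "python310"
#     => { "python310-tahoe-lafs" = <python-env derivation>; }
#   mergeAttrs [ { a = 1; } { a = 2; b = 3; } ]
#     => { a = 2; b = 3; }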
# makeRuntimeEnv :: string -> derivation
#
# Create a derivation that includes a Python runtime, Tahoe-LAFS, and
# all of its dependencies.
makeRuntimeEnv = singletonOf packageName makeRuntimeEnv';
makeRuntimeEnv' = pyVersion: (pkgs.${pyVersion}.withPackages (ps: with ps;
[ tahoe-lafs ] ++
tahoe-lafs.passthru.extras.i2p ++
tahoe-lafs.passthru.extras.tor
)).overrideAttrs (old: {
# By default, withPackages gives us a derivation with a fairly generic
# name (like "python-env"). Put our name in there for legibility.
# See the similar override in makeTestEnv.
name = packageName pyVersion;
});
# makeTestEnv :: string -> derivation
#
# Create a derivation that includes a Python runtime and all of the
# Tahoe-LAFS dependencies, but not Tahoe-LAFS itself, which we'll get
# from the working directory.
makeTestEnv = pyVersion: (pkgs.${pyVersion}.withPackages (ps: with ps;
[ tahoe-lafs ] ++
tahoe-lafs.passthru.extras.i2p ++
tahoe-lafs.passthru.extras.tor ++
tahoe-lafs.passthru.extras.unittest
)).overrideAttrs (old: {
# See the similar override in makeRuntimeEnv'.
name = packageName pyVersion;
});
in {
# Include a package set with our overlay on it in our own output. This
# is mainly a development/debugging convenience as it will expose all of
# our Python package overrides beneath it. The magic name
# "legacyPackages" is copied from nixpkgs and has special support in the
# nix command line tool.
legacyPackages = pkgs;
# The flake's package outputs. We'll define one version of the package
# for each version of Python we could find. We'll also point the
# flake's "default" package at the derivation corresponding to the
# default Python version we defined above. The package consists of a
# Python environment with Tahoe-LAFS available to it.
packages =
mergeAttrs (
[ { default = self.packages.${system}.${packageName defaultPyVersion}; } ]
++ (builtins.map makeRuntimeEnv pythonVersions)
++ (builtins.map (singletonOf unitTestName makeTestEnv) pythonVersions)
);
# The flake's app outputs. We'll define a version of an app for running
# the test suite for each version of Python we could find. We'll also
# define a version of an app for running the "tahoe" command-line
# entrypoint for each version of Python we could find.
apps =
let
# writeScript :: string -> string -> path
#
# Write a shell program to a file so it can be run later.
#
# We avoid writeShellApplication here because it has ghc as a
# dependency but ghc has Python as a dependency and our Python
# package override triggers a rebuild of ghc and many Haskell
# packages which takes a looong time.
writeScript = name: text: "${pkgs.writeShellScript name text}";
# makeTahoeApp :: string -> attrset
#
# A helper function to define the Tahoe-LAFS runtime entrypoint for
# a certain Python runtime.
makeTahoeApp = pyVersion: {
"tahoe-${pyVersion}" = {
type = "app";
program =
writeScript "tahoe"
''
${makeRuntimeEnv' pyVersion}/bin/tahoe "$@"
'';
};
};
# makeUnitTestsApp :: string -> attrset
#
# A helper function to define the Tahoe-LAFS unit test entrypoint
# for a certain Python runtime.
makeUnitTestsApp = pyVersion: {
"${unitTestName pyVersion}" = {
type = "app";
program =
let
python = "${makeTestEnv pyVersion}/bin/python";
in
writeScript "unit-tests"
''
${python} setup.py update_version
export TAHOE_LAFS_HYPOTHESIS_PROFILE=ci
export PYTHONPATH=$PWD/src
${python} -m twisted.trial "$@"
'';
};
};
in
# Merge a default app definition with the rest of the apps.
mergeAttrs (
[ { default = self.apps.${system}."tahoe-python3"; } ]
++ (builtins.map makeUnitTestsApp pythonVersions)
++ (builtins.map makeTahoeApp pythonVersions)
);
}));
}
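With these outputs defined, typical invocations look like this (illustrative; the package and app names are generated by the packageName and unitTestName helpers above):

    $ nix build .#python310-tahoe-lafs       # a Python environment containing Tahoe-LAFS
    $ nix run .#tahoe-python3 -- --version   # the default "tahoe" entrypoint
    $ nix run .#python310-unittest -- allmydata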


@ -446,6 +446,32 @@ def tor_network(reactor, temp_dir, chutney, request):
request.addfinalizer(cleanup)
pytest_twisted.blockon(chutney(("start", basic_network)))
# Wait for the nodes to "bootstrap" - ie, form a network among themselves.
# Successful bootstrap is reported with a message something like:
#
# Everything bootstrapped after 151 sec
# Bootstrap finished: 151 seconds
# Node status:
# test000a : 100, done , Done
# test001a : 100, done , Done
# test002a : 100, done , Done
# test003r : 100, done , Done
# test004r : 100, done , Done
# test005r : 100, done , Done
# test006r : 100, done , Done
# test007r : 100, done , Done
# test008c : 100, done , Done
# test009c : 100, done , Done
# Published dir info:
# test000a : 100, all nodes , desc md md_cons ns_cons , Dir info cached
# test001a : 100, all nodes , desc md md_cons ns_cons , Dir info cached
# test002a : 100, all nodes , desc md md_cons ns_cons , Dir info cached
# test003r : 100, all nodes , desc md md_cons ns_cons , Dir info cached
# test004r : 100, all nodes , desc md md_cons ns_cons , Dir info cached
# test005r : 100, all nodes , desc md md_cons ns_cons , Dir info cached
# test006r : 100, all nodes , desc md md_cons ns_cons , Dir info cached
# test007r : 100, all nodes , desc md md_cons ns_cons , Dir info cached
pytest_twisted.blockon(chutney(("wait_for_bootstrap", basic_network)))
# print some useful stuff


@ -657,19 +657,28 @@ def await_client_ready(tahoe, timeout=10, liveness=60*2, minimum_number_of_serve
time.sleep(1)
continue
print(
f"Now: {time.ctime()}\n"
f"Server last-received-data: {[s['last_received_data'] for s in servers]}"
)
now = time.time()
server_times = [
server['last_received_data']
for server in servers
for server
in servers
if server['last_received_data'] is not None
]
print(
f"Now: {time.ctime(now)}\n"
f"Liveness required: {liveness}\n"
f"Server last-received-data: {[time.ctime(s) for s in server_times]}\n"
f"Server ages: {[now - s for s in server_times]}\n"
)
# check that all times are 'recent enough' (it's OK if _some_ servers
# are down, we just want to make sure a sufficient number are up)
if len([time.time() - t <= liveness for t in server_times if t is not None]) < minimum_number_of_servers:
print("waiting because at least one server too old")
alive = [t for t in server_times if now - t <= liveness]
if len(alive) < minimum_number_of_servers:
print(
f"waiting because we found {len(alive)} servers "
f"and want {minimum_number_of_servers}"
)
time.sleep(1)
continue


@ -0,0 +1 @@
Document the ``force_foolscap`` configuration options for ``[storage]`` and ``[client]``.

newsfragments/4042.minor Normal file

newsfragments/4052.minor Normal file

newsfragments/4055.minor Normal file

nix/overlay.nix Normal file

@ -0,0 +1,10 @@
# This overlay adds Tahoe-LAFS and all of its properly-configured Python
# package dependencies to a Python package set. Downstream consumers can
# apply it to their own nixpkgs derivation to produce a Tahoe-LAFS package.
final: prev: {
# Add our overrides such that they will be applied to any Python derivation
# in nixpkgs.
pythonPackagesExtensions = prev.pythonPackagesExtensions ++ [
(import ./python-overrides.nix)
];
}
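A downstream consumer might apply the overlay like this (a sketch; assumes a `nixpkgs` path or flake input is in scope):

    let
      pkgs = import nixpkgs {
        overlays = [ (import ./nix/overlay.nix) ];
      };
    in
      # Every Python package set now carries the overrides, among them the
      # tahoe-lafs package itself.
      pkgs.python310Packages.tahoe-lafs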


@ -9,14 +9,39 @@ let
# Disable a Python package's test suite.
dontCheck = drv: drv.overrideAttrs (old: { doInstallCheck = false; });
# string -> any -> derivation -> derivation
#
# If the overrideable function for the given derivation accepts an argument
# with the given name, override it with the given value.
#
# Since we try to work with multiple versions of nixpkgs, sometimes we need
# to override a parameter that exists in one version but not others. This
# makes it a bit easier to do so.
overrideIfPresent = name: value: drv:
if (drv.override.__functionArgs ? ${name})
then drv.override { "${name}" = value; }
else drv;
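# For example (illustrative): if drv.override accepts a sphinxHook argument
# then
#   overrideIfPresent "sphinxHook" null drv
# behaves like drv.override { sphinxHook = null; }; on a nixpkgs where that
# argument does not exist, drv is returned unchanged.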
# Disable building a Python package's documentation.
dontBuildDocs = alsoDisable: drv: (drv.override ({
sphinxHook = null;
} // alsoDisable)).overrideAttrs ({ outputs, ... }: {
dontBuildDocs = drv: (
overrideIfPresent "sphinxHook" null (
overrideIfPresent "sphinx-rtd-theme" null
drv
)
).overrideAttrs ({ outputs, ... }: {
outputs = builtins.filter (x: "doc" != x) outputs;
});
in {
tahoe-lafs = self.callPackage ./tahoe-lafs.nix {
# Define the location of the Tahoe-LAFS source to be packaged (the same
# directory as contains this file). Clean up as many of the non-source
# files (eg the `.git` directory, `~` backup files, nix's own `result`
# symlink, etc) as possible to avoid needing to re-build when files that
# make no difference to the package have changed.
tahoe-lafs-src = self.lib.cleanSource ../.;
};
# Some dependencies aren't packaged in nixpkgs so supply our own packages.
pycddl = self.callPackage ./pycddl.nix { };
txi2p = self.callPackage ./txi2p.nix { };
@ -30,15 +55,23 @@ in {
inherit (super) txtorcon;
};
# Update the version of pyopenssl.
pyopenssl = self.callPackage ./pyopenssl.nix {
pyopenssl =
# Building the docs requires sphinx which brings in a dependency on babel,
# the test suite of which fails.
onPyPy (dontBuildDocs { sphinx-rtd-theme = null; })
# Avoid infinite recursion.
super.pyopenssl;
};
# With our customized package set a Twisted unit test fails. Patch the
# Twisted test suite to skip that test.
# Filed upstream at https://github.com/twisted/twisted/issues/11877
twisted = super.twisted.overrideAttrs (old: {
patches = (old.patches or []) ++ [ ./twisted.patch ];
});
# Update the version of pyopenssl - and since we're doing that anyway, we
# don't need the docs. Unfortunately this triggers a lot of rebuilding of
# dependent packages.
pyopenssl = dontBuildDocs (self.callPackage ./pyopenssl.nix {
inherit (super) pyopenssl;
});
# The cryptography that we get from nixpkgs to satisfy the pyopenssl upgrade
# that we did breaks service-identity ... so get a newer version that works.
service-identity = self.callPackage ./service-identity.nix { };
# collections-extended is currently broken for Python 3.11 in nixpkgs but
# we know where a working version lives.
@ -52,16 +85,19 @@ in {
# tornado and tk pull in a huge dependency trees for functionality we don't
# care about, also tkinter doesn't work on PyPy.
matplotlib = super.matplotlib.override { tornado = null; enableTk = false; };
matplotlib = onPyPy (matplotlib: matplotlib.override {
tornado = null;
enableTk = false;
}) super.matplotlib;
tqdm = super.tqdm.override {
tqdm = onPyPy (tqdm: tqdm.override {
# ibid.
tkinter = null;
# pandas is only required by the part of the test suite covering
# integration with pandas that we don't care about. pandas is a huge
# dependency.
pandas = null;
};
}) super.tqdm;
# The treq test suite depends on httpbin. httpbin pulls in babel (flask ->
# jinja2 -> babel) and arrow (brotlipy -> construct -> arrow). babel fails
@ -74,48 +110,25 @@ in {
six = onPyPy dontCheck super.six;
# Likewise for beautifulsoup4.
beautifulsoup4 = onPyPy (dontBuildDocs {}) super.beautifulsoup4;
beautifulsoup4 = onPyPy dontBuildDocs super.beautifulsoup4;
# The autobahn test suite pulls in a vast number of dependencies for
# functionality we don't care about. It might be nice to *selectively*
# disable just some of it but this is easier.
autobahn = onPyPy dontCheck super.autobahn;
autobahn = dontCheck super.autobahn;
# and python-dotenv tests pulls in a lot of dependencies, including jedi,
# which does not work on PyPy.
python-dotenv = onPyPy dontCheck super.python-dotenv;
# Upstream package unaccountably includes a sqlalchemy dependency ... but
# the project has no such dependency. Fixed in nixpkgs in
# da10e809fff70fbe1d86303b133b779f09f56503.
aiocontextvars = super.aiocontextvars.override { sqlalchemy = null; };
# By default, the sphinx docs are built, which pulls in a lot of
# dependencies - including jedi, which does not work on PyPy.
hypothesis =
(let h = super.hypothesis;
in
if (h.override.__functionArgs.enableDocumentation or false)
then h.override { enableDocumentation = false; }
else h).overrideAttrs ({ nativeBuildInputs, ... }: {
# The nixpkgs expression is missing the tzdata check input.
nativeBuildInputs = nativeBuildInputs ++ [ super.tzdata ];
});
hypothesis = onPyPy dontBuildDocs super.hypothesis;
# flaky's test suite depends on nose and nose appears to have Python 3
# incompatibilities (it includes `print` statements, for example).
flaky = onPyPy dontCheck super.flaky;
# Replace the deprecated way of running the test suite with the modern way.
# This also drops a bunch of unnecessary build-time dependencies, some of
# which are broken on PyPy. Fixed in nixpkgs in
# 5feb5054bb08ba779bd2560a44cf7d18ddf37fea.
zfec = (super.zfec.override {
setuptoolsTrial = null;
}).overrideAttrs (old: {
checkPhase = "trial zfec";
});
# collections-extended is packaged with poetry-core. poetry-core test suite
# uses virtualenv and virtualenv test suite fails on PyPy.
poetry-core = onPyPy dontCheck super.poetry-core;
@ -134,15 +147,6 @@ in {
# since we actually depend directly and significantly on Foolscap.
foolscap = onPyPy dontCheck super.foolscap;
# Fixed by nixpkgs PR https://github.com/NixOS/nixpkgs/pull/222246
psutil = super.psutil.overrideAttrs ({ pytestFlagsArray, disabledTests, ...}: {
# Upstream already disables some tests but there are even more that have
# build impurities that come from build system hardware configuration.
# Skip them too.
pytestFlagsArray = [ "-v" ] ++ pytestFlagsArray;
disabledTests = disabledTests ++ [ "sensors_temperatures" ];
});
# CircleCI build systems don't have enough memory to run this test suite.
lz4 = dontCheck super.lz4;
lz4 = onPyPy dontCheck super.lz4;
}

nix/service-identity.nix Normal file

@ -0,0 +1,61 @@
{ lib
, attrs
, buildPythonPackage
, cryptography
, fetchFromGitHub
, hatch-fancy-pypi-readme
, hatch-vcs
, hatchling
, idna
, pyasn1
, pyasn1-modules
, pytestCheckHook
, pythonOlder
, setuptools
}:
buildPythonPackage rec {
pname = "service-identity";
version = "23.1.0";
format = "pyproject";
disabled = pythonOlder "3.8";
src = fetchFromGitHub {
owner = "pyca";
repo = pname;
rev = "refs/tags/${version}";
hash = "sha256-PGDtsDgRwh7GuuM4OuExiy8L4i3Foo+OD0wMrndPkvo=";
};
nativeBuildInputs = [
hatch-fancy-pypi-readme
hatch-vcs
hatchling
setuptools
];
propagatedBuildInputs = [
attrs
cryptography
idna
pyasn1
pyasn1-modules
];
nativeCheckInputs = [
pytestCheckHook
];
pythonImportsCheck = [
"service_identity"
];
meta = with lib; {
description = "Service identity verification for pyOpenSSL";
homepage = "https://service-identity.readthedocs.io";
changelog = "https://github.com/pyca/service-identity/releases/tag/${version}";
license = licenses.mit;
maintainers = with maintainers; [ fab ];
};
}


@ -1,38 +0,0 @@
{
"niv": {
"branch": "master",
"description": "Easy dependency management for Nix projects",
"homepage": "https://github.com/nmattia/niv",
"owner": "nmattia",
"repo": "niv",
"rev": "5830a4dd348d77e39a0f3c4c762ff2663b602d4c",
"sha256": "1d3lsrqvci4qz2hwjrcnd8h5vfkg8aypq3sjd4g3izbc8frwz5sm",
"type": "tarball",
"url": "https://github.com/nmattia/niv/archive/5830a4dd348d77e39a0f3c4c762ff2663b602d4c.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"nixpkgs-22.11": {
"branch": "nixos-22.11",
"description": "Nix Packages collection",
"homepage": "",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "970402e6147c49603f4d06defe44d27fe51884ce",
"sha256": "1v0ljy7wqq14ad3gd1871fgvd4psr7dy14q724k0wwgxk7inbbwh",
"type": "tarball",
"url": "https://github.com/nixos/nixpkgs/archive/970402e6147c49603f4d06defe44d27fe51884ce.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
},
"nixpkgs-unstable": {
"branch": "master",
"description": "Nix Packages collection",
"homepage": "",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "d0c9a536331227ab883b4f6964be638fa436d81f",
"sha256": "1gg6v5rk1p26ciygdg262zc5vqws753rvgcma5rim2s6gyfrjaq1",
"type": "tarball",
"url": "https://github.com/nixos/nixpkgs/archive/d0c9a536331227ab883b4f6964be638fa436d81f.tar.gz",
"url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
}
}


@ -1,174 +0,0 @@
# This file has been generated by Niv.
let
#
# The fetchers. fetch_<type> fetches specs of type <type>.
#
fetch_file = pkgs: name: spec:
let
name' = sanitizeName name + "-src";
in
if spec.builtin or true then
builtins_fetchurl { inherit (spec) url sha256; name = name'; }
else
pkgs.fetchurl { inherit (spec) url sha256; name = name'; };
fetch_tarball = pkgs: name: spec:
let
name' = sanitizeName name + "-src";
in
if spec.builtin or true then
builtins_fetchTarball { name = name'; inherit (spec) url sha256; }
else
pkgs.fetchzip { name = name'; inherit (spec) url sha256; };
fetch_git = name: spec:
let
ref =
if spec ? ref then spec.ref else
if spec ? branch then "refs/heads/${spec.branch}" else
if spec ? tag then "refs/tags/${spec.tag}" else
abort "In git source '${name}': Please specify `ref`, `tag` or `branch`!";
in
builtins.fetchGit { url = spec.repo; inherit (spec) rev; inherit ref; };
fetch_local = spec: spec.path;
fetch_builtin-tarball = name: throw
''[${name}] The niv type "builtin-tarball" is deprecated. You should instead use `builtin = true`.
$ niv modify ${name} -a type=tarball -a builtin=true'';
fetch_builtin-url = name: throw
''[${name}] The niv type "builtin-url" will soon be deprecated. You should instead use `builtin = true`.
$ niv modify ${name} -a type=file -a builtin=true'';
#
# Various helpers
#
# https://github.com/NixOS/nixpkgs/pull/83241/files#diff-c6f540a4f3bfa4b0e8b6bafd4cd54e8bR695
sanitizeName = name:
(
concatMapStrings (s: if builtins.isList s then "-" else s)
(
builtins.split "[^[:alnum:]+._?=-]+"
((x: builtins.elemAt (builtins.match "\\.*(.*)" x) 0) name)
)
);
# The set of packages used when specs are fetched using non-builtins.
mkPkgs = sources: system:
let
sourcesNixpkgs =
import (builtins_fetchTarball { inherit (sources.nixpkgs) url sha256; }) { inherit system; };
hasNixpkgsPath = builtins.any (x: x.prefix == "nixpkgs") builtins.nixPath;
hasThisAsNixpkgsPath = <nixpkgs> == ./.;
in
if builtins.hasAttr "nixpkgs" sources
then sourcesNixpkgs
else if hasNixpkgsPath && ! hasThisAsNixpkgsPath then
import <nixpkgs> {}
else
abort
''
Please specify either <nixpkgs> (through -I or NIX_PATH=nixpkgs=...) or
add a package called "nixpkgs" to your sources.json.
'';
# The actual fetching function.
fetch = pkgs: name: spec:
if ! builtins.hasAttr "type" spec then
abort "ERROR: niv spec ${name} does not have a 'type' attribute"
else if spec.type == "file" then fetch_file pkgs name spec
else if spec.type == "tarball" then fetch_tarball pkgs name spec
else if spec.type == "git" then fetch_git name spec
else if spec.type == "local" then fetch_local spec
else if spec.type == "builtin-tarball" then fetch_builtin-tarball name
else if spec.type == "builtin-url" then fetch_builtin-url name
else
abort "ERROR: niv spec ${name} has unknown type ${builtins.toJSON spec.type}";
# If the environment variable NIV_OVERRIDE_${name} is set, then use
# the path directly as opposed to the fetched source.
replace = name: drv:
let
saneName = stringAsChars (c: if isNull (builtins.match "[a-zA-Z0-9]" c) then "_" else c) name;
ersatz = builtins.getEnv "NIV_OVERRIDE_${saneName}";
in
if ersatz == "" then drv else
# this turns the string into an actual Nix path (for both absolute and
# relative paths)
if builtins.substring 0 1 ersatz == "/" then /. + ersatz else /. + builtins.getEnv "PWD" + "/${ersatz}";
# Ports of functions for older nix versions
# a Nix version of mapAttrs if the built-in doesn't exist
mapAttrs = builtins.mapAttrs or (
f: set: with builtins;
listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set))
);
# https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/lists.nix#L295
range = first: last: if first > last then [] else builtins.genList (n: first + n) (last - first + 1);
# https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L257
stringToCharacters = s: map (p: builtins.substring p 1 s) (range 0 (builtins.stringLength s - 1));
# https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L269
stringAsChars = f: s: concatStrings (map f (stringToCharacters s));
concatMapStrings = f: list: concatStrings (map f list);
concatStrings = builtins.concatStringsSep "";
# https://github.com/NixOS/nixpkgs/blob/8a9f58a375c401b96da862d969f66429def1d118/lib/attrsets.nix#L331
optionalAttrs = cond: as: if cond then as else {};
# fetchTarball version that is compatible between all the versions of Nix
builtins_fetchTarball = { url, name ? null, sha256 }@attrs:
let
inherit (builtins) lessThan nixVersion fetchTarball;
in
if lessThan nixVersion "1.12" then
fetchTarball ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; }))
else
fetchTarball attrs;
# fetchurl version that is compatible between all the versions of Nix
builtins_fetchurl = { url, name ? null, sha256 }@attrs:
let
inherit (builtins) lessThan nixVersion fetchurl;
in
if lessThan nixVersion "1.12" then
fetchurl ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; }))
else
fetchurl attrs;
# Create the final "sources" from the config
mkSources = config:
mapAttrs (
name: spec:
if builtins.hasAttr "outPath" spec
then abort
"The values in sources.json should not have an 'outPath' attribute"
else
spec // { outPath = replace name (fetch config.pkgs name spec); }
) config.sources;
# The "config" used by the fetchers
mkConfig =
{ sourcesFile ? if builtins.pathExists ./sources.json then ./sources.json else null
, sources ? if isNull sourcesFile then {} else builtins.fromJSON (builtins.readFile sourcesFile)
, system ? builtins.currentSystem
, pkgs ? mkPkgs sources system
}: rec {
# The sources, i.e. the attribute set of spec name to spec
inherit sources;
# The "pkgs" (evaluated nixpkgs) to use for e.g. non-builtin fetchers
inherit pkgs;
};
in
mkSources (mkConfig {}) // { __functor = _: settings: mkSources (mkConfig settings); }


@ -1,24 +1,16 @@
let
pname = "tahoe-lafs";
version = "1.18.0.post1";
in
{ lib
, pythonPackages
, buildPythonPackage
, tahoe-lafs-src
, extrasNames
# control how the test suite is run
, doCheck
}:
let
pname = "tahoe-lafs";
version = "1.18.0.post1";
pickExtraDependencies = deps: extras: builtins.foldl' (accum: extra: accum ++ deps.${extra}) [] extras;
pythonExtraDependencies = with pythonPackages; {
tor = [ txtorcon ];
i2p = [ txi2p ];
};
pythonPackageDependencies = with pythonPackages; [
buildPythonPackage rec {
inherit pname version;
src = tahoe-lafs-src;
propagatedBuildInputs = with pythonPackages; [
attrs
autobahn
cbor2
@ -41,35 +33,42 @@ let
six
treq
twisted
# Get the dependencies for the Twisted extras we depend on, too.
twisted.passthru.optional-dependencies.tls
twisted.passthru.optional-dependencies.conch
werkzeug
zfec
zope_interface
] ++ pickExtraDependencies pythonExtraDependencies extrasNames;
] ++
# Get the dependencies for the Twisted extras we depend on, too.
twisted.passthru.optional-dependencies.tls ++
twisted.passthru.optional-dependencies.conch;
unitTestDependencies = with pythonPackages; [
beautifulsoup4
fixtures
hypothesis
mock
prometheus-client
testtools
];
# The test suite lives elsewhere.
doCheck = false;
in
buildPythonPackage {
inherit pname version;
src = tahoe-lafs-src;
propagatedBuildInputs = pythonPackageDependencies;
inherit doCheck;
checkInputs = unitTestDependencies;
checkPhase = ''
export TAHOE_LAFS_HYPOTHESIS_PROFILE=ci
python -m twisted.trial -j $NIX_BUILD_CORES allmydata
'';
passthru = {
extras = with pythonPackages; {
tor = [
txtorcon
];
i2p = [
txi2p
];
unittest = [
beautifulsoup4
html5lib
fixtures
hypothesis
mock
prometheus-client
testtools
];
integrationtest = [
pytest
pytest-twisted
paramiko
pytest-timeout
];
};
};
meta = with lib; {
homepage = "https://tahoe-lafs.org/";


@ -1,4 +0,0 @@
# Build the package with the test suite enabled.
args@{...}: (import ../. args).override {
doCheck = true;
}

nix/twisted.patch Normal file

@ -0,0 +1,12 @@
diff --git a/src/twisted/internet/test/test_endpoints.py b/src/twisted/internet/test/test_endpoints.py
index c650fd8aa6..a1754fd533 100644
--- a/src/twisted/internet/test/test_endpoints.py
+++ b/src/twisted/internet/test/test_endpoints.py
@@ -4214,6 +4214,7 @@ class WrapClientTLSParserTests(unittest.TestCase):
connectionCreator = connectionCreatorFromEndpoint(reactor, endpoint)
self.assertEqual(connectionCreator._hostname, "\xe9xample.example.com")
+ @skipIf(True, "self.assertFalse(plainClient.transport.disconnecting) fails")
def test_tls(self):
"""
When passed a string endpoint description beginning with C{tls:},


@ -41,6 +41,7 @@ from twisted.internet.interfaces import (
IDelayedCall,
)
from twisted.internet.ssl import CertificateOptions
from twisted.protocols.tls import TLSMemoryBIOProtocol
from twisted.web.client import Agent, HTTPConnectionPool
from zope.interface import implementer
from hyperlink import DecodedURL
@ -72,7 +73,7 @@ except ImportError:
pass
def _encode_si(si): # type: (bytes) -> str
def _encode_si(si: bytes) -> str:
"""Encode the storage index into Unicode string."""
return str(si_b2a(si), "ascii")
@ -80,9 +81,13 @@ def _encode_si(si): # type: (bytes) -> str
class ClientException(Exception):
"""An unexpected response code from the server."""
def __init__(self, code, *additional_args):
Exception.__init__(self, code, *additional_args)
def __init__(
self, code: int, message: Optional[str] = None, body: Optional[bytes] = None
):
Exception.__init__(self, code, message, body)
self.code = code
self.message = message
self.body = body
register_exception_extractor(ClientException, lambda e: {"response_code": e.code})
@ -93,7 +98,7 @@ register_exception_extractor(ClientException, lambda e: {"response_code": e.code
# Tags are of the form #6.nnn, where the number is documented at
# https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml. Notably, #6.258
# indicates a set.
_SCHEMAS = {
_SCHEMAS: Mapping[str, Schema] = {
"get_version": Schema(
# Note that the single-quoted (`'`) string keys in this schema
# represent *byte* strings - per the CDDL specification. Text strings
@ -155,7 +160,7 @@ class _LengthLimitedCollector:
timeout_on_silence: IDelayedCall
f: BytesIO = field(factory=BytesIO)
def __call__(self, data: bytes):
def __call__(self, data: bytes) -> None:
self.timeout_on_silence.reset(60)
self.remaining_length -= len(data)
if self.remaining_length < 0:
@ -164,7 +169,7 @@ class _LengthLimitedCollector:
def limited_content(
response,
response: IResponse,
clock: IReactorTime,
max_length: int = 30 * 1024 * 1024,
) -> Deferred[BinaryIO]:
@ -300,11 +305,13 @@ class _StorageClientHTTPSPolicy:
expected_spki_hash: bytes
# IPolicyForHTTPS
def creatorForNetloc(self, hostname, port):
def creatorForNetloc(self, hostname: str, port: int) -> _StorageClientHTTPSPolicy:
return self
# IOpenSSLClientConnectionCreator
def clientConnectionForTLS(self, tlsProtocol):
def clientConnectionForTLS(
self, tlsProtocol: TLSMemoryBIOProtocol
) -> SSL.Connection:
return SSL.Connection(
_TLSContextFactory(self.expected_spki_hash).getContext(), None
)
@ -344,7 +351,7 @@ class StorageClientFactory:
cls.TEST_MODE_REGISTER_HTTP_POOL = callback
@classmethod
def stop_test_mode(cls):
def stop_test_mode(cls) -> None:
"""Stop testing mode."""
cls.TEST_MODE_REGISTER_HTTP_POOL = None
@ -437,7 +444,7 @@ class StorageClient(object):
"""Get a URL relative to the base URL."""
return self._base_url.click(path)
def _get_headers(self, headers): # type: (Optional[Headers]) -> Headers
def _get_headers(self, headers: Optional[Headers]) -> Headers:
"""Return the basic headers to be used by default."""
if headers is None:
headers = Headers()
@ -565,7 +572,7 @@ class StorageClient(object):
).read()
raise ClientException(response.code, response.phrase, data)
def shutdown(self) -> Deferred:
def shutdown(self) -> Deferred[object]:
"""Shutdown any connections."""
return self._pool.closeCachedConnections()


@ -4,7 +4,18 @@ HTTP server for storage.
from __future__ import annotations
from typing import Any, Callable, Union, cast, Optional
from typing import (
Any,
Callable,
Union,
cast,
Optional,
TypeVar,
Sequence,
Protocol,
Dict,
)
from typing_extensions import ParamSpec, Concatenate
from functools import wraps
from base64 import b64decode
import binascii
@ -15,20 +26,24 @@ import mmap
from eliot import start_action
from cryptography.x509 import Certificate as CryptoCertificate
from zope.interface import implementer
from klein import Klein
from klein import Klein, KleinRenderable
from klein.resource import KleinResource
from twisted.web import http
from twisted.internet.interfaces import (
IListeningPort,
IStreamServerEndpoint,
IPullProducer,
IProtocolFactory,
)
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.defer import Deferred
from twisted.internet.ssl import CertificateOptions, Certificate, PrivateCertificate
from twisted.internet.interfaces import IReactorFromThreads
from twisted.web.server import Site, Request
from twisted.web.iweb import IRequest
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.filepath import FilePath
from twisted.python.failure import Failure
from attrs import define, field, Factory
from werkzeug.http import (
@ -68,7 +83,7 @@ class ClientSecretsException(Exception):
def _extract_secrets(
header_values: list[str], required_secrets: set[Secrets]
header_values: Sequence[str], required_secrets: set[Secrets]
) -> dict[Secrets, bytes]:
"""
Given list of values of ``X-Tahoe-Authorization`` headers, and required
@ -102,18 +117,43 @@ def _extract_secrets(
return result
def _authorization_decorator(required_secrets):
class BaseApp(Protocol):
"""Protocol for ``HTTPServer`` and testing equivalent."""
_swissnum: bytes
P = ParamSpec("P")
T = TypeVar("T")
SecretsDict = Dict[Secrets, bytes]
App = TypeVar("App", bound=BaseApp)
def _authorization_decorator(
required_secrets: set[Secrets],
) -> Callable[
[Callable[Concatenate[App, Request, SecretsDict, P], T]],
Callable[Concatenate[App, Request, P], T],
]:
"""
1. Check the ``Authorization`` header matches server swissnum.
2. Extract ``X-Tahoe-Authorization`` headers and pass them in.
3. Log the request and response.
"""
def decorator(f):
def decorator(
f: Callable[Concatenate[App, Request, SecretsDict, P], T]
) -> Callable[Concatenate[App, Request, P], T]:
@wraps(f)
def route(self, request, *args, **kwargs):
# Don't set text/html content type by default:
request.defaultContentType = None
def route(
self: App,
request: Request,
*args: P.args,
**kwargs: P.kwargs,
) -> T:
# Don't set text/html content type by default.
# None is actually supported, see https://github.com/twisted/twisted/issues/11902
request.defaultContentType = None # type: ignore[assignment]
with start_action(
action_type="allmydata:storage:http-server:handle-request",
@ -163,7 +203,22 @@ def _authorization_decorator(required_secrets):
return decorator
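The Concatenate/ParamSpec annotations above are dense; here is a minimal standalone sketch of the same typing pattern (hypothetical names, not part of the Tahoe codebase):

    from functools import wraps
    from typing import Callable, TypeVar
    from typing_extensions import Concatenate, ParamSpec

    P = ParamSpec("P")
    T = TypeVar("T")

    # Accepts a function whose first positional parameter is a str and
    # returns a function without that parameter: the wrapper supplies it,
    # while the types of all remaining parameters are preserved exactly.
    def with_secret(f: Callable[Concatenate[str, P], T]) -> Callable[P, T]:
        @wraps(f)
        def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
            return f("swissnum", *args, **kwargs)
        return wrapper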
def _authorized_route(app, required_secrets, *route_args, **route_kwargs):
def _authorized_route(
klein_app: Klein,
required_secrets: set[Secrets],
url: str,
*route_args: Any,
branch: bool = False,
**route_kwargs: Any,
) -> Callable[
[
Callable[
Concatenate[App, Request, SecretsDict, P],
KleinRenderable,
]
],
Callable[..., KleinRenderable],
]:
"""
Like Klein's @route, but with additional support for checking the
``Authorization`` header as well as ``X-Tahoe-Authorization`` headers. The
@ -173,12 +228,23 @@ def _authorized_route(app, required_secrets, *route_args, **route_kwargs):
:param required_secrets: Set of required ``Secret`` types.
"""
def decorator(f):
@app.route(*route_args, **route_kwargs)
def decorator(
f: Callable[
Concatenate[App, Request, SecretsDict, P],
KleinRenderable,
]
) -> Callable[..., KleinRenderable]:
@klein_app.route(url, *route_args, branch=branch, **route_kwargs) # type: ignore[arg-type]
@_authorization_decorator(required_secrets)
@wraps(f)
def handle_route(*args, **kwargs):
return f(*args, **kwargs)
def handle_route(
app: App,
request: Request,
secrets: SecretsDict,
*args: P.args,
**kwargs: P.kwargs,
) -> KleinRenderable:
return f(app, request, secrets, *args, **kwargs)
return handle_route
@ -234,7 +300,7 @@ class UploadsInProgress(object):
except (KeyError, IndexError):
raise _HTTPError(http.NOT_FOUND)
def remove_write_bucket(self, bucket: BucketWriter):
def remove_write_bucket(self, bucket: BucketWriter) -> None:
"""Stop tracking the given ``BucketWriter``."""
try:
storage_index, share_number = self._bucketwriters.pop(bucket)
@ -250,7 +316,7 @@ class UploadsInProgress(object):
def validate_upload_secret(
self, storage_index: bytes, share_number: int, upload_secret: bytes
):
) -> None:
"""
Raise an unauthorized-HTTP-response exception if the given
storage_index+share_number have a different upload secret than the
@ -272,7 +338,7 @@ class StorageIndexConverter(BaseConverter):
regex = "[" + str(rfc3548_alphabet, "ascii") + "]{26}"
def to_python(self, value):
def to_python(self, value: str) -> bytes:
try:
return si_a2b(value.encode("ascii"))
except (AssertionError, binascii.Error, ValueError):
@ -351,7 +417,7 @@ class _ReadAllProducer:
start: int = field(default=0)
@classmethod
def produce_to(cls, request: Request, read_data: ReadData) -> Deferred:
def produce_to(cls, request: Request, read_data: ReadData) -> Deferred[bytes]:
"""
Create and register the producer, returning ``Deferred`` that should be
returned from a HTTP server endpoint.
@ -360,7 +426,7 @@ class _ReadAllProducer:
request.registerProducer(producer, False)
return producer.result
def resumeProducing(self):
def resumeProducing(self) -> None:
data = self.read_data(self.start, 65536)
if not data:
self.request.unregisterProducer()
@ -371,10 +437,10 @@ class _ReadAllProducer:
self.request.write(data)
self.start += len(data)
def pauseProducing(self):
def pauseProducing(self) -> None:
pass
def stopProducing(self):
def stopProducing(self) -> None:
pass
@ -392,7 +458,7 @@ class _ReadRangeProducer:
start: int
remaining: int
def resumeProducing(self):
def resumeProducing(self) -> None:
if self.result is None or self.request is None:
return
@ -429,10 +495,10 @@ class _ReadRangeProducer:
if self.remaining == 0:
self.stopProducing()
def pauseProducing(self):
def pauseProducing(self) -> None:
pass
def stopProducing(self):
def stopProducing(self) -> None:
if self.request is not None:
self.request.unregisterProducer()
self.request = None
@ -511,12 +577,13 @@ def read_range(
return d
def _add_error_handling(app: Klein):
def _add_error_handling(app: Klein) -> None:
"""Add exception handlers to a Klein app."""
@app.handle_errors(_HTTPError)
def _http_error(_, request, failure):
def _http_error(self: Any, request: IRequest, failure: Failure) -> KleinRenderable:
"""Handle ``_HTTPError`` exceptions."""
assert isinstance(failure.value, _HTTPError)
request.setResponseCode(failure.value.code)
if failure.value.body is not None:
return failure.value.body
@ -524,13 +591,69 @@ def _add_error_handling(app: Klein):
return b""
@app.handle_errors(CDDLValidationError)
def _cddl_validation_error(_, request, failure):
def _cddl_validation_error(
self: Any, request: IRequest, failure: Failure
) -> KleinRenderable:
"""Handle CDDL validation errors."""
request.setResponseCode(http.BAD_REQUEST)
return str(failure.value).encode("utf-8")
class HTTPServer(object):
async def read_encoded(
reactor, request, schema: Schema, max_size: int = 1024 * 1024
) -> Any:
"""
Read encoded request body data, decoding it with CBOR by default.
Somewhat arbitrarily, limit body size to 1MiB by default.
"""
content_type = get_content_type(request.requestHeaders)
if content_type is None:
content_type = CBOR_MIME_TYPE
if content_type != CBOR_MIME_TYPE:
raise _HTTPError(http.UNSUPPORTED_MEDIA_TYPE)
# Make sure it's not too large:
request.content.seek(0, SEEK_END)
size = request.content.tell()
if size > max_size:
raise _HTTPError(http.REQUEST_ENTITY_TOO_LARGE)
request.content.seek(0, SEEK_SET)
# We don't want to load the whole message into memory, because it might
# be quite large. The CDDL validator takes a read-only bytes-like
# thing. Luckily, for large request bodies twisted.web will buffer the
# data in a file, so we can use mmap() to get a memory view. The CDDL
# validator will not make a copy, so it won't increase memory usage
# beyond that.
try:
fd = request.content.fileno()
except (ValueError, OSError):
fd = -1
if fd >= 0:
# It's a file, so we can use mmap() to save memory.
message = mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
else:
message = request.content.read()
# Pycddl will release the GIL when validating larger documents, so
# let's take advantage of multiple CPUs:
if size > 10_000:
await defer_to_thread(reactor, schema.validate_cbor, message)
else:
schema.validate_cbor(message)
# The CBOR parser will allocate more memory, but at least we can feed
# it the file-like object, so that if it's large it won't make two
# copies.
request.content.seek(SEEK_SET, 0)
# Typically deserialization to Python will not release the GIL, and
# indeed as of Jan 2023 cbor2 didn't have any code to release the GIL
# in the decode path. As such, running it in a different thread has no benefit.
return cbor2.load(request.content)
class HTTPServer(BaseApp):
"""
A HTTP interface to the storage server.
"""
@ -557,11 +680,11 @@ class HTTPServer(object):
self._uploads.remove_write_bucket
)
def get_resource(self):
def get_resource(self) -> KleinResource:
"""Return twisted.web ``Resource`` for this object."""
return self._app.resource()
def _send_encoded(self, request, data):
def _send_encoded(self, request: Request, data: object) -> Deferred[bytes]:
"""
Return encoded data suitable for writing as the HTTP body response, by
default using CBOR.
@ -587,61 +710,10 @@ class HTTPServer(object):
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3861
raise _HTTPError(http.NOT_ACCEPTABLE)
async def _read_encoded(
self, request, schema: Schema, max_size: int = 1024 * 1024
) -> Any:
"""
Read encoded request body data, decoding it with CBOR by default.
Somewhat arbitrarily, limit body size to 1MiB by default.
"""
content_type = get_content_type(request.requestHeaders)
if content_type != CBOR_MIME_TYPE:
raise _HTTPError(http.UNSUPPORTED_MEDIA_TYPE)
# Make sure it's not too large:
request.content.seek(0, SEEK_END)
size = request.content.tell()
if size > max_size:
raise _HTTPError(http.REQUEST_ENTITY_TOO_LARGE)
request.content.seek(0, SEEK_SET)
# We don't want to load the whole message into memory, because it might
# be quite large. The CDDL validator takes a read-only bytes-like
# thing. Luckily, for large request bodies twisted.web will buffer the
# data in a file, so we can use mmap() to get a memory view. The CDDL
# validator will not make a copy, so it won't increase memory usage
# beyond that.
try:
fd = request.content.fileno()
except (ValueError, OSError):
fd = -1
if fd >= 0:
# It's a file, so we can use mmap() to save memory.
message = mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
else:
message = request.content.read()
# Pycddl will release the GIL when validating larger documents, so
# let's take advantage of multiple CPUs:
if size > 10_000:
await defer_to_thread(self._reactor, schema.validate_cbor, message)
else:
schema.validate_cbor(message)
# The CBOR parser will allocate more memory, but at least we can feed
# it the file-like object, so that if it's large it won't make two
# copies.
request.content.seek(0, SEEK_SET)
# Typically deserialization to Python will not release the GIL, and
# indeed as of Jan 2023 cbor2 didn't have any code to release the GIL
# in the decode path. As such, running it in a different thread has no benefit.
return cbor2.load(request.content)
##### Generic APIs #####
@_authorized_route(_app, set(), "/storage/v1/version", methods=["GET"])
def version(self, request, authorization):
def version(self, request: Request, authorization: SecretsDict) -> KleinRenderable:
"""Return version information."""
return self._send_encoded(request, self._get_version())
@ -673,12 +745,14 @@ class HTTPServer(object):
methods=["POST"],
)
@async_to_deferred
async def allocate_buckets(self, request, authorization, storage_index):
async def allocate_buckets(
self, request: Request, authorization: SecretsDict, storage_index: bytes
) -> KleinRenderable:
"""Allocate buckets."""
upload_secret = authorization[Secrets.UPLOAD]
# It's just a list of up to ~256 shares, so it shouldn't use many bytes.
info = await self._read_encoded(
request, _SCHEMAS["allocate_buckets"], max_size=8192
info = await read_encoded(
self._reactor, request, _SCHEMAS["allocate_buckets"], max_size=8192
)
# We do NOT validate the upload secret for existing bucket uploads.
@ -712,7 +786,13 @@ class HTTPServer(object):
"/storage/v1/immutable/<storage_index:storage_index>/<int(signed=False):share_number>/abort",
methods=["PUT"],
)
def abort_share_upload(self, request, authorization, storage_index, share_number):
def abort_share_upload(
self,
request: Request,
authorization: SecretsDict,
storage_index: bytes,
share_number: int,
) -> KleinRenderable:
"""Abort an in-progress immutable share upload."""
try:
bucket = self._uploads.get_write_bucket(
@ -743,7 +823,13 @@ class HTTPServer(object):
"/storage/v1/immutable/<storage_index:storage_index>/<int(signed=False):share_number>",
methods=["PATCH"],
)
def write_share_data(self, request, authorization, storage_index, share_number):
def write_share_data(
self,
request: Request,
authorization: SecretsDict,
storage_index: bytes,
share_number: int,
) -> KleinRenderable:
"""Write data to an in-progress immutable upload."""
content_range = parse_content_range_header(request.getHeader("content-range"))
if content_range is None or content_range.units != "bytes":
@ -753,14 +839,17 @@ class HTTPServer(object):
bucket = self._uploads.get_write_bucket(
storage_index, share_number, authorization[Secrets.UPLOAD]
)
offset = content_range.start
remaining = content_range.stop - content_range.start
offset = content_range.start or 0
# We don't support an unspecified stop for the range:
assert content_range.stop is not None
# Missing body makes no sense:
assert request.content is not None
remaining = content_range.stop - offset
finished = False
while remaining > 0:
data = request.content.read(min(remaining, 65536))
assert data, "uploaded data length doesn't match range"
try:
finished = bucket.write(offset, data)
except ConflictingWriteError:
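
For reference, the range arithmetic above relies on werkzeug turning the inclusive HTTP end position into an exclusive ``stop``; a small sketch, using only werkzeug.http:

    from werkzeug.http import parse_content_range_header

    cr = parse_content_range_header("bytes 0-15/32")
    assert cr is not None and cr.units == "bytes"
    # werkzeug converts the inclusive end "15" into an exclusive stop of 16,
    # so stop - start is exactly the number of body bytes to consume.
    assert (cr.start, cr.stop, cr.length) == (0, 16, 32)
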
@ -786,7 +875,9 @@ class HTTPServer(object):
"/storage/v1/immutable/<storage_index:storage_index>/shares",
methods=["GET"],
)
def list_shares(self, request, authorization, storage_index):
def list_shares(
self, request: Request, authorization: SecretsDict, storage_index: bytes
) -> KleinRenderable:
"""
List shares for the given storage index.
"""
@ -799,7 +890,13 @@ class HTTPServer(object):
"/storage/v1/immutable/<storage_index:storage_index>/<int(signed=False):share_number>",
methods=["GET"],
)
def read_share_chunk(self, request, authorization, storage_index, share_number):
def read_share_chunk(
self,
request: Request,
authorization: SecretsDict,
storage_index: bytes,
share_number: int,
) -> KleinRenderable:
"""Read a chunk for an already uploaded immutable."""
request.setHeader("content-type", "application/octet-stream")
try:
@ -816,7 +913,9 @@ class HTTPServer(object):
"/storage/v1/lease/<storage_index:storage_index>",
methods=["PUT"],
)
def add_or_renew_lease(self, request, authorization, storage_index):
def add_or_renew_lease(
self, request: Request, authorization: SecretsDict, storage_index: bytes
) -> KleinRenderable:
"""Update the lease for an immutable or mutable share."""
if not list(self._storage_server.get_shares(storage_index)):
raise _HTTPError(http.NOT_FOUND)
@ -839,8 +938,12 @@ class HTTPServer(object):
)
@async_to_deferred
async def advise_corrupt_share_immutable(
self, request, authorization, storage_index, share_number
):
self,
request: Request,
authorization: SecretsDict,
storage_index: bytes,
share_number: int,
) -> KleinRenderable:
"""Indicate that given share is corrupt, with a text reason."""
try:
bucket = self._storage_server.get_buckets(storage_index)[share_number]
@ -849,7 +952,8 @@ class HTTPServer(object):
# The reason can be a string with explanation, so in theory it could be
# longish?
info = await self._read_encoded(
info = await read_encoded(
self._reactor,
request,
_SCHEMAS["advise_corrupt_share"],
max_size=32768,
@ -866,10 +970,15 @@ class HTTPServer(object):
methods=["POST"],
)
@async_to_deferred
async def mutable_read_test_write(self, request, authorization, storage_index):
async def mutable_read_test_write(
self, request: Request, authorization: SecretsDict, storage_index: bytes
) -> KleinRenderable:
"""Read/test/write combined operation for mutables."""
rtw_request = await self._read_encoded(
request, _SCHEMAS["mutable_read_test_write"], max_size=2**48
rtw_request = await read_encoded(
self._reactor,
request,
_SCHEMAS["mutable_read_test_write"],
max_size=2**48,
)
secrets = (
authorization[Secrets.WRITE_ENABLER],
@ -905,7 +1014,13 @@ class HTTPServer(object):
"/storage/v1/mutable/<storage_index:storage_index>/<int(signed=False):share_number>",
methods=["GET"],
)
def read_mutable_chunk(self, request, authorization, storage_index, share_number):
def read_mutable_chunk(
self,
request: Request,
authorization: SecretsDict,
storage_index: bytes,
share_number: int,
) -> KleinRenderable:
"""Read a chunk from a mutable."""
request.setHeader("content-type", "application/octet-stream")
@ -945,8 +1060,12 @@ class HTTPServer(object):
)
@async_to_deferred
async def advise_corrupt_share_mutable(
self, request, authorization, storage_index, share_number
):
self,
request: Request,
authorization: SecretsDict,
storage_index: bytes,
share_number: int,
) -> KleinRenderable:
"""Indicate that given share is corrupt, with a text reason."""
if share_number not in {
shnum for (shnum, _) in self._storage_server.get_shares(storage_index)
@ -955,8 +1074,8 @@ class HTTPServer(object):
# The reason can be a string with explanation, so in theory it could be
# longish?
info = await self._read_encoded(
request, _SCHEMAS["advise_corrupt_share"], max_size=32768
info = await read_encoded(
self._reactor, request, _SCHEMAS["advise_corrupt_share"], max_size=32768
)
self._storage_server.advise_corrupt_share(
b"mutable", storage_index, share_number, info["reason"].encode("utf-8")
@ -978,7 +1097,10 @@ class _TLSEndpointWrapper(object):
@classmethod
def from_paths(
cls, endpoint, private_key_path: FilePath, cert_path: FilePath
cls: type[_TLSEndpointWrapper],
endpoint: IStreamServerEndpoint,
private_key_path: FilePath,
cert_path: FilePath,
) -> "_TLSEndpointWrapper":
"""
Create an endpoint with the given private key and certificate paths on
@ -993,7 +1115,7 @@ class _TLSEndpointWrapper(object):
)
return cls(endpoint=endpoint, context_factory=certificate_options)
def listen(self, factory):
def listen(self, factory: IProtocolFactory) -> Deferred[IListeningPort]:
return self.endpoint.listen(
TLSMemoryBIOFactory(self.context_factory, False, factory)
)
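
A hedged sketch of wiring the wrapper up, with hypothetical key and certificate paths (serverFromString, Site, and Resource are stock Twisted pieces):

    from twisted.internet import reactor
    from twisted.internet.endpoints import serverFromString
    from twisted.python.filepath import FilePath
    from twisted.web.resource import Resource
    from twisted.web.server import Site

    endpoint = _TLSEndpointWrapper.from_paths(
        serverFromString(reactor, "tcp:8443"),         # plain TCP endpoint to wrap
        private_key_path=FilePath("/path/to/key.pem"),  # hypothetical path
        cert_path=FilePath("/path/to/cert.pem"),        # hypothetical path
    )
    d = endpoint.listen(Site(Resource()))  # fires with an IListeningPort
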

View File

@ -1429,7 +1429,7 @@ class _FakeRemoteReference(object):
result = yield getattr(self.local_object, action)(*args, **kwargs)
defer.returnValue(result)
except HTTPClientException as e:
raise RemoteException(e.args)
raise RemoteException((e.code, e.message, e.body))
@attr.s

View File

@ -42,6 +42,7 @@ from werkzeug.exceptions import NotFound as WNotFound
from testtools.matchers import Equals
from zope.interface import implementer
from ..util.deferredutil import async_to_deferred
from .common import SyncTestCase
from ..storage.http_common import (
get_content_type,
@ -59,6 +60,9 @@ from ..storage.http_server import (
_authorized_route,
StorageIndexConverter,
_add_error_handling,
read_encoded,
_SCHEMAS as SERVER_SCHEMAS,
BaseApp,
)
from ..storage.http_client import (
StorageClient,
@ -172,7 +176,7 @@ class ExtractSecretsTests(SyncTestCase):
``ClientSecretsException``.
"""
with self.assertRaises(ClientSecretsException):
_extract_secrets(["FOO eA=="], {})
_extract_secrets(["FOO eA=="], set())
def test_bad_secret_not_base64(self):
"""
@ -254,7 +258,7 @@ def gen_bytes(length: int) -> bytes:
return result
class TestApp(object):
class TestApp(BaseApp):
"""HTTP API for testing purposes."""
clock: IReactorTime
@ -262,7 +266,7 @@ class TestApp(object):
_add_error_handling(_app)
_swissnum = SWISSNUM_FOR_TEST # Match what the test client is using
@_authorized_route(_app, {}, "/noop", methods=["GET"])
@_authorized_route(_app, set(), "/noop", methods=["GET"])
def noop(self, request, authorization):
return "noop"
@ -303,6 +307,19 @@ class TestApp(object):
request.transport.loseConnection()
return Deferred()
@_authorized_route(_app, set(), "/read_body", methods=["POST"])
@async_to_deferred
async def read_body(self, request, authorization):
"""
Accept an advise_corrupt_share message and return the reason, i.e.
exercise the codepaths used for reading CBOR from the body.
"""
data = await read_encoded(
self.clock, request, SERVER_SCHEMAS["advise_corrupt_share"]
)
return data["reason"]
def result_of(d):
"""
@ -320,6 +337,7 @@ def result_of(d):
+ "This is probably a test design issue."
)
class CustomHTTPServerTests(SyncTestCase):
"""
Tests that use a custom HTTP server.
@ -504,6 +522,40 @@ class CustomHTTPServerTests(SyncTestCase):
result_of(d)
self.assertEqual(len(self._http_server.clock.getDelayedCalls()), 0)
def test_request_with_no_content_type_same_as_cbor(self):
"""
If no ``Content-Type`` header is set when sending a body, it is assumed
to be CBOR.
"""
response = result_of(
self.client.request(
"POST",
DecodedURL.from_text("http://127.0.0.1/read_body"),
data=dumps({"reason": "test"}),
)
)
self.assertEqual(
result_of(limited_content(response, self._http_server.clock, 100)).read(),
b"test",
)
def test_request_with_wrong_content(self):
"""
If a non-CBOR ``Content-Type`` header is set when sending a body, the
server complains appropriately.
"""
headers = Headers()
headers.setRawHeaders("content-type", ["some/value"])
response = result_of(
self.client.request(
"POST",
DecodedURL.from_text("http://127.0.0.1/read_body"),
data=dumps({"reason": "test"}),
headers=headers,
)
)
self.assertEqual(response.code, http.UNSUPPORTED_MEDIA_TYPE)
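
A third case, sketched here rather than present in the file, would pin the behavior down from the other side: an explicit CBOR ``Content-Type`` should be accepted just like the default (assuming CBOR_MIME_TYPE is imported from ..storage.http_common):

    headers = Headers()
    headers.setRawHeaders("content-type", [CBOR_MIME_TYPE])
    response = result_of(
        self.client.request(
            "POST",
            DecodedURL.from_text("http://127.0.0.1/read_body"),
            data=dumps({"reason": "test"}),
            headers=headers,
        )
    )
    self.assertEqual(
        result_of(limited_content(response, self._http_server.clock, 100)).read(),
        b"test",
    )
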
@implementer(IReactorFromThreads)
class Reactor(Clock):

View File

@ -109,9 +109,11 @@ class PinningHTTPSValidation(AsyncTestCase):
root.isLeaf = True
listening_port = await endpoint.listen(Site(root))
try:
yield f"https://127.0.0.1:{listening_port.getHost().port}/"
yield f"https://127.0.0.1:{listening_port.getHost().port}/" # type: ignore[attr-defined]
finally:
await listening_port.stopListening()
result = listening_port.stopListening()
if result is not None:
await result
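
The None-check accommodates stopListening()'s maybe-Deferred return; Twisted's maybeDeferred expresses the same thing more compactly (a sketch, usable inside the async context above):

    from twisted.internet.defer import maybeDeferred

    # stopListening() may return None or a Deferred; maybeDeferred
    # normalizes both into a Deferred that can be awaited.
    await maybeDeferred(listening_port.stopListening)
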
def request(self, url: str, expected_certificate: x509.Certificate):
"""