Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2025-03-11 06:43:54 +00:00)

Commit 6e3d617a81: Merge branch '4041' into 4047.test-foolscap-to-http-upgrade
.circleci/config.yml

@@ -89,20 +89,12 @@ workflows:
       - "nixos":
           name: "<<matrix.pythonVersion>>"
-          nixpkgs: "22.11"
+          nixpkgs: "nixpkgs-unstable"
           matrix:
             parameters:
               pythonVersion:
                 - "python38"
                 - "python39"
                 - "python310"
-
-      - "nixos":
-          name: "<<matrix.pythonVersion>>"
-          nixpkgs: "unstable"
-          matrix:
-            parameters:
-              pythonVersion:
                 - "python311"

       # Eventually, test against PyPy 3.8
@@ -385,8 +377,8 @@ jobs:
     parameters:
       nixpkgs:
         description: >-
-          Reference the name of a niv-managed nixpkgs source (see `niv show`
-          and nix/sources.json)
+          Reference the name of a flake-managed nixpkgs input (see `nix flake
+          metadata` and flake.nix)
         type: "string"
       pythonVersion:
         description: >-

@@ -403,14 +395,17 @@ jobs:
       - "run":
           name: "Unit Test"
           command: |
-            # The dependencies are all built so we can allow more
-            # parallelism here.
             source .circleci/lib.sh
-            cache_if_able nix-build \
-              --cores 8 \
-              --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
-              --argstr pythonVersion "<<parameters.pythonVersion>>" \
-              nix/tests.nix
+
+            # Translate the nixpkgs selection into a flake reference we
+            # can use to override the default nixpkgs input.
+            NIXPKGS=$(nixpkgs_flake_reference <<parameters.nixpkgs>>)
+
+            cache_if_able nix run \
+              --override-input nixpkgs "$NIXPKGS" \
+              .#<<parameters.pythonVersion>>-unittest -- \
+              --jobs $UNITTEST_CORES \
+              allmydata

   typechecks:
     docker:
@@ -536,20 +531,23 @@ executors:
   docker:
     # Run in a highly Nix-capable environment.
     - <<: *DOCKERHUB_AUTH
-      image: "nixos/nix:2.10.3"
+      image: "nixos/nix:2.16.1"
       environment:
         # CACHIX_AUTH_TOKEN is manually set in the CircleCI web UI and allows us
         # to push to CACHIX_NAME.  CACHIX_NAME tells cachix which cache to push
         # to.
         CACHIX_NAME: "tahoe-lafs-opensource"
+        # Let us use features marked "experimental".  For example, most/all of
+        # the `nix <subcommand>` forms.
+        NIX_CONFIG: "experimental-features = nix-command flakes"

 commands:
   nix-build:
     parameters:
       nixpkgs:
         description: >-
-          Reference the name of a niv-managed nixpkgs source (see `niv show`
-          and nix/sources.json)
+          Reference the name of a flake-managed nixpkgs input (see `nix flake
+          metadata` and flake.nix)
         type: "string"
       pythonVersion:
         description: >-
@@ -565,15 +563,17 @@ commands:
       # Get cachix for Nix-friendly caching.
       name: "Install Basic Dependencies"
       command: |
-        NIXPKGS="https://github.com/nixos/nixpkgs/archive/nixos-<<parameters.nixpkgs>>.tar.gz"
-        nix-env \
-          --file $NIXPKGS \
-          --install \
-          -A cachix bash
-        # Activate it for "binary substitution".  This sets up
-        # configuration that lets Nix download something from the cache
-        # instead of building it locally, if possible.
-        cachix use "${CACHIX_NAME}"
+        # Get some build environment dependencies and let them float on a
+        # certain release branch.  These aren't involved in the actual
+        # package build (only in CI environment setup) so the fact that
+        # they float shouldn't hurt reproducibility.
+        NIXPKGS="nixpkgs/nixos-23.05"
+        nix profile install $NIXPKGS#cachix $NIXPKGS#bash $NIXPKGS#jp
+
+        # Activate our cachix cache for "binary substitution".  This sets
+        # up configuration that lets Nix download something from the cache
+        # instead of building it locally, if possible.
+        cachix use "${CACHIX_NAME}"

   - "checkout"
@@ -585,32 +585,16 @@ commands:
           -p 'python3.withPackages (ps: [ ps.setuptools ])' \
           --run 'python setup.py update_version'

-    - "run":
-        name: "Build Dependencies"
-        command: |
-          # CircleCI build environment looks like it has a zillion and a
-          # half cores.  Don't let Nix autodetect this high core count
-          # because it blows up memory usage and fails the test run.  Pick a
-          # number of cores that suits the build environment we're paying
-          # for (the free one!).
-          source .circleci/lib.sh
-          # nix-shell will build all of the dependencies of the target but
-          # not the target itself.
-          cache_if_able nix-shell \
-            --run "" \
-            --cores 3 \
-            --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
-            --argstr pythonVersion "<<parameters.pythonVersion>>" \
-            ./default.nix
-
     - "run":
         name: "Build Package"
         command: |
           source .circleci/lib.sh
-          cache_if_able nix-build \
-            --cores 4 \
-            --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
-            --argstr pythonVersion "<<parameters.pythonVersion>>" \
-            ./default.nix
+          NIXPKGS=$(nixpkgs_flake_reference <<parameters.nixpkgs>>)
+          cache_if_able nix build \
+            --verbose \
+            --print-build-logs \
+            --cores "$DEPENDENCY_CORES" \
+            --override-input nixpkgs "$NIXPKGS" \
+            .#<<parameters.pythonVersion>>-tahoe-lafs

     - steps: "<<parameters.buildSteps>>"
.circleci/lib.sh

@@ -1,3 +1,13 @@
+# CircleCI build environment looks like it has a zillion and a half cores.
+# Don't let Nix autodetect this high core count because it blows up memory
+# usage and fails the test run.  Pick a number of cores that suits the build
+# environment we're paying for (the free one!).
+DEPENDENCY_CORES=3
+
+# Once dependencies are built, we can allow some more concurrency for our own
+# test suite.
+UNITTEST_CORES=8
+
 # Run a command, enabling cache writes to cachix if possible.  The command is
 # accepted as a variable number of positional arguments (like argv).
 function cache_if_able() {

@@ -117,3 +127,22 @@ function describe_build() {
         echo "Cache not writeable."
     fi
 }
+
+# Inspect the flake input metadata for an input of a given name and return the
+# revision at which that input is pinned.  If the input does not exist then
+# return garbage (probably "null").
+read_input_revision() {
+    input_name=$1
+    shift
+
+    nix flake metadata --json | jp --unquoted 'locks.nodes."'"$input_name"'".locked.rev'
+}
+
+# Return a flake reference that refers to a certain revision of nixpkgs.  The
+# certain revision is the revision to which the specified input is pinned.
+nixpkgs_flake_reference() {
+    input_name=$1
+    shift
+
+    echo "github:NixOS/nixpkgs?rev=$(read_input_revision $input_name)"
+}
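For intuition, the pinned-revision lookup these helpers perform can be sketched in Python directly against the flake.lock file added later in this commit.  This is a hypothetical illustration, not part of the change; it assumes the lock-file layout shown in the flake.lock section below:

    import json

    def nixpkgs_flake_reference(input_name: str, lock_path: str = "flake.lock") -> str:
        """Return github:NixOS/nixpkgs?rev=... for the pinned revision of an input."""
        with open(lock_path) as f:
            lock = json.load(f)
        # flake.lock stores each pin under nodes.<name>.locked.rev; this is the
        # same data `nix flake metadata --json` exposes under locks.nodes.
        rev = lock["nodes"][input_name]["locked"]["rev"]
        return f"github:NixOS/nixpkgs?rev={rev}"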
.github/workflows/ci.yml (vendored; 18 lines changed)

@@ -164,18 +164,20 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os:
-          # 22.04 has some issue with Tor at the moment:
-          # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3943
-          - ubuntu-20.04
-          - macos-12
-          - windows-latest
-        python-version:
-          - "3.11"
-        force-foolscap:
-          - false
         include:
+          - os: macos-12
+            python-version: "3.11"
+            force-foolscap: false
+          - os: windows-latest
+            python-version: "3.11"
+            force-foolscap: false
+          # 22.04 has some issue with Tor at the moment:
+          # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3943
           - os: ubuntu-20.04
             python-version: "3.10"
-            force-foolscap: false
+            force-foolscap: true
     steps:

     - name: Install Tor [Ubuntu]
default.nix (62 lines changed)

@@ -1,49 +1,13 @@
-let
-  # sources.nix contains information about which versions of some of our
-  # dependencies we should use.  since we use it to pin nixpkgs, all the rest
-  # of our dependencies are *also* pinned - indirectly.
-  #
-  # sources.nix is managed using a tool called `niv`.  as an example, to
-  # update to the most recent version of nixpkgs from the 21.11 maintenance
-  # release, in the top-level tahoe-lafs checkout directory you run:
-  #
-  #   niv update nixpkgs-21.11
-  #
-  # niv also supports choosing a specific revision, following a different
-  # branch, etc.  find complete documentation for the tool at
-  # https://github.com/nmattia/niv
-  sources = import nix/sources.nix;
-in
-{
-  pkgsVersion ? "nixpkgs-22.11" # a string which chooses a nixpkgs from the
-                                # niv-managed sources data
-
-, pkgs ? import sources.${pkgsVersion} { } # nixpkgs itself
-
-, pythonVersion ? "python310" # a string choosing the python derivation from
-                              # nixpkgs to target
-
-, extrasNames ? [ "tor" "i2p" ] # a list of strings identifying tahoe-lafs extras,
-                                # the dependencies of which the resulting
-                                # package will also depend on.  Include all of the
-                                # runtime extras by default because the incremental
-                                # cost of including them is a lot smaller than the
-                                # cost of re-building the whole thing to add them.
-
-}:
-with (pkgs.${pythonVersion}.override {
-  packageOverrides = import ./nix/python-overrides.nix;
-}).pkgs;
-callPackage ./nix/tahoe-lafs.nix {
-  # Select whichever package extras were requested.
-  inherit extrasNames;
-
-  # Define the location of the Tahoe-LAFS source to be packaged (the same
-  # directory as contains this file).  Clean up as many of the non-source
-  # files (eg the `.git` directory, `~` backup files, nix's own `result`
-  # symlink, etc) as possible to avoid needing to re-build when files that
-  # make no difference to the package have changed.
-  tahoe-lafs-src = pkgs.lib.cleanSource ./.;
-
-  doCheck = false;
-}
+# This is the flake-compat glue code.  It loads the flake and gives us its
+# outputs.  This gives us backwards compatibility with pre-flake consumers.
+# All of the real action is in flake.nix.
+(import
+  (
+    let lock = builtins.fromJSON (builtins.readFile ./flake.lock); in
+    fetchTarball {
+      url = "https://github.com/edolstra/flake-compat/archive/${lock.nodes.flake-compat.locked.rev}.tar.gz";
+      sha256 = lock.nodes.flake-compat.locked.narHash;
+    }
+  )
+  { src = ./.; }
+).defaultNix.default
docs/architecture.rst

@@ -57,6 +57,18 @@ The key-value store is implemented by a grid of Tahoe-LAFS storage servers --
 user-space processes.  Tahoe-LAFS storage clients communicate with the storage
 servers over TCP.

+There are two supported protocols:
+
+* Foolscap, the only supported protocol in releases before v1.19.
+* HTTPS, new in v1.19.
+
+By default HTTPS is enabled.  When HTTPS is enabled on the server, the server
+transparently listens for both Foolscap and HTTPS on the same port.  When it
+is disabled, the server only supports Foolscap.  Clients can use either; by
+default they will use HTTPS when possible, falling back to Foolscap, but when
+configured appropriately they will only use Foolscap.  At this time the only
+limitation of HTTPS is that I2P is not supported, so any usage of I2P only
+uses Foolscap.
+
 Storage servers hold data in the form of "shares".  Shares are encoded pieces
 of files.  There are a configurable number of shares for each file, 10 by
 default.  Normally, each share is stored on a separate server, but in some
docs/configuration.rst

@@ -679,6 +679,13 @@ Client Configuration
     location to prefer their local servers so that they can maintain access to
     all of their uploads without using the internet.

+``force_foolscap = (boolean, optional)``
+
+    If this is ``True``, the client will only connect to storage servers via
+    Foolscap, regardless of whether they support HTTPS.  If this is ``False``,
+    the client will prefer HTTPS when it is available on the server.  The
+    default value is ``False``.
+
 In addition,
 see :doc:`accepting-donations` for a convention for donating to storage server operators.
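As an illustration of how these ``force_foolscap`` options (here and in the storage-server section below) are consumed, here is a minimal sketch mirroring the ``get_config`` call that appears in the node.py hunk near the end of this diff.  Only the ``[storage]`` read is confirmed by this commit; the ``[client]`` analogue is an assumption for illustration:

    # `config` stands for a node's _Config object (see src/allmydata/node.py below).
    force_foolscap = config.get_config(
        "storage", "force_foolscap", default=False, boolean=True
    )
    # The [client] option would presumably be read the same way with the
    # "client" section name (hypothetical here, not shown in this diff).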
@@ -796,6 +803,13 @@ Storage Server Configuration
     (i.e. ``BASEDIR/storage``), but it can be placed elsewhere.  Relative paths
     will be interpreted relative to the node's base directory.

+``force_foolscap = (boolean, optional)``
+
+    If this is ``True``, the node will expose the storage server via Foolscap
+    only, with no support for HTTPS.  If this is ``False``, the server will
+    support both Foolscap and HTTPS on the same port.  The default value is
+    ``False``.
+
 In addition,
 see :doc:`accepting-donations` for a convention encouraging donations to storage server operators.
docs: HTTP storage node protocol specification

@@ -278,8 +278,8 @@ This NURL will be announced alongside their existing Foolscap-based server's fURL.
 Such an announcement will resemble this::

   {
-      "anonymous-storage-FURL": "pb://...",       # The old key
-      "gbs-anonymous-storage-url": "pb://...#v=1" # The new key
+      "anonymous-storage-FURL": "pb://...",        # The old entry
+      "anonymous-storage-NURLs": ["pb://...#v=1"]  # The new, additional entry
   }
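A client consuming such an announcement can prefer the new entry and fall back to the old one, matching the transition behavior the spec describes.  This is a hypothetical sketch, not code from the change; the key names come from the announcement above:

    def pick_storage_addresses(announcement: dict) -> list[str]:
        """Prefer new-style NURLs, fall back to the old Foolscap fURL."""
        nurls = announcement.get("anonymous-storage-NURLs")
        if nurls:
            return list(nurls)
        return [announcement["anonymous-storage-FURL"]]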
The transition process will proceed in three stages:

@@ -320,12 +320,7 @@ The following sequence of events is likely:

-Ideally,
-the client would not rely on an update from the introducer to give it the GBS NURL for the updated storage server.
-Therefore,
-when an updated client connects to a storage server using Foolscap,
-it should request the server's version information.
-If this information indicates that GBS is supported then the client should cache this GBS information.
-On subsequent connection attempts,
-it should make use of this GBS information.
+In practice, we have decided not to implement this functionality.

 Server Details
 --------------
flake.lock (generated; new file, +115 lines)

{
  "nodes": {
    "flake-compat": {
      "flake": false,
      "locked": {
        "lastModified": 1673956053,
        "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
        "owner": "edolstra",
        "repo": "flake-compat",
        "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
        "type": "github"
      },
      "original": {
        "owner": "edolstra",
        "repo": "flake-compat",
        "type": "github"
      }
    },
    "flake-utils": {
      "inputs": {
        "systems": "systems"
      },
      "locked": {
        "lastModified": 1687709756,
        "narHash": "sha256-Y5wKlQSkgEK2weWdOu4J3riRd+kV/VCgHsqLNTTWQ/0=",
        "owner": "numtide",
        "repo": "flake-utils",
        "rev": "dbabf0ca0c0c4bce6ea5eaf65af5cb694d2082c7",
        "type": "github"
      },
      "original": {
        "owner": "numtide",
        "repo": "flake-utils",
        "type": "github"
      }
    },
    "nixpkgs-22_11": {
      "locked": {
        "lastModified": 1688392541,
        "narHash": "sha256-lHrKvEkCPTUO+7tPfjIcb7Trk6k31rz18vkyqmkeJfY=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "ea4c80b39be4c09702b0cb3b42eab59e2ba4f24b",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-22.11",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs-23_05": {
      "locked": {
        "lastModified": 1689885880,
        "narHash": "sha256-2ikAcvHKkKh8J/eUrwMA+wy1poscC+oL1RkN1V3RmT8=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "fa793b06f56896b7d1909e4b69977c7bf842b2f0",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "nixos-23.05",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "nixpkgs-unstable": {
      "locked": {
        "lastModified": 1689791806,
        "narHash": "sha256-QpXjfiyBFwa7MV/J6nM5FoBreks9O7j9cAZxV22MR8A=",
        "owner": "NixOS",
        "repo": "nixpkgs",
        "rev": "439ba0789ff84dddea64eb2d47a4a0d4887dbb1f",
        "type": "github"
      },
      "original": {
        "owner": "NixOS",
        "ref": "pull/244135/head",
        "repo": "nixpkgs",
        "type": "github"
      }
    },
    "root": {
      "inputs": {
        "flake-compat": "flake-compat",
        "flake-utils": "flake-utils",
        "nixpkgs": [
          "nixpkgs-unstable"
        ],
        "nixpkgs-22_11": "nixpkgs-22_11",
        "nixpkgs-23_05": "nixpkgs-23_05",
        "nixpkgs-unstable": "nixpkgs-unstable"
      }
    },
    "systems": {
      "locked": {
        "lastModified": 1681028828,
        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
        "owner": "nix-systems",
        "repo": "default",
        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
        "type": "github"
      },
      "original": {
        "owner": "nix-systems",
        "repo": "default",
        "type": "github"
      }
    }
  },
  "root": "root",
  "version": 7
}
flake.nix (new file, +240 lines)

{
  description = "Tahoe-LAFS, free and open decentralized data store";

  nixConfig = {
    # Supply configuration for the build cache updated by our CI system.  This
    # should allow most users to avoid having to build a large number of
    # packages (otherwise necessary due to our Python package overrides).
    substituters = ["https://tahoe-lafs-opensource.cachix.org"];
    trusted-public-keys = ["tahoe-lafs-opensource.cachix.org-1:eIKCHOPJYceJ2gb74l6e0mayuSdXqiavxYeAio0LFGo="];
  };

  inputs = {
    # A couple possible nixpkgs pins.  Ideally these could be selected easily
    # from the command line but there seems to be no syntax/support for that.
    # However, these at least cause certain revisions to be pinned in our lock
    # file where you *can* dig them out - and the CI configuration does.
    #
    # These are really just examples for the time being since neither of these
    # releases contains a package set that is completely compatible with our
    # requirements.  We could decide in the future that supporting multiple
    # releases of NixOS at a time is worthwhile and then pins like these will
    # help us test each of those releases.
    "nixpkgs-22_11" = {
      url = github:NixOS/nixpkgs?ref=nixos-22.11;
    };
    "nixpkgs-23_05" = {
      url = github:NixOS/nixpkgs?ref=nixos-23.05;
    };

    # We depend on a very new python-cryptography which is not yet available
    # from any release branch of nixpkgs.  However, it is contained in a PR
    # currently up for review.  Point our nixpkgs at that for now.
    "nixpkgs-unstable" = {
      url = github:NixOS/nixpkgs?ref=pull/244135/head;
    };

    # Point the default nixpkgs at one of those.  This avoids getting a
    # _third_ package set involved and gives a way to provide what should be a
    # working experience by default (that is, if nixpkgs doesn't get
    # overridden).
    nixpkgs.follows = "nixpkgs-unstable";

    # Also get flake-utils for simplified multi-system definitions.
    flake-utils = {
      url = github:numtide/flake-utils;
    };

    # And get a helper that lets us easily continue to provide a default.nix.
    flake-compat = {
      url = "github:edolstra/flake-compat";
      flake = false;
    };
  };

  outputs = { self, nixpkgs, flake-utils, ... }:
    {
      # Expose an overlay which adds our version of Tahoe-LAFS to the Python
      # package sets we specify, as well as all of the correct versions of its
      # dependencies.
      #
      # We will also use this to define some other outputs since it gives us
      # the most succinct way to get a working Tahoe-LAFS package.
      overlays.default = import ./nix/overlay.nix;

    } // (flake-utils.lib.eachDefaultSystem (system: let

      # The package set for this system architecture.
      pkgs = import nixpkgs {
        inherit system;
        # And include our Tahoe-LAFS package in that package set.
        overlays = [ self.overlays.default ];
      };

      # pythonVersions :: [string]
      #
      # The version strings for the Python runtimes we'll work with.
      pythonVersions =
        let
          # Match attribute names that look like a Python derivation - CPython
          # or PyPy.  We take care to avoid things like "python-foo" and
          # "python3Full-unittest" though.  We only want things like "pypy38"
          # or "python311".
          nameMatches = name: null != builtins.match "(python|pypy)3[[:digit:]]{0,2}" name;

          # Sometimes an old version is left in the package set as an error
          # saying something like "we remove this".  Make sure whatever we
          # found by name evaluates without error, too.
          notError = drv: (builtins.tryEval drv).success;
        in
          # Discover all of the Python runtime derivations by inspecting names
          # and filtering out derivations with errors.
          builtins.attrNames (
            pkgs.lib.attrsets.filterAttrs
              (name: drv: nameMatches name && notError drv)
              pkgs
          );

      # defaultPyVersion :: string
      #
      # An element of pythonVersions which we'll use for the default package.
      defaultPyVersion = "python3";

      # pythons :: [derivation]
      #
      # Retrieve the actual Python package for each configured version.  We
      # already applied our overlay to pkgs so our packages will already be
      # available.
      pythons = builtins.map (pyVer: pkgs.${pyVer}) pythonVersions;

      # packageName :: string -> string
      #
      # Construct the Tahoe-LAFS package name for the given Python runtime.
      packageName = pyVersion: "${pyVersion}-tahoe-lafs";

      # string -> string
      #
      # Construct the unit test application name for the given Python runtime.
      unitTestName = pyVersion: "${pyVersion}-unittest";

      # (string -> a) -> (string -> b) -> string -> attrset a b
      #
      # Make a singleton attribute set from the result of two functions.
      singletonOf = f: g: x: { ${f x} = g x; };

      # [attrset] -> attrset
      #
      # Merge a list of attrset into a single attrset with overlap preferring
      # rightmost values.
      mergeAttrs = pkgs.lib.foldr pkgs.lib.mergeAttrs {};

      # makeRuntimeEnv :: string -> derivation
      #
      # Create a derivation that includes a Python runtime, Tahoe-LAFS, and
      # all of its dependencies.
      makeRuntimeEnv = singletonOf packageName makeRuntimeEnv';
      makeRuntimeEnv' = pyVersion: (pkgs.${pyVersion}.withPackages (ps: with ps;
        [ tahoe-lafs ] ++
        tahoe-lafs.passthru.extras.i2p ++
        tahoe-lafs.passthru.extras.tor
      )).overrideAttrs (old: {
        # By default, withPackages gives us a derivation with a fairly generic
        # name (like "python-env").  Put our name in there for legibility.
        # See the similar override in makeTestEnv.
        name = packageName pyVersion;
      });

      # makeTestEnv :: string -> derivation
      #
      # Create a derivation that includes a Python runtime and all of the
      # Tahoe-LAFS dependencies, but not Tahoe-LAFS itself, which we'll get
      # from the working directory.
      makeTestEnv = pyVersion: (pkgs.${pyVersion}.withPackages (ps: with ps;
        [ tahoe-lafs ] ++
        tahoe-lafs.passthru.extras.i2p ++
        tahoe-lafs.passthru.extras.tor ++
        tahoe-lafs.passthru.extras.unittest
      )).overrideAttrs (old: {
        # See the similar override in makeRuntimeEnv'.
        name = packageName pyVersion;
      });
    in {
      # Include a package set with our overlay on it in our own output.  This
      # is mainly a development/debugging convenience as it will expose all of
      # our Python package overrides beneath it.  The magic name
      # "legacyPackages" is copied from nixpkgs and has special support in the
      # nix command line tool.
      legacyPackages = pkgs;

      # The flake's package outputs.  We'll define one version of the package
      # for each version of Python we could find.  We'll also point the
      # flake's "default" package at the derivation corresponding to the
      # default Python version we defined above.  The package consists of a
      # Python environment with Tahoe-LAFS available to it.
      packages =
        mergeAttrs (
          [ { default = self.packages.${system}.${packageName defaultPyVersion}; } ]
          ++ (builtins.map makeRuntimeEnv pythonVersions)
          ++ (builtins.map (singletonOf unitTestName makeTestEnv) pythonVersions)
        );

      # The flake's app outputs.  We'll define a version of an app for running
      # the test suite for each version of Python we could find.  We'll also
      # define a version of an app for running the "tahoe" command-line
      # entrypoint for each version of Python we could find.
      apps =
        let
          # writeScript :: string -> string -> path
          #
          # Write a shell program to a file so it can be run later.
          #
          # We avoid writeShellApplication here because it has ghc as a
          # dependency but ghc has Python as a dependency and our Python
          # package override triggers a rebuild of ghc and many Haskell
          # packages which takes a looong time.
          writeScript = name: text: "${pkgs.writeShellScript name text}";

          # makeTahoeApp :: string -> attrset
          #
          # A helper function to define the Tahoe-LAFS runtime entrypoint for
          # a certain Python runtime.
          makeTahoeApp = pyVersion: {
            "tahoe-${pyVersion}" = {
              type = "app";
              program =
                writeScript "tahoe"
                  ''
                  ${makeRuntimeEnv' pyVersion}/bin/tahoe "$@"
                  '';
            };
          };

          # makeUnitTestsApp :: string -> attrset
          #
          # A helper function to define the Tahoe-LAFS unit test entrypoint
          # for a certain Python runtime.
          makeUnitTestsApp = pyVersion: {
            "${unitTestName pyVersion}" = {
              type = "app";
              program =
                let
                  python = "${makeTestEnv pyVersion}/bin/python";
                in
                  writeScript "unit-tests"
                    ''
                    ${python} setup.py update_version
                    export TAHOE_LAFS_HYPOTHESIS_PROFILE=ci
                    export PYTHONPATH=$PWD/src
                    ${python} -m twisted.trial "$@"
                    '';
            };
          };
        in
          # Merge a default app definition with the rest of the apps.
          mergeAttrs (
            [ { default = self.apps.${system}."tahoe-python3"; } ]
            ++ (builtins.map makeUnitTestsApp pythonVersions)
            ++ (builtins.map makeTahoeApp pythonVersions)
          );
    }));
}
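For readers less familiar with Nix, the helper combinators above have direct Python analogues.  This sketch is purely illustrative (names and the stub environment value are hypothetical); it mirrors how the flake assembles its packages attribute set:

    # Python analogues of the flake.nix helpers, for intuition only.
    def singleton_of(f, g):
        """Like `singletonOf = f: g: x: { ${f x} = g x; }`."""
        return lambda x: {f(x): g(x)}

    def merge_attrs(dicts):
        """Like `mergeAttrs`: merge, rightmost value winning on overlap."""
        merged = {}
        for d in dicts:
            merged.update(d)
        return merged

    package_name = lambda py: f"{py}-tahoe-lafs"   # packageName
    make_env = lambda py: f"<env for {py}>"        # stands in for makeRuntimeEnv'
    python_versions = ["python310", "python311"]   # discovered dynamically in the flake

    packages = merge_attrs(
        [singleton_of(package_name, make_env)(py) for py in python_versions]
    )
    # {'python310-tahoe-lafs': '<env for python310>',
    #  'python311-tahoe-lafs': '<env for python311>'}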
integration/conftest.py

@@ -285,7 +285,7 @@ def introducer_furl(introducer, temp_dir):
     include_args=["temp_dir", "flog_gatherer"],
     include_result=False,
 )
-def tor_introducer(reactor, temp_dir, flog_gatherer, request):
+def tor_introducer(reactor, temp_dir, flog_gatherer, request, tor_control_port):
     intro_dir = join(temp_dir, 'introducer_tor')
     print("making Tor introducer in {}".format(intro_dir))
     print("(this can take tens of seconds to allocate Onion address)")

@@ -299,9 +299,7 @@ def tor_introducer(reactor, temp_dir, flog_gatherer, request):
         request,
         (
             'create-introducer',
-            # The control port should agree with the configuration of the
-            # Tor network we bootstrap with chutney.
-            '--tor-control-port', 'tcp:localhost:8007',
+            '--tor-control-port', tor_control_port,
             '--hide-ip',
             '--listen=tor',
             intro_dir,

@@ -516,6 +514,17 @@ def chutney(reactor, temp_dir: str) -> tuple[str, dict[str, str]]:

     return (chutney_dir, {"PYTHONPATH": join(chutney_dir, "lib")})

+@pytest.fixture(scope='session')
+def tor_control_port(tor_network):
+    """
+    Get an endpoint description for the Tor control port for the local Tor
+    network we run.
+    """
+    # We ignore tor_network because it can't tell us the control port.  But
+    # asking for it forces the Tor network to be built before we run - so if
+    # we get the hard-coded control port value correct, there should be
+    # something listening at that address.
+    return 'tcp:localhost:8007'

 @pytest.fixture(scope='session')
 @pytest.mark.skipif(sys.platform.startswith('win'),
mypy.ini (2 lines changed)

@@ -9,7 +9,7 @@ no_implicit_optional = True
 warn_redundant_casts = True
 strict_equality = True

-[mypy-allmydata.test.cli.wormholetesting,allmydata.test.test_connection_status]
+[mypy-allmydata.test.cli.wormholetesting,allmydata.listeners,allmydata.test.test_connection_status]
 disallow_any_generics = True
 disallow_subclassing_any = True
 disallow_untyped_calls = True
newsfragments/4004.minor (new, empty)

newsfragments/4039.documentation (new file, 1 line)
+Document the ``force_foolscap`` configuration options for ``[storage]`` and ``[client]``.

newsfragments/4040.minor (new, empty)

newsfragments/4041.feature (new file, 1 line)
+The storage server and client now support a new, HTTPS-based protocol.

newsfragments/4042.minor, newsfragments/4046.minor, newsfragments/4049.minor,
newsfragments/4050.minor, newsfragments/4051.minor (new, empty)
nix/overlay.nix (new file, +10 lines)

# This overlay adds Tahoe-LAFS and all of its properly-configured Python
# package dependencies to a Python package set.  Downstream consumers can
# apply it to their own nixpkgs derivation to produce a Tahoe-LAFS package.
final: prev: {
  # Add our overrides such that they will be applied to any Python derivation
  # in nixpkgs.
  pythonPackagesExtensions = prev.pythonPackagesExtensions ++ [
    (import ./python-overrides.nix)
  ];
}
nix/python-overrides.nix

@@ -9,14 +9,39 @@ let
   # Disable a Python package's test suite.
   dontCheck = drv: drv.overrideAttrs (old: { doInstallCheck = false; });

+  # string -> any -> derivation -> derivation
+  #
+  # If the overrideable function for the given derivation accepts an argument
+  # with the given name, override it with the given value.
+  #
+  # Since we try to work with multiple versions of nixpkgs, sometimes we need
+  # to override a parameter that exists in one version but not others.  This
+  # makes it a bit easier to do so.
+  overrideIfPresent = name: value: drv:
+    if (drv.override.__functionArgs ? ${name})
+    then drv.override { "${name}" = value; }
+    else drv;
+
   # Disable building a Python package's documentation.
-  dontBuildDocs = alsoDisable: drv: (drv.override ({
-    sphinxHook = null;
-  } // alsoDisable)).overrideAttrs ({ outputs, ... }: {
+  dontBuildDocs = drv: (
+    overrideIfPresent "sphinxHook" null (
+      overrideIfPresent "sphinx-rtd-theme" null
+        drv
+    )
+  ).overrideAttrs ({ outputs, ... }: {
     outputs = builtins.filter (x: "doc" != x) outputs;
   });

 in {
+  tahoe-lafs = self.callPackage ./tahoe-lafs.nix {
+    # Define the location of the Tahoe-LAFS source to be packaged (the same
+    # directory as contains this file).  Clean up as many of the non-source
+    # files (eg the `.git` directory, `~` backup files, nix's own `result`
+    # symlink, etc) as possible to avoid needing to re-build when files that
+    # make no difference to the package have changed.
+    tahoe-lafs-src = self.lib.cleanSource ../.;
+  };
+
   # Some dependencies aren't packaged in nixpkgs so supply our own packages.
   pycddl = self.callPackage ./pycddl.nix { };
   txi2p = self.callPackage ./txi2p.nix { };
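The new overrideIfPresent helper guards against nixpkgs versions whose package functions lack a given parameter.  A rough Python analogue, hypothetical and for intuition only:

    import inspect

    def override_if_present(name, value, func):
        """Call func with name=value only if func accepts that parameter,
        mirroring the Nix check `drv.override.__functionArgs ? ${name}`."""
        if name in inspect.signature(func).parameters:
            return func(**{name: value})
        return func()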
@@ -30,15 +55,23 @@ in {
     inherit (super) txtorcon;
   };

-  # Update the version of pyopenssl.
-  pyopenssl = self.callPackage ./pyopenssl.nix {
-    pyopenssl =
-      # Building the docs requires sphinx which brings in a dependency on babel,
-      # the test suite of which fails.
-      onPyPy (dontBuildDocs { sphinx-rtd-theme = null; })
-        # Avoid infinite recursion.
-        super.pyopenssl;
-  };
+  # With our customized package set a Twisted unit test fails.  Patch the
+  # Twisted test suite to skip that test.
+  # Filed upstream at https://github.com/twisted/twisted/issues/11877
+  twisted = super.twisted.overrideAttrs (old: {
+    patches = (old.patches or []) ++ [ ./twisted.patch ];
+  });
+
+  # Update the version of pyopenssl - and since we're doing that anyway, we
+  # don't need the docs.  Unfortunately this triggers a lot of rebuilding of
+  # dependent packages.
+  pyopenssl = dontBuildDocs (self.callPackage ./pyopenssl.nix {
+    inherit (super) pyopenssl;
+  });
+
+  # The cryptography that we get from nixpkgs to satisfy the pyopenssl upgrade
+  # that we did breaks service-identity ... so get a newer version that works.
+  service-identity = self.callPackage ./service-identity.nix { };

   # collections-extended is currently broken for Python 3.11 in nixpkgs but
   # we know where a working version lives.

@@ -52,16 +85,19 @@ in {

   # tornado and tk pull in huge dependency trees for functionality we don't
   # care about, also tkinter doesn't work on PyPy.
-  matplotlib = super.matplotlib.override { tornado = null; enableTk = false; };
+  matplotlib = onPyPy (matplotlib: matplotlib.override {
+    tornado = null;
+    enableTk = false;
+  }) super.matplotlib;

-  tqdm = super.tqdm.override {
+  tqdm = onPyPy (tqdm: tqdm.override {
     # ibid.
     tkinter = null;
     # pandas is only required by the part of the test suite covering
     # integration with pandas that we don't care about.  pandas is a huge
     # dependency.
     pandas = null;
-  };
+  }) super.tqdm;

   # The treq test suite depends on httpbin.  httpbin pulls in babel (flask ->
   # jinja2 -> babel) and arrow (brotlipy -> construct -> arrow).  babel fails

@@ -74,48 +110,25 @@ in {
   six = onPyPy dontCheck super.six;

   # Likewise for beautifulsoup4.
-  beautifulsoup4 = onPyPy (dontBuildDocs {}) super.beautifulsoup4;
+  beautifulsoup4 = onPyPy dontBuildDocs super.beautifulsoup4;

   # The autobahn test suite pulls in a vast number of dependencies for
   # functionality we don't care about.  It might be nice to *selectively*
   # disable just some of it but this is easier.
-  autobahn = onPyPy dontCheck super.autobahn;
+  autobahn = dontCheck super.autobahn;

   # and python-dotenv tests pull in a lot of dependencies, including jedi,
   # which does not work on PyPy.
   python-dotenv = onPyPy dontCheck super.python-dotenv;

-  # Upstream package unaccountably includes a sqlalchemy dependency ... but
-  # the project has no such dependency.  Fixed in nixpkgs in
-  # da10e809fff70fbe1d86303b133b779f09f56503.
-  aiocontextvars = super.aiocontextvars.override { sqlalchemy = null; };
-
   # By default, the sphinx docs are built, which pulls in a lot of
   # dependencies - including jedi, which does not work on PyPy.
-  hypothesis =
-    (let h = super.hypothesis;
-     in
-       if (h.override.__functionArgs.enableDocumentation or false)
-       then h.override { enableDocumentation = false; }
-       else h).overrideAttrs ({ nativeBuildInputs, ... }: {
-         # The nixpkgs expression is missing the tzdata check input.
-         nativeBuildInputs = nativeBuildInputs ++ [ super.tzdata ];
-       });
+  hypothesis = onPyPy dontBuildDocs super.hypothesis;

   # flaky's test suite depends on nose and nose appears to have Python 3
   # incompatibilities (it includes `print` statements, for example).
   flaky = onPyPy dontCheck super.flaky;

-  # Replace the deprecated way of running the test suite with the modern way.
-  # This also drops a bunch of unnecessary build-time dependencies, some of
-  # which are broken on PyPy.  Fixed in nixpkgs in
-  # 5feb5054bb08ba779bd2560a44cf7d18ddf37fea.
-  zfec = (super.zfec.override {
-    setuptoolsTrial = null;
-  }).overrideAttrs (old: {
-    checkPhase = "trial zfec";
-  });
-
   # collections-extended is packaged with poetry-core.  poetry-core test suite
   # uses virtualenv and virtualenv test suite fails on PyPy.
   poetry-core = onPyPy dontCheck super.poetry-core;

@@ -134,15 +147,6 @@ in {
   # since we actually depend directly and significantly on Foolscap.
   foolscap = onPyPy dontCheck super.foolscap;

-  # Fixed by nixpkgs PR https://github.com/NixOS/nixpkgs/pull/222246
-  psutil = super.psutil.overrideAttrs ({ pytestFlagsArray, disabledTests, ...}: {
-    # Upstream already disables some tests but there are even more that have
-    # build impurities that come from build system hardware configuration.
-    # Skip them too.
-    pytestFlagsArray = [ "-v" ] ++ pytestFlagsArray;
-    disabledTests = disabledTests ++ [ "sensors_temperatures" ];
-  });
-
   # CircleCI build systems don't have enough memory to run this test suite.
-  lz4 = dontCheck super.lz4;
+  lz4 = onPyPy dontCheck super.lz4;
 }
nix/service-identity.nix (new file, +61 lines)

{ lib
, attrs
, buildPythonPackage
, cryptography
, fetchFromGitHub
, hatch-fancy-pypi-readme
, hatch-vcs
, hatchling
, idna
, pyasn1
, pyasn1-modules
, pytestCheckHook
, pythonOlder
, setuptools
}:

buildPythonPackage rec {
  pname = "service-identity";
  version = "23.1.0";
  format = "pyproject";

  disabled = pythonOlder "3.8";

  src = fetchFromGitHub {
    owner = "pyca";
    repo = pname;
    rev = "refs/tags/${version}";
    hash = "sha256-PGDtsDgRwh7GuuM4OuExiy8L4i3Foo+OD0wMrndPkvo=";
  };

  nativeBuildInputs = [
    hatch-fancy-pypi-readme
    hatch-vcs
    hatchling
    setuptools
  ];

  propagatedBuildInputs = [
    attrs
    cryptography
    idna
    pyasn1
    pyasn1-modules
  ];

  nativeCheckInputs = [
    pytestCheckHook
  ];

  pythonImportsCheck = [
    "service_identity"
  ];

  meta = with lib; {
    description = "Service identity verification for pyOpenSSL";
    homepage = "https://service-identity.readthedocs.io";
    changelog = "https://github.com/pyca/service-identity/releases/tag/${version}";
    license = licenses.mit;
    maintainers = with maintainers; [ fab ];
  };
}
nix/sources.json (removed; -38 lines)

{
    "niv": {
        "branch": "master",
        "description": "Easy dependency management for Nix projects",
        "homepage": "https://github.com/nmattia/niv",
        "owner": "nmattia",
        "repo": "niv",
        "rev": "5830a4dd348d77e39a0f3c4c762ff2663b602d4c",
        "sha256": "1d3lsrqvci4qz2hwjrcnd8h5vfkg8aypq3sjd4g3izbc8frwz5sm",
        "type": "tarball",
        "url": "https://github.com/nmattia/niv/archive/5830a4dd348d77e39a0f3c4c762ff2663b602d4c.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    },
    "nixpkgs-22.11": {
        "branch": "nixos-22.11",
        "description": "Nix Packages collection",
        "homepage": "",
        "owner": "nixos",
        "repo": "nixpkgs",
        "rev": "970402e6147c49603f4d06defe44d27fe51884ce",
        "sha256": "1v0ljy7wqq14ad3gd1871fgvd4psr7dy14q724k0wwgxk7inbbwh",
        "type": "tarball",
        "url": "https://github.com/nixos/nixpkgs/archive/970402e6147c49603f4d06defe44d27fe51884ce.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    },
    "nixpkgs-unstable": {
        "branch": "master",
        "description": "Nix Packages collection",
        "homepage": "",
        "owner": "nixos",
        "repo": "nixpkgs",
        "rev": "d0c9a536331227ab883b4f6964be638fa436d81f",
        "sha256": "1gg6v5rk1p26ciygdg262zc5vqws753rvgcma5rim2s6gyfrjaq1",
        "type": "tarball",
        "url": "https://github.com/nixos/nixpkgs/archive/d0c9a536331227ab883b4f6964be638fa436d81f.tar.gz",
        "url_template": "https://github.com/<owner>/<repo>/archive/<rev>.tar.gz"
    }
}
nix/sources.nix (removed; -174 lines)

# This file has been generated by Niv.

let

  #
  # The fetchers. fetch_<type> fetches specs of type <type>.
  #

  fetch_file = pkgs: name: spec:
    let
      name' = sanitizeName name + "-src";
    in
      if spec.builtin or true then
        builtins_fetchurl { inherit (spec) url sha256; name = name'; }
      else
        pkgs.fetchurl { inherit (spec) url sha256; name = name'; };

  fetch_tarball = pkgs: name: spec:
    let
      name' = sanitizeName name + "-src";
    in
      if spec.builtin or true then
        builtins_fetchTarball { name = name'; inherit (spec) url sha256; }
      else
        pkgs.fetchzip { name = name'; inherit (spec) url sha256; };

  fetch_git = name: spec:
    let
      ref =
        if spec ? ref then spec.ref else
          if spec ? branch then "refs/heads/${spec.branch}" else
            if spec ? tag then "refs/tags/${spec.tag}" else
              abort "In git source '${name}': Please specify `ref`, `tag` or `branch`!";
    in
      builtins.fetchGit { url = spec.repo; inherit (spec) rev; inherit ref; };

  fetch_local = spec: spec.path;

  fetch_builtin-tarball = name: throw
    ''[${name}] The niv type "builtin-tarball" is deprecated. You should instead use `builtin = true`.
        $ niv modify ${name} -a type=tarball -a builtin=true'';

  fetch_builtin-url = name: throw
    ''[${name}] The niv type "builtin-url" will soon be deprecated. You should instead use `builtin = true`.
        $ niv modify ${name} -a type=file -a builtin=true'';

  #
  # Various helpers
  #

  # https://github.com/NixOS/nixpkgs/pull/83241/files#diff-c6f540a4f3bfa4b0e8b6bafd4cd54e8bR695
  sanitizeName = name:
    (
      concatMapStrings (s: if builtins.isList s then "-" else s)
        (
          builtins.split "[^[:alnum:]+._?=-]+"
            ((x: builtins.elemAt (builtins.match "\\.*(.*)" x) 0) name)
        )
    );

  # The set of packages used when specs are fetched using non-builtins.
  mkPkgs = sources: system:
    let
      sourcesNixpkgs =
        import (builtins_fetchTarball { inherit (sources.nixpkgs) url sha256; }) { inherit system; };
      hasNixpkgsPath = builtins.any (x: x.prefix == "nixpkgs") builtins.nixPath;
      hasThisAsNixpkgsPath = <nixpkgs> == ./.;
    in
      if builtins.hasAttr "nixpkgs" sources
      then sourcesNixpkgs
      else if hasNixpkgsPath && ! hasThisAsNixpkgsPath then
        import <nixpkgs> {}
      else
        abort
          ''
            Please specify either <nixpkgs> (through -I or NIX_PATH=nixpkgs=...) or
            add a package called "nixpkgs" to your sources.json.
          '';

  # The actual fetching function.
  fetch = pkgs: name: spec:

    if ! builtins.hasAttr "type" spec then
      abort "ERROR: niv spec ${name} does not have a 'type' attribute"
    else if spec.type == "file" then fetch_file pkgs name spec
    else if spec.type == "tarball" then fetch_tarball pkgs name spec
    else if spec.type == "git" then fetch_git name spec
    else if spec.type == "local" then fetch_local spec
    else if spec.type == "builtin-tarball" then fetch_builtin-tarball name
    else if spec.type == "builtin-url" then fetch_builtin-url name
    else
      abort "ERROR: niv spec ${name} has unknown type ${builtins.toJSON spec.type}";

  # If the environment variable NIV_OVERRIDE_${name} is set, then use
  # the path directly as opposed to the fetched source.
  replace = name: drv:
    let
      saneName = stringAsChars (c: if isNull (builtins.match "[a-zA-Z0-9]" c) then "_" else c) name;
      ersatz = builtins.getEnv "NIV_OVERRIDE_${saneName}";
    in
      if ersatz == "" then drv else
        # this turns the string into an actual Nix path (for both absolute and
        # relative paths)
        if builtins.substring 0 1 ersatz == "/" then /. + ersatz else /. + builtins.getEnv "PWD" + "/${ersatz}";

  # Ports of functions for older nix versions

  # a Nix version of mapAttrs if the built-in doesn't exist
  mapAttrs = builtins.mapAttrs or (
    f: set: with builtins;
      listToAttrs (map (attr: { name = attr; value = f attr set.${attr}; }) (attrNames set))
  );

  # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/lists.nix#L295
  range = first: last: if first > last then [] else builtins.genList (n: first + n) (last - first + 1);

  # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L257
  stringToCharacters = s: map (p: builtins.substring p 1 s) (range 0 (builtins.stringLength s - 1));

  # https://github.com/NixOS/nixpkgs/blob/0258808f5744ca980b9a1f24fe0b1e6f0fecee9c/lib/strings.nix#L269
  stringAsChars = f: s: concatStrings (map f (stringToCharacters s));
  concatMapStrings = f: list: concatStrings (map f list);
  concatStrings = builtins.concatStringsSep "";

  # https://github.com/NixOS/nixpkgs/blob/8a9f58a375c401b96da862d969f66429def1d118/lib/attrsets.nix#L331
  optionalAttrs = cond: as: if cond then as else {};

  # fetchTarball version that is compatible between all the versions of Nix
  builtins_fetchTarball = { url, name ? null, sha256 }@attrs:
    let
      inherit (builtins) lessThan nixVersion fetchTarball;
    in
      if lessThan nixVersion "1.12" then
        fetchTarball ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; }))
      else
        fetchTarball attrs;

  # fetchurl version that is compatible between all the versions of Nix
  builtins_fetchurl = { url, name ? null, sha256 }@attrs:
    let
      inherit (builtins) lessThan nixVersion fetchurl;
    in
      if lessThan nixVersion "1.12" then
        fetchurl ({ inherit url; } // (optionalAttrs (!isNull name) { inherit name; }))
      else
        fetchurl attrs;

  # Create the final "sources" from the config
  mkSources = config:
    mapAttrs (
      name: spec:
        if builtins.hasAttr "outPath" spec
        then abort
          "The values in sources.json should not have an 'outPath' attribute"
        else
          spec // { outPath = replace name (fetch config.pkgs name spec); }
    ) config.sources;

  # The "config" used by the fetchers
  mkConfig =
    { sourcesFile ? if builtins.pathExists ./sources.json then ./sources.json else null
    , sources ? if isNull sourcesFile then {} else builtins.fromJSON (builtins.readFile sourcesFile)
    , system ? builtins.currentSystem
    , pkgs ? mkPkgs sources system
    }: rec {
      # The sources, i.e. the attribute set of spec name to spec
      inherit sources;

      # The "pkgs" (evaluated nixpkgs) to use for e.g. non-builtin fetchers
      inherit pkgs;
    };

in
mkSources (mkConfig {}) // { __functor = _: settings: mkSources (mkConfig settings); }
nix/tahoe-lafs.nix

@@ -1,24 +1,16 @@
-let
-  pname = "tahoe-lafs";
-  version = "1.18.0.post1";
-in
 { lib
 , pythonPackages
 , buildPythonPackage
 , tahoe-lafs-src
-, extrasNames
-
-# control how the test suite is run
-, doCheck
 }:
-let
-  pickExtraDependencies = deps: extras: builtins.foldl' (accum: extra: accum ++ deps.${extra}) [] extras;
-
-  pythonExtraDependencies = with pythonPackages; {
-    tor = [ txtorcon ];
-    i2p = [ txi2p ];
-  };
-
-  pythonPackageDependencies = with pythonPackages; [
+buildPythonPackage rec {
+  pname = "tahoe-lafs";
+  version = "1.18.0.post1";
+  src = tahoe-lafs-src;
+  propagatedBuildInputs = with pythonPackages; [
     attrs
     autobahn
     cbor2

@@ -41,35 +33,42 @@
     six
     treq
     twisted
-    # Get the dependencies for the Twisted extras we depend on, too.
-    twisted.passthru.optional-dependencies.tls
-    twisted.passthru.optional-dependencies.conch
     werkzeug
     zfec
     zope_interface
-  ] ++ pickExtraDependencies pythonExtraDependencies extrasNames;
+  ] ++
+  # Get the dependencies for the Twisted extras we depend on, too.
+  twisted.passthru.optional-dependencies.tls ++
+  twisted.passthru.optional-dependencies.conch;

-  unitTestDependencies = with pythonPackages; [
-    beautifulsoup4
-    fixtures
-    hypothesis
-    mock
-    prometheus-client
-    testtools
-  ];
-
-in
-buildPythonPackage {
-  inherit pname version;
-  src = tahoe-lafs-src;
-  propagatedBuildInputs = pythonPackageDependencies;
-
-  inherit doCheck;
-  checkInputs = unitTestDependencies;
-  checkPhase = ''
-    export TAHOE_LAFS_HYPOTHESIS_PROFILE=ci
-    python -m twisted.trial -j $NIX_BUILD_CORES allmydata
-  '';
+  # The test suite lives elsewhere.
+  doCheck = false;
+
+  passthru = {
+    extras = with pythonPackages; {
+      tor = [
+        txtorcon
+      ];
+      i2p = [
+        txi2p
+      ];
+      unittest = [
+        beautifulsoup4
+        html5lib
+        fixtures
+        hypothesis
+        mock
+        prometheus-client
+        testtools
+      ];
+      integrationtest = [
+        pytest
+        pytest-twisted
+        paramiko
+        pytest-timeout
+      ];
+    };
+  };

   meta = with lib; {
     homepage = "https://tahoe-lafs.org/";
nix/tests.nix (removed; -4 lines)

-# Build the package with the test suite enabled.
-args@{...}: (import ../. args).override {
-  doCheck = true;
-}
nix/twisted.patch (new file, +12 lines)

diff --git a/src/twisted/internet/test/test_endpoints.py b/src/twisted/internet/test/test_endpoints.py
index c650fd8aa6..a1754fd533 100644
--- a/src/twisted/internet/test/test_endpoints.py
+++ b/src/twisted/internet/test/test_endpoints.py
@@ -4214,6 +4214,7 @@ class WrapClientTLSParserTests(unittest.TestCase):
         connectionCreator = connectionCreatorFromEndpoint(reactor, endpoint)
         self.assertEqual(connectionCreator._hostname, "\xe9xample.example.com")

+    @skipIf(True, "self.assertFalse(plainClient.transport.disconnecting) fails")
     def test_tls(self):
         """
         When passed a string endpoint description beginning with C{tls:},
src/allmydata/cli/grid_manager.py

@@ -28,7 +28,7 @@ from allmydata.grid_manager import (
 from allmydata.util import jsonbytes as json


-@click.group() # type: ignore[arg-type]
+@click.group()
 @click.option(
     '--config', '-c',
     type=click.Path(),

@@ -71,7 +71,7 @@ def grid_manager(ctx, config):
     ctx.obj = Config()


-@grid_manager.command() # type: ignore[attr-defined]
+@grid_manager.command()
 @click.pass_context
 def create(ctx):
     """

@@ -91,7 +91,7 @@ def create(ctx):
     )


-@grid_manager.command() # type: ignore[attr-defined]
+@grid_manager.command()
 @click.pass_obj
 def public_identity(config):
     """

@@ -103,7 +103,7 @@ def public_identity(config):
     click.echo(config.grid_manager.public_identity())


-@grid_manager.command() # type: ignore[arg-type, attr-defined]
+@grid_manager.command()
 @click.argument("name")
 @click.argument("public_key", type=click.STRING)
 @click.pass_context

@@ -132,7 +132,7 @@ def add(ctx, name, public_key):
     return 0


-@grid_manager.command() # type: ignore[arg-type, attr-defined]
+@grid_manager.command()
 @click.argument("name")
 @click.pass_context
 def remove(ctx, name):

@@ -155,8 +155,7 @@ def remove(ctx, name):
     save_grid_manager(fp, ctx.obj.grid_manager, create=False)


-@grid_manager.command() # type: ignore[attr-defined]
-# noqa: F811
+@grid_manager.command()  # noqa: F811
 @click.pass_context
 def list(ctx):
     """

@@ -176,7 +175,7 @@ def list(ctx):
     click.echo("expired {} ({})".format(cert.expires, abbreviate_time(delta)))


-@grid_manager.command() # type: ignore[arg-type, attr-defined]
+@grid_manager.command()
 @click.argument("name")
 @click.argument(
     "expiry_days",
src/allmydata/client.py

@@ -837,7 +837,11 @@ class _Client(node.Node, pollmixin.PollMixin):
         if hasattr(self.tub.negotiationClass, "add_storage_server"):
             nurls = self.tub.negotiationClass.add_storage_server(ss, swissnum.encode("ascii"))
             self.storage_nurls = nurls
-            announcement[storage_client.ANONYMOUS_STORAGE_NURLS] = [n.to_text() for n in nurls]
+            # There is code in e.g. storage_client.py that checks if an
+            # announcement has changed.  Since NURL order isn't meaningful,
+            # we don't want a change in the order to count as a change, so we
+            # send the NURLs as a set.  CBOR supports sets, as does Foolscap.
+            announcement[storage_client.ANONYMOUS_STORAGE_NURLS] = {n.to_text() for n in nurls}
         announcement["anonymous-storage-FURL"] = furl

         enabled_storage_servers = self._enable_storage_servers(
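The motivation for the set is easy to demonstrate.  A list announcement looks "changed" whenever NURL enumeration order shifts, while a set does not (the NURL strings below are placeholders):

    nurls_then = ["pb://aaa#v=1", "pb://bbb#v=1"]  # placeholder values
    nurls_now = ["pb://bbb#v=1", "pb://aaa#v=1"]   # same NURLs, different order
    assert nurls_then != nurls_now                  # list comparison sees a change
    assert set(nurls_then) == set(nurls_now)        # set comparison does not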
121
src/allmydata/listeners.py
Normal file
121
src/allmydata/listeners.py
Normal file
@ -0,0 +1,121 @@
"""
Define a protocol for listening on a transport such that Tahoe-LAFS can
communicate over it, manage configuration for it in its configuration file,
detect when it is possible to use it, etc.
"""

from __future__ import annotations

from typing import Any, Protocol, Sequence, Mapping, Optional, Union, Awaitable
from typing_extensions import Literal

from attrs import frozen
from twisted.python.usage import Options

from .interfaces import IAddressFamily
from .util.iputil import allocate_tcp_port
from .node import _Config


@frozen
class ListenerConfig:
    """
    :ivar tub_ports: Entries to merge into ``[node]tub.port``.

    :ivar tub_locations: Entries to merge into ``[node]tub.location``.

    :ivar node_config: Entries to add into the overall Tahoe-LAFS
        configuration beneath a section named after this listener.
    """
    tub_ports: Sequence[str]
    tub_locations: Sequence[str]
    node_config: Mapping[str, Sequence[tuple[str, str]]]


class Listener(Protocol):
    """
    An object which can listen on a transport and allow Tahoe-LAFS
    communication to happen over it.
    """
    def is_available(self) -> bool:
        """
        Can this type of listener actually be used in this runtime
        environment?
        """

    def can_hide_ip(self) -> bool:
        """
        Can the transport supported by this type of listener conceal the
        node's public internet address from peers?
        """

    async def create_config(self, reactor: Any, cli_config: Options) -> Optional[ListenerConfig]:
        """
        Set up an instance of this listener according to the given
        configuration parameters.

        This may also allocate ephemeral resources if necessary.

        :return: The created configuration which can be merged into the
            overall *tahoe.cfg* configuration file.
        """

    def create(self, reactor: Any, config: _Config) -> IAddressFamily:
        """
        Instantiate this listener according to the given
        previously-generated configuration.

        :return: A handle on the listener which can be used to integrate it
            into the Tahoe-LAFS node.
        """


class TCPProvider:
    """
    Support plain TCP connections.
    """
    def is_available(self) -> Literal[True]:
        return True

    def can_hide_ip(self) -> Literal[False]:
        return False

    async def create_config(self, reactor: Any, cli_config: Options) -> ListenerConfig:
        tub_ports = []
        tub_locations = []
        if cli_config["port"]:  # --port/--location are a pair
            tub_ports.append(cli_config["port"])
            tub_locations.append(cli_config["location"])
        else:
            assert "hostname" in cli_config
            hostname = cli_config["hostname"]
            new_port = allocate_tcp_port()
            tub_ports.append(f"tcp:{new_port}")
            tub_locations.append(f"tcp:{hostname}:{new_port}")

        return ListenerConfig(tub_ports, tub_locations, {})

    def create(self, reactor: Any, config: _Config) -> IAddressFamily:
        raise NotImplementedError()


@frozen
class StaticProvider:
    """
    A provider that uses all pre-computed values.
    """
    _available: bool
    _hide_ip: bool
    _config: Union[Awaitable[Optional[ListenerConfig]], Optional[ListenerConfig]]
    _address: IAddressFamily

    def is_available(self) -> bool:
        return self._available

    def can_hide_ip(self) -> bool:
        return self._hide_ip

    async def create_config(self, reactor: Any, cli_config: Options) -> Optional[ListenerConfig]:
        if self._config is None or isinstance(self._config, ListenerConfig):
            return self._config
        return await self._config

    def create(self, reactor: Any, config: _Config) -> IAddressFamily:
        return self._address
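A rough usage sketch for the protocol above (names and values invented, not from the patch): StaticProvider doubles as a canned Listener, which is how the tests later in this commit exercise slow listeners.

import asyncio

def demo() -> None:
    canned = ListenerConfig(["tcp:12345"], ["tcp:example.invalid:12345"], {})
    provider = StaticProvider(
        available=True,   # is_available() will return True
        hide_ip=False,    # can_hide_ip() will return False
        config=canned,    # create_config() resolves to this value
        address=None,     # type: ignore[arg-type] -- no IAddressFamily needed here
    )
    assert provider.is_available()
    # A non-awaitable config is returned directly by create_config().
    result = asyncio.run(provider.create_config(reactor=None, cli_config=None))
    assert result is canned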
@ -959,11 +959,8 @@ def create_main_tub(config, tub_options,
         tub_options,
         default_connection_handlers,
         foolscap_connection_handlers,
-        # TODO eventually we will want the default to be False, but for now we
-        # don't want to enable HTTP by default.
-        # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3934
         force_foolscap=config.get_config(
-            "storage", "force_foolscap", default=True, boolean=True
+            "storage", "force_foolscap", default=False, boolean=True
         ),
         handler_overrides=handler_overrides,
         certFile=certfile,
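The flipped default means the HTTP storage protocol is now served unless an operator opts out. A hedged sketch of reading that knob (config_from_string is the same helper the tests below use; the stanza is illustrative):

from allmydata.node import config_from_string

config = config_from_string("basedir", "portnum", "[storage]\nforce_foolscap = true\n")
# With the stanza present, the old Foolscap-only behavior is kept...
assert config.get_config("storage", "force_foolscap", default=False, boolean=True)
# ...and with an empty config the new default (False) applies.
empty = config_from_string("basedir", "portnum", "")
assert not empty.get_config("storage", "force_foolscap", default=False, boolean=True)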
@ -1,3 +1,8 @@
+from __future__ import annotations
+
+from typing import Optional
+
 import io
 import os

@ -19,9 +24,40 @@ from allmydata.scripts.common import (
     write_introducer,
 )
 from allmydata.scripts.default_nodedir import _default_nodedir
+from allmydata.util import dictutil
 from allmydata.util.assertutil import precondition
 from allmydata.util.encodingutil import listdir_unicode, argv_to_unicode, quote_local_unicode_path, get_io_encoding
-from allmydata.util import fileutil, i2p_provider, iputil, tor_provider, jsonbytes as json
+from allmydata.util import fileutil, i2p_provider, tor_provider, jsonbytes as json
+
+i2p_provider: Listener
+tor_provider: Listener
+
+from ..listeners import ListenerConfig, Listener, TCPProvider, StaticProvider
+
+
+def _get_listeners() -> dict[str, Listener]:
+    """
+    Get all of the kinds of listeners we might be able to use.
+    """
+    return {
+        "tor": tor_provider,
+        "i2p": i2p_provider,
+        "tcp": TCPProvider(),
+        "none": StaticProvider(
+            available=True,
+            hide_ip=False,
+            config=defer.succeed(None),
+            # This is supposed to be an IAddressFamily but we have none for
+            # this kind of provider.  We could implement new client and server
+            # endpoint types that always fail and pass an IAddressFamily here
+            # that uses those.  Nothing would ever even ask for them (at
+            # least, yet), let alone try to use them, so that's a lot of extra
+            # work for no practical result so I'm not doing it now.
+            address=None,  # type: ignore[arg-type]
+        ),
+    }
+
+_LISTENERS = _get_listeners()
+
 dummy_tac = """
 import sys

@ -98,8 +134,11 @@ def validate_where_options(o):
     if o['listen'] != "none" and o.get('join', None) is None:
         listeners = o['listen'].split(",")
         for l in listeners:
-            if l not in ["tcp", "tor", "i2p"]:
-                raise UsageError("--listen= must be none, or one/some of: tcp, tor, i2p")
+            if l not in _LISTENERS:
+                raise UsageError(
+                    "--listen= must be one/some of: "
+                    f"{', '.join(sorted(_LISTENERS))}",
+                )
         if 'tcp' in listeners and not o['hostname']:
             raise UsageError("--listen=tcp requires --hostname=")
         if 'tcp' not in listeners and o['hostname']:

@ -108,7 +147,7 @@ def validate_where_options(o):

 def validate_tor_options(o):
     use_tor = "tor" in o["listen"].split(",")
     if use_tor or any((o["tor-launch"], o["tor-control-port"])):
-        if tor_provider._import_txtorcon() is None:
+        if not _LISTENERS["tor"].is_available():
             raise UsageError(
                 "Specifying any Tor options requires the 'txtorcon' module"
             )

@ -123,7 +162,7 @@ def validate_tor_options(o):

 def validate_i2p_options(o):
     use_i2p = "i2p" in o["listen"].split(",")
     if use_i2p or any((o["i2p-launch"], o["i2p-sam-port"])):
-        if i2p_provider._import_txi2p() is None:
+        if not _LISTENERS["i2p"].is_available():
             raise UsageError(
                 "Specifying any I2P options requires the 'txi2p' module"
             )

@ -145,11 +184,17 @@ class _CreateBaseOptions(BasedirOptions):
     def postOptions(self):
         super(_CreateBaseOptions, self).postOptions()
         if self['hide-ip']:
-            if tor_provider._import_txtorcon() is None and i2p_provider._import_txi2p() is None:
+            ip_hiders = dictutil.filter(lambda v: v.can_hide_ip(), _LISTENERS)
+            available = dictutil.filter(lambda v: v.is_available(), ip_hiders)
+            if not available:
                 raise UsageError(
-                    "--hide-ip was specified but neither 'txtorcon' nor 'txi2p' "
-                    "are installed.\nTo do so:\n   pip install tahoe-lafs[tor]\nor\n"
-                    "   pip install tahoe-lafs[i2p]"
+                    "--hide-ip was specified but no IP-hiding listener is installed.\n"
+                    "Try one of these:\n" +
+                    "".join([
+                        f"\tpip install tahoe-lafs[{name}]\n"
+                        for name
+                        in ip_hiders
+                    ])
                 )

 class CreateClientOptions(_CreateBaseOptions):

@ -218,8 +263,34 @@ class CreateIntroducerOptions(NoDefaultBasedirOptions):
         validate_i2p_options(self)


-@defer.inlineCallbacks
-def write_node_config(c, config):
+def merge_config(
+    left: Optional[ListenerConfig],
+    right: Optional[ListenerConfig],
+) -> Optional[ListenerConfig]:
+    """
+    Merge two listener configurations into one configuration representing
+    both of them.
+
+    If either is ``None`` then the result is ``None``.  This supports the
+    "disable listeners" functionality.
+
+    :raise ValueError: If the keys in the node configs overlap.
+    """
+    if left is None or right is None:
+        return None
+
+    overlap = set(left.node_config) & set(right.node_config)
+    if overlap:
+        raise ValueError(f"Node configs overlap: {overlap}")
+
+    return ListenerConfig(
+        list(left.tub_ports) + list(right.tub_ports),
+        list(left.tub_locations) + list(right.tub_locations),
+        dict(list(left.node_config.items()) + list(right.node_config.items())),
+    )
+
+
+async def write_node_config(c, config):
     # this is shared between clients and introducers
     c.write("# -*- mode: conf; coding: {c.encoding} -*-\n".format(c=c))
     c.write("\n")
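A worked example of merge_config (values invented for illustration):

tcp = ListenerConfig(["tcp:3457"], ["tcp:example.com:3457"], {})
tor = ListenerConfig(["listen:tor"], ["tor:abc.onion:3457"], {"tor": [("launch", "true")]})

merged = merge_config(tcp, tor)
assert merged is not None
assert merged.tub_ports == ["tcp:3457", "listen:tor"]
assert merged.tub_locations == ["tcp:example.com:3457", "tor:abc.onion:3457"]
assert merged.node_config == {"tor": [("launch", "true")]}

# A "none" listener produces None, which poisons the whole merge:
assert merge_config(merged, None) is None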
@ -232,9 +303,10 @@ def write_node_config(c, config):

     if config["hide-ip"]:
         c.write("[connections]\n")
-        if tor_provider._import_txtorcon():
+        if _LISTENERS["tor"].is_available():
             c.write("tcp = tor\n")
         else:
+            # XXX What about i2p?
             c.write("tcp = disabled\n")
     c.write("\n")

@ -253,38 +325,23 @@ def write_node_config(c, config):
     c.write("web.port = %s\n" % (webport,))
     c.write("web.static = public_html\n")

-    listeners = config['listen'].split(",")
+    listener_config = ListenerConfig([], [], {})
+    for listener_name in config['listen'].split(","):
+        listener = _LISTENERS[listener_name]
+        listener_config = merge_config(
+            (await listener.create_config(reactor, config)),
+            listener_config,
+        )

-    tor_config = {}
-    i2p_config = {}
-    tub_ports = []
-    tub_locations = []
-    if listeners == ["none"]:
-        c.write("tub.port = disabled\n")
-        c.write("tub.location = disabled\n")
+    if listener_config is None:
+        tub_ports = ["disabled"]
+        tub_locations = ["disabled"]
     else:
-        if "tor" in listeners:
-            (tor_config, tor_port, tor_location) = \
-                yield tor_provider.create_config(reactor, config)
-            tub_ports.append(tor_port)
-            tub_locations.append(tor_location)
-        if "i2p" in listeners:
-            (i2p_config, i2p_port, i2p_location) = \
-                yield i2p_provider.create_config(reactor, config)
-            tub_ports.append(i2p_port)
-            tub_locations.append(i2p_location)
-        if "tcp" in listeners:
-            if config["port"]: # --port/--location are a pair
-                tub_ports.append(config["port"])
-                tub_locations.append(config["location"])
-            else:
-                assert "hostname" in config
-                hostname = config["hostname"]
-                new_port = iputil.allocate_tcp_port()
-                tub_ports.append("tcp:%s" % new_port)
-                tub_locations.append("tcp:%s:%s" % (hostname, new_port))
-        c.write("tub.port = %s\n" % ",".join(tub_ports))
-        c.write("tub.location = %s\n" % ",".join(tub_locations))
+        tub_ports = listener_config.tub_ports
+        tub_locations = listener_config.tub_locations
+
+    c.write("tub.port = %s\n" % ",".join(tub_ports))
+    c.write("tub.location = %s\n" % ",".join(tub_locations))
     c.write("\n")

     c.write("#log_gatherer.furl =\n")

@ -294,17 +351,12 @@ def write_node_config(c, config):
     c.write("#ssh.authorized_keys_file = ~/.ssh/authorized_keys\n")
     c.write("\n")

-    if tor_config:
-        c.write("[tor]\n")
-        for key, value in list(tor_config.items()):
-            c.write("%s = %s\n" % (key, value))
-        c.write("\n")
-
-    if i2p_config:
-        c.write("[i2p]\n")
-        for key, value in list(i2p_config.items()):
-            c.write("%s = %s\n" % (key, value))
-        c.write("\n")
+    if listener_config is not None:
+        for section, items in listener_config.node_config.items():
+            c.write(f"[{section}]\n")
+            for k, v in items:
+                c.write(f"{k} = {v}\n")
+            c.write("\n")


 def write_client_config(c, config):

@ -445,7 +497,7 @@ def create_node(config):
     fileutil.make_dirs(os.path.join(basedir, "private"), 0o700)
     cfg_name = os.path.join(basedir, "tahoe.cfg")
     with io.open(cfg_name, "w", encoding='utf-8') as c:
-        yield write_node_config(c, config)
+        yield defer.Deferred.fromCoroutine(write_node_config(c, config))
         write_client_config(c, config)

     print("Node created in %s" % quote_local_unicode_path(basedir), file=out)

@ -488,7 +540,7 @@ def create_introducer(config):
     fileutil.make_dirs(os.path.join(basedir, "private"), 0o700)
     cfg_name = os.path.join(basedir, "tahoe.cfg")
     with io.open(cfg_name, "w", encoding='utf-8') as c:
-        yield write_node_config(c, config)
+        yield defer.Deferred.fromCoroutine(write_node_config(c, config))

     print("Introducer created in %s" % quote_local_unicode_path(basedir), file=out)
     defer.returnValue(0)
@ -530,6 +530,60 @@ def _add_error_handling(app: Klein):
         return str(failure.value).encode("utf-8")


+async def read_encoded(
+    reactor, request, schema: Schema, max_size: int = 1024 * 1024
+) -> Any:
+    """
+    Read encoded request body data, decoding it with CBOR by default.
+
+    Somewhat arbitrarily, limit body size to 1MiB by default.
+    """
+    content_type = get_content_type(request.requestHeaders)
+    if content_type is None:
+        content_type = CBOR_MIME_TYPE
+    if content_type != CBOR_MIME_TYPE:
+        raise _HTTPError(http.UNSUPPORTED_MEDIA_TYPE)
+
+    # Make sure it's not too large:
+    request.content.seek(0, SEEK_END)
+    size = request.content.tell()
+    if size > max_size:
+        raise _HTTPError(http.REQUEST_ENTITY_TOO_LARGE)
+    request.content.seek(0, SEEK_SET)
+
+    # We don't want to load the whole message into memory, because it might
+    # be quite large.  The CDDL validator takes a read-only bytes-like
+    # thing.  Luckily, for large request bodies twisted.web will buffer the
+    # data in a file, so we can use mmap() to get a memory view.  The CDDL
+    # validator will not make a copy, so it won't increase memory usage
+    # beyond that.
+    try:
+        fd = request.content.fileno()
+    except (ValueError, OSError):
+        fd = -1
+    if fd >= 0:
+        # It's a file, so we can use mmap() to save memory.
+        message = mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
+    else:
+        message = request.content.read()
+
+    # Pycddl will release the GIL when validating larger documents, so
+    # let's take advantage of multiple CPUs:
+    if size > 10_000:
+        await defer_to_thread(reactor, schema.validate_cbor, message)
+    else:
+        schema.validate_cbor(message)
+
+    # The CBOR parser will allocate more memory, but at least we can feed
+    # it the file-like object, so that if it's large it won't make two
+    # copies.
+    request.content.seek(SEEK_SET, 0)
+    # Typically deserialization to Python will not release the GIL, and
+    # indeed as of Jan 2023 cbor2 didn't have any code to release the GIL
+    # in the decode path.  As such, running it in a different thread has no benefit.
+    return cbor2.load(request.content)
+
+
 class HTTPServer(object):
     """
     A HTTP interface to the storage server.
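The seek/tell size probe above is worth seeing in isolation; a standalone sketch (not from the patch):

import io
from io import SEEK_END, SEEK_SET

def body_size(content) -> int:
    # Measure a seekable body without reading it into memory:
    # jump to the end, record the offset, rewind for the real consumer.
    content.seek(0, SEEK_END)
    size = content.tell()
    content.seek(0, SEEK_SET)
    return size

assert body_size(io.BytesIO(b"x" * 2048)) == 2048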
@ -587,56 +641,6 @@ class HTTPServer(object):
             # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3861
             raise _HTTPError(http.NOT_ACCEPTABLE)

-    async def _read_encoded(
-        self, request, schema: Schema, max_size: int = 1024 * 1024
-    ) -> Any:
-        """
-        Read encoded request body data, decoding it with CBOR by default.
-
-        Somewhat arbitrarily, limit body size to 1MiB by default.
-        """
-        content_type = get_content_type(request.requestHeaders)
-        if content_type != CBOR_MIME_TYPE:
-            raise _HTTPError(http.UNSUPPORTED_MEDIA_TYPE)
-
-        # Make sure it's not too large:
-        request.content.seek(0, SEEK_END)
-        size = request.content.tell()
-        if size > max_size:
-            raise _HTTPError(http.REQUEST_ENTITY_TOO_LARGE)
-        request.content.seek(0, SEEK_SET)
-
-        # We don't want to load the whole message into memory, cause it might
-        # be quite large.  The CDDL validator takes a read-only bytes-like
-        # thing.  Luckily, for large request bodies twisted.web will buffer the
-        # data in a file, so we can use mmap() to get a memory view.  The CDDL
-        # validator will not make a copy, so it won't increase memory usage
-        # beyond that.
-        try:
-            fd = request.content.fileno()
-        except (ValueError, OSError):
-            fd = -1
-        if fd >= 0:
-            # It's a file, so we can use mmap() to save memory.
-            message = mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
-        else:
-            message = request.content.read()
-
-        # Pycddl will release the GIL when validating larger documents, so
-        # let's take advantage of multiple CPUs:
-        if size > 10_000:
-            await defer_to_thread(self._reactor, schema.validate_cbor, message)
-        else:
-            schema.validate_cbor(message)
-
-        # The CBOR parser will allocate more memory, but at least we can feed
-        # it the file-like object, so that if it's large it won't be make two
-        # copies.
-        request.content.seek(SEEK_SET, 0)
-        # Typically deserialization to Python will not release the GIL, and
-        # indeed as of Jan 2023 cbor2 didn't have any code to release the GIL
-        # in the decode path.  As such, running it in a different thread has no benefit.
-        return cbor2.load(request.content)
-
     ##### Generic APIs #####

@ -677,8 +681,8 @@ class HTTPServer(object):
         """Allocate buckets."""
         upload_secret = authorization[Secrets.UPLOAD]
         # It's just a list of up to ~256 shares, shouldn't use many bytes.
-        info = await self._read_encoded(
-            request, _SCHEMAS["allocate_buckets"], max_size=8192
+        info = await read_encoded(
+            self._reactor, request, _SCHEMAS["allocate_buckets"], max_size=8192
         )

         # We do NOT validate the upload secret for existing bucket uploads.

@ -849,7 +853,8 @@ class HTTPServer(object):

         # The reason can be a string with explanation, so in theory it could be
         # longish?
-        info = await self._read_encoded(
+        info = await read_encoded(
+            self._reactor,
             request,
             _SCHEMAS["advise_corrupt_share"],
             max_size=32768,

@ -868,8 +873,8 @@ class HTTPServer(object):
     @async_to_deferred
     async def mutable_read_test_write(self, request, authorization, storage_index):
         """Read/test/write combined operation for mutables."""
-        rtw_request = await self._read_encoded(
-            request, _SCHEMAS["mutable_read_test_write"], max_size=2**48
+        rtw_request = await read_encoded(
+            self._reactor, request, _SCHEMAS["mutable_read_test_write"], max_size=2**48
         )
         secrets = (
             authorization[Secrets.WRITE_ENABLER],

@ -955,8 +960,8 @@ class HTTPServer(object):

         # The reason can be a string with explanation, so in theory it could be
         # longish?
-        info = await self._read_encoded(
-            request, _SCHEMAS["advise_corrupt_share"], max_size=32768
+        info = await read_encoded(
+            self._reactor, request, _SCHEMAS["advise_corrupt_share"], max_size=32768
         )
         self._storage_server.advise_corrupt_share(
             b"mutable", storage_index, share_number, info["reason"].encode("utf-8")
@ -293,7 +293,7 @@ class StorageFarmBroker(service.MultiService):
         connect to storage server over HTTP.
         """
         return not node_config.get_config(
-            "client", "force_foolscap", default=True, boolean=True,
+            "client", "force_foolscap", default=False, boolean=True,
         ) and len(announcement.get(ANONYMOUS_STORAGE_NURLS, [])) > 0

     @log_call(
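Restated as a standalone predicate (sketch only; it mirrors the expression above):

def should_we_use_http(force_foolscap: bool, nurls: list) -> bool:
    # The opt-out flag wins; otherwise announced NURLs enable HTTP.
    return not force_foolscap and len(nurls) > 0

assert should_we_use_http(False, ["pb://..."])      # new default: use HTTP
assert not should_we_use_http(False, [])            # no NURLs announced
assert not should_we_use_http(True, ["pb://..."])   # operator forced Foolscap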
@ -17,6 +17,7 @@ from ..common import (
     disable_modules,
 )
 from ...scripts import create_node
+from ...listeners import ListenerConfig, StaticProvider
 from ... import client

 def read_config(basedir):

@ -24,6 +25,68 @@ def read_config(basedir):
     config = configutil.get_config(tahoe_cfg)
     return config

+class MergeConfigTests(unittest.TestCase):
+    """
+    Tests for ``create_node.merge_config``.
+    """
+    def test_disable_left(self) -> None:
+        """
+        If the left argument to ``create_node.merge_config`` is ``None``
+        then the return value is ``None``.
+        """
+        conf = ListenerConfig([], [], {})
+        self.assertEqual(None, create_node.merge_config(None, conf))
+
+    def test_disable_right(self) -> None:
+        """
+        If the right argument to ``create_node.merge_config`` is ``None``
+        then the return value is ``None``.
+        """
+        conf = ListenerConfig([], [], {})
+        self.assertEqual(None, create_node.merge_config(conf, None))
+
+    def test_disable_both(self) -> None:
+        """
+        If both arguments to ``create_node.merge_config`` are ``None``
+        then the return value is ``None``.
+        """
+        self.assertEqual(None, create_node.merge_config(None, None))
+
+    def test_overlapping_keys(self) -> None:
+        """
+        If there are any keys in the ``node_config`` of the left and right
+        parameters that are shared then ``ValueError`` is raised.
+        """
+        left = ListenerConfig([], [], {"foo": [("b", "ar")]})
+        right = ListenerConfig([], [], {"foo": [("ba", "z")]})
+        self.assertRaises(ValueError, lambda: create_node.merge_config(left, right))
+
+    def test_merge(self) -> None:
+        """
+        ``create_node.merge_config`` returns a ``ListenerConfig`` that has
+        all of the ports, locations, and node config from each of the two
+        ``ListenerConfig`` values given.
+        """
+        left = ListenerConfig(
+            ["left-port"],
+            ["left-location"],
+            {"left": [("f", "oo")]},
+        )
+        right = ListenerConfig(
+            ["right-port"],
+            ["right-location"],
+            {"right": [("ba", "r")]},
+        )
+        result = create_node.merge_config(left, right)
+        self.assertEqual(
+            ListenerConfig(
+                ["left-port", "right-port"],
+                ["left-location", "right-location"],
+                {"left": [("f", "oo")], "right": [("ba", "r")]},
+            ),
+            result,
+        )
+
 class Config(unittest.TestCase):
     def test_client_unrecognized_options(self):
         tests = [

@ -45,7 +108,14 @@ class Config(unittest.TestCase):
         e = self.assertRaises(usage.UsageError, parse_cli, verb, *args)
         self.assertIn("option %s not recognized" % (option,), str(e))

-    def test_create_client_config(self):
+    async def test_create_client_config(self):
+        """
+        ``create_node.write_client_config`` writes a configuration file
+        that can be parsed.
+
+        TODO Maybe we should test that we can recover the given configuration
+        from the parse, too.
+        """
         d = self.mktemp()
         os.mkdir(d)
         fname = os.path.join(d, 'tahoe.cfg')

@ -59,7 +129,7 @@ class Config(unittest.TestCase):
             "shares-happy": "1",
             "shares-total": "1",
         }
-        create_node.write_node_config(f, opts)
+        await create_node.write_node_config(f, opts)
         create_node.write_client_config(f, opts)

         # should succeed, no exceptions

@ -245,7 +315,7 @@ class Config(unittest.TestCase):
                               parse_cli,
                               "create-node", "--listen=tcp,none",
                               basedir)
-        self.assertEqual(str(e), "--listen= must be none, or one/some of: tcp, tor, i2p")
+        self.assertEqual(str(e), "--listen=tcp requires --hostname=")

     def test_node_listen_bad(self):
         basedir = self.mktemp()

@ -253,7 +323,7 @@ class Config(unittest.TestCase):
                               parse_cli,
                               "create-node", "--listen=XYZZY,tcp",
                               basedir)
-        self.assertEqual(str(e), "--listen= must be none, or one/some of: tcp, tor, i2p")
+        self.assertEqual(str(e), "--listen= must be one/some of: i2p, none, tcp, tor")

     def test_node_listen_tor_hostname(self):
         e = self.assertRaises(usage.UsageError,

@ -287,24 +357,19 @@ class Config(unittest.TestCase):
         self.assertIn("To avoid clobbering anything, I am going to quit now", err)

     @defer.inlineCallbacks
-    def test_node_slow_tor(self):
-        basedir = self.mktemp()
+    def test_node_slow(self):
+        """
+        A node can be created using a listener type that returns an
+        unfired Deferred from its ``create_config`` method.
+        """
         d = defer.Deferred()
-        self.patch(tor_provider, "create_config", lambda *a, **kw: d)
-        d2 = run_cli("create-node", "--listen=tor", basedir)
-        d.callback(({}, "port", "location"))
-        rc, out, err = yield d2
-        self.assertEqual(rc, 0)
-        self.assertIn("Node created", out)
-        self.assertEqual(err, "")
+        slow = StaticProvider(True, False, d, None)
+        create_node._LISTENERS["xxyzy"] = slow
+        self.addCleanup(lambda: create_node._LISTENERS.pop("xxyzy"))

-    @defer.inlineCallbacks
-    def test_node_slow_i2p(self):
         basedir = self.mktemp()
-        d = defer.Deferred()
-        self.patch(i2p_provider, "create_config", lambda *a, **kw: d)
-        d2 = run_cli("create-node", "--listen=i2p", basedir)
-        d.callback(({}, "port", "location"))
+        d2 = run_cli("create-node", "--listen=xxyzy", basedir)
+        d.callback(None)
         rc, out, err = yield d2
         self.assertEqual(rc, 0)
         self.assertIn("Node created", out)

@ -369,10 +434,12 @@ def fake_config(testcase: unittest.TestCase, module: Any, result: Any) -> list[t
 class Tor(unittest.TestCase):
     def test_default(self):
         basedir = self.mktemp()
-        tor_config = {"abc": "def"}
+        tor_config = {"tor": [("abc", "def")]}
         tor_port = "ghi"
         tor_location = "jkl"
-        config_d = defer.succeed( (tor_config, tor_port, tor_location) )
+        config_d = defer.succeed(
+            ListenerConfig([tor_port], [tor_location], tor_config)
+        )

         calls = fake_config(self, tor_provider, config_d)
         rc, out, err = self.successResultOf(

@ -390,11 +457,12 @@ class Tor(unittest.TestCase):
         self.assertEqual(cfg.get("node", "tub.location"), "jkl")

     def test_launch(self):
+        """
+        The ``--tor-launch`` command line option sets ``tor-launch`` to
+        ``True``.
+        """
         basedir = self.mktemp()
-        tor_config = {"abc": "def"}
-        tor_port = "ghi"
-        tor_location = "jkl"
-        config_d = defer.succeed( (tor_config, tor_port, tor_location) )
+        config_d = defer.succeed(None)

         calls = fake_config(self, tor_provider, config_d)
         rc, out, err = self.successResultOf(

@ -409,11 +477,12 @@ class Tor(unittest.TestCase):
         self.assertEqual(args[1]["tor-control-port"], None)

     def test_control_port(self):
+        """
+        The ``--tor-control-port`` command line parameter's value is
+        passed along as the ``tor-control-port`` value.
+        """
         basedir = self.mktemp()
-        tor_config = {"abc": "def"}
-        tor_port = "ghi"
-        tor_location = "jkl"
-        config_d = defer.succeed( (tor_config, tor_port, tor_location) )
+        config_d = defer.succeed(None)

         calls = fake_config(self, tor_provider, config_d)
         rc, out, err = self.successResultOf(

@ -451,10 +520,10 @@ class Tor(unittest.TestCase):
 class I2P(unittest.TestCase):
     def test_default(self):
         basedir = self.mktemp()
-        i2p_config = {"abc": "def"}
+        i2p_config = {"i2p": [("abc", "def")]}
         i2p_port = "ghi"
         i2p_location = "jkl"
-        dest_d = defer.succeed( (i2p_config, i2p_port, i2p_location) )
+        dest_d = defer.succeed(ListenerConfig([i2p_port], [i2p_location], i2p_config))

         calls = fake_config(self, i2p_provider, dest_d)
         rc, out, err = self.successResultOf(

@ -479,10 +548,7 @@ class I2P(unittest.TestCase):

     def test_sam_port(self):
         basedir = self.mktemp()
-        i2p_config = {"abc": "def"}
-        i2p_port = "ghi"
-        i2p_location = "jkl"
-        dest_d = defer.succeed( (i2p_config, i2p_port, i2p_location) )
+        dest_d = defer.succeed(None)

         calls = fake_config(self, i2p_provider, dest_d)
         rc, out, err = self.successResultOf(
@ -222,7 +222,8 @@ class GridManagerCommandLine(TestCase):
             result.output,
         )

-    @skipIf(not platform.isLinux(), "I only know how permissions work on linux")
+    @skipIf(platform.isWindows(), "We don't know how to set permissions on Windows.")
+    @skipIf(os.getuid() == 0, "cannot test as superuser with all permissions")
     def test_sign_bad_perms(self):
         """
         Error reported if we can't create certificate file
@ -13,6 +13,7 @@ if PY2:
     from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401

 import attr
+from hyperlink import DecodedURL

 from testtools.matchers import (
     Mismatch,

@ -95,6 +96,7 @@ def matches_storage_announcement(basedir, anonymous=True, options=None):
     }
     if anonymous:
         announcement[u"anonymous-storage-FURL"] = matches_furl()
+        announcement[u"anonymous-storage-NURLs"] = matches_nurls()
     if options:
         announcement[u"storage-options"] = MatchesListwise(options)
     return MatchesStructure(

@ -112,6 +114,16 @@ def matches_furl():
     return AfterPreprocessing(decode_furl, Always())


+def matches_nurls():
+    """
+    Matches a sequence of NURLs.
+    """
+    return AfterPreprocessing(
+        lambda nurls: [DecodedURL.from_text(u) for u in nurls],
+        Always()
+    )
+
+
 def matches_base32():
     """
     Match any base32 encoded byte string.
@ -1,17 +1,7 @@
 """
 Ported to Python 3.
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+from __future__ import annotations

 import os
 import sys
 from unittest import skipIf
 from functools import (
     partial,
 )

@ -42,6 +32,7 @@ from twisted.internet import defer
 from twisted.python.filepath import (
     FilePath,
 )
+from twisted.python.runtime import platform
 from testtools.matchers import (
     Equals,
     AfterPreprocessing,

@ -156,12 +147,12 @@ class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
             yield client.create_client(basedir)
         self.assertIn("[client]helper.furl", str(ctx.exception))

+    # if somebody knows a clever way to do this (cause
+    # EnvironmentError when reading a file that really exists), on
+    # windows, please fix this
+    @skipIf(platform.isWindows(), "We don't know how to set permissions on Windows.")
+    @skipIf(os.getuid() == 0, "cannot test as superuser with all permissions")
     def test_unreadable_config(self):
-        if sys.platform == "win32":
-            # if somebody knows a clever way to do this (cause
-            # EnvironmentError when reading a file that really exists), on
-            # windows, please fix this
-            raise unittest.SkipTest("can't make unreadable files on windows")
         basedir = "test_client.Basic.test_unreadable_config"
         os.mkdir(basedir)
         fn = os.path.join(basedir, "tahoe.cfg")
@ -1,17 +1,9 @@
 """
 Tests for allmydata.util.dictutil.

 Ported to Python 3.
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import annotations

 from future.utils import PY2, PY3
 if PY2:
     # dict omitted to match dictutil.py.
     from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, list, object, range, str, max, min  # noqa: F401

 from unittest import skipIf

@ -168,3 +160,18 @@ class TypedKeyDictPython2(unittest.TestCase):
         # Demonstration of how bytes and unicode can be mixed:
         d = {u"abc": 1}
         self.assertEqual(d[b"abc"], 1)


+class FilterTests(unittest.TestCase):
+    """
+    Tests for ``dictutil.filter``.
+    """
+    def test_filter(self) -> None:
+        """
+        ``dictutil.filter`` returns a ``dict`` that contains the key/value
+        pairs for which the value is matched by the given predicate.
+        """
+        self.assertEqual(
+            {1: 2},
+            dictutil.filter(lambda v: v == 2, {1: 2, 2: 3}),
+        )
@ -177,7 +177,7 @@ class CreateDest(unittest.TestCase):
         with mock.patch("allmydata.util.i2p_provider.clientFromString",
                         return_value=ep) as cfs:
             d = i2p_provider.create_config(reactor, cli_config)
-            tahoe_config_i2p, i2p_port, i2p_location = self.successResultOf(d)
+            i2p_config = self.successResultOf(d)

         connect_to_i2p.assert_called_with(reactor, cli_config, txi2p)
         cfs.assert_called_with(reactor, "goodport")

@ -189,9 +189,9 @@
             "dest.private_key_file": os.path.join("private",
                                                   "i2p_dest.privkey"),
         }
-        self.assertEqual(tahoe_config_i2p, expected)
-        self.assertEqual(i2p_port, "listen:i2p")
-        self.assertEqual(i2p_location, "i2p:FOOBAR.b32.i2p:3457")
+        self.assertEqual(dict(i2p_config.node_config["i2p"]), expected)
+        self.assertEqual(i2p_config.tub_ports, ["listen:i2p"])
+        self.assertEqual(i2p_config.tub_locations, ["i2p:FOOBAR.b32.i2p:3457"])

 _None = object()
 class FakeConfig(dict):
@ -1,14 +1,4 @@
 """
 Ported to Python 3.
 """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+from __future__ import annotations

 import base64
 import os

@ -31,6 +21,7 @@ from unittest import skipIf
 from twisted.python.filepath import (
     FilePath,
 )
+from twisted.python.runtime import platform
 from twisted.trial import unittest
 from twisted.internet import defer

@ -333,10 +324,8 @@ class TestCase(testutil.SignalMixin, unittest.TestCase):
         default = [("hello", "world")]
         self.assertEqual(config.items("nosuch", default), default)

-    @skipIf(
-        "win32" in sys.platform.lower() or "cygwin" in sys.platform.lower(),
-        "We don't know how to set permissions on Windows.",
-    )
+    @skipIf(platform.isWindows(), "We don't know how to set permissions on Windows.")
     @skipIf(os.getuid() == 0, "cannot test as superuser with all permissions")
     def test_private_config_unreadable(self):
         """
         Asking for inaccessible private config is an error

@ -351,10 +340,8 @@ class TestCase(testutil.SignalMixin, unittest.TestCase):
         with self.assertRaises(Exception):
             config.get_or_create_private_config("foo")

-    @skipIf(
-        "win32" in sys.platform.lower() or "cygwin" in sys.platform.lower(),
-        "We don't know how to set permissions on Windows.",
-    )
+    @skipIf(platform.isWindows(), "We don't know how to set permissions on Windows.")
     @skipIf(os.getuid() == 0, "cannot test as superuser with all permissions")
     def test_private_config_unreadable_preexisting(self):
         """
         error if reading private config data fails

@ -411,6 +398,7 @@ class TestCase(testutil.SignalMixin, unittest.TestCase):
         self.assertEqual(len(counter), 1) # don't call unless necessary
         self.assertEqual(value, "newer")

+    @skipIf(os.getuid() == 0, "cannot test as superuser with all permissions")
     def test_write_config_unwritable_file(self):
         """
         Existing behavior merely logs any errors upon writing
@ -784,13 +784,14 @@ storage:
         self.assertTrue(done.called)

     def test_should_we_use_http_default(self):
-        """Default is to not use HTTP; this will change eventually"""
+        """Default is to use HTTP."""
         basedir = self.mktemp()
         node_config = config_from_string(basedir, "", "")
         announcement = {ANONYMOUS_STORAGE_NURLS: ["pb://..."]}
-        self.assertFalse(
+        self.assertTrue(
             StorageFarmBroker._should_we_use_http(node_config, announcement)
         )
+        # Lacking NURLs, we can't use HTTP:
+        self.assertFalse(
+            StorageFarmBroker._should_we_use_http(node_config, {})
+        )
@ -42,6 +42,7 @@ from werkzeug.exceptions import NotFound as WNotFound
 from testtools.matchers import Equals
 from zope.interface import implementer

+from ..util.deferredutil import async_to_deferred
 from .common import SyncTestCase
 from ..storage.http_common import (
     get_content_type,

@ -59,6 +60,8 @@ from ..storage.http_server import (
     _authorized_route,
     StorageIndexConverter,
     _add_error_handling,
+    read_encoded,
+    _SCHEMAS as SERVER_SCHEMAS,
 )
 from ..storage.http_client import (
     StorageClient,

@ -172,7 +175,7 @@ class ExtractSecretsTests(SyncTestCase):
         ``ClientSecretsException``.
         """
         with self.assertRaises(ClientSecretsException):
-            _extract_secrets(["FOO eA=="], {})
+            _extract_secrets(["FOO eA=="], set())

     def test_bad_secret_not_base64(self):
         """

@ -303,6 +306,19 @@ class TestApp(object):
         request.transport.loseConnection()
         return Deferred()

+    @_authorized_route(_app, set(), "/read_body", methods=["POST"])
+    @async_to_deferred
+    async def read_body(self, request, authorization):
+        """
+        Accept an advise_corrupt_share message, return the reason.
+
+        I.e. exercise codepaths used for reading CBOR from the body.
+        """
+        data = await read_encoded(
+            self.clock, request, SERVER_SCHEMAS["advise_corrupt_share"]
+        )
+        return data["reason"]
+

 def result_of(d):
     """

@ -320,6 +336,7 @@ def result_of(d):
         + "This is probably a test design issue."
     )

+
 class CustomHTTPServerTests(SyncTestCase):
     """
     Tests that use a custom HTTP server.

@ -504,6 +521,40 @@ class CustomHTTPServerTests(SyncTestCase):
             result_of(d)
         self.assertEqual(len(self._http_server.clock.getDelayedCalls()), 0)

+    def test_request_with_no_content_type_same_as_cbor(self):
+        """
+        If no ``Content-Type`` header is set when sending a body, it is assumed
+        to be CBOR.
+        """
+        response = result_of(
+            self.client.request(
+                "POST",
+                DecodedURL.from_text("http://127.0.0.1/read_body"),
+                data=dumps({"reason": "test"}),
+            )
+        )
+        self.assertEqual(
+            result_of(limited_content(response, self._http_server.clock, 100)).read(),
+            b"test",
+        )
+
+    def test_request_with_wrong_content(self):
+        """
+        If a non-CBOR ``Content-Type`` header is set when sending a body, the
+        server complains appropriately.
+        """
+        headers = Headers()
+        headers.setRawHeaders("content-type", ["some/value"])
+        response = result_of(
+            self.client.request(
+                "POST",
+                DecodedURL.from_text("http://127.0.0.1/read_body"),
+                data=dumps({"reason": "test"}),
+                headers=headers,
+            )
+        )
+        self.assertEqual(response.code, http.UNSUPPORTED_MEDIA_TYPE)


 @implementer(IReactorFromThreads)
 class Reactor(Clock):
@ -197,7 +197,7 @@ class CreateOnion(unittest.TestCase):
         with mock.patch("allmydata.util.tor_provider.allocate_tcp_port",
                         return_value=999999):
             d = tor_provider.create_config(reactor, cli_config)
-            tahoe_config_tor, tor_port, tor_location = self.successResultOf(d)
+            tor_config = self.successResultOf(d)

         launch_tor.assert_called_with(reactor, executable,
                                       os.path.abspath(private_dir), txtorcon)

@ -214,10 +214,10 @@
         }
         if executable:
             expected["tor.executable"] = executable
-        self.assertEqual(tahoe_config_tor, expected)
-        self.assertEqual(tor_port, "tcp:999999:interface=127.0.0.1")
-        self.assertEqual(tor_location, "tor:ONION.onion:3457")
-        fn = os.path.join(basedir, tahoe_config_tor["onion.private_key_file"])
+        self.assertEqual(dict(tor_config.node_config["tor"]), expected)
+        self.assertEqual(tor_config.tub_ports, ["tcp:999999:interface=127.0.0.1"])
+        self.assertEqual(tor_config.tub_locations, ["tor:ONION.onion:3457"])
+        fn = os.path.join(basedir, dict(tor_config.node_config["tor"])["onion.private_key_file"])
         with open(fn, "rb") as f:
             privkey = f.read()
         self.assertEqual(privkey, b"privkey")

@ -251,7 +251,7 @@ class CreateOnion(unittest.TestCase):
         with mock.patch("allmydata.util.tor_provider.allocate_tcp_port",
                         return_value=999999):
             d = tor_provider.create_config(reactor, cli_config)
-            tahoe_config_tor, tor_port, tor_location = self.successResultOf(d)
+            tor_config = self.successResultOf(d)

         connect_to_tor.assert_called_with(reactor, cli_config, txtorcon)
         txtorcon.EphemeralHiddenService.assert_called_with("3457 127.0.0.1:999999")

@ -265,10 +265,10 @@
             "onion.private_key_file": os.path.join("private",
                                                    "tor_onion.privkey"),
         }
-        self.assertEqual(tahoe_config_tor, expected)
-        self.assertEqual(tor_port, "tcp:999999:interface=127.0.0.1")
-        self.assertEqual(tor_location, "tor:ONION.onion:3457")
-        fn = os.path.join(basedir, tahoe_config_tor["onion.private_key_file"])
+        self.assertEqual(dict(tor_config.node_config["tor"]), expected)
+        self.assertEqual(tor_config.tub_ports, ["tcp:999999:interface=127.0.0.1"])
+        self.assertEqual(tor_config.tub_locations, ["tor:ONION.onion:3457"])
+        fn = os.path.join(basedir, dict(tor_config.node_config["tor"])["onion.private_key_file"])
         with open(fn, "rb") as f:
             privkey = f.read()
         self.assertEqual(privkey, b"privkey")
@ -2,6 +2,23 @@
 Tools to mess with dicts.
 """

+from __future__ import annotations
+from typing import Callable, TypeVar
+
+K = TypeVar("K")
+V = TypeVar("V")
+
+def filter(pred: Callable[[V], bool], orig: dict[K, V]) -> dict[K, V]:
+    """
+    Filter out key/value pairs whose value fails to match a predicate.
+    """
+    return {
+        k: v
+        for (k, v)
+        in orig.items()
+        if pred(v)
+    }
+
 class DictOfSets(dict):
     def add(self, key, value):
         if key in self:
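Note the docstring reads "filter out", but the returned dict keeps the matching pairs; a tiny standalone check (illustrative, mirroring the call sites added in create_node.py):

from allmydata.util import dictutil

listeners = {"tor": "hides-ip", "tcp": "public"}
assert dictutil.filter(lambda v: v == "hides-ip", listeners) == {"tor": "hides-ip"}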
@ -1,14 +1,9 @@
 # -*- coding: utf-8 -*-
 """
 Ported to Python 3.
 """
-from __future__ import absolute_import, print_function, with_statement
-from __future__ import division
-from __future__ import unicode_literals
-
-from future.utils import PY2
-if PY2:
-    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+from __future__ import annotations
+
+from typing import Any
+from typing_extensions import Literal

 import os

@ -20,12 +15,15 @@ from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.internet.endpoints import clientFromString
 from twisted.internet.error import ConnectionRefusedError, ConnectError
 from twisted.application import service
+from twisted.python.usage import Options

+from ..listeners import ListenerConfig
 from ..interfaces import (
     IAddressFamily,
 )
+from ..node import _Config

-def create(reactor, config):
+def create(reactor: Any, config: _Config) -> IAddressFamily:
     """
     Create a new Provider service (this is an IService so must be
     hooked up to a parent or otherwise started).

@ -55,6 +53,21 @@ def _import_txi2p():
     except ImportError: # pragma: no cover
         return None

+def is_available() -> bool:
+    """
+    Can this type of listener actually be used in this runtime
+    environment?
+
+    If its dependencies are missing then it cannot be.
+    """
+    return not (_import_i2p() is None or _import_txi2p() is None)
+
+def can_hide_ip() -> Literal[True]:
+    """
+    Can the transport supported by this type of listener conceal the
+    node's public internet address from peers?
+    """
+    return True

 def _try_to_connect(reactor, endpoint_desc, stdout, txi2p):
     # yields True or None

@ -97,29 +110,35 @@ def _connect_to_i2p(reactor, cli_config, txi2p):
     else:
         raise ValueError("unable to reach any default I2P SAM port")

-@inlineCallbacks
-def create_config(reactor, cli_config):
+async def create_config(reactor: Any, cli_config: Options) -> ListenerConfig:
+    """
+    For a given set of command-line options, construct an I2P listener.
+
+    This includes allocating a new I2P address.
+    """
     txi2p = _import_txi2p()
     if not txi2p:
         raise ValueError("Cannot create I2P Destination without txi2p. "
                          "Please 'pip install tahoe-lafs[i2p]' to fix this.")
-    tahoe_config_i2p = {} # written into tahoe.cfg:[i2p]
+    tahoe_config_i2p = [] # written into tahoe.cfg:[i2p]
     private_dir = os.path.abspath(os.path.join(cli_config["basedir"], "private"))
-    stdout = cli_config.stdout
+    # XXX We shouldn't carry stdout around by jamming it into the Options
+    # value.  See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/4048
+    stdout = cli_config.stdout  # type: ignore[attr-defined]
     if cli_config["i2p-launch"]:
         raise NotImplementedError("--i2p-launch is under development.")
     else:
         print("connecting to I2P (to allocate .i2p address)..", file=stdout)
-        sam_port = yield _connect_to_i2p(reactor, cli_config, txi2p)
+        sam_port = await _connect_to_i2p(reactor, cli_config, txi2p)
         print("I2P connection established", file=stdout)
-    tahoe_config_i2p["sam.port"] = sam_port
+    tahoe_config_i2p.append(("sam.port", sam_port))

     external_port = 3457 # TODO: pick this randomly? there's no contention.

     privkeyfile = os.path.join(private_dir, "i2p_dest.privkey")
     sam_endpoint = clientFromString(reactor, sam_port)
     print("allocating .i2p address...", file=stdout)
-    dest = yield txi2p.generateDestination(reactor, privkeyfile, 'SAM', sam_endpoint)
+    dest = await txi2p.generateDestination(reactor, privkeyfile, 'SAM', sam_endpoint)
     print(".i2p address allocated", file=stdout)
     i2p_port = "listen:i2p" # means "see [i2p]", calls Provider.get_listener()
     i2p_location = "i2p:%s:%d" % (dest.host, external_port)

@ -132,10 +151,11 @@ def create_config(reactor, cli_config):
     # * "private_key_file" points to the on-disk copy of the private key
     #   material (although we always write it to the same place)

-    tahoe_config_i2p["dest"] = "true"
-    tahoe_config_i2p["dest.port"] = str(external_port)
-    tahoe_config_i2p["dest.private_key_file"] = os.path.join("private",
-                                                             "i2p_dest.privkey")
+    tahoe_config_i2p.extend([
+        ("dest", "true"),
+        ("dest.port", str(external_port)),
+        ("dest.private_key_file", os.path.join("private", "i2p_dest.privkey")),
+    ])

     # tahoe_config_i2p: this is a dictionary of keys/values to add to the
     # "[i2p]" section of tahoe.cfg, which tells the new node how to launch

@ -149,7 +169,7 @@ def create_config(reactor, cli_config):
     # at both create-node and startup time. The data directory is not
     # recorded in tahoe.cfg

-    returnValue((tahoe_config_i2p, i2p_port, i2p_location))
+    return ListenerConfig([i2p_port], [i2p_location], {"i2p": tahoe_config_i2p})


 @implementer(IAddressFamily)
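Under this change the returned ListenerConfig carries the [i2p] items as ordered pairs, and write_node_config (earlier in this commit) renders them into tahoe.cfg roughly like this (sketch; values invented, path shown POSIX-style):

section_items = [
    ("sam.port", "tcp:127.0.0.1:7656"),
    ("dest", "true"),
    ("dest.port", "3457"),
    ("dest.private_key_file", "private/i2p_dest.privkey"),
]
lines = ["[i2p]"] + [f"{k} = {v}" for k, v in section_items]
print("\n".join(lines))
# [i2p]
# sam.port = tcp:127.0.0.1:7656
# dest = true
# ...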
@ -1,17 +1,10 @@
 """
 Utilities for getting IP addresses.

 Ported to Python 3.
 """

-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2, native_str
-if PY2:
-    from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+from future.utils import native_str
+from typing import Callable

 import os, socket

@ -39,6 +32,7 @@ from .gcutil import (

 fcntl = requireModule("fcntl")

+allocate_tcp_port: Callable[[], int]
 from foolscap.util import allocate_tcp_port # re-exported

 try:
@ -1,11 +1,8 @@
 # -*- coding: utf-8 -*-
 """
 Ported to Python 3.
 """
+from __future__ import annotations

-from typing import Optional
+from typing import Any
 from typing_extensions import Literal
 import os

@ -16,12 +13,14 @@ from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.internet.endpoints import clientFromString, TCP4ServerEndpoint
 from twisted.internet.error import ConnectionRefusedError, ConnectError
 from twisted.application import service
+from twisted.python.usage import Options

 from .observer import OneShotObserverList
 from .iputil import allocate_tcp_port
 from ..interfaces import (
     IAddressFamily,
 )
+from ..listeners import ListenerConfig


 def _import_tor():

@ -38,7 +37,13 @@ def _import_txtorcon():
     except ImportError: # pragma: no cover
         return None

-def create(reactor, config, import_tor=None, import_txtorcon=None) -> Optional[_Provider]:
+def can_hide_ip() -> Literal[True]:
+    return True
+
+def is_available() -> bool:
+    return not (_import_tor() is None or _import_txtorcon() is None)
+
+def create(reactor, config, import_tor=None, import_txtorcon=None) -> _Provider:
     """
     Create a new _Provider service (this is an IService so must be
     hooked up to a parent or otherwise started).

@ -150,31 +155,32 @@ def _connect_to_tor(reactor, cli_config, txtorcon):
     else:
         raise ValueError("unable to reach any default Tor control port")

-@inlineCallbacks
-def create_config(reactor, cli_config):
+async def create_config(reactor: Any, cli_config: Options) -> ListenerConfig:
     txtorcon = _import_txtorcon()
     if not txtorcon:
         raise ValueError("Cannot create onion without txtorcon. "
                          "Please 'pip install tahoe-lafs[tor]' to fix this.")
-    tahoe_config_tor = {} # written into tahoe.cfg:[tor]
+    tahoe_config_tor = [] # written into tahoe.cfg:[tor]
     private_dir = os.path.abspath(os.path.join(cli_config["basedir"], "private"))
-    stdout = cli_config.stdout
+    # XXX We shouldn't carry stdout around by jamming it into the Options
+    # value.  See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/4048
+    stdout = cli_config.stdout  # type: ignore[attr-defined]
     if cli_config["tor-launch"]:
-        tahoe_config_tor["launch"] = "true"
+        tahoe_config_tor.append(("launch", "true"))
         tor_executable = cli_config["tor-executable"]
         if tor_executable:
-            tahoe_config_tor["tor.executable"] = tor_executable
+            tahoe_config_tor.append(("tor.executable", tor_executable))
         print("launching Tor (to allocate .onion address)..", file=stdout)
-        (_, tor) = yield _launch_tor(
+        (_, tor) = await _launch_tor(
             reactor, tor_executable, private_dir, txtorcon)
         tor_control_proto = tor.protocol
         print("Tor launched", file=stdout)
     else:
         print("connecting to Tor (to allocate .onion address)..", file=stdout)
-        (port, tor_control_proto) = yield _connect_to_tor(
+        (port, tor_control_proto) = await _connect_to_tor(
             reactor, cli_config, txtorcon)
         print("Tor connection established", file=stdout)
-        tahoe_config_tor["control.port"] = port
+        tahoe_config_tor.append(("control.port", port))

     external_port = 3457 # TODO: pick this randomly? there's no contention.

@ -183,12 +189,12 @@ def create_config(reactor, cli_config):
         "%d 127.0.0.1:%d" % (external_port, local_port)
     )
     print("allocating .onion address (takes ~40s)..", file=stdout)
-    yield ehs.add_to_tor(tor_control_proto)
+    await ehs.add_to_tor(tor_control_proto)
     print(".onion address allocated", file=stdout)
     tor_port = "tcp:%d:interface=127.0.0.1" % local_port
     tor_location = "tor:%s:%d" % (ehs.hostname, external_port)
     privkey = ehs.private_key
-    yield ehs.remove_from_tor(tor_control_proto)
+    await ehs.remove_from_tor(tor_control_proto)

     # in addition to the "how to launch/connect-to tor" keys above, we also
     # record information about the onion service into tahoe.cfg.

@ -200,12 +206,12 @@ def create_config(reactor, cli_config):
     # * "private_key_file" points to the on-disk copy of the private key
     #   material (although we always write it to the same place)

-    tahoe_config_tor["onion"] = "true"
-    tahoe_config_tor["onion.local_port"] = str(local_port)
-    tahoe_config_tor["onion.external_port"] = str(external_port)
     assert privkey
-    tahoe_config_tor["onion.private_key_file"] = os.path.join("private",
-                                                              "tor_onion.privkey")
+    tahoe_config_tor.extend([
+        ("onion", "true"),
+        ("onion.local_port", str(local_port)),
+        ("onion.external_port", str(external_port)),
+        ("onion.private_key_file", os.path.join("private", "tor_onion.privkey")),
+    ])
     privkeyfile = os.path.join(private_dir, "tor_onion.privkey")
     with open(privkeyfile, "wb") as f:
         if isinstance(privkey, str):

@ -224,7 +230,11 @@ def create_config(reactor, cli_config):
     # at both create-node and startup time. The data directory is not
     # recorded in tahoe.cfg

-    returnValue((tahoe_config_tor, tor_port, tor_location))
+    return ListenerConfig(
+        [tor_port],
+        [tor_location],
+        {"tor": tahoe_config_tor},
+    )


 @implementer(IAddressFamily)
tox.ini (7 changed lines)
@ -99,7 +99,7 @@ skip_install = true
 deps =
     # Pin a specific version so we get consistent outcomes; update this
     # occasionally:
-    ruff == 0.0.263
+    ruff == 0.0.278
     # towncrier doesn't work with importlib_resources 6.0.0
     # https://github.com/twisted/towncrier/issues/528
     importlib_resources < 6.0.0

@ -125,9 +125,8 @@ commands =
 [testenv:typechecks]
 basepython = python3
 deps =
-    mypy==1.3.0
-    # When 0.9.2 comes out it will work with 1.3, it's just unreleased at the moment...
-    git+https://github.com/shoobx/mypy-zope@f276030
+    mypy==1.4.1
+    mypy-zope
     types-mock
     types-six
     types-PyYAML