Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2024-12-20 05:28:04 +00:00)

Commit 91490fc2d5: Merge remote-tracking branch 'origin/master' into 3528.test_cli-no-mock
@@ -29,7 +29,7 @@ workflows:
      - "debian-9": &DOCKERHUB_CONTEXT
          context: "dockerhub-auth"

      - "debian-8":
      - "debian-10":
          <<: *DOCKERHUB_CONTEXT
          requires:
            - "debian-9"
@@ -86,9 +86,7 @@ workflows:
      # integration tests.
      - "debian-9"

      # Generate the underlying data for a visualization to aid with Python 3
      # porting.
      - "build-porting-depgraph":
      - "typechecks":
          <<: *DOCKERHUB_CONTEXT

  images:
@@ -104,7 +102,7 @@ workflows:
      - "master"

    jobs:
      - "build-image-debian-8":
      - "build-image-debian-10":
          <<: *DOCKERHUB_CONTEXT
      - "build-image-debian-9":
          <<: *DOCKERHUB_CONTEXT
@@ -210,7 +208,7 @@ jobs:
      # filenames and argv).
      LANG: "en_US.UTF-8"
      # Select a tox environment to run for this job.
      TAHOE_LAFS_TOX_ENVIRONMENT: "py27-coverage"
      TAHOE_LAFS_TOX_ENVIRONMENT: "py27"
      # Additional arguments to pass to tox.
      TAHOE_LAFS_TOX_ARGS: ""
      # The path in which test artifacts will be placed.
@@ -220,7 +218,7 @@ jobs:
      WHEELHOUSE_PATH: &WHEELHOUSE_PATH "/tmp/wheelhouse"
      PIP_FIND_LINKS: "file:///tmp/wheelhouse"
      # Upload the coverage report.
      UPLOAD_COVERAGE: "yes"
      UPLOAD_COVERAGE: ""

    # pip cannot install packages if the working directory is not readable.
    # We want to run a lot of steps as nobody instead of as root.
@@ -274,11 +272,11 @@ jobs:
            fi


  debian-8:
  debian-10:
    <<: *DEBIAN
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/debian:8-py2.7"
        image: "tahoelafsci/debian:10-py2.7"
        user: "nobody"


@@ -373,7 +371,7 @@ jobs:
      # this reporter on Python 3. So drop that and just specify the
      # reporter.
      TAHOE_LAFS_TRIAL_ARGS: "--reporter=subunitv2-file"
      TAHOE_LAFS_TOX_ENVIRONMENT: "py36-coverage"
      TAHOE_LAFS_TOX_ENVIRONMENT: "py36"


  ubuntu-20-04:
@@ -448,32 +446,17 @@ jobs:
            # them in parallel.
            nix-build --cores 3 --max-jobs 2 nix/

  # Generate up-to-date data for the dependency graph visualizer.
  build-porting-depgraph:
    # Get a system in which we can easily install Tahoe-LAFS and all its
    # dependencies. The dependency graph analyzer works by executing the code.
    # It's Python, what do you expect?
    <<: *DEBIAN
  typechecks:
    docker:
      - <<: *DOCKERHUB_AUTH
        image: "tahoelafsci/ubuntu:18.04-py3"

    steps:
      - "checkout"

      - add_ssh_keys:
          fingerprints:
            # Jean-Paul Calderone <exarkun@twistedmatrix.com> (CircleCI depgraph key)
            # This lets us push to tahoe-lafs/tahoe-depgraph in the next step.
            - "86:38:18:a7:c0:97:42:43:18:46:55:d6:21:b0:5f:d4"

      - run:
          name: "Setup Python Environment"
          name: "Validate Types"
          command: |
            /tmp/venv/bin/pip install -e /tmp/project

      - run:
          name: "Generate dependency graph data"
          command: |
            . /tmp/venv/bin/activate
            ./misc/python3/depgraph.sh
            /tmp/venv/bin/tox -e typechecks

  build-image: &BUILD_IMAGE
    # This is a template for a job to build a Docker image that has as much of
@@ -514,12 +497,12 @@ jobs:
            docker push tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION}


  build-image-debian-8:
  build-image-debian-10:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "debian"
      TAG: "8"
      TAG: "10"
      PYTHON_VERSION: "2.7"

42  .codecov.yml
@@ -1,42 +0,0 @@
# Override defaults for codecov.io checks.
#
# Documentation is at https://docs.codecov.io/docs/codecov-yaml;
# reference is at https://docs.codecov.io/docs/codecovyml-reference.
#
# To validate this file, use:
#
#   curl --data-binary @.codecov.yml https://codecov.io/validate
#
# Codecov's defaults seem to leave red marks in GitHub CI checks in a
# rather arbitrary manner, probably because of non-determinism in
# coverage (see https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2891)
# and maybe because computers are bad with floating point numbers.

# Allow coverage percentage a precision of zero decimals, and round to
# the nearest number (for example, 89.957 to to 90; 89.497 to 89%).
# Coverage above 90% is good, below 80% is bad.
coverage:
  round: nearest
  range: 80..90
  precision: 0

  # Aim for a target test coverage of 90% in codecov/project check (do
  # not allow project coverage to drop below that), and allow
  # codecov/patch a threshold of 1% (allow coverage in changes to drop
  # by that much, and no less). That should be good enough for us.
  status:
    project:
      default:
        target: 90%
        threshold: 1%
    patch:
      default:
        threshold: 1%


codecov:
  # This is a public repository so supposedly we don't "need" to use an upload
  # token. However, using one makes sure that CI jobs running against forked
  # repositories have coverage uploaded to the right place in codecov so
  # their reports aren't incomplete.
  token: "abf679b6-e2e6-4b33-b7b5-6cfbd41ee691"
2  .github/CONTRIBUTING.rst (vendored)
@@ -17,4 +17,4 @@ Examples of contributions include:
* `Patch reviews <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/PatchReviewProcess>`_

Before authoring or reviewing a patch,
please familiarize yourself with the `coding standard <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CodingStandards>`_.
please familiarize yourself with the `Coding Standards <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CodingStandards>`_ and the `Contributor Code of Conduct <../docs/CODE_OF_CONDUCT.md>`_.
171  .github/workflows/ci.yml (vendored)
@@ -30,17 +30,37 @@ jobs:
        with:
          args: install vcpython27

      # See https://github.com/actions/checkout. A fetch-depth of 0
      # fetches all tags and branches.
      - name: Check out Tahoe-LAFS sources
        uses: actions/checkout@v2

      - name: Fetch all history for all tags and branches
        run: git fetch --prune --unshallow
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}

      # To use pip caching with GitHub Actions in an OS-independent
      # manner, we need `pip cache dir` command, which became
      # available since pip v20.1+. At the time of writing this,
      # GitHub Actions offers pip v20.3.3 for both ubuntu-latest and
      # windows-latest, and pip v20.3.1 for macos-latest.
      - name: Get pip cache directory
        id: pip-cache
        run: |
          echo "::set-output name=dir::$(pip cache dir)"

      # See https://github.com/actions/cache
      - name: Use pip cache
        uses: actions/cache@v2
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Install Python packages
        run: |
          pip install --upgrade codecov tox setuptools
@@ -59,11 +79,110 @@ jobs:
          name: eliot.log
          path: eliot.log

      - name: Upload coverage report
        uses: codecov/codecov-action@v1
        with:
          token: abf679b6-e2e6-4b33-b7b5-6cfbd41ee691
          file: coverage.xml
      # Upload this job's coverage data to Coveralls. While there is a GitHub
      # Action for this, as of Jan 2021 it does not support Python coverage
      # files - only lcov files. Therefore, we use coveralls-python, the
      # coveralls.io-supplied Python reporter, for this.
      - name: "Report Coverage to Coveralls"
        run: |
          pip install coveralls
          python -m coveralls
        env:
          # Some magic value required for some magic reason.
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
          # Help coveralls identify our project.
          COVERALLS_REPO_TOKEN: "JPf16rLB7T2yjgATIxFzTsEgMdN1UNq6o"
          # Every source of coverage reports needs a unique "flag name".
          # Construct one by smashing a few variables from the matrix together
          # here.
          COVERALLS_FLAG_NAME: "run-${{ matrix.os }}-${{ matrix.python-version }}"
          # Mark the data as just one piece of many because we have more than
          # one instance of this job (Windows, macOS) which collects and
          # reports coverage. This is necessary to cause Coveralls to merge
          # multiple coverage results into a single report. Note the merge
          # only happens when we "finish" a particular build, as identified by
          # its "build_num" (aka "service_number").
          COVERALLS_PARALLEL: true

  # Tell Coveralls that we're done reporting coverage data. Since we're using
  # the "parallel" mode where more than one coverage data file is merged into
  # a single report, we have to tell Coveralls when we've uploaded all of the
  # data files. This does it. We make sure it runs last by making it depend
  # on *all* of the coverage-collecting jobs.
  finish-coverage-report:
    # There happens to just be one coverage-collecting job at the moment. If
    # the coverage reports are broken and someone added more
    # coverage-collecting jobs to this workflow but didn't update this, that's
    # why.
    needs:
      - "coverage"
    runs-on: "ubuntu-latest"
    steps:
      - name: "Check out Tahoe-LAFS sources"
        uses: "actions/checkout@v2"

      - name: "Finish Coveralls Reporting"
        run: |
          # coveralls-python does have a `--finish` option but it doesn't seem
          # to work, at least for us.
          # https://github.com/coveralls-clients/coveralls-python/issues/248
          #
          # But all it does is this simple POST so we can just send it
          # ourselves. The only hard part is guessing what the POST
          # parameters mean. And I've done that for you already.
          #
          # Since the build is done I'm going to guess that "done" is a fine
          # value for status.
          #
          # That leaves "build_num". The coveralls documentation gives some
          # hints about it. It suggests using $CIRCLE_WORKFLOW_ID if your job
          # is on CircleCI. CircleCI documentation says this about
          # CIRCLE_WORKFLOW_ID:
          #
          # Observation of the coveralls.io web interface, logs from the
          # coveralls command in action, and experimentation suggests the
          # value for PRs is something more like:
          #
          #   <GIT MERGE COMMIT HASH>-PR-<PR NUM>
          #
          # For branches, it's just the git branch tip hash.

          # For pull requests, refs/pull/<PR NUM>/merge was just checked out
          # by so HEAD will refer to the right revision. For branches, HEAD
          # is also the tip of the branch.
          REV=$(git rev-parse HEAD)

          # We can get the PR number from the "context".
          #
          # https://docs.github.com/en/free-pro-team@latest/developers/webhooks-and-events/webhook-events-and-payloads#pull_request
          #
          # (via <https://github.community/t/github-ref-is-inconsistent/17728/3>).
          #
          # If this is a pull request, `github.event` is a `pull_request`
          # structure which has `number` right in it.
          #
          # If this is a push, `github.event` is a `push` instead but we only
          # need the revision to construct the build_num.

          PR=${{ github.event.number }}

          if [ "${PR}" = "" ]; then
            BUILD_NUM=$REV
          else
            BUILD_NUM=$REV-PR-$PR
          fi
          REPO_NAME=$GITHUB_REPOSITORY

          curl \
            -k \
            https://coveralls.io/webhook?repo_token=$COVERALLS_REPO_TOKEN \
            -d \
            "payload[build_num]=$BUILD_NUM&payload[status]=done&payload[repo_name]=$REPO_NAME"
        env:
          # Some magic value required for some magic reason.
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
          # Help coveralls identify our project.
          COVERALLS_REPO_TOKEN: "JPf16rLB7T2yjgATIxFzTsEgMdN1UNq6o"

  integration:
    runs-on: ${{ matrix.os }}
@@ -103,15 +222,27 @@ jobs:

      - name: Check out Tahoe-LAFS sources
        uses: actions/checkout@v2

      - name: Fetch all history for all tags and branches
        run: git fetch --prune --unshallow
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}

      - name: Get pip cache directory
        id: pip-cache
        run: |
          echo "::set-output name=dir::$(pip cache dir)"

      - name: Use pip cache
        uses: actions/cache@v2
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Install Python packages
        run: |
          pip install --upgrade tox
@@ -155,15 +286,27 @@ jobs:

      - name: Check out Tahoe-LAFS sources
        uses: actions/checkout@v2

      - name: Fetch all history for all tags and branches
        run: git fetch --prune --unshallow
        with:
          fetch-depth: 0

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}

      - name: Get pip cache directory
        id: pip-cache
        run: |
          echo "::set-output name=dir::$(pip cache dir)"

      - name: Use pip cache
        uses: actions/cache@v2
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }}
          restore-keys: |
            ${{ runner.os }}-pip-

      - name: Install Python packages
        run: |
          pip install --upgrade tox

7  CREDITS
@@ -206,4 +206,9 @@ D: various bug-fixes and features

N: Viktoriia Savchuk
W: https://twitter.com/viktoriiasvchk
D: Developer community focused improvements on the README file.
D: Developer community focused improvements on the README file.

N: Lukas Pirl
E: tahoe@lukas-pirl.de
W: http://lukas-pirl.de
D: Buildslaves (Debian, Fedora, CentOS; 2016-2021)

@@ -6,7 +6,7 @@ Free and Open decentralized data store

`Tahoe-LAFS <https://www.tahoe-lafs.org>`__ (Tahoe Least-Authority File Store) is the first free software / open-source storage technology that distributes your data across multiple servers. Even if some servers fail or are taken over by an attacker, the entire file store continues to function correctly, preserving your privacy and security.

|Contributor Covenant| |readthedocs| |travis| |circleci| |codecov|
|Contributor Covenant| |readthedocs| |travis| |circleci| |coveralls|


Table of contents
@@ -125,9 +125,9 @@ See `TGPPL.PDF <https://tahoe-lafs.org/~zooko/tgppl.pdf>`__ for why the TGPPL ex
.. |circleci| image:: https://circleci.com/gh/tahoe-lafs/tahoe-lafs.svg?style=svg
    :target: https://circleci.com/gh/tahoe-lafs/tahoe-lafs

.. |codecov| image:: https://codecov.io/github/tahoe-lafs/tahoe-lafs/coverage.svg?branch=master
    :alt: test coverage percentage
    :target: https://codecov.io/github/tahoe-lafs/tahoe-lafs?branch=master
.. |coveralls| image:: https://coveralls.io/repos/github/tahoe-lafs/tahoe-lafs/badge.svg
    :alt: code coverage
    :target: https://coveralls.io/github/tahoe-lafs/tahoe-lafs

.. |Contributor Covenant| image:: https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg
    :alt: code of conduct

@@ -173,7 +173,9 @@ from PyPI with ``venv/bin/pip install tahoe-lafs``. After installation, run
Install From a Source Tarball
-----------------------------

You can also install directly from the source tarball URL::
You can also install directly from the source tarball URL. To verify
signatures, first see verifying_signatures_ and replace the URL in the
following instructions with the local filename.

 % virtualenv venv
 New python executable in ~/venv/bin/python2.7
@@ -189,6 +191,40 @@ You can also install directly from the source tarball URL::
     tahoe-lafs: 1.14.0
     ...

.. _verifying_signatures:

Verifying Signatures
--------------------

First download the source tarball and then any signatures. There are several
developers who are able to produce signatures for a release. A release may
have multiple signatures. All should be valid and you should confirm at least
one of them (ideally, confirm all).

This statement, signed by the existing Tahoe release-signing key, attests to
those developers authorized to sign a Tahoe release:

.. include:: developer-release-signatures
    :code:

Signatures are made available beside the release. So for example, a release
like ``https://tahoe-lafs.org/downloads/tahoe-lafs-1.16.0.tar.bz2`` might
have signatures ``tahoe-lafs-1.16.0.tar.bz2.meejah.asc`` and
``tahoe-lafs-1.16.0.tar.bz2.warner.asc``.

To verify the signatures using GnuPG::

 % gpg --verify tahoe-lafs-1.16.0.tar.bz2.meejah.asc tahoe-lafs-1.16.0.tar.bz2
 gpg: Signature made XXX
 gpg: using RSA key 9D5A2BD5688ECB889DEBCD3FC2602803128069A7
 gpg: Good signature from "meejah <meejah@meejah.ca>" [full]
 % gpg --verify tahoe-lafs-1.16.0.tar.bz2.warner.asc tahoe-lafs-1.16.0.tar.bz2
 gpg: Signature made XXX
 gpg: using RSA key 967EFE06699872411A77DF36D43B4C9C73225AAF
 gpg: Good signature from "Brian Warner <warner@lothar.com>" [full]


Extras
------

@@ -67,12 +67,12 @@ Here's how it works:

A "storage grid" is made up of a number of storage servers. A storage server
has direct attached storage (typically one or more hard disks). A "gateway"
communicates with storage nodes, and uses them to provide access to the
grid over protocols such as HTTP(S), SFTP or FTP.
grid over protocols such as HTTP(S) and SFTP.

Note that you can find "client" used to refer to gateway nodes (which act as
a client to storage servers), and also to processes or programs connecting to
a gateway node and performing operations on the grid -- for example, a CLI
command, Web browser, SFTP client, or FTP client.
command, Web browser, or SFTP client.

Users do not rely on storage servers to provide *confidentiality* nor
*integrity* for their data -- instead all of the data is encrypted and
@@ -28,7 +28,7 @@ import os

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
extensions = ['recommonmark']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -36,7 +36,7 @@ templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
source_suffix = ['.rst', '.md']

# The encoding of source files.
#source_encoding = 'utf-8-sig'
@@ -81,7 +81,6 @@ Client/server nodes provide one or more of the following services:

* web-API service
* SFTP service
* FTP service
* helper service
* storage service.

@@ -708,12 +707,12 @@ CLI
    file store, uploading/downloading files, and creating/running Tahoe
    nodes. See :doc:`frontends/CLI` for details.

SFTP, FTP
SFTP

    Tahoe can also run both SFTP and FTP servers, and map a username/password
    Tahoe can also run SFTP servers, and map a username/password
    pair to a top-level Tahoe directory. See :doc:`frontends/FTP-and-SFTP`
    for instructions on configuring these services, and the ``[sftpd]`` and
    ``[ftpd]`` sections of ``tahoe.cfg``.
    for instructions on configuring this service, and the ``[sftpd]``
    section of ``tahoe.cfg``.


Storage Server Configuration
1  docs/contributing.rst (new file)
@@ -0,0 +1 @@
.. include:: ../.github/CONTRIBUTING.rst

42  docs/developer-release-signatures (new file)
@@ -0,0 +1,42 @@
-----BEGIN PGP SIGNED MESSAGE-----
Hash: SHA512


January 20, 2021

Any of the following core Tahoe contributers may sign a release. Each
release MUST be signed by at least one developer but MAY have
additional signatures. Each developer independently produces a
signature which is made available beside Tahoe releases after 1.15.0

This statement is signed by the existing Tahoe release key. Any future
such statements may be signed by it OR by any two developers (for
example, to add or remove developers from the list).

meejah
0xC2602803128069A7
9D5A 2BD5 688E CB88 9DEB CD3F C260 2803 1280 69A7
https://meejah.ca/meejah.asc

jean-paul calderone (exarkun)
0xE27B085EDEAA4B1B
96B9 C5DA B2EA 9EB6 7941 9DB7 E27B 085E DEAA 4B1B
https://twistedmatrix.com/~exarkun/E27B085EDEAA4B1B.asc

brian warner (lothar)
0x863333C265497810
5810 F125 7F8C F753 7753 895A 8633 33C2 6549 7810
https://www.lothar.com/warner-gpg.html


-----BEGIN PGP SIGNATURE-----

iQEzBAEBCgAdFiEE405i0G0Oac/KQXn/veDTHWhmanoFAmAHIyIACgkQveDTHWhm
anqhqQf/YSbMXL+gwFhAZsjX39EVlbr/Ik7WPPkJW7v1oHybTnwFpFIc52COU1x/
sqRfk4OyYtz9IBgOPXoWgXu9R4qdK6vYKxEsekcGT9C5l0OyDz8YWXEWgbGK5mvI
aEub9WucD8r2uOQnnW6DtznFuEpvOjtf/+2BU767+bvLsbViW88ocbuLfCqLdOgD
WZT9j3M+Y2Dc56DAJzP/4fkrUSVIofZStYp5u9HBjburgcYIp0g/cyc4xXRoi6Mp
lFTRFv3MIjmoamzSQseoIgP6fi8QRqPrffPrsyqAp+06mJnPhxxFqxtO/ZErmpSa
+BGrLBxdWa8IF9U1A4Fs5nuAzAKMEg==
=E9J+
-----END PGP SIGNATURE-----
@@ -1,22 +1,21 @@
.. -*- coding: utf-8-with-signature -*-

=================================
Tahoe-LAFS SFTP and FTP Frontends
=================================
========================
Tahoe-LAFS SFTP Frontend
========================

1. `SFTP/FTP Background`_
1. `SFTP Background`_
2. `Tahoe-LAFS Support`_
3. `Creating an Account File`_
4. `Running An Account Server (accounts.url)`_
5. `Configuring SFTP Access`_
6. `Configuring FTP Access`_
7. `Dependencies`_
8. `Immutable and Mutable Files`_
9. `Known Issues`_
6. `Dependencies`_
7. `Immutable and Mutable Files`_
8. `Known Issues`_


SFTP/FTP Background
===================
SFTP Background
===============

FTP is the venerable internet file-transfer protocol, first developed in
1971. The FTP server usually listens on port 21. A separate connection is
@@ -33,20 +32,18 @@ Both FTP and SFTP were developed assuming a UNIX-like server, with accounts
and passwords, octal file modes (user/group/other, read/write/execute), and
ctime/mtime timestamps.

We recommend SFTP over FTP, because the protocol is better, and the server
implementation in Tahoe-LAFS is more complete. See `Known Issues`_, below,
for details.
Previous versions of Tahoe-LAFS supported FTP, but now only the superior SFTP
frontend is supported. See `Known Issues`_, below, for details on the
limitations of SFTP.

Tahoe-LAFS Support
==================

All Tahoe-LAFS client nodes can run a frontend SFTP server, allowing regular
SFTP clients (like ``/usr/bin/sftp``, the ``sshfs`` FUSE plugin, and many
others) to access the file store. They can also run an FTP server, so FTP
clients (like ``/usr/bin/ftp``, ``ncftp``, and others) can too. These
frontends sit at the same level as the web-API interface.
others) to access the file store.

Since Tahoe-LAFS does not use user accounts or passwords, the SFTP/FTP
Since Tahoe-LAFS does not use user accounts or passwords, the SFTP
servers must be configured with a way to first authenticate a user (confirm
that a prospective client has a legitimate claim to whatever authorities we
might grant a particular user), and second to decide what directory cap
@@ -173,39 +170,6 @@ clients and with the sshfs filesystem, see wiki:SftpFrontend_

.. _wiki:SftpFrontend: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/SftpFrontend

Configuring FTP Access
======================

To enable the FTP server with an accounts file, add the following lines to
the BASEDIR/tahoe.cfg file::

  [ftpd]
  enabled = true
  port = tcp:8021:interface=127.0.0.1
  accounts.file = private/accounts

The FTP server will listen on the given port number and on the loopback
interface only. The "accounts.file" pathname will be interpreted relative to
the node's BASEDIR.

To enable the FTP server with an account server instead, provide the URL of
that server in an "accounts.url" directive::

  [ftpd]
  enabled = true
  port = tcp:8021:interface=127.0.0.1
  accounts.url = https://example.com/login

You can provide both accounts.file and accounts.url, although it probably
isn't very useful except for testing.

FTP provides no security, and so your password or caps could be eavesdropped
if you connect to the FTP server remotely. The examples above include
":interface=127.0.0.1" in the "port" option, which causes the server to only
accept connections from localhost.

Public key authentication is not supported for FTP.

Dependencies
============

@@ -216,7 +180,7 @@ separately: debian puts it in the "python-twisted-conch" package.
Immutable and Mutable Files
===========================

All files created via SFTP (and FTP) are immutable files. However, files can
All files created via SFTP are immutable files. However, files can
only be created in writeable directories, which allows the directory entry to
be relinked to a different file. Normally, when the path of an immutable file
is opened for writing by SFTP, the directory entry is relinked to another
@@ -256,18 +220,3 @@ See also wiki:SftpFrontend_.

.. _ticket #1059: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1059
.. _ticket #1089: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1089

Known Issues in the FTP Frontend
--------------------------------

Mutable files are not supported by the FTP frontend (`ticket #680`_).

Non-ASCII filenames are not supported by FTP (`ticket #682`_).

The FTP frontend sometimes fails to report errors, for example if an upload
fails because it does meet the "servers of happiness" threshold (`ticket
#1081`_).

.. _ticket #680: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/680
.. _ticket #682: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/682
.. _ticket #1081: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1081
@@ -2032,10 +2032,11 @@ potential for surprises when the file store structure is changed.

Tahoe-LAFS provides a mutable file store, but the ways that the store can
change are limited. The only things that can change are:
 * the mapping from child names to child objects inside mutable directories
   (by adding a new child, removing an existing child, or changing an
   existing child to point to a different object)
 * the contents of mutable files

* the mapping from child names to child objects inside mutable directories
  (by adding a new child, removing an existing child, or changing an
  existing child to point to a different object)
* the contents of mutable files

Obviously if you query for information about the file store and then act
to change it (such as by getting a listing of the contents of a mutable
@@ -2157,7 +2158,7 @@ When modifying the file, be careful to update it atomically, otherwise a
request may arrive while the file is only halfway written, and the partial
file may be incorrectly parsed.

The blacklist is applied to all access paths (including SFTP, FTP, and CLI
The blacklist is applied to all access paths (including SFTP and CLI
operations), not just the web-API. The blacklist also applies to directories.
If a directory is blacklisted, the gateway will refuse access to both that
directory and any child files/directories underneath it, when accessed via
@@ -122,7 +122,7 @@ Who should consider using a Helper?
* clients who experience problems with TCP connection fairness: if other
  programs or machines in the same home are getting less than their fair
  share of upload bandwidth. If the connection is being shared fairly, then
  a Tahoe upload that is happening at the same time as a single FTP upload
  a Tahoe upload that is happening at the same time as a single SFTP upload
  should get half the bandwidth.
* clients who have been given the helper.furl by someone who is running a
  Helper and is willing to let them use it
@@ -23,8 +23,9 @@ Contents:
   frontends/download-status

   known_issues
   ../.github/CONTRIBUTING
   contributing
   CODE_OF_CONDUCT
   release-checklist

   servers
   helper
@@ -23,7 +23,7 @@ Known Issues in Tahoe-LAFS v1.10.3, released 30-Mar-2016
* `Disclosure of file through embedded hyperlinks or JavaScript in that file`_
* `Command-line arguments are leaked to other local users`_
* `Capabilities may be leaked to web browser phishing filter / "safe browsing" servers`_
* `Known issues in the FTP and SFTP frontends`_
* `Known issues in the SFTP frontend`_
* `Traffic analysis based on sizes of files/directories, storage indices, and timing`_
* `Privacy leak via Google Chart API link in map-update timing web page`_

@@ -213,8 +213,8 @@ To disable the filter in Chrome:

----

Known issues in the FTP and SFTP frontends
------------------------------------------
Known issues in the SFTP frontend
---------------------------------

These are documented in :doc:`frontends/FTP-and-SFTP` and on `the
SftpFrontend page`_ on the wiki.
@@ -40,23 +40,31 @@ Create Branch and Apply Updates
- Create a branch for release-candidates (e.g. `XXXX.release-1.15.0.rc0`)
- run `tox -e news` to produce a new NEWS.txt file (this does a commit)
- create the news for the release

  - newsfragments/<ticket number>.minor
  - commit it

- manually fix NEWS.txt

  - proper title for latest release ("Release 1.15.0" instead of "Release ...post1432")
  - double-check date (maybe release will be in the future)
  - spot-check the release notes (these come from the newsfragments
    files though so don't do heavy editing)
  - commit these changes

- update "relnotes.txt"

  - update all mentions of 1.14.0 -> 1.15.0
  - update "previous release" statement and date
  - summarize major changes
  - commit it

- update "CREDITS"

  - are there any new contributors in this release?
  - one way: git log release-1.14.0.. | grep Author | sort | uniq
  - commit it

- update "docs/known_issues.rst" if appropriate
- update "docs/INSTALL.rst" references to the new release
- Push the branch to github
@@ -82,25 +90,36 @@ they will need to evaluate which contributors' signatures they trust.

- (all steps above are completed)
- sign the release

  - git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-1.15.0rc0" tahoe-lafs-1.15.0rc0
  - (replace the key-id above with your own)

- build all code locally
- these should all pass:

  - tox -e py27,codechecks,docs,integration

- these can fail (ideally they should not of course):

  - tox -e deprecations,upcoming-deprecations

- build tarballs

  - tox -e tarballs
  - confirm it at least exists:
  - ls dist/ | grep 1.15.0rc0

- inspect and test the tarballs

  - install each in a fresh virtualenv
  - run `tahoe` command

- when satisfied, sign the tarballs:

  - gpg --pinentry=loopback --armor --sign dist/tahoe_lafs-1.15.0rc0-py2-none-any.whl
  - gpg --pinentry=loopback --armor --sign dist/tahoe_lafs-1.15.0rc0.tar.bz2
  - gpg --pinentry=loopback --armor --sign dist/tahoe_lafs-1.15.0rc0.tar.gz
  - gpg --pinentry=loopback --armor --sign dist/tahoe_lafs-1.15.0rc0.zip

  - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0-py2-none-any.whl
  - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.tar.bz2
  - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.tar.gz
  - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.zip


Privileged Contributor
@@ -118,6 +137,12 @@ Did anyone contribute a hack since the last release? If so, then
https://tahoe-lafs.org/hacktahoelafs/ needs to be updated.


Sign Git Tag
````````````

- git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-X.Y.Z" tahoe-lafs-X.Y.Z


Upload Artifacts
````````````````

@@ -129,6 +154,7 @@ need to be uploaded to https://tahoe-lafs.org in `~source/downloads`
https://tahoe-lafs.org/downloads/ on the Web.
- scp dist/*1.15.0* username@tahoe-lafs.org:/home/source/downloads
- the following developers have access to do this:

  - exarkun
  - meejah
  - warner
@@ -137,8 +163,9 @@ For the actual release, the tarball and signature files need to be
uploaded to PyPI as well.

- how to do this?
- (original guide says only "twine upload dist/*")
- (original guide says only `twine upload dist/*`)
- the following developers have access to do this:

  - warner
  - exarkun (partial?)
  - meejah (partial?)

@@ -207,10 +207,10 @@ create a new directory and lose the capability to it, then you cannot
access that directory ever again.


The SFTP and FTP frontends
--------------------------
The SFTP frontend
-----------------

You can access your Tahoe-LAFS grid via any SFTP_ or FTP_ client. See
You can access your Tahoe-LAFS grid via any SFTP_ client. See
:doc:`frontends/FTP-and-SFTP` for how to set this up. On most Unix
platforms, you can also use SFTP to plug Tahoe-LAFS into your computer's
local filesystem via ``sshfs``, but see the `FAQ about performance
@@ -220,7 +220,6 @@ The SftpFrontend_ page on the wiki has more information about using SFTP with
Tahoe-LAFS.

.. _SFTP: https://en.wikipedia.org/wiki/SSH_file_transfer_protocol
.. _FTP: https://en.wikipedia.org/wiki/File_Transfer_Protocol
.. _FAQ about performance problems: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/FAQ#Q23_FUSE
.. _SftpFrontend: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/SftpFrontend

@@ -7,6 +7,7 @@ from os import mkdir, listdir, environ
from os.path import join, exists
from tempfile import mkdtemp, mktemp
from functools import partial
from json import loads

from foolscap.furl import (
    decode_furl,
@@ -37,6 +38,10 @@ from util import (
    _tahoe_runner_optional_coverage,
    await_client_ready,
    TahoeProcess,
    cli,
    _run_node,
    generate_ssh_key,
    block_with_timeout,
)


@@ -152,7 +157,7 @@ def flog_gatherer(reactor, temp_dir, flog_binary, request):
    )
    print("Waiting for flogtool to complete")
    try:
        pytest_twisted.blockon(flog_protocol.done)
        block_with_timeout(flog_protocol.done, reactor)
    except ProcessTerminated as e:
        print("flogtool exited unexpectedly: {}".format(str(e)))
    print("Flogtool completed")
@@ -293,7 +298,7 @@ log_gatherer.furl = {log_furl}
    def cleanup():
        try:
            transport.signalProcess('TERM')
            pytest_twisted.blockon(protocol.exited)
            block_with_timeout(protocol.exited, reactor)
        except ProcessExitedAlready:
            pass
    request.addfinalizer(cleanup)
@@ -347,8 +352,50 @@ def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, requ
            reactor, request, temp_dir, introducer_furl, flog_gatherer, "alice",
            web_port="tcp:9980:interface=localhost",
            storage=False,
            # We're going to kill this ourselves, so no need for finalizer to
            # do it:
            finalize=False,
        )
    )
    await_client_ready(process)

    # 1. Create a new RW directory cap:
    cli(process, "create-alias", "test")
    rwcap = loads(cli(process, "list-aliases", "--json"))["test"]["readwrite"]

    # 2. Enable SFTP on the node:
    host_ssh_key_path = join(process.node_dir, "private", "ssh_host_rsa_key")
    accounts_path = join(process.node_dir, "private", "accounts")
    with open(join(process.node_dir, "tahoe.cfg"), "a") as f:
        f.write("""\
[sftpd]
enabled = true
port = tcp:8022:interface=127.0.0.1
host_pubkey_file = {ssh_key_path}.pub
host_privkey_file = {ssh_key_path}
accounts.file = {accounts_path}
""".format(ssh_key_path=host_ssh_key_path, accounts_path=accounts_path))
    generate_ssh_key(host_ssh_key_path)

    # 3. Add a SFTP access file with username/password and SSH key auth.

    # The client SSH key path is typically going to be somewhere else (~/.ssh,
    # typically), but for convenience sake for testing we'll put it inside node.
    client_ssh_key_path = join(process.node_dir, "private", "ssh_client_rsa_key")
    generate_ssh_key(client_ssh_key_path)
    # Pub key format is "ssh-rsa <thekey> <username>". We want the key.
    ssh_public_key = open(client_ssh_key_path + ".pub").read().strip().split()[1]
    with open(accounts_path, "w") as f:
        f.write("""\
alice password {rwcap}

alice2 ssh-rsa {ssh_public_key} {rwcap}
""".format(rwcap=rwcap, ssh_public_key=ssh_public_key))

    # 4. Restart the node with new SFTP config.
    process.kill()
    pytest_twisted.blockon(_run_node(reactor, process.node_dir, request, None))

    await_client_ready(process)
    return process

@@ -490,7 +537,13 @@ def tor_network(reactor, temp_dir, chutney, request):
        path=join(chutney_dir),
        env=env,
    )
    pytest_twisted.blockon(proto.done)
    try:
        block_with_timeout(proto.done, reactor)
    except ProcessTerminated:
        # If this doesn't exit cleanly, that's fine, that shouldn't fail
        # the test suite.
        pass

    request.addfinalizer(cleanup)

    return chut

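For reference, this is roughly the SFTP configuration that the ``alice`` fixture
above ends up writing -- a sketch only, with illustrative values where the
fixture substitutes node-local paths and the freshly created read-write
directory cap::

    [sftpd]
    enabled = true
    port = tcp:8022:interface=127.0.0.1
    host_pubkey_file = private/ssh_host_rsa_key.pub
    host_privkey_file = private/ssh_host_rsa_key
    accounts.file = private/accounts

and the matching ``private/accounts`` file, mapping one user to a password and
another to an SSH public key, both pointing at the same cap::

    alice password <rwcap>
    alice2 ssh-rsa <client public key> <rwcap>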
162  integration/test_sftp.py (new file)
@@ -0,0 +1,162 @@
"""
It's possible to create/rename/delete files and directories in Tahoe-LAFS using
SFTP.

These tests use Paramiko, rather than Twisted's Conch, because:

    1. It's a different implementation, so we're not testing Conch against
       itself.

    2. Its API is much simpler to use.
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from future.utils import PY2
if PY2:
    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401

from posixpath import join
from stat import S_ISDIR

from paramiko import SSHClient
from paramiko.client import AutoAddPolicy
from paramiko.sftp_client import SFTPClient
from paramiko.ssh_exception import AuthenticationException
from paramiko.rsakey import RSAKey

import pytest

from .util import generate_ssh_key, run_in_thread


def connect_sftp(connect_args={"username": "alice", "password": "password"}):
    """Create an SFTP client."""
    client = SSHClient()
    client.set_missing_host_key_policy(AutoAddPolicy)
    client.connect("localhost", port=8022, look_for_keys=False,
                   allow_agent=False, **connect_args)
    sftp = SFTPClient.from_transport(client.get_transport())

    def rmdir(path, delete_root=True):
        for f in sftp.listdir_attr(path=path):
            childpath = join(path, f.filename)
            if S_ISDIR(f.st_mode):
                rmdir(childpath)
            else:
                sftp.remove(childpath)
        if delete_root:
            sftp.rmdir(path)

    # Delete any files left over from previous tests :(
    rmdir("/", delete_root=False)

    return sftp


@run_in_thread
def test_bad_account_password_ssh_key(alice, tmpdir):
    """
    Can't login with unknown username, wrong password, or wrong SSH pub key.
    """
    # Wrong password, wrong username:
    for u, p in [("alice", "wrong"), ("someuser", "password")]:
        with pytest.raises(AuthenticationException):
            connect_sftp(connect_args={
                "username": u, "password": p,
            })

    another_key = join(str(tmpdir), "ssh_key")
    generate_ssh_key(another_key)
    good_key = RSAKey(filename=join(alice.node_dir, "private", "ssh_client_rsa_key"))
    bad_key = RSAKey(filename=another_key)

    # Wrong key:
    with pytest.raises(AuthenticationException):
        connect_sftp(connect_args={
            "username": "alice2", "pkey": bad_key,
        })

    # Wrong username:
    with pytest.raises(AuthenticationException):
        connect_sftp(connect_args={
            "username": "someoneelse", "pkey": good_key,
        })


@run_in_thread
def test_ssh_key_auth(alice):
    """It's possible to login authenticating with SSH public key."""
    key = RSAKey(filename=join(alice.node_dir, "private", "ssh_client_rsa_key"))
    sftp = connect_sftp(connect_args={
        "username": "alice2", "pkey": key
    })
    assert sftp.listdir() == []


@run_in_thread
def test_read_write_files(alice):
    """It's possible to upload and download files."""
    sftp = connect_sftp()
    with sftp.file("myfile", "wb") as f:
        f.write(b"abc")
        f.write(b"def")

    with sftp.file("myfile", "rb") as f:
        assert f.read(4) == b"abcd"
        assert f.read(2) == b"ef"
        assert f.read(1) == b""


@run_in_thread
def test_directories(alice):
    """
    It's possible to create, list directories, and create and remove files in
    them.
    """
    sftp = connect_sftp()
    assert sftp.listdir() == []

    sftp.mkdir("childdir")
    assert sftp.listdir() == ["childdir"]

    with sftp.file("myfile", "wb") as f:
        f.write(b"abc")
    assert sorted(sftp.listdir()) == ["childdir", "myfile"]

    sftp.chdir("childdir")
    assert sftp.listdir() == []

    with sftp.file("myfile2", "wb") as f:
        f.write(b"def")
    assert sftp.listdir() == ["myfile2"]

    sftp.chdir(None)  # root
    with sftp.file("childdir/myfile2", "rb") as f:
        assert f.read() == b"def"

    sftp.remove("myfile")
    assert sftp.listdir() == ["childdir"]

    sftp.rmdir("childdir")
    assert sftp.listdir() == []


@run_in_thread
def test_rename(alice):
    """Directories and files can be renamed."""
    sftp = connect_sftp()
    sftp.mkdir("dir")

    filepath = join("dir", "file")
    with sftp.file(filepath, "wb") as f:
        f.write(b"abc")

    sftp.rename(filepath, join("dir", "file2"))
    sftp.rename("dir", "dir2")

    with sftp.file(join("dir2", "file2"), "rb") as f:
        assert f.read() == b"abc"
@@ -127,12 +127,12 @@ def test_deep_stats(alice):
        dircap_uri,
        data={
            u"t": u"upload",
            u"when_done": u".",
        },
        files={
            u"file": FILE_CONTENTS,
        },
    )
    resp.raise_for_status()

    # confirm the file is in the directory
    resp = requests.get(
@@ -175,6 +175,7 @@ def test_deep_stats(alice):
        time.sleep(.5)


@util.run_in_thread
def test_status(alice):
    """
    confirm we get something sensible from /status and the various sub-types
@@ -5,6 +5,7 @@ from os import mkdir, environ
from os.path import exists, join
from six.moves import StringIO
from functools import partial
from subprocess import check_output

from twisted.python.filepath import (
    FilePath,
@@ -12,9 +13,13 @@ from twisted.python.filepath import (
from twisted.internet.defer import Deferred, succeed
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.error import ProcessExitedAlready, ProcessDone
from twisted.internet.threads import deferToThread

import requests

from paramiko.rsakey import RSAKey
from boltons.funcutils import wraps

from allmydata.util.configutil import (
    get_config,
    set_config,
@@ -25,6 +30,12 @@ from allmydata import client
import pytest_twisted


def block_with_timeout(deferred, reactor, timeout=120):
    """Block until Deferred has result, but timeout instead of waiting forever."""
    deferred.addTimeout(timeout, reactor)
    return pytest_twisted.blockon(deferred)


class _ProcessExitedProtocol(ProcessProtocol):
    """
    Internal helper that .callback()s on self.done when the process
@@ -123,11 +134,12 @@ def _cleanup_tahoe_process(tahoe_transport, exited):

    :return: After the process has exited.
    """
    from twisted.internet import reactor
    try:
        print("signaling {} with TERM".format(tahoe_transport.pid))
        tahoe_transport.signalProcess('TERM')
        print("signaled, blocking on exit")
        pytest_twisted.blockon(exited)
        block_with_timeout(exited, reactor)
        print("exited, goodbye")
    except ProcessExitedAlready:
        pass
@@ -175,11 +187,15 @@ class TahoeProcess(object):
            u"portnum",
        )

    def kill(self):
        """Kill the process, block until it's done."""
        _cleanup_tahoe_process(self.transport, self.transport.exited)

    def __str__(self):
        return "<TahoeProcess in '{}'>".format(self._node_dir)


def _run_node(reactor, node_dir, request, magic_text):
def _run_node(reactor, node_dir, request, magic_text, finalize=True):
    """
    Run a tahoe process from its node_dir.

@@ -203,7 +219,8 @@ def _run_node(reactor, node_dir, request, magic_text):
    )
    transport.exited = protocol.exited

    request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))
    if finalize:
        request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))

    # XXX abusing the Deferred; should use .when_magic_seen() pattern

@@ -222,7 +239,8 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
                 magic_text=None,
                 needed=2,
                 happy=3,
                 total=4):
                 total=4,
                 finalize=True):
    """
    Helper to create a single node, run it and return the instance
    spawnProcess returned (ITransport)
@@ -270,7 +288,7 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
        d = Deferred()
        d.callback(None)
        d.addCallback(lambda _: created_d)
        d.addCallback(lambda _: _run_node(reactor, node_dir, request, magic_text))
        d.addCallback(lambda _: _run_node(reactor, node_dir, request, magic_text, finalize=finalize))
    return d


@@ -390,17 +408,13 @@ def await_file_vanishes(path, timeout=10):
    raise FileShouldVanishException(path, timeout)


def cli(request, reactor, node_dir, *argv):
def cli(node, *argv):
    """
    Run a tahoe CLI subcommand for a given node, optionally running
    under coverage if '--coverage' was supplied.
    Run a tahoe CLI subcommand for a given node in a blocking manner, returning
    the output.
    """
    proto = _CollectOutputProtocol()
    _tahoe_runner_optional_coverage(
        proto, reactor, request,
        ['--node-directory', node_dir] + list(argv),
    )
    return proto.done
    arguments = ["tahoe", '--node-directory', node.node_dir]
    return check_output(arguments + list(argv))


def node_url(node_dir, uri_fragment):
@@ -505,3 +519,36 @@ def await_client_ready(tahoe, timeout=10, liveness=60*2):
            tahoe,
        )
    )


def generate_ssh_key(path):
    """Create a new SSH private/public key pair."""
    key = RSAKey.generate(2048)
    key.write_private_key_file(path)
    with open(path + ".pub", "wb") as f:
        f.write(b"%s %s" % (key.get_name(), key.get_base64()))


def run_in_thread(f):
    """Decorator for integration tests that runs code in a thread.

    Because we're using pytest_twisted, tests that rely on the reactor are
    expected to return a Deferred and use async APIs so the reactor can run.

    In the case of the integration test suite, it launches nodes in the
    background using Twisted APIs. The nodes stdout and stderr is read via
    Twisted code. If the reactor doesn't run, reads don't happen, and
    eventually the buffers fill up, and the nodes block when they try to flush
    logs.

    We can switch to Twisted APIs (treq instead of requests etc.), but
    sometimes it's easier or expedient to just have a blocking test. So this
    decorator allows you to run the test in a thread, and the reactor can keep
    running in the main thread.

    See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3597 for tracking bug.
    """
    @wraps(f)
    def test(*args, **kwargs):
        return deferToThread(lambda: f(*args, **kwargs))
    return test

@@ -46,7 +46,7 @@ class ProvisioningTool(rend.Page):
        req = inevow.IRequest(ctx)

        def getarg(name, astype=int):
            if req.method != "POST":
            if req.method != b"POST":
                return None
            if name in req.fields:
                return astype(req.fields[name].value)

3  mypy.ini (new file)
@@ -0,0 +1,3 @@
[mypy]
ignore_missing_imports = True
plugins=mypy_zope:plugin

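The tox environment that drives these checks (named "typechecks" in the
CircleCI job above and in newsfragment 3399 below) is not shown in this diff.
A plausible minimal sketch of such a stanza, assuming mypy plus the mypy-zope
plugin referenced in mypy.ini, might look like::

    [testenv:typechecks]
    deps =
        mypy
        mypy-zope
    commands = mypy src

Treat the exact dependency pins and target path as assumptions; the
authoritative definition lives in the project's tox.ini.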
1  newsfragments/3326.installation (new file)
@@ -0,0 +1 @@
Debian 8 support has been replaced with Debian 10 support.

0  newsfragments/3384.minor (new file)
0  newsfragments/3385.minor (new file)

1  newsfragments/3399.feature (new file)
@@ -0,0 +1 @@
Added 'typechecks' environment for tox running mypy and performing static typechecks.

0  newsfragments/3529.minor (new file)
0  newsfragments/3534.minor (new file)
0  newsfragments/3536.minor (new file)
0  newsfragments/3552.minor (new file)
0  newsfragments/3564.minor (new file)
0  newsfragments/3565.minor (new file)
0  newsfragments/3566.minor (new file)
0  newsfragments/3567.minor (new file)
0  newsfragments/3568.minor (new file)
0  newsfragments/3572.minor (new file)
0  newsfragments/3574.minor (new file)
0  newsfragments/3575.minor (new file)
0  newsfragments/3576.minor (new file)
0  newsfragments/3577.minor (new file)
0  newsfragments/3578.minor (new file)
0  newsfragments/3579.minor (new file)
0  newsfragments/3580.minor (new file)
0  newsfragments/3582.minor (new file)

1  newsfragments/3583.removed (new file)
@@ -0,0 +1 @@
FTP is no longer supported by Tahoe-LAFS. Please use the SFTP support instead.

1  newsfragments/3584.bugfix (new file)
@@ -0,0 +1 @@
SFTP public key auth likely works more consistently, and SFTP in general was previously broken.

1  newsfragments/3587.minor (new file)
@@ -0,0 +1 @@

1  newsfragments/3588.incompat (new file)
@@ -0,0 +1 @@
The Tahoe command line now always uses UTF-8 to decode its arguments, regardless of locale.

0  newsfragments/3588.minor (new file)
0  newsfragments/3589.minor (new file)

1  newsfragments/3590.bugfix (new file)
@@ -0,0 +1 @@
Fixed issue where redirecting old-style URIs (/uri/?uri=...) didn't work.

0  newsfragments/3591.minor (new file)
0  newsfragments/3592.minor (new file)
0  newsfragments/3593.minor (new file)
0  newsfragments/3594.minor (new file)
0  newsfragments/3595.minor (new file)
0  newsfragments/3596.minor (new file)
0  newsfragments/3599.minor (new file)
0  newsfragments/3600.minor (new file)
0  newsfragments/3612.minor (new file)
@ -23,17 +23,12 @@ python.pkgs.buildPythonPackage rec {
# This list is over-zealous because it's more work to disable individual
# tests with in a module.

# test_system is a lot of integration-style tests that do a lot of real
# networking between many processes. They sometimes fail spuriously.
rm src/allmydata/test/test_system.py

# Many of these tests don't properly skip when i2p or tor dependencies are
# not supplied (and we are not supplying them).
rm src/allmydata/test/test_i2p_provider.py
rm src/allmydata/test/test_connections.py
rm src/allmydata/test/cli/test_create.py
rm src/allmydata/test/test_client.py
rm src/allmydata/test/test_runner.py
'';
setup.py
@ -63,12 +63,8 @@ install_requires = [
# version of cryptography will *really* be installed.
"cryptography >= 2.6",

# * We need Twisted 10.1.0 for the FTP frontend in order for
# Twisted's FTP server to support asynchronous close.
# * The SFTP frontend depends on Twisted 11.0.0 to fix the SSH server
# rekeying bug <https://twistedmatrix.com/trac/ticket/4395>
# * The FTP frontend depends on Twisted >= 11.1.0 for
# filepath.Permissions
# * The SFTP frontend and manhole depend on the conch extra. However, we
# can't explicitly declare that without an undesirable dependency on gmpy,
# as explained in ticket #2740.
@ -111,7 +107,9 @@ install_requires = [

# Eliot is contemplating dropping Python 2 support. Stick to a version we
# know works on Python 2.7.
"eliot ~= 1.7",
"eliot ~= 1.7 ; python_version < '3.0'",
# On Python 3, we want a new enough version to support custom JSON encoders.
"eliot >= 1.13.0 ; python_version > '3.0'",

# Pyrsistent 0.17.0 (which we use by way of Eliot) has dropped
# Python 2 entirely; stick to the version known to work for us.
@ -383,10 +381,7 @@ setup(name="tahoe-lafs", # also set in __init__.py
# this version from time to time, but we will do it
# intentionally.
"pyflakes == 2.2.0",
# coverage 5.0 breaks the integration tests in some opaque way.
# This probably needs to be addressed in a more permanent way
# eventually...
"coverage ~= 4.5",
"coverage ~= 5.0",
"mock",
"tox",
"pytest",
@ -400,6 +395,8 @@ setup(name="tahoe-lafs", # also set in __init__.py
"html5lib",
"junitxml",
"tenacity",
"paramiko",
"pytest-timeout",
] + tor_requires + i2p_requires,
"tor": tor_requires,
"i2p": i2p_requires,
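The two eliot pins above rely on PEP 508 environment markers, so pip selects exactly one of them depending on the running interpreter. A small hedged sketch of how such a marker evaluates, assuming the third-party packaging library is available (it is not a dependency declared in this diff):

    from packaging.requirements import Requirement

    req = Requirement("eliot >= 1.13.0 ; python_version > '3.0'")
    # Requirement.marker.evaluate() tests the marker against the current
    # interpreter: True on Python 3, False on Python 2.
    print(req.name, str(req.specifier), req.marker.evaluate())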
@ -14,7 +14,9 @@ __all__ = [

__version__ = "unknown"
try:
from allmydata._version import __version__
# type ignored as it fails in CI
# (https://app.circleci.com/pipelines/github/tahoe-lafs/tahoe-lafs/1647/workflows/60ae95d4-abe8-492c-8a03-1ad3b9e42ed3/jobs/40972)
from allmydata._version import __version__ # type: ignore
except ImportError:
# We're running in a tree that hasn't run update_version, and didn't
# come with a _version.py, so we don't know what our version is.
@ -24,7 +26,9 @@ except ImportError:
full_version = "unknown"
branch = "unknown"
try:
from allmydata._version import full_version, branch
# type ignored as it fails in CI
# (https://app.circleci.com/pipelines/github/tahoe-lafs/tahoe-lafs/1647/workflows/60ae95d4-abe8-492c-8a03-1ad3b9e42ed3/jobs/40972)
from allmydata._version import full_version, branch # type: ignore
except ImportError:
# We're running in a tree that hasn't run update_version, and didn't
# come with a _version.py, so we don't know what our full version or
@ -1,3 +1,14 @@
"""
Ported to Python 3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401

import os

@ -34,10 +45,10 @@ class Blacklist(object):
try:
if self.last_mtime is None or current_mtime > self.last_mtime:
self.entries.clear()
with open(self.blacklist_fn, "r") as f:
with open(self.blacklist_fn, "rb") as f:
for line in f:
line = line.strip()
if not line or line.startswith("#"):
if not line or line.startswith(b"#"):
continue
si_s, reason = line.split(None, 1)
si = base32.a2b(si_s) # must be valid base32
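The Blacklist hunk above switches the file to binary mode, so the comment test and the split now operate on bytes. A standalone, illustrative sketch of that per-line parsing; the function name is invented, and the real code decodes the storage index with allmydata.util.base32.a2b:

    def parse_blacklist_line(line: bytes):
        # Blank lines and b"#" comments are skipped, exactly as in the hunk above.
        line = line.strip()
        if not line or line.startswith(b"#"):
            return None
        # First field is the base32 storage index, the rest is the reason.
        si_s, reason = line.split(None, 1)
        return si_s, reason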
@ -86,12 +86,6 @@ _client_config = configutil.ValidConfiguration(
"shares.total",
"storage.plugins",
),
"ftpd": (
"accounts.file",
"accounts.url",
"enabled",
"port",
),
"storage": (
"debug_discard",
"enabled",
@ -656,7 +650,6 @@ class _Client(node.Node, pollmixin.PollMixin):
raise ValueError("config error: helper is enabled, but tub "
"is not listening ('tub.port=' is empty)")
self.init_helper()
self.init_ftp_server()
self.init_sftp_server()

# If the node sees an exit_trigger file, it will poll every second to see
@ -714,7 +707,7 @@ class _Client(node.Node, pollmixin.PollMixin):
def get_long_nodeid(self):
# this matches what IServer.get_longname() says about us elsewhere
vk_string = ed25519.string_from_verifying_key(self._node_public_key)
return remove_prefix(vk_string, "pub-")
return remove_prefix(vk_string, b"pub-")

def get_long_tubid(self):
return idlib.nodeid_b2a(self.nodeid)
@ -898,10 +891,6 @@ class _Client(node.Node, pollmixin.PollMixin):
if helper_furl in ("None", ""):
helper_furl = None

# FURLs need to be bytes:
if helper_furl is not None:
helper_furl = helper_furl.encode("utf-8")

DEP = self.encoding_params
DEP["k"] = int(self.config.get_config("client", "shares.needed", DEP["k"]))
DEP["n"] = int(self.config.get_config("client", "shares.total", DEP["n"]))
@ -1036,18 +1025,6 @@ class _Client(node.Node, pollmixin.PollMixin):
)
ws.setServiceParent(self)

def init_ftp_server(self):
if self.config.get_config("ftpd", "enabled", False, boolean=True):
accountfile = self.config.get_config("ftpd", "accounts.file", None)
if accountfile:
accountfile = self.config.get_config_path(accountfile)
accounturl = self.config.get_config("ftpd", "accounts.url", None)
ftp_portstr = self.config.get_config("ftpd", "port", "8021")

from allmydata.frontends import ftpd
s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
s.setServiceParent(self)

def init_sftp_server(self):
if self.config.get_config("sftpd", "enabled", False, boolean=True):
accountfile = self.config.get_config("sftpd", "accounts.file", None)
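In the helper hunk above, a helper.furl value of "None" or the empty string is treated as "no helper configured" before the setting is used. A tiny hypothetical sketch of that check, not the project's actual helper function:

    def normalize_helper_furl(value):
        # tahoe.cfg may spell "no helper" as an empty value or the literal string "None".
        if value in ("None", ""):
            return None
        return value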
@ -57,6 +57,10 @@ class CRSEncoder(object):

return defer.succeed((shares, desired_share_ids))

def encode_proposal(self, data, desired_share_ids=None):
raise NotImplementedError()


@implementer(ICodecDecoder)
class CRSDecoder(object):
@ -1,4 +1,15 @@
"""Implementation of the deep stats class."""
"""Implementation of the deep stats class.

Ported to Python 3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401

import math

@ -13,7 +24,7 @@ from allmydata.util import mathutil
class DeepStats(object):
"""Deep stats object.

Holds results of the deep-stats opetation.
Holds results of the deep-stats operation.
Used for json generation in the API."""

# Json API version.
@ -121,7 +132,7 @@ class DeepStats(object):
h[bucket] += 1

def get_results(self):
"""Returns deep-stats resutls."""
"""Returns deep-stats results."""
stats = self.stats.copy()
for key in self.histograms:
h = self.histograms[key]
@ -18,7 +18,6 @@ import time
from zope.interface import implementer
from twisted.internet import defer
from foolscap.api import fireEventually
import json

from allmydata.crypto import aes
from allmydata.deep_stats import DeepStats
@ -31,7 +30,7 @@ from allmydata.interfaces import IFilesystemNode, IDirectoryNode, IFileNode, \
from allmydata.check_results import DeepCheckResults, \
DeepCheckAndRepairResults
from allmydata.monitor import Monitor
from allmydata.util import hashutil, base32, log
from allmydata.util import hashutil, base32, log, jsonbytes as json
from allmydata.util.encodingutil import quote_output, normalize
from allmydata.util.assertutil import precondition
from allmydata.util.netstring import netstring, split_netstring
@ -569,7 +568,7 @@ class DirectoryNode(object):
d = self.get_child_and_metadata(childnamex)
return d

def set_uri(self, namex, writecap, readcap, metadata=None, overwrite=True):
def set_uri(self, namex, writecap, readcap=None, metadata=None, overwrite=True):
precondition(isinstance(writecap, (bytes, type(None))), writecap)
precondition(isinstance(readcap, (bytes, type(None))), readcap)
@ -4,8 +4,8 @@ from zope.interface import implementer
from twisted.web.client import getPage
from twisted.internet import defer
from twisted.cred import error, checkers, credentials
from twisted.conch import error as conch_error
from twisted.conch.ssh import keys
from twisted.conch.checkers import SSHPublicKeyChecker, InMemorySSHKeyDB

from allmydata.util import base32
from allmydata.util.fileutil import abspath_expanduser_unicode
@ -29,7 +29,7 @@ class AccountFileChecker(object):
def __init__(self, client, accountfile):
self.client = client
self.passwords = {}
self.pubkeys = {}
pubkeys = {}
self.rootcaps = {}
with open(abspath_expanduser_unicode(accountfile), "r") as f:
for line in f:
@ -40,12 +40,14 @@ class AccountFileChecker(object):
if passwd.startswith("ssh-"):
bits = rest.split()
keystring = " ".join([passwd] + bits[:-1])
key = keys.Key.fromString(keystring)
rootcap = bits[-1]
self.pubkeys[name] = keystring
pubkeys[name] = [key]
else:
self.passwords[name] = passwd
rootcap = rest
self.rootcaps[name] = rootcap
self._pubkeychecker = SSHPublicKeyChecker(InMemorySSHKeyDB(pubkeys))

def _avatarId(self, username):
return FTPAvatarID(username, self.rootcaps[username])
@ -57,11 +59,9 @@ class AccountFileChecker(object):

def requestAvatarId(self, creds):
if credentials.ISSHPrivateKey.providedBy(creds):
# Re-using twisted.conch.checkers.SSHPublicKeyChecker here, rather
# than re-implementing all of the ISSHPrivateKey checking logic,
# would be better. That would require Twisted 14.1.0 or newer,
# though.
return self._checkKey(creds)
d = defer.maybeDeferred(self._pubkeychecker.requestAvatarId, creds)
d.addCallback(self._avatarId)
return d
elif credentials.IUsernameHashedPassword.providedBy(creds):
return self._checkPassword(creds)
elif credentials.IUsernamePassword.providedBy(creds):
@ -86,28 +86,6 @@ class AccountFileChecker(object):
d.addCallback(self._cbPasswordMatch, str(creds.username))
return d

def _checkKey(self, creds):
"""
Determine whether some key-based credentials correctly authenticates a
user.

Returns a Deferred that fires with the username if so or with an
UnauthorizedLogin failure otherwise.
"""

# Is the public key indicated by the given credentials allowed to
# authenticate the username in those credentials?
if creds.blob == self.pubkeys.get(creds.username):
if creds.signature is None:
return defer.fail(conch_error.ValidPublicKey())

# Is the signature in the given credentials the correct
# signature for the data in those credentials?
key = keys.Key.fromString(creds.blob)
if key.verify(creds.signature, creds.sigData):
return defer.succeed(self._avatarId(creds.username))

return defer.fail(error.UnauthorizedLogin())

@implementer(checkers.ICredentialsChecker)
class AccountURLChecker(object):
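The AccountFileChecker changes above drop the hand-rolled _checkKey path in favour of Twisted's stock SSHPublicKeyChecker backed by an InMemorySSHKeyDB. A minimal sketch of that wiring, with made-up usernames and key material; the accounts-file parsing itself is omitted:

    from twisted.conch.ssh import keys
    from twisted.conch.checkers import SSHPublicKeyChecker, InMemorySSHKeyDB

    def make_pubkey_checker(authorized):
        # authorized maps a username to the "ssh-..." public key line taken
        # from the accounts file; InMemorySSHKeyDB wants username -> [Key, ...].
        keydb = InMemorySSHKeyDB({
            user: [keys.Key.fromString(key_line)]
            for user, key_line in authorized.items()
        })
        # requestAvatarId(creds) on the returned checker fires with the username
        # on success and fails with UnauthorizedLogin otherwise.
        return SSHPublicKeyChecker(keydb)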
@ -1,340 +0,0 @@
|
||||
from six import ensure_str
|
||||
|
||||
from types import NoneType
|
||||
|
||||
from zope.interface import implementer
|
||||
from twisted.application import service, strports
|
||||
from twisted.internet import defer
|
||||
from twisted.internet.interfaces import IConsumer
|
||||
from twisted.cred import portal
|
||||
from twisted.python import filepath
|
||||
from twisted.protocols import ftp
|
||||
|
||||
from allmydata.interfaces import IDirectoryNode, ExistingChildError, \
|
||||
NoSuchChildError
|
||||
from allmydata.immutable.upload import FileHandle
|
||||
from allmydata.util.fileutil import EncryptedTemporaryFile
|
||||
from allmydata.util.assertutil import precondition
|
||||
|
||||
@implementer(ftp.IReadFile)
|
||||
class ReadFile(object):
|
||||
def __init__(self, node):
|
||||
self.node = node
|
||||
def send(self, consumer):
|
||||
d = self.node.read(consumer)
|
||||
return d # when consumed
|
||||
|
||||
@implementer(IConsumer)
|
||||
class FileWriter(object):
|
||||
|
||||
def registerProducer(self, producer, streaming):
|
||||
if not streaming:
|
||||
raise NotImplementedError("Non-streaming producer not supported.")
|
||||
# we write the data to a temporary file, since Tahoe can't do
|
||||
# streaming upload yet.
|
||||
self.f = EncryptedTemporaryFile()
|
||||
return None
|
||||
|
||||
def unregisterProducer(self):
|
||||
# the upload actually happens in WriteFile.close()
|
||||
pass
|
||||
|
||||
def write(self, data):
|
||||
self.f.write(data)
|
||||
|
||||
@implementer(ftp.IWriteFile)
|
||||
class WriteFile(object):
|
||||
|
||||
def __init__(self, parent, childname, convergence):
|
||||
self.parent = parent
|
||||
self.childname = childname
|
||||
self.convergence = convergence
|
||||
|
||||
def receive(self):
|
||||
self.c = FileWriter()
|
||||
return defer.succeed(self.c)
|
||||
|
||||
def close(self):
|
||||
u = FileHandle(self.c.f, self.convergence)
|
||||
d = self.parent.add_file(self.childname, u)
|
||||
return d
|
||||
|
||||
|
||||
class NoParentError(Exception):
|
||||
pass
|
||||
|
||||
# filepath.Permissions was added in Twisted-11.1.0, which we require. Twisted
|
||||
# <15.0.0 expected an int, and only does '&' on it. Twisted >=15.0.0 expects
|
||||
# a filepath.Permissions. This satisfies both.
|
||||
|
||||
class IntishPermissions(filepath.Permissions):
|
||||
def __init__(self, statModeInt):
|
||||
self._tahoe_statModeInt = statModeInt
|
||||
filepath.Permissions.__init__(self, statModeInt)
|
||||
def __and__(self, other):
|
||||
return self._tahoe_statModeInt & other
|
||||
|
||||
@implementer(ftp.IFTPShell)
|
||||
class Handler(object):
|
||||
def __init__(self, client, rootnode, username, convergence):
|
||||
self.client = client
|
||||
self.root = rootnode
|
||||
self.username = username
|
||||
self.convergence = convergence
|
||||
|
||||
def makeDirectory(self, path):
|
||||
d = self._get_root(path)
|
||||
d.addCallback(lambda root_and_path:
|
||||
self._get_or_create_directories(root_and_path[0], root_and_path[1]))
|
||||
return d
|
||||
|
||||
def _get_or_create_directories(self, node, path):
|
||||
if not IDirectoryNode.providedBy(node):
|
||||
# unfortunately it is too late to provide the name of the
|
||||
# blocking directory in the error message.
|
||||
raise ftp.FileExistsError("cannot create directory because there "
|
||||
"is a file in the way")
|
||||
if not path:
|
||||
return defer.succeed(node)
|
||||
d = node.get(path[0])
|
||||
def _maybe_create(f):
|
||||
f.trap(NoSuchChildError)
|
||||
return node.create_subdirectory(path[0])
|
||||
d.addErrback(_maybe_create)
|
||||
d.addCallback(self._get_or_create_directories, path[1:])
|
||||
return d
|
||||
|
||||
def _get_parent(self, path):
|
||||
# fire with (parentnode, childname)
|
||||
path = [unicode(p) for p in path]
|
||||
if not path:
|
||||
raise NoParentError
|
||||
childname = path[-1]
|
||||
d = self._get_root(path)
|
||||
def _got_root(root_and_path):
|
||||
(root, path) = root_and_path
|
||||
if not path:
|
||||
raise NoParentError
|
||||
return root.get_child_at_path(path[:-1])
|
||||
d.addCallback(_got_root)
|
||||
def _got_parent(parent):
|
||||
return (parent, childname)
|
||||
d.addCallback(_got_parent)
|
||||
return d
|
||||
|
||||
def _remove_thing(self, path, must_be_directory=False, must_be_file=False):
|
||||
d = defer.maybeDeferred(self._get_parent, path)
|
||||
def _convert_error(f):
|
||||
f.trap(NoParentError)
|
||||
raise ftp.PermissionDeniedError("cannot delete root directory")
|
||||
d.addErrback(_convert_error)
|
||||
def _got_parent(parent_and_childname):
|
||||
(parent, childname) = parent_and_childname
|
||||
d = parent.get(childname)
|
||||
def _got_child(child):
|
||||
if must_be_directory and not IDirectoryNode.providedBy(child):
|
||||
raise ftp.IsNotADirectoryError("rmdir called on a file")
|
||||
if must_be_file and IDirectoryNode.providedBy(child):
|
||||
raise ftp.IsADirectoryError("rmfile called on a directory")
|
||||
return parent.delete(childname)
|
||||
d.addCallback(_got_child)
|
||||
d.addErrback(self._convert_error)
|
||||
return d
|
||||
d.addCallback(_got_parent)
|
||||
return d
|
||||
|
||||
def removeDirectory(self, path):
|
||||
return self._remove_thing(path, must_be_directory=True)
|
||||
|
||||
def removeFile(self, path):
|
||||
return self._remove_thing(path, must_be_file=True)
|
||||
|
||||
def rename(self, fromPath, toPath):
|
||||
# the target directory must already exist
|
||||
d = self._get_parent(fromPath)
|
||||
def _got_from_parent(fromparent_and_childname):
|
||||
(fromparent, childname) = fromparent_and_childname
|
||||
d = self._get_parent(toPath)
|
||||
d.addCallback(lambda toparent_and_tochildname:
|
||||
fromparent.move_child_to(childname,
|
||||
toparent_and_tochildname[0], toparent_and_tochildname[1],
|
||||
overwrite=False))
|
||||
return d
|
||||
d.addCallback(_got_from_parent)
|
||||
d.addErrback(self._convert_error)
|
||||
return d
|
||||
|
||||
def access(self, path):
|
||||
# we allow access to everything that exists. We are required to raise
|
||||
# an error for paths that don't exist: FTP clients (at least ncftp)
|
||||
# uses this to decide whether to mkdir or not.
|
||||
d = self._get_node_and_metadata_for_path(path)
|
||||
d.addErrback(self._convert_error)
|
||||
d.addCallback(lambda res: None)
|
||||
return d
|
||||
|
||||
def _convert_error(self, f):
|
||||
if f.check(NoSuchChildError):
|
||||
childname = f.value.args[0].encode("utf-8")
|
||||
msg = "'%s' doesn't exist" % childname
|
||||
raise ftp.FileNotFoundError(msg)
|
||||
if f.check(ExistingChildError):
|
||||
msg = f.value.args[0].encode("utf-8")
|
||||
raise ftp.FileExistsError(msg)
|
||||
return f
|
||||
|
||||
def _get_root(self, path):
|
||||
# return (root, remaining_path)
|
||||
path = [unicode(p) for p in path]
|
||||
if path and path[0] == "uri":
|
||||
d = defer.maybeDeferred(self.client.create_node_from_uri,
|
||||
str(path[1]))
|
||||
d.addCallback(lambda root: (root, path[2:]))
|
||||
else:
|
||||
d = defer.succeed((self.root,path))
|
||||
return d
|
||||
|
||||
def _get_node_and_metadata_for_path(self, path):
|
||||
d = self._get_root(path)
|
||||
def _got_root(root_and_path):
|
||||
(root,path) = root_and_path
|
||||
if path:
|
||||
return root.get_child_and_metadata_at_path(path)
|
||||
else:
|
||||
return (root,{})
|
||||
d.addCallback(_got_root)
|
||||
return d
|
||||
|
||||
def _populate_row(self, keys, childnode_and_metadata):
|
||||
(childnode, metadata) = childnode_and_metadata
|
||||
values = []
|
||||
isdir = bool(IDirectoryNode.providedBy(childnode))
|
||||
for key in keys:
|
||||
if key == "size":
|
||||
if isdir:
|
||||
value = 0
|
||||
else:
|
||||
value = childnode.get_size() or 0
|
||||
elif key == "directory":
|
||||
value = isdir
|
||||
elif key == "permissions":
|
||||
# Twisted-14.0.2 (and earlier) expected an int, and used it
|
||||
# in a rendering function that did (mode & NUMBER).
|
||||
# Twisted-15.0.0 expects a
|
||||
# twisted.python.filepath.Permissions , and calls its
|
||||
# .shorthand() method. This provides both.
|
||||
value = IntishPermissions(0o600)
|
||||
elif key == "hardlinks":
|
||||
value = 1
|
||||
elif key == "modified":
|
||||
# follow sftpd convention (i.e. linkmotime in preference to mtime)
|
||||
if "linkmotime" in metadata.get("tahoe", {}):
|
||||
value = metadata["tahoe"]["linkmotime"]
|
||||
else:
|
||||
value = metadata.get("mtime", 0)
|
||||
elif key == "owner":
|
||||
value = self.username
|
||||
elif key == "group":
|
||||
value = self.username
|
||||
else:
|
||||
value = "??"
|
||||
values.append(value)
|
||||
return values
|
||||
|
||||
def stat(self, path, keys=()):
|
||||
# for files only, I think
|
||||
d = self._get_node_and_metadata_for_path(path)
|
||||
def _render(node_and_metadata):
|
||||
(node, metadata) = node_and_metadata
|
||||
assert not IDirectoryNode.providedBy(node)
|
||||
return self._populate_row(keys, (node,metadata))
|
||||
d.addCallback(_render)
|
||||
d.addErrback(self._convert_error)
|
||||
return d
|
||||
|
||||
def list(self, path, keys=()):
|
||||
# the interface claims that path is a list of unicodes, but in
|
||||
# practice it is not
|
||||
d = self._get_node_and_metadata_for_path(path)
|
||||
def _list(node_and_metadata):
|
||||
(node, metadata) = node_and_metadata
|
||||
if IDirectoryNode.providedBy(node):
|
||||
return node.list()
|
||||
return { path[-1]: (node, metadata) } # need last-edge metadata
|
||||
d.addCallback(_list)
|
||||
def _render(children):
|
||||
results = []
|
||||
for (name, childnode) in children.iteritems():
|
||||
# the interface claims that the result should have a unicode
|
||||
# object as the name, but it fails unless you give it a
|
||||
# bytestring
|
||||
results.append( (name.encode("utf-8"),
|
||||
self._populate_row(keys, childnode) ) )
|
||||
return results
|
||||
d.addCallback(_render)
|
||||
d.addErrback(self._convert_error)
|
||||
return d
|
||||
|
||||
def openForReading(self, path):
|
||||
d = self._get_node_and_metadata_for_path(path)
|
||||
d.addCallback(lambda node_and_metadata: ReadFile(node_and_metadata[0]))
|
||||
d.addErrback(self._convert_error)
|
||||
return d
|
||||
|
||||
def openForWriting(self, path):
|
||||
path = [unicode(p) for p in path]
|
||||
if not path:
|
||||
raise ftp.PermissionDeniedError("cannot STOR to root directory")
|
||||
childname = path[-1]
|
||||
d = self._get_root(path)
|
||||
def _got_root(root_and_path):
|
||||
(root, path) = root_and_path
|
||||
if not path:
|
||||
raise ftp.PermissionDeniedError("cannot STOR to root directory")
|
||||
return root.get_child_at_path(path[:-1])
|
||||
d.addCallback(_got_root)
|
||||
def _got_parent(parent):
|
||||
return WriteFile(parent, childname, self.convergence)
|
||||
d.addCallback(_got_parent)
|
||||
return d
|
||||
|
||||
from allmydata.frontends.auth import AccountURLChecker, AccountFileChecker, NeedRootcapLookupScheme
|
||||
|
||||
|
||||
@implementer(portal.IRealm)
|
||||
class Dispatcher(object):
|
||||
def __init__(self, client):
|
||||
self.client = client
|
||||
|
||||
def requestAvatar(self, avatarID, mind, interface):
|
||||
assert interface == ftp.IFTPShell
|
||||
rootnode = self.client.create_node_from_uri(avatarID.rootcap)
|
||||
convergence = self.client.convergence
|
||||
s = Handler(self.client, rootnode, avatarID.username, convergence)
|
||||
def logout(): pass
|
||||
return (interface, s, None)
|
||||
|
||||
|
||||
class FTPServer(service.MultiService):
|
||||
def __init__(self, client, accountfile, accounturl, ftp_portstr):
|
||||
precondition(isinstance(accountfile, (unicode, NoneType)), accountfile)
|
||||
service.MultiService.__init__(self)
|
||||
|
||||
r = Dispatcher(client)
|
||||
p = portal.Portal(r)
|
||||
|
||||
if accountfile:
|
||||
c = AccountFileChecker(self, accountfile)
|
||||
p.registerChecker(c)
|
||||
if accounturl:
|
||||
c = AccountURLChecker(self, accounturl)
|
||||
p.registerChecker(c)
|
||||
if not accountfile and not accounturl:
|
||||
# we could leave this anonymous, with just the /uri/CAP form
|
||||
raise NeedRootcapLookupScheme("must provide some translation")
|
||||
|
||||
f = ftp.FTPFactory(p)
|
||||
# strports requires a native string.
|
||||
ftp_portstr = ensure_str(ftp_portstr)
|
||||
s = strports.service(ftp_portstr, f)
|
||||
s.setServiceParent(self)
|
@ -1,6 +1,17 @@
|
||||
"""
|
||||
Ported to Python 3.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from future.utils import PY2
|
||||
if PY2:
|
||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
||||
|
||||
import six
|
||||
import heapq, traceback, array, stat, struct
|
||||
from types import NoneType
|
||||
import heapq, traceback, stat, struct
|
||||
from stat import S_IFREG, S_IFDIR
|
||||
from time import time, strftime, localtime
|
||||
|
||||
@ -45,6 +56,17 @@ from allmydata.util.log import NOISY, OPERATIONAL, WEIRD, \
|
||||
if six.PY3:
|
||||
long = int
|
||||
|
||||
|
||||
def createSFTPError(errorCode, errorMessage):
|
||||
"""
|
||||
SFTPError that can accept both Unicode and bytes.
|
||||
|
||||
Twisted expects _native_ strings for the SFTPError message, but we often do
|
||||
Unicode by default even on Python 2.
|
||||
"""
|
||||
return SFTPError(errorCode, six.ensure_str(errorMessage))
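The wrapper above exists because twisted.conch's SFTPError wants a native str message on both Python 2 and Python 3. A small illustration of what six.ensure_str does with the two string types, assuming six is installed (this module already imports it):

    import six

    assert isinstance(six.ensure_str(u"already text"), str)  # native str on both versions
    assert isinstance(six.ensure_str(b"raw bytes"), str)     # bytes become native str too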
|
||||
|
||||
|
||||
def eventually_callback(d):
|
||||
return lambda res: eventually(d.callback, res)
|
||||
|
||||
@ -53,9 +75,9 @@ def eventually_errback(d):
|
||||
|
||||
|
||||
def _utf8(x):
|
||||
if isinstance(x, unicode):
|
||||
return x.encode('utf-8')
|
||||
if isinstance(x, str):
|
||||
return x.encode('utf-8')
|
||||
if isinstance(x, bytes):
|
||||
return x
|
||||
return repr(x)
|
||||
|
||||
@ -64,7 +86,7 @@ def _to_sftp_time(t):
|
||||
"""SFTP times are unsigned 32-bit integers representing UTC seconds
|
||||
(ignoring leap seconds) since the Unix epoch, January 1 1970 00:00 UTC.
|
||||
A Tahoe time is the corresponding float."""
|
||||
return long(t) & long(0xFFFFFFFF)
|
||||
return int(t) & int(0xFFFFFFFF)
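Per the docstring above, SFTP wants an unsigned 32-bit count of seconds while Tahoe stores a float, so the conversion truncates and masks. A worked example of that masking:

    # 1234567890.75 seconds truncates to 1234567890, which fits in 32 bits unchanged;
    # values at or beyond 2**32 wrap around.
    assert int(1234567890.75) & 0xFFFFFFFF == 1234567890
    assert int(2**32 + 5) & 0xFFFFFFFF == 5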
|
||||
|
||||
|
||||
def _convert_error(res, request):
|
||||
@ -73,7 +95,7 @@ def _convert_error(res, request):
|
||||
|
||||
if not isinstance(res, Failure):
|
||||
logged_res = res
|
||||
if isinstance(res, str): logged_res = "<data of length %r>" % (len(res),)
|
||||
if isinstance(res, (bytes, str)): logged_res = "<data of length %r>" % (len(res),)
|
||||
logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL)
|
||||
return res
|
||||
|
||||
@ -92,10 +114,10 @@ def _convert_error(res, request):
|
||||
raise err
|
||||
if err.check(NoSuchChildError):
|
||||
childname = _utf8(err.value.args[0])
|
||||
raise SFTPError(FX_NO_SUCH_FILE, childname)
|
||||
raise createSFTPError(FX_NO_SUCH_FILE, childname)
|
||||
if err.check(NotWriteableError) or err.check(ChildOfWrongTypeError):
|
||||
msg = _utf8(err.value.args[0])
|
||||
raise SFTPError(FX_PERMISSION_DENIED, msg)
|
||||
raise createSFTPError(FX_PERMISSION_DENIED, msg)
|
||||
if err.check(ExistingChildError):
|
||||
# Versions of SFTP after v3 (which is what twisted.conch implements)
|
||||
# define a specific error code for this case: FX_FILE_ALREADY_EXISTS.
|
||||
@ -104,16 +126,16 @@ def _convert_error(res, request):
|
||||
# to translate the error to the equivalent of POSIX EEXIST, which is
|
||||
# necessary for some picky programs (such as gedit).
|
||||
msg = _utf8(err.value.args[0])
|
||||
raise SFTPError(FX_FAILURE, msg)
|
||||
raise createSFTPError(FX_FAILURE, msg)
|
||||
if err.check(NotImplementedError):
|
||||
raise SFTPError(FX_OP_UNSUPPORTED, _utf8(err.value))
|
||||
raise createSFTPError(FX_OP_UNSUPPORTED, _utf8(err.value))
|
||||
if err.check(EOFError):
|
||||
raise SFTPError(FX_EOF, "end of file reached")
|
||||
raise createSFTPError(FX_EOF, "end of file reached")
|
||||
if err.check(defer.FirstError):
|
||||
_convert_error(err.value.subFailure, request)
|
||||
|
||||
# We assume that the error message is not anonymity-sensitive.
|
||||
raise SFTPError(FX_FAILURE, _utf8(err.value))
|
||||
raise createSFTPError(FX_FAILURE, _utf8(err.value))
|
||||
|
||||
|
||||
def _repr_flags(flags):
|
||||
@ -146,7 +168,7 @@ def _lsLine(name, attrs):
|
||||
# Since we now depend on Twisted v10.1, consider calling Twisted's version.
|
||||
|
||||
mode = st_mode
|
||||
perms = array.array('c', '-'*10)
|
||||
perms = ["-"] * 10
|
||||
ft = stat.S_IFMT(mode)
|
||||
if stat.S_ISDIR(ft): perms[0] = 'd'
|
||||
elif stat.S_ISREG(ft): perms[0] = '-'
|
||||
@ -165,7 +187,7 @@ def _lsLine(name, attrs):
|
||||
if mode&stat.S_IXOTH: perms[9] = 'x'
|
||||
# suid/sgid never set
|
||||
|
||||
l = perms.tostring()
|
||||
l = "".join(perms)
|
||||
l += str(st_nlink).rjust(5) + ' '
|
||||
un = str(st_uid)
|
||||
l += un.ljust(9)
|
||||
@ -182,6 +204,7 @@ def _lsLine(name, attrs):
|
||||
l += strftime("%b %d %Y ", localtime(st_mtime))
|
||||
else:
|
||||
l += strftime("%b %d %H:%M ", localtime(st_mtime))
|
||||
l = l.encode("utf-8")
|
||||
l += name
|
||||
return l
|
||||
|
||||
@ -223,7 +246,7 @@ def _populate_attrs(childnode, metadata, size=None):
|
||||
if childnode and size is None:
|
||||
size = childnode.get_size()
|
||||
if size is not None:
|
||||
_assert(isinstance(size, (int, long)) and not isinstance(size, bool), size=size)
|
||||
_assert(isinstance(size, int) and not isinstance(size, bool), size=size)
|
||||
attrs['size'] = size
|
||||
perms = S_IFREG | 0o666
|
||||
|
||||
@ -255,7 +278,7 @@ def _attrs_to_metadata(attrs):
|
||||
|
||||
for key in attrs:
|
||||
if key == "mtime" or key == "ctime" or key == "createtime":
|
||||
metadata[key] = long(attrs[key])
|
||||
metadata[key] = int(attrs[key])
|
||||
elif key.startswith("ext_"):
|
||||
metadata[key] = str(attrs[key])
|
||||
|
||||
@ -267,7 +290,7 @@ def _attrs_to_metadata(attrs):
|
||||
|
||||
|
||||
def _direntry_for(filenode_or_parent, childname, filenode=None):
|
||||
precondition(isinstance(childname, (unicode, NoneType)), childname=childname)
|
||||
precondition(isinstance(childname, (str, type(None))), childname=childname)
|
||||
|
||||
if childname is None:
|
||||
filenode_or_parent = filenode
|
||||
@ -275,7 +298,7 @@ def _direntry_for(filenode_or_parent, childname, filenode=None):
|
||||
if filenode_or_parent:
|
||||
rw_uri = filenode_or_parent.get_write_uri()
|
||||
if rw_uri and childname:
|
||||
return rw_uri + "/" + childname.encode('utf-8')
|
||||
return rw_uri + b"/" + childname.encode('utf-8')
|
||||
else:
|
||||
return rw_uri
|
||||
|
||||
@ -327,7 +350,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
|
||||
if size < self.current_size or size < self.downloaded:
|
||||
self.f.truncate(size)
|
||||
if size > self.current_size:
|
||||
self.overwrite(self.current_size, "\x00" * (size - self.current_size))
|
||||
self.overwrite(self.current_size, b"\x00" * (size - self.current_size))
|
||||
self.current_size = size
|
||||
|
||||
# make the invariant self.download_size <= self.current_size be true again
|
||||
@ -335,7 +358,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
|
||||
self.download_size = size
|
||||
|
||||
if self.downloaded >= self.download_size:
|
||||
self.download_done("size changed")
|
||||
self.download_done(b"size changed")
|
||||
|
||||
def registerProducer(self, p, streaming):
|
||||
if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY)
|
||||
@ -410,21 +433,21 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
|
||||
milestone = end
|
||||
|
||||
while len(self.milestones) > 0:
|
||||
(next, d) = self.milestones[0]
|
||||
if next > milestone:
|
||||
(next_, d) = self.milestones[0]
|
||||
if next_ > milestone:
|
||||
return
|
||||
if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY)
|
||||
if noisy: self.log("MILESTONE %r %r" % (next_, d), level=NOISY)
|
||||
heapq.heappop(self.milestones)
|
||||
eventually_callback(d)("reached")
|
||||
eventually_callback(d)(b"reached")
|
||||
|
||||
if milestone >= self.download_size:
|
||||
self.download_done("reached download size")
|
||||
self.download_done(b"reached download size")
|
||||
|
||||
def overwrite(self, offset, data):
|
||||
if noisy: self.log(".overwrite(%r, <data of length %r>)" % (offset, len(data)), level=NOISY)
|
||||
if self.is_closed:
|
||||
self.log("overwrite called on a closed OverwriteableFileConsumer", level=WEIRD)
|
||||
raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
|
||||
raise createSFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
|
||||
|
||||
if offset > self.current_size:
|
||||
# Normally writing at an offset beyond the current end-of-file
|
||||
@ -435,7 +458,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
|
||||
# the gap between the current EOF and the offset.
|
||||
|
||||
self.f.seek(self.current_size)
|
||||
self.f.write("\x00" * (offset - self.current_size))
|
||||
self.f.write(b"\x00" * (offset - self.current_size))
|
||||
start = self.current_size
|
||||
else:
|
||||
self.f.seek(offset)
|
||||
@ -455,7 +478,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
|
||||
if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY)
|
||||
if self.is_closed:
|
||||
self.log("read called on a closed OverwriteableFileConsumer", level=WEIRD)
|
||||
raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
|
||||
raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
|
||||
|
||||
# Note that the overwrite method is synchronous. When a write request is processed
|
||||
# (e.g. a writeChunk request on the async queue of GeneralSFTPFile), overwrite will
|
||||
@ -509,7 +532,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
|
||||
return d
|
||||
|
||||
def download_done(self, res):
|
||||
_assert(isinstance(res, (str, Failure)), res=res)
|
||||
_assert(isinstance(res, (bytes, Failure)), res=res)
|
||||
# Only the first call to download_done counts, but we log subsequent calls
|
||||
# (multiple calls are normal).
|
||||
if self.done_status is not None:
|
||||
@ -526,8 +549,8 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
|
||||
eventually_callback(self.done)(None)
|
||||
|
||||
while len(self.milestones) > 0:
|
||||
(next, d) = self.milestones[0]
|
||||
if noisy: self.log("MILESTONE FINISH %r %r %r" % (next, d, res), level=NOISY)
|
||||
(next_, d) = self.milestones[0]
|
||||
if noisy: self.log("MILESTONE FINISH %r %r %r" % (next_, d, res), level=NOISY)
|
||||
heapq.heappop(self.milestones)
|
||||
# The callback means that the milestone has been reached if
|
||||
# it is ever going to be. Note that the file may have been
|
||||
@ -541,7 +564,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
|
||||
self.f.close()
|
||||
except Exception as e:
|
||||
self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD)
|
||||
self.download_done("closed")
|
||||
self.download_done(b"closed")
|
||||
return self.done_status
|
||||
|
||||
def unregisterProducer(self):
|
||||
@ -565,7 +588,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
||||
PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
|
||||
if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY)
|
||||
|
||||
precondition(isinstance(userpath, str) and IFileNode.providedBy(filenode),
|
||||
precondition(isinstance(userpath, bytes) and IFileNode.providedBy(filenode),
|
||||
userpath=userpath, filenode=filenode)
|
||||
self.filenode = filenode
|
||||
self.metadata = metadata
|
||||
@ -577,7 +600,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
||||
self.log(request, level=OPERATIONAL)
|
||||
|
||||
if self.closed:
|
||||
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
|
||||
def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
|
||||
return defer.execute(_closed)
|
||||
|
||||
d = defer.Deferred()
|
||||
@ -594,7 +617,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
||||
# i.e. we respond with an EOF error iff offset is already at EOF.
|
||||
|
||||
if offset >= len(data):
|
||||
eventually_errback(d)(Failure(SFTPError(FX_EOF, "read at or past end of file")))
|
||||
eventually_errback(d)(Failure(createSFTPError(FX_EOF, "read at or past end of file")))
|
||||
else:
|
||||
eventually_callback(d)(data[offset:offset+length]) # truncated if offset+length > len(data)
|
||||
return data
|
||||
@ -605,7 +628,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
||||
def writeChunk(self, offset, data):
|
||||
self.log(".writeChunk(%r, <data of length %r>) denied" % (offset, len(data)), level=OPERATIONAL)
|
||||
|
||||
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
||||
def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
||||
return defer.execute(_denied)
|
||||
|
||||
def close(self):
|
||||
@ -619,7 +642,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
||||
self.log(request, level=OPERATIONAL)
|
||||
|
||||
if self.closed:
|
||||
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
|
||||
def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
|
||||
return defer.execute(_closed)
|
||||
|
||||
d = defer.execute(_populate_attrs, self.filenode, self.metadata)
|
||||
@ -628,7 +651,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
||||
|
||||
def setAttrs(self, attrs):
|
||||
self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL)
|
||||
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
||||
def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
||||
return defer.execute(_denied)
|
||||
|
||||
|
||||
@ -649,7 +672,7 @@ class GeneralSFTPFile(PrefixingLogMixin):
|
||||
if noisy: self.log(".__init__(%r, %r = %r, %r, <convergence censored>)" %
|
||||
(userpath, flags, _repr_flags(flags), close_notify), level=NOISY)
|
||||
|
||||
precondition(isinstance(userpath, str), userpath=userpath)
|
||||
precondition(isinstance(userpath, bytes), userpath=userpath)
|
||||
self.userpath = userpath
|
||||
self.flags = flags
|
||||
self.close_notify = close_notify
|
||||
@ -668,11 +691,11 @@ class GeneralSFTPFile(PrefixingLogMixin):
|
||||
# not be set before then.
|
||||
self.consumer = None
|
||||
|
||||
def open(self, parent=None, childname=None, filenode=None, metadata=None):
|
||||
def open(self, parent=None, childname=None, filenode=None, metadata=None): # noqa: F811
|
||||
self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" %
|
||||
(parent, childname, filenode, metadata), level=OPERATIONAL)
|
||||
|
||||
precondition(isinstance(childname, (unicode, NoneType)), childname=childname)
|
||||
precondition(isinstance(childname, (str, type(None))), childname=childname)
|
||||
precondition(filenode is None or IFileNode.providedBy(filenode), filenode=filenode)
|
||||
precondition(not self.closed, sftpfile=self)
|
||||
|
||||
@ -689,7 +712,7 @@ class GeneralSFTPFile(PrefixingLogMixin):
|
||||
if (self.flags & FXF_TRUNC) or not filenode:
|
||||
# We're either truncating or creating the file, so we don't need the old contents.
|
||||
self.consumer = OverwriteableFileConsumer(0, tempfile_maker)
|
||||
self.consumer.download_done("download not needed")
|
||||
self.consumer.download_done(b"download not needed")
|
||||
else:
|
||||
self.async_.addCallback(lambda ignored: filenode.get_best_readable_version())
|
||||
|
||||
@ -703,7 +726,7 @@ class GeneralSFTPFile(PrefixingLogMixin):
|
||||
d = version.read(self.consumer, 0, None)
|
||||
def _finished(res):
|
||||
if not isinstance(res, Failure):
|
||||
res = "download finished"
|
||||
res = b"download finished"
|
||||
self.consumer.download_done(res)
|
||||
d.addBoth(_finished)
|
||||
# It is correct to drop d here.
|
||||
@ -723,7 +746,7 @@ class GeneralSFTPFile(PrefixingLogMixin):
|
||||
def rename(self, new_userpath, new_parent, new_childname):
|
||||
self.log(".rename(%r, %r, %r)" % (new_userpath, new_parent, new_childname), level=OPERATIONAL)
|
||||
|
||||
precondition(isinstance(new_userpath, str) and isinstance(new_childname, unicode),
|
||||
precondition(isinstance(new_userpath, bytes) and isinstance(new_childname, str),
|
||||
new_userpath=new_userpath, new_childname=new_childname)
|
||||
self.userpath = new_userpath
|
||||
self.parent = new_parent
|
||||
@ -751,11 +774,11 @@ class GeneralSFTPFile(PrefixingLogMixin):
|
||||
self.log(request, level=OPERATIONAL)
|
||||
|
||||
if not (self.flags & FXF_READ):
|
||||
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
|
||||
def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
|
||||
return defer.execute(_denied)
|
||||
|
||||
if self.closed:
|
||||
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
|
||||
def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
|
||||
return defer.execute(_closed)
|
||||
|
||||
d = defer.Deferred()
|
||||
@ -773,11 +796,11 @@ class GeneralSFTPFile(PrefixingLogMixin):
|
||||
self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL)
|
||||
|
||||
if not (self.flags & FXF_WRITE):
|
||||
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
||||
def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
||||
return defer.execute(_denied)
|
||||
|
||||
if self.closed:
|
||||
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
|
||||
def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
|
||||
return defer.execute(_closed)
|
||||
|
||||
self.has_changed = True
|
||||
@ -893,7 +916,7 @@ class GeneralSFTPFile(PrefixingLogMixin):
|
||||
self.log(request, level=OPERATIONAL)
|
||||
|
||||
if self.closed:
|
||||
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
|
||||
def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
|
||||
return defer.execute(_closed)
|
||||
|
||||
# Optimization for read-only handles, when we already know the metadata.
|
||||
@ -917,16 +940,16 @@ class GeneralSFTPFile(PrefixingLogMixin):
|
||||
self.log(request, level=OPERATIONAL)
|
||||
|
||||
if not (self.flags & FXF_WRITE):
|
||||
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
||||
def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
||||
return defer.execute(_denied)
|
||||
|
||||
if self.closed:
|
||||
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle")
|
||||
def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle")
|
||||
return defer.execute(_closed)
|
||||
|
||||
size = attrs.get("size", None)
|
||||
if size is not None and (not isinstance(size, (int, long)) or size < 0):
|
||||
def _bad(): raise SFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer")
|
||||
if size is not None and (not isinstance(size, int) or size < 0):
|
||||
def _bad(): raise createSFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer")
|
||||
return defer.execute(_bad)
|
||||
|
||||
d = defer.Deferred()
|
||||
@ -1012,7 +1035,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
def logout(self):
|
||||
self.log(".logout()", level=OPERATIONAL)
|
||||
|
||||
for files in self._heisenfiles.itervalues():
|
||||
for files in self._heisenfiles.values():
|
||||
for f in files:
|
||||
f.abandon()
|
||||
|
||||
@ -1039,7 +1062,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
request = "._abandon_any_heisenfiles(%r, %r)" % (userpath, direntry)
|
||||
self.log(request, level=OPERATIONAL)
|
||||
|
||||
precondition(isinstance(userpath, str), userpath=userpath)
|
||||
precondition(isinstance(userpath, bytes), userpath=userpath)
|
||||
|
||||
# First we synchronously mark all heisenfiles matching the userpath or direntry
|
||||
# as abandoned, and remove them from the two heisenfile dicts. Then we .sync()
|
||||
@ -1088,8 +1111,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
(from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite))
|
||||
self.log(request, level=OPERATIONAL)
|
||||
|
||||
precondition((isinstance(from_userpath, str) and isinstance(from_childname, unicode) and
|
||||
isinstance(to_userpath, str) and isinstance(to_childname, unicode)),
|
||||
precondition((isinstance(from_userpath, bytes) and isinstance(from_childname, str) and
|
||||
isinstance(to_userpath, bytes) and isinstance(to_childname, str)),
|
||||
from_userpath=from_userpath, from_childname=from_childname, to_userpath=to_userpath, to_childname=to_childname)
|
||||
|
||||
if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY)
|
||||
@ -1118,7 +1141,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
# does not mean that they were not committed; it is used to determine whether
|
||||
# a NoSuchChildError from the rename attempt should be suppressed). If overwrite
|
||||
# is False and there were already heisenfiles at the destination userpath or
|
||||
# direntry, we return a Deferred that fails with SFTPError(FX_PERMISSION_DENIED).
|
||||
# direntry, we return a Deferred that fails with createSFTPError(FX_PERMISSION_DENIED).
|
||||
|
||||
from_direntry = _direntry_for(from_parent, from_childname)
|
||||
to_direntry = _direntry_for(to_parent, to_childname)
|
||||
@ -1127,7 +1150,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
(from_direntry, to_direntry, len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY)
|
||||
|
||||
if not overwrite and (to_userpath in self._heisenfiles or to_direntry in all_heisenfiles):
|
||||
def _existing(): raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
|
||||
def _existing(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8"))
|
||||
if noisy: self.log("existing", level=NOISY)
|
||||
return defer.execute(_existing)
|
||||
|
||||
@ -1161,7 +1184,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
request = "._update_attrs_for_heisenfiles(%r, %r, %r)" % (userpath, direntry, attrs)
|
||||
self.log(request, level=OPERATIONAL)
|
||||
|
||||
_assert(isinstance(userpath, str) and isinstance(direntry, str),
|
||||
_assert(isinstance(userpath, bytes) and isinstance(direntry, bytes),
|
||||
userpath=userpath, direntry=direntry)
|
||||
|
||||
files = []
|
||||
@ -1194,7 +1217,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
request = "._sync_heisenfiles(%r, %r, ignore=%r)" % (userpath, direntry, ignore)
|
||||
self.log(request, level=OPERATIONAL)
|
||||
|
||||
_assert(isinstance(userpath, str) and isinstance(direntry, (str, NoneType)),
|
||||
_assert(isinstance(userpath, bytes) and isinstance(direntry, (bytes, type(None))),
|
||||
userpath=userpath, direntry=direntry)
|
||||
|
||||
files = []
|
||||
@ -1219,7 +1242,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
def _remove_heisenfile(self, userpath, parent, childname, file_to_remove):
|
||||
if noisy: self.log("._remove_heisenfile(%r, %r, %r, %r)" % (userpath, parent, childname, file_to_remove), level=NOISY)
|
||||
|
||||
_assert(isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)),
|
||||
_assert(isinstance(userpath, bytes) and isinstance(childname, (str, type(None))),
|
||||
userpath=userpath, childname=childname)
|
||||
|
||||
direntry = _direntry_for(parent, childname)
|
||||
@ -1246,8 +1269,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
(existing_file, userpath, flags, _repr_flags(flags), parent, childname, filenode, metadata),
|
||||
level=NOISY)
|
||||
|
||||
_assert((isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)) and
|
||||
(metadata is None or 'no-write' in metadata)),
|
||||
_assert((isinstance(userpath, bytes) and isinstance(childname, (str, type(None))) and
|
||||
(metadata is None or 'no-write' in metadata)),
|
||||
userpath=userpath, childname=childname, metadata=metadata)
|
||||
|
||||
writing = (flags & (FXF_WRITE | FXF_CREAT)) != 0
|
||||
@ -1280,17 +1303,17 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
|
||||
if not (flags & (FXF_READ | FXF_WRITE)):
|
||||
def _bad_readwrite():
|
||||
raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set")
|
||||
raise createSFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set")
|
||||
return defer.execute(_bad_readwrite)
|
||||
|
||||
if (flags & FXF_EXCL) and not (flags & FXF_CREAT):
|
||||
def _bad_exclcreat():
|
||||
raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT")
|
||||
raise createSFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT")
|
||||
return defer.execute(_bad_exclcreat)
|
||||
|
||||
path = self._path_from_string(pathstring)
|
||||
if not path:
|
||||
def _emptypath(): raise SFTPError(FX_NO_SUCH_FILE, "path cannot be empty")
|
||||
def _emptypath(): raise createSFTPError(FX_NO_SUCH_FILE, "path cannot be empty")
|
||||
return defer.execute(_emptypath)
|
||||
|
||||
# The combination of flags is potentially valid.
|
||||
@ -1349,20 +1372,20 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
def _got_root(root_and_path):
|
||||
(root, path) = root_and_path
|
||||
if root.is_unknown():
|
||||
raise SFTPError(FX_PERMISSION_DENIED,
|
||||
raise createSFTPError(FX_PERMISSION_DENIED,
|
||||
"cannot open an unknown cap (or child of an unknown object). "
|
||||
"Upgrading the gateway to a later Tahoe-LAFS version may help")
|
||||
if not path:
|
||||
# case 1
|
||||
if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY)
|
||||
if not IFileNode.providedBy(root):
|
||||
raise SFTPError(FX_PERMISSION_DENIED,
|
||||
raise createSFTPError(FX_PERMISSION_DENIED,
|
||||
"cannot open a directory cap")
|
||||
if (flags & FXF_WRITE) and root.is_readonly():
|
||||
raise SFTPError(FX_PERMISSION_DENIED,
|
||||
raise createSFTPError(FX_PERMISSION_DENIED,
|
||||
"cannot write to a non-writeable filecap without a parent directory")
|
||||
if flags & FXF_EXCL:
|
||||
raise SFTPError(FX_FAILURE,
|
||||
raise createSFTPError(FX_FAILURE,
|
||||
"cannot create a file exclusively when it already exists")
|
||||
|
||||
# The file does not need to be added to all_heisenfiles, because it is not
|
||||
@ -1389,7 +1412,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
def _got_parent(parent):
|
||||
if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY)
|
||||
if parent.is_unknown():
|
||||
raise SFTPError(FX_PERMISSION_DENIED,
|
||||
raise createSFTPError(FX_PERMISSION_DENIED,
|
||||
"cannot open a child of an unknown object. "
|
||||
"Upgrading the gateway to a later Tahoe-LAFS version may help")
|
||||
|
||||
@ -1404,13 +1427,13 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
# which is consistent with what might happen on a POSIX filesystem.
|
||||
|
||||
if parent_readonly:
|
||||
raise SFTPError(FX_FAILURE,
|
||||
raise createSFTPError(FX_FAILURE,
|
||||
"cannot create a file exclusively when the parent directory is read-only")
|
||||
|
||||
# 'overwrite=False' ensures failure if the link already exists.
|
||||
# FIXME: should use a single call to set_uri and return (child, metadata) (#1035)
|
||||
|
||||
zero_length_lit = "URI:LIT:"
|
||||
zero_length_lit = b"URI:LIT:"
|
||||
if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" %
|
||||
(parent, zero_length_lit, childname), level=NOISY)
|
||||
d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit,
|
||||
@ -1436,14 +1459,14 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
metadata['no-write'] = _no_write(parent_readonly, filenode, current_metadata)
|
||||
|
||||
if filenode.is_unknown():
|
||||
raise SFTPError(FX_PERMISSION_DENIED,
|
||||
raise createSFTPError(FX_PERMISSION_DENIED,
|
||||
"cannot open an unknown cap. Upgrading the gateway "
|
||||
"to a later Tahoe-LAFS version may help")
|
||||
if not IFileNode.providedBy(filenode):
|
||||
raise SFTPError(FX_PERMISSION_DENIED,
|
||||
raise createSFTPError(FX_PERMISSION_DENIED,
|
||||
"cannot open a directory as if it were a file")
|
||||
if (flags & FXF_WRITE) and metadata['no-write']:
|
||||
raise SFTPError(FX_PERMISSION_DENIED,
|
||||
raise createSFTPError(FX_PERMISSION_DENIED,
|
||||
"cannot open a non-writeable file for writing")
|
||||
|
||||
return self._make_file(file, userpath, flags, parent=parent, childname=childname,
|
||||
@ -1453,10 +1476,10 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
f.trap(NoSuchChildError)
|
||||
|
||||
if not (flags & FXF_CREAT):
|
||||
raise SFTPError(FX_NO_SUCH_FILE,
|
||||
raise createSFTPError(FX_NO_SUCH_FILE,
|
||||
"the file does not exist, and was not opened with the creation (CREAT) flag")
|
||||
if parent_readonly:
|
||||
raise SFTPError(FX_PERMISSION_DENIED,
|
||||
raise createSFTPError(FX_PERMISSION_DENIED,
|
||||
"cannot create a file when the parent directory is read-only")
|
||||
|
||||
return self._make_file(file, userpath, flags, parent=parent, childname=childname)
|
||||
@ -1495,9 +1518,9 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
(to_parent, to_childname) = to_pair
|
||||
|
||||
if from_childname is None:
|
||||
raise SFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI")
|
||||
raise createSFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI")
|
||||
if to_childname is None:
|
||||
raise SFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI")
|
||||
raise createSFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI")
|
||||
|
||||
# <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.5>
|
||||
# "It is an error if there already exists a file with the name specified
|
||||
@ -1512,7 +1535,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
d2.addCallback(lambda ign: to_parent.get(to_childname))
|
||||
def _expect_fail(res):
|
||||
if not isinstance(res, Failure):
|
||||
raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
|
||||
raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8"))
|
||||
|
||||
# It is OK if we fail for errors other than NoSuchChildError, since that probably
|
||||
# indicates some problem accessing the destination directory.
|
||||
@ -1537,7 +1560,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
if not isinstance(err, Failure) or (renamed and err.check(NoSuchChildError)):
|
||||
return None
|
||||
if not overwrite and err.check(ExistingChildError):
|
||||
raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
|
||||
raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8"))
|
||||
|
||||
return err
|
||||
d3.addBoth(_check)
|
||||
@ -1555,7 +1578,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
path = self._path_from_string(pathstring)
|
||||
metadata = _attrs_to_metadata(attrs)
|
||||
if 'no-write' in metadata:
|
||||
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only")
|
||||
def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only")
|
||||
return defer.execute(_denied)
|
||||
|
||||
d = self._get_root(path)
|
||||
@ -1567,7 +1590,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
def _get_or_create_directories(self, node, path, metadata):
|
||||
if not IDirectoryNode.providedBy(node):
|
||||
# TODO: provide the name of the blocking file in the error message.
|
||||
def _blocked(): raise SFTPError(FX_FAILURE, "cannot create directory because there "
|
||||
def _blocked(): raise createSFTPError(FX_FAILURE, "cannot create directory because there "
|
||||
"is a file in the way") # close enough
|
||||
return defer.execute(_blocked)
|
||||
|
||||
@ -1605,7 +1628,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
def _got_parent(parent_and_childname):
|
||||
(parent, childname) = parent_and_childname
|
||||
if childname is None:
|
||||
raise SFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI")
|
||||
raise createSFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI")
|
||||
|
||||
direntry = _direntry_for(parent, childname)
|
||||
d2 = defer.succeed(False)
|
||||
@ -1636,18 +1659,18 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
d.addCallback(_got_parent_or_node)
|
||||
def _list(dirnode):
|
||||
if dirnode.is_unknown():
|
||||
raise SFTPError(FX_PERMISSION_DENIED,
|
||||
raise createSFTPError(FX_PERMISSION_DENIED,
|
||||
"cannot list an unknown cap as a directory. Upgrading the gateway "
|
||||
"to a later Tahoe-LAFS version may help")
|
||||
if not IDirectoryNode.providedBy(dirnode):
|
||||
raise SFTPError(FX_PERMISSION_DENIED,
|
||||
raise createSFTPError(FX_PERMISSION_DENIED,
|
||||
"cannot list a file as if it were a directory")
|
||||
|
||||
d2 = dirnode.list()
|
||||
def _render(children):
|
||||
parent_readonly = dirnode.is_readonly()
|
||||
results = []
|
||||
for filename, (child, metadata) in children.iteritems():
|
||||
for filename, (child, metadata) in list(children.items()):
|
||||
# The file size may be cached or absent.
|
||||
metadata['no-write'] = _no_write(parent_readonly, child, metadata)
|
||||
attrs = _populate_attrs(child, metadata)
|
||||
@ -1727,7 +1750,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
if "size" in attrs:
|
||||
# this would require us to download and re-upload the truncated/extended
|
||||
# file contents
|
||||
def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported")
|
||||
def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported")
|
||||
return defer.execute(_unsupported)
|
||||
|
||||
path = self._path_from_string(pathstring)
|
||||
@ -1744,7 +1767,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
if childname is None:
|
||||
if updated_heisenfiles:
|
||||
return None
|
||||
raise SFTPError(FX_NO_SUCH_FILE, userpath)
|
||||
raise createSFTPError(FX_NO_SUCH_FILE, userpath)
|
||||
else:
|
||||
desired_metadata = _attrs_to_metadata(attrs)
|
||||
if noisy: self.log("desired_metadata = %r" % (desired_metadata,), level=NOISY)
|
||||
@ -1767,7 +1790,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
def readLink(self, pathstring):
|
||||
self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL)
|
||||
|
||||
def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "readLink")
|
||||
def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "readLink")
|
||||
return defer.execute(_unsupported)
|
||||
|
||||
def makeLink(self, linkPathstring, targetPathstring):
|
||||
@ -1776,7 +1799,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
# If this is implemented, note the reversal of arguments described in point 7 of
|
||||
# <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>.
|
||||
|
||||
def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "makeLink")
|
||||
def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "makeLink")
|
||||
return defer.execute(_unsupported)
|
||||
|
||||
def extendedRequest(self, extensionName, extensionData):
|
||||
@ -1785,8 +1808,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
# We implement the three main OpenSSH SFTP extensions; see
|
||||
# <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>
|
||||
|
||||
if extensionName == 'posix-rename@openssh.com':
|
||||
def _bad(): raise SFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request")
|
||||
if extensionName == b'posix-rename@openssh.com':
|
||||
def _bad(): raise createSFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request")
|
||||
|
||||
if 4 > len(extensionData): return defer.execute(_bad)
|
||||
(fromPathLen,) = struct.unpack('>L', extensionData[0:4])
|
||||
@ -1803,11 +1826,11 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
# an error, or an FXP_EXTENDED_REPLY. But it happens to do the right thing
|
||||
# (respond with an FXP_STATUS message) if we return a Failure with code FX_OK.
|
||||
def _succeeded(ign):
|
||||
raise SFTPError(FX_OK, "request succeeded")
|
||||
raise createSFTPError(FX_OK, "request succeeded")
|
||||
d.addCallback(_succeeded)
|
||||
return d
|
||||
|
||||
if extensionName == 'statvfs@openssh.com' or extensionName == 'fstatvfs@openssh.com':
|
||||
if extensionName == b'statvfs@openssh.com' or extensionName == b'fstatvfs@openssh.com':
|
||||
# f_bsize and f_frsize should be the same to avoid a bug in 'df'
|
||||
return defer.succeed(struct.pack('>11Q',
|
||||
1024, # uint64 f_bsize /* file system block size */
|
||||
@ -1823,7 +1846,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
||||
65535, # uint64 f_namemax /* maximum filename length */
|
||||
))
|
||||
|
||||
def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "unsupported %r request <data of length %r>" %
|
||||
def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "unsupported %r request <data of length %r>" %
|
||||
(extensionName, len(extensionData)))
|
||||
return defer.execute(_unsupported)
|
||||
|
||||
@ -1838,29 +1861,29 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
    def _path_from_string(self, pathstring):
        if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY)

        _assert(isinstance(pathstring, str), pathstring=pathstring)
        _assert(isinstance(pathstring, bytes), pathstring=pathstring)

        # The home directory is the root directory.
        pathstring = pathstring.strip("/")
        if pathstring == "" or pathstring == ".":
        pathstring = pathstring.strip(b"/")
        if pathstring == b"" or pathstring == b".":
            path_utf8 = []
        else:
            path_utf8 = pathstring.split("/")
            path_utf8 = pathstring.split(b"/")

        # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.2>
        # "Servers SHOULD interpret a path name component ".." as referring to
        # the parent directory, and "." as referring to the current directory."
        path = []
        for p_utf8 in path_utf8:
            if p_utf8 == "..":
            if p_utf8 == b"..":
                # ignore excess .. components at the root
                if len(path) > 0:
                    path = path[:-1]
            elif p_utf8 != ".":
            elif p_utf8 != b".":
                try:
                    p = p_utf8.decode('utf-8', 'strict')
                except UnicodeError:
                    raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
                    raise createSFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
                path.append(p)

        if noisy: self.log(" PATH %r" % (path,), level=NOISY)
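
The hunk above moves `_path_from_string` to operating on `bytes` throughout. A small standalone sketch of the same normalisation rules (strip leading and trailing slashes, drop "." components, let ".." pop one component but never climb above the root, decode each remaining component as strict UTF-8); the SFTP error plumbing is omitted here:

    def normalize_sftp_path(pathstring):
        # type: (bytes) -> list
        """Sketch of the path rules used above, without the SFTP error handling."""
        assert isinstance(pathstring, bytes), pathstring
        pathstring = pathstring.strip(b"/")
        components = [] if pathstring in (b"", b".") else pathstring.split(b"/")
        path = []
        for component in components:
            if component == b"..":
                if path:          # excess ".." at the root is ignored
                    path.pop()
            elif component != b".":
                path.append(component.decode("utf-8", "strict"))
        return path

    # e.g. normalize_sftp_path(b"/uri/../foo/./bar") == ["foo", "bar"]
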
@ -1979,9 +2002,9 @@ class SFTPServer(service.MultiService):
|
||||
|
||||
def __init__(self, client, accountfile, accounturl,
|
||||
sftp_portstr, pubkey_file, privkey_file):
|
||||
precondition(isinstance(accountfile, (unicode, NoneType)), accountfile)
|
||||
precondition(isinstance(pubkey_file, unicode), pubkey_file)
|
||||
precondition(isinstance(privkey_file, unicode), privkey_file)
|
||||
precondition(isinstance(accountfile, (str, type(None))), accountfile)
|
||||
precondition(isinstance(pubkey_file, str), pubkey_file)
|
||||
precondition(isinstance(privkey_file, str), privkey_file)
|
||||
service.MultiService.__init__(self)
|
||||
|
||||
r = Dispatcher(client)
|
||||
@ -2012,5 +2035,5 @@ class SFTPServer(service.MultiService):
|
||||
f = SSHFactory()
|
||||
f.portal = p
|
||||
|
||||
s = strports.service(sftp_portstr, f)
|
||||
s = strports.service(six.ensure_str(sftp_portstr), f)
|
||||
s.setServiceParent(self)
|
||||
|
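
The SFTP hunks above replace every `SFTPError(...)` constructor call with `createSFTPError(...)`. The helper itself is defined elsewhere in this commit and is not shown here; a minimal sketch of what such a wrapper plausibly looks like, assuming its only job is to keep the human-readable message a native str while Twisted's SFTPError is raised unchanged:

    from twisted.conch.ssh.filetransfer import SFTPError

    def createSFTPError(errorCode, errorMessage):
        # Hypothetical sketch: normalise the message so the same call sites
        # work on Python 2 and Python 3; the real helper may differ.
        assert isinstance(errorCode, int), errorCode
        if isinstance(errorMessage, bytes):
            errorMessage = errorMessage.decode("utf-8")
        return SFTPError(errorCode, errorMessage)
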
@ -9,6 +9,7 @@ from __future__ import unicode_literals
|
||||
from future.utils import PY2
|
||||
if PY2:
|
||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
||||
from six import ensure_str
|
||||
|
||||
import time
|
||||
now = time.time
|
||||
@ -98,7 +99,7 @@ class ShareFinder(object):
|
||||
|
||||
# internal methods
|
||||
def loop(self):
|
||||
pending_s = ",".join([rt.server.get_name()
|
||||
pending_s = ",".join([ensure_str(rt.server.get_name())
|
||||
for rt in self.pending_requests]) # sort?
|
||||
self.log(format="ShareFinder loop: running=%(running)s"
|
||||
" hungry=%(hungry)s, pending=%(pending)s",
|
||||
|
@ -255,11 +255,11 @@ class Encoder(object):
|
||||
# captures the slot, not the value
|
||||
#d.addCallback(lambda res: self.do_segment(i))
|
||||
# use this form instead:
|
||||
d.addCallback(lambda res, i=i: self._encode_segment(i))
|
||||
d.addCallback(lambda res, i=i: self._encode_segment(i, is_tail=False))
|
||||
d.addCallback(self._send_segment, i)
|
||||
d.addCallback(self._turn_barrier)
|
||||
last_segnum = self.num_segments - 1
|
||||
d.addCallback(lambda res: self._encode_tail_segment(last_segnum))
|
||||
d.addCallback(lambda res: self._encode_segment(last_segnum, is_tail=True))
|
||||
d.addCallback(self._send_segment, last_segnum)
|
||||
d.addCallback(self._turn_barrier)
|
||||
|
||||
@ -317,8 +317,24 @@ class Encoder(object):
            dl.append(d)
        return self._gather_responses(dl)

    def _encode_segment(self, segnum):
        codec = self._codec
    def _encode_segment(self, segnum, is_tail):
        """
        Encode one segment of input into the configured number of shares.

        :param segnum: Ostensibly, the number of the segment to encode. In
            reality, this parameter is ignored and the *next* segment is
            encoded and returned.

        :param bool is_tail: ``True`` if this is the last segment, ``False``
            otherwise.

        :return: A ``Deferred`` which fires with a two-tuple. The first
            element is a list of string-y objects representing the encoded
            segment data for one of the shares. The second element is a list
            of integers giving the share numbers of the shares in the first
            element.
        """
        codec = self._tail_codec if is_tail else self._codec
        start = time.time()

        # the ICodecEncoder API wants to receive a total of self.segment_size
|
||||
@ -350,9 +366,11 @@ class Encoder(object):
|
||||
# footprint to 430KiB at the expense of more hash-tree overhead.
|
||||
|
||||
d = self._gather_data(self.required_shares, input_piece_size,
|
||||
crypttext_segment_hasher)
|
||||
crypttext_segment_hasher, allow_short=is_tail)
|
||||
def _done_gathering(chunks):
|
||||
for c in chunks:
|
||||
# If is_tail then a short trailing chunk will have been padded
|
||||
# by _gather_data
|
||||
assert len(c) == input_piece_size
|
||||
self._crypttext_hashes.append(crypttext_segment_hasher.digest())
|
||||
# during this call, we hit 5*segsize memory
|
||||
@ -365,31 +383,6 @@ class Encoder(object):
|
||||
d.addCallback(_done)
|
||||
return d
|
||||
|
||||
def _encode_tail_segment(self, segnum):
|
||||
|
||||
start = time.time()
|
||||
codec = self._tail_codec
|
||||
input_piece_size = codec.get_block_size()
|
||||
|
||||
crypttext_segment_hasher = hashutil.crypttext_segment_hasher()
|
||||
|
||||
d = self._gather_data(self.required_shares, input_piece_size,
|
||||
crypttext_segment_hasher, allow_short=True)
|
||||
def _done_gathering(chunks):
|
||||
for c in chunks:
|
||||
# a short trailing chunk will have been padded by
|
||||
# _gather_data
|
||||
assert len(c) == input_piece_size
|
||||
self._crypttext_hashes.append(crypttext_segment_hasher.digest())
|
||||
return codec.encode(chunks)
|
||||
d.addCallback(_done_gathering)
|
||||
def _done(res):
|
||||
elapsed = time.time() - start
|
||||
self._times["cumulative_encoding"] += elapsed
|
||||
return res
|
||||
d.addCallback(_done)
|
||||
return d
|
||||
|
||||
def _gather_data(self, num_chunks, input_chunk_size,
|
||||
crypttext_segment_hasher,
|
||||
allow_short=False):
|
||||
|
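
The encoder hunks above collapse `_encode_tail_segment` into `_encode_segment(segnum, is_tail)`: the only real differences for the tail segment are which codec is used and that `_gather_data(..., allow_short=True)` is allowed to return a short final chunk, which it then pads. A minimal sketch of the padding contract that the `assert len(c) == input_piece_size` check above relies on; the use of a zero padding byte is an assumption here, not shown in this diff:

    def gather_and_pad(read_chunk, num_chunks, input_piece_size):
        # Every chunk handed to the codec must be exactly input_piece_size
        # bytes, even for the tail segment, so a short trailing read is padded.
        chunks = []
        for _ in range(num_chunks):
            chunk = read_chunk(input_piece_size)        # may be short at EOF
            chunk = chunk + b"\x00" * (input_piece_size - len(chunk))
            chunks.append(chunk)
        return chunks

With that guarantee in place, the same `_done_gathering` assertion holds for tail and non-tail segments alike, which is what lets the two methods be merged.
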
@ -19,7 +19,7 @@ from twisted.protocols import basic
|
||||
from allmydata.interfaces import IImmutableFileNode, ICheckable
|
||||
from allmydata.uri import LiteralFileURI
|
||||
|
||||
@implementer(IImmutableFileNode, ICheckable)
|
||||
|
||||
class _ImmutableFileNodeBase(object):
|
||||
|
||||
def get_write_uri(self):
|
||||
@ -56,6 +56,7 @@ class _ImmutableFileNodeBase(object):
|
||||
return not self == other
|
||||
|
||||
|
||||
@implementer(IImmutableFileNode, ICheckable)
|
||||
class LiteralFileNode(_ImmutableFileNodeBase):
|
||||
|
||||
def __init__(self, filecap):
|
||||
|
@ -141,7 +141,7 @@ class CHKCheckerAndUEBFetcher(object):
|
||||
|
||||
|
||||
@implementer(interfaces.RICHKUploadHelper)
|
||||
class CHKUploadHelper(Referenceable, upload.CHKUploader):
|
||||
class CHKUploadHelper(Referenceable, upload.CHKUploader): # type: ignore # warner/foolscap#78
|
||||
"""I am the helper-server -side counterpart to AssistedUploader. I handle
|
||||
peer selection, encoding, and share pushing. I read ciphertext from the
|
||||
remote AssistedUploader.
|
||||
@ -499,10 +499,13 @@ class LocalCiphertextReader(AskUntilSuccessMixin):
|
||||
# ??. I'm not sure if it makes sense to forward the close message.
|
||||
return self.call("close")
|
||||
|
||||
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3561
|
||||
def set_upload_status(self, upload_status):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
@implementer(interfaces.RIHelper, interfaces.IStatsProducer)
|
||||
class Helper(Referenceable):
|
||||
class Helper(Referenceable): # type: ignore # warner/foolscap#78
|
||||
"""
|
||||
:ivar dict[bytes, CHKUploadHelper] _active_uploads: For any uploads which
|
||||
have been started but not finished, a mapping from storage index to the
|
||||
|
@ -11,20 +11,32 @@ from future.utils import PY2, native_str
|
||||
if PY2:
|
||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
||||
from past.builtins import long, unicode
|
||||
from six import ensure_str
|
||||
|
||||
try:
|
||||
from typing import List
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
import os, time, weakref, itertools
|
||||
|
||||
import attr
|
||||
|
||||
from zope.interface import implementer
|
||||
from twisted.python import failure
|
||||
from twisted.internet import defer
|
||||
from twisted.application import service
|
||||
from foolscap.api import Referenceable, Copyable, RemoteCopy, fireEventually
|
||||
from foolscap.api import Referenceable, Copyable, RemoteCopy
|
||||
|
||||
from allmydata.crypto import aes
|
||||
from allmydata.util.hashutil import file_renewal_secret_hash, \
|
||||
file_cancel_secret_hash, bucket_renewal_secret_hash, \
|
||||
bucket_cancel_secret_hash, plaintext_hasher, \
|
||||
storage_index_hash, plaintext_segment_hasher, convergence_hasher
|
||||
from allmydata.util.deferredutil import timeout_call
|
||||
from allmydata.util.deferredutil import (
|
||||
timeout_call,
|
||||
until,
|
||||
)
|
||||
from allmydata import hashtree, uri
|
||||
from allmydata.storage.server import si_b2a
|
||||
from allmydata.immutable import encode
|
||||
@ -385,6 +397,9 @@ class PeerSelector(object):
|
||||
)
|
||||
return self.happiness_mappings
|
||||
|
||||
def add_peers(self, peerids=None):
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class _QueryStatistics(object):
|
||||
|
||||
@ -896,13 +911,45 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin):
|
||||
raise UploadUnhappinessError(msg)
|
||||
|
||||
|
||||
@attr.s
class _Accum(object):
    """
    Accumulate up to some known amount of ciphertext.

    :ivar remaining: The number of bytes still expected.
    :ivar ciphertext: The bytes accumulated so far.
    """
    remaining = attr.ib(validator=attr.validators.instance_of(int)) # type: int
    ciphertext = attr.ib(default=attr.Factory(list)) # type: List[bytes]

    def extend(self,
               size,         # type: int
               ciphertext,   # type: List[bytes]
    ):
        """
        Accumulate some more ciphertext.

        :param size: The amount of data the new ciphertext represents towards
            the goal. This may be more than the actual size of the given
            ciphertext if the source has run out of data.

        :param ciphertext: The new ciphertext to accumulate.
        """
        self.remaining -= size
        self.ciphertext.extend(ciphertext)

@implementer(IEncryptedUploadable)
|
||||
class EncryptAnUploadable(object):
|
||||
"""This is a wrapper that takes an IUploadable and provides
|
||||
IEncryptedUploadable."""
|
||||
CHUNKSIZE = 50*1024
|
||||
|
||||
def __init__(self, original, log_parent=None, progress=None):
|
||||
def __init__(self, original, log_parent=None, progress=None, chunk_size=None):
|
||||
"""
|
||||
:param chunk_size: The number of bytes to read from the uploadable at a
|
||||
time, or None for some default.
|
||||
"""
|
||||
precondition(original.default_params_set,
|
||||
"set_default_encoding_parameters not called on %r before wrapping with EncryptAnUploadable" % (original,))
|
||||
self.original = IUploadable(original)
|
||||
@ -916,6 +963,8 @@ class EncryptAnUploadable(object):
|
||||
self._ciphertext_bytes_read = 0
|
||||
self._status = None
|
||||
self._progress = progress
|
||||
if chunk_size is not None:
|
||||
self.CHUNKSIZE = chunk_size
|
||||
|
||||
def set_upload_status(self, upload_status):
|
||||
self._status = IUploadStatus(upload_status)
|
||||
@ -1022,47 +1071,53 @@ class EncryptAnUploadable(object):
        # and size
        d.addCallback(lambda ignored: self.get_size())
        d.addCallback(lambda ignored: self._get_encryptor())
        # then fetch and encrypt the plaintext. The unusual structure here
        # (passing a Deferred *into* a function) is needed to avoid
        # overflowing the stack: Deferreds don't optimize out tail recursion.
        # We also pass in a list, to which _read_encrypted will append
        # ciphertext.
        ciphertext = []
        d2 = defer.Deferred()
        d.addCallback(lambda ignored:
                      self._read_encrypted(length, ciphertext, hash_only, d2))
        d.addCallback(lambda ignored: d2)

        accum = _Accum(length)

        def action():
            """
            Read some bytes into the accumulator.
            """
            return self._read_encrypted(accum, hash_only)

        def condition():
            """
            Check to see if the accumulator has all the data.
            """
            return accum.remaining == 0

        d.addCallback(lambda ignored: until(action, condition))
        d.addCallback(lambda ignored: accum.ciphertext)
        return d
|
||||
|
||||
def _read_encrypted(self, remaining, ciphertext, hash_only, fire_when_done):
|
||||
if not remaining:
|
||||
fire_when_done.callback(ciphertext)
|
||||
return None
|
||||
def _read_encrypted(self,
|
||||
ciphertext_accum, # type: _Accum
|
||||
hash_only, # type: bool
|
||||
):
|
||||
# type: (...) -> defer.Deferred
|
||||
"""
|
||||
Read the next chunk of plaintext, encrypt it, and extend the accumulator
|
||||
with the resulting ciphertext.
|
||||
"""
|
||||
# tolerate large length= values without consuming a lot of RAM by
|
||||
# reading just a chunk (say 50kB) at a time. This only really matters
|
||||
# when hash_only==True (i.e. resuming an interrupted upload), since
|
||||
# that's the case where we will be skipping over a lot of data.
|
||||
size = min(remaining, self.CHUNKSIZE)
|
||||
remaining = remaining - size
|
||||
size = min(ciphertext_accum.remaining, self.CHUNKSIZE)
|
||||
|
||||
# read a chunk of plaintext..
|
||||
d = defer.maybeDeferred(self.original.read, size)
|
||||
# N.B.: if read() is synchronous, then since everything else is
|
||||
# actually synchronous too, we'd blow the stack unless we stall for a
|
||||
# tick. Once you accept a Deferred from IUploadable.read(), you must
|
||||
# be prepared to have it fire immediately too.
|
||||
d.addCallback(fireEventually)
|
||||
def _good(plaintext):
|
||||
# and encrypt it..
|
||||
# o/' over the fields we go, hashing all the way, sHA! sHA! sHA! o/'
|
||||
ct = self._hash_and_encrypt_plaintext(plaintext, hash_only)
|
||||
ciphertext.extend(ct)
|
||||
self._read_encrypted(remaining, ciphertext, hash_only,
|
||||
fire_when_done)
|
||||
def _err(why):
|
||||
fire_when_done.errback(why)
|
||||
# Intentionally tell the accumulator about the expected size, not
|
||||
# the actual size. If we run out of data we still want remaining
|
||||
# to drop otherwise it will never reach 0 and the loop will never
|
||||
# end.
|
||||
ciphertext_accum.extend(size, ct)
|
||||
d.addCallback(_good)
|
||||
d.addErrback(_err)
|
||||
return None
|
||||
return d
|
||||
|
||||
def _hash_and_encrypt_plaintext(self, data, hash_only):
|
||||
assert isinstance(data, (tuple, list)), type(data)
|
||||
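
The hunk above replaces the recursive pass-a-Deferred-in pattern with an `_Accum` instance driven by `deferredutil.until(action, condition)`. A self-contained sketch of the same shape, assuming `until` repeatedly calls the zero-argument `action` (which may return a Deferred) until `condition()` becomes true; `Accum` here is a toy stand-in for the attrs class introduced above:

    from twisted.internet import defer

    @defer.inlineCallbacks
    def until(action, condition):
        # Loop helper as assumed above: run action (possibly asynchronous)
        # until condition() is true.
        while True:
            yield defer.maybeDeferred(action)
            if condition():
                break

    class Accum(object):
        def __init__(self, remaining):
            self.remaining = remaining
            self.ciphertext = []

        def extend(self, size, ciphertext):
            self.remaining -= size
            self.ciphertext.extend(ciphertext)

    def read_all(chunks, length, chunk_size=3):
        """Accumulate ``length`` bytes from an iterator of chunks, loop-style."""
        accum = Accum(length)
        source = iter(chunks)

        def action():
            size = min(accum.remaining, chunk_size)
            accum.extend(size, [next(source)[:size]])

        def condition():
            return accum.remaining == 0

        d = until(action, condition)
        d.addCallback(lambda ignored: b"".join(accum.ciphertext))
        return d

    # read_all([b"abc", b"def", b"gh"], 8) fires with b"abcdefgh"
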
@ -1423,7 +1478,7 @@ class LiteralUploader(object):
|
||||
return self._status
|
||||
|
||||
@implementer(RIEncryptedUploadable)
|
||||
class RemoteEncryptedUploadable(Referenceable):
|
||||
class RemoteEncryptedUploadable(Referenceable): # type: ignore # warner/foolscap#78
|
||||
|
||||
def __init__(self, encrypted_uploadable, upload_status):
|
||||
self._eu = IEncryptedUploadable(encrypted_uploadable)
|
||||
@ -1825,7 +1880,7 @@ class Uploader(service.MultiService, log.PrefixingLogMixin):
|
||||
def startService(self):
|
||||
service.MultiService.startService(self)
|
||||
if self._helper_furl:
|
||||
self.parent.tub.connectTo(self._helper_furl,
|
||||
self.parent.tub.connectTo(ensure_str(self._helper_furl),
|
||||
self._got_helper)
|
||||
|
||||
def _got_helper(self, helper):
|
||||
|
@ -681,7 +681,7 @@ class IURI(Interface):
|
||||
passing into init_from_string."""
|
||||
|
||||
|
||||
class IVerifierURI(Interface, IURI):
|
||||
class IVerifierURI(IURI):
|
||||
def init_from_string(uri):
|
||||
"""Accept a string (as created by my to_string() method) and populate
|
||||
this instance with its data. I am not normally called directly,
|
||||
@ -748,7 +748,7 @@ class IProgress(Interface):
|
||||
"Current amount of progress (in percentage)"
|
||||
)
|
||||
|
||||
def set_progress(self, value):
|
||||
def set_progress(value):
|
||||
"""
|
||||
Sets the current amount of progress.
|
||||
|
||||
@ -756,7 +756,7 @@ class IProgress(Interface):
|
||||
set_progress_total.
|
||||
"""
|
||||
|
||||
def set_progress_total(self, value):
|
||||
def set_progress_total(value):
|
||||
"""
|
||||
Sets the total amount of expected progress
|
||||
|
||||
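
The two `IProgress` hunks above drop `self` from the method declarations. That matches the `zope.interface` convention: an `Interface` subclass describes the signature a caller sees, so no implicit `self` appears, while implementations still take it. A small illustration with hypothetical names:

    from zope.interface import Interface, implementer

    class IGreeter(Interface):
        def greet(name):
            """Return a greeting for ``name`` (no ``self`` in the declaration)."""

    @implementer(IGreeter)
    class Greeter(object):
        def greet(self, name):        # the implementation does take self
            return "hello, %s" % (name,)
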
@ -859,12 +859,6 @@ class IPeerSelector(Interface):
|
||||
peer selection begins.
|
||||
"""
|
||||
|
||||
def confirm_share_allocation(peerid, shnum):
|
||||
"""
|
||||
Confirm that an allocated peer=>share pairing has been
|
||||
successfully established.
|
||||
"""
|
||||
|
||||
def add_peers(peerids=set):
|
||||
"""
|
||||
Update my internal state to include the peers in peerids as
|
||||
@ -1824,11 +1818,6 @@ class IEncoder(Interface):
|
||||
willing to receive data.
|
||||
"""
|
||||
|
||||
def set_size(size):
|
||||
"""Specify the number of bytes that will be encoded. This must be
|
||||
peformed before get_serialized_params() can be called.
|
||||
"""
|
||||
|
||||
def set_encrypted_uploadable(u):
|
||||
"""Provide a source of encrypted upload data. 'u' must implement
|
||||
IEncryptedUploadable.
|
||||
|
@ -11,12 +11,12 @@ if PY2:
|
||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
||||
from past.builtins import long
|
||||
|
||||
from six import ensure_text
|
||||
from six import ensure_text, ensure_str
|
||||
|
||||
import time
|
||||
from zope.interface import implementer
|
||||
from twisted.application import service
|
||||
from foolscap.api import Referenceable, eventually
|
||||
from foolscap.api import Referenceable
|
||||
from allmydata.interfaces import InsufficientVersionError
|
||||
from allmydata.introducer.interfaces import IIntroducerClient, \
|
||||
RIIntroducerSubscriberClient_v2
|
||||
@ -24,6 +24,9 @@ from allmydata.introducer.common import sign_to_foolscap, unsign_from_foolscap,\
|
||||
get_tubid_string_from_ann
|
||||
from allmydata.util import log, yamlutil, connection_status
|
||||
from allmydata.util.rrefutil import add_version_to_remote_reference
|
||||
from allmydata.util.observer import (
|
||||
ObserverList,
|
||||
)
|
||||
from allmydata.crypto.error import BadSignature
|
||||
from allmydata.util.assertutil import precondition
|
||||
|
||||
@ -39,8 +42,6 @@ class IntroducerClient(service.Service, Referenceable):
|
||||
nickname, my_version, oldest_supported,
|
||||
sequencer, cache_filepath):
|
||||
self._tub = tub
|
||||
if isinstance(introducer_furl, str):
|
||||
introducer_furl = introducer_furl.encode("utf-8")
|
||||
self.introducer_furl = introducer_furl
|
||||
|
||||
assert isinstance(nickname, str)
|
||||
@ -64,8 +65,7 @@ class IntroducerClient(service.Service, Referenceable):
|
||||
self._publisher = None
|
||||
self._since = None
|
||||
|
||||
self._local_subscribers = [] # (servicename,cb,args,kwargs) tuples
|
||||
self._subscribed_service_names = set()
|
||||
self._local_subscribers = {} # {servicename: ObserverList}
|
||||
self._subscriptions = set() # requests we've actually sent
|
||||
|
||||
# _inbound_announcements remembers one announcement per
|
||||
@ -96,7 +96,7 @@ class IntroducerClient(service.Service, Referenceable):
|
||||
def startService(self):
|
||||
service.Service.startService(self)
|
||||
self._introducer_error = None
|
||||
rc = self._tub.connectTo(self.introducer_furl, self._got_introducer)
|
||||
rc = self._tub.connectTo(ensure_str(self.introducer_furl), self._got_introducer)
|
||||
self._introducer_reconnector = rc
|
||||
def connect_failed(failure):
|
||||
self.log("Initial Introducer connection failed: perhaps it's down",
|
||||
@ -178,22 +178,22 @@ class IntroducerClient(service.Service, Referenceable):
            kwargs["facility"] = "tahoe.introducer.client"
        return log.msg(*args, **kwargs)

    def subscribe_to(self, service_name, cb, *args, **kwargs):
        self._local_subscribers.append( (service_name,cb,args,kwargs) )
        self._subscribed_service_names.add(service_name)
    def subscribe_to(self, service_name, callback, *args, **kwargs):
        obs = self._local_subscribers.setdefault(service_name, ObserverList())
        obs.subscribe(lambda key_s, ann: callback(key_s, ann, *args, **kwargs))
        self._maybe_subscribe()
        for index,(ann,key_s,when) in list(self._inbound_announcements.items()):
            precondition(isinstance(key_s, bytes), key_s)
            servicename = index[0]
            if servicename == service_name:
                eventually(cb, key_s, ann, *args, **kwargs)
                obs.notify(key_s, ann)

    def _maybe_subscribe(self):
        if not self._publisher:
            self.log("want to subscribe, but no introducer yet",
                     level=log.NOISY)
            return
        for service_name in self._subscribed_service_names:
        for service_name in self._local_subscribers:
            if service_name in self._subscriptions:
                continue
            self._subscriptions.add(service_name)
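
The hunk above swaps the ad-hoc list of (service_name, cb, args, kwargs) tuples for one `ObserverList` per service name, so the same object replays cached announcements and delivers new ones. A minimal sketch of the `ObserverList` usage as it appears in these hunks (subscribe a callable, `notify()` fans the arguments out to every subscriber); only the calls visible in this diff are assumed here:

    from allmydata.util.observer import ObserverList

    subscribers = {}   # {service_name: ObserverList}

    def subscribe_to(service_name, callback, *args, **kwargs):
        obs = subscribers.setdefault(service_name, ObserverList())
        # bind the caller's extra arguments, as the diff above does
        obs.subscribe(lambda key_s, ann: callback(key_s, ann, *args, **kwargs))

    def deliver(service_name, key_s, ann):
        obs = subscribers.get(service_name)
        if obs is not None:
            obs.notify(key_s, ann)
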
@ -272,7 +272,7 @@ class IntroducerClient(service.Service, Referenceable):
|
||||
precondition(isinstance(key_s, bytes), key_s)
|
||||
self._debug_counts["inbound_announcement"] += 1
|
||||
service_name = str(ann["service-name"])
|
||||
if service_name not in self._subscribed_service_names:
|
||||
if service_name not in self._local_subscribers:
|
||||
self.log("announcement for a service we don't care about [%s]"
|
||||
% (service_name,), level=log.UNUSUAL, umid="dIpGNA")
|
||||
self._debug_counts["wrong_service"] += 1
|
||||
@ -343,9 +343,9 @@ class IntroducerClient(service.Service, Referenceable):
|
||||
def _deliver_announcements(self, key_s, ann):
|
||||
precondition(isinstance(key_s, bytes), key_s)
|
||||
service_name = str(ann["service-name"])
|
||||
for (service_name2,cb,args,kwargs) in self._local_subscribers:
|
||||
if service_name2 == service_name:
|
||||
eventually(cb, key_s, ann, *args, **kwargs)
|
||||
obs = self._local_subscribers.get(service_name)
|
||||
if obs is not None:
|
||||
obs.notify(key_s, ann)
|
||||
|
||||
def connection_status(self):
|
||||
assert self.running # startService builds _introducer_reconnector
|
||||
|
@ -73,7 +73,7 @@ class IIntroducerClient(Interface):
|
||||
publish their services to the rest of the world, and I help them learn
|
||||
about services available on other nodes."""
|
||||
|
||||
def publish(service_name, ann, signing_key=None):
|
||||
def publish(service_name, ann, signing_key):
|
||||
"""Publish the given announcement dictionary (which must be
|
||||
JSON-serializable), plus some additional keys, to the world.
|
||||
|
||||
@ -83,8 +83,7 @@ class IIntroducerClient(Interface):
|
||||
the signing_key, if present, otherwise it is derived from the
|
||||
'anonymous-storage-FURL' key.
|
||||
|
||||
If signing_key= is set to an instance of SigningKey, it will be
|
||||
used to sign the announcement."""
|
||||
signing_key (a SigningKey) will be used to sign the announcement."""
|
||||
|
||||
def subscribe_to(service_name, callback, *args, **kwargs):
|
||||
"""Call this if you will eventually want to use services with the
|
||||
|
@ -15,6 +15,12 @@ from past.builtins import long
|
||||
from six import ensure_text
|
||||
|
||||
import time, os.path, textwrap
|
||||
|
||||
try:
|
||||
from typing import Any, Dict, Union
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from zope.interface import implementer
|
||||
from twisted.application import service
|
||||
from twisted.internet import defer
|
||||
@ -147,10 +153,12 @@ class IntroducerService(service.MultiService, Referenceable):
|
||||
name = "introducer"
|
||||
# v1 is the original protocol, added in 1.0 (but only advertised starting
|
||||
# in 1.3), removed in 1.12. v2 is the new signed protocol, added in 1.10
|
||||
VERSION = { #"http://allmydata.org/tahoe/protocols/introducer/v1": { },
|
||||
# TODO: reconcile bytes/str for keys
|
||||
VERSION = {
|
||||
#"http://allmydata.org/tahoe/protocols/introducer/v1": { },
|
||||
b"http://allmydata.org/tahoe/protocols/introducer/v2": { },
|
||||
b"application-version": allmydata.__full_version__.encode("utf-8"),
|
||||
}
|
||||
} # type: Dict[Union[bytes, str], Any]
|
||||
|
||||
def __init__(self):
|
||||
service.MultiService.__init__(self)
|
||||
|
@ -564,7 +564,7 @@ class MutableFileNode(object):
|
||||
return d
|
||||
|
||||
|
||||
def upload(self, new_contents, servermap):
|
||||
def upload(self, new_contents, servermap, progress=None):
|
||||
"""
|
||||
I overwrite the contents of the best recoverable version of this
|
||||
mutable file with new_contents, using servermap instead of
|
||||
@ -951,7 +951,7 @@ class MutableFileVersion(object):
|
||||
return self._servermap.size_of_version(self._version)
|
||||
|
||||
|
||||
def download_to_data(self, fetch_privkey=False, progress=None):
|
||||
def download_to_data(self, fetch_privkey=False, progress=None): # type: ignore # fixme
|
||||
"""
|
||||
I return a Deferred that fires with the contents of this
|
||||
readable object as a byte string.
|
||||
@ -1205,3 +1205,7 @@ class MutableFileVersion(object):
|
||||
self._servermap,
|
||||
mode=mode)
|
||||
return u.update()
|
||||
|
||||
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3562
|
||||
def get_servermap(self):
|
||||
raise NotImplementedError
|
||||
|
@ -23,6 +23,11 @@ from base64 import b32decode, b32encode
|
||||
from errno import ENOENT, EPERM
|
||||
from warnings import warn
|
||||
|
||||
try:
|
||||
from typing import Union
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
import attr
|
||||
|
||||
# On Python 2 this will be the backported package.
|
||||
@ -273,6 +278,11 @@ def _error_about_old_config_files(basedir, generated_files):
|
||||
raise e
|
||||
|
||||
|
||||
def ensure_text_and_abspath_expanduser_unicode(basedir):
|
||||
# type: (Union[bytes, str]) -> str
|
||||
return abspath_expanduser_unicode(ensure_text(basedir))
|
||||
|
||||
|
||||
@attr.s
|
||||
class _Config(object):
|
||||
"""
|
||||
@ -300,8 +310,8 @@ class _Config(object):
|
||||
config = attr.ib(validator=attr.validators.instance_of(configparser.ConfigParser))
|
||||
portnum_fname = attr.ib()
|
||||
_basedir = attr.ib(
|
||||
converter=lambda basedir: abspath_expanduser_unicode(ensure_text(basedir)),
|
||||
)
|
||||
converter=ensure_text_and_abspath_expanduser_unicode,
|
||||
) # type: str
|
||||
config_path = attr.ib(
|
||||
validator=attr.validators.optional(
|
||||
attr.validators.instance_of(FilePath),
|
||||
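
The `_Config` hunk above replaces an inline lambda with the named `ensure_text_and_abspath_expanduser_unicode` converter so the attribute can also carry a `# type: str` comment. For reference, a generic sketch of how an attrs converter normalises a field at construction time; the class below is hypothetical and uses stdlib path handling rather than the project's `abspath_expanduser_unicode`:

    import os
    import attr
    from six import ensure_text

    @attr.s
    class BaseDir(object):
        # whatever bytes/str the caller passes in is converted to an absolute
        # text path before it is stored on the instance
        path = attr.ib(
            converter=lambda p: os.path.abspath(os.path.expanduser(ensure_text(p))),
        )

    # BaseDir(b"~/tahoe").path -> e.g. "/home/user/tahoe"
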
@ -669,8 +679,8 @@ def create_connection_handlers(config, i2p_provider, tor_provider):
|
||||
# create that handler, so hints which want it will be ignored.
|
||||
handlers = {
|
||||
"tcp": _make_tcp_handler(),
|
||||
"tor": tor_provider.get_tor_handler(),
|
||||
"i2p": i2p_provider.get_i2p_handler(),
|
||||
"tor": tor_provider.get_client_endpoint(),
|
||||
"i2p": i2p_provider.get_client_endpoint(),
|
||||
}
|
||||
log.msg(
|
||||
format="built Foolscap connection handlers for: %(known_handlers)s",
|
||||
@ -927,7 +937,6 @@ class Node(service.MultiService):
|
||||
"""
|
||||
NODETYPE = "unknown NODETYPE"
|
||||
CERTFILE = "node.pem"
|
||||
GENERATED_FILES = []
|
||||
|
||||
def __init__(self, config, main_tub, control_tub, i2p_provider, tor_provider):
|
||||
"""
|
||||
|
@ -1,5 +1,10 @@
|
||||
from __future__ import print_function
|
||||
|
||||
try:
|
||||
from allmydata.scripts.types_ import SubCommands
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from twisted.python import usage
|
||||
from allmydata.scripts.common import BaseOptions
|
||||
|
||||
@ -79,8 +84,8 @@ def do_admin(options):
|
||||
|
||||
|
||||
subCommands = [
|
||||
["admin", None, AdminCommand, "admin subcommands: use 'tahoe admin' for a list"],
|
||||
]
|
||||
("admin", None, AdminCommand, "admin subcommands: use 'tahoe admin' for a list"),
|
||||
] # type: SubCommands
|
||||
|
||||
dispatch = {
|
||||
"admin": do_admin,
|
||||
|
@ -1,6 +1,12 @@
|
||||
from __future__ import print_function
|
||||
|
||||
import os.path, re, fnmatch
|
||||
|
||||
try:
|
||||
from allmydata.scripts.types_ import SubCommands, Parameters
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from twisted.python import usage
|
||||
from allmydata.scripts.common import get_aliases, get_default_nodedir, \
|
||||
DEFAULT_ALIAS, BaseOptions
|
||||
@ -19,7 +25,7 @@ class FileStoreOptions(BaseOptions):
|
||||
"This overrides the URL found in the --node-directory ."],
|
||||
["dir-cap", None, None,
|
||||
"Specify which dirnode URI should be used as the 'tahoe' alias."]
|
||||
]
|
||||
] # type: Parameters
|
||||
|
||||
def postOptions(self):
|
||||
self["quiet"] = self.parent["quiet"]
|
||||
@ -455,25 +461,25 @@ class DeepCheckOptions(FileStoreOptions):
|
||||
Optionally repair any problems found."""
|
||||
|
||||
subCommands = [
|
||||
["mkdir", None, MakeDirectoryOptions, "Create a new directory."],
|
||||
["add-alias", None, AddAliasOptions, "Add a new alias cap."],
|
||||
["create-alias", None, CreateAliasOptions, "Create a new alias cap."],
|
||||
["list-aliases", None, ListAliasesOptions, "List all alias caps."],
|
||||
["ls", None, ListOptions, "List a directory."],
|
||||
["get", None, GetOptions, "Retrieve a file from the grid."],
|
||||
["put", None, PutOptions, "Upload a file into the grid."],
|
||||
["cp", None, CpOptions, "Copy one or more files or directories."],
|
||||
["unlink", None, UnlinkOptions, "Unlink a file or directory on the grid."],
|
||||
["mv", None, MvOptions, "Move a file within the grid."],
|
||||
["ln", None, LnOptions, "Make an additional link to an existing file or directory."],
|
||||
["backup", None, BackupOptions, "Make target dir look like local dir."],
|
||||
["webopen", None, WebopenOptions, "Open a web browser to a grid file or directory."],
|
||||
["manifest", None, ManifestOptions, "List all files/directories in a subtree."],
|
||||
["stats", None, StatsOptions, "Print statistics about all files/directories in a subtree."],
|
||||
["check", None, CheckOptions, "Check a single file or directory."],
|
||||
["deep-check", None, DeepCheckOptions, "Check all files/directories reachable from a starting point."],
|
||||
["status", None, TahoeStatusCommand, "Various status information."],
|
||||
]
|
||||
("mkdir", None, MakeDirectoryOptions, "Create a new directory."),
|
||||
("add-alias", None, AddAliasOptions, "Add a new alias cap."),
|
||||
("create-alias", None, CreateAliasOptions, "Create a new alias cap."),
|
||||
("list-aliases", None, ListAliasesOptions, "List all alias caps."),
|
||||
("ls", None, ListOptions, "List a directory."),
|
||||
("get", None, GetOptions, "Retrieve a file from the grid."),
|
||||
("put", None, PutOptions, "Upload a file into the grid."),
|
||||
("cp", None, CpOptions, "Copy one or more files or directories."),
|
||||
("unlink", None, UnlinkOptions, "Unlink a file or directory on the grid."),
|
||||
("mv", None, MvOptions, "Move a file within the grid."),
|
||||
("ln", None, LnOptions, "Make an additional link to an existing file or directory."),
|
||||
("backup", None, BackupOptions, "Make target dir look like local dir."),
|
||||
("webopen", None, WebopenOptions, "Open a web browser to a grid file or directory."),
|
||||
("manifest", None, ManifestOptions, "List all files/directories in a subtree."),
|
||||
("stats", None, StatsOptions, "Print statistics about all files/directories in a subtree."),
|
||||
("check", None, CheckOptions, "Check a single file or directory."),
|
||||
("deep-check", None, DeepCheckOptions, "Check all files/directories reachable from a starting point."),
|
||||
("status", None, TahoeStatusCommand, "Various status information."),
|
||||
] # type: SubCommands
|
||||
|
||||
def mkdir(options):
|
||||
from allmydata.scripts import tahoe_mkdir
|
||||
@ -495,7 +501,7 @@ def list_aliases(options):
|
||||
rc = tahoe_add_alias.list_aliases(options)
|
||||
return rc
|
||||
|
||||
def list(options):
|
||||
def list_(options):
|
||||
from allmydata.scripts import tahoe_ls
|
||||
rc = tahoe_ls.list(options)
|
||||
return rc
|
||||
@ -581,7 +587,7 @@ dispatch = {
|
||||
"add-alias": add_alias,
|
||||
"create-alias": create_alias,
|
||||
"list-aliases": list_aliases,
|
||||
"ls": list,
|
||||
"ls": list_,
|
||||
"get": get,
|
||||
"put": put,
|
||||
"cp": cp,
|
||||
|
@ -4,6 +4,12 @@ import os, sys, urllib, textwrap
|
||||
import codecs
|
||||
from os.path import join
|
||||
|
||||
try:
|
||||
from typing import Optional
|
||||
from .types_ import Parameters
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from yaml import (
|
||||
safe_dump,
|
||||
)
|
||||
@ -41,8 +47,8 @@ class BaseOptions(usage.Options):
|
||||
def opt_version(self):
|
||||
raise usage.UsageError("--version not allowed on subcommands")
|
||||
|
||||
description = None
|
||||
description_unwrapped = None
|
||||
description = None # type: Optional[str]
|
||||
description_unwrapped = None # type: Optional[str]
|
||||
|
||||
def __str__(self):
|
||||
width = int(os.environ.get('COLUMNS', '80'))
|
||||
@ -65,7 +71,7 @@ class BasedirOptions(BaseOptions):
|
||||
optParameters = [
|
||||
["basedir", "C", None, "Specify which Tahoe base directory should be used. [default: %s]"
|
||||
% quote_local_unicode_path(_default_nodedir)],
|
||||
]
|
||||
] # type: Parameters
|
||||
|
||||
def parseArgs(self, basedir=None):
|
||||
# This finds the node-directory option correctly even if we are in a subcommand.
|
||||
@ -102,7 +108,7 @@ class NoDefaultBasedirOptions(BasedirOptions):
|
||||
|
||||
optParameters = [
|
||||
["basedir", "C", None, "Specify which Tahoe base directory should be used."],
|
||||
]
|
||||
] # type: Parameters
|
||||
|
||||
# This is overridden in order to ensure we get a "Wrong number of arguments."
|
||||
# error when more than one argument is given.
|
||||
|
@ -3,6 +3,11 @@ from __future__ import print_function
|
||||
import os
|
||||
import json
|
||||
|
||||
try:
|
||||
from allmydata.scripts.types_ import SubCommands
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from twisted.internet import reactor, defer
|
||||
from twisted.python.usage import UsageError
|
||||
from twisted.python.filepath import (
|
||||
@ -492,10 +497,10 @@ def create_introducer(config):
|
||||
|
||||
|
||||
subCommands = [
|
||||
["create-node", None, CreateNodeOptions, "Create a node that acts as a client, server or both."],
|
||||
["create-client", None, CreateClientOptions, "Create a client node (with storage initially disabled)."],
|
||||
["create-introducer", None, CreateIntroducerOptions, "Create an introducer node."],
|
||||
]
|
||||
("create-node", None, CreateNodeOptions, "Create a node that acts as a client, server or both."),
|
||||
("create-client", None, CreateClientOptions, "Create a client node (with storage initially disabled)."),
|
||||
("create-introducer", None, CreateIntroducerOptions, "Create an introducer node."),
|
||||
] # type: SubCommands
|
||||
|
||||
dispatch = {
|
||||
"create-node": create_node,
|
||||
|
@ -1,5 +1,12 @@
|
||||
from __future__ import print_function
|
||||
|
||||
try:
|
||||
from allmydata.scripts.types_ import SubCommands
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from future.utils import bchr
|
||||
|
||||
# do not import any allmydata modules at this level. Do that from inside
|
||||
# individual functions instead.
|
||||
import struct, time, os, sys
|
||||
@ -905,7 +912,7 @@ def corrupt_share(options):
|
||||
f = open(fn, "rb+")
|
||||
f.seek(offset)
|
||||
d = f.read(1)
|
||||
d = chr(ord(d) ^ 0x01)
|
||||
d = bchr(ord(d) ^ 0x01)
|
||||
f.seek(offset)
|
||||
f.write(d)
|
||||
f.close()
|
||||
@ -920,7 +927,7 @@ def corrupt_share(options):
|
||||
f.seek(m.DATA_OFFSET)
|
||||
data = f.read(2000)
|
||||
# make sure this slot contains an SMDF share
|
||||
assert data[0] == b"\x00", "non-SDMF mutable shares not supported"
|
||||
assert data[0:1] == b"\x00", "non-SDMF mutable shares not supported"
|
||||
f.close()
|
||||
|
||||
(version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize,
|
||||
@ -1051,8 +1058,8 @@ def do_debug(options):
|
||||
|
||||
|
||||
subCommands = [
|
||||
["debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list."],
|
||||
]
|
||||
("debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list."),
|
||||
] # type: SubCommands
|
||||
|
||||
dispatch = {
|
||||
"debug": do_debug,
|
||||
|
@ -4,6 +4,11 @@ import os, sys
|
||||
from six.moves import StringIO
|
||||
import six
|
||||
|
||||
try:
|
||||
from allmydata.scripts.types_ import SubCommands
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from twisted.python import usage
|
||||
from twisted.internet import defer, task, threads
|
||||
|
||||
@ -40,8 +45,8 @@ _control_node_dispatch = {
|
||||
}
|
||||
|
||||
process_control_commands = [
|
||||
["run", None, tahoe_run.RunOptions, "run a node without daemonizing"],
|
||||
]
|
||||
("run", None, tahoe_run.RunOptions, "run a node without daemonizing"),
|
||||
] # type: SubCommands
|
||||
|
||||
|
||||
class Options(usage.Options):
|
||||
@ -98,7 +103,7 @@ class Options(usage.Options):
|
||||
|
||||
create_dispatch = {}
|
||||
for module in (create_node,):
|
||||
create_dispatch.update(module.dispatch)
|
||||
create_dispatch.update(module.dispatch) # type: ignore
|
||||
|
||||
def parse_options(argv, config=None):
|
||||
if not config:
|
||||
|
@ -2,6 +2,11 @@ from __future__ import print_function
|
||||
|
||||
import json
|
||||
|
||||
try:
|
||||
from allmydata.scripts.types_ import SubCommands
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
from twisted.python import usage
|
||||
from twisted.internet import defer, reactor
|
||||
|
||||
@ -103,7 +108,7 @@ def invite(options):
|
||||
subCommands = [
|
||||
("invite", None, InviteOptions,
|
||||
"Invite a new node to this grid"),
|
||||
]
|
||||
] # type: SubCommands
|
||||
|
||||
dispatch = {
|
||||
"invite": invite,
|
||||
|
src/allmydata/scripts/types_.py (new file, 12 lines)
@ -0,0 +1,12 @@
from typing import List, Tuple, Type, Sequence, Any
from allmydata.scripts.common import BaseOptions


# Historically, subcommands were implemented as lists, but due to a
# [designed contraint in mypy](https://stackoverflow.com/a/52559625/70170),
# a Tuple is required.
SubCommand = Tuple[str, None, Type[BaseOptions], str]

SubCommands = List[SubCommand]

Parameters = List[Sequence[Any]]
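
The new `types_` module gives the CLI command tables a name that the `# type:` comments added throughout this commit can refer to. A short sketch of how such a table is declared and annotated under these aliases, mirroring the `# type: SubCommands` comments above; the "frob" command is a hypothetical example:

    try:
        from allmydata.scripts.types_ import SubCommands
    except ImportError:
        pass   # typing is optional at runtime, as in the hunks above

    from allmydata.scripts.common import BaseOptions

    class FrobOptions(BaseOptions):
        """Options for a hypothetical 'frob' subcommand."""

    subCommands = [
        ("frob", None, FrobOptions, "Frob the grid (hypothetical example)."),
    ]  # type: SubCommands
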
@ -1,11 +1,16 @@
|
||||
"""
|
||||
Ported to Python 3.
|
||||
"""
|
||||
from __future__ import absolute_import
|
||||
from __future__ import division
|
||||
from __future__ import print_function
|
||||
from __future__ import unicode_literals
|
||||
|
||||
import time
|
||||
|
||||
# Python 2 compatibility
|
||||
from future.utils import PY2
|
||||
if PY2:
|
||||
from future.builtins import str # noqa: F401
|
||||
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
||||
|
||||
import time
|
||||
|
||||
from twisted.application import service
|
||||
from twisted.application.internet import TimerService
|
||||
@ -18,7 +23,7 @@ from allmydata.interfaces import IStatsProducer
|
||||
@implementer(IStatsProducer)
|
||||
class CPUUsageMonitor(service.MultiService):
|
||||
HISTORY_LENGTH = 15
|
||||
POLL_INTERVAL = 60
|
||||
POLL_INTERVAL = 60 # type: float
|
||||
|
||||
def __init__(self):
|
||||
service.MultiService.__init__(self)
|
||||
|
@ -19,7 +19,7 @@ import os, time, struct
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
import pickle # type: ignore
|
||||
from twisted.internet import reactor
|
||||
from twisted.application import service
|
||||
from allmydata.storage.common import si_b2a
|
||||
|
@ -202,7 +202,7 @@ class ShareFile(object):
|
||||
|
||||
|
||||
@implementer(RIBucketWriter)
|
||||
class BucketWriter(Referenceable):
|
||||
class BucketWriter(Referenceable): # type: ignore # warner/foolscap#78
|
||||
|
||||
def __init__(self, ss, incominghome, finalhome, max_size, lease_info, canary):
|
||||
self.ss = ss
|
||||
@ -301,7 +301,7 @@ class BucketWriter(Referenceable):
|
||||
|
||||
|
||||
@implementer(RIBucketReader)
|
||||
class BucketReader(Referenceable):
|
||||
class BucketReader(Referenceable): # type: ignore # warner/foolscap#78
|
||||
|
||||
def __init__(self, ss, sharefname, storage_index=None, shnum=None):
|
||||
self.ss = ss
|
||||
|
@ -581,7 +581,7 @@ class StorageServer(service.MultiService, Referenceable):
|
||||
for share in six.viewvalues(shares):
|
||||
share.add_or_renew_lease(lease_info)
|
||||
|
||||
def slot_testv_and_readv_and_writev(
|
||||
def slot_testv_and_readv_and_writev( # type: ignore # warner/foolscap#78
|
||||
self,
|
||||
storage_index,
|
||||
secrets,
|
||||
|
@ -464,6 +464,7 @@ class StorageFarmBroker(service.MultiService):
|
||||
@implementer(IDisplayableServer)
|
||||
class StubServer(object):
|
||||
def __init__(self, serverid):
|
||||
assert isinstance(serverid, bytes)
|
||||
self.serverid = serverid # binary tubid
|
||||
def get_serverid(self):
|
||||
return self.serverid
|
||||
|
@ -113,4 +113,5 @@ if sys.platform == "win32":
|
||||
initialize()
|
||||
|
||||
from eliot import to_file
|
||||
to_file(open("eliot.log", "w"))
|
||||
from allmydata.util.jsonbytes import BytesJSONEncoder
|
||||
to_file(open("eliot.log", "w"), encoder=BytesJSONEncoder)
|
||||
|