Merge branch 'master' into 2916.grid-manager-proposal.5

Commit 7ad6c9269c
.circleci/config.yml

@@ -15,79 +15,66 @@ workflows:
   ci:
     jobs:
       # Start with jobs testing various platforms.
-      # Every job that pulls a Docker image from Docker Hub needs to provide
-      # credentials for that pull operation to avoid being subjected to
-      # unauthenticated pull limits shared across all of CircleCI.  Use this
-      # first job to define a yaml anchor that can be used to supply a
-      # CircleCI job context which makes Docker Hub credentials available in
-      # the environment.
-      #
-      # Contexts are managed in the CircleCI web interface:
-      #
-      #  https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts
-      - "debian-9": &DOCKERHUB_CONTEXT
-          context: "dockerhub-auth"
+      - "debian-9":
+          {}

       - "debian-10":
-          <<: *DOCKERHUB_CONTEXT
           requires:
             - "debian-9"

       - "ubuntu-20-04":
-          <<: *DOCKERHUB_CONTEXT
+          {}
       - "ubuntu-18-04":
-          <<: *DOCKERHUB_CONTEXT
           requires:
             - "ubuntu-20-04"
       - "ubuntu-16-04":
-          <<: *DOCKERHUB_CONTEXT
           requires:
             - "ubuntu-20-04"

       - "fedora-29":
-          <<: *DOCKERHUB_CONTEXT
+          {}
       - "fedora-28":
-          <<: *DOCKERHUB_CONTEXT
           requires:
             - "fedora-29"

       - "centos-8":
-          <<: *DOCKERHUB_CONTEXT
+          {}

       - "nixos-19-09":
-          <<: *DOCKERHUB_CONTEXT
+          {}

       # Test against PyPy 2.7
       - "pypy27-buster":
-          <<: *DOCKERHUB_CONTEXT
+          {}

       # Just one Python 3.6 configuration while the port is in-progress.
       - "python36":
-          <<: *DOCKERHUB_CONTEXT
+          {}

       # Other assorted tasks and configurations
       - "lint":
-          <<: *DOCKERHUB_CONTEXT
+          {}
+      - "codechecks3":
+          {}
       - "pyinstaller":
-          <<: *DOCKERHUB_CONTEXT
+          {}
       - "deprecations":
-          <<: *DOCKERHUB_CONTEXT
+          {}
       - "c-locale":
-          <<: *DOCKERHUB_CONTEXT
+          {}
       # Any locale other than C or UTF-8.
       - "another-locale":
-          <<: *DOCKERHUB_CONTEXT
+          {}

       - "integration":
-          <<: *DOCKERHUB_CONTEXT
           requires:
             # If the unit test suite doesn't pass, don't bother running the
             # integration tests.
             - "debian-9"

       - "typechecks":
-          <<: *DOCKERHUB_CONTEXT
+          {}
+      - "docs":
+          {}

   images:
     # Build the Docker images used by the ci jobs.  This makes the ci jobs
@@ -102,8 +89,16 @@ workflows:
       - "master"

     jobs:
-      - "build-image-debian-10":
-          <<: *DOCKERHUB_CONTEXT
+      # Every job that pushes a Docker image from Docker Hub needs to provide
+      # credentials.  Use this first job to define a yaml anchor that can be
+      # used to supply a CircleCI job context which makes Docker Hub
+      # credentials available in the environment.
+      #
+      # Contexts are managed in the CircleCI web interface:
+      #
+      #  https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts
+      - "build-image-debian-10": &DOCKERHUB_CONTEXT
+          context: "dockerhub-auth"
       - "build-image-debian-9":
           <<: *DOCKERHUB_CONTEXT
       - "build-image-ubuntu-16-04":
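The ``&DOCKERHUB_CONTEXT`` / ``<<: *DOCKERHUB_CONTEXT`` lines in the two hunks above are plain YAML, not CircleCI-specific syntax: an anchor (``&NAME``) labels a mapping, an alias (``*NAME``) refers back to it, and the merge key (``<<:``) splices the aliased mapping's keys into the current mapping. The ``{}`` values introduced in the ci workflow are empty mappings, which keep each job entry a valid (if empty) mapping once the merged key is gone. A minimal self-contained sketch of the pattern (the job names here are illustrative, not taken from this diff)::

    jobs:
      - "first-job": &SHARED        # anchor: label this mapping
          context: "dockerhub-auth"
      - "second-job":
          <<: *SHARED               # merge key: reuse first-job's keys
          requires:
            - "first-job"
      - "third-job":
          {}                        # explicitly empty mapping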
@@ -165,6 +160,24 @@ jobs:
           command: |
             ~/.local/bin/tox -e codechecks

+  codechecks3:
+    docker:
+      - <<: *DOCKERHUB_AUTH
+        image: "circleci/python:3"
+
+    steps:
+      - "checkout"
+
+      - run:
+          name: "Install tox"
+          command: |
+            pip install --user tox
+
+      - run:
+          name: "Static-ish code checks"
+          command: |
+            ~/.local/bin/tox -e codechecks3
+
   pyinstaller:
     docker:
       - <<: *DOCKERHUB_AUTH
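Each job's ``docker:`` list starts with ``- <<: *DOCKERHUB_AUTH``, merging shared registry credentials into the image entry. The ``DOCKERHUB_AUTH`` anchor itself is defined elsewhere in ``config.yml`` and is not part of this diff; judging from how it is consumed here, it must be a mapping carrying an ``auth`` key, roughly like the following sketch (the environment variable names are assumptions for illustration, not taken from the repository)::

    dockerhub-auth-template: &DOCKERHUB_AUTH
      auth:
        # assumed variable names, supplied by the "dockerhub-auth" context
        username: $DOCKERHUB_USERNAME
        password: $DOCKERHUB_PASSWORD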
@@ -458,6 +471,18 @@ jobs:
           command: |
             /tmp/venv/bin/tox -e typechecks

+  docs:
+    docker:
+      - <<: *DOCKERHUB_AUTH
+        image: "tahoelafsci/ubuntu:18.04-py3"
+
+    steps:
+      - "checkout"
+      - run:
+          name: "Build documentation"
+          command: |
+            /tmp/venv/bin/tox -e docs
+
   build-image: &BUILD_IMAGE
     # This is a template for a job to build a Docker image that has as much of
     # the setup as we can manage already done and baked in.  This cuts down on
.github/workflows/ci.yml (vendored; 172 lines changed)
@@ -18,22 +18,22 @@ jobs:
       fail-fast: false
       matrix:
         os:
-          - macos-latest
           - windows-latest
+          - ubuntu-latest
         python-version:
           - 2.7
+          - 3.6
+          - 3.7
+          - 3.8
+          - 3.9
+        include:
+          # On macOS don't bother with 3.6-3.8, just to get faster builds.
+          - os: macos-latest
+            python-version: 2.7
+          - os: macos-latest
+            python-version: 3.9

     steps:

-      # Get vcpython27 on Windows + Python 2.7, to build netifaces
-      # extension. See https://chocolatey.org/packages/vcpython27 and
-      # https://github.com/crazy-max/ghaction-chocolatey
-      - name: Install MSVC 9.0 for Python 2.7 [Windows]
-        if: matrix.os == 'windows-latest' && matrix.python-version == '2.7'
-        uses: crazy-max/ghaction-chocolatey@v1
-        with:
-          args: install vcpython27
-
       # See https://github.com/actions/checkout. A fetch-depth of 0
       # fetches all tags and branches.
       - name: Check out Tahoe-LAFS sources
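The ``include:`` block added above uses standard GitHub Actions matrix semantics: the runner first expands the cross-product of ``os`` and ``python-version``, then each ``include`` entry appends one extra combination that the product does not already contain. The effect here is that macOS leaves the product entirely and returns only for Python 2.7 and 3.9. A reduced sketch of the mechanism::

    strategy:
      matrix:
        os: [windows-latest, ubuntu-latest]
        python-version: [2.7, 3.6, 3.7, 3.8, 3.9]
        include:
          # two extra jobs beyond the ten produced by the product above
          - os: macos-latest
            python-version: 2.7
          - os: macos-latest
            python-version: 3.9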
@@ -42,7 +42,7 @@ jobs:
           fetch-depth: 0

       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v1
+        uses: actions/setup-python@v2
         with:
           python-version: ${{ matrix.python-version }}

@@ -67,14 +67,14 @@ jobs:

       - name: Install Python packages
         run: |
-          pip install --upgrade codecov tox setuptools
+          pip install --upgrade codecov tox tox-gh-actions setuptools
           pip list

       - name: Display tool versions
         run: python misc/build_helpers/show-tool-versions.py

-      - name: Run "tox -e py27-coverage"
-        run: tox -e py27-coverage
+      - name: Run tox for corresponding Python version
+        run: python -m tox

       - name: Upload eliot.log in case of failure
         uses: actions/upload-artifact@v1
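The hard-coded ``tox -e py27-coverage`` invocation can become a bare ``python -m tox`` because of the newly installed ``tox-gh-actions`` plugin: it inspects the interpreter that ``actions/setup-python`` put on the path and selects the tox environments mapped to that version in ``tox.ini``. That mapping is not shown in this diff; conceptually the step works like this (the example environment name in the comment is an assumption)::

      # tox-gh-actions consults a [gh-actions] table in tox.ini (not part of
      # this diff) that maps interpreter versions to tox environments, e.g.
      # "2.7: py27-coverage".  With that mapping in place the workflow step
      # no longer hard-codes an environment name:
      - name: Run tox for corresponding Python version
        run: python -m tox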
@@ -87,10 +87,29 @@ jobs:
       # Action for this, as of Jan 2021 it does not support Python coverage
       # files - only lcov files.  Therefore, we use coveralls-python, the
       # coveralls.io-supplied Python reporter, for this.
+      #
+      # It is coveralls-python 1.x that has maintained compatibility
+      # with Python 2, while coveralls-python 3.x is compatible with
+      # Python 3.  Sadly we can't use them both in the same workflow.
+      #
+      # The two versions of coveralls-python are somewhat mutually
+      # incompatible.  Mixing these two different versions when
+      # reporting coverage to coveralls.io will lead to grief, since
+      # they get job IDs in different fashion.  If we use both
+      # versions of coveralls in the same workflow, the finalizing
+      # step will be able to mark only part of the jobs as done, and
+      # the other part will be left hanging, never marked as done: it
+      # does not matter if we make an API call or `coveralls --finish`
+      # to indicate that CI has finished running.
+      #
+      # So we try to use the newer coveralls-python that is available
+      # via Python 3 (which is present in GitHub Actions tool cache,
+      # even when we're running Python 2.7 tests) throughout this
+      # workflow.
       - name: "Report Coverage to Coveralls"
         run: |
-          pip install coveralls
-          python -m coveralls
+          pip3 install --upgrade coveralls==3.0.1
+          python3 -m coveralls
         env:
           # Some magic value required for some magic reason.
           GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
@@ -113,80 +132,22 @@ jobs:
   # a single report, we have to tell Coveralls when we've uploaded all of the
   # data files.  This does it.  We make sure it runs last by making it depend
   # on *all* of the coverage-collecting jobs.
+  #
+  # See notes about parallel builds on GitHub Actions at
+  # https://coveralls-python.readthedocs.io/en/latest/usage/configuration.html
   finish-coverage-report:
-    # There happens to just be one coverage-collecting job at the moment.  If
-    # the coverage reports are broken and someone added more
-    # coverage-collecting jobs to this workflow but didn't update this, that's
-    # why.
     needs:
       - "coverage"
     runs-on: "ubuntu-latest"
+    container: "python:3-slim"
     steps:
-      - name: "Check out Tahoe-LAFS sources"
-        uses: "actions/checkout@v2"
-
-      - name: "Finish Coveralls Reporting"
+      - name: "Indicate completion to coveralls.io"
         run: |
-          # coveralls-python does have a `--finish` option but it doesn't seem
-          # to work, at least for us.
-          # https://github.com/coveralls-clients/coveralls-python/issues/248
-          #
-          # But all it does is this simple POST so we can just send it
-          # ourselves.  The only hard part is guessing what the POST
-          # parameters mean.  And I've done that for you already.
-          #
-          # Since the build is done I'm going to guess that "done" is a fine
-          # value for status.
-          #
-          # That leaves "build_num".  The coveralls documentation gives some
-          # hints about it.  It suggests using $CIRCLE_WORKFLOW_ID if your job
-          # is on CircleCI.  CircleCI documentation says this about
-          # CIRCLE_WORKFLOW_ID:
-          #
-          # Observation of the coveralls.io web interface, logs from the
-          # coveralls command in action, and experimentation suggests the
-          # value for PRs is something more like:
-          #
-          #   <GIT MERGE COMMIT HASH>-PR-<PR NUM>
-          #
-          # For branches, it's just the git branch tip hash.
-
-          # For pull requests, refs/pull/<PR NUM>/merge was just checked out
-          # by so HEAD will refer to the right revision.  For branches, HEAD
-          # is also the tip of the branch.
-          REV=$(git rev-parse HEAD)
-
-          # We can get the PR number from the "context".
-          #
-          # https://docs.github.com/en/free-pro-team@latest/developers/webhooks-and-events/webhook-events-and-payloads#pull_request
-          #
-          # (via <https://github.community/t/github-ref-is-inconsistent/17728/3>).
-          #
-          # If this is a pull request, `github.event` is a `pull_request`
-          # structure which has `number` right in it.
-          #
-          # If this is a push, `github.event` is a `push` instead but we only
-          # need the revision to construct the build_num.
-
-          PR=${{ github.event.number }}
-
-          if [ "${PR}" = "" ]; then
-            BUILD_NUM=$REV
-          else
-            BUILD_NUM=$REV-PR-$PR
-          fi
-          REPO_NAME=$GITHUB_REPOSITORY
-
-          curl \
-            -k \
-            https://coveralls.io/webhook?repo_token=$COVERALLS_REPO_TOKEN \
-            -d \
-            "payload[build_num]=$BUILD_NUM&payload[status]=done&payload[repo_name]=$REPO_NAME"
+          pip3 install --upgrade coveralls==3.0.1
+          python3 -m coveralls --finish
         env:
           # Some magic value required for some magic reason.
           GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
-          # Help coveralls identify our project.
-          COVERALLS_REPO_TOKEN: "JPf16rLB7T2yjgATIxFzTsEgMdN1UNq6o"

   integration:
     runs-on: ${{ matrix.os }}
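The rewritten ``finish-coverage-report`` job leans on coveralls-python's native parallel-build support instead of the hand-rolled webhook ``POST`` deleted above: every test job uploads a partial result with ``python3 -m coveralls``, and a single job that ``needs`` all of them closes the build with ``--finish``. Condensed to its skeleton (a sketch only; required keys such as ``runs-on`` and the actual test steps are omitted)::

    jobs:
      coverage:
        steps:
          - run: |
              pip3 install --upgrade coveralls==3.0.1
              python3 -m coveralls            # upload one partial result
      finish-coverage-report:
        needs:
          - "coverage"                        # wait for every upload
        container: "python:3-slim"
        steps:
          - run: |
              pip3 install --upgrade coveralls==3.0.1
              python3 -m coveralls --finish   # mark the parallel build done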
@@ -194,29 +155,34 @@ jobs:
       fail-fast: false
       matrix:
         os:
-          - macos-latest
           - windows-latest
+          - ubuntu-latest
         python-version:
           - 2.7
+          - 3.6
+          - 3.9
+        include:
+          # On macOS don't bother with 3.6, just to get faster builds.
+          - os: macos-latest
+            python-version: 2.7
+          - os: macos-latest
+            python-version: 3.9

     steps:

-      # Get vcpython27 for Windows + Python 2.7, to build netifaces
-      # extension. See https://chocolatey.org/packages/vcpython27 and
-      # https://github.com/crazy-max/ghaction-chocolatey
-      - name: Install MSVC 9.0 for Python 2.7 [Windows]
-        if: matrix.os == 'windows-latest' && matrix.python-version == '2.7'
-        uses: crazy-max/ghaction-chocolatey@v1
-        with:
-          args: install vcpython27
-
       - name: Install Tor [Ubuntu]
         if: matrix.os == 'ubuntu-latest'
         run: sudo apt install tor

-      - name: Install Tor [macOS]
-        if: matrix.os == 'macos-latest'
-        run: brew install tor
+      # TODO: See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3744.
+      # We have to use an older version of Tor for running integration
+      # tests on macOS.
+      - name: Install Tor [macOS, ${{ matrix.python-version }} ]
+        if: ${{ matrix.os == 'macos-latest' }}
+        run: |
+          brew extract --version 0.4.5.8 tor homebrew/cask
+          brew install tor@0.4.5.8
+          brew link --overwrite tor@0.4.5.8

       - name: Install Tor [Windows]
         if: matrix.os == 'windows-latest'
|
|||||||
- name: Display tool versions
|
- name: Display tool versions
|
||||||
run: python misc/build_helpers/show-tool-versions.py
|
run: python misc/build_helpers/show-tool-versions.py
|
||||||
|
|
||||||
- name: Run "tox -e integration"
|
- name: Run "Python 2 integration tests"
|
||||||
|
if: ${{ matrix.python-version == '2.7' }}
|
||||||
run: tox -e integration
|
run: tox -e integration
|
||||||
|
|
||||||
|
- name: Run "Python 3 integration tests"
|
||||||
|
if: ${{ matrix.python-version != '2.7' }}
|
||||||
|
run: tox -e integration3
|
||||||
|
|
||||||
- name: Upload eliot.log in case of failure
|
- name: Upload eliot.log in case of failure
|
||||||
uses: actions/upload-artifact@v1
|
uses: actions/upload-artifact@v1
|
||||||
if: failure()
|
if: failure()
|
||||||
@@ -279,15 +250,6 @@ jobs:

     steps:

-      # Get vcpython27 for Windows + Python 2.7, to build netifaces
-      # extension. See https://chocolatey.org/packages/vcpython27 and
-      # https://github.com/crazy-max/ghaction-chocolatey
-      - name: Install MSVC 9.0 for Python 2.7 [Windows]
-        if: matrix.os == 'windows-latest' && matrix.python-version == '2.7'
-        uses: crazy-max/ghaction-chocolatey@v1
-        with:
-          args: install vcpython27
-
       - name: Check out Tahoe-LAFS sources
         uses: actions/checkout@v2
         with:
.lgtm.yml (deleted; 22 lines)

@@ -1,22 +0,0 @@
-extraction:
-  python:
-    after_prepare:
-      - |
-        # https://discuss.lgtm.com/t/determination-of-python-requirements/974/4
-        sed -i 's/\("pyOpenSSL\)/\# Dependency removed for lgtm (see .lgtm.yml): \1/g' src/allmydata/_auto_deps.py
-
-queries:
-  # This generates spurious errors for calls by interface because of the
-  # zope.interface choice to exclude self from method signatures.  So, turn it
-  # off.
-  - exclude: "py/call/wrong-arguments"
-
-  # The premise of this query is broken.  The errors it produces are nonsense.
-  # There is no such thing as a "procedure" in Python and "None" is not
-  # meaningless.
-  - exclude: "py/procedure-return-value-used"
-
-  # It is true that this query identifies things which are sometimes mistakes.
-  # However, it also identifies things which are entirely valid.  Therefore,
-  # it produces noisy results.
-  - exclude: "py/implicit-string-concatenation-in-list"
CONTRIBUTORS.rst (new file; 42 lines)

@@ -0,0 +1,42 @@
+Contributor Checklist
+=====================
+
+
+* Create a ``Trac`` ticket, fill it out and assign it to yourself (contact exarkun if you don't have an account):
+
+  ``https://tahoe-lafs.org/trac/tahoe-lafs/newticket``
+
+* Use the ticket number to name your branch (example):
+
+  ``3003.contributor-guide``
+
+* Good idea to add tests at the same time you write your code.
+
+* Add a file to the ``/newsfragments`` folder, named with the ticket number and the type of patch (example):
+
+  ``newsfragments/3651.minor``
+
+* ``towncrier`` recognizes the following types:
+
+  ``incompat``, ``feature``, ``bugfix``, ``installation``, ``configuration``, ``documentation``, ``removed``, ``other``, ``minor``
+* Add one sentence to ``newsfragments/<ticket-number>.<towncrier-type>`` describing the change (example):
+
+  ``The integration test suite has been updated to use pytest-twisted instead of deprecated pytest APIs.``
+
+* Run the test suite with ``tox``, ``tox -e codechecks`` and ``tox -e typechecks``
+
+* Push your branch to Github with your ticket number in the merge commit message (example):
+
+  ``Fixes ticket:3003``
+
+  This makes the ``Trac`` ticket close when your PR gets approved.
+
+* Request appropriate review - we suggest asking `Tahoe Committers <https://github.com/orgs/tahoe-lafs/teams/tahoe-committers>`__
+
+References
+----------
+
+This checklist is a summary of `this page on contributing Patches <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Patches>`__
+
+Before authoring or reviewing a patch, please familiarize yourself with the `Coding Standard <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CodingStandards>`__
+and the `Contributor Code of Conduct <docs/CODE_OF_CONDUCT.md>`__.
CREDITS (28 lines changed)

@@ -204,6 +204,34 @@ E: meejah@meejah.ca
 P: 0xC2602803128069A7, 9D5A 2BD5 688E CB88 9DEB CD3F C260 2803 1280 69A7
 D: various bug-fixes and features

+N: Chad Whitacre
+E: chad@zetaweb.com
+D: Python3 porting
+
+N: Itamar Turner-Trauring
+E: itamar@pythonspeed.com
+D: Python3 porting
+
+N: Jason R. Coombs
+E: jaraco@jaraco.com
+D: Python3 porting
+
+N: Maciej Fijalkowski
+E: fijall@gmail.com
+D: Python3 porting
+
+N: Ross Patterson
+E: me@rpatterson.net
+D: Python3 porting
+
+N: Sajith Sasidharan
+E: sajith@hcoop.net
+D: Python3 porting
+
+N: Pete Fein
+E: pete@snake.dev
+D: Python3 porting
+
 N: Viktoriia Savchuk
 W: https://twitter.com/viktoriiasvchk
 D: Developer community focused improvements on the README file.
README.rst (15 lines changed)

@@ -6,7 +6,7 @@ Free and Open decentralized data store

 `Tahoe-LAFS <https://www.tahoe-lafs.org>`__ (Tahoe Least-Authority File Store) is the first free software / open-source storage technology that distributes your data across multiple servers. Even if some servers fail or are taken over by an attacker, the entire file store continues to function correctly, preserving your privacy and security.

-|Contributor Covenant| |readthedocs| |travis| |circleci| |coveralls|
+|Contributor Covenant| |readthedocs| |circleci| |githubactions| |coveralls|


 Table of contents

@@ -72,7 +72,7 @@ You can find the full Tahoe-LAFS documentation at our `documentation site <http:

 Get involved with the Tahoe-LAFS community:

-- Chat with Tahoe-LAFS developers at #tahoe-lafs chat on irc.freenode.net or `Slack <https://join.slack.com/t/tahoe-lafs/shared_invite/zt-jqfj12r5-ZZ5z3RvHnubKVADpP~JINQ>`__.
+- Chat with Tahoe-LAFS developers at ``#tahoe-lafs`` channel on `libera.chat <https://libera.chat/>`__ IRC network or `Slack <https://join.slack.com/t/tahoe-lafs/shared_invite/zt-jqfj12r5-ZZ5z3RvHnubKVADpP~JINQ>`__.

 - Join our `weekly conference calls <https://www.tahoe-lafs.org/trac/tahoe-lafs/wiki/WeeklyMeeting>`__ with core developers and interested community members.

@@ -93,6 +93,10 @@ As a community-driven open source project, Tahoe-LAFS welcomes contributions of

 Before authoring or reviewing a patch, please familiarize yourself with the `Coding Standard <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CodingStandards>`__ and the `Contributor Code of Conduct <docs/CODE_OF_CONDUCT.md>`__.

+🤝 Supporters
+--------------
+
+We would like to thank `Fosshost <https://fosshost.org>`__ for supporting us with hosting services. If your open source project needs help, you can apply for their support.
+
 ❓ FAQ
 ------

@@ -118,13 +122,12 @@ See `TGPPL.PDF <https://tahoe-lafs.org/~zooko/tgppl.pdf>`__ for why the TGPPL ex
    :alt: documentation status
    :target: http://tahoe-lafs.readthedocs.io/en/latest/?badge=latest

-.. |travis| image:: https://travis-ci.org/tahoe-lafs/tahoe-lafs.png?branch=master
-   :alt: build status
-   :target: https://travis-ci.org/tahoe-lafs/tahoe-lafs
-
 .. |circleci| image:: https://circleci.com/gh/tahoe-lafs/tahoe-lafs.svg?style=svg
    :target: https://circleci.com/gh/tahoe-lafs/tahoe-lafs

+.. |githubactions| image:: https://github.com/tahoe-lafs/tahoe-lafs/actions/workflows/ci.yml/badge.svg
+   :target: https://github.com/tahoe-lafs/tahoe-lafs/actions
+
 .. |coveralls| image:: https://coveralls.io/repos/github/tahoe-lafs/tahoe-lafs/badge.svg
    :alt: code coverage
    :target: https://coveralls.io/github/tahoe-lafs/tahoe-lafs
docs/CODE_OF_CONDUCT.md

@@ -45,6 +45,7 @@ The following community members have made themselves available for conduct issue

 - Jean-Paul Calderone (jean-paul at leastauthority dot com)
 - meejah (meejah at meejah dot ca)
+- May-Lee Sia(she/her) (tahoe dot lafs dot community at gmail dot com)

 This Code of Conduct is adapted from the [Contributor Covenant][homepage],
 version 1.3.0, available at
docs/INSTALL.rst (deleted; 343 lines)

@@ -1,343 +0,0 @@
-.. -*- coding: utf-8-with-signature-unix; fill-column: 77 -*-
-
-..
-    note: if you aren't reading the rendered form of these docs at
-    http://tahoe-lafs.readthedocs.io/en/latest/ , then be aware that any
-    ":doc:" links refer to other files in this docs/ directory
-
-*********************
-Installing Tahoe-LAFS
-*********************
-
-Welcome to `the Tahoe-LAFS project`_, a secure, decentralized, fault-tolerant
-storage system. See :doc:`about` for an overview of the architecture and
-security properties of the system.
-
-This procedure should work on Windows, Mac, illumos (previously OpenSolaris),
-and too many flavors of Linux and of BSD to list.
-
-.. _the Tahoe-LAFS project: https://tahoe-lafs.org
-
-First: In Case Of Trouble
-=========================
-
-In some cases these instructions may fail due to peculiarities of your
-platform.
-
-If the following instructions don't Just Work without any further effort on
-your part, then please write to `the tahoe-dev mailing list`_ where friendly
-hackers will help you out.
-
-.. _the tahoe-dev mailing list: https://tahoe-lafs.org/cgi-bin/mailman/listinfo/tahoe-dev
-
-Pre-Packaged Versions
-=====================
-
-You may not need to build Tahoe at all.
-
-If you are on Windows, please see :doc:`windows` for platform-specific
-instructions.
-
-If you are on a Mac, you can either follow these instructions, or use the
-pre-packaged bundle described in :doc:`OS-X`.
-
-Many Linux distributions include Tahoe-LAFS packages. Debian and Ubuntu users
-can ``apt-get install tahoe-lafs``. See `OSPackages`_ for other
-platforms.
-
-.. _OSPackages: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/OSPackages
-
-
-Preliminaries
-=============
-
-If you don't use a pre-packaged copy of Tahoe, you can build it yourself.
-You'll need Python2.7, pip, and virtualenv.
-Tahoe-LAFS depends on some libraries which require a C compiler to build.
-However, for many platforms, PyPI hosts already-built packages of libraries.
-
-If there is no already-built package for your platform,
-you will need a C compiler,
-the Python development headers,
-and some libraries (libffi-dev and libssl-dev).
-
-On a modern Debian/Ubuntu-derived distribution, this command will get you
-everything you need::
-
-  apt-get install build-essential python-dev libffi-dev libssl-dev libyaml-dev python-virtualenv
-
-On OS-X, install pip and virtualenv as described below. If you want to
-compile the dependencies yourself, you'll also need to install
-Xcode and its command-line tools.
-
-**Note** that Tahoe-LAFS depends on `openssl 1.1.1c` or greater.
-
-Python 2.7
-----------
-
-Check if you already have an adequate version of Python installed by running
-``python -V``. The latest version of Python v2.7 is recommended, which is
-2.7.11 as of this writing. Python v2.6.x and v3 do not work. On Windows, we
-recommend the use of native Python v2.7, not Cygwin Python. If you don't have
-one of these versions of Python installed, `download`_ and install the latest
-version of Python v2.7. Make sure that the path to the installation directory
-has no spaces in it (e.g. on Windows, do not install Python in the "Program
-Files" directory)::
-
-  % python --version
-  Python 2.7.11
-
-.. _download: https://www.python.org/downloads/
-
-pip
----
-
-Many Python installations already include ``pip``, but in case yours does
-not, get it with the `pip install instructions`_::
-
-  % pip --version
-  pip 10.0.1 from ... (python 2.7)
-
-.. _pip install instructions: https://pip.pypa.io/en/stable/installing/
-
-virtualenv
-----------
-
-If you do not have an OS-provided copy of ``virtualenv``, install it with the
-instructions from the `virtualenv documentation`_::
-
-
-  % virtualenv --version
-  15.1.0
-
-.. _virtualenv documentation: https://virtualenv.pypa.io/en/latest/installation.html
-
-C compiler and libraries
-------------------------
-
-Except on OS-X, where the Tahoe project hosts pre-compiled wheels for all
-dependencies, you will need several C libraries installed before you can
-build. You will also need the Python development headers, and a C compiler
-(your python installation should know how to find these).
-
-On Debian/Ubuntu-derived systems, the necessary packages are ``python-dev``,
-``libffi-dev``, and ``libssl-dev``, and can be installed with ``apt-get``. On
-RPM-based system (like Fedora) these may be named ``python-devel``, etc,
-instead, and cam be installed with ``yum`` or ``rpm``.
-
-**Note** that Tahoe-LAFS depends on `openssl 1.1.1c` or greater.
-
-
-Install the Latest Tahoe-LAFS Release
-=====================================
-
-We recommend creating a fresh virtualenv for your Tahoe-LAFS install, to
-isolate it from any python packages that are already installed (and to
-isolate the rest of your system from Tahoe's dependencies).
-
-This example uses a virtualenv named ``venv``, but you can call it anything
-you like. Many people prefer to keep all their virtualenvs in one place, like
-``~/.local/venvs/`` or ``~/venvs/``.
-
-It's usually a good idea to upgrade the virtualenv's ``pip`` and
-``setuptools`` to their latest versions, with ``venv/bin/pip install -U pip
-setuptools``. Many operating systems have an older version of ``virtualenv``,
-which then includes older versions of pip and setuptools. Upgrading is easy,
-and only affects the virtualenv: not the rest of your computer.
-
-Then use the virtualenv's ``pip`` to install the latest Tahoe-LAFS release
-from PyPI with ``venv/bin/pip install tahoe-lafs``. After installation, run
-``venv/bin/tahoe --version`` to confirm the install was successful::
-
-  % virtualenv venv
-  New python executable in ~/venv/bin/python2.7
-  Installing setuptools, pip, wheel...done.
-
-  % venv/bin/pip install -U pip setuptools
-  Downloading/unpacking pip from https://pypi.python.org/...
-  ...
-  Successfully installed pip setuptools
-
-  % venv/bin/pip install tahoe-lafs
-  Collecting tahoe-lafs
-  ...
-  Installing collected packages: ...
-  Successfully installed ...
-
-  % venv/bin/tahoe --version
-  tahoe-lafs: 1.14.0
-  foolscap: ...
-
-  %
-
-Install From a Source Tarball
------------------------------
-
-You can also install directly from the source tarball URL. To verify
-signatures, first see verifying_signatures_ and replace the URL in the
-following instructions with the local filename.
-
-  % virtualenv venv
-  New python executable in ~/venv/bin/python2.7
-  Installing setuptools, pip, wheel...done.
-
-  % venv/bin/pip install https://tahoe-lafs.org/downloads/tahoe-lafs-1.14.0.tar.bz2
-  Collecting https://tahoe-lafs.org/downloads/tahoe-lafs-1.14.0.tar.bz2
-  ...
-  Installing collected packages: ...
-  Successfully installed ...
-
-  % venv/bin/tahoe --version
-  tahoe-lafs: 1.14.0
-  ...
-
-.. _verifying_signatures:
-
-Verifying Signatures
---------------------
-
-First download the source tarball and then any signatures. There are several
-developers who are able to produce signatures for a release. A release may
-have multiple signatures. All should be valid and you should confirm at least
-one of them (ideally, confirm all).
-
-This statement, signed by the existing Tahoe release-signing key, attests to
-those developers authorized to sign a Tahoe release:
-
-.. include:: developer-release-signatures
-   :code:
-
-Signatures are made available beside the release. So for example, a release
-like ``https://tahoe-lafs.org/downloads/tahoe-lafs-1.16.0.tar.bz2`` might
-have signatures ``tahoe-lafs-1.16.0.tar.bz2.meejah.asc`` and
-``tahoe-lafs-1.16.0.tar.bz2.warner.asc``.
-
-To verify the signatures using GnuPG::
-
-  % gpg --verify tahoe-lafs-1.16.0.tar.bz2.meejah.asc tahoe-lafs-1.16.0.tar.bz2
-  gpg: Signature made XXX
-  gpg:                using RSA key 9D5A2BD5688ECB889DEBCD3FC2602803128069A7
-  gpg: Good signature from "meejah <meejah@meejah.ca>" [full]
-  % gpg --verify tahoe-lafs-1.16.0.tar.bz2.warner.asc tahoe-lafs-1.16.0.tar.bz2
-  gpg: Signature made XXX
-  gpg:                using RSA key 967EFE06699872411A77DF36D43B4C9C73225AAF
-  gpg: Good signature from "Brian Warner <warner@lothar.com>" [full]
-
-
-
-Extras
-------
-
-Tahoe-LAFS provides some functionality only when explicitly requested at installation time.
-It does this using the "extras" feature of setuptools.
-You can request these extra features when running the ``pip install`` command like this::
-
-  % venv/bin/pip install tahoe-lafs[tor]
-
-This example enables support for listening and connecting using Tor.
-The Tahoe-LAFS documentation for specific features which require an explicit install-time step will mention the "extra" that must be requested.
-
-Hacking On Tahoe-LAFS
----------------------
-
-To modify the Tahoe source code, you should get a git checkout, and install
-with the ``--editable`` flag. You should also use the ``[test]`` extra to get
-the additional libraries needed to run the unit tests::
-
-  % git clone https://github.com/tahoe-lafs/tahoe-lafs.git
-
-  % cd tahoe-lafs
-
-  % virtualenv venv
-
-  % venv/bin/pip install --editable .[test]
-  Obtaining file::~/tahoe-lafs
-  ...
-  Successfully installed ...
-
-  % venv/bin/tahoe --version
-  tahoe-lafs: 1.14.0.post34.dev0
-  ...
-
-This way, you won't have to re-run the ``pip install`` step each time you
-modify the source code.
-
-Running the ``tahoe`` executable
-================================
-
-The rest of the Tahoe-LAFS documentation assumes that you can run the
-``tahoe`` executable that you just created. You have four basic options:
-
-* Use the full path each time (e.g. ``~/venv/bin/tahoe``).
-* "`Activate`_" the virtualenv with ``. venv/bin/activate``, to get a
-  subshell with a ``$PATH`` that includes the ``venv/bin/`` directory, then
-  you can just run ``tahoe``.
-* Change your ``$PATH`` to include the ``venv/bin/`` directory, so you can
-  just run ``tahoe``.
-* Symlink from ``~/bin/tahoe`` to the ``tahoe`` executable. Since ``~/bin``
-  is typically in your ``$PATH`` (at least if it exists when you log in),
-  this will let you just run ``tahoe``.
-
-You might also find the `pipsi`_ tool convenient: ``pipsi install
-tahoe-lafs`` will create a new virtualenv, install tahoe into it, then
-symlink just the executable (into ``~/.local/bin/tahoe``). Then either add
-``~/.local/bin/`` to your ``$PATH``, or make one last symlink into
-``~/bin/tahoe``.
-
-.. _Activate: https://virtualenv.pypa.io/en/latest/userguide.html#activate-script
-.. _pipsi: https://pypi.python.org/pypi/pipsi/0.9
-
-Running the Self-Tests
-======================
-
-To run the self-tests from a source tree, you'll need ``tox`` installed. On a
-Debian/Ubuntu system, use ``apt-get install tox``. You can also install it
-into your tahoe-specific virtualenv with ``pip install tox``.
-
-Then just run ``tox``. This will create a new fresh virtualenv, install Tahoe
-(from the source tree, including any changes you have made) and all its
-dependencies (including testing-only dependencies) into the virtualenv, then
-run the unit tests. This ensures that the tests are repeatable and match the
-results of other users, unaffected by any other Python packages installed on
-your machine. On a modern computer this will take 5-10 minutes, and should
-result in a "all tests passed" mesage::
-
-  % tox
-  GLOB sdist-make: ~/tahoe-lafs/setup.py
-  py27 recreate: ~/tahoe-lafs/.tox/py27
-  py27 inst: ~/tahoe-lafs/.tox/dist/tahoe-lafs-1.14.0.post8.dev0.zip
-  py27 runtests: commands[0] | tahoe --version
-  py27 runtests: commands[1] | trial --rterrors allmydata
-  allmydata.test.test_auth
-    AccountFileCheckerKeyTests
-      test_authenticated ... [OK]
-      test_missing_signature ... [OK]
-  ...
-  Ran 1186 tests in 423.179s
-
-  PASSED (skips=7, expectedFailures=3, successes=1176)
-  __________________________ summary ___________________________________
-  py27: commands succeeded
-  congratulations :)
-
-Common Problems
-===============
-
-If you see an error like ``fatal error: Python.h: No such file or directory``
-while compiling the dependencies, you need the Python development headers. If
-you are on a Debian or Ubuntu system, you can install them with ``sudo
-apt-get install python-dev``. On RedHat/Fedora, install ``python-devel``.
-
-Similar errors about ``openssl/crypto.h`` indicate that you are missing the
-OpenSSL development headers (``libssl-dev``). Likewise ``ffi.h`` means you
-need ``libffi-dev``.
-
-**Note** that Tahoe-LAFS depends on `openssl 1.1.1c` or greater.
-
-
-Using Tahoe-LAFS
-================
-
-Now you are ready to deploy a decentralized filesystem. You will use the
-``tahoe`` executable to create, configure, and launch your Tahoe-LAFS nodes.
-See :doc:`running` for instructions on how to do that.
@@ -1,6 +1,6 @@
-******************************************
-How To Build Tahoe-LAFS On A Desert Island
-******************************************
+***************************************
+Building Tahoe-LAFS On A Desert Island
+***************************************

 (or an airplane, or anywhere else without internet connectivity)

docs/Installation/install-on-linux.rst (new file; 75 lines)

@@ -0,0 +1,75 @@
+****************************
+Building Tahoe-LAFS on Linux
+****************************
+
+Tahoe-LAFS has made packages available for installing on many linux and BSD distributions.
+Debian and Ubuntu users can use ``apt-get install tahoe-lafs``.
+If you are working on a Linux distribution which does not have Tahoe-LAFS or are looking to hack on the source code, you can build Tahoe-LAFS yourself:
+
+Prerequisites
+=============
+
+Make sure the following are installed:
+
+* **Python 3's latest version**: Check for the version by running ``python --version``.
+* **pip**: Most python installations already include ``pip``. However, if your installation does not, see `pip installation <https://pip.pypa.io/en/stable/installing/>`_.
+* **virtualenv**: Use ``pip`` to install virtualenv::
+
+    pip install --user virtualenv
+
+* **C compiler and libraries**:
+
+  * ``python-dev``: Python development headers.
+  * ``libffi-dev``: Foreign Functions Interface library.
+  * ``libssl-dev``: SSL library, Tahoe-LAFS needs OpenSSL version 1.1.1c or greater.
+
+  .. note::
+     If you are working on Debian or Ubuntu, you can install the necessary libraries using ``apt-get``::
+
+        apt-get install python-dev libffi-dev libssl-dev
+
+     On an RPM-based system such as Fedora, you can install the necessary libraries using ``yum`` or ``rpm``. However, the packages may be named differently.
+
+Install the Latest Tahoe-LAFS Release
+=====================================
+
+If you are looking to hack on the source code or run pre-release code, we recommend you install Tahoe-LAFS directly from source by creating a ``virtualenv`` instance:
+
+1. Clone the Tahoe-LAFS repository::
+
+    git clone https://github.com/tahoe-lafs/tahoe-lafs.git
+
+2. Move into the tahoe-lafs directory::
+
+    cd tahoe-lafs
+
+3. Create a fresh virtualenv for your Tahoe-LAFS install::
+
+    virtualenv venv
+
+   .. note::
+      venv is the name of the virtual environment in this example. Use any name for your environment.
+
+4. Upgrade ``pip`` and ``setuptools`` on the newly created virtual environment::
+
+    venv/bin/pip install -U pip setuptools
+
+5. If you'd like to modify the Tahoe source code, you need to install Tahoe-LAFS with the ``--editable`` flag with the ``test`` extra::
+
+    venv/bin/pip install --editable .[test]
+
+   .. note::
+      Tahoe-LAFS provides extra functionality when requested explicitly at installation using the "extras" feature of setuptools. To learn more about the extras which Tahoe supports, see Tahoe extras.
+
+6. Verify installation by checking for the version::
+
+    venv/bin/tahoe --version
+
+If you do not want to use the full path, i.e., ``venv/bin/tahoe`` everytime you want to run tahoe, you can activate the ``virtualenv``::
+
+  . venv/bin/activate
+
+This will generate a subshell with a ``$PATH`` that includes the ``venv/bin/`` directory.
+
+
docs/Installation/install-on-windows.rst (new file; 45 lines)

@@ -0,0 +1,45 @@
+******************************
+Building Tahoe-LAFS on Windows
+******************************
+
+If you are looking to hack on the source code or run pre-release code, we recommend you create a virtualenv instance and install Tahoe-LAFS into that:
+
+
+1. Make sure you have Powershell installed. See `PowerShell installation <https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-windows?view=powershell-7.1>`_.
+
+2. Install the latest version of Python 3. Download the .exe file at the `python website <https://www.python.org/downloads/>`_.
+
+3. Open the installer by double-clicking it. Select the **Add Python to PATH** check-box, then click **Install Now**.
+
+4. Start PowerShell and enter the following command to verify python installation::
+
+    python --version
+
+5. Use ``pip`` to install ``virtualenv``::
+
+    pip install --user virtualenv
+
+6. Create a fresh virtualenv for your Tahoe-LAFS install using the following command::
+
+    virtualenv venv
+
+   .. note::
+      venv is the name of the virtual environment in this example. Use any name for your environment.
+
+7. Use pip to install Tahoe-LAFS in the virtualenv instance::
+
+    venv\Scripts\pip install tahoe-lafs
+
+6. Verify installation by checking for the version::
+
+    venv\Scripts\tahoe --version
+
+If you do not want to use the full path, i.e. ``venv\Scripts\tahoe`` everytime you want to run tahoe, you can:
+
+* Activate the virtualenv::
+
+    . venv\Scripts\activate
+
+  This will generate a subshell with a ``$PATH`` that includes the ``venv\Scripts\`` directory.
+
+* Change your ``$PATH`` to include the ``venv\Scripts`` directory.
docs/Installation/install-tahoe.rst (new file; 68 lines)

@@ -0,0 +1,68 @@
+.. -*- coding: utf-8-with-signature-unix; fill-column: 77 -*-
+
+..
+    note: if you aren't reading the rendered form of these docs at
+    http://tahoe-lafs.readthedocs.io/en/latest/ , then be aware that any
+    ":doc:" links refer to other files in this docs/ directory
+
+*********************
+Installing Tahoe-LAFS
+*********************
+
+`Tahoe-LAFS`_ is a secure, decentralized, and fault-tolerant storage system.
+To see an overview of the architecture and security properties, see :doc:`Welcome to Tahoe LAFS! <../about-tahoe>`
+
+Tahoe-LAFS can be installed and used on any of the following operating systems.
+
+.. _Tahoe-LAFS: https://tahoe-lafs.org
+
+Microsoft Windows
+=================
+
+To install Tahoe-LAFS on Windows:
+
+1. Make sure you have Powershell installed. See `PowerShell installation <https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-core-on-windows?view=powershell-7.1>`_.
+
+2. Install the latest version of Python 3. Download the .exe file at the `python website <https://www.python.org/downloads/>`_.
+
+3. Open the installer by double-clicking it. Select the **Add Python to PATH** check-box, then click **Install Now**.
+
+4. Start PowerShell and enter the following command to verify python installation::
+
+    python --version
+
+5. Enter the following command to install Tahoe-LAFS::
+
+    pip install tahoe-lafs
+
+6. Verify installation by checking for the version::
+
+    tahoe --version
+
+If you want to hack on Tahoe's source code, you can install Tahoe in a ``virtualenv`` on your Windows Machine. To learn more, see :doc:`install-on-windows`.
+
+Linux, BSD, or MacOS
+====================
+
+Tahoe-LAFS can be installed on MacOS, many Linux and BSD distributions. If you are using Ubuntu or Debian, run the following command to install Tahoe-LAFS::
+
+    apt-get install tahoe-lafs
+
+If you are working on MacOS or a Linux distribution which does not have Tahoe-LAFS packages, you can build it yourself:
+
+1. Make sure the following are installed:
+
+   * **Python 3's latest version**: Check for the version by running ``python --version``.
+   * **pip**: Most python installations already include `pip`. However, if your installation does not, see `pip installation <https://pip.pypa.io/en/stable/installing/>`_.
+
+2. Install Tahoe-LAFS using pip::
+
+    pip install tahoe-lafs
+
+3. Verify installation by checking for the version::
+
+    tahoe --version
+
+If you are looking to hack on the source code or run pre-release code, we recommend you install Tahoe-LAFS on a `virtualenv` instance. To learn more, see :doc:`install-on-linux`.
+
+You can always write to the `tahoe-dev mailing list <https://tahoe-lafs.org/cgi-bin/mailman/listinfo/tahoe-dev>`_ or chat on the `Libera.chat IRC <irc://irc.libera.chat/%23tahoe-lafs>`_ if you are not able to get Tahoe-LAFS up and running on your deployment.
docs/Makefile

@@ -214,3 +214,7 @@ pseudoxml:
 	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
 	@echo
 	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
+
+.PHONY: livehtml
+livehtml:
+	sphinx-autobuild -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@@ -1,23 +0,0 @@
-==============
-OS-X Packaging
-==============
-
-Pre-built Tahoe-LAFS ".pkg" installers for OS-X are generated with each
-source-code commit. These installers offer an easy way to get Tahoe and all
-its dependencies installed on your Mac. They do not yet provide a
-double-clickable application: after installation, you will have a "tahoe"
-command-line tool, which you can use from a shell (a Terminal window) just as
-if you'd installed from source.
-
-Installers are available from this directory:
-
-  https://tahoe-lafs.org/source/tahoe-lafs/tarballs/OS-X-packages/
-
-Download the latest .pkg file to your computer and double-click on it. This
-will install to /Applications/tahoe.app, however the app icon there is not
-how you use Tahoe (launching it will get you a dialog box with a reminder to
-use Terminal). ``/Applications/tahoe.app/bin/tahoe`` is the executable. The
-next shell you start ought to have that directory in your $PATH (thanks to a
-file in ``/etc/paths.d/``), unless your ``.profile`` overrides it.
-
-Tahoe-LAFS is also easy to install with pip, as described in the README.
@@ -1,7 +1,30 @@
+If you are reading Tahoe-LAFS documentation
+-------------------------------------------
+
-Note: http://tahoe-lafs.readthedocs.io/en/latest/ is the preferred place to
-read this documentation (GitHub doesn't render cross-document links or
-images). If you're reading this on https://github.com/tahoe-lafs/tahoe-lafs ,
-or from a checked-out source tree, then either run `tox -e docs` and open
-_build/html/index.html in your browser, or view the pre-rendered trunk copy
-at http://tahoe-lafs.readthedocs.io/en/latest/
+If you are reading Tahoe-LAFS documentation at a code hosting site or
+from a checked-out source tree, the preferred place to view the docs
+is http://tahoe-lafs.readthedocs.io/en/latest/. Code-hosting sites do
+not render cross-document links or images correctly.
+
+If you are writing Tahoe-LAFS documentation
+-------------------------------------------
+
+To edit Tahoe-LAFS docs, you will need a checked-out source tree. You
+can edit the `.rst` files in this directory using a text editor, and
+then generate HTML output using Sphinx, a program that can produce its
+output in HTML and other formats.
+
+Files with `.rst` extension use reStructuredText markup format, which
+is the format Sphinx natively handles. To learn more about Sphinx, and
+for a friendly primer on reStructuredText, please see Sphinx project's
+documentation, available at:
+
+https://www.sphinx-doc.org/
+
+If you have `tox` installed, you can run `tox -e docs` and then open
+the resulting docs/_build/html/index.html in your web browser.
+
+Note that Sphinx can also process Python docstrings to generate API
+documentation. Tahoe-LAFS currently does not use Sphinx for this
+purpose.
BIN docs/_static/media/image2.png vendored (binary file not shown; 4.3 KiB before, 7.6 KiB after)
@@ -127,7 +127,7 @@ For more technical detail, please see the `the doc page`_ on the Wiki.
 Get Started
 ===========
 
-To use Tahoe-LAFS, please see :doc:`INSTALL`.
+To use Tahoe-LAFS, please see :doc:`Installing Tahoe-LAFS <../Installation/install-tahoe>`.
 
 License
 =======
@@ -514,10 +514,10 @@ Command Examples
 the pattern will be matched against any level of the directory tree;
 it's still impossible to specify absolute path exclusions.
 
-``tahoe backup --exclude-from=/path/to/filename ~ work:backups``
+``tahoe backup --exclude-from-utf-8=/path/to/filename ~ work:backups``
 
-``--exclude-from`` is similar to ``--exclude``, but reads exclusion
-patterns from ``/path/to/filename``, one per line.
+``--exclude-from-utf-8`` is similar to ``--exclude``, but reads exclusion
+patterns from a UTF-8-encoded ``/path/to/filename``, one per line.
 
 ``tahoe backup --exclude-vcs ~ work:backups``
 
@@ -7,11 +7,10 @@ Tahoe-LAFS SFTP Frontend
 1.  `SFTP Background`_
 2.  `Tahoe-LAFS Support`_
 3.  `Creating an Account File`_
-4.  `Running An Account Server (accounts.url)`_
-5.  `Configuring SFTP Access`_
-6.  `Dependencies`_
-7.  `Immutable and Mutable Files`_
-8.  `Known Issues`_
+4.  `Configuring SFTP Access`_
+5.  `Dependencies`_
+6.  `Immutable and Mutable Files`_
+7.  `Known Issues`_
 
 
 SFTP Background
@@ -78,33 +77,6 @@ start with "ssh-".
 Now add an ``accounts.file`` directive to your ``tahoe.cfg`` file, as described in
 the next sections.
 
-Running An Account Server (accounts.url)
-========================================
-
-The accounts.url directive allows access requests to be controlled by an
-HTTP-based login service, useful for centralized deployments. This was used
-by AllMyData to provide web-based file access, where the service used a
-simple PHP script and database lookups to map an account email address and
-password to a Tahoe-LAFS directory cap. The service will receive a
-multipart/form-data POST, just like one created with a <form> and <input>
-fields, with three parameters:
-
-• action: "authenticate" (this is a static string)
-• email: USERNAME (Tahoe-LAFS has no notion of email addresses, but the
-  authentication service uses them as account names, so the interface
-  presents this argument as "email" rather than "username").
-• passwd: PASSWORD
-
-It should return a single string that either contains a Tahoe-LAFS directory
-cap (URI:DIR2:...), or "0" to indicate a login failure.
-
-Tahoe-LAFS recommends the service be secure, preferably localhost-only. This
-makes it harder for attackers to brute force the password or use DNS
-poisoning to cause the Tahoe-LAFS gateway to talk with the wrong server,
-thereby revealing the usernames and passwords.
-
-Public key authentication is not supported when an account server is used.
-
 Configuring SFTP Access
 =======================
 
@@ -10,8 +10,11 @@ Contents:
 .. toctree::
    :maxdepth: 2
 
-   about
-   INSTALL
+   about-tahoe
+   Installation/install-tahoe
+   Installation/install-on-windows
+   Installation/install-on-linux
+   Installation/install-on-desert-island
    running
    magic-wormhole-invites
    configuration
@@ -42,6 +45,7 @@ Contents:
    backupdb
 
    developer-guide
+   ticket-triage
 
    anonymity-configuration
 
@@ -50,10 +54,7 @@ Contents:
    logging
    stats
 
-   desert-island
    debian
-   windows
-   OS-X
    build/build-pyOpenSSL
 
    specifications/index
@@ -13,6 +13,102 @@ Specifically, it should be possible to implement a Tahoe-LAFS storage server wit
 The Tahoe-LAFS client will also need to change but it is not expected that it will be noticeably simplified by this change
 (though this may be the first step towards simplifying it).
 
+Glossary
+--------
+
+.. glossary::
+
+   `Foolscap <https://github.com/warner/foolscap/>`_
+     an RPC/RMI (Remote Procedure Call / Remote Method Invocation) protocol for use with Twisted
+
+   storage server
+     a Tahoe-LAFS process configured to offer storage and reachable over the network for store and retrieve operations
+
+   introducer
+     a Tahoe-LAFS process at a known location configured to re-publish announcements about the location of storage servers
+
+   fURL
+     a self-authenticating URL-like string which can be used to locate a remote object using the Foolscap protocol
+
+   lease
+     state associated with a share informing a storage server of the duration of storage desired by a client
+
+   share
+     a single unit of client-provided arbitrary data to be stored by a storage server
+     (in practice, one of the outputs of applying ZFEC encoding to some ciphertext with some additional metadata attached)
+
+   bucket
+     a group of one or more immutable shares held by a storage server and having a common storage index
+
+   slot
+     a group of one or more mutable shares held by a storage server and having a common storage index
+     (sometimes "slot" is considered a synonym for "storage index of a slot")
+
+   storage index
+     a short string which can address a slot or a bucket
+     (in practice, derived by hashing the encryption key associated with the contents of that slot or bucket)
+
+   write enabler
+     a short secret string which storage servers require to be presented before allowing mutation of any mutable share
+
+   lease renew secret
+     a short secret string which storage servers require to be presented before allowing a particular lease to be renewed
+
+Motivation
+----------
+
+Foolscap
+~~~~~~~~
+
+Foolscap is a remote method invocation protocol with several distinctive features.
+At its core it allows separate processes to refer to each other's objects and methods using a capability-based model.
+This allows for extremely fine-grained access control in a system that remains highly securable without becoming overwhelmingly complicated.
+Supporting this is a flexible and extensible serialization system which allows data to be exchanged between processes in carefully controlled ways.
+
+Tahoe-LAFS avails itself of only a small portion of these features.
+A Tahoe-LAFS storage server typically only exposes one object with a fixed set of methods to clients.
+A Tahoe-LAFS introducer node does roughly the same.
+Tahoe-LAFS exchanges simple data structures that have many common, standard serialized representations.
+
+In exchange for this slight use of Foolscap's sophisticated mechanisms,
+Tahoe-LAFS pays a substantial price:
+
+* Foolscap is implemented only for Python.
+  Tahoe-LAFS is thus limited to being implemented only in Python.
+* There is only one Python implementation of Foolscap.
+  The implementation is therefore the de facto standard and understanding of the protocol often relies on understanding that implementation.
+* The Foolscap developer community is very small.
+  The implementation therefore advances very little and some non-trivial part of the maintenance cost falls on the Tahoe-LAFS project.
+* The extensible serialization system imposes substantial complexity compared to the simple data structures Tahoe-LAFS actually exchanges.
+
+HTTP
+~~~~
+
+HTTP is a request/response protocol that has become the lingua franca of the internet.
+Combined with the principles of Representational State Transfer (REST) it is widely employed to create, update, and delete data in collections on the internet.
+HTTP itself provides only modest functionality in comparison to Foolscap.
+However its simplicity and widespread use have led to a diverse and almost overwhelming ecosystem of libraries, frameworks, toolkits, and so on.
+
+By adopting HTTP in place of Foolscap Tahoe-LAFS can realize the following concrete benefits:
+
+* Practically every language or runtime has an HTTP protocol implementation (or a dozen of them) available.
+  This change paves the way for new Tahoe-LAFS implementations using tools better suited for certain situations
+  (mobile client implementations, high-performance server implementations, easily distributed desktop clients, etc).
+* The simplicity of and vast quantity of resources about HTTP make it a very easy protocol to learn and use.
+  This change reduces the barrier to entry for developers to contribute improvements to Tahoe-LAFS's network interactions.
+* For any given language there is very likely an HTTP implementation with a large and active developer community.
+  Tahoe-LAFS can therefore benefit from the large effort being put into making better libraries for using HTTP.
+* One of the core features of HTTP is the mundane transfer of bulk data and implementations are often capable of doing this with extreme efficiency.
+  The alignment of this core feature with a core activity of Tahoe-LAFS of transferring bulk data means that a substantial barrier to improved Tahoe-LAFS runtime performance will be eliminated.
+
+TLS
+~~~
+
+The Foolscap-based protocol provides *some* of Tahoe-LAFS's confidentiality, integrity, and authentication properties by leveraging TLS.
+An HTTP-based protocol can make use of TLS in largely the same way to provide the same properties.
+Provision of these properties *is* dependent on implementers following Great Black Swamp's rules for x509 certificate validation
+(rather than the standard "web" rules for validation).
 
 Requirements
 ------------
 
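
To illustrate the pinning rule referenced above: a client that validates a server certificate by its public key, rather than by the web PKI, might do something like the following sketch. It assumes a SHA-256 hash over the DER-encoded SubjectPublicKeyInfo (the RFC 7469 construction); the function names are illustrative and nothing here is the specified GBS validation procedure::

    # Sketch: compare a server certificate's SPKI hash against a pinned value.
    import hashlib

    from cryptography import x509
    from cryptography.hazmat.primitives.serialization import (
        Encoding,
        PublicFormat,
    )

    def spki_sha256(cert_pem: bytes) -> bytes:
        # Hash the DER-encoded SubjectPublicKeyInfo, per RFC 7469 section 2.4.
        cert = x509.load_pem_x509_certificate(cert_pem)
        spki = cert.public_key().public_bytes(
            Encoding.DER, PublicFormat.SubjectPublicKeyInfo,
        )
        return hashlib.sha256(spki).digest()

    def certificate_matches_pin(cert_pem: bytes, pinned_digest: bytes) -> bool:
        # "Valid" here means only "the key matches the pin", nothing more.
        return spki_sha256(cert_pem) == pinned_digest
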
@@ -234,6 +330,19 @@ Because of the simple types used throughout
 and the equivalence described in `RFC 7049`_
 these examples should be representative regardless of which of these two encodings is chosen.
 
+HTTP Design
+~~~~~~~~~~~
+
+The HTTP interface described here is informed by the ideas of REST
+(Representational State Transfer).
+For ``GET`` requests query parameters are preferred over values encoded in the request body.
+For other requests query parameters are encoded into the message body.
+
+Many branches of the resource tree are conceived as homogeneous containers:
+one branch contains all of the share data;
+another branch contains all of the lease data;
+etc.
+
 General
 ~~~~~~~
 
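
A sketch of what these conventions look like from a client, using Python's ``requests`` library. The base URL is an assumption, the paths are taken from the endpoint sections of this document, and the JSON bodies mirror the examples here (a real implementation might negotiate CBOR instead)::

    import requests

    BASE = "https://storage.example:8443"  # assumed storage server address

    # GET: parameters travel in the query string.
    resp = requests.get(
        BASE + "/v1/immutable/AAAAAAAAAAAAAAAA",
        params={"share": 7, "offset": 0, "size": 48},
    )

    # Non-GET: parameters are encoded into the message body instead.
    resp = requests.post(
        BASE + "/v1/immutable/AAAAAAAAAAAAAAAA",
        json={
            "renew-secret": "efgh", "cancel-secret": "ijkl",
            "share-numbers": [1, 7], "allocated-size": 48,
        },
    )
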
@@ -257,6 +366,71 @@ For example::
     "application-version": "1.13.0"
    }
 
+``PUT /v1/lease/:storage_index``
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+Create a new lease on the bucket addressed by ``storage_index``.
+The details of the lease are encoded in the request body.
+For example::
+
+  {"renew-secret": "abcd", "cancel-secret": "efgh"}
+
+If the ``renew-secret`` value matches an existing lease
+then the expiration time of that lease will be changed to 31 days after the time of this operation.
+If it does not match an existing lease
+then a new lease will be created with this ``renew-secret`` which expires 31 days after the time of this operation.
+
+In these cases the response is ``NO CONTENT`` with an empty body.
+
+It is possible that the storage server will have no shares for the given ``storage_index`` because:
+
+* no such shares have ever been uploaded.
+* a previous lease expired and the storage server reclaimed the storage by deleting the shares.
+
+In these cases the server takes no action and returns ``NOT FOUND``.
+
+Discussion
+``````````
+
+We considered an alternative where ``renew-secret`` and ``cancel-secret`` are placed in query arguments on the request path.
+We chose to put these values into the request body to make the URL simpler.
+
+Several behaviors here are blindly copied from the Foolscap-based storage server protocol.
+
+* There is a cancel secret but there is no API to use it to cancel a lease (see ticket:3768).
+* The lease period is hard-coded at 31 days.
+* There are separate **add** and **renew** lease APIs (see ticket:3773).
+
+These are not necessarily ideal behaviors
+but they are adopted to avoid any *semantic* changes between the Foolscap- and HTTP-based protocols.
+It is expected that some or all of these behaviors may change in a future revision of the HTTP-based protocol.
+
+``POST /v1/lease/:storage_index``
+!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+Renew an existing lease for all shares for the given storage index.
+The details of the lease are encoded in the request body.
+For example::
+
+  {"renew-secret": "abcd"}
+
+If there are no shares for the given ``storage_index``
+then ``NOT FOUND`` is returned.
+
+If there is no lease with a matching ``renew-secret`` value on the given storage index
+then ``NOT FOUND`` is returned.
+In this case,
+if the storage index refers to mutable data
+then the response also includes a list of nodeids where the lease can be renewed.
+For example::
+
+  {"nodeids": ["aaa...", "bbb..."]}
+
+Otherwise,
+the matching lease's expiration time is changed to be 31 days from the time of this operation
+and ``NO CONTENT`` is returned.
+
 Immutable
 ---------
 
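
Taken together, the two lease endpoints above suggest a small client wrapper. This sketch maps the documented status codes onto Python-level outcomes; the server address and helper names are illustrative only::

    import requests

    BASE = "https://storage.example:8443"  # assumed server address

    def add_lease(storage_index, renew_secret, cancel_secret):
        # PUT creates a lease (or implicitly renews a matching one).
        resp = requests.put(
            BASE + "/v1/lease/" + storage_index,
            json={"renew-secret": renew_secret, "cancel-secret": cancel_secret},
        )
        if resp.status_code == 404:
            raise KeyError(storage_index)  # NOT FOUND: no shares here
        assert resp.status_code == 204  # NO CONTENT: lease expires in 31 days

    def renew_lease(storage_index, renew_secret):
        # POST renews an existing lease; 404 means no shares or no
        # lease with a matching renew-secret.
        resp = requests.post(
            BASE + "/v1/lease/" + storage_index,
            json={"renew-secret": renew_secret},
        )
        return resp.status_code == 204
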
@@ -268,6 +442,7 @@ Writing
 
 Initialize an immutable storage index with some buckets.
 The buckets may have share data written to them once.
+A lease is also created for the shares.
 Details of the buckets to create are encoded in the request body.
 For example::
 
@@ -294,6 +469,15 @@ However, we decided this does not matter because:
 therefore no proxy servers can perform any extra logging.
 * Tahoe-LAFS itself does not currently log HTTP request URLs.
 
+The response includes ``already-have`` and ``allocated`` for two reasons:
+
+* If an upload is interrupted and the client loses its local state that lets it know it already uploaded some shares
+  then this allows it to discover this fact (by inspecting ``already-have``) and only upload the missing shares (indicated by ``allocated``).
+
+* If an upload has completed a client may still choose to re-balance storage by moving shares between servers.
+  This might be because a server has become unavailable and a remaining server needs to store more shares for the upload.
+  It could also just be that the client's preferred servers have changed.
+
 ``PUT /v1/immutable/:storage_index/:share_number``
 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
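
From the client's side, the recovery flow described above amounts to: re-allocate, treat ``already-have`` as done, and upload only what is in ``allocated``. A minimal sketch, with an assumed server address, share size, and data layout::

    import requests

    BASE = "https://storage.example:8443"  # assumed server address
    SI = "AAAAAAAAAAAAAAAA"
    SHARE_SIZE = 48

    def upload_missing_shares(shares):
        # shares: dict mapping share number -> bytes of length SHARE_SIZE
        resp = requests.post(
            BASE + "/v1/immutable/" + SI,
            json={
                "renew-secret": "efgh", "cancel-secret": "ijkl",
                "share-numbers": sorted(shares), "allocated-size": SHARE_SIZE,
            },
        )
        body = resp.json()
        # Shares in "already-have" survived the earlier, interrupted upload;
        # only the newly "allocated" ones still need their data.
        for number in body["allocated"]:
            requests.put(
                BASE + "/v1/immutable/{}/{}".format(SI, number),
                headers={
                    "Content-Range": "bytes 0-{}/{}".format(
                        SHARE_SIZE - 1, SHARE_SIZE),
                },
                data=shares[number],
            )
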
@@ -448,6 +632,136 @@ Just like ``GET /v1/mutable/:storage_index``.
 Advise the server the data read from the indicated share was corrupt.
 Just like the immutable version.
 
+Sample Interactions
+-------------------
+
+Immutable Data
+~~~~~~~~~~~~~~
+
+1. Create a bucket for storage index ``AAAAAAAAAAAAAAAA`` to hold two immutable shares, discovering that share ``1`` was already uploaded::
+
+     POST /v1/immutable/AAAAAAAAAAAAAAAA
+     {"renew-secret": "efgh", "cancel-secret": "ijkl",
+      "share-numbers": [1, 7], "allocated-size": 48}
+
+     200 OK
+     {"already-have": [1], "allocated": [7]}
+
+#. Upload the content for immutable share ``7``::
+
+     PUT /v1/immutable/AAAAAAAAAAAAAAAA/7
+     Content-Range: bytes 0-15/48
+     <first 16 bytes of share data>
+
+     200 OK
+
+     PUT /v1/immutable/AAAAAAAAAAAAAAAA/7
+     Content-Range: bytes 16-31/48
+     <second 16 bytes of share data>
+
+     200 OK
+
+     PUT /v1/immutable/AAAAAAAAAAAAAAAA/7
+     Content-Range: bytes 32-47/48
+     <final 16 bytes of share data>
+
+     201 CREATED
+
+#. Download the content of the previously uploaded immutable share ``7``::
+
+     GET /v1/immutable/AAAAAAAAAAAAAAAA?share=7&offset=0&size=48
+
+     200 OK
+     <complete 48 bytes of previously uploaded data>
+
+#. Renew the lease on all immutable shares in bucket ``AAAAAAAAAAAAAAAA``::
+
+     POST /v1/lease/AAAAAAAAAAAAAAAA
+     {"renew-secret": "efgh"}
+
+     204 NO CONTENT
+
+Mutable Data
+~~~~~~~~~~~~
+
+1. Create mutable share number ``3`` with ``10`` bytes of data in slot ``BBBBBBBBBBBBBBBB``::
+
+     POST /v1/mutable/BBBBBBBBBBBBBBBB/read-test-write
+     {
+         "secrets": {
+             "write-enabler": "abcd",
+             "lease-renew": "efgh",
+             "lease-cancel": "ijkl"
+         },
+         "test-write-vectors": {
+             3: {
+                 "test": [{
+                     "offset": 0,
+                     "size": 1,
+                     "operator": "eq",
+                     "specimen": ""
+                 }],
+                 "write": [{
+                     "offset": 0,
+                     "data": "xxxxxxxxxx"
+                 }],
+                 "new-length": 10
+             }
+         },
+         "read-vector": []
+     }
+
+     200 OK
+     {
+         "success": true,
+         "data": []
+     }
+
+#. Safely rewrite the contents of a known version of mutable share number ``3`` (or fail)::
+
+     POST /v1/mutable/BBBBBBBBBBBBBBBB/read-test-write
+     {
+         "secrets": {
+             "write-enabler": "abcd",
+             "lease-renew": "efgh",
+             "lease-cancel": "ijkl"
+         },
+         "test-write-vectors": {
+             3: {
+                 "test": [{
+                     "offset": 0,
+                     "size": <checkstring size>,
+                     "operator": "eq",
+                     "specimen": "<checkstring>"
+                 }],
+                 "write": [{
+                     "offset": 0,
+                     "data": "yyyyyyyyyy"
+                 }],
+                 "new-length": 10
+             }
+         },
+         "read-vector": []
+     }
+
+     200 OK
+     {
+         "success": true,
+         "data": []
+     }
+
+#. Download the contents of share number ``3``::
+
+     GET /v1/mutable/BBBBBBBBBBBBBBBB?share=3&offset=0&size=10
+
+     200 OK
+     <complete 10 bytes of previously uploaded data>
+
+#. Renew the lease on previously uploaded mutable share in slot ``BBBBBBBBBBBBBBBB``::
+
+     POST /v1/lease/BBBBBBBBBBBBBBBB
+     {"renew-secret": "efgh"}
+
+     204 NO CONTENT
+
 .. _RFC 7469: https://tools.ietf.org/html/rfc7469#section-2.4
 
 .. _RFC 7049: https://tools.ietf.org/html/rfc7049#section-4
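
The mutable samples above imply the evaluation rule for test-and-write: every test vector must match the current share contents before any write vector is applied. A sketch of that rule, handling only the ``eq`` operator that appears in the samples (the function names and bytes handling are illustrative, not the server's actual implementation)::

    def test_vector_matches(share_data, test_vector):
        # Compare the specimen against the current bytes at
        # [offset, offset + size).
        start = test_vector["offset"]
        chunk = share_data[start:start + test_vector["size"]]
        if test_vector["operator"] == "eq":
            return chunk == test_vector["specimen"]
        raise ValueError("unknown operator: " + test_vector["operator"])

    def apply_if_tests_pass(share_data, tests, writes, new_length):
        # All tests must pass or the share is left untouched.
        if not all(test_vector_matches(share_data, t) for t in tests):
            return share_data, False
        data = bytearray(share_data)
        for w in writes:
            # Assumes each write falls within, or at the end of, the data.
            end = w["offset"] + len(w["data"])
            data[w["offset"]:end] = w["data"]
        del data[new_length:]  # truncate to the requested length
        return bytes(data), True

Note how the first sample's test (``size`` 1, empty ``specimen``) matches exactly when the share does not yet exist, since reading one byte of an empty share yields the empty string.
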
@@ -59,6 +59,10 @@ Create Branch and Apply Updates
 - summarize major changes
 - commit it
 
+- update "nix/tahoe-lafs.nix"
+
+  - change the value given for `version` from `OLD.post1` to `NEW.post1`
+
 - update "CREDITS"
 
 - are there any new contributors in this release?
@@ -66,7 +70,7 @@ Create Branch and Apply Updates
 - commit it
 
 - update "docs/known_issues.rst" if appropriate
-- update "docs/INSTALL.rst" references to the new release
+- update "docs/Installation/install-tahoe.rst" references to the new release
 - Push the branch to github
 - Create a (draft) PR; this should trigger CI (note that github
   doesn't let you create a PR without some changes on the branch so
@@ -189,11 +193,16 @@ is appropriate.
 Once a release-candidate has marinated for some time then it can be
 made into the actual release.
 
-XXX Write this section when doing 1.15.0 actual release
+The actual release follows the same steps as above, with some differences:
 
-(In general, this means dropping the "rcX" part of the release and the
-tag, uploading those artifacts, uploading to PyPI, ... )
+- there is no "-rcX" on the end of release names
+- the release is uploaded to PyPI (using Twine)
+- the version is tagged in Git (ideally using "the tahoe release key"
+  but can be done with any of the authorized core developers' personal
+  key)
+- the release-candidate branches must be merged back to master after
+  the release is official (e.g. causing newsfragments to be deleted on
+  master, etc)
 
 Announcing the Release
@@ -10,7 +10,7 @@ Introduction
 
 This is how to run a Tahoe-LAFS client or a complete Tahoe-LAFS grid.
 First you have to install the Tahoe-LAFS software, as documented in
-:doc:`INSTALL`.
+:doc:`Installing Tahoe-LAFS <../Installation/install-tahoe>`.
 
 The ``tahoe`` program in your virtualenv's ``bin`` directory is used to
 create, start, and stop nodes. Each node lives in a separate base
@@ -235,7 +235,7 @@ Socialize
 =========
 
 You can chat with other users of and hackers of this software on the
-#tahoe-lafs IRC channel at ``irc.freenode.net``, or on the `tahoe-dev mailing
+#tahoe-lafs IRC channel at ``irc.libera.chat``, or on the `tahoe-dev mailing
 list`_.
 
 .. _tahoe-dev mailing list: https://tahoe-lafs.org/cgi-bin/mailman/listinfo/tahoe-dev
docs/ticket-triage.rst (new file, 27 lines)
@@ -0,0 +1,27 @@
+=============
+Ticket Triage
+=============
+
+Ticket triage is a weekly, informal ritual that is meant to solve the problem of
+tickets getting opened and then forgotten about. It is simple and keeps project
+momentum going and prevents ticket cruft.
+
+It fosters conversation around project tasks and philosophies as they relate to
+milestones.
+
+Process
+-------
+- The role of Ticket Triager rotates regularly-ish, and is assigned ad hoc
+- The Triager needs a ``Trac`` account
+- The Triager looks at all the tickets that have been created in the last week (or month, etc.)
+
+  - They can use a custom query or do this as the week progresses
+  - BONUS ROUND: Dig up a stale ticket from the past
+
+- Assign each ticket to a milestone on the Roadmap
+- The following situations merit discussion:
+
+  - A ticket doesn't have an appropriate milestone and we should create one
+  - A ticket, in vanishingly rare circumstances, should be deleted
+
+    - The ticket is spam
+    - The ticket contains sensitive information and harm will come to one or more people if it continues to be distributed
+
+  - A ticket could be assigned to multiple milestones
+  - There is another question about a ticket
+
+- These tickets will be brought as necessary to one of our meetings (currently Tuesdays) for discussion
@@ -1,83 +0,0 @@
-Building Tahoe-LAFS on Windows
-==============================
-
-You'll need ``python``, ``pip``, and ``virtualenv``. But you won't need a
-compiler.
-
-Preliminaries
--------------
-
-1: Install Python-2.7.11 . Use the "Windows x86-64 MSI installer" at
-https://www.python.org/downloads/release/python-2711/
-
-2: That should install ``pip``, but if it doesn't, look at
-https://pip.pypa.io/en/stable/installing/ for installation instructions.
-
-3: Install ``virtualenv`` with
-https://virtualenv.pypa.io/en/latest/installation.html
-
-Installation
-------------
-
-1: Start a CLI shell (e.g. PowerShell)
-
-2: Create a new virtualenv. Everything specific to Tahoe will go into this.
-You can use whatever name you like for the virtualenv, but example uses
-"venv"::
-
-    PS C:\Users\me> virtualenv venv
-    New python executable in C:\Users\me\venv\Scripts\python.exe
-    Installing setuptools, pip, wheel...done.
-    >
-
-3: Use the virtualenv's ``pip`` to install the latest release of Tahoe-LAFS
-into this virtualenv::
-
-    PS C:\Users\me> venv\Scripts\pip install tahoe-lafs
-    Collecting tahoe-lafs
-    ...
-    Installing collected packages: ...
-    Successfully installed ...
-    >
-
-4: Verify that Tahoe was installed correctly by running ``tahoe --version``,
-using the ``tahoe`` from the virtualenv's Scripts directory::
-
-    PS C:\Users\me> venv\Scripts\tahoe --version
-    tahoe-lafs: 1.11
-    foolscap: ...
-
-Running Tahoe-LAFS
-------------------
-
-The rest of the documentation assumes you can run the ``tahoe`` executable
-just as you did in step 4 above. If you want to type just ``tahoe`` instead
-of ``venv\Scripts\tahoe``, you can either "`activate`_" the virtualenv (by
-running ``venv\Scripts\activate``), or you can add the Scripts directory to
-your ``%PATH%`` environment variable.
-
-Now use the docs in :doc:`running` to learn how to configure your first
-Tahoe node.
-
-.. _activate: https://virtualenv.pypa.io/en/latest/userguide.html#activate-script
-
-Installing A Different Version
-------------------------------
-
-The ``pip install tahoe-lafs`` command above will install the latest release
-(from PyPI). If instead, you want to install from a git checkout, then run
-the following command (using pip from the virtualenv, from the root of your
-git checkout)::
-
-    $ venv\Scripts\pip install .
-
-If you're planning to hack on the source code, you might want to add
-``--editable`` so you won't have to re-install each time you make a change.
-
-Dependencies
-------------
-
-Tahoe-LAFS depends upon several packages that use compiled C code (such as zfec).
-This code must be built separately for each platform (Windows, OS-X, and different flavors of Linux).
-Fortunately, this is now done by upstream packages for most platforms.
-The result is that a C compiler is usually not required to install Tahoe-LAFS.
@@ -1,5 +1,15 @@
+"""
+Ported to Python 3.
+"""
+from __future__ import unicode_literals
+from __future__ import absolute_import
+from __future__ import division
 from __future__ import print_function
 
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
 import sys
 import shutil
 from time import sleep
@@ -28,7 +38,7 @@ from twisted.internet.error import (
 import pytest
 import pytest_twisted
 
-from util import (
+from .util import (
     _CollectOutputProtocol,
     _MagicTextProtocol,
     _DumpOutputProtocol,
@@ -5,6 +5,15 @@
 # You can safely skip any of these tests, it'll just appear to "take
 # longer" to start the first test as the fixtures get built
 
+from __future__ import unicode_literals
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
 
 def test_create_flogger(flog_gatherer):
     print("Created flog_gatherer")
integration/test_get_put.py (new file, 64 lines)
@@ -0,0 +1,64 @@
+"""
+Integration tests for getting and putting files, including reading from stdin
+and stdout.
+"""
+
+from subprocess import Popen, PIPE
+
+import pytest
+
+from .util import run_in_thread, cli
+
+DATA = b"abc123 this is not utf-8 decodable \xff\x00\x33 \x11"
+try:
+    DATA.decode("utf-8")
+except UnicodeDecodeError:
+    pass  # great, what we want
+else:
+    raise ValueError("BUG, the DATA string was decoded from UTF-8")
+
+
+@pytest.fixture(scope="session")
+def get_put_alias(alice):
+    cli(alice, "create-alias", "getput")
+
+
+def read_bytes(path):
+    with open(path, "rb") as f:
+        return f.read()
+
+
+@run_in_thread
+def test_put_from_stdin(alice, get_put_alias, tmpdir):
+    """
+    It's possible to upload a file via `tahoe put`'s STDIN, and then download
+    it to a file.
+    """
+    tempfile = str(tmpdir.join("file"))
+    p = Popen(
+        ["tahoe", "--node-directory", alice.node_dir, "put", "-", "getput:fromstdin"],
+        stdin=PIPE
+    )
+    p.stdin.write(DATA)
+    p.stdin.close()
+    assert p.wait() == 0
+
+    cli(alice, "get", "getput:fromstdin", tempfile)
+    assert read_bytes(tempfile) == DATA
+
+
+def test_get_to_stdout(alice, get_put_alias, tmpdir):
+    """
+    It's possible to upload a file, and then download it to stdout.
+    """
+    tempfile = tmpdir.join("file")
+    with tempfile.open("wb") as f:
+        f.write(DATA)
+    cli(alice, "put", str(tempfile), "getput:tostdout")
+
+    p = Popen(
+        ["tahoe", "--node-directory", alice.node_dir, "get", "getput:tostdout", "-"],
+        stdout=PIPE
+    )
+    assert p.stdout.read() == DATA
+    assert p.wait() == 0
integration/test_i2p.py (new file, 244 lines)
@@ -0,0 +1,244 @@
+"""
+Integration tests for I2P support.
+"""
+
+from __future__ import unicode_literals
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
+import sys
+from os.path import join, exists
+from os import mkdir
+from time import sleep
+
+if PY2:
+    def which(path):
+        # This will result in skipping I2P tests on Python 2. Oh well.
+        return None
+else:
+    from shutil import which
+
+from eliot import log_call
+
+import pytest
+import pytest_twisted
+
+from . import util
+
+from twisted.python.filepath import (
+    FilePath,
+)
+from twisted.internet.error import ProcessExitedAlready
+
+from allmydata.test.common import (
+    write_introducer,
+)
+
+if which("docker") is None:
+    pytest.skip('Skipping I2P tests since Docker is unavailable', allow_module_level=True)
+# Docker on Windows machines sometimes expects Windows-y Docker images, so just
+# don't bother.
+if sys.platform.startswith('win'):
+    pytest.skip('Skipping I2P tests on Windows', allow_module_level=True)
+
+
+@pytest.fixture
+def i2p_network(reactor, temp_dir, request):
+    """Fixture to start up local i2pd."""
+    proto = util._MagicTextProtocol("ephemeral keys")
+    reactor.spawnProcess(
+        proto,
+        which("docker"),
+        (
+            "docker", "run", "-p", "7656:7656", "purplei2p/i2pd",
+            # Bad URL for reseeds, so it can't talk to other routers.
+            "--reseed.urls", "http://localhost:1/",
+        ),
+    )
+
+    def cleanup():
+        try:
+            proto.transport.signalProcess("KILL")
+            util.block_with_timeout(proto.exited, reactor)
+        except ProcessExitedAlready:
+            pass
+    request.addfinalizer(cleanup)
+
+    util.block_with_timeout(proto.magic_seen, reactor, timeout=30)
+
+
+@pytest.fixture
+@log_call(
+    action_type=u"integration:i2p:introducer",
+    include_args=["temp_dir", "flog_gatherer"],
+    include_result=False,
+)
+def i2p_introducer(reactor, temp_dir, flog_gatherer, request):
+    config = '''
+[node]
+nickname = introducer_i2p
+web.port = 4561
+log_gatherer.furl = {log_furl}
+'''.format(log_furl=flog_gatherer)
+
+    intro_dir = join(temp_dir, 'introducer_i2p')
+    print("making introducer", intro_dir)
+
+    if not exists(intro_dir):
+        mkdir(intro_dir)
+        done_proto = util._ProcessExitedProtocol()
+        util._tahoe_runner_optional_coverage(
+            done_proto,
+            reactor,
+            request,
+            (
+                'create-introducer',
+                '--listen=i2p',
+                intro_dir,
+            ),
+        )
+        pytest_twisted.blockon(done_proto.done)
+
+        # over-write the config file with our stuff
+        with open(join(intro_dir, 'tahoe.cfg'), 'w') as f:
+            f.write(config)
+
+    # "tahoe run" is consistent across Linux/macOS/Windows, unlike the old
+    # "start" command.
+    protocol = util._MagicTextProtocol('introducer running')
+    transport = util._tahoe_runner_optional_coverage(
+        protocol,
+        reactor,
+        request,
+        (
+            'run',
+            intro_dir,
+        ),
+    )
+
+    def cleanup():
+        try:
+            transport.signalProcess('TERM')
+            util.block_with_timeout(protocol.exited, reactor)
+        except ProcessExitedAlready:
+            pass
+    request.addfinalizer(cleanup)
+
+    pytest_twisted.blockon(protocol.magic_seen)
+    return transport
+
+
+@pytest.fixture
+def i2p_introducer_furl(i2p_introducer, temp_dir):
+    furl_fname = join(temp_dir, 'introducer_i2p', 'private', 'introducer.furl')
+    while not exists(furl_fname):
+        print("Don't see {} yet".format(furl_fname))
+        sleep(.1)
+    furl = open(furl_fname, 'r').read()
+    return furl
+
+
+@pytest_twisted.inlineCallbacks
+def test_i2p_service_storage(reactor, request, temp_dir, flog_gatherer, i2p_network, i2p_introducer_furl):
+    yield _create_anonymous_node(reactor, 'carol_i2p', 8008, request, temp_dir, flog_gatherer, i2p_network, i2p_introducer_furl)
+    yield _create_anonymous_node(reactor, 'dave_i2p', 8009, request, temp_dir, flog_gatherer, i2p_network, i2p_introducer_furl)
+    # ensure both nodes are connected to "a grid" by uploading
+    # something via carol, and retrieve it using dave.
+    gold_path = join(temp_dir, "gold")
+    with open(gold_path, "w") as f:
+        f.write(
+            "The object-capability model is a computer security model. A "
+            "capability describes a transferable right to perform one (or "
+            "more) operations on a given object."
+        )
+    # XXX could use treq or similar to POST these to their respective
+    # WUIs instead ...
+
+    proto = util._CollectOutputProtocol()
+    reactor.spawnProcess(
+        proto,
+        sys.executable,
+        (
+            sys.executable, '-b', '-m', 'allmydata.scripts.runner',
+            '-d', join(temp_dir, 'carol_i2p'),
+            'put', gold_path,
+        )
+    )
+    yield proto.done
+    cap = proto.output.getvalue().strip().split()[-1]
+    print("TEH CAP!", cap)
+
+    proto = util._CollectOutputProtocol(capture_stderr=False)
+    reactor.spawnProcess(
+        proto,
+        sys.executable,
+        (
+            sys.executable, '-b', '-m', 'allmydata.scripts.runner',
+            '-d', join(temp_dir, 'dave_i2p'),
+            'get', cap,
+        )
+    )
+    yield proto.done
+
+    dave_got = proto.output.getvalue().strip()
+    assert dave_got == open(gold_path, 'rb').read().strip()
+
+
+@pytest_twisted.inlineCallbacks
+def _create_anonymous_node(reactor, name, control_port, request, temp_dir, flog_gatherer, i2p_network, introducer_furl):
+    node_dir = FilePath(temp_dir).child(name)
+    web_port = "tcp:{}:interface=localhost".format(control_port + 2000)
+
+    print("creating", node_dir.path)
+    node_dir.makedirs()
+    proto = util._DumpOutputProtocol(None)
+    reactor.spawnProcess(
+        proto,
+        sys.executable,
+        (
+            sys.executable, '-b', '-m', 'allmydata.scripts.runner',
+            'create-node',
+            '--nickname', name,
+            '--introducer', introducer_furl,
+            '--hide-ip',
+            '--listen', 'i2p',
+            node_dir.path,
+        )
+    )
+    yield proto.done
+
+    # Which services should this client connect to?
+    write_introducer(node_dir, "default", introducer_furl)
+    with node_dir.child('tahoe.cfg').open('w') as f:
+        node_config = '''
+[node]
+nickname = %(name)s
+web.port = %(web_port)s
+web.static = public_html
+log_gatherer.furl = %(log_furl)s
+
+[i2p]
+enabled = true
+
+[client]
+shares.needed = 1
+shares.happy = 1
+shares.total = 2
+
+''' % {
+    'name': name,
+    'web_port': web_port,
+    'log_furl': flog_gatherer,
+}
+        node_config = node_config.encode("utf-8")
+        f.write(node_config)
+
+    print("running")
+    yield util._run_node(reactor, node_dir.path, request, None)
+    print("okay, launched")
@@ -1,9 +1,21 @@
+"""
+Ported to Python 3.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
 import sys
 from os.path import join
 
 from twisted.internet.error import ProcessTerminated
 
-import util
+from . import util
 
 import pytest_twisted
 
@@ -30,7 +42,7 @@ def test_upload_immutable(reactor, temp_dir, introducer_furl, flog_gatherer, sto
         proto,
         sys.executable,
         [
-            sys.executable, '-m', 'allmydata.scripts.runner',
+            sys.executable, '-b', '-m', 'allmydata.scripts.runner',
             '-d', node_dir,
            'put', __file__,
        ]
@@ -42,4 +54,4 @@ def test_upload_immutable(reactor, temp_dir, introducer_furl, flog_gatherer, sto
     assert isinstance(e, ProcessTerminated)
 
     output = proto.output.getvalue()
-    assert "shares could be placed on only" in output
+    assert b"shares could be placed on only" in output
@@ -1,3 +1,6 @@
+"""
+Ported to Python 3.
+"""
 from __future__ import (
     print_function,
     unicode_literals,
@@ -5,12 +8,18 @@ from __future__ import (
     division,
 )
 
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
+from six import ensure_text
+
 import json
 
 from os.path import (
     join,
 )
-from urlparse import (
+from urllib.parse import (
     urlsplit,
 )
 
@@ -68,7 +77,7 @@ def _connect_client(reactor, api_auth_token, ws_url):
     factory = WebSocketClientFactory(
         url=ws_url,
         headers={
-            "Authorization": "{} {}".format(SCHEME, api_auth_token),
+            "Authorization": "{} {}".format(str(SCHEME, "ascii"), api_auth_token),
         }
     )
     factory.protocol = _StreamingLogClientProtocol
@@ -127,7 +136,7 @@ def _test_streaming_logs(reactor, temp_dir, alice):
     node_url = cfg.get_config_from_file("node.url")
     api_auth_token = cfg.get_private_config("api_auth_token")
 
-    ws_url = node_url.replace("http://", "ws://")
+    ws_url = ensure_text(node_url).replace("http://", "ws://")
     log_url = ws_url + "private/logs/v1"
 
     print("Connecting to {}".format(log_url))
@@ -1,12 +1,22 @@
+"""
+Ported to Python 3.
+"""
+from __future__ import unicode_literals
+from __future__ import absolute_import
+from __future__ import division
 from __future__ import print_function
 
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
 import sys
 from os.path import join
 
 import pytest
 import pytest_twisted
 
-import util
+from . import util
 
 from twisted.python.filepath import (
     FilePath,
@@ -46,7 +56,7 @@ def test_onion_service_storage(reactor, request, temp_dir, flog_gatherer, tor_ne
         proto,
         sys.executable,
         (
-            sys.executable, '-m', 'allmydata.scripts.runner',
+            sys.executable, '-b', '-m', 'allmydata.scripts.runner',
             '-d', join(temp_dir, 'carol'),
             'put', gold_path,
         )
@@ -55,12 +65,12 @@ def test_onion_service_storage(reactor, request, temp_dir, flog_gatherer, tor_ne
     cap = proto.output.getvalue().strip().split()[-1]
     print("TEH CAP!", cap)
 
-    proto = util._CollectOutputProtocol()
+    proto = util._CollectOutputProtocol(capture_stderr=False)
     reactor.spawnProcess(
         proto,
         sys.executable,
         (
-            sys.executable, '-m', 'allmydata.scripts.runner',
+            sys.executable, '-b', '-m', 'allmydata.scripts.runner',
             '-d', join(temp_dir, 'dave'),
             'get', cap,
         )
@@ -68,7 +78,7 @@ def test_onion_service_storage(reactor, request, temp_dir, flog_gatherer, tor_ne
     yield proto.done
 
     dave_got = proto.output.getvalue().strip()
-    assert dave_got == open(gold_path, 'r').read().strip()
+    assert dave_got == open(gold_path, 'rb').read().strip()
 
 
 @pytest_twisted.inlineCallbacks
@@ -84,7 +94,7 @@ def _create_anonymous_node(reactor, name, control_port, request, temp_dir, flog_
         proto,
         sys.executable,
         (
-            sys.executable, '-m', 'allmydata.scripts.runner',
+            sys.executable, '-b', '-m', 'allmydata.scripts.runner',
             'create-node',
             '--nickname', name,
             '--introducer', introducer_furl,
@@ -100,7 +110,7 @@ def _create_anonymous_node(reactor, name, control_port, request, temp_dir, flog_
     # Which services should this client connect to?
     write_introducer(node_dir, "default", introducer_furl)
     with node_dir.child('tahoe.cfg').open('w') as f:
-        f.write('''
+        node_config = '''
 [node]
 nickname = %(name)s
 web.port = %(web_port)s
@@ -125,7 +135,9 @@ shares.total = 2
     'log_furl': flog_gatherer,
     'control_port': control_port,
     'local_port': control_port + 1000,
-})
+}
+        node_config = node_config.encode("utf-8")
+        f.write(node_config)
 
     print("running")
     yield util._run_node(reactor, node_dir.path, request, None)
@ -7,15 +7,26 @@ Most of the tests have cursory asserts and encode 'what the WebAPI did
|
|||||||
at the time of testing' -- not necessarily a cohesive idea of what the
|
at the time of testing' -- not necessarily a cohesive idea of what the
|
||||||
WebAPI *should* do in every situation. It's not clear the latter
|
WebAPI *should* do in every situation. It's not clear the latter
|
||||||
exists anywhere, however.
|
exists anywhere, however.
|
||||||
|
|
||||||
|
Ported to Python 3.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
from __future__ import absolute_import
|
||||||
|
from __future__ import division
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
from future.utils import PY2
|
||||||
|
if PY2:
|
||||||
|
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
|
||||||
|
|
||||||
import time
|
import time
|
||||||
import json
|
from urllib.parse import unquote as url_unquote, quote as url_quote
|
||||||
import urllib2
|
|
||||||
|
|
||||||
import allmydata.uri
|
import allmydata.uri
|
||||||
|
from allmydata.util import jsonbytes as json
|
||||||
|
|
||||||
import util
|
from . import util
|
||||||
|
|
||||||
import requests
|
import requests
|
||||||
import html5lib
|
import html5lib
|
||||||
@ -64,7 +75,7 @@ def test_upload_download(alice):
|
|||||||
u"filename": u"boom",
|
u"filename": u"boom",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
assert data == FILE_CONTENTS
|
assert str(data, "utf-8") == FILE_CONTENTS
|
||||||
|
|
||||||
|
|
||||||
def test_put(alice):
|
def test_put(alice):
|
||||||
@ -95,7 +106,7 @@ def test_helper_status(storage_nodes):
|
|||||||
resp = requests.get(url)
|
resp = requests.get(url)
|
||||||
assert resp.status_code >= 200 and resp.status_code < 300
|
assert resp.status_code >= 200 and resp.status_code < 300
|
||||||
dom = BeautifulSoup(resp.content, "html5lib")
|
dom = BeautifulSoup(resp.content, "html5lib")
|
||||||
assert unicode(dom.h1.string) == u"Helper Status"
|
assert str(dom.h1.string) == u"Helper Status"
|
||||||
|
|
||||||
|
|
||||||
def test_deep_stats(alice):
|
def test_deep_stats(alice):
|
||||||
@ -115,10 +126,10 @@ def test_deep_stats(alice):
 
     # when creating a directory, we'll be re-directed to a URL
     # containing our writecap..
-    uri = urllib2.unquote(resp.url)
+    uri = url_unquote(resp.url)
     assert 'URI:DIR2:' in uri
     dircap = uri[uri.find("URI:DIR2:"):].rstrip('/')
-    dircap_uri = util.node_url(alice.node_dir, "uri/{}".format(urllib2.quote(dircap)))
+    dircap_uri = util.node_url(alice.node_dir, "uri/{}".format(url_quote(dircap)))
 
     # POST a file into this directory
     FILE_CONTENTS = u"a file in a directory"
@ -145,7 +156,7 @@ def test_deep_stats(alice):
     k, data = d
     assert k == u"dirnode"
     assert len(data['children']) == 1
-    k, child = data['children'].values()[0]
+    k, child = list(data['children'].values())[0]
     assert k == u"filenode"
     assert child['size'] == len(FILE_CONTENTS)
 
@ -196,11 +207,11 @@ def test_status(alice):
 
     print("Uploaded data, cap={}".format(cap))
     resp = requests.get(
-        util.node_url(alice.node_dir, u"uri/{}".format(urllib2.quote(cap))),
+        util.node_url(alice.node_dir, u"uri/{}".format(url_quote(cap))),
     )
 
     print("Downloaded {} bytes of data".format(len(resp.content)))
-    assert resp.content == FILE_CONTENTS
+    assert str(resp.content, "ascii") == FILE_CONTENTS
 
     resp = requests.get(
         util.node_url(alice.node_dir, "status"),
@ -219,12 +230,12 @@ def test_status(alice):
             continue
         resp = requests.get(util.node_url(alice.node_dir, href))
         if href.startswith(u"/status/up"):
-            assert "File Upload Status" in resp.content
-            if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
+            assert b"File Upload Status" in resp.content
+            if b"Total Size: %d" % (len(FILE_CONTENTS),) in resp.content:
                 found_upload = True
         elif href.startswith(u"/status/down"):
-            assert "File Download Status" in resp.content
-            if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
+            assert b"File Download Status" in resp.content
+            if b"Total Size: %d" % (len(FILE_CONTENTS),) in resp.content:
                 found_download = True
 
     # download the specialized event information
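
Note: these assertions change because `requests` exposes a response body as bytes via `resp.content` on Python 3, so any literal compared against it must be bytes too. A small illustration (the URL is hypothetical):

    import requests

    resp = requests.get("http://127.0.0.1:3456/status")  # hypothetical node URL
    assert b"Status" in resp.content   # bytes compared with bytes
    assert "Status" in resp.text       # or decoded str compared with str
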
@ -297,7 +308,7 @@ def test_directory_deep_check(alice):
     print("Uploaded data1, cap={}".format(cap1))
 
     resp = requests.get(
-        util.node_url(alice.node_dir, u"uri/{}".format(urllib2.quote(cap0))),
+        util.node_url(alice.node_dir, u"uri/{}".format(url_quote(cap0))),
         params={u"t": u"info"},
     )
 
@ -398,9 +409,9 @@ def test_directory_deep_check(alice):
     for _ in range(5):
         resp = requests.get(deepcheck_uri)
         dom = BeautifulSoup(resp.content, "html5lib")
-        if dom.h1 and u'Results' in unicode(dom.h1.string):
+        if dom.h1 and u'Results' in str(dom.h1.string):
             break
-        if dom.h2 and dom.h2.a and u"Reload" in unicode(dom.h2.a.string):
+        if dom.h2 and dom.h2.a and u"Reload" in str(dom.h2.a.string):
             dom = None
         time.sleep(1)
     assert dom is not None, "Operation never completed"
@ -438,7 +449,7 @@ def test_introducer_info(introducer):
     resp = requests.get(
         util.node_url(introducer.node_dir, u""),
     )
-    assert "Introducer" in resp.content
+    assert b"Introducer" in resp.content
 
     resp = requests.get(
         util.node_url(introducer.node_dir, u""),
@ -511,6 +522,6 @@ def test_mkdir_with_children(alice):
         params={u"t": "mkdir-with-children"},
         data=json.dumps(meta),
     )
-    assert resp.startswith("URI:DIR2")
+    assert resp.startswith(b"URI:DIR2")
     cap = allmydata.uri.from_string(resp)
     assert isinstance(cap, allmydata.uri.DirectoryURI)
@ -1,9 +1,21 @@
+"""
+Ported to Python 3.
+"""
+from __future__ import unicode_literals
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
 import sys
 import time
 import json
 from os import mkdir, environ
 from os.path import exists, join
-from six.moves import StringIO
+from io import StringIO, BytesIO
 from functools import partial
 from subprocess import check_output
 
@ -55,9 +67,10 @@ class _CollectOutputProtocol(ProcessProtocol):
     self.output, and callback's on done with all of it after the
     process exits (for any reason).
     """
-    def __init__(self):
+    def __init__(self, capture_stderr=True):
         self.done = Deferred()
-        self.output = StringIO()
+        self.output = BytesIO()
+        self.capture_stderr = capture_stderr
 
     def processEnded(self, reason):
         if not self.done.called:
@ -71,8 +84,9 @@ class _CollectOutputProtocol(ProcessProtocol):
         self.output.write(data)
 
     def errReceived(self, data):
-        print("ERR: {}".format(data))
-        self.output.write(data)
+        print("ERR: {!r}".format(data))
+        if self.capture_stderr:
+            self.output.write(data)
 
 
 class _DumpOutputProtocol(ProcessProtocol):
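
Note: on Python 3, Twisted hands child-process output to `outReceived`/`errReceived` as bytes, which is why the collector now buffers into a `BytesIO` and gains a `capture_stderr` switch. A self-contained sketch of the same shape (names here are illustrative, not the project's):

    from io import BytesIO
    from twisted.internet.defer import Deferred
    from twisted.internet.protocol import ProcessProtocol

    class CollectOutput(ProcessProtocol):
        """Buffer a child process's stdout (and optionally stderr) as bytes."""
        def __init__(self, capture_stderr=True):
            self.done = Deferred()
            self.output = BytesIO()
            self.capture_stderr = capture_stderr

        def outReceived(self, data):       # data arrives as bytes on Python 3
            self.output.write(data)

        def errReceived(self, data):
            if self.capture_stderr:
                self.output.write(data)

        def processEnded(self, reason):    # fires once, however the child exits
            if not self.done.called:
                self.done.callback(self.output.getvalue())
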
@ -92,9 +106,11 @@ class _DumpOutputProtocol(ProcessProtocol):
             self.done.errback(reason)
 
     def outReceived(self, data):
+        data = str(data, sys.stdout.encoding)
         self._out.write(data)
 
     def errReceived(self, data):
+        data = str(data, sys.stdout.encoding)
         self._out.write(data)
 
 
@ -114,6 +130,7 @@ class _MagicTextProtocol(ProcessProtocol):
         self.exited.callback(None)
 
     def outReceived(self, data):
+        data = str(data, sys.stdout.encoding)
         sys.stdout.write(data)
         self._output.write(data)
         if not self.magic_seen.called and self._magic_text in self._output.getvalue():
@ -121,6 +138,7 @@ class _MagicTextProtocol(ProcessProtocol):
             self.magic_seen.callback(self)
 
     def errReceived(self, data):
+        data = str(data, sys.stderr.encoding)
         sys.stdout.write(data)
 
 
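
Note: the repeated `data = str(data, sys.stdout.encoding)` insertions all follow the same rule: decode process output once, at the boundary, so everything downstream stays `str`. Roughly (a sketch, with a made-up payload):

    import sys

    raw = b"server ready\n"                           # what *Received() is handed
    text = str(raw, sys.stdout.encoding or "utf-8")   # decode once at the edge
    sys.stdout.write(text)
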
@ -152,9 +170,9 @@ def _tahoe_runner_optional_coverage(proto, reactor, request, other_args):
     `--coverage` option if the `request` indicates we should.
     """
     if request.config.getoption('coverage'):
-        args = [sys.executable, '-m', 'coverage', 'run', '-m', 'allmydata.scripts.runner', '--coverage']
+        args = [sys.executable, '-b', '-m', 'coverage', 'run', '-m', 'allmydata.scripts.runner', '--coverage']
     else:
-        args = [sys.executable, '-m', 'allmydata.scripts.runner']
+        args = [sys.executable, '-b', '-m', 'allmydata.scripts.runner']
     args += other_args
     return reactor.spawnProcess(
         proto,
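
Note: the new `-b` flag makes the spawned interpreter emit `BytesWarning` whenever bytes and str are compared (such comparisons are always False on Python 3), surfacing porting bugs during the integration runs. For example:

    # demo.py -- run as:  python3 -b demo.py
    print(b"abc" == "abc")   # prints False, plus a BytesWarning under -b
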
@ -261,9 +279,9 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
         '--hostname', 'localhost',
         '--listen', 'tcp',
         '--webport', web_port,
-        '--shares-needed', unicode(needed),
-        '--shares-happy', unicode(happy),
-        '--shares-total', unicode(total),
+        '--shares-needed', str(needed),
+        '--shares-happy', str(happy),
+        '--shares-total', str(total),
         '--helper',
     ]
     if not storage:
@ -280,7 +298,7 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
             config,
             u'node',
             u'log_gatherer.furl',
-            flog_gatherer.decode("utf-8"),
+            flog_gatherer,
         )
         write_config(FilePath(config_path), config)
     created_d.addCallback(created)
@ -526,7 +544,8 @@ def generate_ssh_key(path):
     key = RSAKey.generate(2048)
     key.write_private_key_file(path)
     with open(path + ".pub", "wb") as f:
-        f.write(b"%s %s" % (key.get_name(), key.get_base64()))
+        s = "%s %s" % (key.get_name(), key.get_base64())
+        f.write(s.encode("ascii"))
 
 
 def run_in_thread(f):
@ -1,13 +1,18 @@
 #! /usr/bin/python
 
-# ./check-debugging.py src
+"""
+Checks for defer.setDebugging().
+
+Runs on Python 3.
+
+Usage: ./check-debugging.py src
+"""
 
 from __future__ import print_function
 
 import sys, re, os
 
 ok = True
-umids = {}
 
 for starting_point in sys.argv[1:]:
     for root, dirs, files in os.walk(starting_point):
@ -1,186 +0,0 @@
-#! /usr/bin/python
-
-from __future__ import print_function
-
-import os, sys, compiler
-from compiler.ast import Node, For, While, ListComp, AssName, Name, Lambda, Function
-
-
-def check_source(source):
-    return check_thing(compiler.parse, source)
-
-def check_file(path):
-    return check_thing(compiler.parseFile, path)
-
-def check_thing(parser, thing):
-    try:
-        ast = parser(thing)
-    except SyntaxError as e:
-        return e
-    else:
-        results = []
-        check_ast(ast, results)
-        return results
-
-def check_ast(ast, results):
-    """Check a node outside a loop."""
-    if isinstance(ast, (For, While, ListComp)):
-        check_loop(ast, results)
-    else:
-        for child in ast.getChildNodes():
-            if isinstance(ast, Node):
-                check_ast(child, results)
-
-def check_loop(ast, results):
-    """Check a particular outer loop."""
-
-    # List comprehensions have a poorly designed AST of the form
-    # ListComp(exprNode, [ListCompFor(...), ...]), in which the
-    # result expression is outside the ListCompFor node even though
-    # it is logically inside the loop(s).
-    # There may be multiple ListCompFor nodes (in cases such as
-    #   [lambda: (a,b) for a in ... for b in ...]
-    # ), and that case they are not nested in the AST. But these
-    # warts (nonobviously) happen not to matter for our analysis.
-
-    assigned = {}  # maps name to lineno of topmost assignment
-    nested = set()
-    collect_assigned_and_nested(ast, assigned, nested)
-
-    # For each nested function...
-    for funcnode in nested:
-        # Check for captured variables in this function.
-        captured = set()
-        collect_captured(funcnode, assigned, captured, False)
-        for name in captured:
-            # We want to report the outermost capturing function
-            # (since that is where the workaround will need to be
-            # added), and the topmost assignment to the variable.
-            # Just one report per capturing function per variable
-            # will do.
-            results.append(make_result(funcnode, name, assigned[name]))
-
-        # Check each node in the function body in case it
-        # contains another 'for' loop.
-        childnodes = funcnode.getChildNodes()[len(funcnode.defaults):]
-        for child in childnodes:
-            check_ast(child, results)
-
-def collect_assigned_and_nested(ast, assigned, nested):
-    """
-    Collect the names assigned in this loop, not including names
-    assigned in nested functions. Also collect the nodes of functions
-    that are nested one level deep.
-    """
-    if isinstance(ast, AssName):
-        if ast.name not in assigned or assigned[ast.name] > ast.lineno:
-            assigned[ast.name] = ast.lineno
-    else:
-        childnodes = ast.getChildNodes()
-        if isinstance(ast, (Lambda, Function)):
-            nested.add(ast)
-
-            # The default argument expressions are "outside" the
-            # function, even though they are children of the
-            # Lambda or Function node.
-            childnodes = childnodes[:len(ast.defaults)]
-
-        for child in childnodes:
-            if isinstance(ast, Node):
-                collect_assigned_and_nested(child, assigned, nested)
-
-def collect_captured(ast, assigned, captured, in_function_yet):
-    """Collect any captured variables that are also in assigned."""
-    if isinstance(ast, Name):
-        if ast.name in assigned:
-            captured.add(ast.name)
-    else:
-        childnodes = ast.getChildNodes()
-        if isinstance(ast, (Lambda, Function)):
-            # Formal parameters of the function are excluded from
-            # captures we care about in subnodes of the function body.
-            new_assigned = assigned.copy()
-            remove_argnames(ast.argnames, new_assigned)
-
-            if len(new_assigned) > 0:
-                for child in childnodes[len(ast.defaults):]:
-                    collect_captured(child, new_assigned, captured, True)
-
-            # The default argument expressions are "outside" *this*
-            # function, even though they are children of the Lambda or
-            # Function node.
-            if not in_function_yet:
-                return
-            childnodes = childnodes[:len(ast.defaults)]
-
-        for child in childnodes:
-            if isinstance(ast, Node):
-                collect_captured(child, assigned, captured, True)
-
-
-def remove_argnames(names, fromset):
-    for element in names:
-        if element in fromset:
-            del fromset[element]
-        elif isinstance(element, (tuple, list)):
-            remove_argnames(element, fromset)
-
-
-def make_result(funcnode, var_name, var_lineno):
-    if hasattr(funcnode, 'name'):
-        func_name = 'function %r' % (funcnode.name,)
-    else:
-        func_name = '<lambda>'
-    return (funcnode.lineno, func_name, var_name, var_lineno)
-
-def report(out, path, results):
-    for r in results:
-        print(path + (":%r %s captures %r assigned at line %d" % r), file=out)
-
-def check(sources, out):
-    class Counts(object):
-        n = 0
-        processed_files = 0
-        suspect_files = 0
-        error_files = 0
-    counts = Counts()
-
-    def _process(path):
-        results = check_file(path)
-        if isinstance(results, SyntaxError):
-            print(path + (" NOT ANALYSED due to syntax error: %s" % results), file=out)
-            counts.error_files += 1
-        else:
-            report(out, path, results)
-            counts.n += len(results)
-            counts.processed_files += 1
-            if len(results) > 0:
-                counts.suspect_files += 1
-
-    for source in sources:
-        print("Checking %s..." % (source,), file=out)
-        if os.path.isfile(source):
-            _process(source)
-        else:
-            for (dirpath, dirnames, filenames) in os.walk(source):
-                for fn in filenames:
-                    (basename, ext) = os.path.splitext(fn)
-                    if ext == '.py':
-                        _process(os.path.join(dirpath, fn))
-
-    print("%d suspiciously captured variables in %d out of %d file(s)."
-          % (counts.n, counts.suspect_files, counts.processed_files), file=out)
-    if counts.error_files > 0:
-        print("%d file(s) not processed due to syntax errors."
-              % (counts.error_files,), file=out)
-    return counts.n
-
-
-sources = ['src']
-if len(sys.argv) > 1:
-    sources = sys.argv[1:]
-if check(sources, sys.stderr) > 0:
-    sys.exit(1)
-
-
-# TODO: self-tests
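
Note: for reference, the bug this now-deleted checker hunted for (it relied on the Python 2-only `compiler` module, hence the removal) is the classic late-binding closure over a loop variable. A minimal reproduction and the usual default-argument workaround:

    callbacks = [lambda: i for i in range(3)]
    print([f() for f in callbacks])          # [2, 2, 2] -- all share the final i

    callbacks = [lambda i=i: i for i in range(3)]
    print([f() for f in callbacks])          # [0, 1, 2] -- default binds per iteration
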
@ -1,4 +1,10 @@
-#! /usr/bin/python
+#! /usr/bin/python3
 
+"""
+Ensure UMIDS are unique.
+
+This runs on Python 3.
+"""
+
 # ./check-umids.py src
 
@ -52,6 +52,8 @@ system where Tahoe is installed, or in a source tree with setup.py like this:
  setup.py run_with_pythonpath -p -c 'misc/make-canary-files.py ARGS..'
 """
 
+from past.builtins import cmp
+
 import os, hashlib
 from twisted.python import usage
 from allmydata.immutable import upload
@ -201,7 +201,9 @@ class CPUWatcher(service.MultiService, resource.Resource, Referenceable):
             log.msg("error reading process %s (%s), ignoring" % (pid, name))
             log.err()
         try:
-            pickle.dump(self.history, open("history.pickle.tmp", "wb"))
+            # Newer protocols won't work in Python 2; when it is dropped,
+            # protocol v4 can be used (added in Python 3.4).
+            pickle.dump(self.history, open("history.pickle.tmp", "wb"), protocol=2)
             os.rename("history.pickle.tmp", "history.pickle")
         except:
             pass
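
Note: pinning `protocol=2` keeps the pickle readable by both interpreters, since 2 is the newest protocol Python 2 understands and Python 3 reads and writes it as well. For instance (sample data is made up):

    import pickle

    history = {"cpu": [0.1, 0.3]}          # hypothetical sample data
    with open("history.pickle.tmp", "wb") as f:
        pickle.dump(history, f, protocol=2)
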
@ -18,7 +18,7 @@ def factorial(n):
     factorial(n) with n<0 is -factorial(abs(n))
     """
     result = 1
-    for i in xrange(1, abs(n)+1):
+    for i in range(1, abs(n)+1):
         result *= i
     assert n >= 0
     return result
@ -30,7 +30,7 @@ def binomial(n, k):
     # calculate n!/k! as one product, avoiding factors that
     # just get canceled
     P = k+1
-    for i in xrange(k+2, n+1):
+    for i in range(k+2, n+1):
         P *= i
     # if you are paranoid:
     # C, rem = divmod(P, factorial(n-k))
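
Note: the loop being renamed computes C(n, k) as the single product n!/k! = (k+1)(k+2)...n, then divides by (n-k)!, avoiding the factors that would cancel. A self-contained sketch of that trick (not the file's full implementation):

    def factorial(n):
        result = 1
        for i in range(1, n + 1):
            result *= i
        return result

    def binomial(n, k):
        P = 1
        for i in range(k + 1, n + 1):   # P accumulates n!/k!
            P *= i
        return P // factorial(n - k)

    assert binomial(5, 2) == 10         # 3*4*5 = 60; 60 // 3! = 10
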
@ -79,7 +79,7 @@ def make_candidate(B, K, K1, K2, q, T, T_min, L_hash, lg_N, sig_bytes, c_sign, c
 
     # Winternitz with B < 4 is never optimal. For example, going from B=4 to B=2 halves the
     # chain depth, but that is cancelled out by doubling (roughly) the number of digits.
-    range_B = xrange(4, 33)
+    range_B = range(4, 33)
 
     M = pow(2, lg_M)
 
@ -100,7 +100,7 @@ def calculate(K, K1, K2, q_max, L_hash, trees):
     T_min = ceil_div(lg_M - lg_K1, lg_K)
 
     last_q = None
-    for T in xrange(T_min, T_min+21):
+    for T in range(T_min, T_min+21):
         # lg(total number of leaf private keys)
         lg_S = lg_K1 + lg_K*T
         lg_N = lg_S + lg_K2
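
Note: these mechanical `xrange` -> `range` renames are safe for the search loops because Python 3's `range` is lazy like `xrange` was, so even huge bounds cost constant memory:

    big = range(10**12)   # no list is materialized
    print(big[10])        # 10 -- indexing works without iterating
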
@ -137,14 +137,14 @@ def calculate(K, K1, K2, q_max, L_hash, trees):
 
         # We approximate lg(M-x) as lg(M)
         lg_px_step = lg_M + lg_p - lg_1_p
-        for x in xrange(1, j):
+        for x in range(1, j):
             lg_px[x] = lg_px[x-1] - lg(x) + lg_px_step
 
         q = None
         # Find the minimum acceptable value of q.
-        for q_cand in xrange(1, q_max+1):
+        for q_cand in range(1, q_max+1):
             lg_q = lg(q_cand)
-            lg_pforge = [lg_px[x] + (lg_q*x - lg_K2)*q_cand for x in xrange(1, j)]
+            lg_pforge = [lg_px[x] + (lg_q*x - lg_K2)*q_cand for x in range(1, j)]
             if max(lg_pforge) < -L_hash + lg(j) and lg_px[j-1] + 1.0 < -L_hash:
                 #print("K = %d, K1 = %d, K2 = %d, L_hash = %d, lg_K2 = %.3f, q = %d, lg_pforge_1 = %.3f, lg_pforge_2 = %.3f, lg_pforge_3 = %.3f"
                 #    % (K, K1, K2, L_hash, lg_K2, q, lg_pforge_1, lg_pforge_2, lg_pforge_3))
@ -246,13 +246,13 @@ def search():
     K_max = 50
     c2 = compressions(2*L_hash)
     c3 = compressions(3*L_hash)
-    for dau in xrange(0, 10):
+    for dau in range(0, 10):
         a = pow(2, dau)
-        for tri in xrange(0, ceil_log(30-dau, 3)):
+        for tri in range(0, ceil_log(30-dau, 3)):
             x = int(a*pow(3, tri))
             h = dau + 2*tri
             c_x = int(sum_powers(2, dau)*c2 + a*sum_powers(3, tri)*c3)
-            for y in xrange(1, x+1):
+            for y in range(1, x+1):
                 if tri > 0:
                     # If the bottom level has arity 3, then for every 2 nodes by which the tree is
                     # imperfect, we can save c3 compressions by pruning 3 leaves back to their parent.
@ -267,16 +267,16 @@ def search():
                 if y not in trees or (h, c_y, (dau, tri)) < trees[y]:
                     trees[y] = (h, c_y, (dau, tri))
 
-    #for x in xrange(1, K_max+1):
+    #for x in range(1, K_max+1):
     #    print(x, trees[x])
 
     candidates = []
     progress = 0
     fuzz = 0
     complete = (K_max-1)*(2200-200)/100
-    for K in xrange(2, K_max+1):
-        for K2 in xrange(200, 2200, 100):
-            for K1 in xrange(max(2, K-fuzz), min(K_max, K+fuzz)+1):
+    for K in range(2, K_max+1):
+        for K2 in range(200, 2200, 100):
+            for K1 in range(max(2, K-fuzz), min(K_max, K+fuzz)+1):
                 candidates += calculate(K, K1, K2, q_max, L_hash, trees)
                 progress += 1
                 print("searching: %3d %% \r" % (100.0 * progress / complete,), end=' ', file=stderr)
@ -285,7 +285,7 @@ def search():
     step = 2.0
     bins = {}
     limit = floor_div(limit_cost, step)
-    for bin in xrange(0, limit+2):
+    for bin in range(0, limit+2):
         bins[bin] = []
 
     for c in candidates:
@ -296,7 +296,7 @@ def search():
 
     # For each in a range of signing times, find the best candidate.
     best = []
-    for bin in xrange(0, limit):
+    for bin in range(0, limit):
         candidates = bins[bin] + bins[bin+1] + bins[bin+2]
         if len(candidates) > 0:
             best += [min(candidates, key=lambda c: c['sig_bytes'])]
|
|||||||
|
|
||||||
from __future__ import print_function
|
from __future__ import print_function
|
||||||
|
|
||||||
|
from past.builtins import cmp
|
||||||
|
|
||||||
import random
|
import random
|
||||||
|
|
||||||
SERVER_CAPACITY = 10**12
|
SERVER_CAPACITY = 10**12
|
||||||
|
@ -2,6 +2,11 @@
 
 from __future__ import print_function
 
+from future.utils import PY2
+if PY2:
+    from future.builtins import input
+
+
 import random, math, re
 from twisted.python import usage
 
@ -205,7 +210,7 @@ def graph():
         series["alacrity"][file_size] = s.bytes_until_some_data
     g.plot([ (fs, series["overhead"][fs])
              for fs in sizes ])
-    raw_input("press return")
+    input("press return")
 
 
 if __name__ == '__main__':
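
Note: `raw_input` does not exist on Python 3; the `from future.builtins import input` hunk above gives Python 2 the same safe semantics (return the typed text, never eval it), so one spelling works on both interpreters. A minimal sketch:

    from future.utils import PY2
    if PY2:
        from future.builtins import input  # raw_input semantics on Python 2

    # reply = input("press return")       # identical behaviour on 2 and 3
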
@ -1 +0,0 @@
-PyPy is now a supported platform.
@ -1 +0,0 @@
-The Tahoe-LAFS project has adopted a formal code of conduct.
newsfragments/3037.other (new file)
@ -0,0 +1 @@
+The "Great Black Swamp" proposed specification has been expanded to include two lease management APIs.
@ -1 +0,0 @@
-The Magic Folder frontend has been split out into a stand-alone project. The functionality is no longer part of Tahoe-LAFS itself. Learn more at <https://github.com/LeastAuthority/magic-folder>.
@ -1 +0,0 @@
-Tahoe-LAFS now supports CentOS 8 and no longer supports CentOS 7.
@ -1 +0,0 @@
-Make directory page links work.
@ -1 +0,0 @@
-Replace nevow with twisted.web in web.operations.OphandleTable
@ -1 +0,0 @@
-Replace nevow with twisted.web in web.operations.ReloadMixin
@ -1 +0,0 @@
-Port checker result pages' rendering from nevow to twisted web templates.
@ -1 +0,0 @@
-allmydata.testing.web, a new module, now offers a supported Python API for testing Tahoe-LAFS web API clients.
@ -1 +0,0 @@
-Slackware 14.2 is no longer a Tahoe-LAFS supported platform.
@ -1 +0,0 @@
-Tahoe-LAFS now supports Ubuntu 20.04.
@ -1 +0,0 @@
-Use last known revision of Chutney that is known to work with Python 2 for Tor integration tests.
@ -1 +0,0 @@
-Mutable files now use RSA exponent 65537
@ -1 +0,0 @@
-
@ -1 +0,0 @@
-The "coverage" tox environment has been replaced by the "py27-coverage" and "py36-coverage" environments.
@ -1 +0,0 @@
-
@ -1 +0,0 @@
-
@ -1 +0,0 @@
-Added pre-commit config to run flake8 checks on commit/push.
@ -1 +0,0 @@
-Various, minor development `./Makefile` cleanup and improvement.
@ -1 +0,0 @@
-Minor test runner improvements and docs.