Merge remote-tracking branch 'origin/master' into 2928.remote-allocate_tcp_port-test_node.py

commit af5531d81b (tahoe-lafs/tahoe-lafs)

(deleted AppVeyor CI configuration, 60 lines)
@@ -1,60 +0,0 @@
# adapted from https://packaging.python.org/en/latest/appveyor/

environment:

  matrix:

    # For Python versions available on Appveyor, see
    # http://www.appveyor.com/docs/installed-software#python
    - PYTHON: "C:\\Python27"
    - PYTHON: "C:\\Python27-x64"
      # DISTUTILS_USE_SDK: "1"
      # TOX_TESTENV_PASSENV: "DISTUTILS_USE_SDK INCLUDE LIB"

install:
  - |
    %PYTHON%\python.exe -m pip install -U pip
    %PYTHON%\python.exe -m pip install wheel tox virtualenv

# note:
# %PYTHON% has: python.exe
# %PYTHON%\Scripts has: pip.exe, tox.exe (and others installed by bare pip)

build: off

# we run from C:\projects\tahoe-lafs

test_script:
  # Put your test command here.
  # Note that you must use the environment variable %PYTHON% to refer to
  # the interpreter you're using - Appveyor does not do anything special
  # to put the Python version you want to use on PATH.
  - |
    %PYTHON%\Scripts\tox.exe -e py

after_test:
  # This builds the main tahoe wheel, and wheels for all dependencies.
  # Again, you only need build.cmd if you're building C extensions for
  # 64-bit Python 3.3/3.4. And you need to use %PYTHON% to get the correct
  # interpreter. If _trial_temp still exists, the "pip wheel" fails on
  # _trial_temp\local_dir (not sure why).
  - |
    copy _trial_temp\test.log trial_test_log.txt
    rd /s /q _trial_temp
    %PYTHON%\python.exe setup.py bdist_wheel
    %PYTHON%\python.exe -m pip wheel -w dist .

artifacts:
  # bdist_wheel puts your built wheel in the dist directory
  # "pip wheel -w dist ." puts all the dependency wheels there too
  # this gives us a zipfile with everything
  - path: 'dist\*'
  - path: trial_test_log.txt
    name: Trial test.log

#on_success:
#  You can use this step to upload your artifacts to a public website.
#  See Appveyor's documentation for more details. Or you can simply
#  access your wheels from the Appveyor "artifacts" tab for your build.
.circleci/Dockerfile.centos  (new file, 27 lines)
@@ -0,0 +1,27 @@
ARG TAG
FROM centos:${TAG}
ARG PYTHON_VERSION

ENV WHEELHOUSE_PATH /tmp/wheelhouse
ENV VIRTUALENV_PATH /tmp/venv
# This will get updated by the CircleCI checkout step.
ENV BUILD_SRC_ROOT /tmp/project

# XXX net-tools is actually a Tahoe-LAFS runtime dependency!
RUN yum install --assumeyes \
    git \
    sudo \
    make automake gcc gcc-c++ \
    python${PYTHON_VERSION} \
    python${PYTHON_VERSION}-devel \
    libffi-devel \
    openssl-devel \
    libyaml \
    /usr/bin/virtualenv \
    net-tools

# Get the project source. This is better than it seems. CircleCI will
# *update* this checkout on each job run, saving us more time per-job.
COPY . ${BUILD_SRC_ROOT}

RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"
.circleci/Dockerfile.debian  (new file, 32 lines)
@@ -0,0 +1,32 @@
ARG TAG
FROM debian:${TAG}
ARG PYTHON_VERSION

ENV WHEELHOUSE_PATH /tmp/wheelhouse
ENV VIRTUALENV_PATH /tmp/venv
# This will get updated by the CircleCI checkout step.
ENV BUILD_SRC_ROOT /tmp/project

RUN apt-get --quiet update && \
    apt-get --quiet --yes install \
    git \
    lsb-release \
    sudo \
    build-essential \
    python${PYTHON_VERSION} \
    python${PYTHON_VERSION}-dev \
    libffi-dev \
    libssl-dev \
    libyaml-dev \
    virtualenv

# Get the project source. This is better than it seems. CircleCI will
# *update* this checkout on each job run, saving us more time per-job.
COPY . ${BUILD_SRC_ROOT}

RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"

# Only the integration tests currently need this but it doesn't hurt to always
# have it present and it's simpler than building a whole extra image just for
# the integration tests.
RUN ${BUILD_SRC_ROOT}/integration/install-tor.sh
.circleci/Dockerfile.fedora  (new file, 27 lines)
@@ -0,0 +1,27 @@
ARG TAG
FROM fedora:${TAG}
ARG PYTHON_VERSION

ENV WHEELHOUSE_PATH /tmp/wheelhouse
ENV VIRTUALENV_PATH /tmp/venv
# This will get updated by the CircleCI checkout step.
ENV BUILD_SRC_ROOT /tmp/project

# XXX net-tools is actually a Tahoe-LAFS runtime dependency!
RUN yum install --assumeyes \
    git \
    sudo \
    make automake gcc gcc-c++ \
    python${PYTHON_VERSION} \
    python${PYTHON_VERSION}-devel \
    libffi-devel \
    openssl-devel \
    libyaml-devel \
    /usr/bin/virtualenv \
    net-tools

# Get the project source. This is better than it seems. CircleCI will
# *update* this checkout on each job run, saving us more time per-job.
COPY . ${BUILD_SRC_ROOT}

RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"
.circleci/Dockerfile.pypy  (new file, 23 lines)
@@ -0,0 +1,23 @@
FROM pypy:2.7-buster

ENV WHEELHOUSE_PATH /tmp/wheelhouse
ENV VIRTUALENV_PATH /tmp/venv
# This will get updated by the CircleCI checkout step.
ENV BUILD_SRC_ROOT /tmp/project

RUN apt-get --quiet update && \
    apt-get --quiet --yes install \
    git \
    lsb-release \
    sudo \
    build-essential \
    libffi-dev \
    libssl-dev \
    libyaml-dev \
    virtualenv

# Get the project source. This is better than it seems. CircleCI will
# *update* this checkout on each job run, saving us more time per-job.
COPY . ${BUILD_SRC_ROOT}

RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "pypy"
.circleci/Dockerfile.ubuntu  (new file, 30 lines)
@@ -0,0 +1,30 @@
ARG TAG
FROM ubuntu:${TAG}
ARG PYTHON_VERSION

ENV WHEELHOUSE_PATH /tmp/wheelhouse
ENV VIRTUALENV_PATH /tmp/venv
# This will get updated by the CircleCI checkout step.
ENV BUILD_SRC_ROOT /tmp/project

# language-pack-en included to support the en_US LANG setting.
# iproute2 necessary for automatic address detection/assignment.
RUN apt-get --quiet update && \
    apt-get --quiet --yes install git && \
    apt-get --quiet --yes install \
    sudo \
    build-essential \
    python${PYTHON_VERSION} \
    python${PYTHON_VERSION}-dev \
    libffi-dev \
    libssl-dev \
    libyaml-dev \
    virtualenv \
    language-pack-en \
    iproute2

# Get the project source. This is better than it seems. CircleCI will
# *update* this checkout on each job run, saving us more time per-job.
COPY . ${BUILD_SRC_ROOT}

RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"
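For reference, an image like the ones above can be built locally with a plain docker invocation; this is only a sketch, and the TAG and PYTHON_VERSION values shown are illustrative examples taken from the build-image jobs defined later in .circleci/config.yml:

    # Sketch: build the Ubuntu 18.04 / Python 2.7 test image from the repository root.
    docker build \
        --build-arg TAG=18.04 \
        --build-arg PYTHON_VERSION=2.7 \
        -t tahoelafsci/ubuntu:18.04-py2.7 \
        -f .circleci/Dockerfile.ubuntu \
        .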
.circleci/config.yml  (new file, 568 lines)
@@ -0,0 +1,568 @@
# https://circleci.com/docs/2.0/

# We use version 2.1 of CircleCI's configuration format (the docs are still at
# the 2.0 link) in order to have access to Windows executors. This means we
# can't use dots in job names anymore. They have a new "parameters" feature
# that is supposed to remove the need to have version numbers in job names (the
# source of our dots), but switching to that is going to be a bigger refactor:
#
# https://discuss.circleci.com/t/v2-1-job-name-validation/31123
# https://circleci.com/docs/2.0/reusing-config/
#
version: 2.1

workflows:
  ci:
    jobs:
      # Platforms
      - "debian-9"
      - "debian-8":
          requires:
            - "debian-9"

      - "ubuntu-20-04"
      - "ubuntu-18-04":
          requires:
            - "ubuntu-20-04"
      - "ubuntu-16-04":
          requires:
            - "ubuntu-20-04"

      - "fedora-29"
      - "fedora-28":
          requires:
            - "fedora-29"

      - "centos-8"

      - "nixos-19-09"

      # Test against PyPy 2.7
      - "pypy27-buster"

      # Just one Python 3.6 configuration while the port is in-progress.
      - "python36"

      # Other assorted tasks and configurations
      - "lint"
      - "pyinstaller"
      - "deprecations"
      - "c-locale"
      # Any locale other than C or UTF-8.
      - "another-locale"

      - "integration":
          requires:
            # If the unit test suite doesn't pass, don't bother running the
            # integration tests.
            - "debian-9"

      # Generate the underlying data for a visualization to aid with Python 3
      # porting.
      - "build-porting-depgraph"

  images:
    # Build the Docker images used by the ci jobs. This makes the ci jobs
    # faster and takes various spurious failures out of the critical path.
    triggers:
      # Build once a day
      - schedule:
          cron: "0 0 * * *"
          filters:
            branches:
              only:
                - "master"

    jobs:
      - "build-image-debian-8"
      - "build-image-debian-9"
      - "build-image-ubuntu-16-04"
      - "build-image-ubuntu-18-04"
      - "build-image-ubuntu-20-04"
      - "build-image-fedora-28"
      - "build-image-fedora-29"
      - "build-image-centos-8"
      - "build-image-pypy27-buster"
      - "build-image-python36-ubuntu"


jobs:
  lint:
    docker:
      - image: "circleci/python:2"

    steps:
      - "checkout"

      - run:
          name: "Install tox"
          command: |
            pip install --user tox

      - run:
          name: "Static-ish code checks"
          command: |
            ~/.local/bin/tox -e codechecks

  pyinstaller:
    docker:
      - image: "circleci/python:2"

    steps:
      - "checkout"

      - run:
          name: "Install tox"
          command: |
            pip install --user tox

      - run:
          name: "Make PyInstaller executable"
          command: |
            ~/.local/bin/tox -e pyinstaller

      - run:
          # To verify that the resultant PyInstaller-generated binary executes
          # cleanly (i.e., that it terminates with an exit code of 0 and isn't
          # failing due to import/packaging-related errors, etc.).
          name: "Test PyInstaller executable"
          command: |
            dist/Tahoe-LAFS/tahoe --version

  debian-9: &DEBIAN
    docker:
      - image: "tahoelafsci/debian:9-py2.7"
        user: "nobody"

    environment: &UTF_8_ENVIRONMENT
      # In general, the test suite is not allowed to fail while the job
      # succeeds. But you can set this to "yes" if you want it to be
      # otherwise.
      ALLOWED_FAILURE: "no"
      # Tell Hypothesis which configuration we want it to use.
      TAHOE_LAFS_HYPOTHESIS_PROFILE: "ci"
      # Tell the C runtime things about character encoding (mainly to do with
      # filenames and argv).
      LANG: "en_US.UTF-8"
      # Select a tox environment to run for this job.
      TAHOE_LAFS_TOX_ENVIRONMENT: "py27-coverage"
      # Additional arguments to pass to tox.
      TAHOE_LAFS_TOX_ARGS: ""
      # The path in which test artifacts will be placed.
      ARTIFACTS_OUTPUT_PATH: "/tmp/artifacts"
      # Convince all of our pip invocations to look at the cached wheelhouse
      # we maintain.
      WHEELHOUSE_PATH: &WHEELHOUSE_PATH "/tmp/wheelhouse"
      PIP_FIND_LINKS: "file:///tmp/wheelhouse"
      # Upload the coverage report.
      UPLOAD_COVERAGE: "yes"

    # pip cannot install packages if the working directory is not readable.
    # We want to run a lot of steps as nobody instead of as root.
    working_directory: "/tmp/project"

    steps:
      - "checkout"
      - run: &SETUP_VIRTUALENV
          name: "Setup virtualenv"
          command: |
            /tmp/project/.circleci/setup-virtualenv.sh \
                "/tmp/venv" \
                "/tmp/project" \
                "${WHEELHOUSE_PATH}" \
                "${TAHOE_LAFS_TOX_ENVIRONMENT}" \
                "${TAHOE_LAFS_TOX_ARGS}"

      - run: &RUN_TESTS
          name: "Run test suite"
          command: |
            /tmp/project/.circleci/run-tests.sh \
                "/tmp/venv" \
                "/tmp/project" \
                "${ALLOWED_FAILURE}" \
                "${ARTIFACTS_OUTPUT_PATH}" \
                "${TAHOE_LAFS_TOX_ENVIRONMENT}" \
                "${TAHOE_LAFS_TOX_ARGS}"
          # trial output gets directed straight to a log. avoid the circleci
          # timeout while the test suite runs.
          no_output_timeout: "20m"

      - store_test_results: &STORE_TEST_RESULTS
          path: "/tmp/artifacts/junit"

      - store_artifacts: &STORE_TEST_LOG
          # Despite passing --workdir /tmp to tox above, it still runs trial
          # in the project source checkout.
          path: "/tmp/project/_trial_temp/test.log"

      - store_artifacts: &STORE_OTHER_ARTIFACTS
          # Store any other artifacts, too. This is handy to allow other jobs
          # sharing most of the definition of this one to be able to
          # contribute artifacts easily.
          path: "/tmp/artifacts"

      - run: &SUBMIT_COVERAGE
          name: "Submit coverage results"
          command: |
            if [ -n "${UPLOAD_COVERAGE}" ]; then
              /tmp/venv/bin/codecov
            fi


  debian-8:
    <<: *DEBIAN
    docker:
      - image: "tahoelafsci/debian:8-py2.7"
        user: "nobody"


  pypy27-buster:
    <<: *DEBIAN
    docker:
      - image: "tahoelafsci/pypy:buster-py2"
        user: "nobody"

    environment:
      <<: *UTF_8_ENVIRONMENT
      # We don't do coverage since it makes PyPy far too slow:
      TAHOE_LAFS_TOX_ENVIRONMENT: "pypy27"
      # Since we didn't collect it, don't upload it.
      UPLOAD_COVERAGE: ""


  c-locale:
    <<: *DEBIAN

    environment:
      <<: *UTF_8_ENVIRONMENT
      LANG: "C"


  another-locale:
    <<: *DEBIAN

    environment:
      <<: *UTF_8_ENVIRONMENT
      # aka "Latin 1"
      LANG: "en_US.ISO-8859-1"


  deprecations:
    <<: *DEBIAN

    environment:
      <<: *UTF_8_ENVIRONMENT
      # Select the deprecations tox environments.
      TAHOE_LAFS_TOX_ENVIRONMENT: "deprecations,upcoming-deprecations"
      # Put the logs somewhere we can report them.
      TAHOE_LAFS_WARNINGS_LOG: "/tmp/artifacts/deprecation-warnings.log"
      # The deprecations tox environments don't do coverage measurement.
      UPLOAD_COVERAGE: ""


  integration:
    <<: *DEBIAN

    environment:
      <<: *UTF_8_ENVIRONMENT
      # Select the integration tests tox environments.
      TAHOE_LAFS_TOX_ENVIRONMENT: "integration"
      # Disable artifact collection because py.test can't produce any.
      ARTIFACTS_OUTPUT_PATH: ""

    steps:
      - "checkout"
      # DRY, YAML-style. See the debian-9 steps.
      - run: *SETUP_VIRTUALENV
      - run: *RUN_TESTS


  ubuntu-16-04:
    <<: *DEBIAN
    docker:
      - image: "tahoelafsci/ubuntu:16.04-py2.7"
        user: "nobody"


  ubuntu-18-04: &UBUNTU_18_04
    <<: *DEBIAN
    docker:
      - image: "tahoelafsci/ubuntu:18.04-py2.7"
        user: "nobody"


  python36:
    <<: *UBUNTU_18_04
    docker:
      - image: "tahoelafsci/ubuntu:18.04-py3"
        user: "nobody"

    environment:
      <<: *UTF_8_ENVIRONMENT
      # The default trial args include --rterrors which is incompatible with
      # this reporter on Python 3. So drop that and just specify the
      # reporter.
      TAHOE_LAFS_TRIAL_ARGS: "--reporter=subunitv2-file"
      TAHOE_LAFS_TOX_ENVIRONMENT: "py36-coverage"


  ubuntu-20-04:
    <<: *DEBIAN
    docker:
      - image: "tahoelafsci/ubuntu:20.04"
        user: "nobody"


  centos-8: &RHEL_DERIV
    docker:
      - image: "tahoelafsci/centos:8-py2"
        user: "nobody"

    environment: *UTF_8_ENVIRONMENT

    # pip cannot install packages if the working directory is not readable.
    # We want to run a lot of steps as nobody instead of as root.
    working_directory: "/tmp/project"

    steps:
      - "checkout"
      - run: *SETUP_VIRTUALENV
      - run: *RUN_TESTS
      - store_test_results: *STORE_TEST_RESULTS
      - store_artifacts: *STORE_TEST_LOG
      - store_artifacts: *STORE_OTHER_ARTIFACTS
      - run: *SUBMIT_COVERAGE


  fedora-28:
    <<: *RHEL_DERIV
    docker:
      - image: "tahoelafsci/fedora:28-py"
        user: "nobody"


  fedora-29:
    <<: *RHEL_DERIV
    docker:
      - image: "tahoelafsci/fedora:29-py"
        user: "nobody"


  nixos-19-09:
    docker:
      # Run in a highly Nix-capable environment.
      - image: "nixorg/nix:circleci"

    environment:
      NIX_PATH: "nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.09-small.tar.gz"

    steps:
      - "checkout"
      - "run":
          name: "Build and Test"
          command: |
            # CircleCI build environment looks like it has a zillion and a
            # half cores. Don't let Nix autodetect this high core count
            # because it blows up memory usage and fails the test run. Pick a
            # number of cores that suits the build environment we're paying
            # for (the free one!).
            #
            # Also, let it run more than one job at a time because we have to
            # build a couple simple little dependencies that don't take
            # advantage of multiple cores and we get a little speedup by doing
            # them in parallel.
            nix-build --cores 3 --max-jobs 2 nix/

  # Generate up-to-date data for the dependency graph visualizer.
  build-porting-depgraph:
    # Get a system in which we can easily install Tahoe-LAFS and all its
    # dependencies. The dependency graph analyzer works by executing the code.
    # It's Python, what do you expect?
    <<: *DEBIAN

    steps:
      - "checkout"

      - add_ssh_keys:
          fingerprints:
            # Jean-Paul Calderone <exarkun@twistedmatrix.com> (CircleCI depgraph key)
            # This lets us push to tahoe-lafs/tahoe-depgraph in the next step.
            - "86:38:18:a7:c0:97:42:43:18:46:55:d6:21:b0:5f:d4"

      - run:
          name: "Setup Python Environment"
          command: |
            /tmp/venv/bin/pip install -e /tmp/project

      - run:
          name: "Generate dependency graph data"
          command: |
            . /tmp/venv/bin/activate
            ./misc/python3/depgraph.sh

  build-image: &BUILD_IMAGE
    # This is a template for a job to build a Docker image that has as much of
    # the setup as we can manage already done and baked in. This cuts down on
    # the per-job setup time the actual testing jobs have to perform - by
    # perhaps 10% - 20%.
    #
    # https://circleci.com/blog/how-to-build-a-docker-image-on-circleci-2-0/
    docker:
      - image: "docker:17.05.0-ce-git"

    environment:
      DISTRO: "tahoelafsci/<DISTRO>:foo-py2"
      TAG: "tahoelafsci/distro:<TAG>-py2"
      PYTHON_VERSION: "tahoelafsci/distro:tag-py<PYTHON_VERSION}"

    steps:
      - "checkout"
      - "setup_remote_docker"
      - run:
          name: "Get openssl"
          command: |
            apk add --no-cache openssl
      - run:
          name: "Get Dockerhub secrets"
          command: |
            # If you create an encryption key like this:
            #
            #  openssl enc -aes-256-cbc -k secret -P -md sha256

            # From the output that looks like:
            #
            #  salt=...
            #  key=...
            #  iv =...
            #
            # extract just the value for ``key``.

            # then you can re-generate ``secret-env-cipher`` locally using the
            # command:
            #
            #   openssl aes-256-cbc -e -md sha256 -in secret-env-plain -out .circleci/secret-env-cipher -pass env:KEY
            #
            # Make sure the key is set as the KEY environment variable in the
            # CircleCI web interface. You can do this by visiting
            # <https://circleci.com/gh/tahoe-lafs/tahoe-lafs/edit#env-vars>
            # after logging in to CircleCI with an account in the tahoe-lafs
            # CircleCI team.
            #
            # Then you can recover the environment plaintext (for example, to
            # change and re-encrypt it) just like CircleCI recovers it
            # here:
            #
            openssl aes-256-cbc -d -md sha256 -in .circleci/secret-env-cipher -pass env:KEY >> ~/.env
      - run:
          name: "Log in to Dockerhub"
          command: |
            . ~/.env
            # TAHOELAFSCI_PASSWORD comes from the secret env.
            docker login -u tahoelafsci -p ${TAHOELAFSCI_PASSWORD}
      - run:
          name: "Build image"
          command: |
            docker \
                build \
                --build-arg TAG=${TAG} \
                --build-arg PYTHON_VERSION=${PYTHON_VERSION} \
                -t tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION} \
                -f ~/project/.circleci/Dockerfile.${DISTRO} \
                ~/project/
      - run:
          name: "Push image"
          command: |
            docker push tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION}


  build-image-debian-8:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "debian"
      TAG: "8"
      PYTHON_VERSION: "2.7"


  build-image-debian-9:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "debian"
      TAG: "9"
      PYTHON_VERSION: "2.7"


  build-image-ubuntu-16-04:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "ubuntu"
      TAG: "16.04"
      PYTHON_VERSION: "2.7"


  build-image-ubuntu-18-04:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "ubuntu"
      TAG: "18.04"
      PYTHON_VERSION: "2.7"


  build-image-python36-ubuntu:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "ubuntu"
      TAG: "18.04"
      PYTHON_VERSION: "3"


  build-image-ubuntu-20-04:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "ubuntu"
      TAG: "20.04"
      PYTHON_VERSION: "2.7"


  build-image-centos-8:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "centos"
      TAG: "8"
      PYTHON_VERSION: "2"


  build-image-fedora-28:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "fedora"
      TAG: "28"
      # The default on Fedora (this version anyway) is still Python 2.
      PYTHON_VERSION: ""


  build-image-fedora-29:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "fedora"
      TAG: "29"


  build-image-pypy27-buster:
    <<: *BUILD_IMAGE

    environment:
      DISTRO: "pypy"
      TAG: "buster"
      # We only have Python 2 for PyPy right now so there's no support for
      # setting up PyPy 3 in the image building toolchain. This value is just
      # for constructing the right Docker image tag.
      PYTHON_VERSION: "2"
.circleci/create-virtualenv.sh  (new executable file, 49 lines)
@@ -0,0 +1,49 @@
#!/bin/bash

# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -euxo pipefail

# The filesystem location of the wheelhouse which we'll populate with wheels
# for all of our dependencies.
WHEELHOUSE_PATH="$1"
shift

# The filesystem location of the root of a virtualenv we can use to get/build
# wheels.
BOOTSTRAP_VENV="$1"
shift

# The basename of the Python executable (found on PATH) that will be used with
# this image. This lets us create a virtualenv that uses the correct Python.
PYTHON="$1"
shift

# Set up the virtualenv as a non-root user so we can run the test suite as a
# non-root user. See below.
virtualenv --python "${PYTHON}" "${BOOTSTRAP_VENV}"

# For convenience.
PIP="${BOOTSTRAP_VENV}/bin/pip"

# Tell pip where it can find any existing wheels.
export PIP_FIND_LINKS="file://${WHEELHOUSE_PATH}"

# Get "certifi" to avoid bug #2913. Basically if a `setup_requires=...` causes
# a package to be installed (with setuptools) then it'll fail on certain
# platforms (travis's OS-X 10.12, Slackware 14.2) because PyPI's TLS
# requirements (TLS >= 1.2) are incompatible with the old TLS clients
# available to those systems. Installing it ahead of time (with pip) avoids
# this problem. Make sure this step comes before any other attempts to
# install things using pip!
"${PIP}" install certifi

# Get a new, awesome version of pip and setuptools. For example, the
# distro-packaged virtualenv's pip may not know about wheels. Get the newer
# version of pip *first* in case we have a really old one now which can't even
# install setuptools properly.
"${PIP}" install --upgrade pip

# setuptools 45 requires Python 3.5 or newer. Even though we upgraded pip
# above, it may still not be able to get us a compatible version unless we
# explicitly ask for one.
"${PIP}" install --upgrade setuptools==44.0.0 wheel
.circleci/fix-permissions.sh  (new executable file, 34 lines)
@@ -0,0 +1,34 @@
#!/bin/bash

# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -euxo pipefail

# The filesystem location of the wheelhouse which we'll populate with wheels
# for all of our dependencies.
WHEELHOUSE_PATH="$1"
shift

# The filesystem location of the root of a virtualenv we can use to get/build
# wheels.
BOOTSTRAP_VENV="$1"
shift

# The filesystem location of the root of the project source. We need this to
# know what wheels to get/build, of course.
PROJECT_ROOT="$1"
shift

# Most stuff is going to run as nobody. Here's a helper to make sure nobody
# can access necessary files.
CHOWN_NOBODY="chown --recursive nobody:$(id --group nobody)"

# Avoid the /nonexistent home directory in nobody's /etc/passwd entry.
usermod --home /tmp/nobody nobody

# Grant read access to nobody, the user which will eventually try to test this
# checkout.
${CHOWN_NOBODY} "${PROJECT_ROOT}"

# Create a place for some wheels to live.
mkdir -p "${WHEELHOUSE_PATH}"
${CHOWN_NOBODY} "${WHEELHOUSE_PATH}"
.circleci/populate-wheelhouse.sh  (new executable file, 55 lines)
@@ -0,0 +1,55 @@
#!/bin/bash

# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -euxo pipefail

# Basic Python packages that you just need to have around to do anything,
# practically speaking.
BASIC_DEPS="pip wheel"

# Python packages we need to support the test infrastructure. *Not* packages
# Tahoe-LAFS itself (implementation or test suite) needs.
TEST_DEPS="tox codecov"

# Python packages we need to generate test reports for CI infrastructure.
# *Not* packages Tahoe-LAFS itself (implementation or test suite) needs.
REPORTING_DEPS="python-subunit junitxml subunitreporter"

# The filesystem location of the wheelhouse which we'll populate with wheels
# for all of our dependencies.
WHEELHOUSE_PATH="$1"
shift

# The filesystem location of the root of a virtualenv we can use to get/build
# wheels.
BOOTSTRAP_VENV="$1"
shift

# The filesystem location of the root of the project source. We need this to
# know what wheels to get/build, of course.
PROJECT_ROOT="$1"
shift

# For convenience.
PIP="${BOOTSTRAP_VENV}/bin/pip"

# Tell pip where it can find any existing wheels.
export PIP_FIND_LINKS="file://${WHEELHOUSE_PATH}"

# Populate the wheelhouse, if necessary. zfec 1.5.3 can only be built with a
# UTF-8 environment so make sure we have one, at least for this invocation.
LANG="en_US.UTF-8" "${PIP}" \
    wheel \
    --wheel-dir "${WHEELHOUSE_PATH}" \
    "${PROJECT_ROOT}"[test] \
    ${BASIC_DEPS} \
    ${TEST_DEPS} \
    ${REPORTING_DEPS}

# Not strictly wheelhouse population but ... Note we omit basic deps here.
# They're in the wheelhouse if Tahoe-LAFS wants to drag them in but it will
# have to ask.
"${PIP}" \
    install \
    ${TEST_DEPS} \
    ${REPORTING_DEPS}
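To illustrate how the wheelhouse is consumed later: the test jobs point pip at it via PIP_FIND_LINKS (set both here and in the CI environment) and, in run-tests.sh, forbid the package index entirely, so installs resolve only from the cached wheels. A rough sketch, not the literal CI commands:

    # Sketch only: how later steps consume the wheelhouse built above.
    export PIP_FIND_LINKS="file:///tmp/wheelhouse"   # same value the CI environment uses
    export PIP_NO_INDEX="1"                          # run-tests.sh sets this so PyPI is never consulted
    /tmp/venv/bin/pip install /tmp/project[test]     # resolves entirely from the wheelhouse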
.circleci/prepare-image.sh  (new executable file, 28 lines)
@@ -0,0 +1,28 @@
#!/bin/bash

# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -euxo pipefail

# The filesystem location of the wheelhouse which we'll populate with wheels
# for all of our dependencies.
WHEELHOUSE_PATH="$1"
shift

# The filesystem location of the root of a virtualenv we can use to get/build
# wheels.
BOOTSTRAP_VENV="$1"
shift

# The filesystem location of the root of the project source. We need this to
# know what wheels to get/build, of course.
PROJECT_ROOT="$1"
shift

# The basename of the Python executable (found on PATH) that will be used with
# this image. This lets us create a virtualenv that uses the correct Python.
PYTHON="$1"
shift

"${PROJECT_ROOT}"/.circleci/fix-permissions.sh "${WHEELHOUSE_PATH}" "${BOOTSTRAP_VENV}" "${PROJECT_ROOT}"
sudo --set-home -u nobody "${PROJECT_ROOT}"/.circleci/create-virtualenv.sh "${WHEELHOUSE_PATH}" "${BOOTSTRAP_VENV}" "${PYTHON}"
sudo --set-home -u nobody "${PROJECT_ROOT}"/.circleci/populate-wheelhouse.sh "${WHEELHOUSE_PATH}" "${BOOTSTRAP_VENV}" "${PROJECT_ROOT}"
.circleci/run-build-locally.sh  (new executable file, 10 lines)
@@ -0,0 +1,10 @@
#!/usr/bin/env bash

CIRCLE_TOKEN=efb53124be82dd4b3153bc0e3f60de71da629d59

curl --user ${CIRCLE_TOKEN}: \
    --request POST \
    --form revision=$(git rev-parse HEAD) \
    --form config=@config.yml \
    --form notify=false \
    https://circleci.com/api/v1.1/project/github/exarkun/tahoe-lafs/tree/2929.circleci
.circleci/run-tests.sh  (new executable file, 96 lines)
@@ -0,0 +1,96 @@
#!/bin/bash

# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -euxo pipefail

# The filesystem location of the root of a virtualenv we can use to get/build
# wheels.
BOOTSTRAP_VENV="$1"
shift

# The filesystem location of the root of the project source. We need this to
# know what wheels to get/build, of course.
PROJECT_ROOT="$1"
shift

ALLOWED_FAILURE="$1"
shift

ARTIFACTS=$1
shift

TAHOE_LAFS_TOX_ENVIRONMENT=$1
shift

TAHOE_LAFS_TOX_ARGS=$1
shift || :

if [ -n "${ARTIFACTS}" ]; then
    # If given an artifacts path, prepare to have some artifacts created
    # there. The integration tests don't produce any artifacts; that is the
    # case where we expect not to end up here.

    # Make sure we can actually write things to this directory.
    mkdir -p "${ARTIFACTS}"

    SUBUNIT2="${ARTIFACTS}"/results.subunit2

    # Use an intermediate directory here because CircleCI extracts some label
    # information from its name.
    JUNITXML="${ARTIFACTS}"/junit/unittests/results.xml
else
    SUBUNIT2=""
    JUNITXML=""
fi

# A prefix for the test command that ensures it will exit after no more than a
# certain amount of time. Ideally, we would only enforce a "silent" period
# timeout but there isn't obviously a ready-made tool for that. The test
# suite only takes about 5 - 6 minutes on CircleCI right now. 15 minutes
# seems like a moderately safe window.
#
# This is primarily aimed at catching hangs on the PyPy job which runs for
# about 21 minutes and then gets killed by CircleCI in a way that fails the
# job and bypasses our "allowed failure" logic.
TIMEOUT="timeout --kill-after 1m 15m"

# Run the test suite as a non-root user. This is the expected usage; some
# small areas of the test suite assume non-root privileges (such as unreadable
# files being unreadable).
#
# Also run with /tmp as a workdir because the non-root user won't be able to
# create the tox working filesystem state in the source checkout because it is
# owned by root.
#
# Send the output directly to a file because transporting the binary subunit2
# via tox and then scraping it out is hideous and failure prone.
export SUBUNITREPORTER_OUTPUT_PATH="${SUBUNIT2}"
export TAHOE_LAFS_TRIAL_ARGS="${TAHOE_LAFS_TRIAL_ARGS:---reporter=subunitv2-file --rterrors}"
export PIP_NO_INDEX="1"

# Make output unbuffered, so progress reports from subunitv2-file get streamed
# and notify CircleCI we're still alive.
export PYTHONUNBUFFERED=1

if [ "${ALLOWED_FAILURE}" = "yes" ]; then
    alternative="true"
else
    alternative="false"
fi

${TIMEOUT} ${BOOTSTRAP_VENV}/bin/tox \
    -c ${PROJECT_ROOT}/tox.ini \
    --workdir /tmp/tahoe-lafs.tox \
    -e "${TAHOE_LAFS_TOX_ENVIRONMENT}" \
    ${TAHOE_LAFS_TOX_ARGS} || "${alternative}"

if [ -n "${ARTIFACTS}" ]; then
    if [ ! -e "${SUBUNIT2}" ]; then
        echo "subunitv2 output file does not exist: ${SUBUNIT2}"
        exit 1
    fi

    # Create a junitxml results area.
    mkdir -p "$(dirname "${JUNITXML}")"
    "${BOOTSTRAP_VENV}"/bin/subunit2junitxml < "${SUBUNIT2}" > "${JUNITXML}" || "${alternative}"
fi
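The ALLOWED_FAILURE handling above relies on a small shell idiom: the fallback after `||` is the command name "true" or "false", so a failing tox run is either masked or propagated. A standalone sketch of the same idiom (not part of the CI scripts):

    # Sketch only: the allowed-failure idiom used by run-tests.sh.
    set -e
    ALLOWED_FAILURE="yes"
    if [ "${ALLOWED_FAILURE}" = "yes" ]; then
        alternative="true"    # swallows a failing exit status
    else
        alternative="false"   # re-raises the failure
    fi
    false || "${alternative}"   # with set -e, aborts here only when failures are not allowed
    echo "reached only when the failure was allowed (or the command succeeded)"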
.circleci/secret-env-cipher  (new file, 1 line)
@@ -0,0 +1 @@
(binary content: the OpenSSL-encrypted private CI environment, not reproducible as text)
.circleci/setup-virtualenv.sh  (new executable file, 42 lines)
@@ -0,0 +1,42 @@
#!/bin/bash

# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -euxo pipefail

# The filesystem location of the root of a virtualenv we can use to get/build
# wheels.
BOOTSTRAP_VENV="$1"
shift

# The filesystem location of the root of the project source. We need this to
# know what wheels to get/build, of course.
PROJECT_ROOT="$1"
shift

# The filesystem location of the wheelhouse which we'll populate with wheels
# for all of our dependencies.
WHEELHOUSE_PATH="$1"
shift

TAHOE_LAFS_TOX_ENVIRONMENT=$1
shift

TAHOE_LAFS_TOX_ARGS=$1
shift || :

# Tell pip where it can find any existing wheels.
export PIP_FIND_LINKS="file://${WHEELHOUSE_PATH}"

# It is tempting to also set PIP_NO_INDEX=1 but (a) that will cause problems
# between the time dependencies change and the images are re-built and (b) the
# upcoming-deprecations job wants to install some dependencies from github and
# it's awkward to get that done any earlier than the tox run. So, we don't
# set it.

# Get everything else installed in it, too.
"${BOOTSTRAP_VENV}"/bin/tox \
    -c "${PROJECT_ROOT}"/tox.ini \
    --workdir /tmp/tahoe-lafs.tox \
    --notest \
    -e "${TAHOE_LAFS_TOX_ENVIRONMENT}" \
    ${TAHOE_LAFS_TOX_ARGS}
.codecov.yml  (new file, 34 lines)
@@ -0,0 +1,34 @@
# Override defaults for codecov.io checks.
#
# Documentation is at https://docs.codecov.io/docs/codecov-yaml;
# reference is at https://docs.codecov.io/docs/codecovyml-reference.
#
# To validate this file, use:
#
#   curl --data-binary @.codecov.yml https://codecov.io/validate
#
# Codecov's defaults seem to leave red marks in GitHub CI checks in a
# rather arbitrary manner, probably because of non-determinism in
# coverage (see https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2891)
# and maybe because computers are bad with floating point numbers.

# Allow coverage percentage a precision of zero decimals, and round to
# the nearest number (for example, 89.957 to 90%; 89.497 to 89%).
# Coverage above 90% is good, below 80% is bad.
coverage:
  round: nearest
  range: 80..90
  precision: 0

  # Aim for a target test coverage of 90% in codecov/project check (do
  # not allow project coverage to drop below that), and allow
  # codecov/patch a threshold of 1% (allow coverage in changes to drop
  # by that much, and no less). That should be good enough for us.
  status:
    project:
      default:
        target: 90%
        threshold: 1%
    patch:
      default:
        threshold: 1%
.coveragerc  (modified, 17 lines changed)
@@ -8,3 +8,20 @@ source =
omit =
    */allmydata/test/*
    */allmydata/_version.py
parallel = True
branch = True

[report]
show_missing = True
skip_covered = True

[paths]
source =
    # It looks like this in the checkout
    src/
    # It looks like this in the Windows build environment
    D:/a/tahoe-lafs/tahoe-lafs/.tox/py*-coverage/Lib/site-packages/
    # Although sometimes it looks like this instead. Also it looks like this on macOS.
    .tox/py*-coverage/lib/python*/site-packages/
    # On some Linux CI jobs it looks like this
    /tmp/tahoe-lafs.tox/py*-coverage/lib/python*/site-packages/
.github/CONTRIBUTING.rst  (new file, 20 lines)
@@ -0,0 +1,20 @@
.. -*- coding: utf-8 -*-

.. This document is rendered on the GitHub PR creation page to guide
   contributors. It is also rendered into the overall documentation.

Contributing to Tahoe-LAFS
==========================

As an open source project,
Tahoe-LAFS welcomes contributions of many forms.

Examples of contributions include:

* `Code patches <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Patches>`_
* `Documentation improvements <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Doc>`_
* `Bug reports <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/HowToReportABug>`_
* `Patch reviews <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/PatchReviewProcess>`_

Before authoring or reviewing a patch,
please familiarize yourself with the `coding standard <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CodingStandards>`_.
.github/workflows/ci.yml  (new file, 183 lines)
@@ -0,0 +1,183 @@
name: CI

on:
  push:
    branches:
      - "master"
  pull_request:

jobs:

  coverage:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os:
          - macos-latest
          - windows-latest
        python-version:
          - 2.7

    steps:

      # Get vcpython27 on Windows + Python 2.7, to build zfec
      # extension. See https://chocolatey.org/packages/vcpython27 and
      # https://github.com/crazy-max/ghaction-chocolatey
      - name: Install MSVC 9.0 for Python 2.7 [Windows]
        if: matrix.os == 'windows-latest' && matrix.python-version == '2.7'
        uses: crazy-max/ghaction-chocolatey@v1
        with:
          args: install vcpython27

      - name: Check out Tahoe-LAFS sources
        uses: actions/checkout@v2

      - name: Fetch all history for all tags and branches
        run: git fetch --prune --unshallow

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install Python packages
        run: |
          pip install --upgrade codecov tox setuptools
          pip list

      - name: Display tool versions
        run: python misc/build_helpers/show-tool-versions.py

      - name: Run "tox -e py27-coverage"
        run: tox -e py27-coverage

      - name: Upload eliot.log in case of failure
        uses: actions/upload-artifact@v1
        if: failure()
        with:
          name: eliot.log
          path: eliot.log

      - name: Upload coverage report
        uses: codecov/codecov-action@v1
        with:
          token: abf679b6-e2e6-4b33-b7b5-6cfbd41ee691
          file: coverage.xml

  integration:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os:
          - macos-latest
          - windows-latest
        python-version:
          - 2.7

    steps:

      - name: Install Tor [Ubuntu]
        if: matrix.os == 'ubuntu-latest'
        run: sudo apt install tor

      - name: Install Tor [macOS]
        if: matrix.os == 'macos-latest'
        run: brew install tor

      - name: Install Tor [Windows]
        if: matrix.os == 'windows-latest'
        uses: crazy-max/ghaction-chocolatey@v1
        with:
          args: install tor

      - name: Install MSVC 9.0 for Python 2.7 [Windows]
        if: matrix.os == 'windows-latest' && matrix.python-version == '2.7'
        uses: crazy-max/ghaction-chocolatey@v1
        with:
          args: install vcpython27

      - name: Check out Tahoe-LAFS sources
        uses: actions/checkout@v2

      - name: Fetch all history for all tags and branches
        run: git fetch --prune --unshallow

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install Python packages
        run: |
          pip install --upgrade tox
          pip list

      - name: Display tool versions
        run: python misc/build_helpers/show-tool-versions.py

      - name: Run "tox -e integration"
        run: tox -e integration

      - name: Upload eliot.log in case of failure
        uses: actions/upload-artifact@v1
        if: failure()
        with:
          name: integration.eliot.json
          path: integration.eliot.json

  packaging:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os:
          - macos-latest
          - windows-latest
          - ubuntu-latest
        python-version:
          - 2.7

    steps:

      # Get vcpython27 on Windows + Python 2.7, to build zfec
      # extension. See https://chocolatey.org/packages/vcpython27 and
      # https://github.com/crazy-max/ghaction-chocolatey
      - name: Install MSVC 9.0 for Python 2.7 [Windows]
        if: matrix.os == 'windows-latest' && matrix.python-version == '2.7'
        uses: crazy-max/ghaction-chocolatey@v1
        with:
          args: install vcpython27

      - name: Check out Tahoe-LAFS sources
        uses: actions/checkout@v2

      - name: Fetch all history for all tags and branches
        run: git fetch --prune --unshallow

      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v1
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install Python packages
        run: |
          pip install --upgrade tox
          pip list

      - name: Display tool versions
        run: python misc/build_helpers/show-tool-versions.py

      - name: Run "tox -e pyinstaller"
        run: tox -e pyinstaller

      # This step is to ensure there are no packaging/import errors.
      - name: Test PyInstaller executable
        run: dist/Tahoe-LAFS/tahoe --version

      - name: Upload PyInstaller package
        uses: actions/upload-artifact@v2
        with:
          name: Tahoe-LAFS-${{ matrix.os }}-Python-${{ matrix.python-version }}
          path: dist/Tahoe-LAFS-*-*.*
.gitignore  (modified, 14 lines changed)
@@ -1,4 +1,4 @@
venv
venv*

# vim swap files
*.swp
@@ -9,6 +9,7 @@ venv
*~
*.DS_Store
.*.kate-swp
*.bak

/build/
/support/
@@ -27,6 +28,7 @@ Twisted-*.egg
zope.interface-*.egg
.pc

/src/allmydata/test/plugins/dropin.cache
/_trial_temp*
/_test_memory/
/tmp*
@@ -35,6 +37,7 @@ zope.interface-*.egg
/tahoe-deps/
/tahoe-deps.tar.gz
/.coverage
/.coverage.*
/.coverage.el
/coverage-html/
/miscaptures.txt
@@ -42,5 +45,12 @@ zope.interface-*.egg
/.tox/
/docs/_build/
/coverage.xml
/smoke_magicfolder/
/.pre-commit-config.local.yaml
/.hypothesis/
/eliot.log
/misc/python3/results.xml
/misc/python3/results.subunit2

# This is the plaintext of the private environment needed for some CircleCI
# operations. It's never supposed to be checked in.
secret-env-plain
.pre-commit-config.yaml  (new file, 15 lines)
@@ -0,0 +1,15 @@
repos:
  - repo: local
    hooks:
      - id: codechecks
        name: codechecks
        stages: ["commit"]
        entry: "tox -e codechecks"
        language: system
        pass_filenames: false
      - id: test
        name: test
        stages: ["push"]
        entry: "make test"
        language: system
        pass_filenames: false
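This hook configuration is consumed by the pre-commit tool. The Makefile's install-vcs-hooks target (below) wires it up through the tox-managed py36 environment; the rough manual equivalent, assuming pre-commit is already available on PATH, is:

    # Sketch only: wire the hooks above into .git/hooks by hand.
    pre-commit install                        # "commit" stage, runs "tox -e codechecks"
    pre-commit install --hook-type pre-push   # "push" stage, runs "make test"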
.travis.yml  (deleted, 59 lines)
@@ -1,59 +0,0 @@
sudo: false
language: python
cache: pip
dist: trusty
before_cache:
  - rm -f $HOME/.cache/pip/log/debug.log
git:
  depth: 1000

env:
  global:
    - TAHOE_LAFS_HYPOTHESIS_PROFILE=ci

install:
  # ~/.local/bin is on $PATH by default, but on OS-X, --user puts it elsewhere
  - if [ "${TRAVIS_OS_NAME}" = "osx" ]; then export PATH=$HOME/Library/Python/2.7/bin:$PATH; fi
  - if [ "${TRAVIS_OS_NAME}" = "osx" ]; then wget https://bootstrap.pypa.io/get-pip.py && sudo python ./get-pip.py; fi
  - pip list
  - if [ "${TRAVIS_OS_NAME}" = "osx" ]; then pip install --user --upgrade codecov tox setuptools; fi
  - if [ "${TRAVIS_OS_NAME}" = "linux" ]; then pip install --upgrade codecov tox setuptools; fi
  - echo $PATH; which python; which pip; which tox
  - python misc/build_helpers/show-tool-versions.py

script:
  - if [ "${T}" = "integration" ]; then /bin/bash integration/install-tor.sh; fi
  - tox -e ${T}

after_success:
  - if [ "${T}" = "coverage" ]; then codecov; fi

notifications:
  email: false
  irc:
    channels: "chat.freenode.net#tahoe-lafs"
    on_success: always # for testing
    on_failure: always
    template:
      - "%{repository}#%{build_number} [%{branch}: %{commit} by %{author}] %{message}"
      - "Changes: %{compare_url} | Details: %{build_url}"

matrix:
  include:
    - os: linux
      env: T=coverage LANG=en_US.UTF-8
    - os: linux
      env: T=integration LANG=en_US.UTF-8
    - os: linux
      env: T=codechecks LANG=en_US.UTF-8
    - os: linux
      env: T=pyinstaller LANG=en_US.UTF-8
    - os: linux
      env: T=py27 LANG=C
    - os: osx
      env: T=py27 LANG=en_US.UTF-8
      language: generic # "python" is not available on OS-X
    - os: osx
      env: T=pyinstaller LANG=en_US.UTF-8
      language: generic # "python" is not available on OS-X
  fast_finish: true
CREDITS  (modified, 9 lines added)
@@ -192,3 +192,12 @@ D: fix the Download! button on the Welcome page
N: Jean-Paul Calderone
E: exarkun@twistedmatrix.com
D: support SFTP public key authentication.

N: David Stainton
E: dstainton415@gmail.com
D: various bug-fixes and features

N: meejah
E: meejah@meejah.ca
P: 0xC2602803128069A7, 9D5A 2BD5 688E CB88 9DEB CD3F C260 2803 1280 69A7
D: various bug-fixes and features
Makefile  (modified, 130 lines changed)
@@ -1,16 +1,68 @@
# Tahoe-LAFS development and maintenance tasks
#
# NOTE: this Makefile requires GNU make

### Defensive settings for make:
#     https://tech.davis-hansson.com/p/make/
SHELL := bash
.ONESHELL:
.SHELLFLAGS := -xeu -o pipefail -c
.SILENT:
.DELETE_ON_ERROR:
MAKEFLAGS += --warn-undefined-variables
MAKEFLAGS += --no-builtin-rules

# Local target variables
VCS_HOOK_SAMPLES=$(wildcard .git/hooks/*.sample)
VCS_HOOKS=$(VCS_HOOK_SAMPLES:%.sample=%)
PYTHON=python
export PYTHON
PYFLAKES=flake8
export PYFLAKES
VIRTUAL_ENV=./.tox/py27
SOURCES=src/allmydata static misc setup.py
APPNAME=tahoe-lafs
TEST_SUITE=allmydata


# Top-level, phony targets

.PHONY: default
default:
	@echo "no default target"

PYTHON=python
export PYTHON
PYFLAKES=pyflakes
export PYFLAKES
.PHONY: install-vcs-hooks
## Install the VCS hooks to run linters on commit and all tests on push
install-vcs-hooks: .git/hooks/pre-commit .git/hooks/pre-push
.PHONY: uninstall-vcs-hooks
## Remove the VCS hooks
uninstall-vcs-hooks: .tox/create-venvs.log
	"./$(dir $(<))py36/bin/pre-commit" uninstall || true
	"./$(dir $(<))py36/bin/pre-commit" uninstall -t pre-push || true

SOURCES=src/allmydata static misc setup.py
APPNAME=tahoe-lafs
.PHONY: test
## Run all tests and code reports
test: .tox/create-venvs.log
	# Run codechecks first since it takes the least time to report issues early.
	tox --develop -e codechecks
	# Run all the test environments in parallel to reduce run-time
	tox --develop -p auto -e 'py27,py36,pypy27'
.PHONY: test-venv-coverage
## Run all tests with coverage collection and reporting.
test-venv-coverage:
	# Special handling for reporting coverage even when the test run fails
	rm -f ./.coverage.*
	test_exit=
	$(VIRTUAL_ENV)/bin/coverage run -m twisted.trial --rterrors --reporter=timing \
		$(TEST_SUITE) || test_exit="$$?"
	$(VIRTUAL_ENV)/bin/coverage combine
	$(VIRTUAL_ENV)/bin/coverage xml || true
	$(VIRTUAL_ENV)/bin/coverage report
	if [ ! -z "$$test_exit" ]; then exit "$$test_exit"; fi
.PHONY: test-py3-all
## Run all tests under Python 3
test-py3-all: .tox/create-venvs.log
	tox --develop -e py36 allmydata

# This is necessary only if you want to automatically produce a new
# _version.py file from the current git history (without doing a build).
@@ -18,57 +70,32 @@ APPNAME=tahoe-lafs
make-version:
	$(PYTHON) ./setup.py update_version

.built:
	$(MAKE) build

src/allmydata/_version.py:
	$(MAKE) make-version

# Build OS X pkg packages.
.PHONY: build-osx-pkg test-osx-pkg upload-osx-pkg
.PHONY: build-osx-pkg
build-osx-pkg:
	misc/build_helpers/build-osx-pkg.sh $(APPNAME)

.PHONY: test-osx-pkg
test-osx-pkg:
	$(PYTHON) misc/build_helpers/test-osx-pkg.py

.PHONY: upload-osx-pkg
upload-osx-pkg:
	@echo "uploading to ~tahoe-tarballs/OS-X-packages/ via flappserver"
	@if [ "X${BB_BRANCH}" = "Xmaster" ] || [ "X${BB_BRANCH}" = "X" ]; then \
	  flappclient --furlfile ~/.tahoe-osx-pkg-upload.furl upload-file tahoe-lafs-*-osx.pkg; \
	 else \
	  echo not uploading tahoe-lafs-osx-pkg because this is not trunk but is branch \"${BB_BRANCH}\" ; \
	fi

.PHONY: smoketest
smoketest:
	-python ./src/allmydata/test/check_magicfolder_smoke.py kill
	-rm -rf smoke_magicfolder/
	python ./src/allmydata/test/check_magicfolder_smoke.py

# code coverage-based testing is disabled temporarily, as we switch to tox.
# This will eventually be added to a tox environment. The following comments
# and variable settings are retained as notes for that future effort.

## # code coverage: install the "coverage" package from PyPI, do "make
## # test-coverage" to do a unit test run with coverage-gathering enabled, then
## # use "make coverage-output" to generate an HTML report. Also see "make
## # .coverage.el" and misc/coding_tools/coverage.el for Emacs integration.
##
## # This might need to be python-coverage on Debian-based distros.
## COVERAGE=coverage
##
## COVERAGEARGS=--branch --source=src/allmydata
##
## # --include appeared in coverage-3.4
## COVERAGE_OMIT=--include '$(CURDIR)/src/allmydata/*' --omit '$(CURDIR)/src/allmydata/test/*'

# [Failure instance: Traceback: <class 'OpenSSL.SSL.Error'>: [('SSL routines', 'ssl3_read_bytes', 'tlsv1 alert unknown ca'), ('SSL routines', 'ssl3_write_bytes', 'ssl handshake failure')]
#
# @echo "uploading to ~tahoe-tarballs/OS-X-packages/ via flappserver"
# @if [ "X${BB_BRANCH}" = "Xmaster" ] || [ "X${BB_BRANCH}" = "X" ]; then \
#   flappclient --furlfile ~/.tahoe-osx-pkg-upload.furl upload-file tahoe-lafs-*-osx.pkg; \
#  else \
#   echo not uploading tahoe-lafs-osx-pkg because this is not trunk but is branch \"${BB_BRANCH}\" ; \
# fi

.PHONY: code-checks
#code-checks: build version-and-path check-interfaces check-miscaptures -find-trailing-spaces -check-umids pyflakes
code-checks: check-interfaces check-debugging check-miscaptures -find-trailing-spaces -check-umids pyflakes

.PHONY: check-interfaces
check-interfaces:
	$(PYTHON) misc/coding_tools/check-interfaces.py 2>&1 |tee violations.txt
	@echo

@@ -188,10 +215,11 @@ clean:
	rm -f *.pkg

.PHONY: distclean
distclean: clean
distclean: clean uninstall-vcs-hooks
	rm -rf src/*.egg-info
	rm -f src/allmydata/_version.py
	rm -f src/allmydata/_appname.py
	rm -rf ./.tox/


.PHONY: find-trailing-spaces

@@ -224,3 +252,15 @@ tarballs: # delegated to tox, so setup.py can update setuptools if needed
.PHONY: upload-tarballs
upload-tarballs:
	@if [ "X${BB_BRANCH}" = "Xmaster" ] || [ "X${BB_BRANCH}" = "X" ]; then for f in dist/*; do flappclient --furlfile ~/.tahoe-tarball-upload.furl upload-file $$f; done ; else echo not uploading tarballs because this is not trunk but is branch \"${BB_BRANCH}\" ; fi


# Real targets

src/allmydata/_version.py:
	$(MAKE) make-version

.tox/create-venvs.log: tox.ini setup.py
	tox --notest -p all | tee -a "$(@)"

$(VCS_HOOKS): .tox/create-venvs.log .pre-commit-config.yaml
	"./$(dir $(<))py36/bin/pre-commit" install --hook-type $(@:.git/hooks/%=%)
418
NEWS.rst
@ -4,66 +4,372 @@
|
||||
User-Visible Changes in Tahoe-LAFS
|
||||
==================================
|
||||
|
||||
Release ?? (??)
|
||||
'''''''''''''''
|
||||
.. towncrier start line
|
||||
Release 1.14.0 (2020-03-11)
|
||||
'''''''''''''''''''''''''''
|
||||
|
||||
unedited list of changes since last release, needs cleanup, explanation,
|
||||
credit authors, limit to user-visible things
|
||||
Features
|
||||
--------
|
||||
|
||||
- Magic-Folders are now supported on macOS. (`#1432 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1432>`_)
|
||||
- Add a "tox -e draftnews" which runs towncrier in draft mode (`#2942 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2942>`_)
|
||||
- Fedora 29 is now tested as part of the project's continuous integration system. (`#2955 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2955>`_)
|
||||
- The Magic-Folder frontend now emits structured, causal logs. This makes it easier for developers to make sense of its behavior and for users to submit useful debugging information alongside problem reports. (`#2972 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2972>`_)
|
||||
- The `tahoe` CLI now accepts arguments for configuring structured logging messages which Tahoe-LAFS is being converted to emit. This change does not introduce any new defaults for on-filesystem logging. (`#2975 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2975>`_)
|
||||
- The web API now publishes streaming Eliot logs via a token-protected WebSocket at /private/logs/v1. (`#3006 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3006>`_)
|
||||
- End-to-end in-memory tests for websocket features (`#3041 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3041>`_)
|
||||
- allmydata.interfaces.IFoolscapStoragePlugin has been introduced, an extension point for customizing the storage protocol. (`#3049 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3049>`_)
|
||||
- Static storage server "announcements" in ``private/servers.yaml`` are now individually logged and ignored if they cannot be interpreted. (`#3051 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3051>`_)
|
||||
- Storage servers can now be configured to load plugins for allmydata.interfaces.IFoolscapStoragePlugin and offer them to clients. (`#3053 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3053>`_)
|
||||
- Storage clients can now be configured to load plugins for allmydata.interfaces.IFoolscapStoragePlugin and use them to negotiate with servers. (`#3054 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3054>`_)
|
||||
- The [storage] configuration section now accepts a boolean *anonymous* item to enable or disable anonymous storage access. The default behavior remains unchanged. (`#3184 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3184>`_)
|
||||
- Enable the helper when creating a node with `tahoe create-node --helper` (`#3235 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3235>`_)
|
||||
|
||||
|
||||
Bug Fixes
|
||||
---------
|
||||
|
||||
- refactor initialization code to be more async-friendly (`#2870 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2870>`_)
|
||||
- Configuration-checking code wasn't being called due to indenting (`#2935 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2935>`_)
|
||||
- refactor configuration handling out of Node into _Config (`#2936 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2936>`_)
|
||||
- "tox -e codechecks" no longer dirties the working tree. (`#2941 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2941>`_)
|
||||
- Updated the Tor release key, used by the integration tests. (`#2944 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2944>`_)
|
||||
- `tahoe backup` no longer fails with an unhandled exception when it encounters a special file (device, fifo) in the backup source. (`#2950 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2950>`_)
|
||||
- Magic-Folders now creates spurious conflict files in fewer cases. In particular, if files are added to the folder while a client is offline, that client will not create conflict files for all those new files when it starts up. (`#2965 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2965>`_)
|
||||
- The confusing and misplaced sub-command group headings in `tahoe --help` output have been removed. (`#2976 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2976>`_)
|
||||
- The Magic-Folder frontend is now more responsive to subtree changes on Windows. (`#2997 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2997>`_)
|
||||
- remove ancient bundled jquery and d3, and the "dowload timeline" feature they support (`#3228 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3228>`_)
|
||||
|
||||
|
||||
Dependency/Installation Changes
|
||||
-------------------------------
|
||||
|
||||
- Tahoe-LAFS no longer makes start-up time assertions about the versions of its dependencies. It is the responsibility of the administrator of the installation to ensure the correct version of dependencies are supplied. (`#2749 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2749>`_)
|
||||
- Tahoe-LAFS now depends on Twisted 16.6 or newer. (`#2957 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2957>`_)
|
||||
|
||||
|
||||
Removed Features
|
||||
----------------
|
||||
|
||||
- "tahoe rm", an old alias for "tahoe unlink", has been removed. (`#1827 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1827>`_)
|
||||
- The direct dependencies on pyutil and zbase32 have been removed. (`#2098 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2098>`_)
|
||||
- Untested and unmaintained code for running Tahoe-LAFS as a Windows service has been removed. (`#2239 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2239>`_)
|
||||
- The redundant "pypywin32" dependency has been removed. (`#2392 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2392>`_)
|
||||
- Fedora 27 is no longer tested as part of the project's continuous integration system. (`#2955 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2955>`_)
|
||||
- "tahoe start", "tahoe daemonize", "tahoe restart", and "tahoe stop" are now deprecated in favor of using "tahoe run", possibly with a third-party process manager. (`#3273 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3273>`_)
|
||||
|
||||
|
||||
Other Changes
|
||||
-------------
|
||||
|
||||
- Tahoe-LAFS now tests for PyPy compatibility on CI. (`#2479 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2479>`_)
|
||||
- Tahoe-LAFS now requires Twisted 18.4.0 or newer. (`#2771 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2771>`_)
|
||||
- Tahoe-LAFS now uses towncrier to maintain the NEWS file. (`#2908 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2908>`_)
|
||||
- The release process document has been updated. (`#2920 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2920>`_)
|
||||
- allmydata.test.test_system.SystemTest is now more reliable with respect to bound address collisions. (`#2933 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2933>`_)
|
||||
- The Tox configuration has been fixed to work around a problem on Windows CI. (`#2956 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2956>`_)
|
||||
- The PyInstaller CI job now works around a pip/pyinstaller incompatibility. (`#2958 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2958>`_)
|
||||
- Some CI jobs for integration tests have been moved from TravisCI to CircleCI. (`#2959 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2959>`_)
|
||||
- Several warnings from a new release of pyflakes have been fixed. (`#2960 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2960>`_)
|
||||
- Some Slackware 14.2 continuous integration problems have been resolved. (`#2961 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2961>`_)
|
||||
- Some macOS continuous integration failures have been fixed. (`#2962 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2962>`_)
|
||||
- The NoNetworkGrid implementation has been somewhat improved. (`#2966 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2966>`_)
|
||||
- A bug in the test suite for the create-alias command has been fixed. (`#2967 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2967>`_)
|
||||
- The integration test suite has been updated to use pytest-twisted instead of deprecated pytest APIs. (`#2968 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2968>`_)
|
||||
- The magic-folder integration test suite now performs more aggressive cleanup of the processes it launches. (`#2969 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2969>`_)
|
||||
- The integration tests now correctly document the `--keep-tempdir` option. (`#2970 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2970>`_)
|
||||
- A misuse of super() in the integration tests has been fixed. (`#2971 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2971>`_)
|
||||
- Several utilities to facilitate the use of the Eliot causal logging library have been introduced. (`#2973 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2973>`_)
|
||||
- The Windows CI configuration has been tweaked. (`#2974 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2974>`_)
|
||||
- The Magic-Folder frontend has had additional logging improvements. (`#2977 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2977>`_)
|
||||
- (`#2981 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2981>`_, `#2982 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2982>`_)
|
||||
- Added a simple syntax checker so that once a file has reached python3 compatibility, it will not regress. (`#3001 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3001>`_)
|
||||
- Converted all uses of the print statement to the print function in the ./misc/ directory. (`#3002 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3002>`_)
|
||||
- The contributor guidelines are now linked from the GitHub pull request creation page. (`#3003 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3003>`_)
|
||||
- Updated the testing code to use the print function instead of the print statement. (`#3008 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3008>`_)
|
||||
- Replaced print statement with print function for all tahoe_* scripts. (`#3009 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3009>`_)
|
||||
- Replaced all remaining instances of the print statement with the print function. (`#3010 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3010>`_)
|
||||
- Replace StringIO imports with six.moves. (`#3011 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3011>`_)
|
||||
- Updated all Python files to use PEP-3110 exception syntax for Python3 compatibility. (`#3013 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3013>`_)
|
||||
- Update raise syntax for Python3 compatibility. (`#3014 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3014>`_)
|
||||
- Updated instances of octal literals to use the format 0o123 for Python3 compatibility. (`#3015 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3015>`_)
|
||||
- allmydata.test.no_network, allmydata.test.test_system, and allmydata.test.web.test_introducer are now more reliable with respect to bound address collisions. (`#3016 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3016>`_)
|
||||
- Removed tuple unpacking from function and lambda definitions for Python3 compatibility. (`#3019 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3019>`_)
|
||||
- Updated Python2 long numeric literals for Python3 compatibility. (`#3020 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3020>`_)
|
||||
- CircleCI jobs are now faster as a result of pre-building configured Docker images for the CI jobs. (`#3024 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3024>`_)
|
||||
- Removed use of backticks for "repr" for Python3 compatibility. (`#3027 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3027>`_)
|
||||
- Updated string literal syntax for Python3 compatibility. (`#3028 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3028>`_)
|
||||
- Updated CI to enforce Python3 syntax for entire repo. (`#3030 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3030>`_)
|
||||
- Replaced pycryptopp with cryptography. (`#3031 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3031>`_)
|
||||
- All old-style classes ported to new-style. (`#3042 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3042>`_)
|
||||
- Whitelisted "/bin/mv" as command for codechecks performed by tox. This fixes a current warning and prevents future errors (for tox 4). (`#3043 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3043>`_)
|
||||
- Progress towards Python 3 compatibility is now visible at <https://tahoe-lafs.github.io/tahoe-depgraph/>. (`#3152 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3152>`_)
|
||||
- Collect coverage information from integration tests (`#3234 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3234>`_)
|
||||
- NixOS is now a supported Tahoe-LAFS platform. (`#3266 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3266>`_)
|
||||
|
||||
|
||||
Misc/Other
|
||||
----------
|
||||
|
||||
- `#1893 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1893>`_, `#2266 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2266>`_, `#2283 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2283>`_, `#2766 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2766>`_, `#2980 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2980>`_, `#2985 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2985>`_, `#2986 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2986>`_, `#2987 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2987>`_, `#2988 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2988>`_, `#2989 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2989>`_, `#2990 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2990>`_, `#2991 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2991>`_, `#2992 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2992>`_, `#2995 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2995>`_, `#3000 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3000>`_, `#3004 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3004>`_, `#3005 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3005>`_, `#3007 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3007>`_, `#3012 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3012>`_, `#3017 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3017>`_, `#3021 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3021>`_, `#3023 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3023>`_, `#3025 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3025>`_, `#3026 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3026>`_, `#3029 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3029>`_, `#3036 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3036>`_, `#3038 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3038>`_, `#3048 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3048>`_, `#3086 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3086>`_, `#3097 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3097>`_, `#3111 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3111>`_, `#3118 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3118>`_, `#3119 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3119>`_, `#3227 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3227>`_, `#3229 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3229>`_, `#3232 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3232>`_, `#3233 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3233>`_, `#3237 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3237>`_, `#3238 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3238>`_, `#3239 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3239>`_, `#3240 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3240>`_, `#3242 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3242>`_, `#3243 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3243>`_, `#3245 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3245>`_, `#3246 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3246>`_, `#3248 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3248>`_, `#3250 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3250>`_, `#3252 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3252>`_, `#3255 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3255>`_, `#3256 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3256>`_, `#3259 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3259>`_, `#3261 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3261>`_, `#3262 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3262>`_, `#3263 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3263>`_, `#3264 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3264>`_, `#3265 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3265>`_, `#3267 
<https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3267>`_, `#3268 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3268>`_, `#3271 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3271>`_, `#3272 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3272>`_, `#3274 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3274>`_, `#3275 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3275>`_, `#3276 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3276>`_, `#3279 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3279>`_, `#3281 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3281>`_, `#3282 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3282>`_, `#3285 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3285>`_
|
||||
|
||||
|
||||
Release 1.13.0 (05-August-2018)
|
||||
'''''''''''''''''''''''''''''''
|
||||
|
||||
New Features
|
||||
------------
|
||||
|
||||
The ``tahoe list-aliases`` command gained the ``--readonly-uri``
|
||||
option in `PR400`_, which lists read-only capabilities (the default
|
||||
shows read/write capabilities if available). This command also gained
|
||||
a ``--json`` option in `PR452`_, providing machine-readable output.
|
||||
|
||||
A new command ``tahoe status`` is added, showing some statistics and
|
||||
currently active operations (similar to the ``/status`` page in the
|
||||
Web UI). See also `PR502`_.
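For example, a minimal invocation of these new commands against a running client node (the ``venv/bin/`` prefix follows the installation examples elsewhere in this document; adjust it for your own environment)::

    % venv/bin/tahoe list-aliases --readonly-uri
    % venv/bin/tahoe list-aliases --json
    % venv/bin/tahoe status

The exact output depends on the aliases defined on the node and on any operations in progress.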
|
||||
|
||||
Immutable uploads now use the "servers of happiness" algorithm for
|
||||
uploading shares. This means better placement of shares on available
|
||||
servers. See `PR416`_.
|
||||
|
||||
To join a new client to a grid, the command ``tahoe invite`` was
|
||||
added. This uses `magic wormhole`_ to connect two computers and
|
||||
exchange the required information to start the client. The "client
|
||||
side" of this command is the also new option ``tahoe
|
||||
create-client --join=``. Together, this provides a way to provision a
|
||||
new client without having to securely transmit the fURL and other
|
||||
details. `PR418`_
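A hedged sketch of that provisioning flow (the nickname and wormhole code below are placeholders, and the exact arguments accepted by ``tahoe invite`` may differ; consult ``tahoe invite --help``)::

    # on a machine that is already attached to the grid
    % venv/bin/tahoe invite <nickname-for-the-new-client>
    # ... prints a one-time wormhole code ...

    # on the machine being provisioned
    % venv/bin/tahoe create-client --join=<wormhole-code>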
|
||||
|
||||
``tahoe backup`` now reports progress. `PR474`_
|
||||
|
||||
The ``tub.port=`` option can now accept ``listen:i2p`` or
|
||||
``listen:tor`` options to use popular anonymity networks with storage
|
||||
servers. See `PR437`_
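For example, a minimal ``tahoe.cfg`` sketch enabling a Tor listener (the surrounding ``[node]`` section is assumed, and additional Tor-related configuration such as the ``tor`` extra may also be required; see the configuration documentation)::

    [node]
    tub.port = listen:tor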
|
||||
|
||||
The place where storage servers put shares (the "storage path") is now
|
||||
configurable (`PR472`_).
|
||||
|
||||
A PyInstaller-based build is now available (`PR421`_). A "Docker
|
||||
compose" setup for development purposes is now available (`PR445`_).
|
||||
|
||||
There is now a recommended workflow for Zcash-based donations to support
|
||||
storage server operators (`PR506`_).
|
||||
|
||||
Bug Fixes in Core
|
||||
-----------------
|
||||
|
||||
Some bugs with pidfile handling were fixed (`PR440`_ and `PR450`_)
|
||||
meaning invalid pidfiles are now deleted. Error-messages related to
|
||||
``tahoe.cfg`` now include the full path to the file. `PR501`_ fixes
|
||||
"address already in use" test failures. `PR502`_ fixes ticket #2926
|
||||
("tahoe status" failures). `PR487`_ fixes ticket #1455 (setting
|
||||
``X-Frame-Options: DENY``)
|
||||
|
||||
|
||||
Web UI Changes
|
||||
--------------
|
||||
|
||||
We set the "Referrer-Policy: no-referrer" header on all requests. The
|
||||
Welcome page now understands the JSON option (`PR430`_) and OPTIONS
|
||||
requests are handled (`PR447`_).
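One way to spot-check these behaviours against a running gateway (the URL assumes the default web port of ``3456`` on localhost; adjust for your node's ``web.port`` setting)::

    % curl -sI http://127.0.0.1:3456/ | grep -i referrer-policy
    % curl -si -X OPTIONS http://127.0.0.1:3456/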
|
||||
|
||||
|
||||
Magic Folder Changes
|
||||
--------------------
|
||||
|
||||
Multiple magic-folders in a single Tahoe client are now
|
||||
supported. Bugs with ``.backup`` files have been fixed, meaning
|
||||
spurious ``.backup`` files will be produced less often (`PR448`_,
|
||||
`PR475`_). Handling of default umask on new magic-folder files is
|
||||
fixed in `PR458`_. The user mtime value is now correctly preserved
|
||||
(`PR457`_).
|
||||
|
||||
A bug in ``tahoe magic-folder status`` causing active operations to
|
||||
sometimes not show up is fixed (`PR461`_). If a directory is missing,
|
||||
it is created (`PR492`_).
|
||||
|
||||
|
||||
Raw Pull Requests
|
||||
-----------------
|
||||
|
||||
In total, 50 Pull Requests were merged for this release, including
|
||||
contributions of code or review from 15 different GitHub users. Thanks
|
||||
everyone! A complete list of these PRs and contributions:
|
||||
|
||||
`PR380`_: `daira`_
|
||||
`PR400`_: `meejah`_ (with `warner`_)
|
||||
`PR403`_: `meejah`_
|
||||
`PR405`_: `meejah`_ (with `warner`_)
|
||||
`PR406`_: `meejah`_ (with `warner`_)
|
||||
`PR407`_: `david415`_ (with `meejah`_, `warner`_)
|
||||
`PR409`_: `str4d`_ (with `warner`_)
|
||||
`PR410`_: `tpltnt`_ (with `warner`_)
|
||||
`PR411`_: `tpltnt`_ (with `warner`_, `meejah`_)
|
||||
`PR412`_: `tpltnt`_ (with `warner`_)
|
||||
`PR414`_: `tpltnt`_ (with `meejah`_, `warner`_)
|
||||
`PR416`_: `david415`_, `meejah`_, `markberger`_, `warner`_
|
||||
`PR417`_: `meejah`_ (with `pataquets`_, `warner`_)
|
||||
`PR418`_: `meejah`_ (with `crwood`_, `exarkun`_, `warner`_)
|
||||
`PR419`_: `tpltnt`_ (with `warner`_)
|
||||
`PR420`_: `ValdikSS`_ (with `warner`_)
|
||||
`PR421`_: `crwood`_ (with `meejah`_, `warner`_)
|
||||
`PR423`_: `warner`_
|
||||
`PR428`_: `warner`_
|
||||
`PR429`_: `exarkun`_ (with `warner`_)
|
||||
`PR430`_: `david415`_, `exarkun`_ (with `warner`_)
|
||||
`PR432`_: `exarkun`_ (with `meejah`_)
|
||||
`PR433`_: `exarkun`_ (with `warner`_)
|
||||
`PR434`_: `exarkun`_ (with `warner`_)
|
||||
`PR437`_: `warner`_
|
||||
`PR438`_: `warner`_ (with `meejah`_)
|
||||
`PR440`_: `exarkun`_, `lpirl`_ (with `meejah`_)
|
||||
`PR444`_: `AnBuKu`_ (with `warner`_)
|
||||
`PR445`_: `bookchin`_ (with `warner`_)
|
||||
`PR447`_: `meejah`_ (with `tpltnt`_, `meejah`_)
|
||||
`PR448`_: `meejah`_ (with `warner`_)
|
||||
`PR450`_: `exarkun`_, `meejah`_, `lpirl`_
|
||||
`PR452`_: `meejah`_ (with `tpltnt`_)
|
||||
`PR453`_: `meejah`_
|
||||
`PR454`_: `meejah`_ (with `tpltnt`_, `meejah`_, `warner`_)
|
||||
`PR455`_: `tpltnt`_ (with `meejah`_)
|
||||
`PR456`_: `meejah`_ (with `meejah`_)
|
||||
`PR457`_: `meejah`_ (with `crwood`_, `tpltnt`_)
|
||||
`PR458`_: `meejah`_ (with `tpltnt`_)
|
||||
`PR460`_: `tpltnt`_ (with `exarkun`_, `meejah`_)
|
||||
`PR462`_: `meejah`_ (with `crwood`_)
|
||||
`PR464`_: `meejah`_
|
||||
`PR470`_: `meejah`_ (with `exarkun`_, `tpltnt`_, `warner`_)
|
||||
`PR472`_: `exarkun`_, `meskio`_
|
||||
`PR474`_: `exarkun`_
|
||||
`PR475`_: `meejah`_ (with `exarkun`_)
|
||||
`PR482`_: `crwood`_ (with `warner`_)
|
||||
`PR485`_: `warner`_
|
||||
`PR486`_: `exarkun`_ (with `warner`_)
|
||||
`PR487`_: `exarkun`_ (with `tpltnt`_)
|
||||
`PR489`_: `exarkun`_
|
||||
`PR490`_: `exarkun`_
|
||||
`PR491`_: `exarkun`_ (with `meejah`_)
|
||||
`PR492`_: `exarkun`_ (with `meejah`_, `tpltnt`_)
|
||||
`PR493`_: `exarkun`_ (with `meejah`_)
|
||||
`PR494`_: `exarkun`_ (with `meejah`_)
|
||||
`PR497`_: `meejah`_ (with `multikatt`_, `exarkun`_)
|
||||
`PR499`_: `exarkun`_ (with `meejah`_)
|
||||
`PR501`_: `exarkun`_ (with `meejah`_)
|
||||
`PR502`_: `exarkun`_ (with `meejah`_)
|
||||
`PR506`_: `exarkun`_ (with `crwood`_, `nejucomo`_)
|
||||
|
||||
|
||||
Developer and Internal Changes
|
||||
------------------------------
|
||||
|
||||
People hacking on Tahoe-LAFS code will be interested in some internal
|
||||
improvements which shouldn't have any user-visible effects:
|
||||
|
||||
* add "tahoe list-aliases --readonly-uri" PR400
|
||||
* web: return "Referrer-Policy: no-referrer" on all requests
|
||||
* deps: use stdlib "json" instead of external "simplejson" #2766 PR405
|
||||
* internal: use @implementer instead of implements PR406
|
||||
* add "tahoe status" command
|
||||
* other PRs: 407 412 410 419 423
|
||||
* deps: require txi2p>=0.3.2 to work around TLS who-is-client issue #2861 PR409
|
||||
* complain more loudly in setup.py under py3 PR414
|
||||
* immutable upload now uses happiness-metric algorithm #1382 PR416
|
||||
* deps: now need libyaml-dev from system before build PR420
|
||||
* rename "filesystem" to "file store" #2345 PR380
|
||||
* test "tahoe list-aliases --readonly-uri" #2863 PR403
|
||||
* replace deprecated twisted.web.client with treq #2857 PR428
|
||||
* internal: improve happiness integration test #2895 PR432
|
||||
* web internal: refactor response-format (?t=) logic #2893 PR429
|
||||
* web: add JSON welcome page #2476 PR430
|
||||
* improve/stablize some test coverage #2891
|
||||
* internal: fix pyflakes issues #2898 PR434
|
||||
* internal: setup.py use find_packages #2897 PR433
|
||||
* add magic-wormhole -based "tahoe invite" and "tahoe create-client --join="
|
||||
#126 PR418
|
||||
* internal: ValueOrderedDict fixes #2891
|
||||
* add PyInstaller build #2729 PR421
|
||||
* internal: remove unnused NumDict #2891 PR438
|
||||
* internal: setup.py use python_requires= so tox3 works #2876
|
||||
* fix crash when starting with invalid pidfile PR440
|
||||
* delete invalid pidfile #1680 PR450
|
||||
* update debian packaging notes after 'stretch' release PR444
|
||||
* add Dockerfile PR445
|
||||
* internal: rewrite tahoe stop/start/daemonize refs #1148 #275 #1121 #1377 #2149 #719 PR417
|
||||
* add tub.port=listen:i2p / listen:tor, refs #2889 PR437
|
||||
* handle multiple magic-folders
|
||||
* #2880 (something involving magic-folders) PR448
|
||||
* internal magic-folder test improvement PR453
|
||||
* deps: twisted>=16.4.0 for "python -m twisted.trial" PR454
|
||||
* web: handle OPTIONS request #1307 PR447
|
||||
* internal: add docs links to RFCs/etc PR456
|
||||
* docs: #455
|
||||
* magic-folder: don't set +x in default umask #2881 PR458
|
||||
* internal: pytest changes PR462
|
||||
* deps: pin pypiwin32 to 219 until upstream bug resolved PR464
|
||||
TODO: can we remove this now? pypiwin32 is now at 223
|
||||
* magic-folder: preserve user mtime #2882 PR457
|
||||
* internal: upload appveyor generated wheels as artifacts #2903
|
||||
* use secure mkstemp() PR460
|
||||
* add "tahoe list-aliases --json" PR452
|
||||
* tahoe.cfg: allow storage path to be configured #2045 PR472
|
||||
* magic-folder: something about status #2885
|
||||
* deps: setuptools >=28.8.0 for something PR470
|
||||
* tahoe-backup: report progress #1587 PR474
|
||||
* internal: fix tox-vs-setuptools-upgrade #2910
|
||||
* internal: skip some unicode tests on non-unicode platforms #2912
|
||||
* internal: tox: pre-install Incremental to workaround setuptools bug #2913
|
||||
* internal: fix PyInstaller builds PR482
|
||||
* internal: fix PyInstaller builds `PR482`_
|
||||
* internal: use @implementer instead of implements `PR406`_
|
||||
* internal: improve happiness integration test #2895 `PR432`_
|
||||
* web internal: refactor response-format (?t=) logic #2893 `PR429`_
|
||||
* internal: fix pyflakes issues #2898 `PR434`_
|
||||
* internal: setup.py use find_packages #2897 `PR433`_
|
||||
* internal: ValueOrderedDict fixes #2891
|
||||
* internal: remove unnused NumDict #2891 `PR438`_
|
||||
* internal: setup.py use python_requires= so tox3 works #2876
|
||||
* internal: rewrite tahoe stop/start/daemonize refs #1148 #275 #1121 #1377 #2149 #719 `PR417`_
|
||||
* internal: add docs links to RFCs/etc `PR456`_
|
||||
* internal: magic-folder test improvement `PR453`_
|
||||
* internal: pytest changes `PR462`_
|
||||
* internal: upload appveyor generated wheels as artifacts #2903
|
||||
* internal: fix tox-vs-setuptools-upgrade #2910
|
||||
* deps: require txi2p>=0.3.2 to work around TLS who-is-client issue #2861 `PR409`_
|
||||
* deps: now need libyaml-dev from system before build `PR420`_
|
||||
* deps: twisted>=16.4.0 for "python -m twisted.trial" `PR454`_
|
||||
* deps: pin pypiwin32 to 219 until upstream bug resolved `PR464`_
|
||||
* deps: setuptools >=28.8.0 for something `PR470`_
|
||||
* deps: use stdlib "json" instead of external "simplejson" #2766 `PR405`_
|
||||
* complain more loudly in setup.py under py3 `PR414`_
|
||||
* rename "filesystem" to "file store" #2345 `PR380`_
|
||||
* replace deprecated twisted.web.client with treq #2857 `PR428`_
|
||||
* improve/stablize some test coverage #2891
|
||||
* TODO: can we remove this now? pypiwin32 is now at 223
|
||||
* use secure mkstemp() `PR460`_
|
||||
* test "tahoe list-aliases --readonly-uri" #2863 `PR403`_
|
||||
* #455: remove outdated comment
|
||||
* `PR407`_ fix stopService calls
|
||||
* `PR410`_ explicit python2.7 virtualenv
|
||||
* `PR419`_ fix list of supported OSes
|
||||
* `PR423`_ switch travis to a supported Ubuntu
|
||||
* deps: no longer declare a PyCrypto dependency (actual use vanished long ago) `PR514`_
|
||||
|
||||
|
||||
|
||||
.. _PR380: https://github.com/tahoe-lafs/tahoe-lafs/pull/380
|
||||
.. _PR400: https://github.com/tahoe-lafs/tahoe-lafs/pull/400
|
||||
.. _PR403: https://github.com/tahoe-lafs/tahoe-lafs/pull/403
|
||||
.. _PR405: https://github.com/tahoe-lafs/tahoe-lafs/pull/405
|
||||
.. _PR406: https://github.com/tahoe-lafs/tahoe-lafs/pull/406
|
||||
.. _PR407: https://github.com/tahoe-lafs/tahoe-lafs/pull/407
|
||||
.. _PR409: https://github.com/tahoe-lafs/tahoe-lafs/pull/409
|
||||
.. _PR410: https://github.com/tahoe-lafs/tahoe-lafs/pull/410
|
||||
.. _PR412: https://github.com/tahoe-lafs/tahoe-lafs/pull/412
|
||||
.. _PR414: https://github.com/tahoe-lafs/tahoe-lafs/pull/414
|
||||
.. _PR416: https://github.com/tahoe-lafs/tahoe-lafs/pull/416
|
||||
.. _PR417: https://github.com/tahoe-lafs/tahoe-lafs/pull/417
|
||||
.. _PR418: https://github.com/tahoe-lafs/tahoe-lafs/pull/418
|
||||
.. _PR419: https://github.com/tahoe-lafs/tahoe-lafs/pull/419
|
||||
.. _PR420: https://github.com/tahoe-lafs/tahoe-lafs/pull/420
|
||||
.. _PR421: https://github.com/tahoe-lafs/tahoe-lafs/pull/421
|
||||
.. _PR423: https://github.com/tahoe-lafs/tahoe-lafs/pull/423
|
||||
.. _PR428: https://github.com/tahoe-lafs/tahoe-lafs/pull/428
|
||||
.. _PR429: https://github.com/tahoe-lafs/tahoe-lafs/pull/429
|
||||
.. _PR430: https://github.com/tahoe-lafs/tahoe-lafs/pull/430
|
||||
.. _PR432: https://github.com/tahoe-lafs/tahoe-lafs/pull/432
|
||||
.. _PR433: https://github.com/tahoe-lafs/tahoe-lafs/pull/433
|
||||
.. _PR434: https://github.com/tahoe-lafs/tahoe-lafs/pull/434
|
||||
.. _PR437: https://github.com/tahoe-lafs/tahoe-lafs/pull/437
|
||||
.. _PR438: https://github.com/tahoe-lafs/tahoe-lafs/pull/438
|
||||
.. _PR440: https://github.com/tahoe-lafs/tahoe-lafs/pull/440
|
||||
.. _PR444: https://github.com/tahoe-lafs/tahoe-lafs/pull/444
|
||||
.. _PR445: https://github.com/tahoe-lafs/tahoe-lafs/pull/445
|
||||
.. _PR447: https://github.com/tahoe-lafs/tahoe-lafs/pull/447
|
||||
.. _PR448: https://github.com/tahoe-lafs/tahoe-lafs/pull/448
|
||||
.. _PR450: https://github.com/tahoe-lafs/tahoe-lafs/pull/450
|
||||
.. _PR452: https://github.com/tahoe-lafs/tahoe-lafs/pull/452
|
||||
.. _PR453: https://github.com/tahoe-lafs/tahoe-lafs/pull/453
|
||||
.. _PR454: https://github.com/tahoe-lafs/tahoe-lafs/pull/454
|
||||
.. _PR456: https://github.com/tahoe-lafs/tahoe-lafs/pull/456
|
||||
.. _PR457: https://github.com/tahoe-lafs/tahoe-lafs/pull/457
|
||||
.. _PR458: https://github.com/tahoe-lafs/tahoe-lafs/pull/458
|
||||
.. _PR460: https://github.com/tahoe-lafs/tahoe-lafs/pull/460
|
||||
.. _PR462: https://github.com/tahoe-lafs/tahoe-lafs/pull/462
|
||||
.. _PR464: https://github.com/tahoe-lafs/tahoe-lafs/pull/464
|
||||
.. _PR470: https://github.com/tahoe-lafs/tahoe-lafs/pull/470
|
||||
.. _PR472: https://github.com/tahoe-lafs/tahoe-lafs/pull/472
|
||||
.. _PR474: https://github.com/tahoe-lafs/tahoe-lafs/pull/474
|
||||
.. _PR482: https://github.com/tahoe-lafs/tahoe-lafs/pull/482
|
||||
.. _PR502: https://github.com/tahoe-lafs/tahoe-lafs/pull/502
|
||||
.. _PR506: https://github.com/tahoe-lafs/tahoe-lafs/pull/506
|
||||
.. _PR514: https://github.com/tahoe-lafs/tahoe-lafs/pull/514
|
||||
.. _AnBuKu: https://github.com/AnBuKu
|
||||
.. _ValdikSS: https://github.com/ValdikSS
|
||||
.. _bookchin: https://github.com/bookchin
|
||||
.. _crwood: https://github.com/crwood
|
||||
.. _nejucomo: https://github.com/nejucomo
|
||||
.. _daira: https://github.com/daira
|
||||
.. _david415: https://github.com/david415
|
||||
.. _exarkun: https://github.com/exarkun
|
||||
.. _lpirl: https://github.com/lpirl
|
||||
.. _markberger: https://github.com/markberger
|
||||
.. _meejah: https://github.com/meejah
|
||||
.. _meskio: https://github.com/meskio
|
||||
.. _multikatt: https://github.com/multikatt
|
||||
.. _pataquets: https://github.com/pataquets
|
||||
.. _str4d: https://github.com/str4d
|
||||
.. _tpltnt: https://github.com/tpltnt
|
||||
.. _warner: https://github.com/warner
|
||||
|
||||
|
||||
|
||||
|
||||
|
12
README.rst
@ -10,7 +10,8 @@ function correctly, preserving your privacy and security.
|
||||
For full documentation, please see
|
||||
http://tahoe-lafs.readthedocs.io/en/latest/ .
|
||||
|
||||
|readthedocs| |travis| |codecov|
|
||||
|Contributor Covenant| |readthedocs| |travis| |circleci| |codecov|
|
||||
|
||||
|
||||
INSTALLING
|
||||
==========
|
||||
@ -69,7 +70,7 @@ up your first Tahoe-LAFS node.
|
||||
LICENCE
|
||||
=======
|
||||
|
||||
Copyright 2006-2016 The Tahoe-LAFS Software Foundation
|
||||
Copyright 2006-2018 The Tahoe-LAFS Software Foundation
|
||||
|
||||
You may use this package under the GNU General Public License, version 2 or,
|
||||
at your option, any later version. You may use this package under the
|
||||
@ -99,6 +100,13 @@ slides.
|
||||
:alt: build status
|
||||
:target: https://travis-ci.org/tahoe-lafs/tahoe-lafs
|
||||
|
||||
.. |circleci| image:: https://circleci.com/gh/tahoe-lafs/tahoe-lafs.svg?style=svg
|
||||
:target: https://circleci.com/gh/tahoe-lafs/tahoe-lafs
|
||||
|
||||
.. |codecov| image:: https://codecov.io/github/tahoe-lafs/tahoe-lafs/coverage.svg?branch=master
|
||||
:alt: test coverage percentage
|
||||
:target: https://codecov.io/github/tahoe-lafs/tahoe-lafs?branch=master
|
||||
|
||||
.. |Contributor Covenant| image:: https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg
|
||||
:alt: code of conduct
|
||||
:target: docs/CODE_OF_CONDUCT.md
|
||||
|
54
docs/CODE_OF_CONDUCT.md
Normal file
@ -0,0 +1,54 @@
|
||||
# Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project, and in the interest of
|
||||
fostering an open and welcoming community, we pledge to respect all people who
|
||||
contribute through reporting issues, posting feature requests, updating
|
||||
documentation, submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project a harassment-free
|
||||
experience for everyone, regardless of level of experience, gender, gender
|
||||
identity and expression, sexual orientation, disability, personal appearance,
|
||||
body size, race, ethnicity, age, religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing other's private information, such as physical or electronic
|
||||
addresses, without explicit permission
|
||||
* Other unethical or unprofessional conduct
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
By adopting this Code of Conduct, project maintainers commit themselves to
|
||||
fairly and consistently applying these principles to every aspect of managing
|
||||
this project. Project maintainers who do not follow or enforce the Code of
|
||||
Conduct may be permanently removed from the project team.
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting a project maintainer (see below). All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. Maintainers are
|
||||
obligated to maintain confidentiality with regard to the reporter of an
|
||||
incident.
|
||||
|
||||
The following community members have made themselves available for conduct issues:
|
||||
|
||||
- Jean-Paul Calderone (jean-paul at leastauthority dot com)
|
||||
- meejah (meejah at meejah dot ca)
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||
version 1.3.0, available at
|
||||
[http://contributor-covenant.org/version/1/3/0/][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/3/0/
|
@ -68,6 +68,8 @@ compile the dependencies yourself (instead of using ``--find-links`` to take
|
||||
advantage of the pre-compiled ones we host), you'll also need to install
|
||||
Xcode and its command-line tools.
|
||||
|
||||
**Note** that Tahoe-LAFS depends on `openssl 1.1.1c` or greater.
|
||||
|
||||
Python 2.7
|
||||
----------
|
||||
|
||||
@ -92,7 +94,7 @@ Many Python installations already include ``pip``, but in case yours does
|
||||
not, get it with the `pip install instructions`_::
|
||||
|
||||
% pip --version
|
||||
pip 8.1.1 from ... (python 2.7)
|
||||
pip 10.0.1 from ... (python 2.7)
|
||||
|
||||
.. _pip install instructions: https://pip.pypa.io/en/stable/installing/
|
||||
|
||||
@ -104,7 +106,7 @@ instructions from the `virtualenv documentation`_::
|
||||
|
||||
|
||||
% virtualenv --version
|
||||
15.0.1
|
||||
15.1.0
|
||||
|
||||
.. _virtualenv documentation: https://virtualenv.pypa.io/en/latest/installation.html
|
||||
|
||||
@ -121,6 +123,9 @@ On Debian/Ubuntu-derived systems, the necessary packages are ``python-dev``,
|
||||
RPM-based system (like Fedora) these may be named ``python-devel``, etc,
|
||||
instead, and can be installed with ``yum`` or ``rpm``.
|
||||
|
||||
**Note** that Tahoe-LAFS depends on `openssl 1.1.1c` or greater.
|
||||
|
||||
|
||||
Install the Latest Tahoe-LAFS Release
|
||||
=====================================
|
||||
|
||||
@ -158,7 +163,7 @@ from PyPI with ``venv/bin/pip install tahoe-lafs``. After installation, run
|
||||
Successfully installed ...
|
||||
|
||||
% venv/bin/tahoe --version
|
||||
tahoe-lafs: 1.12.1
|
||||
tahoe-lafs: 1.14.0
|
||||
foolscap: ...
|
||||
|
||||
%
|
||||
@ -178,16 +183,27 @@ You can also install directly from the source tarball URL::
|
||||
New python executable in ~/venv/bin/python2.7
|
||||
Installing setuptools, pip, wheel...done.
|
||||
|
||||
% venv/bin/pip install https://tahoe-lafs.org/downloads/tahoe-lafs-1.12.1.tar.bz2
|
||||
Collecting https://tahoe-lafs.org/downloads/tahoe-lafs-1.12.1.tar.bz2
|
||||
% venv/bin/pip install https://tahoe-lafs.org/downloads/tahoe-lafs-1.14.0.tar.bz2
|
||||
Collecting https://tahoe-lafs.org/downloads/tahoe-lafs-1.14.0.tar.bz2
|
||||
...
|
||||
Installing collected packages: ...
|
||||
Successfully installed ...
|
||||
|
||||
% venv/bin/tahoe --version
|
||||
tahoe-lafs: 1.12.1
|
||||
tahoe-lafs: 1.14.0
|
||||
...
|
||||
|
||||
Extras
|
||||
------
|
||||
|
||||
Tahoe-LAFS provides some functionality only when explicitly requested at installation time.
|
||||
It does this using the "extras" feature of setuptools.
|
||||
You can request these extra features when running the ``pip install`` command like this::
|
||||
|
||||
% venv/bin/pip install tahoe-lafs[tor]
|
||||
|
||||
This example enables support for listening and connecting using Tor.
|
||||
The Tahoe-LAFS documentation for specific features which require an explicit install-time step will mention the "extra" that must be requested.
|
||||
|
||||
Hacking On Tahoe-LAFS
|
||||
---------------------
|
||||
@ -208,7 +224,7 @@ the additional libraries needed to run the unit tests::
|
||||
Successfully installed ...
|
||||
|
||||
% venv/bin/tahoe --version
|
||||
tahoe-lafs: 1.12.1.post34.dev0
|
||||
tahoe-lafs: 1.14.0.post34.dev0
|
||||
...
|
||||
|
||||
This way, you won't have to re-run the ``pip install`` step each time you
|
||||
@ -257,7 +273,7 @@ result in a "all tests passed" mesage::
|
||||
% tox
|
||||
GLOB sdist-make: ~/tahoe-lafs/setup.py
|
||||
py27 recreate: ~/tahoe-lafs/.tox/py27
|
||||
py27 inst: ~/tahoe-lafs/.tox/dist/tahoe-lafs-1.12.1.post8.dev0.zip
|
||||
py27 inst: ~/tahoe-lafs/.tox/dist/tahoe-lafs-1.14.0.post8.dev0.zip
|
||||
py27 runtests: commands[0] | tahoe --version
|
||||
py27 runtests: commands[1] | trial --rterrors allmydata
|
||||
allmydata.test.test_auth
|
||||
@ -270,7 +286,7 @@ result in a "all tests passed" mesage::
|
||||
PASSED (skips=7, expectedFailures=3, successes=1176)
|
||||
__________________________ summary ___________________________________
|
||||
py27: commands succeeded
|
||||
congratulations :)
|
||||
congratulations :)
|
||||
|
||||
Common Problems
|
||||
===============
|
||||
@ -284,6 +300,8 @@ Similar errors about ``openssl/crypto.h`` indicate that you are missing the
|
||||
OpenSSL development headers (``libssl-dev``). Likewise ``ffi.h`` means you
|
||||
need ``libffi-dev``.
|
||||
|
||||
**Note** that Tahoe-LAFS depends on `openssl 1.1.1c` or greater.
|
||||
|
||||
|
||||
Using Tahoe-LAFS
|
||||
================
|
||||
|
78
docs/accepting-donations.rst
Normal file
@ -0,0 +1,78 @@
|
||||
========================
|
||||
Storage Server Donations
|
||||
========================
|
||||
|
||||
The following is a configuration convention which allows users to anonymously support the operators of storage servers.
|
||||
Donations are made using `Zcash shielded transactions`_ to limit the amount of personal information incidentally conveyed.
|
||||
|
||||
Sending Donations
|
||||
=================
|
||||
|
||||
To support a storage server following this convention, you need several things:
|
||||
|
||||
* a Zcash wallet capable of sending shielded transactions
|
||||
(at least until Zcash 1.1.1 this requires a Zcash full node)
|
||||
* a shielded address with sufficient balance
|
||||
* a running Tahoe-LAFS client node which knows about the recipient storage server
|
||||
|
||||
For additional protection, you may also wish to operate your Zcash wallet and full node using Tor.
|
||||
|
||||
Find Zcash Shielded Address
|
||||
---------------------------
|
||||
|
||||
To find an address at which a storage server operator wishes to receive donations,
|
||||
launch the Tahoe-LAFS web UI::
|
||||
|
||||
$ tahoe webopen
|
||||
|
||||
Inspect the page for the storage server area.
|
||||
This will have a heading like *Connected to N of M known storage servers*.
|
||||
Each storage server in this section will have a nickname.
|
||||
A storage server with a nickname beginning with ``zcash:`` is signaling it accepts Zcash donations.
|
||||
Copy the full address following the ``zcash:`` prefix and save it for the next step.
|
||||
This is the donation address.
|
||||
Donation addresses beginning with ``z`` are shielded.
|
||||
It is recommended that all donations be sent from and to shielded addresses.
|
||||
|
||||
Send the Donation
|
||||
-----------------
|
||||
|
||||
First, select a donation amount.
|
||||
Next, use a Zcash wallet to send the selected amount to the donation address.
|
||||
Using the Zcash cli wallet, this can be done with commands like::
|
||||
|
||||
$ DONATION_ADDRESS="..."
|
||||
$ AMOUNT="..."
|
||||
$ YOUR_ADDRESS="..."
|
||||
$ zcash-cli z_sendmany $YOUR_ADDRESS "[{\"address\": \"$DONATION_ADDRESS\", \"amount\": $AMOUNT}]"
|
||||
|
||||
Remember that you must also have funds to pay the transaction fee
|
||||
(which defaults to 0.0001 ZEC in mid-2018).
|
||||
|
||||
Receiving Donations
|
||||
===================
|
||||
|
||||
To receive donations from users following this convention, you need the following:
|
||||
|
||||
* a Zcash shielded address
|
||||
|
||||
Configuring Tahoe-LAFS
|
||||
----------------------
|
||||
|
||||
The Zcash shielded address is placed in the storage server's ``nickname`` field.
|
||||
Edit ``tahoe.cfg`` and edit the ``nickname`` field in the ``node`` section like so::
|
||||
|
||||
[node]
|
||||
nickname = zcash:zcABCDEF....
|
||||
|
||||
Then restart the storage server.
|
||||
|
||||
Further Reading
|
||||
===============
|
||||
|
||||
To acquaint yourself with the security and privacy properties of Zcash,
|
||||
refer to the `Zcash documentation`_.
|
||||
|
||||
.. _Zcash shielded transactions: https://z.cash/support/security/privacy-security-recommendations.html#transaction
|
||||
|
||||
.. _Zcash documentation: http://zcash.readthedocs.io/en/latest/
|
@ -98,7 +98,7 @@ subset are needed to reconstruct the segment (3 out of 10, with the default
|
||||
settings).
|
||||
|
||||
It sends one block from each segment to a given server. The set of blocks on
|
||||
a given server constitutes a "share". Therefore a subset f the shares (3 out
|
||||
a given server constitutes a "share". Therefore a subset of the shares (3 out
|
||||
of 10, by default) are needed to reconstruct the file.
|
||||
|
||||
A hash of the encryption key is used to form the "storage index", which is
|
||||
|
35
docs/aspiration-contract.txt
Normal file
@ -0,0 +1,35 @@
|
||||
In December 2018, the Tahoe-LAFS project engaged Aspiration[1], a US 501(c)3
|
||||
nonprofit technology organization, as a "fiscal sponsor"[2]. A portion of the
|
||||
project's Bitcoin will be given to Aspiration, from which they can pay
|
||||
developers and contractors to work on the Tahoe codebase. Aspiration will
|
||||
handle the payroll, taxes, accounting, project management, and oversight, and
|
||||
is compensated by an 8% management fee. This provides the tax-withholding
|
||||
structure to use our project's BTC for significant development.
|
||||
|
||||
We're using 25% of our ~369 BTC for this initial stage of the project,
|
||||
which will give us about $300K-$350K of development work, spread out
|
||||
over the 2019 calendar year. While it would have been nice to make this
|
||||
happen a year ago (given the recent decline of the BTC price), we think
|
||||
this is a reasonable value, and we're excited to finally get to use this
|
||||
surprise windfall to improve the codebase.
|
||||
|
||||
Our initial set of projects to fund, drafted by Liz Steininger of Least
|
||||
Authority and approved by Zooko and Brian, looks like this:
|
||||
|
||||
* porting Tahoe and dependent libraries to Python 3
|
||||
* improving grid operation/management tools
|
||||
* community outreach, UI/UX improvements, documentation
|
||||
* adding new community-requested features, improving garbage collection
|
||||
* possibly run another summit
|
||||
|
||||
If this goes well (and especially if the BTC price recovers), we'll
|
||||
probably do more next year.
|
||||
|
||||
As usual, the transfer amounts and addresses will be logged in
|
||||
"donations.rst" and "expenses.rst" in the docs/ directory.
|
||||
|
||||
Many thanks to Gunner and Josh Black of Aspiration, and Liz Steininger
|
||||
of Least Authority, for making this possible.
|
||||
|
||||
[1]: https://aspirationtech.org/
|
||||
[2]: https://aspirationtech.org/services/openprojects
|
@ -9,6 +9,7 @@ Configuring a Tahoe-LAFS node
|
||||
#. `Connection Management`_
|
||||
#. `Client Configuration`_
|
||||
#. `Storage Server Configuration`_
|
||||
#. `Storage Server Plugin Configuration`_
|
||||
#. `Frontend Configuration`_
|
||||
#. `Running A Helper`_
|
||||
#. `Running An Introducer`_
|
||||
@ -81,7 +82,6 @@ Client/server nodes provide one or more of the following services:
|
||||
* web-API service
|
||||
* SFTP service
|
||||
* FTP service
|
||||
* Magic Folder service
|
||||
* helper service
|
||||
* storage service.
|
||||
|
||||
@ -683,6 +683,8 @@ Client Configuration
|
||||
location to prefer their local servers so that they can maintain access to
|
||||
all of their uploads without using the internet.
|
||||
|
||||
In addition,
|
||||
see :doc:`accepting-donations` for a convention for donating to storage server operators.
|
||||
|
||||
Frontend Configuration
|
||||
======================
|
||||
@ -716,12 +718,6 @@ SFTP, FTP
|
||||
for instructions on configuring these services, and the ``[sftpd]`` and
|
||||
``[ftpd]`` sections of ``tahoe.cfg``.
|
||||
|
||||
Magic Folder
|
||||
|
||||
A node running on Linux or Windows can be configured to automatically
|
||||
upload files that are created or changed in a specified local directory.
|
||||
See :doc:`frontends/magic-folder` for details.
|
||||
|
||||
|
||||
Storage Server Configuration
|
||||
============================
|
||||
@ -736,6 +732,17 @@ Storage Server Configuration
|
||||
for clients who do not wish to provide storage service. The default value
|
||||
is ``True``.
|
||||
|
||||
``anonymous = (boolean, optional)``
|
||||
|
||||
If this is ``True``, the node will expose the storage server via Foolscap
|
||||
without any additional authentication or authorization. The capability to
|
||||
use all storage services is conferred by knowledge of the Foolscap fURL
|
||||
for the storage server which will be included in the storage server's
|
||||
announcement. If it is ``False``, the node will not expose this and
|
||||
storage must be exposed using the storage server plugin system (see
|
||||
`Storage Server Plugin Configuration`_ for details). The default value is
|
||||
``True``.
|
||||
|
||||
``readonly = (boolean, optional)``
|
||||
|
||||
If ``True``, the node will run a storage server but will not accept any
|
||||
@ -793,6 +800,35 @@ Storage Server Configuration
|
||||
(i.e. ``BASEDIR/storage``), but it can be placed elsewhere. Relative paths
|
||||
will be interpreted relative to the node's base directory.
|
||||
|
||||
In addition,
|
||||
see :doc:`accepting-donations` for a convention encouraging donations to storage server operators.
|
||||
|
||||
|
||||
Storage Server Plugin Configuration
|
||||
===================================
|
||||
|
||||
In addition to the built-in storage server,
|
||||
it is also possible to load and configure storage server plugins into Tahoe-LAFS.
|
||||
|
||||
Plugins to load are specified in the ``[storage]`` section.
|
||||
|
||||
``plugins = (string, optional)``
|
||||
|
||||
This gives a comma-separated list of plugin names.
|
||||
Plugins named here will be loaded and offered to clients.
|
||||
The default is for no such plugins to be loaded.
|
||||
|
||||
Each plugin can also be configured in a dedicated section.
|
||||
The section for each plugin is named after the plugin itself::
|
||||
|
||||
[storageserver.plugins.<plugin name>]
|
||||
|
||||
For example,
|
||||
the configuration section for a plugin named ``acme-foo-v1`` is ``[storageserver.plugins.acme-foo-v1]``.
|
||||
|
||||
The contents of such sections are defined by the plugins themselves.
|
||||
Refer to the documentation provided with those plugins.
|
||||
|
||||
|
||||
Running A Helper
|
||||
================
|
||||
|
@ -39,16 +39,16 @@ virtualenv.
|
||||
|
||||
The ``.deb`` packages, of course, rely solely upon other ``.deb`` packages.
|
||||
For reference, here is a list of the debian package names that provide Tahoe's
|
||||
dependencies as of the 1.9 release:
|
||||
dependencies as of the 1.14.0 release:
|
||||
|
||||
* python
|
||||
* python-zfec
|
||||
* python-pycryptopp
|
||||
* python-foolscap
|
||||
* python-openssl (needed by foolscap)
|
||||
* python-twisted
|
||||
* python-nevow
|
||||
* python-mock
|
||||
* python-cryptography
|
||||
* python-simplejson
|
||||
* python-setuptools
|
||||
* python-support (for Debian-specific install-time tools)
|
||||
|
89
docs/developer-guide.rst
Normal file
@ -0,0 +1,89 @@
|
||||
Developer Guide
|
||||
===============
|
||||
|
||||
|
||||
Pre-commit Checks
|
||||
-----------------
|
||||
|
||||
This project is configured for use with `pre-commit`_ to install `VCS/git hooks`_ which
|
||||
perform some static code analysis checks and other code checks to catch common errors
|
||||
before each commit and to run the full self-test suite to find less obvious regressions
|
||||
before each push to a remote.
|
||||
|
||||
For example::
|
||||
|
||||
tahoe-lafs $ make install-vcs-hooks
|
||||
...
|
||||
+ ./.tox//py36/bin/pre-commit install --hook-type pre-commit
|
||||
pre-commit installed at .git/hooks/pre-commit
|
||||
+ ./.tox//py36/bin/pre-commit install --hook-type pre-push
|
||||
pre-commit installed at .git/hooks/pre-push
|
||||
tahoe-lafs $ python -c "import pathlib; pathlib.Path('src/allmydata/tabbed.py').write_text('def foo():\\n\\tpass\\n')"
|
||||
tahoe-lafs $ git add src/allmydata/tabbed.py
|
||||
tahoe-lafs $ git commit -a -m "Add a file that violates flake8"
|
||||
...
|
||||
codechecks...............................................................Failed
|
||||
- hook id: codechecks
|
||||
- exit code: 1
|
||||
|
||||
GLOB sdist-make: ./tahoe-lafs/setup.py
|
||||
codechecks inst-nodeps: ...
|
||||
codechecks installed: ...
|
||||
codechecks run-test-pre: PYTHONHASHSEED='...'
|
||||
codechecks run-test: commands[0] | flake8 src static misc setup.py
|
||||
src/allmydata/tabbed.py:2:1: W191 indentation contains tabs
|
||||
ERROR: InvocationError for command ./tahoe-lafs/.tox/codechecks/bin/flake8 src static misc setup.py (exited with code 1)
|
||||
___________________________________ summary ____________________________________
|
||||
ERROR: codechecks: commands failed
|
||||
...
|
||||
|
||||
To uninstall::
|
||||
|
||||
tahoe-lafs $ make uninstall-vcs-hooks
|
||||
...
|
||||
+ ./.tox/py36/bin/pre-commit uninstall
|
||||
pre-commit uninstalled
|
||||
+ ./.tox/py36/bin/pre-commit uninstall -t pre-push
|
||||
pre-push uninstalled
|
||||
|
||||
Note that running the full self-test suite takes several minutes so expect pushing to
|
||||
take some time. If you can't or don't want to wait for the hooks in some cases, use the
|
||||
``--no-verify`` option to ``$ git commit ...`` or ``$ git push ...``. Alternatively,
|
||||
see the `pre-commit`_ documentation and CLI help output and use the committed
|
||||
`pre-commit configuration`_ as a starting point to write a local, uncommitted
|
||||
``../.pre-commit-config.local.yaml`` configuration to use instead. For example::
|
||||
|
||||
tahoe-lafs $ ./.tox/py36/bin/pre-commit --help
|
||||
tahoe-lafs $ ./.tox/py36/bin/pre-commit install --help
|
||||
tahoe-lafs $ cp "./.pre-commit-config.yaml" "./.pre-commit-config.local.yaml"
|
||||
tahoe-lafs $ editor "./.pre-commit-config.local.yaml"
|
||||
...
|
||||
tahoe-lafs $ ./.tox/py36/bin/pre-commit install -c "./.pre-commit-config.local.yaml" -t pre-push
|
||||
pre-commit installed at .git/hooks/pre-push
|
||||
tahoe-lafs $ git commit -a -m "Add a file that violates flake8"
|
||||
[3398.pre-commit 29f8f43d2] Add a file that violates flake8
|
||||
1 file changed, 2 insertions(+)
|
||||
create mode 100644 src/allmydata/tabbed.py
|
||||
tahoe-lafs $ git push
|
||||
...
|
||||
codechecks...............................................................Failed
|
||||
- hook id: codechecks
|
||||
- exit code: 1
|
||||
|
||||
GLOB sdist-make: ./tahoe-lafs/setup.py
|
||||
codechecks inst-nodeps: ...
|
||||
codechecks installed: ...
|
||||
codechecks run-test-pre: PYTHONHASHSEED='...'
|
||||
codechecks run-test: commands[0] | flake8 src static misc setup.py
|
||||
src/allmydata/tabbed.py:2:1: W191 indentation contains tabs
|
||||
ERROR: InvocationError for command ./tahoe-lafs/.tox/codechecks/bin/flake8 src static misc setup.py (exited with code 1)
|
||||
___________________________________ summary ____________________________________
|
||||
ERROR: codechecks: commands failed
|
||||
...
|
||||
|
||||
error: failed to push some refs to 'github.com:jaraco/tahoe-lafs.git'
|
||||
|
||||
|
||||
.. _`pre-commit`: https://pre-commit.com
|
||||
.. _`VCS/git hooks`: `pre-commit`_
|
||||
.. _`pre-commit configuration`: ../.pre-commit-config.yaml
|
@ -73,6 +73,10 @@ key on this list.
|
||||
~$1020
|
||||
1DskmM8uCvmvTKjPbeDgfmVsGifZCmxouG
|
||||
|
||||
* Aspiration contract (first phase, 2019)
|
||||
$300k-$350k
|
||||
1gDXYQNH4kCJ8Dk7kgiztfjNUaA1KJcHv
|
||||
|
||||
|
||||
Historical Donation Addresses
|
||||
=============================
|
||||
@ -100,17 +104,17 @@ This document is signed by the Tahoe-LAFS Release-Signing Key (GPG keyid
|
||||
(https://github.com/tahoe-lafs/tahoe-lafs.git) as `docs/donations.rst`.
|
||||
Both actions require access to secrets held closely by Tahoe developers.
|
||||
|
||||
signed: Brian Warner, 10-Nov-2016
|
||||
signed: Brian Warner, 27-Dec-2018
|
||||
|
||||
|
||||
-----BEGIN PGP SIGNATURE-----
|
||||
Version: GnuPG v2
|
||||
|
||||
iQEcBAEBCAAGBQJYJVQBAAoJEL3g0x1oZmp6/8gIAJ5N2jLRQgpfIQTbVvhpnnOc
|
||||
MGV/kTN5yiN88laX91BPiX8HoAYrBcrzVH/If/2qGkQOGt8RW/91XJC++85JopzN
|
||||
Gw8uoyhxFB2b4+Yw2WLBSFKx58CyNoq47ZSwLUpard7P/qNrN+Szb26X0jDLo+7V
|
||||
XL6kXphL82b775xbFxW6afSNSjFJzdbozU+imTqxCu+WqIRW8iD2vjQxx6T6SSrA
|
||||
q0aLSlZpmD2mHGG3C3K2yYnX7C0BoGR9j4HAN9HbXtTKdVxq98YZOh11jmU1RVV/
|
||||
nTncD4E1CMrv/QqmktjXw/2shiGihYX+3ZqTO5BAZerORn0MkxPOIvESSVUhHVw=
|
||||
=Oj0C
|
||||
iQEzBAEBCAAdFiEE405i0G0Oac/KQXn/veDTHWhmanoFAlwlrdsACgkQveDTHWhm
|
||||
anqEqQf/SdxMvI0+YbsZe+Gr/+lNWrNtfxAkjgLUZYRPmElZG6UKkNuPghXfsYRM
|
||||
71nRbgbn05jrke7AGlulxNplTxYP/5LQVf5K1nvTE7yPI/LBMudIpAbM3wPiLKSD
|
||||
qecrVZiqiIBPHWScyya91qirTHtJTJj39cs/N9937hD+Pm65paHWHDZhMkhStGH7
|
||||
05WtvD0G+fFuAgs04VDBz/XVQlPbngkmdKjIL06jpIAgzC3H9UGFcqe55HKY66jK
|
||||
W769TiRuGLLS07cOPqg8t2hPpE4wv9Gs02hfg1Jc656scsFuEkh5eMMj/MXcFsED
|
||||
8vwn16kjJk1fkeg+UofnXsHeHIJalQ==
|
||||
=/E+V
|
||||
-----END PGP SIGNATURE-----
|
||||
|
@ -44,7 +44,7 @@ arguments. "``tahoe --help``" might also provide something useful.
|
||||
Running "``tahoe --version``" will display a list of version strings, starting
|
||||
with the "allmydata" module (which contains the majority of the Tahoe-LAFS
|
||||
functionality) and including versions for a number of dependent libraries,
|
||||
like Twisted, Foolscap, pycryptopp, and zfec. "``tahoe --version-and-path``"
|
||||
like Twisted, Foolscap, cryptography, and zfec. "``tahoe --version-and-path``"
|
||||
will also show the path from which each library was imported.
|
||||
|
||||
On Unix systems, the shell expands filename wildcards (``'*'`` and ``'?'``)
|
||||
|
@ -211,14 +211,7 @@ Dependencies
|
||||
|
||||
The Tahoe-LAFS SFTP server requires the Twisted "Conch" component (a "conch"
|
||||
is a twisted shell, get it?). Many Linux distributions package the Conch code
|
||||
separately: debian puts it in the "python-twisted-conch" package. Conch
|
||||
requires the "pycrypto" package, which is a Python+C implementation of many
|
||||
cryptographic functions (the debian package is named "python-crypto").
|
||||
|
||||
Note that "pycrypto" is different than the "pycryptopp" package that
|
||||
Tahoe-LAFS uses (which is a Python wrapper around the C++ -based Crypto++
|
||||
library, a library that is frequently installed as /usr/lib/libcryptopp.a, to
|
||||
avoid problems with non-alphanumerics in filenames).
|
||||
separately: debian puts it in the "python-twisted-conch" package.
|
||||
|
||||
Immutable and Mutable Files
|
||||
===========================
|
||||
|
@ -1,148 +0,0 @@
|
||||
.. -*- coding: utf-8-with-signature -*-
|
||||
|
||||
================================
|
||||
Tahoe-LAFS Magic Folder Frontend
|
||||
================================
|
||||
|
||||
1. `Introduction`_
|
||||
2. `Configuration`_
|
||||
3. `Known Issues and Limitations With Magic-Folder`_
|
||||
|
||||
|
||||
Introduction
|
||||
============
|
||||
|
||||
The Magic Folder frontend synchronizes local directories on two or more
|
||||
clients, using a Tahoe-LAFS grid for storage. Whenever a file is created
|
||||
or changed under the local directory of one of the clients, the change is
|
||||
propagated to the grid and then to the other clients.
|
||||
|
||||
The implementation of the "drop-upload" frontend, on which Magic Folder is
|
||||
based, was written as a prototype at the First International Tahoe-LAFS
|
||||
Summit in June 2011. In 2015, with the support of a grant from the
|
||||
`Open Technology Fund`_, it was redesigned and extended to support
|
||||
synchronization between clients. It currently works on Linux and Windows.
|
||||
|
||||
Magic Folder is not currently in as mature a state as the other frontends
|
||||
(web, CLI, SFTP and FTP). This means that you probably should not rely on
|
||||
all changes to files in the local directory to result in successful uploads.
|
||||
There might be (and have been) incompatible changes to how the feature is
|
||||
configured.
|
||||
|
||||
We are very interested in feedback on how well this feature works for you, and
|
||||
suggestions to improve its usability, functionality, and reliability.
|
||||
|
||||
.. _`Open Technology Fund`: https://www.opentech.fund/
|
||||
|
||||
|
||||
Configuration
|
||||
=============
|
||||
|
||||
The Magic Folder frontend runs as part of a gateway node. To set it up, you
|
||||
must use the tahoe magic-folder CLI. For detailed information see our
|
||||
:doc:`Magic-Folder CLI design
|
||||
documentation<../proposed/magic-folder/user-interface-design>`. For a
|
||||
given Magic-Folder collective directory you need to run the ``tahoe
|
||||
magic-folder create`` command. After that the ``tahoe magic-folder invite``
|
||||
command must be used to generate an *invite code* for each member of the
|
||||
magic-folder collective. A confidential, authenticated communications channel
|
||||
should be used to transmit the invite code to each member, who will be
|
||||
joining using the ``tahoe magic-folder join`` command.
|
||||
|
||||
These settings are persisted in the ``[magic_folder]`` section of the
|
||||
gateway's ``tahoe.cfg`` file.
|
||||
|
||||
``[magic_folder]``
|
||||
|
||||
``enabled = (boolean, optional)``
|
||||
|
||||
If this is ``True``, Magic Folder will be enabled. The default value is
|
||||
``False``.
|
||||
|
||||
``local.directory = (UTF-8 path)``
|
||||
|
||||
This specifies the local directory to be monitored for new or changed
|
||||
files. If the path contains non-ASCII characters, it should be encoded
|
||||
in UTF-8 regardless of the system's filesystem encoding. Relative paths
|
||||
will be interpreted starting from the node's base directory.
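For illustration, once Magic Folder has been set up the relevant part of
``tahoe.cfg`` might read as follows (the directory path shown is only an
example)::

    [magic_folder]
    enabled = True
    local.directory = /home/alice/magic-folder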
|
||||
|
||||
You should not normally need to set these fields manually because they are
|
||||
set by the ``tahoe magic-folder create`` and/or ``tahoe magic-folder join``
|
||||
commands. Use the ``--help`` option to these commands for more information.
|
||||
|
||||
After setting up a Magic Folder collective and starting or restarting each
|
||||
gateway, you can confirm that the feature is working by copying a file into
|
||||
any local directory, and checking that it appears on other clients.
|
||||
Large files may take some time to appear.
|
||||
|
||||
The 'Operational Statistics' page linked from the Welcome page shows counts
|
||||
of the number of files uploaded, the number of change events currently
|
||||
queued, and the number of failed uploads. The 'Recent Uploads and Downloads'
|
||||
page and the node :doc:`log<../logging>` may be helpful to determine the
|
||||
cause of any failures.
|
||||
|
||||
|
||||
.. _Known Issues in Magic-Folder:
|
||||
|
||||
Known Issues and Limitations With Magic-Folder
|
||||
==============================================
|
||||
|
||||
This feature only works on Linux and Windows. There is a ticket to add
|
||||
support for Mac OS X and BSD-based systems (`#1432`_).
|
||||
|
||||
The only way to determine whether uploads have failed is to look at the
|
||||
'Operational Statistics' page linked from the Welcome page. This only shows
|
||||
a count of failures, not the names of files. Uploads are never retried.
|
||||
|
||||
The Magic Folder frontend performs its uploads sequentially (i.e. it waits
|
||||
until each upload is finished before starting the next), even when there
|
||||
would be enough memory and bandwidth to efficiently perform them in parallel.
|
||||
A Magic Folder upload can occur in parallel with an upload by a different
|
||||
frontend, though. (`#1459`_)
|
||||
|
||||
On Linux, if there are a large number of near-simultaneous file creation or
|
||||
change events (greater than the number specified in the file
|
||||
``/proc/sys/fs/inotify/max_queued_events``), it is possible that some events
|
||||
could be missed. This is fairly unlikely under normal circumstances, because
|
||||
the default value of ``max_queued_events`` in most Linux distributions is
|
||||
16384, and events are removed from this queue immediately without waiting for
|
||||
the corresponding upload to complete. (`#1430`_)
|
||||
|
||||
The Windows implementation might also occasionally miss file creation or
|
||||
change events, due to limitations of the underlying Windows API
|
||||
(ReadDirectoryChangesW). We do not know how likely or unlikely this is.
|
||||
(`#1431`_)
|
||||
|
||||
Some filesystems may not support the necessary change notifications.
|
||||
So, it is recommended for the local directory to be on a directly attached
|
||||
disk-based filesystem, not a network filesystem or one provided by a virtual
|
||||
machine.
|
||||
|
||||
The ``private/magic_folder_dircap`` and ``private/collective_dircap`` files
|
||||
cannot use an alias or path to specify the upload directory. (`#1711`_)
|
||||
|
||||
If a file in the upload directory is changed (actually relinked to a new
|
||||
file), then the old file is still present on the grid, and any other caps
|
||||
to it will remain valid. Eventually it will be possible to use
|
||||
:doc:`../garbage-collection` to reclaim the space used by these files; however
|
||||
currently they are retained indefinitely. (`#2440`_)
|
||||
|
||||
Unicode filenames are supported on both Linux and Windows, but on Linux, the
|
||||
local name of a file must be encoded correctly in order for it to be uploaded.
|
||||
The expected encoding is that printed by
|
||||
``python -c "import sys; print sys.getfilesystemencoding()"``.
|
||||
|
||||
On Windows, local directories with non-ASCII names are not currently working.
|
||||
(`#2219`_)
|
||||
|
||||
On Windows, when a node has Magic Folder enabled, it is unresponsive to Ctrl-C
|
||||
(it can only be killed using Task Manager or similar). (`#2218`_)
|
||||
|
||||
.. _`#1430`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1430
|
||||
.. _`#1431`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1431
|
||||
.. _`#1432`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1432
|
||||
.. _`#1459`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1459
|
||||
.. _`#1711`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1711
|
||||
.. _`#2218`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2218
|
||||
.. _`#2219`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2219
|
||||
.. _`#2440`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2440
|
@ -272,22 +272,3 @@ that size, assume that they have been corrupted and are not retrievable from the
|
||||
Tahoe storage grid. Tahoe v1.1 clients will refuse to upload files larger than
|
||||
12 GiB with a clean failure. A future release of Tahoe will remove this
|
||||
limitation so that larger files can be uploaded.
|
||||
|
||||
|
||||
=== pycryptopp defect resulting in data corruption ===
|
||||
|
||||
Versions of pycryptopp earlier than pycryptopp-0.5.0 had a defect
|
||||
which, when compiled with some compilers, would cause AES-256
|
||||
encryption and decryption to be computed incorrectly. This could
|
||||
cause data corruption. Tahoe v1.0 required, and came with a bundled
|
||||
copy of, pycryptopp v0.3.
|
||||
|
||||
==== how to manage it ====
|
||||
|
||||
You can detect whether pycryptopp-0.3 has this failure when it is
|
||||
compiled by your compiler. Run the unit tests that come with
|
||||
pycryptopp-0.3: unpack the "pycryptopp-0.3.tar" file that comes in the
|
||||
Tahoe v1.0 {{{misc/dependencies}}} directory, cd into the resulting
|
||||
{{{pycryptopp-0.3.0}}} directory, and execute {{{python ./setup.py
|
||||
test}}}. If the tests pass, then your compiler does not trigger this
|
||||
failure.
|
||||
|
@ -1,80 +1,110 @@
|
||||
* Tahoe Release Checklist [0/19]
|
||||
- [ ] make sure buildbot, travis, appveyor are green
|
||||
- [ ] NEWS.rst: summarize user-visible changes, aim for one page of text
|
||||
- [ ] update doc files
|
||||
- NEWS.rst: Add final release name and date to top-most item in NEWS.
|
||||
- relnotes.txt
|
||||
- CREDITS
|
||||
- docs/known_issues.rst
|
||||
- [ ] change docs/INSTALL.rst to point to just the current
|
||||
tahoe-lafs-X.Y.Z.tar.gz source code file
|
||||
- [ ] announce the tree is locked on IRC
|
||||
- [ ] git pull, should be empty
|
||||
- [ ] git tag -s -u 68666A7A -m "release Tahoe-LAFS-X.Y.Z" tahoe-lafs-X.Y.Z
|
||||
- produces a "signed tag"
|
||||
- [ ] build locally to make sure the release is reporting itself as the
|
||||
intended version, make tarballs too (for comparison only)
|
||||
- [ ] push tag to trigger buildslaves. Making a code change is no longer
|
||||
necessary (TODO: pushing just 1.12.0b2 was insufficient: travis fired,
|
||||
but not buildbot)
|
||||
- git push official master TAGNAME
|
||||
- that will build tarballs
|
||||
- [ ] make sure buildbot is green
|
||||
- [ ] download tarballs+wheels
|
||||
- [ ] announce tree is unlocked
|
||||
- [ ] compare tarballs+wheels against local copies (but sign upstreams)
|
||||
- [ ] sign each with "gpg -ba -u 68666a7a FILE"
|
||||
- [ ] twine upload dist/*
|
||||
- [ ] test "pip install tahoe-lafs" (from PyPI)
|
||||
- [ ] upload *.asc to org ~source/downloads/
|
||||
- [ ] copy the release tarball,sigs to tahoe-lafs.org: ~source/downloads/
|
||||
- [ ] move old release out of ~source/downloads (to downloads/old/?)
|
||||
- [ ] send out relnotes.txt:
|
||||
- add prefix with SHA256 of tarballs, release pubkey, git revhash
|
||||
- GPG-sign the email with release key
|
||||
- send to tahoe-announce@tahoe-lafs.org and tahoe-dev@tahoe-lafs.org
|
||||
- [ ] update Wiki front page: version on download link, News column
|
||||
- [ ] update Wiki "Doc": parade of release notes (with rev of NEWS.rst)
|
||||
- [ ] close the Milestone on the trac Roadmap
|
||||
- [ ] tweet to @tahoelafs
|
||||
- other stuff:
|
||||
- [ ] update https://tahoe-lafs.org/hacktahoelafs/
|
||||
- [ ] make an "announcement of new release" on freshmeat
|
||||
How to Make a Tahoe-LAFS Release
|
||||
|
||||
Any developer with push privileges can do most of these steps, but a
|
||||
"Release Maintainer" is required for some signing operations -- these
|
||||
steps are marked with (Release Maintainer). Currently, the following
|
||||
people are Release Maintainers:
|
||||
|
||||
- Brian Warner (https://github.com/warner)
|
||||
|
||||
|
||||
* select features/PRs for new release [0/2]
|
||||
- [ ] made sure they are tagged/labeled
|
||||
- [ ] merged all release PRs
|
||||
|
||||
* basic quality checks [0/3]
|
||||
- [ ] all travis CI checks pass
|
||||
- [ ] all appveyor checks pass
|
||||
- [ ] all buildbot workers pass their checks
|
||||
|
||||
* freeze master branch [0/1]
|
||||
- [ ] announced the freeze of the master branch on IRC (i.e. non-release PRs won't be merged until after release)
|
||||
|
||||
* sync documentation [0/7]
|
||||
|
||||
- [ ] NEWS.rst: (run "tox -e news")
|
||||
- [ ] added final release name and date to top-most item in NEWS.rst
|
||||
- [ ] updated relnotes.txt (change next, last versions; summarize NEWS)
|
||||
- [ ] updated CREDITS
|
||||
- [ ] updated docs/known_issues.rst
|
||||
- [ ] docs/INSTALL.rst only points to current tahoe-lafs-X.Y.Z.tar.gz source code file
|
||||
- [ ] updated https://tahoe-lafs.org/hacktahoelafs/
|
||||
|
||||
* sign + build the tag [0/8]
|
||||
|
||||
- [ ] code passes all checks / tests (i.e. all CI is green)
|
||||
- [ ] documentation is ready (see above)
|
||||
- [ ] (Release Maintainer): git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-X.Y.Z" tahoe-lafs-X.Y.Z
|
||||
- [ ] build code locally:
|
||||
tox -e py27,codechecks,deprecations,docs,integration,upcoming-deprecations
|
||||
- [ ] created tarballs (they'll be in dist/ for later comparison)
|
||||
tox -e tarballs
|
||||
- [ ] release version is reporting itself as intended version
|
||||
ls dist/
|
||||
- [ ] 'git pull' doesn't pull anything
|
||||
- [ ] pushed tag to trigger buildslaves
|
||||
git push official master TAGNAME
|
||||
- [ ] confirmed Dockerhub built successfully:
|
||||
https://hub.docker.com/r/tahoelafs/base/builds/
|
||||
|
||||
* sign the release artifacts [0/8]
|
||||
|
||||
- [ ] (Release Maintainer): pushed signed tag (should trigger Buildbot builders)
|
||||
- [ ] Buildbot workers built all artifacts successfully
|
||||
- [ ] downloaded upstream tarballs+wheels
|
||||
- [ ] announce on IRC that master is unlocked
|
||||
- [ ] compared upstream tarballs+wheels against local copies
|
||||
- [ ] (Release Maintainer): signed each upstream artifacts with "gpg -ba -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A FILE"
|
||||
- [ ] added to relnotes.txt: [0/3]
|
||||
- [ ] prefix with SHA256 of tarballs
|
||||
- [ ] release pubkey
|
||||
- [ ] git revision hash
|
||||
- [ ] GPG-signed the release email with release key (write to
|
||||
relnotes.txt.asc) Ideally this is a Release Maintainer, but could
|
||||
be any developer
|
||||
|
||||
* publish release artifacts [0/9]
|
||||
|
||||
- [ ] uploaded to PyPI via: twine upload dist/*
|
||||
- [ ] uploaded *.asc to org ~source/downloads/
|
||||
- [ ] test install works properly: pip install tahoe-lafs
|
||||
- [ ] copied the release tarballs and signatures to tahoe-lafs.org: ~source/downloads/
|
||||
- [ ] moved old release out of ~source/downloads (to downloads/old/?)
|
||||
- [ ] ensured readthedocs.org updated
|
||||
- [ ] uploaded wheels to https://tahoe-lafs.org/deps/
|
||||
- [ ] uploaded release to https://github.com/tahoe-lafs/tahoe-lafs/releases
|
||||
|
||||
* check release downloads [0/]
|
||||
|
||||
- [ ] test PyPI via: pip install tahoe-lafs
|
||||
- [ ] https://github.com/tahoe-lafs/tahoe-lafs/releases
|
||||
- [ ] https://tahoe-lafs.org/downloads/
|
||||
- [ ] https://tahoe-lafs.org/deps/
|
||||
|
||||
* document release in trac [0/]
|
||||
|
||||
- [ ] closed the Milestone on the trac Roadmap
|
||||
|
||||
* unfreeze master branch [0/]
|
||||
|
||||
- [ ] announced on IRC that new PRs will be looked at/merged
|
||||
|
||||
* announce new release [0/]
|
||||
|
||||
- [ ] sent release email and relnotes.txt.asc to tahoe-announce@tahoe-lafs.org
|
||||
- [ ] sent release email and relnotes.txt.asc to tahoe-dev@tahoe-lafs.org
|
||||
- [ ] updated Wiki front page: version on download link, News column
|
||||
- [ ] updated Wiki "Doc": parade of release notes (with rev of NEWS.rst)
|
||||
- [ ] make an "announcement of new release" on freshmeat (XXX still a thing?)
|
||||
- [ ] make an "announcement of new release" on launchpad
|
||||
- [ ] send out relnotes.txt to:
|
||||
- p2p-hackers@lists.zooko.com
|
||||
- lwn@lwn.net
|
||||
- a Google+ page
|
||||
- cap-talk@mail.eros-os.org
|
||||
- cryptography@metzdown.com
|
||||
- cryptography@randombit.net
|
||||
- twisted-python@twistedmatrix.com
|
||||
- owncloud@kde.org
|
||||
- liberationtech@lists.stanford.edu
|
||||
- the "decentralization" group on groups.yahoo.com
|
||||
- pycrypto mailing list
|
||||
- fuse-devel@lists.sourceforge.net
|
||||
- fuse-sshfs@lists.sourceforge.net
|
||||
- duplicity-talk@nongnu.org
|
||||
- news@phoronix.com
|
||||
- python-list@python.org
|
||||
- cygwin@cygwin.com
|
||||
- The Boulder Linux Users' Group
|
||||
- The Boulder Hackerspace mailing list
|
||||
- cryptopp-users@googlegroups.com
|
||||
- tiddlywiki
|
||||
- hdfs-dev@hadoop.apache.org
|
||||
- bzr
|
||||
- mercurial
|
||||
- http://listcultures.org/pipermail/p2presearch_listcultures.org/
|
||||
- deltacloud
|
||||
- libcloud
|
||||
- swift@lists.launchpad.net
|
||||
- stephen@fosketts.net
|
||||
- Chris Mellor of The Register
|
||||
- nosql@mypopescu.com
|
||||
- The H Open
|
||||
- fans/customers of cleversafe
|
||||
- fans/customers of bitcasa
|
||||
- fans/customers of wuala
|
||||
- fans/customers of spideroak
|
||||
- [ ] tweeted as @tahoelafs
|
||||
- [ ] emailed relnotes.txt.asc to below listed mailing-lists/organizations
|
||||
- [ ] also announce release to (trimmed from previous version of this doc):
|
||||
- twisted-python@twistedmatrix.com
|
||||
- liberationtech@lists.stanford.edu
|
||||
- lwn@lwn.net
|
||||
- p2p-hackers@lists.zooko.com
|
||||
- python-list@python.org
|
||||
- http://listcultures.org/pipermail/p2presearch_listcultures.org/
|
||||
- cryptopp-users@googlegroups.com
|
||||
- (others?)
|
||||
|
@ -20,10 +20,11 @@ Contents:
|
||||
frontends/CLI
|
||||
frontends/webapi
|
||||
frontends/FTP-and-SFTP
|
||||
frontends/magic-folder
|
||||
frontends/download-status
|
||||
|
||||
known_issues
|
||||
../.github/CONTRIBUTING
|
||||
CODE_OF_CONDUCT
|
||||
|
||||
servers
|
||||
helper
|
||||
@ -32,12 +33,14 @@ Contents:
|
||||
|
||||
backdoors
|
||||
donations
|
||||
accepting-donations
|
||||
expenses
|
||||
cautions
|
||||
write_coordination
|
||||
magic-folder-howto
|
||||
backupdb
|
||||
|
||||
developer-guide
|
||||
|
||||
anonymity-configuration
|
||||
|
||||
nodekeys
|
||||
@ -63,4 +66,3 @@ Indices and tables
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
|
||||
|
@ -1,176 +0,0 @@
|
||||
.. _magic-folder-howto:
|
||||
|
||||
=========================
|
||||
Magic Folder Set-up Howto
|
||||
=========================
|
||||
|
||||
#. `This document`_
|
||||
#. `Setting up a local test grid`_
|
||||
#. `Setting up Magic Folder`_
|
||||
#. `Testing`_
|
||||
|
||||
|
||||
This document
|
||||
=============
|
||||
|
||||
This is preliminary documentation of how to set up Magic Folder using a test
|
||||
grid on a single Linux or Windows machine, with two clients and one server.
|
||||
It is aimed at a fairly technical audience.
|
||||
|
||||
For an introduction to Magic Folder and how to configure it
|
||||
more generally, see :doc:`frontends/magic-folder`.
|
||||
|
||||
It is possible to adapt these instructions to run the nodes on
|
||||
different machines, to synchronize between three or more clients,
|
||||
to mix Windows and Linux clients, and to use multiple servers
|
||||
(if the Tahoe-LAFS encoding parameters are changed).
|
||||
|
||||
|
||||
Setting up a local test grid
|
||||
============================
|
||||
|
||||
Linux
|
||||
-----
|
||||
|
||||
Run these commands::
|
||||
|
||||
mkdir ../grid
|
||||
bin/tahoe create-introducer ../grid/introducer
|
||||
bin/tahoe start ../grid/introducer
|
||||
export FURL=`cat ../grid/introducer/private/introducer.furl`
|
||||
bin/tahoe create-node --introducer="$FURL" ../grid/server
|
||||
bin/tahoe create-client --introducer="$FURL" ../grid/alice
|
||||
bin/tahoe create-client --introducer="$FURL" ../grid/bob
|
||||
|
||||
|
||||
Windows
|
||||
-------
|
||||
|
||||
Run::
|
||||
|
||||
mkdir ..\grid
|
||||
bin\tahoe create-introducer ..\grid\introducer
|
||||
bin\tahoe start ..\grid\introducer
|
||||
|
||||
Leave the introducer running in that Command Prompt,
|
||||
and in a separate Command Prompt (with the same current
|
||||
directory), run::
|
||||
|
||||
set /p FURL=<..\grid\introducer\private\introducer.furl
|
||||
bin\tahoe create-node --introducer=%FURL% ..\grid\server
|
||||
bin\tahoe create-client --introducer=%FURL% ..\grid\alice
|
||||
bin\tahoe create-client --introducer=%FURL% ..\grid\bob
|
||||
|
||||
|
||||
Both Linux and Windows
|
||||
----------------------
|
||||
|
||||
(Replace ``/`` with ``\`` for Windows paths.)
|
||||
|
||||
Edit ``../grid/alice/tahoe.cfg``, and make the following
|
||||
changes to the ``[node]`` and ``[client]`` sections::
|
||||
|
||||
[node]
|
||||
nickname = alice
|
||||
web.port = tcp:3457:interface=127.0.0.1
|
||||
|
||||
[client]
|
||||
shares.needed = 1
|
||||
shares.happy = 1
|
||||
shares.total = 1
|
||||
|
||||
Edit ``../grid/bob/tahoe.cfg``, and make the following
|
||||
change to the ``[node]`` section, and the same change as
|
||||
above to the ``[client]`` section::
|
||||
|
||||
[node]
|
||||
nickname = bob
|
||||
web.port = tcp:3458:interface=127.0.0.1
|
||||
|
||||
Note that when running nodes on a single machine,
|
||||
unique port numbers must be used for each node (and they
|
||||
must not clash with ports used by other server software).
|
||||
Here we have used the default of 3456 for the server,
|
||||
3457 for alice, and 3458 for bob.
|
||||
|
||||
Now start all of the nodes (the introducer should still be
|
||||
running from above)::
|
||||
|
||||
bin/tahoe start ../grid/server
|
||||
bin/tahoe start ../grid/alice
|
||||
bin/tahoe start ../grid/bob
|
||||
|
||||
On Windows, a separate Command Prompt is needed to run each
|
||||
node.
|
||||
|
||||
Open a web browser on http://127.0.0.1:3457/ and verify that
|
||||
alice is connected to the introducer and one storage server.
|
||||
Then do the same for http://127.0.0.1:3458/ to verify that
|
||||
bob is connected. Leave all of the nodes running for the
|
||||
next stage.
|
||||
|
||||
|
||||
Setting up Magic Folder
|
||||
=======================
|
||||
|
||||
Linux
|
||||
-----
|
||||
|
||||
Run::
|
||||
|
||||
mkdir -p ../local/alice ../local/bob
|
||||
bin/tahoe -d ../grid/alice magic-folder create magic: alice ../local/alice
|
||||
bin/tahoe -d ../grid/alice magic-folder invite magic: bob >invitecode
|
||||
export INVITECODE=`cat invitecode`
|
||||
bin/tahoe -d ../grid/bob magic-folder join "$INVITECODE" ../local/bob
|
||||
|
||||
bin/tahoe restart ../grid/alice
|
||||
bin/tahoe restart ../grid/bob
|
||||
|
||||
Windows
|
||||
-------
|
||||
|
||||
Run::
|
||||
|
||||
mkdir ..\local\alice ..\local\bob
|
||||
bin\tahoe -d ..\grid\alice magic-folder create magic: alice ..\local\alice
|
||||
bin\tahoe -d ..\grid\alice magic-folder invite magic: bob >invitecode
|
||||
set /p INVITECODE=<invitecode
|
||||
bin\tahoe -d ..\grid\bob magic-folder join %INVITECODE% ..\local\bob
|
||||
|
||||
Then close the Command Prompt windows that are running the alice and bob
|
||||
nodes, and open two new ones in which to run::
|
||||
|
||||
bin\tahoe start ..\grid\alice
|
||||
bin\tahoe start ..\grid\bob
|
||||
|
||||
|
||||
Testing
|
||||
=======
|
||||
|
||||
You can now experiment with creating files and directories in
|
||||
``../local/alice`` and ``../local/bob``; any changes should be
|
||||
propagated to the other directory.
|
||||
|
||||
Note that when a file is deleted, the corresponding file in the
|
||||
other directory will be renamed to a filename ending in ``.backup``.
|
||||
Deleting a directory will have no effect.
|
||||
|
||||
For other known issues and limitations, see :ref:`Known Issues in
|
||||
Magic-Folder`.
|
||||
|
||||
As mentioned earlier, it is also possible to run the nodes on
|
||||
different machines, to synchronize between three or more clients,
|
||||
to mix Windows and Linux clients, and to use multiple servers
|
||||
(if the Tahoe-LAFS encoding parameters are changed).
|
||||
|
||||
|
||||
Configuration
|
||||
=============
|
||||
|
||||
There will be a ``[magic_folder]`` section in your ``tahoe.cfg`` file
|
||||
after setting up Magic Folder.
|
||||
|
||||
There is an option you can add to this called ``poll_interval=`` to
|
||||
control how often (in seconds) the Downloader will check for new things
|
||||
to download.
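For example, to have the Downloader check once per minute, the section could
be extended like this (the value shown is illustrative)::

    [magic_folder]
    poll_interval = 60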
|
@ -19,9 +19,7 @@ Invites and Joins
|
||||
|
||||
Inside Tahoe-LAFS we are using a channel created using `magic
|
||||
wormhole`_ to exchange configuration and the secret fURL of the
|
||||
Introducer with new clients. In the future, we would like to make the
|
||||
Magic Folder (:ref:`Magic Folder HOWTO <magic-folder-howto>`) invites and joins work this way
|
||||
as well.
|
||||
Introducer with new clients.
|
||||
|
||||
This is a two-part process. Alice runs a grid and wishes to have her
|
||||
friend Bob use it as a client. She runs ``tahoe invite bob`` which
|
||||
|
@ -546,16 +546,15 @@ The "restrictions dictionary" is a table which establishes an upper bound on
|
||||
how this authority (or any attenuations thereof) may be used. It is
|
||||
effectively a set of key-value pairs.
|
||||
|
||||
A "signing key" is an EC-DSA192 private key string, as supplied to the
|
||||
pycryptopp SigningKey() constructor, and is 12 bytes long. A "verifying key"
|
||||
is an EC-DSA192 public key string, as produced by pycryptopp, and is 24 bytes
|
||||
long. A "key identifier" is a string which securely identifies a specific
|
||||
signing/verifying keypair: for long RSA keys it would be a secure hash of the
|
||||
public key, but since ECDSA192 keys are so short, we simply use the full
|
||||
verifying key verbatim. A "key hint" is a variable-length prefix of the key
|
||||
identifier, perhaps zero bytes long, used to help a recipient reduce the
|
||||
number of verifying keys that it must search to find one that matches a
|
||||
signed message.
|
||||
A "signing key" is an EC-DSA192 private key string and is 12 bytes
|
||||
long. A "verifying key" is an EC-DSA192 public key string, and is 24
|
||||
bytes long. A "key identifier" is a string which securely identifies a
|
||||
specific signing/verifying keypair: for long RSA keys it would be a
|
||||
secure hash of the public key, but since ECDSA192 keys are so short,
|
||||
we simply use the full verifying key verbatim. A "key hint" is a
|
||||
variable-length prefix of the key identifier, perhaps zero bytes long,
|
||||
used to help a recipient reduce the number of verifying keys that it
|
||||
must search to find one that matches a signed message.
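As a rough illustration (not part of the specification; the helper name is
invented), matching a key hint against a set of verifying keys amounts to a
prefix test::

    def candidate_verifying_keys(key_hint, verifying_keys):
        # A zero-length hint matches every key; a full-length hint matches exactly one.
        return [key for key in verifying_keys if key.startswith(key_hint)]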
|
||||
|
||||
==== Authority Chains ====
|
||||
|
||||
|
514
docs/proposed/http-storage-node-protocol.rst
Normal file
514
docs/proposed/http-storage-node-protocol.rst
Normal file
@ -0,0 +1,514 @@
|
||||
.. -*- coding: utf-8 -*-
|
||||
|
||||
Storage Node Protocol ("Great Black Swamp", "GBS")
|
||||
==================================================
|
||||
|
||||
The target audience for this document is Tahoe-LAFS developers.
|
||||
After reading this document,
|
||||
one should expect to understand how Tahoe-LAFS clients interact over the network with Tahoe-LAFS storage nodes.
|
||||
|
||||
The primary goal of the introduction of this protocol is to simplify the task of implementing a Tahoe-LAFS storage server.
|
||||
Specifically, it should be possible to implement a Tahoe-LAFS storage server without a Foolscap implementation
|
||||
(substituting a simpler GBS server implementation).
|
||||
The Tahoe-LAFS client will also need to change but it is not expected that it will be noticeably simplified by this change
|
||||
(though this may be the first step towards simplifying it).
|
||||
|
||||
Requirements
|
||||
------------
|
||||
|
||||
Security
|
||||
~~~~~~~~
|
||||
|
||||
Summary
|
||||
!!!!!!!
|
||||
|
||||
The storage node protocol should offer at minimum the security properties offered by the Foolscap-based protocol.
|
||||
The Foolscap-based protocol offers:
|
||||
|
||||
* **Peer authentication** by way of checked x509 certificates
|
||||
* **Message authentication** by way of TLS
|
||||
* **Message confidentiality** by way of TLS
|
||||
|
||||
* A careful configuration of the TLS connection parameters *may* also offer **forward secrecy**.
|
||||
However, Tahoe-LAFS' use of Foolscap takes no steps to ensure this is the case.
|
||||
|
||||
Discussion
|
||||
!!!!!!!!!!
|
||||
|
||||
A client node relies on a storage node to persist certain data until a future retrieval request is made.
|
||||
In this way, the client node is vulnerable to attacks which cause the data not to be persisted.
|
||||
Though this vulnerability can be (and typically is) mitigated by including redundancy in the share encoding parameters for stored data,
|
||||
it is still sensible to attempt to minimize unnecessary vulnerability to this attack.
|
||||
|
||||
One way to do this is for the client to be confident the storage node with which it is communicating is really the expected node.
|
||||
That is, for the client to perform **peer authentication** of the storage node it connects to.
|
||||
This allows it to develop a notion of that node's reputation over time.
|
||||
The more retrieval requests the node has satisfied correctly, the more likely it is to satisfy future requests correctly.
|
||||
Therefore, the protocol must include some means for verifying the identity of the storage node.
|
||||
The initialization of the client with the correct identity information is out of scope for this protocol
|
||||
(the system may be trust-on-first-use, there may be a third-party identity broker, etc).
|
||||
|
||||
With confidence that communication is proceeding with the intended storage node,
|
||||
it must also be possible to trust that data is exchanged without modification.
|
||||
That is, the protocol must include some means to perform **message authentication**.
|
||||
This is most likely done using cryptographic MACs (such as those used in TLS).
|
||||
|
||||
The messages which enable the mutable shares feature include secrets related to those shares.
|
||||
For example, the write enabler secret is used to restrict the parties with write access to mutable shares.
|
||||
It is exchanged over the network as part of a write operation.
|
||||
An attacker learning this secret can overwrite share data with garbage
|
||||
(lacking a separate encryption key,
|
||||
there is no way to write data which appears legitimate to a legitimate client).
|
||||
Therefore, **message confidentiality** is necessary when exchanging these secrets.
|
||||
**Forward secrecy** is preferred so that an attacker recording an exchange today cannot launch this attack at some future point after compromising the necessary keys.
|
||||
|
||||
Functionality
|
||||
-------------
|
||||
|
||||
Tahoe-LAFS application-level information must be transferred using this protocol.
|
||||
This information is exchanged with a dozen or so request/response-oriented messages.
|
||||
Some of these messages carry large binary payloads.
|
||||
Others are small structured-data messages.
|
||||
Some facility for expansion to support new information exchanges should also be present.
|
||||
|
||||
Solutions
|
||||
---------
|
||||
|
||||
An HTTP-based protocol, dubbed "Great Black Swamp" (or "GBS"), is described below.
|
||||
This protocol aims to satisfy the above requirements at a lower level of complexity than the current Foolscap-based protocol.
|
||||
|
||||
Communication with the storage node will take place using TLS.
|
||||
The TLS version and configuration will be dictated by an ongoing understanding of best practices.
|
||||
The storage node will present an x509 certificate during the TLS handshake.
|
||||
Storage clients will require that the certificate have a valid signature.
|
||||
The Subject Public Key Information (SPKI) hash of the certificate will constitute the storage node's identity.
|
||||
The **tub id** portion of the storage node fURL will be replaced with the SPKI hash.
|
||||
|
||||
When connecting to a storage node,
|
||||
the client will take the following steps to gain confidence it has reached the intended peer:
|
||||
|
||||
* It will perform the usual cryptographic verification of the certificate presented by the storage server.
|
||||
That is,
|
||||
it will check that the certificate itself is well-formed,
|
||||
that it is currently valid [#]_,
|
||||
and that the signature it carries is valid.
|
||||
* It will compare the SPKI hash of the certificate to the expected value.
|
||||
The specifics of the comparison are the same as for the comparison specified by `RFC 7469`_ with "sha256" [#]_.
|
||||
|
||||
To further clarify, consider this example.
|
||||
Alice operates a storage node.
|
||||
Alice generates a key pair and secures it properly.
|
||||
Alice generates a self-signed storage node certificate with the key pair.
|
||||
Alice's storage node announces (to an introducer) a fURL containing (among other information) the SPKI hash.
|
||||
Imagine the SPKI hash is ``i5xb...``.
|
||||
This results in a fURL of ``pb://i5xb...@example.com:443/g3m5...#v=2`` [#]_.
|
||||
Bob creates a client node pointed at the same introducer.
|
||||
Bob's client node receives the announcement from Alice's storage node
|
||||
(indirected through the introducer).
|
||||
|
||||
Bob's client node recognizes the fURL as referring to an HTTP-dialect server due to the ``v=2`` fragment.
|
||||
Bob's client node can now perform a TLS handshake with a server at the address in the fURL location hints
|
||||
(``example.com:443`` in this example).
|
||||
Following the above described validation procedures,
|
||||
Bob's client node can determine whether it has reached Alice's storage node or not.
|
||||
If and only if the validation procedure is successful does Bob's client node conclude it has reached Alice's storage node.
|
||||
**Peer authentication** has been achieved.
|
||||
|
||||
Additionally,
|
||||
by continuing to interact using TLS,
|
||||
Bob's client and Alice's storage node are assured of both **message authentication** and **message confidentiality**.
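As a sketch of those two checks (recognizing a GBS fURL by its ``v=2`` fragment, then comparing the presented certificate's SPKI hash against the value embedded in the fURL), a client might do something like the following.
The helper names are illustrative rather than part of this proposal, and whether the encoded hash carries base64 padding is left open here::

    from base64 import urlsafe_b64encode
    from hashlib import sha256
    from urllib.parse import urlparse

    from cryptography.hazmat.primitives.serialization import (
        Encoding,
        PublicFormat,
    )

    def is_gbs_furl(furl):
        # pb://<spki-hash>@example.com:443/<swissnum>#v=2 advertises the HTTP dialect.
        return urlparse(furl).fragment == "v=2"

    def expected_spki_hash(furl):
        # The portion of the location before "@" is the SPKI hash.
        return urlparse(furl).netloc.split("@")[0].encode("ascii")

    def certificate_matches(furl, cert):
        # ``cert`` is the x509 certificate presented during the TLS handshake.
        spki_bytes = cert.public_key().public_bytes(
            Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
        spki_hash = urlsafe_b64encode(sha256(spki_bytes).digest()).rstrip(b"=")
        return spki_hash == expected_spki_hash(furl)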
|
||||
|
||||
.. note::
|
||||
|
||||
Foolscap TubIDs are 20 bytes (SHA1 digest of the certificate).
|
||||
They are encoded with Base32 for a length of 32 bytes.
|
||||
SPKI information discussed here is 32 bytes (SHA256 digest).
|
||||
They would be encoded in Base32 for a length of 52 bytes.
|
||||
`base64url`_ provides a more compact encoding of the information while remaining URL-compatible.
|
||||
This would encode the SPKI information for a length of merely 43 bytes.
|
||||
SHA1,
|
||||
the current Foolscap hash function,
|
||||
is not a practical choice at this time due to advances made in `attacking SHA1`_.
|
||||
The selection of a safe hash function with output smaller than SHA256 could be the subject of future improvements.
|
||||
A 224 bit hash function (SHA3-224, for example) might be suitable -
|
||||
improving the encoded length to 38 bytes.
|
||||
|
||||
|
||||
Transition
|
||||
~~~~~~~~~~
|
||||
|
||||
To provide a seamless user experience during this protocol transition,
|
||||
there should be a period during which both protocols are supported by storage nodes.
|
||||
The GBS announcement will be introduced in a way that *updated client* software can recognize.
|
||||
Its introduction will also be made in such a way that *non-updated client* software disregards the new information
|
||||
(of which it cannot make any use).
|
||||
|
||||
Storage nodes will begin to operate a new GBS server.
|
||||
They may re-use their existing x509 certificate or generate a new one.
|
||||
Generation of a new certificate allows for certain non-optimal conditions to be addressed:
|
||||
|
||||
* The ``commonName`` of ``newpb_thingy`` may be changed to a more descriptive value.
|
||||
* A ``notValidAfter`` field with a timestamp in the past may be updated.
|
||||
|
||||
Storage nodes will announce a new fURL for this new HTTP-based server.
|
||||
This fURL will be announced alongside their existing Foolscap-based server's fURL.
|
||||
Such an announcement will resemble this::
|
||||
|
||||
{
|
||||
"anonymous-storage-FURL": "pb://...", # The old key
|
||||
"gbs-anonymous-storage-url": "pb://...#v=2" # The new key
|
||||
}
|
||||
|
||||
The transition process will proceed in three stages:
|
||||
|
||||
1. The first stage represents the starting conditions in which clients and servers can speak only Foolscap.
|
||||
#. The intermediate stage represents a condition in which some clients and servers can both speak Foolscap and GBS.
|
||||
#. The final stage represents the desired condition in which all clients and servers speak only GBS.
|
||||
|
||||
During the first stage only one client/server interaction is possible:
|
||||
the storage server announces only Foolscap and speaks only Foolscap.
|
||||
During the final stage there is only one supported interaction:
|
||||
the client and server are both updated and speak GBS to each other.
|
||||
|
||||
During the intermediate stage there are four supported interactions:
|
||||
|
||||
1. Both the client and server are non-updated.
|
||||
The interaction is just as it would be during the first stage.
|
||||
#. The client is updated and the server is non-updated.
|
||||
The client will see the Foolscap announcement and the lack of a GBS announcement.
|
||||
It will speak to the server using Foolscap.
|
||||
#. The client is non-updated and the server is updated.
|
||||
The client will see the Foolscap announcement.
|
||||
It will speak Foolscap to the storage server.
|
||||
#. Both the client and server are updated.
|
||||
The client will see the GBS announcement and disregard the Foolscap announcement.
|
||||
It will speak GBS to the server.
|
||||
|
||||
There is one further complication:
|
||||
the client maintains a cache of storage server information
|
||||
(to avoid continuing to rely on the introducer after it has been introduced).
|
||||
The following sequence of events is likely:
|
||||
|
||||
1. The client connects to an introducer.
|
||||
#. It receives an announcement for a non-updated storage server (Foolscap only).
|
||||
#. It caches this announcement.
|
||||
#. At some point, the storage server is updated.
|
||||
#. The client uses the information in its cache to open a Foolscap connection to the storage server.
|
||||
|
||||
Ideally,
|
||||
the client would not rely on an update from the introducer to give it the GBS fURL for the updated storage server.
|
||||
Therefore,
|
||||
when an updated client connects to a storage server using Foolscap,
|
||||
it should request the server's version information.
|
||||
If this information indicates that GBS is supported then the client should cache this GBS information.
|
||||
On subsequent connection attempts,
|
||||
it should make use of this GBS information.
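Illustratively, the cache-upgrade step might look like this (the shape of the cache and the helper names are assumptions made for the sketch, not part of the proposal)::

    def maybe_upgrade_cached_announcement(cache, server_id, version_info):
        """After a Foolscap connection, remember a GBS fURL if the server offers one.

        ``version_info`` is the mapping returned by the storage server's version
        query; ``cache`` maps server identifiers to previously cached announcements.
        """
        v1 = version_info.get(
            "http://allmydata.org/tahoe/protocols/storage/v1", {})
        gbs_furl = v1.get("gbs-anonymous-storage-url")
        if gbs_furl is not None:
            # The announcement for this server is already cached; extend it.
            cache[server_id]["gbs-anonymous-storage-url"] = gbs_furl

    def preferred_furl(announcement):
        # On later connection attempts, prefer the GBS fURL when one is known.
        return announcement.get("gbs-anonymous-storage-url",
                                announcement["anonymous-storage-FURL"])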
|
||||
|
||||
Server Details
|
||||
--------------
|
||||
|
||||
The protocol primarily enables interaction with "resources" of two types:
|
||||
storage indexes
|
||||
and shares.
|
||||
A particular resource is addressed by the HTTP request path.
|
||||
Details about the interface are encoded in the HTTP message body.
|
||||
|
||||
Message Encoding
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
The preferred encoding for HTTP message bodies is `CBOR`_.
|
||||
A request may be submitted using an alternate encoding by declaring this in the ``Content-Type`` header.
|
||||
A request may indicate its preference for an alternate encoding in the response using the ``Accept`` header.
|
||||
These two headers are used in the typical way for an HTTP application.
|
||||
|
||||
The only other encoding for which support is currently recommended is JSON.
|
||||
For HTTP messages carrying binary share data,
|
||||
this is expected to be a particularly poor encoding.
|
||||
However,
|
||||
for HTTP messages carrying small payloads of strings, numbers, and containers
|
||||
it is expected that JSON will be more convenient than CBOR for ad hoc testing and manual interaction.
|
||||
|
||||
For this same reason,
|
||||
JSON is used throughout for the examples presented here.
|
||||
Because of the simple types used throughout
|
||||
and the equivalence described in `RFC 7049`_
|
||||
these examples should be representative regardless of which of these two encodings is chosen.
|
||||
|
||||
General
|
||||
~~~~~~~
|
||||
|
||||
``GET /v1/version``
|
||||
!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Retrieve information about the version of the storage server.
|
||||
Information is returned as an encoded mapping.
|
||||
For example::
|
||||
|
||||
{ "http://allmydata.org/tahoe/protocols/storage/v1" :
|
||||
{ "maximum-immutable-share-size": 1234,
|
||||
"maximum-mutable-share-size": 1235,
|
||||
"available-space": 123456,
|
||||
"tolerates-immutable-read-overrun": true,
|
||||
"delete-mutable-shares-with-zero-length-writev": true,
|
||||
"fills-holes-with-zero-bytes": true,
|
||||
"prevents-read-past-end-of-share-data": true,
|
||||
"gbs-anonymous-storage-url": "pb://...#v=2"
|
||||
},
|
||||
"application-version": "1.13.0"
|
||||
}
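Tying the encoding negotiation together with this endpoint, an ad hoc client could fetch the version information as JSON or as CBOR.
The base URL, the third-party ``requests`` and ``cbor2`` libraries, and the exact media-type strings are illustrative assumptions rather than requirements of the protocol::

    import requests
    import cbor2

    BASE = "https://storage.example:443"  # placeholder address

    # Ask for a JSON response (convenient for manual poking around).
    as_json = requests.get(BASE + "/v1/version",
                           headers={"Accept": "application/json"}).json()

    # Ask for CBOR, the preferred encoding, and decode it explicitly.
    resp = requests.get(BASE + "/v1/version",
                        headers={"Accept": "application/cbor"})
    as_cbor = cbor2.loads(resp.content)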
|
||||
|
||||
Immutable
|
||||
---------
|
||||
|
||||
Writing
|
||||
~~~~~~~
|
||||
|
||||
``POST /v1/immutable/:storage_index``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Initialize an immutable storage index with some buckets.
|
||||
The buckets may have share data written to them once.
|
||||
Details of the buckets to create are encoded in the request body.
|
||||
For example::
|
||||
|
||||
{"renew-secret": "efgh", "cancel-secret": "ijkl",
|
||||
"share-numbers": [1, 7, ...], "allocated-size": 12345}
|
||||
|
||||
The response body includes encoded information about the created buckets.
|
||||
For example::
|
||||
|
||||
{"already-have": [1, ...], "allocated": [7, ...]}
|
||||
|
||||
Discussion
|
||||
``````````
|
||||
|
||||
We considered making this ``POST /v1/immutable`` instead.
|
||||
The motivation was to keep *storage index* out of the request URL.
|
||||
Request URLs have an elevated chance of being logged by something.
|
||||
We were concerned that having the *storage index* logged may increase some risks.
|
||||
However, we decided this does not matter because the *storage index* can only be used to read the share (which is ciphertext).
|
||||
TODO Verify this conclusion.
|
||||
|
||||
``PUT /v1/immutable/:storage_index/:share_number``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Write data for the indicated share.
|
||||
The share number must belong to the storage index.
|
||||
The request body is the raw share data (i.e., ``application/octet-stream``).
|
||||
*Content-Range* requests are encouraged for large transfers.
|
||||
For example,
|
||||
for a 1MiB share the data can be broken into 8 128KiB chunks.
|
||||
Each chunk can be *PUT* separately with the appropriate *Content-Range* header.
|
||||
The server must recognize when all of the data has been received and mark the share as complete
|
||||
(which it can do because it was informed of the size when the storage index was initialized).
|
||||
Clients should upload chunks in re-assembly order.
|
||||
Servers may reject out-of-order chunks for implementation simplicity.
|
||||
If an individual *PUT* fails then only a limited amount of effort is wasted on the necessary retry.
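A simplified sketch of such a chunked upload (not a reference client; the base URL and the error handling are placeholders)::

    import requests

    def upload_share(base_url, storage_index, share_number, data, chunk_size=128 * 1024):
        """PUT one immutable share in Content-Range chunks, in re-assembly order."""
        url = "{}/v1/immutable/{}/{}".format(base_url, storage_index, share_number)
        total = len(data)
        for start in range(0, total, chunk_size):
            chunk = data[start:start + chunk_size]
            end = start + len(chunk) - 1
            headers = {
                "Content-Type": "application/octet-stream",
                "Content-Range": "bytes {}-{}/{}".format(start, end, total),
            }
            response = requests.put(url, data=chunk, headers=headers)
            # An individual failed PUT can simply be retried for this chunk only.
            response.raise_for_status()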
|
||||
|
||||
.. think about copying https://developers.google.com/drive/api/v2/resumable-upload
|
||||
|
||||
``POST /v1/immutable/:storage_index/:share_number/corrupt``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Advise the server the data read from the indicated share was corrupt.
|
||||
The request body includes a human-meaningful string with details about the corruption.
|
||||
It also includes potentially important details about the share.
|
||||
|
||||
For example::
|
||||
|
||||
{"reason": "expected hash abcd, got hash efgh"}
|
||||
|
||||
.. share-type, storage-index, and share-number are inferred from the URL
|
||||
|
||||
Reading
|
||||
~~~~~~~
|
||||
|
||||
``GET /v1/immutable/:storage_index/shares``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Retrieve a list indicating all shares available for the indicated storage index.
|
||||
For example::
|
||||
|
||||
[1, 5]
|
||||
|
||||
``GET /v1/immutable/:storage_index?share=:s0&share=:sN&offset=o1&size=z0&offset=oN&size=zN``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Read data from the indicated immutable shares.
|
||||
If ``share`` query parameters are given, select only those shares for reading.
|
||||
Otherwise, select all shares present.
|
||||
If ``size`` and ``offset`` query parameters are given,
|
||||
only the portions thus identified of the selected shares are returned.
|
||||
Otherwise, all data from the selected shares is returned.
|
||||
|
||||
The response body contains a mapping giving the read data.
|
||||
For example::
|
||||
|
||||
{
|
||||
3: ["foo", "bar"],
|
||||
7: ["baz", "quux"]
|
||||
}
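For instance, a client reading two byte ranges from shares 1 and 5 might build the query string shown above like this (sketch only; the host and storage index are placeholders)::

    import requests

    base_url = "https://storage.example:443"   # placeholder
    storage_index = "exampleindex"             # placeholder

    response = requests.get(
        base_url + "/v1/immutable/" + storage_index,
        params=[("share", 1), ("share", 5),
                ("offset", 0), ("size", 1024),
                ("offset", 4096), ("size", 1024)],
    )
    # The requested URL ends with:
    #   ?share=1&share=5&offset=0&size=1024&offset=4096&size=1024
    # and the response body maps share numbers to the data read from them.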
|
||||
|
||||
Discussion
|
||||
``````````
|
||||
|
||||
Offset and size of the requested data are specified here as query arguments.
|
||||
Instead, this information could be present in a ``Range`` header in the request.
|
||||
This is the more obvious choice and leverages an HTTP feature built for exactly this use-case.
|
||||
However, HTTP requires that the ``Content-Type`` of the response to "range requests" be ``multipart/...``.
|
||||
The ``multipart`` major type brings along string sentinel delimiting as a means to frame the different response parts.
|
||||
There are many drawbacks to this framing technique:
|
||||
|
||||
1. It is resource-intensive to generate.
|
||||
2. It is resource-intensive to parse.
|
||||
3. It is complex to parse safely [#]_ [#]_ [#]_ [#]_.
|
||||
|
||||
Mutable
|
||||
-------
|
||||
|
||||
Writing
|
||||
~~~~~~~
|
||||
|
||||
``POST /v1/mutable/:storage_index/read-test-write``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
General purpose read-test-and-write operation for mutable storage indexes.
|
||||
A mutable storage index is also called a "slot"
|
||||
(particularly by the existing Tahoe-LAFS codebase).
|
||||
The first write operation on a mutable storage index creates it
|
||||
(that is,
|
||||
there is no separate "create this storage index" operation as there is for the immutable storage index type).
|
||||
|
||||
The request body includes the secrets necessary to rewrite to the shares
|
||||
along with test, read, and write vectors for the operation.
|
||||
For example::
|
||||
|
||||
{
|
||||
"secrets": {
|
||||
"write-enabler": "abcd",
|
||||
"lease-renew": "efgh",
|
||||
"lease-cancel": "ijkl"
|
||||
},
|
||||
"test-write-vectors": {
|
||||
0: {
|
||||
"test": [{
|
||||
"offset": 3,
|
||||
"size": 5,
|
||||
"operator": "eq",
|
||||
"specimen": "hello"
|
||||
}, ...],
|
||||
"write": [{
|
||||
"offset": 9,
|
||||
"data": "world"
|
||||
}, ...],
|
||||
"new-length": 5
|
||||
}
|
||||
},
|
||||
"read-vector": [{"offset": 3, "size": 12}, ...]
|
||||
}
|
||||
|
||||
The response body contains a boolean indicating whether the tests all succeed
|
||||
(and writes were applied) and a mapping giving read data (pre-write).
|
||||
For example::
|
||||
|
||||
{
|
||||
"success": true,
|
||||
"data": {
|
||||
0: ["foo"],
|
||||
5: ["bar"],
|
||||
...
|
||||
}
|
||||
}
|
||||
|
||||
Reading
|
||||
~~~~~~~
|
||||
|
||||
``GET /v1/mutable/:storage_index/shares``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Retrieve a list indicating all shares available for the indicated storage index.
|
||||
For example::
|
||||
|
||||
[1, 5]
|
||||
|
||||
``GET /v1/mutable/:storage_index?share=:s0&share=:sN&offset=:o1&size=:z0&offset=:oN&size=:zN``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Read data from the indicated mutable shares.
|
||||
Just like ``GET /v1/immutable/:storage_index``.
|
||||
|
||||
``POST /v1/mutable/:storage_index/:share_number/corrupt``
|
||||
!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
|
||||
|
||||
Advise the server the data read from the indicated share was corrupt.
|
||||
Just like the immutable version.
|
||||
|
||||
.. _RFC 7469: https://tools.ietf.org/html/rfc7469#section-2.4
|
||||
|
||||
.. _RFC 7049: https://tools.ietf.org/html/rfc7049#section-4
|
||||
|
||||
.. _CBOR: http://cbor.io/
|
||||
|
||||
.. [#]
|
||||
The security value of checking ``notValidBefore`` and ``notValidAfter`` is not entirely clear.
|
||||
The arguments which apply to web-facing certificates do not seem to apply
|
||||
(due to the decision for Tahoe-LAFS to operate independently of the web-oriented CA system).
|
||||
|
||||
Arguably, complexity is reduced by allowing an existing TLS implementation which wants to make these checks make them
|
||||
(compared to including additional code to either bypass them or disregard their results).
|
||||
Reducing complexity, at least in general, is often good for security.
|
||||
|
||||
On the other hand, checking the validity time period forces certificate regeneration
|
||||
(which comes with its own set of complexity).
|
||||
|
||||
A possible compromise is to recommend certificates with validity periods of many years or decades.
|
||||
"Recommend" may be read as "provide software supporting the generation of".
|
||||
|
||||
What about key theft?
|
||||
If certificates are valid for years then a successful attacker can pretend to be a valid storage node for years.
|
||||
However, short-validity-period certificates are no help in this case.
|
||||
The attacker can generate new, valid certificates using the stolen keys.
|
||||
|
||||
Therefore, the only recourse to key theft
|
||||
(really *identity theft*)
|
||||
is to burn the identity and generate a new one.
|
||||
Burning the identity is a non-trivial task.
|
||||
It is worth solving but it is not solved here.
|
||||
|
||||
.. [#]
|
||||
More simply::
|
||||
|
||||
from hashlib import sha256
|
||||
from cryptography.hazmat.primitives.serialization import (
|
||||
Encoding,
|
||||
PublicFormat,
|
||||
)
|
||||
from pybase64 import urlsafe_b64encode
|
||||
|
||||
def check_tub_id(tub_id, cert):
|
||||
spki_bytes = cert.public_key().public_bytes(Encoding.DER, PublicFormat.SubjectPublicKeyInfo)
|
||||
spki_sha256 = sha256(spki_bytes).digest()
|
||||
spki_encoded = urlsafe_b64encode(spki_sha256)
|
||||
assert spki_encoded == tub_id
|
||||
|
||||
Note we use `base64url`_ rather than the Foolscap- and Tahoe-LAFS-preferred Base32.
|
||||
|
||||
.. [#]
|
||||
Other schemes for differentiating between the two server types are possible.
|
||||
If the tubID length remains different,
|
||||
that provides an unambiguous (if obscure) signal about which protocol to use.
|
||||
Or a different scheme could be adopted
|
||||
(``[x-]pb+http``, ``x-tahoe+http``, ``x-gbs`` come to mind).
|
||||
|
||||
.. [#]
|
||||
https://www.cvedetails.com/cve/CVE-2017-5638/
|
||||
.. [#]
|
||||
https://pivotal.io/security/cve-2018-1272
|
||||
.. [#]
|
||||
https://nvd.nist.gov/vuln/detail/CVE-2017-5124
|
||||
.. [#]
|
||||
https://efail.de/
|
||||
|
||||
.. _base64url: https://tools.ietf.org/html/rfc7515#appendix-C
|
||||
|
||||
.. _attacking SHA1: https://en.wikipedia.org/wiki/SHA-1#Attacks
|
@ -14,7 +14,4 @@ index only lists the files that are in .rst format.
|
||||
:maxdepth: 2
|
||||
|
||||
leasedb
|
||||
magic-folder/filesystem-integration
|
||||
magic-folder/remote-to-local-sync
|
||||
magic-folder/user-interface-design
|
||||
magic-folder/multi-party-conflict-detection
|
||||
http-storage-node-protocol
|
||||
|
@ -1,118 +0,0 @@
|
||||
Magic Folder local filesystem integration design
|
||||
================================================
|
||||
|
||||
*Scope*
|
||||
|
||||
This document describes how to integrate the local filesystem with Magic
|
||||
Folder in an efficient and reliable manner. For now we ignore Remote to
|
||||
Local synchronization; the design and implementation of this is scheduled
|
||||
for a later time. We also ignore multiple writers for the same Magic
|
||||
Folder, which may or may not be supported in future. The design here will
|
||||
be updated to account for those features in later Objectives. Objective 3
|
||||
may require modifying the database schema or operation, and Objective 5
|
||||
may modify the User interface.
|
||||
|
||||
Tickets on the Tahoe-LAFS trac with the `otf-magic-folder-objective2`_
|
||||
keyword are within the scope of the local filesystem integration for
|
||||
Objective 2.
|
||||
|
||||
.. _otf-magic-folder-objective2: https://tahoe-lafs.org/trac/tahoe-lafs/query?status=!closed&keywords=~otf-magic-folder-objective2
|
||||
|
||||
.. _filesystem_integration-local-scanning-and-database:
|
||||
|
||||
*Local scanning and database*
|
||||
|
||||
When a Magic-Folder-enabled node starts up, it scans all directories
|
||||
under the local directory and adds every file to a first-in first-out
|
||||
"scan queue". When processing the scan queue, redundant uploads are
|
||||
avoided by using the same mechanism the Tahoe backup command uses: we
|
||||
keep track of previous uploads by recording each file's metadata such as
|
||||
size, ``ctime`` and ``mtime``. This information is stored in a database,
|
||||
referred to from now on as the magic folder db. Using this recorded
|
||||
state, we ensure that when Magic Folder is subsequently started, the
|
||||
local directory tree can be scanned quickly by comparing current
|
||||
filesystem metadata with the previously recorded metadata. Each file
|
||||
referenced in the scan queue is uploaded only if its metadata differs at
|
||||
the time it is processed. If a change event is detected for a file that
|
||||
is already queued (and therefore will be processed later), the redundant
|
||||
event is ignored.
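
For illustration, the per-file check could look like the following sketch (the ``get_recorded_statinfo`` lookup is a hypothetical helper standing in for the real magic folder db query)::

    import os

    def needs_upload(db, relpath, local_path):
        """Return True if the file's current metadata differs from the
        metadata recorded at its last successful upload."""
        current = os.stat(local_path)
        recorded = db.get_recorded_statinfo(relpath)  # hypothetical: (size, mtime, ctime) or None
        if recorded is None:
            return True  # never uploaded before
        size, mtime, ctime = recorded
        return (current.st_size, current.st_mtime, current.st_ctime) != (size, mtime, ctime)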
|
||||
|
||||
To implement the magic folder db, we will use an SQLite schema that
|
||||
initially is the existing Tahoe-LAFS backup schema. This schema may
|
||||
change in later objectives; this will cause no backward compatibility
|
||||
problems, because this new feature will be developed on a branch that
|
||||
makes no compatibility guarantees. However we will have a separate SQLite
|
||||
database file and separate mutex lock just for Magic Folder. This avoids
|
||||
usability problems related to mutual exclusion. (If a single file and
|
||||
lock were used, a backup would block Magic Folder updates for a long
|
||||
time, and a user would not be able to tell when backups are possible
|
||||
because Magic Folder would acquire a lock at arbitrary times.)
|
||||
|
||||
|
||||
*Eventual consistency property*
|
||||
|
||||
During the process of reading a file in order to upload it, it is not
|
||||
possible to prevent further local writes. Such writes will result in
|
||||
temporary inconsistency (that is, the uploaded file will not reflect
|
||||
what the contents of the local file were at any specific time). Eventual
|
||||
consistency is reached when the queue of pending uploads is empty. That
|
||||
is, a consistent snapshot will be achieved eventually when local writes
|
||||
to the target folder cease for a sufficiently long period of time.
|
||||
|
||||
|
||||
*Detecting filesystem changes*
|
||||
|
||||
For the Linux implementation, we will use the `inotify`_ Linux kernel
|
||||
subsystem to gather events on the local Magic Folder directory tree. This
|
||||
implementation was already present in Tahoe-LAFS 1.9.0, but needs to be
|
||||
changed to gather directory creation and move events, in addition to the
|
||||
events indicating that a file has been written that are gathered by the
|
||||
current code.
|
||||
|
||||
.. _`inotify`: https://en.wikipedia.org/wiki/Inotify
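
For the Linux case, a minimal sketch using Twisted's ``inotify`` wrapper (the watched path and the callback body are illustrative only)::

    from twisted.internet import inotify, reactor
    from twisted.python.filepath import FilePath

    def on_change(ignored, path, mask):
        # The real client would enqueue this path on the scan queue; directory
        # creation events would additionally trigger a scan of the new directory.
        print(path.path, ", ".join(inotify.humanReadableMask(mask)))

    notifier = inotify.INotify()
    notifier.startReading()
    notifier.watch(
        FilePath("/home/human"),   # hypothetical local magic folder directory
        autoAdd=True,              # automatically watch newly created subdirectories
        recursive=True,
        callbacks=[on_change],
    )
    reactor.run()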
|
||||
|
||||
For the Windows implementation, we will use the ``ReadDirectoryChangesW``
|
||||
Win32 API. The prototype implementation simulates a Python interface to
|
||||
the inotify API in terms of ``ReadDirectoryChangesW``, allowing most of
|
||||
the code to be shared across platforms.
|
||||
|
||||
The alternative of using `NTFS Change Journals`_ for Windows was
|
||||
considered, but appears to be more complicated and does not provide any
|
||||
additional functionality over the scanning approach described above.
|
||||
The Change Journal mechanism is also only available for NTFS filesystems,
|
||||
but FAT32 filesystems are still common in user installations of Windows.
|
||||
|
||||
.. _`NTFS Change Journals`: https://msdn.microsoft.com/en-us/library/aa363803%28VS.85%29.aspx
|
||||
|
||||
When we detect the creation of a new directory below the local Magic
|
||||
Folder directory, we create it in the Tahoe-LAFS filesystem, and also
|
||||
scan the new local directory for new files. This scan is necessary to
|
||||
avoid missing events for creation of files in a new directory before it
|
||||
can be watched, and to correctly handle cases where an existing directory
|
||||
is moved to be under the local Magic Folder directory.
|
||||
|
||||
|
||||
*User interface*
|
||||
|
||||
The Magic Folder local filesystem integration will initially have a
|
||||
provisional configuration file-based interface that may not be ideal from
|
||||
a usability perspective. Creating our local filesystem integration in
|
||||
this manner will allow us to use and test it independently of the rest of
|
||||
the Magic Folder software components. We will focus greater attention on
|
||||
user interface design as a later milestone in our development roadmap.
|
||||
|
||||
The configuration file, ``tahoe.cfg``, must define a target local
|
||||
directory to be synchronized. Provisionally, this configuration will
|
||||
replace the current ``[drop_upload]`` section::

   [magic_folder]
   enabled = true
   local.directory = "/home/human"
|
||||
|
||||
When a filesystem directory is first configured for Magic Folder, the user
|
||||
needs to create the remote Tahoe-LAFS directory using ``tahoe mkdir``,
|
||||
and configure the Magic-Folder-enabled node with its URI (e.g. by putting
|
||||
it in a file ``private/magic_folder_dircap``). If there are existing
|
||||
files in the local directory, they will be uploaded as a result of the
|
||||
initial scan described earlier.
|
||||
|
@ -1,373 +0,0 @@
|
||||
Multi-party Conflict Detection
|
||||
==============================
|
||||
|
||||
The current Magic-Folder remote conflict detection design does not properly detect remote conflicts
|
||||
for groups of three or more parties. This design is specified in the "Fire Dragon" section of this document:
|
||||
https://github.com/tahoe-lafs/tahoe-lafs/blob/2551.wip.2/docs/proposed/magic-folder/remote-to-local-sync.rst#fire-dragons-distinguishing-conflicts-from-overwrites
|
||||
|
||||
This Tahoe-LAFS trac ticket comment outlines a scenario with
|
||||
three parties in which a remote conflict is falsely detected:
|
||||
|
||||
.. _`ticket comment`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2551#comment:22
|
||||
|
||||
|
||||
Summary and definitions
|
||||
=======================
|
||||
|
||||
Abstract file: a file being shared by a Magic Folder.
|
||||
|
||||
Local file: a file in a client's local filesystem corresponding to an abstract file.
|
||||
|
||||
Relative path: the path of an abstract or local file relative to the Magic Folder root.
|
||||
|
||||
Version: a snapshot of an abstract file, with associated metadata, that is uploaded by a Magic Folder client.
|
||||
|
||||
A version is associated with the file's relative path, its contents, and
|
||||
mtime and ctime timestamps. Versions also have a unique identity.
|
||||
|
||||
Follows relation:
|
||||
* If and only if a change to a client's local file at relative path F, resulting in an upload of version V',
was made when the client already had version V of that file, then we say that V' directly follows V.
|
||||
* The follows relation is the irreflexive transitive closure of the "directly follows" relation.
|
||||
|
||||
The follows relation is transitive and acyclic, and therefore defines a DAG called the
|
||||
Version DAG. Different abstract files correspond to disconnected sets of nodes in the Version DAG
|
||||
(in other words there are no "follows" relations between different files).
|
||||
|
||||
The DAG is only ever extended, not mutated.
|
||||
|
||||
The desired behaviour for initially classifying overwrites and conflicts is as follows:
|
||||
|
||||
* if a client Bob currently has version V of a file at relative path F, and it sees a new version V'
|
||||
of that file in another client Alice's DMD, such that V' follows V, then the write of the new version
|
||||
is initially an overwrite and should be to the same filename.
|
||||
* if, in the same situation, V' does not follow V, then the write of the new version should be
|
||||
classified as a conflict.
|
||||
|
||||
The existing :doc:`remote-to-local-sync` document defines when an initial
|
||||
overwrite should be reclassified as a conflict.
|
||||
|
||||
The above definitions completely specify the desired solution of the false
|
||||
conflict behaviour described in the `ticket comment`_. However, they do not give
|
||||
a concrete algorithm to compute the follows relation, or a representation in the
|
||||
Tahoe-LAFS file store of the metadata needed to compute it.
|
||||
|
||||
We will consider two alternative designs, proposed by Leif Ryge and
|
||||
Zooko Wilcox-O'Hearn, that aim to fill this gap.
|
||||
|
||||
|
||||
|
||||
Leif's Proposal: Magic-Folder "single-file" snapshot design
|
||||
===========================================================
|
||||
|
||||
Abstract
|
||||
--------
|
||||
|
||||
We propose a relatively simple modification to the initial Magic Folder design which
|
||||
adds merkle DAGs of immutable historical snapshots for each file. The full history
|
||||
does not necessarily need to be retained, and the choice of how much history to retain
|
||||
can potentially be made on a per-file basis.
|
||||
|
||||
Motivation:
|
||||
-----------
|
||||
|
||||
no SPOFs, no admins
|
||||
```````````````````
|
||||
|
||||
Additionally, the initial design had two cases of excess authority:
|
||||
|
||||
1. The magic folder administrator (inviter) has everyone's write-caps and is thus essentially "root"
|
||||
2. Each client shares ambient authority and can delete anything or everything and
|
||||
(assuming there is not a conflict) the data will be deleted from all clients. So, each client
|
||||
is effectively "root" too.
|
||||
|
||||
Thus, while it is useful for file synchronization, the initial design is a much less safe place
to store data than a single mutable Tahoe directory (because more client computers have the
ability to delete it).
|
||||
|
||||
|
||||
Glossary
|
||||
--------
|
||||
|
||||
- merkle DAG: like a merkle tree but with multiple roots, and with each node potentially having multiple parents
|
||||
- magic folder: a logical directory that can be synchronized between many clients
|
||||
(devices, users, ...) using a Tahoe-LAFS storage grid
|
||||
- client: a Magic-Folder-enabled Tahoe-LAFS client instance that has access to a magic folder
|
||||
- DMD: "distributed mutable directory", a physical Tahoe-LAFS mutable directory.
|
||||
Each client has the write cap to their own DMD, and read caps to all other clients' DMDs
|
||||
(as in the original Magic Folder design).
|
||||
- snapshot: a reference to a version of a file; represented as an immutable directory containing
|
||||
an entry called "content" (pointing to the immutable file containing the file's contents),
|
||||
and an entry called "parent0" (pointing to a parent snapshot), and optionally parent1 through
|
||||
parentN pointing at other parents. The Magic Folder snapshot object is conceptually very similar
|
||||
to a git commit object, except that it is created automatically and it records the history of an
|
||||
individual file rather than an entire repository. Also, commits do not need to have authors
|
||||
(although an author field could be easily added later).
|
||||
- deletion snapshot: immutable directory containing no content entry (only one or more parents)
|
||||
- capability: a Tahoe-LAFS diminishable cryptographic capability
|
||||
- cap: short for capability
|
||||
- conflict: the situation when another client's current snapshot for a file is different than our current snapshot, and is not a descendant of ours.
|
||||
- overwrite: the situation when another client's current snapshot for a file is a (not necessarily direct) descendant of our current snapshot.
|
||||
|
||||
|
||||
Overview
|
||||
--------
|
||||
|
||||
This new design will track the history of each file using "snapshots" which are
|
||||
created at each upload. Each snapshot will specify one or more parent snapshots,
|
||||
forming a directed acyclic graph. A Magic-Folder user's DMD uses a flattened directory
|
||||
hierarchy naming scheme, as in the original design. But, instead of pointing directly
|
||||
at file contents, each file name will link to that user's latest snapshot for that file.
|
||||
|
||||
Inside the DMD there will also be an immutable directory containing the client's subscriptions
(read-caps to other clients' DMDs).
|
||||
|
||||
Clients periodically poll each other's DMDs. When they see the current snapshot for a file is
|
||||
different than their own current snapshot for that file, they immediately begin downloading its
|
||||
contents and then walk backwards through the DAG from the new snapshot until they find their own
|
||||
snapshot or a common ancestor.
|
||||
|
||||
For the common ancestor search to be efficient, the client will need to keep a local store (in the magic folder db) of all of the snapshots
|
||||
(but not their contents) between the oldest current snapshot of any of their subscriptions and their own current snapshot.
|
||||
See "local cache purging policy" below for more details.
|
||||
|
||||
If the new snapshot is a descendant of the client's existing snapshot, then this update
|
||||
is an "overwrite" - like a git fast-forward. So, when the download of the new file completes it can overwrite
|
||||
the existing local file with the new contents and update its dmd to point at the new snapshot.
|
||||
|
||||
If the new snapshot is not a descendant of the client's current snapshot, then the update is a
|
||||
conflict. The new file is downloaded and named $filename.conflict-$user1,$user2 (the suffix lists the
other subscriptions that have that version as their current version).
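
A minimal sketch of that classification, assuming locally cached ``Snapshot`` objects that expose a ``cap`` identity and a ``parents`` list (both names are illustrative)::

    from collections import deque

    def classify(remote_tip, local_tip):
        """Walk backwards from the remote snapshot; reaching our own current
        snapshot means the update is an overwrite (a fast-forward), otherwise
        it is a conflict."""
        seen = set()
        queue = deque([remote_tip])
        while queue:
            snapshot = queue.popleft()
            if snapshot.cap == local_tip.cap:
                return "overwrite"
            if snapshot.cap in seen:
                continue
            seen.add(snapshot.cap)
            queue.extend(snapshot.parents)   # parents are other cached snapshots
        return "conflict"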
|
||||
|
||||
Changes to the local .conflict- file are not tracked. When that file disappears
|
||||
(either by deletion, or being renamed) a new snapshot for the conflicting file is
|
||||
created which has two parents - the client's snapshot prior to the conflict, and the
|
||||
new conflicting snapshot. If multiple .conflict files are deleted or renamed in a short
|
||||
period of time, a single conflict-resolving snapshot with more than two parents can be created.
|
||||
|
||||
! I think this behavior will confuse users.
|
||||
|
||||
Tahoe-LAFS snapshot objects
|
||||
---------------------------
|
||||
|
||||
These Tahoe-LAFS snapshot objects only track the history of a single file, not a directory hierarchy.
|
||||
Snapshot objects contain only two field types:
|
||||
- ``Content``: an immutable capability of the file contents (omitted if deletion snapshot)
|
||||
- ``Parent0..N``: immutable capabilities representing parent snapshots
|
||||
|
||||
An interesting side effect of this Tahoe snapshot object design is that there is no
snapshot author. The only notion of identity in the Magic-Folder system is the write capability of the user's DMD.
|
||||
|
||||
The snapshot object is an immutable directory which looks like this::

    content -> immutable cap to file content
    parent0 -> immutable cap to a parent snapshot object
    parent1..N -> more parent snapshots
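
As a sketch of how such an object could be created, assuming a local Tahoe-LAFS web gateway at ``127.0.0.1:3456`` and the web API's ``t=mkdir-immutable`` operation (the URL, helper name, and exact child format here are illustrative, not a confirmed interface)::

    import json
    import requests   # any HTTP client would do

    NODE = "http://127.0.0.1:3456"   # hypothetical local Tahoe-LAFS web gateway

    def make_snapshot(content_cap, parent_caps):
        # Children of an immutable directory must themselves be immutable,
        # so only read-only caps are linked here.
        children = {"content": ["filenode", {"ro_uri": content_cap, "metadata": {}}]}
        for i, cap in enumerate(parent_caps):
            children["parent%d" % i] = ["dirnode", {"ro_uri": cap, "metadata": {}}]
        response = requests.post(
            NODE + "/uri?t=mkdir-immutable",
            data=json.dumps(children),
        )
        response.raise_for_status()
        return response.text.strip()   # cap of the new immutable snapshot directory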
|
||||
|
||||
|
||||
Snapshot Author Identity
|
||||
------------------------
|
||||
|
||||
Snapshot identity might become an important feature so that bad actors
|
||||
can be recognized and other clients can stop "subscribing" to (polling for) updates from them.
|
||||
|
||||
Perhaps snapshots could be signed by the user's Magic-Folder write key for this purpose? Probably a bad idea to reuse the write-cap key for this. Better to introduce ed25519 identity keys which can (optionally) sign snapshot contents and store the signature as another member of the immutable directory.
|
||||
|
||||
|
||||
Conflict Resolution
|
||||
-------------------
|
||||
|
||||
detection of conflicts
|
||||
``````````````````````
|
||||
|
||||
A Magic-Folder client updates a given file's current snapshot link to a snapshot which is a descendant
|
||||
of the previous snapshot. For a given file, let's say "file1", Alice can detect that Bob's DMD has a "file1"
|
||||
that links to a snapshot which conflicts. Two snapshots conflict if one is not an ancestor of the other.
|
||||
|
||||
|
||||
a possible UI for resolving conflicts
|
||||
`````````````````````````````````````
|
||||
|
||||
If Alice links a conflicting snapshot object for a file named "file1",
|
||||
Bob and Carole will see a file in their Magic-Folder called "file1.conflicted.Alice".
|
||||
Alice conversely will see an additional file called "file1.conflicted.previous".
|
||||
If Alice wishes to resolve the conflict with her new version of the file then
|
||||
she simply deletes the file called "file1.conflicted.previous". If she wants to
|
||||
choose the other version then she moves it into place:
|
||||
|
||||
mv file1.conflicted.previous file1
|
||||
|
||||
|
||||
This scheme works for any number of conflicts. Bob, for instance, could choose
the same resolution for the conflict, like this:
|
||||
|
||||
mv file1.conflicted.Alice file1
|
||||
|
||||
|
||||
Deletion propagation and eventual Garbage Collection
|
||||
----------------------------------------------------
|
||||
|
||||
When a user deletes a file, this is represented by a link from their DMD file
|
||||
object to a deletion snapshot. Eventually all users will link this deletion
|
||||
snapshot into their DMD. When all users have the link then they locally cache
|
||||
the deletion snapshot and remove the link to that file in their DMD.
|
||||
A deletion can of course be undone; this means creating a new snapshot
object that declares itself a descendant of the deletion snapshot.
|
||||
|
||||
Clients periodically renew leases to all capabilities recursively linked
|
||||
to in their DMD. Files which are unlinked by ALL the users of a
|
||||
given Magic-Folder will eventually be garbage collected.
|
||||
|
||||
Lease expiry duration must be tuned properly by storage servers such that
garbage collection does not occur too frequently.
|
||||
|
||||
|
||||
|
||||
Performance Considerations
|
||||
--------------------------
|
||||
|
||||
local changes
|
||||
`````````````
|
||||
|
||||
Our old scheme requires two remote Tahoe-LAFS operations per local file modification:
|
||||
1. upload new file contents (as an immutable file)
|
||||
2. modify mutable directory (DMD) to link to the immutable file cap
|
||||
|
||||
Our new scheme requires three remote operations:
|
||||
1. upload new file contents (as an immutable file)
|
||||
2. upload immutable directory representing Tahoe-LAFS snapshot object
|
||||
3. modify mutable directory (DMD) to link to the immutable snapshot object
|
||||
|
||||
remote changes
|
||||
``````````````
|
||||
|
||||
Our old scheme requires one remote Tahoe-LAFS operation per remote file modification (not counting the polling of the dmd):
|
||||
1. Download new file content
|
||||
|
||||
Our new scheme requires a minimum of two remote operations (not counting the polling of the dmd) for conflicting downloads, or three remote operations for overwrite downloads:
|
||||
1. Download new snapshot object
|
||||
2. Download the content it points to
|
||||
3. If the download is an overwrite, modify the DMD to indicate that the downloaded version is their current version.
|
||||
|
||||
If the new snapshot is not a direct descendant of our current snapshot or the other party's previous snapshot we saw, we will also need to download more snapshots to determine if it is a conflict or an overwrite. However, those can be done in
|
||||
parallel with the content download since we will need to download the content in either case.
|
||||
|
||||
While the old scheme is obviously more efficient, we think that the properties provided by the new scheme make it worth the additional cost.
|
||||
|
||||
Physical updates to the DMD obviously need to be serialized, so multiple logical updates should be combined when an update is already in progress.
|
||||
|
||||
conflict detection and local caching
|
||||
````````````````````````````````````
|
||||
|
||||
Local caching of snapshots is important for performance.
|
||||
We refer to the client's local snapshot cache as the ``magic-folder db``.
|
||||
|
||||
Conflict detection can be expensive because it may require the client
to download many snapshots from the other user's DMD in order to try
to find its own current snapshot or a descendant. The cost of scanning
|
||||
the remote DMDs should not be very high unless the client conducting the
|
||||
scan has lots of history to download because of being offline for a long
|
||||
time while many new snapshots were distributed.
|
||||
|
||||
|
||||
local cache purging policy
|
||||
``````````````````````````
|
||||
|
||||
The client's current snapshot for each file should be cached at all times.
|
||||
When all clients' views of a file are synchronized (they all have the same
|
||||
snapshot for that file), no ancestry for that file needs to be cached.
|
||||
When clients' views of a file are *not* synchronized, the most recent
|
||||
common ancestor of all clients' snapshots must be kept cached, as must
|
||||
all intermediate snapshots.
|
||||
|
||||
|
||||
Local Merge Property
|
||||
--------------------
|
||||
|
||||
Bob can, in fact, set a pre-existing directory (with files) as his new Magic-Folder directory, resulting
in a merge of the Magic-Folder with Bob's local directory. Filename collisions will result in conflicts
because Bob's new snapshots are not descendants of the existing Magic-Folder file snapshots.
|
||||
|
||||
|
||||
Example: simultaneous update with four parties:
|
||||
|
||||
1. A, B, C, D are in sync for file "foo" at snapshot X
|
||||
2. A and B simultaneously change the file, creating snapshots XA and XB (both descendants of X).
|
||||
3. C hears about XA first, and D hears about XB first. Both accept an overwrite.
|
||||
4. All four parties hear about the other update they hadn't heard about yet.
|
||||
5. Result:
|
||||
- everyone's local file "foo" has the content pointed to by the snapshot in their DMD's "foo" entry
|
||||
- A and C's DMDs each have the "foo" entry pointing at snapshot XA
|
||||
- B and D's DMDs each have the "foo" entry pointing at snapshot XB
|
||||
- A and C have a local file called foo.conflict-B,D with XB's content
|
||||
- B and D have a local file called foo.conflict-A,C with XA's content
|
||||
|
||||
Later:
|
||||
|
||||
- Everyone ignores the conflict, and continues updating their local "foo", but slowly enough that there are no further conflicts, so that A and C remain in sync with each other, and B and D remain in sync with each other.
|
||||
|
||||
- A and C's foo.conflict-B,D file continues to be updated with the latest version of the file B and D are working on, and vice-versa.
|
||||
|
||||
- A and C edit the file at the same time again, causing a new conflict.
|
||||
|
||||
- Local files are now:
|
||||
|
||||
A: "foo", "foo.conflict-B,D", "foo.conflict-C"
|
||||
|
||||
C: "foo", "foo.conflict-B,D", "foo.conflict-A"
|
||||
|
||||
B and D: "foo", "foo.conflict-A", "foo.conflict-C"
|
||||
|
||||
- Finally, D decides to look at "foo.conflict-A" and "foo.conflict-C", and they manually integrate (or decide to ignore) the differences into their own local file "foo".
|
||||
|
||||
- D deletes their conflict files.
|
||||
|
||||
- D's DMD now points to a snapshot that is a descendant of everyone else's current snapshot, resolving all conflicts.
|
||||
|
||||
- The conflict files on A, B, and C disappear, and everyone's local file "foo" contains D's manually-merged content.
|
||||
|
||||
|
||||
Daira: I think it is too complicated to include multiple nicknames in the .conflict files
|
||||
(e.g. "foo.conflict-B,D"). It should be sufficient to have one file for each other client,
|
||||
reflecting that client's latest version, regardless of who else it conflicts with.
|
||||
|
||||
|
||||
Zooko's Design (as interpreted by Daira)
|
||||
========================================
|
||||
|
||||
A version map is a mapping from client nickname to version number.
|
||||
|
||||
Definition: a version map M' strictly-follows a mapping M iff for every entry c->v
|
||||
in M, there is an entry c->v' in M' such that v' > v.
|
||||
|
||||
|
||||
Each client maintains a 'local version map' and a 'conflict version map' for each file
|
||||
in its magic folder db.
|
||||
If it has never written the file, then the entry for its own nickname in the local version
|
||||
map is zero. The conflict version map only contains entries for nicknames B where
|
||||
"$FILENAME.conflict-$B" exists.
|
||||
|
||||
When a client A uploads a file, it increments the version for its own nickname in its
|
||||
local version map for the file, and includes that map as metadata with its upload.
|
||||
|
||||
A download by client A from client B is an overwrite iff the downloaded version map
|
||||
strictly-follows A's local version map for that file; in this case A replaces its local
|
||||
version map with the downloaded version map. Otherwise it is a conflict, and the
|
||||
download is put into "$FILENAME.conflict-$B"; in this case A's
|
||||
local version map remains unchanged, and the entry B->v taken from the downloaded
|
||||
version map is added to its conflict version map.
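
A minimal sketch of this rule, following the strictly-follows definition above and representing version maps as plain dictionaries mapping nicknames to integers (all names are illustrative)::

    def strictly_follows(new_map, old_map):
        """True iff every entry c->v in old_map has an entry c->v' in new_map with v' > v."""
        return all(new_map.get(c, 0) > v for c, v in old_map.items())

    def classify_download(local_map, conflict_map, downloaded_map, sender):
        if strictly_follows(downloaded_map, local_map):
            # Overwrite: adopt the downloaded version map wholesale.
            return "overwrite", dict(downloaded_map), dict(conflict_map)
        # Conflict: keep the local map unchanged and remember the sender's
        # version so that resolving "$FILENAME.conflict-$sender" can later
        # copy it back into the local map.
        updated_conflicts = dict(conflict_map)
        updated_conflicts[sender] = downloaded_map[sender]
        return "conflict", dict(local_map), updated_conflicts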
|
||||
|
||||
If client A deletes or renames a conflict file "$FILENAME.conflict-$B", then A copies
|
||||
the entry for B from its conflict version map to its local version map, deletes
|
||||
the entry for B in its conflict version map, and performs another upload (with
|
||||
incremented version number) of $FILENAME.
|
||||
|
||||
|
||||
Example:
|
||||
A, B, C = (10, 20, 30) everyone agrees.
|
||||
A updates: (11, 20, 30)
|
||||
B updates: (10, 21, 30)
|
||||
|
||||
C will see either A or B first. Both would be an overwrite, if considered alone.
|
||||
|
||||
|
||||
|
@ -1,951 +0,0 @@
|
||||
Magic Folder design for remote-to-local sync
|
||||
============================================
|
||||
|
||||
Scope
|
||||
-----
|
||||
|
||||
In this Objective we will design remote-to-local synchronization:
|
||||
|
||||
* How to efficiently determine which objects (files and directories) have
|
||||
to be downloaded in order to bring the current local filesystem into sync
|
||||
with the newly-discovered version of the remote filesystem.
|
||||
* How to distinguish overwrites, in which the remote side was aware of
|
||||
your most recent version and overwrote it with a new version, from
|
||||
conflicts, in which the remote side was unaware of your most recent
|
||||
version when it published its new version. The latter needs to be raised
|
||||
to the user as an issue the user will have to resolve and the former must
|
||||
not bother the user.
|
||||
* How to overwrite the (stale) local versions of those objects with the
|
||||
newly acquired objects, while preserving backed-up versions of those
|
||||
overwritten objects in case the user didn't want this overwrite and wants
|
||||
to recover the old version.
|
||||
|
||||
Tickets on the Tahoe-LAFS trac with the `otf-magic-folder-objective4`_
|
||||
keyword are within the scope of the remote-to-local synchronization
|
||||
design.
|
||||
|
||||
.. _otf-magic-folder-objective4: https://tahoe-lafs.org/trac/tahoe-lafs/query?status=!closed&keywords=~otf-magic-folder-objective4
|
||||
|
||||
|
||||
Glossary
|
||||
''''''''
|
||||
|
||||
Object: a file or directory
|
||||
|
||||
DMD: distributed mutable directory
|
||||
|
||||
Folder: an abstract directory that is synchronized between clients.
|
||||
(A folder is not the same as the directory corresponding to it on
|
||||
any particular client, nor is it the same as a DMD.)
|
||||
|
||||
Collective: the set of clients subscribed to a given Magic Folder.
|
||||
|
||||
Descendant: a direct or indirect child in a directory or folder tree
|
||||
|
||||
Subfolder: a folder that is a descendant of a magic folder
|
||||
|
||||
Subpath: the path from a magic folder to one of its descendants
|
||||
|
||||
Write: a modification to a local filesystem object by a client
|
||||
|
||||
Read: a read from a local filesystem object by a client
|
||||
|
||||
Upload: an upload of a local object to the Tahoe-LAFS file store
|
||||
|
||||
Download: a download from the Tahoe-LAFS file store to a local object
|
||||
|
||||
Pending notification: a local filesystem change that has been detected
|
||||
but not yet processed.
|
||||
|
||||
|
||||
Representing the Magic Folder in Tahoe-LAFS
|
||||
-------------------------------------------
|
||||
|
||||
Unlike the local case where we use inotify or ReadDirectoryChangesW to
|
||||
detect filesystem changes, we have no mechanism to register a monitor for
|
||||
changes to a Tahoe-LAFS directory. Therefore, we must periodically poll
|
||||
for changes.
|
||||
|
||||
An important constraint on the solution is Tahoe-LAFS' ":doc:`write
|
||||
coordination directive<../../write_coordination>`", which prohibits
|
||||
concurrent writes by different storage clients to the same mutable object:
|
||||
|
||||
Tahoe does not provide locking of mutable files and directories. If
|
||||
there is more than one simultaneous attempt to change a mutable file
|
||||
or directory, then an UncoordinatedWriteError may result. This might,
|
||||
in rare cases, cause the file or directory contents to be accidentally
|
||||
deleted. The user is expected to ensure that there is at most one
|
||||
outstanding write or update request for a given file or directory at
|
||||
a time. One convenient way to accomplish this is to make a different
|
||||
file or directory for each person or process that wants to write.
|
||||
|
||||
Since it is a goal to allow multiple users to write to a Magic Folder,
|
||||
if the write coordination directive remains the same as above, then we
|
||||
will not be able to implement the Magic Folder as a single Tahoe-LAFS
|
||||
DMD. In general therefore, we will have multiple DMDs —spread across
|
||||
clients— that together represent the Magic Folder. Each client in a
|
||||
Magic Folder collective polls the other clients' DMDs in order to detect
|
||||
remote changes.
|
||||
|
||||
Six possible designs were considered for the representation of subfolders
|
||||
of the Magic Folder:
|
||||
|
||||
1. All subfolders written by a given Magic Folder client are collapsed
|
||||
into a single client DMD, containing immutable files. The child name of
|
||||
each file encodes the full subpath of that file relative to the Magic
|
||||
Folder.
|
||||
|
||||
2. The DMD tree under a client DMD is a direct copy of the folder tree
|
||||
written by that client to the Magic Folder. Not all subfolders have
|
||||
corresponding DMDs; only those to which that client has written files or
|
||||
child subfolders.
|
||||
|
||||
3. The directory tree under a client DMD is a ``tahoe backup`` structure
|
||||
containing immutable snapshots of the folder tree written by that client
|
||||
to the Magic Folder. As in design 2, only objects written by that client
|
||||
are present.
|
||||
|
||||
4. *Each* client DMD contains an eventually consistent mirror of all
|
||||
files and folders written by *any* Magic Folder client. Thus each client
|
||||
must also copy changes made by other Magic Folder clients to its own
|
||||
client DMD.
|
||||
|
||||
5. *Each* client DMD contains a ``tahoe backup`` structure containing
|
||||
immutable snapshots of all files and folders written by *any* Magic
|
||||
Folder client. Thus each client must also create another snapshot in its
|
||||
own client DMD when changes are made by another client. (It can potentially
|
||||
batch changes, subject to latency requirements.)
|
||||
|
||||
6. The write coordination problem is solved by implementing `two-phase
|
||||
commit`_. Then, the representation consists of a single DMD tree which is
|
||||
written by all clients.
|
||||
|
||||
.. _`two-phase commit`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1755
|
||||
|
||||
Here is a summary of advantages and disadvantages of each design:
|
||||
|
||||
+----------------------------+
|
||||
| Key |
|
||||
+=======+====================+
|
||||
| \+\+ | major advantage |
|
||||
+-------+--------------------+
|
||||
| \+ | minor advantage |
|
||||
+-------+--------------------+
|
||||
| ‒ | minor disadvantage |
|
||||
+-------+--------------------+
|
||||
| ‒ ‒ | major disadvantage |
|
||||
+-------+--------------------+
|
||||
| ‒ ‒ ‒ | showstopper |
|
||||
+-------+--------------------+
|
||||
|
||||
|
||||
123456+: All designs have the property that a recursive add-lease operation
|
||||
starting from a *collective directory* containing all of the client DMDs,
|
||||
will find all of the files and directories used in the Magic Folder
|
||||
representation. Therefore the representation is compatible with :doc:`garbage
|
||||
collection <../../garbage-collection>`, even when a pre-Magic-Folder client
|
||||
does the lease marking.
|
||||
|
||||
123456+: All designs avoid "breaking" pre-Magic-Folder clients that read
|
||||
a directory or file that is part of the representation.
|
||||
|
||||
456++: Only these designs allow a readcap to one of the client
|
||||
directories —or one of their subdirectories— to be directly shared
|
||||
with other Tahoe-LAFS clients (not necessarily Magic Folder clients),
|
||||
so that such a client sees all of the contents of the Magic Folder.
|
||||
Note that this was not a requirement of the OTF proposal, although it
|
||||
is useful.
|
||||
|
||||
135+: A Magic Folder client has only one mutable Tahoe-LAFS object to
|
||||
monitor per other client. This minimizes communication bandwidth for
|
||||
polling, or alternatively the latency possible for a given polling
|
||||
bandwidth.
|
||||
|
||||
1236+: A client does not need to make changes to its own DMD that repeat
|
||||
changes that another Magic Folder client had previously made. This reduces
|
||||
write bandwidth and complexity.
|
||||
|
||||
1‒: If the Magic Folder has many subfolders, their files will all be
|
||||
collapsed into the same DMD, which could get quite large. In practice a
|
||||
single DMD can easily handle the number of files expected to be written
|
||||
by a client, so this is unlikely to be a significant issue.
|
||||
|
||||
123‒ ‒: In these designs, the set of files in a Magic Folder is
|
||||
represented as the union of the files in all client DMDs. However,
|
||||
when a file is modified by more than one client, it will be linked
|
||||
from multiple client DMDs. We therefore need a mechanism, such as a
|
||||
version number or a monotonically increasing timestamp, to determine
|
||||
which copy takes priority.
|
||||
|
||||
35‒ ‒: When a Magic Folder client detects a remote change, it must
|
||||
traverse an immutable directory structure to see what has changed.
|
||||
Completely unchanged subtrees will have the same URI, allowing some of
|
||||
this traversal to be shortcutted.
|
||||
|
||||
24‒ ‒ ‒: When a Magic Folder client detects a remote change, it must
|
||||
traverse a mutable directory structure to see what has changed. This is
|
||||
more complex and less efficient than traversing an immutable structure,
|
||||
because shortcutting is not possible (each DMD retains the same URI even
|
||||
if a descendant object has changed), and because the structure may change
|
||||
while it is being traversed. Also the traversal needs to be robust
|
||||
against cycles, which can only occur in mutable structures.
|
||||
|
||||
45‒ ‒: When a change occurs in one Magic Folder client, it will propagate
|
||||
to all the other clients. Each client will therefore see multiple
|
||||
representation changes for a single logical change to the Magic Folder
|
||||
contents, and must suppress the duplicates. This is particularly
|
||||
problematic for design 4 where it interacts with the preceding issue.
|
||||
|
||||
4‒ ‒ ‒, 5‒ ‒: There is the potential for client DMDs to get "out of sync"
|
||||
with each other, potentially for long periods if errors occur. Thus each
|
||||
client must be able to "repair" its client directory (and its
|
||||
subdirectory structure) concurrently with performing its own writes. This
|
||||
is a significant complexity burden and may introduce failure modes that
|
||||
could not otherwise happen.
|
||||
|
||||
6‒ ‒ ‒: While two-phase commit is a well-established protocol, its
|
||||
application to Tahoe-LAFS requires significant design work, and may still
|
||||
leave some corner cases of the write coordination problem unsolved.
|
||||
|
||||
|
||||
+------------------------------------------------+-----------------------------------------+
|
||||
| Design Property | Designs Proposed |
|
||||
+================================================+======+======+======+======+======+======+
|
||||
| **advantages** | *1* | *2* | *3* | *4* | *5* | *6* |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| Compatible with garbage collection |\+ |\+ |\+ |\+ |\+ |\+ |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| Does not break old clients |\+ |\+ |\+ |\+ |\+ |\+ |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| Allows direct sharing | | | |\+\+ |\+\+ |\+\+ |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| Efficient use of bandwidth |\+ | |\+ | |\+ | |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| No repeated changes |\+ |\+ |\+ | | |\+ |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| **disadvantages** | *1* | *2* | *3* | *4* | *5* | *6* |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| Can result in large DMDs |‒ | | | | | |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| Need version number to determine priority |‒ ‒ |‒ ‒ |‒ ‒ | | | |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| Must traverse immutable directory structure | | |‒ ‒ | |‒ ‒ | |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| Must traverse mutable directory structure | |‒ ‒ ‒ | |‒ ‒ ‒ | | |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| Must suppress duplicate representation changes | | | |‒ ‒ |‒ ‒ | |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| "Out of sync" problem | | | |‒ ‒ ‒ |‒ ‒ | |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
| Unsolved design problems | | | | | |‒ ‒ ‒ |
|
||||
+------------------------------------------------+------+------+------+------+------+------+
|
||||
|
||||
|
||||
Evaluation of designs
|
||||
'''''''''''''''''''''
|
||||
|
||||
Designs 2 and 3 have no significant advantages over design 1, while
|
||||
requiring higher polling bandwidth and greater complexity due to the need
|
||||
to create subdirectories. These designs were therefore rejected.
|
||||
|
||||
Design 4 was rejected due to the out-of-sync problem, which is severe
|
||||
and possibly unsolvable for mutable structures.
|
||||
|
||||
For design 5, the out-of-sync problem is still present but possibly
|
||||
solvable. However, design 5 is substantially more complex, less efficient
|
||||
in bandwidth/latency, and less scalable in number of clients and
|
||||
subfolders than design 1. It only gains over design 1 on the ability to
|
||||
share directory readcaps to the Magic Folder (or subfolders), which was
|
||||
not a requirement. It would be possible to implement this feature in
|
||||
future by switching to design 6.
|
||||
|
||||
For the time being, however, design 6 was considered out-of-scope for
|
||||
this project.
|
||||
|
||||
Therefore, design 1 was chosen. That is:
|
||||
|
||||
All subfolders written by a given Magic Folder client are collapsed
|
||||
into a single client DMD, containing immutable files. The child name
|
||||
of each file encodes the full subpath of that file relative to the
|
||||
Magic Folder.
|
||||
|
||||
Each directory entry in a DMD also stores a version number, so that the
|
||||
latest version of a file is well-defined when it has been modified by
|
||||
multiple clients.
|
||||
|
||||
To enable representing empty directories, a client that creates a
|
||||
directory should link a corresponding zero-length file in its DMD,
|
||||
at a name that ends with the encoded directory separator character.
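
For instance, if the directory separator were encoded as ``"@_"`` (an illustrative value only; the actual encoding is defined elsewhere in this design), an empty directory ``docs/notes/`` could be represented by a zero-length file at a name built like this::

    SEP = "@_"   # illustrative encoding of "/" in DMD child names

    def dmd_name_for_directory(subpath_segments):
        # ["docs", "notes"] -> "docs@_notes@_"; the trailing separator marks a directory.
        return SEP.join(subpath_segments) + SEP

    assert dmd_name_for_directory(["docs", "notes"]) == "docs@_notes@_"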
|
||||
|
||||
We want to enable dynamic configuration of the membership of a Magic
|
||||
Folder collective, without having to reconfigure or restart each client
|
||||
when another client joins. To support this, we have a single collective
|
||||
directory that links to all of the client DMDs, named by their client
|
||||
nicknames. If the collective directory is mutable, then it is possible
|
||||
to change its contents in order to add clients. Note that a client DMD
|
||||
should not be unlinked from the collective directory unless all of its
|
||||
files are first copied to some other client DMD.
|
||||
|
||||
A client needs to be able to write to its own DMD, and read from other DMDs.
|
||||
To be consistent with the `Principle of Least Authority`_, each client's
|
||||
reference to its own DMD is a write capability, whereas its reference
|
||||
to the collective directory is a read capability. The latter transitively
|
||||
grants read access to all of the other client DMDs and the files linked
|
||||
from them, as required.
|
||||
|
||||
.. _`Principle of Least Authority`: http://www.eros-os.org/papers/secnotsep.pdf
|
||||
|
||||
Design and implementation of the user interface for maintaining this
|
||||
DMD structure and configuration will be addressed in Objectives 5 and 6.
|
||||
|
||||
During operation, each client will poll for changes on other clients
|
||||
at a predetermined frequency. On each poll, it will reread the collective
|
||||
directory (to allow for added or removed clients), and then read each
|
||||
client DMD linked from it.
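
Sketched against a node's web gateway (the gateway URL and helper names are illustrative; ``?t=json`` returns a JSON description of a directory and its children)::

    import requests   # any HTTP client would do

    NODE = "http://127.0.0.1:3456"   # hypothetical local Tahoe-LAFS web gateway

    def read_directory(readcap):
        node_type, info = requests.get("%s/uri/%s?t=json" % (NODE, readcap)).json()
        return info["children"]

    def poll_collective(collective_readcap):
        # Re-read the collective directory on every poll so that clients added
        # or removed since the last poll are picked up without reconfiguration.
        for nickname, (child_type, child) in read_directory(collective_readcap).items():
            if child_type != "dirnode":
                continue
            # Each client DMD maps encoded subpaths to file caps and metadata
            # (including the per-entry version number described above).
            yield nickname, read_directory(child["ro_uri"])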
|
||||
|
||||
"Hidden" files, and files with names matching the patterns used for backup,
|
||||
temporary, and conflicted files, will be ignored, i.e. not synchronized
|
||||
in either direction. A file is hidden if it has a filename beginning with
|
||||
"." (on any platform), or has the hidden or system attribute on Windows.
|
||||
|
||||
|
||||
Conflict Detection and Resolution
|
||||
---------------------------------
|
||||
|
||||
The combination of local filesystems and distributed objects is
|
||||
an example of shared state concurrency, which is highly error-prone
|
||||
and can result in race conditions that are complex to analyze.
|
||||
Unfortunately we have no option but to use shared state in this
|
||||
situation.
|
||||
|
||||
We call the resulting design issues "dragons" (as in "Here be dragons"),
|
||||
which as a convenient mnemonic we have named after the classical
|
||||
Greek elements Earth, Fire, Air, and Water.
|
||||
|
||||
Note: all filenames used in the following sections are examples,
|
||||
and the filename patterns we use in the actual implementation may
|
||||
differ. The actual patterns will probably include timestamps, and
|
||||
for conflicted files, the nickname of the client that last changed
|
||||
the file.
|
||||
|
||||
|
||||
Earth Dragons: Collisions between local filesystem operations and downloads
|
||||
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
Write/download collisions
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Suppose that Alice's Magic Folder client is about to write a
|
||||
version of ``foo`` that it has downloaded in response to a remote
|
||||
change.
|
||||
|
||||
The criteria for distinguishing overwrites from conflicts are
|
||||
described later in the `Fire Dragons`_ section. Suppose that the
|
||||
remote change has been initially classified as an overwrite.
|
||||
(As we will see, it may be reclassified in some circumstances.)
|
||||
|
||||
.. _`Fire Dragons`: #fire-dragons-distinguishing-conflicts-from-overwrites
|
||||
|
||||
Note that writing a file that does not already have an entry in the
|
||||
:ref:`magic folder db<filesystem_integration-local-scanning-and-database>` is
|
||||
initially classed as an overwrite.
|
||||
|
||||
A *write/download collision* occurs when another program writes
|
||||
to ``foo`` in the local filesystem, concurrently with the new
|
||||
version being written by the Magic Folder client. We need to
|
||||
ensure that this does not cause data loss, as far as possible.
|
||||
|
||||
An important constraint on the design is that on Windows, it is
|
||||
not possible to rename a file to the same name as an existing
|
||||
file in that directory. Also, on Windows it may not be possible to
|
||||
delete or rename a file that has been opened by another process
|
||||
(depending on the sharing flags specified by that process).
|
||||
Therefore we need to consider carefully how to handle failure
|
||||
conditions.
|
||||
|
||||
In our proposed design, Alice's Magic Folder client follows
|
||||
this procedure for an overwrite in response to a remote change:
|
||||
|
||||
1. Write a temporary file, say ``.foo.tmp``.
|
||||
2. Use the procedure described in the `Fire Dragons`_ section
|
||||
to obtain an initial classification as an overwrite or a
|
||||
conflict. (This takes as input the ``last_downloaded_uri``
|
||||
field from the directory entry of the changed ``foo``.)
|
||||
3. Set the ``mtime`` of the replacement file to be at least *T* seconds
|
||||
before the current local time. Stat the replacement file
|
||||
to obtain its ``mtime`` and ``ctime`` as stored in the local
|
||||
filesystem, and update the file's last-seen statinfo in
|
||||
the magic folder db with this information. (Note that the
|
||||
retrieved ``mtime`` may differ from the one that was set due
|
||||
to rounding.)
|
||||
4. Perform a *file replacement* operation (explained below)
|
||||
with backup filename ``foo.backup``, replaced file ``foo``,
|
||||
and replacement file ``.foo.tmp``. If any step of this
|
||||
operation fails, reclassify as a conflict and stop.
|
||||
|
||||
To reclassify as a conflict, attempt to rename ``.foo.tmp`` to
|
||||
``foo.conflicted``, suppressing errors.
|
||||
|
||||
The implementation of file replacement differs between Unix
|
||||
and Windows. On Unix, it can be implemented as follows:
|
||||
|
||||
* 4a. Stat the replaced path, and set the permissions of the
|
||||
replacement file to be the same as the replaced file,
|
||||
bitwise-or'd with octal 600 (``rw-------``). If the replaced
|
||||
file does not exist, set the permissions according to the
|
||||
user's umask. If there is a directory at the replaced path,
|
||||
fail.
|
||||
* 4b. Attempt to move the replaced file (``foo``) to the
|
||||
backup filename (``foo.backup``). If an ``ENOENT`` error
|
||||
occurs because the replaced file does not exist, ignore this
|
||||
error and continue with steps 4c and 4d.
|
||||
* 4c. Attempt to create a hard link at the replaced filename
|
||||
(``foo``) pointing to the replacement file (``.foo.tmp``).
|
||||
* 4d. Attempt to unlink the replacement file (``.foo.tmp``),
|
||||
suppressing errors.
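
A Python sketch of these Unix steps (error handling is limited to the cases called out above; any other exception means the caller reclassifies the download as a conflict)::

    import errno
    import os
    import stat

    def replace_file(replaced, replacement, backup):
        # 4a. Copy permissions from the replaced file, OR'd with 0o600; if the
        #     replaced file is missing, the replacement keeps its umask-derived
        #     permissions; a directory at the replaced path is an error.
        try:
            st = os.stat(replaced)
            if stat.S_ISDIR(st.st_mode):
                raise OSError(errno.EISDIR, "directory at replaced path", replaced)
            os.chmod(replacement, stat.S_IMODE(st.st_mode) | 0o600)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        # 4b. Move the replaced file to the backup name; a missing file is ignored.
        try:
            os.rename(replaced, backup)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
        # 4c. Hard-link the replacement into place; EEXIST here means another
        #     process recreated the path, which the caller treats as a conflict.
        os.link(replacement, replaced)
        # 4d. Unlink the temporary replacement name, suppressing errors.
        try:
            os.unlink(replacement)
        except OSError:
            pass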
|
||||
|
||||
Note that, if there is no conflict, the entry for ``foo``
|
||||
recorded in the :ref:`magic folder
|
||||
db<filesystem_integration-local-scanning-and-database>` will
|
||||
reflect the ``mtime`` set in step 3. The move operation in step
|
||||
4b will cause a ``MOVED_FROM`` event for ``foo``, and the link
|
||||
operation in step 4c will cause an ``IN_CREATE`` event for
|
||||
``foo``. However, these events will not trigger an upload,
|
||||
because they are guaranteed to be processed only after the file
|
||||
replacement has finished, at which point the last-seen statinfo
|
||||
recorded in the database entry will exactly match the metadata
|
||||
for the file's inode on disk. (The two hard links — ``foo``
|
||||
and, while it still exists, ``.foo.tmp`` — share the same inode
|
||||
and therefore the same metadata.)
|
||||
|
||||
On Windows, file replacement can be implemented by a call to
|
||||
the `ReplaceFileW`_ API (with the
|
||||
``REPLACEFILE_IGNORE_MERGE_ERRORS`` flag). If an error occurs
|
||||
because the replaced file does not exist, then we ignore this
|
||||
error and attempt to move the replacement file to the replaced
|
||||
file.
|
||||
|
||||
Similar to the Unix case, the `ReplaceFileW`_ operation will
|
||||
cause one or more change notifications for ``foo``. The replaced
|
||||
``foo`` has the same ``mtime`` as the replacement file, and so any
|
||||
such notification(s) will not trigger an unwanted upload.
|
||||
|
||||
.. _`ReplaceFileW`: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365512%28v=vs.85%29.aspx
|
||||
|
||||
To determine whether this procedure adequately protects against data
|
||||
loss, we need to consider what happens if another process attempts to
|
||||
update ``foo``, for example by renaming ``foo.other`` to ``foo``.
|
||||
This requires us to analyze all possible interleavings between the
|
||||
operations performed by the Magic Folder client and the other process.
|
||||
(Note that atomic operations on a directory are totally ordered.)
|
||||
The set of possible interleavings differs between Windows and Unix.
|
||||
|
||||
On Unix, for the case where the replaced file already exists, we have:
|
||||
|
||||
* Interleaving A: the other process' rename precedes our rename in
|
||||
step 4b, and we get an ``IN_MOVED_TO`` event for its rename by
|
||||
step 2. Then we reclassify as a conflict; its changes end up at
|
||||
``foo`` and ours end up at ``foo.conflicted``. This avoids data
|
||||
loss.
|
||||
|
||||
* Interleaving B: its rename precedes ours in step 4b, and we do
|
||||
not get an event for its rename by step 2. Its changes end up at
|
||||
``foo.backup``, and ours end up at ``foo`` after being linked there
|
||||
in step 4c. This avoids data loss.
|
||||
|
||||
* Interleaving C: its rename happens between our rename in step 4b,
|
||||
and our link operation in step 4c of the file replacement. The
|
||||
latter fails with an ``EEXIST`` error because ``foo`` already
|
||||
exists. We reclassify as a conflict; the old version ends up at
|
||||
``foo.backup``, the other process' changes end up at ``foo``, and
|
||||
ours at ``foo.conflicted``. This avoids data loss.
|
||||
|
||||
* Interleaving D: its rename happens after our link in step 4c, and
|
||||
causes an ``IN_MOVED_TO`` event for ``foo``. Its rename also changes
|
||||
the ``mtime`` for ``foo`` so that it is different from the ``mtime``
|
||||
calculated in step 3, and therefore different from the metadata
|
||||
recorded for ``foo`` in the magic folder db. (Assuming no system
|
||||
clock changes, its rename will set an ``mtime`` timestamp
|
||||
corresponding to a time after step 4c, which is after the timestamp
|
||||
*T* seconds before step 4a, provided that *T* seconds is
|
||||
sufficiently greater than the timestamp granularity.) Therefore, an
|
||||
upload will be triggered for ``foo`` after its change, which is
|
||||
correct and avoids data loss.
|
||||
|
||||
If the replaced file did not already exist, an ``ENOENT`` error
|
||||
occurs at step 4b, and we continue with steps 4c and 4d. The other
|
||||
process' rename races with our link operation in step 4c. If the
|
||||
other process wins the race then the effect is similar to
|
||||
Interleaving C, and if we win the race it is similar to
|
||||
Interleaving D. Either case avoids data loss.
|
||||
|
||||
|
||||
On Windows, the internal implementation of `ReplaceFileW`_ is similar
|
||||
to what we have described above for Unix; it works like this:
|
||||
|
||||
* 4a′. Copy metadata (which does not include ``mtime``) from the
|
||||
replaced file (``foo``) to the replacement file (``.foo.tmp``).
|
||||
|
||||
* 4b′. Attempt to move the replaced file (``foo``) onto the
|
||||
backup filename (``foo.backup``), deleting the latter if it
|
||||
already exists.
|
||||
|
||||
* 4c′. Attempt to move the replacement file (``.foo.tmp``) to the
|
||||
replaced filename (``foo``); fail if the destination already
|
||||
exists.
|
||||
|
||||
Notice that this is essentially the same as the algorithm we use
|
||||
for Unix, but steps 4c and 4d on Unix are combined into a single
|
||||
step 4c′. (If there is a failure at step 4c′ after step 4b′ has
|
||||
completed, the `ReplaceFileW`_ call will fail with return code
|
||||
``ERROR_UNABLE_TO_MOVE_REPLACEMENT_2``. However, it is still
|
||||
preferable to use this API over two `MoveFileExW`_ calls, because
|
||||
it retains the attributes and ACLs of ``foo`` where possible.
|
||||
Also note that if the `ReplaceFileW`_ call fails with
|
||||
``ERROR_FILE_NOT_FOUND`` because the replaced file does not exist,
|
||||
then the replacement operation ignores this error and continues with
|
||||
the equivalent of step 4c′, as on Unix.)
|
||||
|
||||
However, on Windows the other application will not be able to
|
||||
directly rename ``foo.other`` onto ``foo`` (which would fail because
|
||||
the destination already exists); it will have to rename or delete
|
||||
``foo`` first. Without loss of generality, let's say ``foo`` is
|
||||
deleted. This complicates the interleaving analysis, because we
|
||||
have two operations done by the other process interleaving with
|
||||
three done by the magic folder process (rather than one operation
|
||||
interleaving with four as on Unix).
|
||||
|
||||
So on Windows, for the case where the replaced file already exists,
|
||||
we have:
|
||||
|
||||
* Interleaving A′: the other process' deletion of ``foo`` and its
|
||||
rename of ``foo.other`` to ``foo`` both precede our rename in
|
||||
step 4b. We get an event corresponding to its rename by step 2.
|
||||
Then we reclassify as a conflict; its changes end up at ``foo``
|
||||
and ours end up at ``foo.conflicted``. This avoids data loss.
|
||||
|
||||
* Interleaving B′: the other process' deletion of ``foo`` and its
|
||||
rename of ``foo.other`` to ``foo`` both precede our rename in
|
||||
step 4b. We do not get an event for its rename by step 2.
|
||||
Its changes end up at ``foo.backup``, and ours end up at ``foo``
|
||||
after being moved there in step 4c′. This avoids data loss.
|
||||
|
||||
* Interleaving C′: the other process' deletion of ``foo`` precedes
|
||||
our rename of ``foo`` to ``foo.backup`` done by `ReplaceFileW`_,
|
||||
but its rename of ``foo.other`` to ``foo`` does not, so we get
|
||||
an ``ERROR_FILE_NOT_FOUND`` error from `ReplaceFileW`_ indicating
|
||||
that the replaced file does not exist. We ignore this error and
|
||||
attempt to move ``.foo.tmp`` to ``foo``, racing with the other
|
||||
process which is attempting to move ``foo.other`` to ``foo``.
|
||||
If we win the race, then our changes end up at ``foo``, and the
|
||||
other process' move fails. If the other process wins the race,
|
||||
then its changes end up at ``foo``, our move fails, and we
|
||||
reclassify as a conflict, so that our changes end up at
|
||||
``foo.conflicted``. Either possibility avoids data loss.
|
||||
|
||||
* Interleaving D′: the other process' deletion and/or rename happen
|
||||
during the call to `ReplaceFileW`_, causing the latter to fail.
|
||||
There are two subcases:
|
||||
|
||||
* if the error is ``ERROR_UNABLE_TO_MOVE_REPLACEMENT_2``, then
|
||||
``foo`` is renamed to ``foo.backup`` and ``.foo.tmp`` remains
|
||||
at its original name after the call.
|
||||
* for all other errors, ``foo`` and ``.foo.tmp`` both remain at
|
||||
their original names after the call.
|
||||
|
||||
In both subcases, we reclassify as a conflict and rename ``.foo.tmp``
|
||||
to ``foo.conflicted``. This avoids data loss.
|
||||
|
||||
* Interleaving E′: the other process' deletion of ``foo`` and attempt
|
||||
to rename ``foo.other`` to ``foo`` both happen after all internal
|
||||
operations of `ReplaceFileW`_ have completed. This causes deletion
|
||||
and rename events for ``foo`` (which will in practice be merged due
|
||||
to the pending delay, although we don't rely on that for
|
||||
correctness). The rename also changes the ``mtime`` for ``foo`` so
|
||||
that it is different from the ``mtime`` calculated in step 3, and
|
||||
therefore different from the metadata recorded for ``foo`` in the
|
||||
magic folder db. (Assuming no system clock changes, its rename will
|
||||
set an ``mtime`` timestamp corresponding to a time after the
|
||||
internal operations of `ReplaceFileW`_ have completed, which is
|
||||
after the timestamp *T* seconds before `ReplaceFileW`_ is called,
|
||||
provided that *T* seconds is sufficiently greater than the timestamp
|
||||
granularity.) Therefore, an upload will be triggered for ``foo``
|
||||
after its change, which is correct and avoids data loss.
|
||||
|
||||
.. _`MoveFileExW`: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365240%28v=vs.85%29.aspx
|
||||
|
||||
If the replaced file did not already exist, we get an
|
||||
``ERROR_FILE_NOT_FOUND`` error from `ReplaceFileW`_, and attempt to
|
||||
move ``foo.tmp`` to ``foo``. This is similar to Interleaving C, and
|
||||
either possibility for the resulting race avoids data loss.
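
For concreteness, the fallback described above might be expressed as the
following Python/ctypes sketch. This is illustrative only; the helper name
and the simplified error handling are assumptions, not the actual Magic
Folder implementation::

    import ctypes

    ERROR_FILE_NOT_FOUND = 2
    _kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)

    def replace_or_move(replaced, replacement):
        # Sketch of steps 4b' and 4c': try ReplaceFileW first; if the
        # replaced file does not exist, fall back to a plain move of the
        # temporary file onto the target.  Other errors are left to the
        # caller (which may reclassify the download as a conflict).
        if _kernel32.ReplaceFileW(replaced, replacement, None, 0, None, None):
            return True
        if ctypes.get_last_error() == ERROR_FILE_NOT_FOUND:
            return bool(_kernel32.MoveFileExW(replacement, replaced, 0))
        return False
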
|
||||
|
||||
We also need to consider what happens if another process opens ``foo``
|
||||
and writes to it directly, rather than renaming another file onto it:
|
||||
|
||||
* On Unix, open file handles refer to inodes, not paths. If the other
|
||||
process opens ``foo`` before it has been renamed to ``foo.backup``,
|
||||
and then closes the file, changes will have been written to the file
|
||||
at the same inode, even if that inode is now linked at ``foo.backup``.
|
||||
This avoids data loss.
|
||||
|
||||
* On Windows, we have two subcases, depending on whether the sharing
|
||||
flags specified by the other process when it opened its file handle
|
||||
included ``FILE_SHARE_DELETE``. (This flag covers both deletion and
|
||||
rename operations.)
|
||||
|
||||
i. If the sharing flags *do not* allow deletion/renaming, the
|
||||
`ReplaceFileW`_ operation will fail without renaming ``foo``.
|
||||
In this case we will end up with ``foo`` changed by the other
|
||||
process, and the downloaded file still in ``foo.tmp``.
|
||||
This avoids data loss.
|
||||
|
||||
ii. If the sharing flags *do* allow deletion/renaming, then
|
||||
data loss or corruption may occur. This is unavoidable and
|
||||
can be attributed to the other process making a poor choice of
|
||||
sharing flags (either explicitly if it used `CreateFile`_, or
|
||||
via whichever higher-level API it used).
|
||||
|
||||
.. _`CreateFile`: https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858%28v=vs.85%29.aspx
|
||||
|
||||
Note that it is possible that another process tries to open the file
|
||||
between steps 4b and 4c (or 4b′ and 4c′ on Windows). In this case the
|
||||
open will fail because ``foo`` does not exist. Nevertheless, no data
|
||||
will be lost, and in many cases the user will be able to retry the
|
||||
operation.
|
||||
|
||||
Above we only described the case where the download was initially
|
||||
classified as an overwrite. If it was classified as a conflict, the
|
||||
procedure is the same except that we choose a unique filename
|
||||
for the conflicted file (say, ``foo.conflicted_unique``). We write
|
||||
the new contents to ``.foo.tmp`` and then rename it to
|
||||
``foo.conflicted_unique`` in such a way that the rename will fail
|
||||
if the destination already exists. (On Windows this is a simple
|
||||
rename; on Unix it can be implemented as a link operation followed
|
||||
by an unlink, similar to steps 4c and 4d above.) If this fails
|
||||
because another process wrote ``foo.conflicted_unique`` after we
|
||||
chose the filename, then we retry with a different filename.
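
A minimal sketch of the Unix variant of this retry loop follows; the naming
scheme and the helper name are hypothetical, and the link-then-unlink trick
is what provides the required fail-if-the-destination-exists behaviour::

    import errno
    import os

    def write_conflicted(tmp_path, base_path):
        # Rename tmp_path to a unique "base_path.conflicted_N" name,
        # moving on to a new name if another process claims it first.
        # (Illustrative sketch; the real naming scheme may differ.)
        counter = 0
        while True:
            candidate = "{}.conflicted_{}".format(base_path, counter)
            try:
                # On Unix, link() fails with EEXIST if the destination
                # already exists, giving fail-if-exists rename semantics.
                os.link(tmp_path, candidate)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise
                counter += 1  # another process wrote this name first; retry
                continue
            os.unlink(tmp_path)  # complete the link-then-unlink "rename"
            return candidate
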
|
||||
|
||||
|
||||
Read/download collisions
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
A *read/download collision* occurs when another program reads
|
||||
from ``foo`` in the local filesystem, concurrently with the new
|
||||
version being written by the Magic Folder client. We want to
|
||||
ensure that any successful attempt to read the file by the other
|
||||
program obtains a consistent view of its contents.
|
||||
|
||||
On Unix, the above procedure for writing downloads is sufficient
|
||||
to achieve this. There are three cases:
|
||||
|
||||
* A. The other process opens ``foo`` for reading before it is
|
||||
renamed to ``foo.backup``. Then the file handle will continue to
|
||||
refer to the old file across the rename, and the other process
|
||||
will read the old contents.
|
||||
|
||||
* B. The other process attempts to open ``foo`` after it has been
|
||||
renamed to ``foo.backup``, and before it is linked in step c.
|
||||
The open call fails, which is acceptable.
|
||||
|
||||
* C. The other process opens ``foo`` after it has been linked to
|
||||
the new file. Then it will read the new contents.
|
||||
|
||||
On Windows, the analysis is very similar, but case A′ needs to
|
||||
be split into two subcases, depending on the sharing mode the other
|
||||
process uses when opening the file for reading:
|
||||
|
||||
* A′. The other process opens ``foo`` before the Magic Folder
|
||||
client's attempt to rename ``foo`` to ``foo.backup`` (as part
|
||||
of the implementation of `ReplaceFileW`_). The subcases are:
|
||||
|
||||
i. The other process uses sharing flags that deny deletion and
|
||||
renames. The `ReplaceFileW`_ call fails, and the download is
|
||||
reclassified as a conflict. The downloaded file ends up at
|
||||
``foo.conflicted``, which is correct.
|
||||
|
||||
ii. The other process uses sharing flags that allow deletion
|
||||
and renames. The `ReplaceFileW`_ call succeeds, and the
|
||||
other process reads inconsistent data. This can be attributed
|
||||
to a poor choice of sharing flags by the other process.
|
||||
|
||||
* B′. The other process attempts to open ``foo`` at the point
|
||||
during the `ReplaceFileW`_ call where it does not exist.
|
||||
The open call fails, which is acceptable.
|
||||
|
||||
* C′. The other process opens ``foo`` after it has been linked to
|
||||
the new file. Then it will read the new contents.
|
||||
|
||||
|
||||
For both write/download and read/download collisions, we have
|
||||
considered only interleavings with a single other process, and
|
||||
only the most common possibilities for the other process'
|
||||
interaction with the file. If multiple other processes are
|
||||
involved, or if a process performs operations other than those
|
||||
considered, then we cannot say much about the outcome in general;
|
||||
however, we believe that such cases will be much less common.
|
||||
|
||||
|
||||
|
||||
Fire Dragons: Distinguishing conflicts from overwrites
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
When synchronizing a file that has changed remotely, the Magic Folder
|
||||
client needs to distinguish between overwrites, in which the remote
|
||||
side was aware of your most recent version (if any) and overwrote it
|
||||
with a new version, and conflicts, in which the remote side was unaware
|
||||
of your most recent version when it published its new version. Those two
|
||||
cases have to be handled differently — the latter needs to be raised
|
||||
to the user as an issue the user will have to resolve and the former
|
||||
must not bother the user.
|
||||
|
||||
For example, suppose that Alice's Magic Folder client sees a change
|
||||
to ``foo`` in Bob's DMD. If the version it downloads from Bob's DMD
|
||||
is "based on" the version currently in Alice's local filesystem at
|
||||
the time Alice's client attempts to write the downloaded file ‒or if
|
||||
there is no existing version in Alice's local filesystem at that time‒
|
||||
then it is an overwrite. Otherwise it is initially classified as a
|
||||
conflict.
|
||||
|
||||
This initial classification is used by the procedure for writing a
|
||||
file described in the `Earth Dragons`_ section above. As explained
|
||||
in that section, we may reclassify an overwrite as a conflict if an
|
||||
error occurs during the write procedure.
|
||||
|
||||
.. _`Earth Dragons`: #earth-dragons-collisions-between-local-filesystem-operations-and-downloads
|
||||
|
||||
In order to implement this policy, we need to specify how the
|
||||
"based on" relation between file versions is recorded and updated.
|
||||
|
||||
We propose to record this information:
|
||||
|
||||
* in the :ref:`magic folder
|
||||
db<filesystem_integration-local-scanning-and-database>`, for
|
||||
local files;
|
||||
* in the Tahoe-LAFS directory metadata, for files stored in the
|
||||
Magic Folder.
|
||||
|
||||
In the magic folder db we will add a *last-downloaded record*,
|
||||
consisting of ``last_downloaded_uri`` and ``last_downloaded_timestamp``
|
||||
fields, for each path stored in the database. Whenever a Magic Folder
|
||||
client downloads a file, it stores the downloaded version's URI and
|
||||
the current local timestamp in this record. Since only immutable
|
||||
files are used, the URI will be an immutable file URI, which is
|
||||
deterministically and uniquely derived from the file contents and
|
||||
the Tahoe-LAFS node's :doc:`convergence secret<../../convergence-secret>`.
|
||||
|
||||
(Note that the last-downloaded record is updated regardless of
|
||||
whether the download is an overwrite or a conflict. The rationale
|
||||
for this is to avoid "conflict loops" between clients, where every
|
||||
new version after the first conflict would be considered as another
|
||||
conflict.)
|
||||
|
||||
Later, in response to a local filesystem change at a given path, the
|
||||
Magic Folder client reads the last-downloaded record associated with
|
||||
that path (if any) from the database and then uploads the current
|
||||
file. When it links the uploaded file into its client DMD, it
|
||||
includes the ``last_downloaded_uri`` field in the metadata of the
|
||||
directory entry, overwriting any existing field of that name. If
|
||||
there was no last-downloaded record associated with the path, this
|
||||
field is omitted.
|
||||
|
||||
Note that the ``last_downloaded_uri`` field does *not* record the URI of
|
||||
the uploaded file (which would be redundant); it records the URI of
|
||||
the last download before the local change that caused the upload.
|
||||
The field will be absent if the file has never been downloaded by
|
||||
this client (i.e. if it was created on this client and no change
|
||||
by any other client has been detected).
|
||||
|
||||
A possible refinement also takes into account the
|
||||
``last_downloaded_timestamp`` field from the magic folder db, and
|
||||
compares it to the timestamp of the change that caused the upload
|
||||
(which should be later, assuming no system clock changes).
|
||||
If the duration between these timestamps is very short, then we
|
||||
are uncertain about whether the process on Bob's system that wrote
|
||||
the local file could have taken into account the last download.
|
||||
We can use this information to be conservative about treating
|
||||
changes as conflicts. So, if the duration is less than a configured
|
||||
threshold, we omit the ``last_downloaded_uri`` field from the
|
||||
metadata. This will have the effect of making other clients treat
|
||||
this change as a conflict whenever they already have a copy of the
|
||||
file.
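
As a rough illustration of how the uploader could assemble the directory
entry metadata, including this optional timestamp refinement, consider the
sketch below; ``db.get_last_downloaded`` and the threshold constant are
hypothetical names, not the actual schema or API::

    RECENCY_THRESHOLD = 2.0  # seconds; illustrative value only

    def build_entry_metadata(db, path, change_time):
        # Copy the last-downloaded URI (if any) into the directory-entry
        # metadata, so other clients can tell whether this upload was
        # "based on" a version they have already seen.
        metadata = {}
        record = db.get_last_downloaded(path)  # assumed accessor; None if never downloaded
        if record is not None:
            # Refinement: if the local change happened very soon after the
            # last download, be conservative and omit the field, which makes
            # other clients treat the change as a conflict.
            if change_time - record.timestamp >= RECENCY_THRESHOLD:
                metadata["last_downloaded_uri"] = record.uri
        return metadata
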
|
||||
|
||||
Conflict/overwrite decision algorithm
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Now we are ready to describe the algorithm for determining whether a
download for the file ``foo`` is an overwrite or a conflict (refining
step 2 of the procedure from the `Earth Dragons`_ section).

Let ``last_downloaded_uri`` be the field of that name obtained from
the directory entry metadata for ``foo`` in Bob's DMD (this field
may be absent). Then the algorithm is:

* 2a. Attempt to "stat" ``foo`` to get its *current statinfo* (size
  in bytes, ``mtime``, and ``ctime``). If Alice has no local copy
  of ``foo``, classify as an overwrite.

* 2b. Read the following information for the path ``foo`` from the
  local magic folder db:

  * the *last-seen statinfo*, if any (this is the size in
    bytes, ``mtime``, and ``ctime`` stored in the ``local_files``
    table when the file was last uploaded);
  * the ``last_uploaded_uri`` field of the ``local_files`` table
    for this file, which is the URI under which the file was last
    uploaded.

* 2c. If any of the following are true, then classify as a conflict:

  * i. there are pending notifications of changes to ``foo``;
  * ii. the last-seen statinfo is either absent (i.e. there is
    no entry in the database for this path), or different from the
    current statinfo;
  * iii. either ``last_downloaded_uri`` or ``last_uploaded_uri``
    (or both) are absent, or they are different.

Otherwise, classify as an overwrite.
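
The same decision can be expressed as a short Python sketch;
``stat_or_none``, ``get_pending_notifications`` and the db accessor are
hypothetical helpers, and the real downloader code may differ in detail::

    def classify_download(path, remote_last_downloaded_uri, db):
        # Step 2a: no local copy at all means a plain overwrite.
        current = stat_or_none(path)  # (size, mtime, ctime) or None; assumed helper
        if current is None:
            return "overwrite"

        # Step 2b: read what we knew about this path at the last upload.
        row = db.get_local_file(path)  # assumed accessor; None if no entry

        # Step 2c: any sign that the local file changed since the last
        # upload (or that we cannot tell) forces a conflict.
        if get_pending_notifications(path):                    # 2c i
            return "conflict"
        if row is None or row.last_seen_statinfo != current:   # 2c ii
            return "conflict"
        if (remote_last_downloaded_uri is None
                or row.last_uploaded_uri is None
                or remote_last_downloaded_uri != row.last_uploaded_uri):  # 2c iii
            return "conflict"
        return "overwrite"
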
|
||||
|
||||
|
||||
Air Dragons: Collisions between local writes and uploads
|
||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
Short of filesystem-specific features on Unix or the `shadow copy service`_
|
||||
on Windows (which is per-volume and therefore difficult to use in this
|
||||
context), there is no way to *read* the whole contents of a file
|
||||
atomically. Therefore, when we read a file in order to upload it, we
|
||||
may read an inconsistent version if it was also being written locally.
|
||||
|
||||
.. _`shadow copy service`: https://technet.microsoft.com/en-us/library/ee923636%28v=ws.10%29.aspx
|
||||
|
||||
A well-behaved application can avoid this problem for its writes:
|
||||
|
||||
* On Unix, if another process modifies a file by renaming a temporary
|
||||
file onto it, then we will consistently read either the old contents
|
||||
or the new contents.
|
||||
* On Windows, if the other process uses sharing flags to deny reads
|
||||
while it is writing a file, then we will consistently read either
|
||||
the old contents or the new contents, unless a sharing error occurs.
|
||||
In the case of a sharing error we should retry later, up to a
|
||||
maximum number of retries.
|
||||
|
||||
In the case of a not-so-well-behaved application writing to a file
|
||||
at the same time we read from it, the magic folder will still be
|
||||
eventually consistent, but inconsistent versions may be visible to
|
||||
other users' clients.
|
||||
|
||||
In Objective 2 we implemented a delay, called the *pending delay*,
|
||||
after the notification of a filesystem change and before the file is
|
||||
read in order to upload it (Tahoe-LAFS ticket `#1440`_). If another
|
||||
change notification occurs within the pending delay time, the delay
|
||||
is restarted. This helps to some extent because it means that if
|
||||
files are written more quickly than the pending delay and less
|
||||
frequently than the pending delay, we shouldn't encounter this
|
||||
inconsistency.
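
One way to picture the restartable pending delay is with Twisted's
``callLater``/``reset``, as in the sketch below; this illustrates the idea
only and is not the structure of the actual uploader code::

    from twisted.internet import reactor

    PENDING_DELAY = 1.0  # seconds; illustrative value

    class PendingDelay(object):
        """Call `on_stable` only after no new change notifications have
        arrived for PENDING_DELAY seconds (sketch, not the real uploader)."""

        def __init__(self, on_stable):
            self._on_stable = on_stable
            self._call = None

        def notify(self):
            # Each notification (re)starts the countdown.
            if self._call is not None and self._call.active():
                self._call.reset(PENDING_DELAY)
            else:
                self._call = reactor.callLater(PENDING_DELAY, self._on_stable)
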
|
||||
|
||||
.. _`#1440`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1440
|
||||
|
||||
The likelihood of inconsistency could be further reduced, even for
|
||||
writes by not-so-well-behaved applications, by delaying the actual
|
||||
upload for a further period —called the *stability delay*— after the
|
||||
file has finished being read. If a notification occurs between the
|
||||
end of the pending delay and the end of the stability delay, then
|
||||
the read would be aborted and the notification requeued.
|
||||
|
||||
This would have the effect of ensuring that no write notifications
|
||||
have been received for the file during a time window that brackets
|
||||
the period when it was being read, with margin before and after
|
||||
this period defined by the pending and stability delays. The delays
|
||||
are intended to account for asynchronous notification of events, and
|
||||
caching in the filesystem.
|
||||
|
||||
Note however that we cannot guarantee that the delays will be long
|
||||
enough to prevent inconsistency in any particular case. Also, the
|
||||
stability delay would potentially affect performance significantly
|
||||
because (unlike the pending delay) it is not overlapped when there
|
||||
are multiple files on the upload queue. This performance impact
|
||||
could be mitigated by uploading files in parallel where possible
|
||||
(Tahoe-LAFS ticket `#1459`_).
|
||||
|
||||
We have not yet decided whether to implement the stability delay, and
|
||||
it is not planned to be implemented for the OTF objective 4 milestone.
|
||||
Ticket `#2431`_ has been opened to track this idea.
|
||||
|
||||
.. _`#1459`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1459
|
||||
.. _`#2431`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2431
|
||||
|
||||
Note that the situation of both a local process and the Magic Folder
|
||||
client reading a file at the same time cannot cause any inconsistency.
|
||||
|
||||
|
||||
Water Dragons: Handling deletion and renames
|
||||
''''''''''''''''''''''''''''''''''''''''''''
|
||||
|
||||
Deletion of a file
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
When a file is deleted from the filesystem of a Magic Folder client,
|
||||
the most intuitive behavior is for it also to be deleted under that
|
||||
name from other clients. To avoid data loss, the other clients should
|
||||
actually rename their copies to a backup filename.
|
||||
|
||||
It would not be sufficient for a Magic Folder client that deletes
|
||||
a file to implement this simply by removing the directory entry from
|
||||
its DMD. Indeed, the entry may not exist in the client's DMD if it
|
||||
has never previously changed the file.
|
||||
|
||||
Instead, the client links a zero-length file into its DMD and sets
|
||||
``deleted: true`` in the directory entry metadata. Other clients
|
||||
take this as a signal to rename their copies to the backup filename.
|
||||
|
||||
Note that the entry for this zero-length file has a version number as
|
||||
usual, and later versions may restore the file.
|
||||
|
||||
When the downloader deletes a file (or renames it to a filename
|
||||
ending in ``.backup``) in response to a remote change, a local
|
||||
filesystem notification will occur, and we must make sure that this
|
||||
is not treated as a local change. To do this we have the downloader
|
||||
set the ``size`` field in the magic folder db to ``None`` (SQL NULL)
|
||||
just before deleting the file, and suppress notifications for which
|
||||
the local file does not exist, and the recorded ``size`` field is
|
||||
``None``.
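
A sketch of that suppression check, using a hypothetical db accessor::

    import os

    def should_ignore_notification(path, db):
        # Suppress the notification that our own deletion generates: the
        # local file is gone and we recorded size = None (SQL NULL) just
        # before deleting it.
        row = db.get_local_file(path)  # assumed accessor
        return (not os.path.exists(path)
                and row is not None
                and row.size is None)
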
|
||||
|
||||
When a Magic Folder client restarts, we can detect files that had
|
||||
been downloaded but were deleted while it was not running, because
|
||||
their paths will have last-downloaded records in the magic folder db
|
||||
with a ``size`` other than ``None``, and without any corresponding
|
||||
local file.
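
Detecting such files at startup could then be sketched as a scan over the
db, again with hypothetical accessors and attribute names::

    import os

    def deleted_while_stopped(db, folder_dir):
        # Paths with a recorded size (i.e. not None) but no corresponding
        # local file were deleted while the client was not running.
        missing = []
        for row in db.all_local_files():  # assumed accessor
            local = os.path.join(folder_dir, row.relative_path)
            if row.size is not None and not os.path.exists(local):
                missing.append(row.relative_path)
        return missing
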
|
||||
|
||||
Deletion of a directory
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Local filesystems (unlike a Tahoe-LAFS filesystem) normally cannot
|
||||
unlink a directory that has any remaining children. Therefore a
|
||||
Magic Folder client cannot delete local copies of directories in
|
||||
general, because they will typically contain backup files. This must
|
||||
be done manually on each client if desired.
|
||||
|
||||
Nevertheless, a Magic Folder client that deletes a directory should
|
||||
set ``deleted: true`` on the metadata entry for the corresponding
|
||||
zero-length file. This avoids the directory being recreated after
|
||||
it has been manually deleted from a client.
|
||||
|
||||
Renaming
|
||||
~~~~~~~~
|
||||
|
||||
It is sufficient to handle renaming of a file by treating it as a
|
||||
deletion and an addition under the new name.
|
||||
|
||||
This also applies to directories, although users may find the
|
||||
resulting behavior unintuitive: all of the files under the old name
|
||||
will be renamed to backup filenames, and a new directory structure
|
||||
created under the new name. We believe this is the best that can be
|
||||
done without imposing unreasonable implementation complexity.
|
||||
|
||||
|
||||
Summary
|
||||
-------
|
||||
|
||||
This completes the design of remote-to-local synchronization.
|
||||
We realize that it may seem very complicated. Anecdotally, proprietary
|
||||
filesystem synchronization designs we are aware of, such as Dropbox,
|
||||
are said to incur similar or greater design complexity.
|
@ -1,205 +0,0 @@
|
||||
Magic Folder user interface design
|
||||
==================================
|
||||
|
||||
Scope
|
||||
-----
|
||||
|
||||
In this Objective we will design a user interface to allow users to conveniently
|
||||
and securely indicate which folders on some devices should be "magically" linked
|
||||
to which folders on other devices.
|
||||
|
||||
This is a critical usability and security issue for which there is no known perfect
|
||||
solution, but which we believe is amenable to a "good enough" trade-off solution.
|
||||
This document explains the design and justifies its trade-offs in terms of security,
|
||||
usability, and time-to-market.
|
||||
|
||||
Tickets on the Tahoe-LAFS trac with the `otf-magic-folder-objective6`_
|
||||
keyword are within the scope of the user interface design.
|
||||
|
||||
.. _otf-magic-folder-objective6: https://tahoe-lafs.org/trac/tahoe-lafs/query?status=!closed&keywords=~otf-magic-folder-objective6
|
||||
|
||||
Glossary
|
||||
''''''''
|
||||
|
||||
Object: a file or directory
|
||||
|
||||
DMD: distributed mutable directory
|
||||
|
||||
Folder: an abstract directory that is synchronized between clients.
|
||||
(A folder is not the same as the directory corresponding to it on
|
||||
any particular client, nor is it the same as a DMD.)
|
||||
|
||||
Collective: the set of clients subscribed to a given Magic Folder.
|
||||
|
||||
Diminishing: the process of deriving, from an existing capability,
|
||||
another capability that gives less authority (for example, deriving a
|
||||
read cap from a read/write cap).
|
||||
|
||||
|
||||
Design Constraints
|
||||
------------------
|
||||
|
||||
The design of the Tahoe-side representation of a Magic Folder, and the
|
||||
polling mechanism that the Magic Folder clients will use to detect remote
|
||||
changes was discussed in :doc:`remote-to-local-sync<remote-to-local-sync>`,
|
||||
and we will not revisit that here. The assumption made by that design was
|
||||
that each client would be configured with the following information:
|
||||
|
||||
* a write cap to its own *client DMD*.
|
||||
* a read cap to a *collective directory*.
|
||||
|
||||
The collective directory contains links to each client DMD named by the
|
||||
corresponding client's nickname.
|
||||
|
||||
This design was chosen to allow straightforward addition of clients without
|
||||
requiring each existing client to change its configuration.
|
||||
|
||||
Note that each client in a Magic Folder collective has the authority to add,
|
||||
modify or delete any object within the Magic Folder. It is also able to control
|
||||
to some extent whether its writes will be treated by another client as overwrites
|
||||
or as conflicts. However, there is still a reliability benefit to preventing a
|
||||
client from accidentally modifying another client's DMD, or from accidentally
|
||||
modifying the collective directory in a way that would lose data. This motivates
|
||||
ensuring that each client only has access to the caps above, rather than, say,
|
||||
every client having a write cap to the collective directory.
|
||||
|
||||
Another important design constraint is that we cannot violate the :doc:`write
|
||||
coordination directive<../../write_coordination>`; that is, we cannot write to
|
||||
the same mutable directory from multiple clients, even during the setup phase
|
||||
when adding a client.
|
||||
|
||||
Within these constraints, for usability we want to minimize the number of steps
|
||||
required to configure a Magic Folder collective.
|
||||
|
||||
|
||||
Proposed Design
|
||||
---------------
|
||||
|
||||
Three ``tahoe`` subcommands are added::
|
||||
|
||||
tahoe magic-folder create MAGIC: [MY_NICKNAME LOCAL_DIR]
|
||||
|
||||
Create an empty Magic Folder. The MAGIC: local alias is set
|
||||
to a write cap which can be used to refer to this Magic Folder
|
||||
in future ``tahoe magic-folder invite`` commands.
|
||||
|
||||
If MY_NICKNAME and LOCAL_DIR are given, the current client
|
||||
immediately joins the newly created Magic Folder with that
|
||||
nickname and local directory.
|
||||
|
||||
|
||||
tahoe magic-folder invite MAGIC: THEIR_NICKNAME
|
||||
|
||||
Print an "invitation" that can be used to invite another
|
||||
client to join a Magic Folder, with the given nickname.
|
||||
|
||||
The invitation must be sent to the user of the other client
|
||||
over a secure channel (e.g. PGP email, OTR, or ssh).
|
||||
|
||||
This command will normally be run by the same client that
|
||||
created the Magic Folder. However, it may be run by a
|
||||
different client if the ``MAGIC:`` alias is copied to
|
||||
the ``private/aliases`` file of that other client, or if
|
||||
``MAGIC:`` is replaced by the write cap to which it points.
|
||||
|
||||
|
||||
tahoe magic-folder join INVITATION LOCAL_DIR
|
||||
|
||||
Accept an invitation created by ``tahoe magic-folder invite``.
|
||||
The current client joins the specified Magic Folder, which will
|
||||
appear in the local filesystem at the given directory.
|
||||
|
||||
|
||||
There are no commands to remove a client or to revoke an
|
||||
invitation, although those are possible features that could
|
||||
be added in future. (When removing a client, it is necessary
|
||||
to copy each file it added to some other client's DMD, if it
|
||||
is the most recent version of that file.)
|
||||
|
||||
|
||||
Implementation
|
||||
''''''''''''''
|
||||
|
||||
For "``tahoe magic-folder create MAGIC: [MY_NICKNAME LOCAL_DIR]``" :
|
||||
|
||||
1. Run "``tahoe create-alias MAGIC:``".
|
||||
2. If ``MY_NICKNAME`` and ``LOCAL_DIR`` are given, do the equivalent of::
|
||||
|
||||
INVITATION=`tahoe invite-magic-folder MAGIC: MY_NICKNAME`
|
||||
tahoe join-magic-folder INVITATION LOCAL_DIR
|
||||
|
||||
|
||||
For "``tahoe magic-folder invite COLLECTIVE_WRITECAP NICKNAME``" :
|
||||
|
||||
(``COLLECTIVE_WRITECAP`` can, as a special case, be an alias such as ``MAGIC:``.)
|
||||
|
||||
1. Create an empty client DMD. Let its write URI be ``CLIENT_WRITECAP``.
|
||||
2. Diminish ``CLIENT_WRITECAP`` to ``CLIENT_READCAP``, and
|
||||
diminish ``COLLECTIVE_WRITECAP`` to ``COLLECTIVE_READCAP``.
|
||||
3. Run "``tahoe ln CLIENT_READCAP COLLECTIVE_WRITECAP/NICKNAME``".
|
||||
4. Print "``COLLECTIVE_READCAP+CLIENT_WRITECAP``" as the invitation,
|
||||
accompanied by instructions on how to accept the invitation and
|
||||
the need to send it over a secure channel.
|
||||
|
||||
|
||||
For "``tahoe magic-folder join INVITATION LOCAL_DIR``" :
|
||||
|
||||
1. Parse ``INVITATION`` as ``COLLECTIVE_READCAP+CLIENT_WRITECAP``.
|
||||
2. Write ``CLIENT_WRITECAP`` to the file ``magic_folder_dircap``
|
||||
under the client's ``private`` directory.
|
||||
3. Write ``COLLECTIVE_READCAP`` to the file ``collective_dircap``
|
||||
under the client's ``private`` directory.
|
||||
4. Edit the client's ``tahoe.cfg`` to set
|
||||
``[magic_folder] enabled = True`` and
|
||||
``[magic_folder] local.directory = LOCAL_DIR``.
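
The join steps above could be sketched in Python roughly as follows;
``parse_invitation`` and the omitted config editing are simplified
assumptions rather than the actual implementation::

    import os

    def parse_invitation(invitation):
        # Invitations have the form "COLLECTIVE_READCAP+CLIENT_WRITECAP";
        # this assumes the caps themselves contain no '+' character.
        collective_readcap, client_writecap = invitation.strip().split("+", 1)
        return collective_readcap, client_writecap

    def join_magic_folder(node_dir, invitation, local_dir):
        collective_readcap, client_writecap = parse_invitation(invitation)
        private = os.path.join(node_dir, "private")
        with open(os.path.join(private, "magic_folder_dircap"), "w") as f:
            f.write(client_writecap)
        with open(os.path.join(private, "collective_dircap"), "w") as f:
            f.write(collective_readcap)
        # Finally, tahoe.cfg gets "[magic_folder] enabled = True" and
        # "local.directory = <local_dir>" (config editing omitted here).
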
|
||||
|
||||
|
||||
Discussion
|
||||
----------
|
||||
|
||||
The proposed design has a minor violation of the
|
||||
`Principle of Least Authority`_ in order to reduce the number
|
||||
of steps needed. The invoker of "``tahoe magic-folder invite``"
|
||||
creates the client DMD on behalf of the invited client, and
|
||||
could retain its write cap (which is part of the invitation).
|
||||
|
||||
.. _`Principle of Least Authority`: http://www.eros-os.org/papers/secnotsep.pdf
|
||||
|
||||
A possible alternative design would be for the invited client
|
||||
to create its own client DMD, and send it back to the inviter
|
||||
to be linked into the collective directory. However this would
|
||||
require another secure communication and another command
|
||||
invocation per client. Given that, as mentioned earlier, each
|
||||
client in a Magic Folder collective already has the authority
|
||||
to add, modify or delete any object within the Magic Folder,
|
||||
we considered the potential security/reliability improvement
|
||||
here not to be worth the loss of usability.
|
||||
|
||||
We also considered a design where each client had write access to
|
||||
the collective directory. This would arguably be a more serious
|
||||
violation of the Principle of Least Authority than the one above
|
||||
(because all clients would have excess authority rather than just
|
||||
the inviter). In any case, it was not clear how to make such a
|
||||
design satisfy the :doc:`write coordination
|
||||
directive<../../write_coordination>`, because the collective
|
||||
directory would have needed to be written to by multiple clients.
|
||||
|
||||
The reliance on a secure channel to send the invitation to its
|
||||
intended recipient is not ideal, since it may involve additional
|
||||
software such as clients for PGP, OTR, ssh etc. However, we believe
|
||||
that this complexity is necessary rather than incidental, because
|
||||
there must be some way to distinguish the intended recipient from
|
||||
potential attackers who would try to become members of the Magic
|
||||
Folder collective without authorization. By making use of existing
|
||||
channels that have likely already been set up by security-conscious
|
||||
users, we avoid reinventing the wheel or imposing substantial extra
|
||||
implementation costs.
|
||||
|
||||
The length of an invitation will be approximately the combined
|
||||
length of a Tahoe-LAFS read cap and write cap. This is several
|
||||
lines long, but still short enough to be cut-and-pasted successfully
|
||||
if care is taken. Errors in copying the invitation can be detected
|
||||
since Tahoe-LAFS cap URIs are self-authenticating.
|
||||
|
||||
The implementation of the ``tahoe`` subcommands is straightforward
|
||||
and raises no further difficult design issues.
|
@ -77,9 +77,9 @@ If you're planning to hack on the source code, you might want to add
|
||||
Dependencies
|
||||
------------
|
||||
|
||||
Tahoe-LAFS depends upon several packages that use compiled C code, such as
|
||||
zfec, pycryptopp, and others. This code must be built separately for each
|
||||
platform (Windows, OS-X, and different flavors of Linux).
|
||||
Tahoe-LAFS depends upon several packages that use compiled C code
|
||||
(such as zfec). This code must be built separately for each platform
|
||||
(Windows, OS-X, and different flavors of Linux).
|
||||
|
||||
Pre-compiled "wheels" of all Tahoe's dependencies are hosted on the
|
||||
tahoe-lafs.org website in the ``deps/`` directory. The ``--find-links=``
|
||||
|
@ -2,24 +2,44 @@ from __future__ import print_function
|
||||
|
||||
import sys
|
||||
import shutil
|
||||
from sys import stdout as _stdout
|
||||
from os import mkdir, listdir, unlink
|
||||
from os.path import join, abspath, curdir, exists
|
||||
from time import sleep
|
||||
from os import mkdir, listdir, environ
|
||||
from os.path import join, exists
|
||||
from tempfile import mkdtemp, mktemp
|
||||
from functools import partial
|
||||
|
||||
from foolscap.furl import (
|
||||
decode_furl,
|
||||
)
|
||||
|
||||
from eliot import (
|
||||
to_file,
|
||||
log_call,
|
||||
start_action,
|
||||
)
|
||||
|
||||
from twisted.python.procutils import which
|
||||
from twisted.internet.defer import Deferred, DeferredList
|
||||
from twisted.internet.task import deferLater
|
||||
from twisted.internet.error import ProcessExitedAlready
|
||||
from twisted.internet.defer import DeferredList
|
||||
from twisted.internet.error import (
|
||||
ProcessExitedAlready,
|
||||
ProcessTerminated,
|
||||
)
|
||||
|
||||
import pytest
|
||||
import pytest_twisted
|
||||
|
||||
from util import _CollectOutputProtocol
|
||||
from util import _MagicTextProtocol
|
||||
from util import _DumpOutputProtocol
|
||||
from util import _ProcessExitedProtocol
|
||||
from util import _create_node
|
||||
from util import _run_node
|
||||
from util import (
|
||||
_CollectOutputProtocol,
|
||||
_MagicTextProtocol,
|
||||
_DumpOutputProtocol,
|
||||
_ProcessExitedProtocol,
|
||||
_create_node,
|
||||
_run_node,
|
||||
_cleanup_tahoe_process,
|
||||
_tahoe_runner_optional_coverage,
|
||||
await_client_ready,
|
||||
TahoeProcess,
|
||||
)
|
||||
|
||||
|
||||
# pytest customization hooks
|
||||
@ -29,6 +49,17 @@ def pytest_addoption(parser):
|
||||
"--keep-tempdir", action="store_true", dest="keep",
|
||||
help="Keep the tmpdir with the client directories (introducer, etc)",
|
||||
)
|
||||
parser.addoption(
|
||||
"--coverage", action="store_true", dest="coverage",
|
||||
help="Collect coverage statistics",
|
||||
)
|
||||
|
||||
@pytest.fixture(autouse=True, scope='session')
|
||||
def eliot_logging():
|
||||
with open("integration.eliot.json", "w") as f:
|
||||
to_file(f)
|
||||
yield
|
||||
|
||||
|
||||
# I've mostly defined these fixtures from "easiest" to "most
|
||||
# complicated", and the dependencies basically go "down the
|
||||
@ -36,8 +67,8 @@ def pytest_addoption(parser):
|
||||
# set up the grid once, but the "con" that each test has to be a
|
||||
# little careful they're not stepping on toes etc :/
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@log_call(action_type=u"integration:reactor", include_result=False)
|
||||
def reactor():
|
||||
# this is a fixture in case we might want to try different
|
||||
# reactors for some reason.
|
||||
@ -46,18 +77,19 @@ def reactor():
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@log_call(action_type=u"integration:temp_dir", include_args=[])
|
||||
def temp_dir(request):
|
||||
"""
|
||||
Invoke like 'py.test --keep ...' to avoid deleting the temp-dir
|
||||
Invoke like 'py.test --keep-tempdir ...' to avoid deleting the temp-dir
|
||||
"""
|
||||
tmp = mkdtemp(prefix="tahoe")
|
||||
if request.config.getoption('keep', True):
|
||||
print("Will retain tempdir '{}'".format(tmp))
|
||||
if request.config.getoption('keep'):
|
||||
print("\nWill retain tempdir '{}'".format(tmp))
|
||||
|
||||
# I'm leaving this in and always calling it so that the tempdir
|
||||
# path is (also) printed out near the end of the run
|
||||
def cleanup():
|
||||
if request.config.getoption('keep', True):
|
||||
if request.config.getoption('keep'):
|
||||
print("Keeping tempdir '{}'".format(tmp))
|
||||
else:
|
||||
try:
|
||||
@ -70,15 +102,17 @@ def temp_dir(request):
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@log_call(action_type=u"integration:flog_binary", include_args=[])
|
||||
def flog_binary():
|
||||
return which('flogtool')[0]
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@log_call(action_type=u"integration:flog_gatherer", include_args=[])
|
||||
def flog_gatherer(reactor, temp_dir, flog_binary, request):
|
||||
out_protocol = _CollectOutputProtocol()
|
||||
gather_dir = join(temp_dir, 'flog_gather')
|
||||
process = reactor.spawnProcess(
|
||||
reactor.spawnProcess(
|
||||
out_protocol,
|
||||
flog_binary,
|
||||
(
|
||||
@ -88,7 +122,7 @@ def flog_gatherer(reactor, temp_dir, flog_binary, request):
|
||||
gather_dir,
|
||||
)
|
||||
)
|
||||
pytest.blockon(out_protocol.done)
|
||||
pytest_twisted.blockon(out_protocol.done)
|
||||
|
||||
twistd_protocol = _MagicTextProtocol("Gatherer waiting at")
|
||||
twistd_process = reactor.spawnProcess(
|
||||
@ -100,14 +134,10 @@ def flog_gatherer(reactor, temp_dir, flog_binary, request):
|
||||
),
|
||||
path=gather_dir,
|
||||
)
|
||||
pytest.blockon(twistd_protocol.magic_seen)
|
||||
pytest_twisted.blockon(twistd_protocol.magic_seen)
|
||||
|
||||
def cleanup():
|
||||
try:
|
||||
twistd_process.signalProcess('TERM')
|
||||
pytest.blockon(twistd_protocol.exited)
|
||||
except ProcessExitedAlready:
|
||||
pass
|
||||
_cleanup_tahoe_process(twistd_process, twistd_protocol.exited)
|
||||
|
||||
flog_file = mktemp('.flog_dump')
|
||||
flog_protocol = _DumpOutputProtocol(open(flog_file, 'w'))
|
||||
@ -122,7 +152,12 @@ def flog_gatherer(reactor, temp_dir, flog_binary, request):
|
||||
'flogtool', 'dump', join(temp_dir, 'flog_gather', flogs[0])
|
||||
),
|
||||
)
|
||||
pytest.blockon(flog_protocol.done)
|
||||
print("Waiting for flogtool to complete")
|
||||
try:
|
||||
pytest_twisted.blockon(flog_protocol.done)
|
||||
except ProcessTerminated as e:
|
||||
print("flogtool exited unexpectedly: {}".format(str(e)))
|
||||
print("Flogtool completed")
|
||||
|
||||
request.addfinalizer(cleanup)
|
||||
|
||||
@ -132,6 +167,11 @@ def flog_gatherer(reactor, temp_dir, flog_binary, request):
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@log_call(
|
||||
action_type=u"integration:introducer",
|
||||
include_args=["temp_dir", "flog_gatherer"],
|
||||
include_result=False,
|
||||
)
|
||||
def introducer(reactor, temp_dir, flog_gatherer, request):
|
||||
config = '''
|
||||
[node]
|
||||
@ -146,18 +186,18 @@ log_gatherer.furl = {log_furl}
|
||||
if not exists(intro_dir):
|
||||
mkdir(intro_dir)
|
||||
done_proto = _ProcessExitedProtocol()
|
||||
reactor.spawnProcess(
|
||||
_tahoe_runner_optional_coverage(
|
||||
done_proto,
|
||||
sys.executable,
|
||||
reactor,
|
||||
request,
|
||||
(
|
||||
sys.executable, '-m', 'allmydata.scripts.runner',
|
||||
'create-introducer',
|
||||
'--listen=tcp',
|
||||
'--hostname=localhost',
|
||||
intro_dir,
|
||||
),
|
||||
)
|
||||
pytest.blockon(done_proto.done)
|
||||
pytest_twisted.blockon(done_proto.done)
|
||||
|
||||
# over-write the config file with our stuff
|
||||
with open(join(intro_dir, 'tahoe.cfg'), 'w') as f:
|
||||
@ -167,39 +207,48 @@ log_gatherer.furl = {log_furl}
|
||||
# but on linux it means daemonize. "tahoe run" is consistent
|
||||
# between platforms.
|
||||
protocol = _MagicTextProtocol('introducer running')
|
||||
process = reactor.spawnProcess(
|
||||
transport = _tahoe_runner_optional_coverage(
|
||||
protocol,
|
||||
sys.executable,
|
||||
reactor,
|
||||
request,
|
||||
(
|
||||
sys.executable, '-m', 'allmydata.scripts.runner',
|
||||
'run',
|
||||
intro_dir,
|
||||
),
|
||||
)
|
||||
request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))
|
||||
|
||||
def cleanup():
|
||||
try:
|
||||
process.signalProcess('TERM')
|
||||
pytest.blockon(protocol.exited)
|
||||
except ProcessExitedAlready:
|
||||
pass
|
||||
request.addfinalizer(cleanup)
|
||||
|
||||
pytest.blockon(protocol.magic_seen)
|
||||
return process
|
||||
pytest_twisted.blockon(protocol.magic_seen)
|
||||
return TahoeProcess(transport, intro_dir)
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@log_call(action_type=u"integration:introducer:furl", include_args=["temp_dir"])
|
||||
def introducer_furl(introducer, temp_dir):
|
||||
furl_fname = join(temp_dir, 'introducer', 'private', 'introducer.furl')
|
||||
while not exists(furl_fname):
|
||||
print("Don't see {} yet".format(furl_fname))
|
||||
time.sleep(.1)
|
||||
sleep(.1)
|
||||
furl = open(furl_fname, 'r').read()
|
||||
tubID, location_hints, name = decode_furl(furl)
|
||||
if not location_hints:
|
||||
# If there are no location hints then nothing can ever possibly
|
||||
# connect to it and the only thing that can happen next is something
|
||||
# will hang or time out. So just give up right now.
|
||||
raise ValueError(
|
||||
"Introducer ({!r}) fURL has no location hints!".format(
|
||||
introducer_furl,
|
||||
),
|
||||
)
|
||||
return furl
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@log_call(
|
||||
action_type=u"integration:tor:introducer",
|
||||
include_args=["temp_dir", "flog_gatherer"],
|
||||
include_result=False,
|
||||
)
|
||||
def tor_introducer(reactor, temp_dir, flog_gatherer, request):
|
||||
config = '''
|
||||
[node]
|
||||
@ -214,18 +263,18 @@ log_gatherer.furl = {log_furl}
|
||||
if not exists(intro_dir):
|
||||
mkdir(intro_dir)
|
||||
done_proto = _ProcessExitedProtocol()
|
||||
reactor.spawnProcess(
|
||||
_tahoe_runner_optional_coverage(
|
||||
done_proto,
|
||||
sys.executable,
|
||||
reactor,
|
||||
request,
|
||||
(
|
||||
sys.executable, '-m', 'allmydata.scripts.runner',
|
||||
'create-introducer',
|
||||
'--tor-control-port', 'tcp:localhost:8010',
|
||||
'--listen=tor',
|
||||
intro_dir,
|
||||
),
|
||||
)
|
||||
pytest.blockon(done_proto.done)
|
||||
pytest_twisted.blockon(done_proto.done)
|
||||
|
||||
# over-write the config file with our stuff
|
||||
with open(join(intro_dir, 'tahoe.cfg'), 'w') as f:
|
||||
@ -235,11 +284,11 @@ log_gatherer.furl = {log_furl}
|
||||
# but on linux it means daemonize. "tahoe run" is consistent
|
||||
# between platforms.
|
||||
protocol = _MagicTextProtocol('introducer running')
|
||||
process = reactor.spawnProcess(
|
||||
transport = _tahoe_runner_optional_coverage(
|
||||
protocol,
|
||||
sys.executable,
|
||||
reactor,
|
||||
request,
|
||||
(
|
||||
sys.executable, '-m', 'allmydata.scripts.runner',
|
||||
'run',
|
||||
intro_dir,
|
||||
),
|
||||
@ -247,14 +296,14 @@ log_gatherer.furl = {log_furl}
|
||||
|
||||
def cleanup():
|
||||
try:
|
||||
process.signalProcess('TERM')
|
||||
pytest.blockon(protocol.exited)
|
||||
transport.signalProcess('TERM')
|
||||
pytest_twisted.blockon(protocol.exited)
|
||||
except ProcessExitedAlready:
|
||||
pass
|
||||
request.addfinalizer(cleanup)
|
||||
|
||||
pytest.blockon(protocol.magic_seen)
|
||||
return process
|
||||
pytest_twisted.blockon(protocol.magic_seen)
|
||||
return transport
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@ -262,147 +311,71 @@ def tor_introducer_furl(tor_introducer, temp_dir):
|
||||
furl_fname = join(temp_dir, 'introducer_tor', 'private', 'introducer.furl')
|
||||
while not exists(furl_fname):
|
||||
print("Don't see {} yet".format(furl_fname))
|
||||
time.sleep(.1)
|
||||
sleep(.1)
|
||||
furl = open(furl_fname, 'r').read()
|
||||
return furl
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@log_call(
|
||||
action_type=u"integration:storage_nodes",
|
||||
include_args=["temp_dir", "introducer_furl", "flog_gatherer"],
|
||||
include_result=False,
|
||||
)
|
||||
def storage_nodes(reactor, temp_dir, introducer, introducer_furl, flog_gatherer, request):
|
||||
nodes = []
|
||||
nodes_d = []
|
||||
# start all 5 nodes in parallel
|
||||
for x in range(5):
|
||||
name = 'node{}'.format(x)
|
||||
# tub_port = 9900 + x
|
||||
nodes.append(
|
||||
pytest.blockon(
|
||||
_create_node(
|
||||
reactor, request, temp_dir, introducer_furl, flog_gatherer, name,
|
||||
web_port=None, storage=True,
|
||||
)
|
||||
web_port= 9990 + x
|
||||
nodes_d.append(
|
||||
_create_node(
|
||||
reactor, request, temp_dir, introducer_furl, flog_gatherer, name,
|
||||
web_port="tcp:{}:interface=localhost".format(web_port),
|
||||
storage=True,
|
||||
)
|
||||
)
|
||||
#nodes = pytest.blockon(DeferredList(nodes))
|
||||
nodes_status = pytest_twisted.blockon(DeferredList(nodes_d))
|
||||
nodes = []
|
||||
for ok, process in nodes_status:
|
||||
assert ok, "Storage node creation failed: {}".format(process)
|
||||
nodes.append(process)
|
||||
return nodes
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@log_call(action_type=u"integration:alice", include_args=[], include_result=False)
|
||||
def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request):
|
||||
try:
|
||||
mkdir(join(temp_dir, 'magic-alice'))
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
process = pytest.blockon(
|
||||
process = pytest_twisted.blockon(
|
||||
_create_node(
|
||||
reactor, request, temp_dir, introducer_furl, flog_gatherer, "alice",
|
||||
web_port="tcp:9980:interface=localhost",
|
||||
storage=False,
|
||||
)
|
||||
)
|
||||
await_client_ready(process)
|
||||
return process
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@log_call(action_type=u"integration:bob", include_args=[], include_result=False)
|
||||
def bob(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request):
|
||||
try:
|
||||
mkdir(join(temp_dir, 'magic-bob'))
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
process = pytest.blockon(
|
||||
process = pytest_twisted.blockon(
|
||||
_create_node(
|
||||
reactor, request, temp_dir, introducer_furl, flog_gatherer, "bob",
|
||||
web_port="tcp:9981:interface=localhost",
|
||||
storage=False,
|
||||
)
|
||||
)
|
||||
await_client_ready(process)
|
||||
return process
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
def alice_invite(reactor, alice, temp_dir, request):
|
||||
node_dir = join(temp_dir, 'alice')
|
||||
|
||||
# FIXME XXX by the time we see "client running" in the logs, the
|
||||
# storage servers aren't "really" ready to roll yet (uploads
|
||||
# fairly consistently fail if we don't hack in this pause...)
|
||||
import time ; time.sleep(5)
|
||||
proto = _CollectOutputProtocol()
|
||||
reactor.spawnProcess(
|
||||
proto,
|
||||
sys.executable,
|
||||
[
|
||||
sys.executable, '-m', 'allmydata.scripts.runner',
|
||||
'magic-folder', 'create',
|
||||
'--poll-interval', '2',
|
||||
'--basedir', node_dir, 'magik:', 'alice',
|
||||
join(temp_dir, 'magic-alice'),
|
||||
]
|
||||
)
|
||||
pytest.blockon(proto.done)
|
||||
|
||||
proto = _CollectOutputProtocol()
|
||||
reactor.spawnProcess(
|
||||
proto,
|
||||
sys.executable,
|
||||
[
|
||||
sys.executable, '-m', 'allmydata.scripts.runner',
|
||||
'magic-folder', 'invite',
|
||||
'--basedir', node_dir, 'magik:', 'bob',
|
||||
]
|
||||
)
|
||||
pytest.blockon(proto.done)
|
||||
invite = proto.output.getvalue()
|
||||
print("invite from alice", invite)
|
||||
|
||||
# before magic-folder works, we have to stop and restart (this is
|
||||
# crappy for the tests -- can we fix it in magic-folder?)
|
||||
try:
|
||||
alice.signalProcess('TERM')
|
||||
pytest.blockon(alice.exited)
|
||||
except ProcessExitedAlready:
|
||||
pass
|
||||
magic_text = 'Completed initial Magic Folder scan successfully'
|
||||
pytest.blockon(_run_node(reactor, node_dir, request, magic_text))
|
||||
return invite
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
def magic_folder(reactor, alice_invite, alice, bob, temp_dir, request):
|
||||
print("pairing magic-folder")
|
||||
bob_dir = join(temp_dir, 'bob')
|
||||
proto = _CollectOutputProtocol()
|
||||
transport = reactor.spawnProcess(
|
||||
proto,
|
||||
sys.executable,
|
||||
[
|
||||
sys.executable, '-m', 'allmydata.scripts.runner',
|
||||
'magic-folder', 'join',
|
||||
'--poll-interval', '2',
|
||||
'--basedir', bob_dir,
|
||||
alice_invite,
|
||||
join(temp_dir, 'magic-bob'),
|
||||
]
|
||||
)
|
||||
pytest.blockon(proto.done)
|
||||
|
||||
# before magic-folder works, we have to stop and restart (this is
|
||||
# crappy for the tests -- can we fix it in magic-folder?)
|
||||
try:
|
||||
print("Sending TERM to Bob")
|
||||
bob.signalProcess('TERM')
|
||||
pytest.blockon(bob.exited)
|
||||
except ProcessExitedAlready:
|
||||
pass
|
||||
|
||||
magic_text = 'Completed initial Magic Folder scan successfully'
|
||||
pytest.blockon(_run_node(reactor, bob_dir, request, magic_text))
|
||||
return (join(temp_dir, 'magic-alice'), join(temp_dir, 'magic-bob'))
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@pytest.mark.skipif(sys.platform.startswith('win'),
|
||||
'Tor tests are unstable on Windows')
|
||||
def chutney(reactor, temp_dir):
|
||||
|
||||
chutney_dir = join(temp_dir, 'chutney')
|
||||
mkdir(chutney_dir)
|
||||
|
||||
@ -414,22 +387,46 @@ def chutney(reactor, temp_dir):
|
||||
# XXX yuck! should add a setup.py to chutney so we can at least
|
||||
# "pip install <path to tarball>" and/or depend on chutney in "pip
|
||||
# install -e .[dev]" (i.e. in the 'dev' extra)
|
||||
#
|
||||
# https://trac.torproject.org/projects/tor/ticket/20343
|
||||
proto = _DumpOutputProtocol(None)
|
||||
reactor.spawnProcess(
|
||||
proto,
|
||||
'/usr/bin/git',
|
||||
'git',
|
||||
(
|
||||
'/usr/bin/git', 'clone', '--depth=1',
|
||||
'git', 'clone',
|
||||
'https://git.torproject.org/chutney.git',
|
||||
chutney_dir,
|
||||
)
|
||||
),
|
||||
env=environ,
|
||||
)
|
||||
pytest.blockon(proto.done)
|
||||
pytest_twisted.blockon(proto.done)
|
||||
|
||||
# XXX: Here we reset Chutney to the last revision known to work
|
||||
# with Python 2, as a workaround for Chutney moving to Python 3.
|
||||
# When this is no longer necessary, we will have to drop this and
|
||||
# add '--depth=1' back to the above 'git clone' subprocess.
|
||||
proto = _DumpOutputProtocol(None)
|
||||
reactor.spawnProcess(
|
||||
proto,
|
||||
'git',
|
||||
(
|
||||
'git', '-C', chutney_dir,
|
||||
'reset', '--hard',
|
||||
'99bd06c7554b9113af8c0877b6eca4ceb95dcbaa'
|
||||
),
|
||||
env=environ,
|
||||
)
|
||||
pytest_twisted.blockon(proto.done)
|
||||
|
||||
return chutney_dir
|
||||
|
||||
|
||||
@pytest.fixture(scope='session')
|
||||
@pytest.mark.skipif(sys.platform.startswith('win'),
|
||||
reason='Tor tests are unstable on Windows')
|
||||
def tor_network(reactor, temp_dir, chutney, request):
|
||||
|
||||
# this is the actual "chutney" script at the root of a chutney checkout
|
||||
chutney_dir = chutney
|
||||
chut = join(chutney_dir, 'chutney')
|
||||
@ -438,6 +435,8 @@ def tor_network(reactor, temp_dir, chutney, request):
|
||||
# ./chutney configure networks/basic
|
||||
# ./chutney start networks/basic
|
||||
|
||||
env = environ.copy()
|
||||
env.update({"PYTHONPATH": join(chutney_dir, "lib")})
|
||||
proto = _DumpOutputProtocol(None)
|
||||
reactor.spawnProcess(
|
||||
proto,
|
||||
@ -447,9 +446,9 @@ def tor_network(reactor, temp_dir, chutney, request):
|
||||
join(chutney_dir, 'networks', 'basic'),
|
||||
),
|
||||
path=join(chutney_dir),
|
||||
env={"PYTHONPATH": join(chutney_dir, "lib")},
|
||||
env=env,
|
||||
)
|
||||
pytest.blockon(proto.done)
|
||||
pytest_twisted.blockon(proto.done)
|
||||
|
||||
proto = _DumpOutputProtocol(None)
|
||||
reactor.spawnProcess(
|
||||
@ -460,9 +459,9 @@ def tor_network(reactor, temp_dir, chutney, request):
|
||||
join(chutney_dir, 'networks', 'basic'),
|
||||
),
|
||||
path=join(chutney_dir),
|
||||
env={"PYTHONPATH": join(chutney_dir, "lib")},
|
||||
env=env,
|
||||
)
|
||||
pytest.blockon(proto.done)
|
||||
pytest_twisted.blockon(proto.done)
|
||||
|
||||
# print some useful stuff
|
||||
proto = _CollectOutputProtocol()
|
||||
@ -474,9 +473,13 @@ def tor_network(reactor, temp_dir, chutney, request):
|
||||
join(chutney_dir, 'networks', 'basic'),
|
||||
),
|
||||
path=join(chutney_dir),
|
||||
env={"PYTHONPATH": join(chutney_dir, "lib")},
|
||||
env=env,
|
||||
)
|
||||
pytest.blockon(proto.done)
|
||||
try:
|
||||
pytest_twisted.blockon(proto.done)
|
||||
except ProcessTerminated:
|
||||
print("Chutney.TorNet status failed (continuing):")
|
||||
print(proto.output.getvalue())
|
||||
|
||||
def cleanup():
|
||||
print("Tearing down Chutney Tor network")
|
||||
@ -489,9 +492,9 @@ def tor_network(reactor, temp_dir, chutney, request):
|
||||
join(chutney_dir, 'networks', 'basic'),
|
||||
),
|
||||
path=join(chutney_dir),
|
||||
env={"PYTHONPATH": join(chutney_dir, "lib")},
|
||||
env=env,
|
||||
)
|
||||
pytest.blockon(proto.done)
|
||||
pytest_twisted.blockon(proto.done)
|
||||
request.addfinalizer(cleanup)
|
||||
|
||||
return chut
|
||||
|
@ -1,13 +1,22 @@
|
||||
#!/bin/bash
|
||||
|
||||
# https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
|
||||
set -euxo pipefail
|
||||
|
||||
CODENAME=$(lsb_release --short --codename)
|
||||
|
||||
if [ "$(id -u)" != "0" ]; then
|
||||
SUDO="sudo"
|
||||
else
|
||||
SUDO=""
|
||||
fi
|
||||
|
||||
# Script to install Tor
|
||||
echo "deb http://deb.torproject.org/torproject.org ${CODENAME} main" | ${SUDO} tee -a /etc/apt/sources.list
|
||||
echo "deb-src http://deb.torproject.org/torproject.org ${CODENAME} main" | ${SUDO} tee -a /etc/apt/sources.list
|
||||
|
||||
set -ex
|
||||
echo "deb http://deb.torproject.org/torproject.org trusty main" | sudo tee -a /etc/apt/sources.list
|
||||
echo "deb-src http://deb.torproject.org/torproject.org trusty main" | sudo tee -a /etc/apt/sources.list
|
||||
|
||||
# Install Tor repo signing key
|
||||
sudo apt-key add - <<EOF
|
||||
# # Install Tor repo signing key
|
||||
${SUDO} apt-key add - <<EOF
|
||||
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
|
||||
mQENBEqg7GsBCACsef8koRT8UyZxiv1Irke5nVpte54TDtTl1za1tOKfthmHbs2I
|
||||
@ -16,34 +25,696 @@ tknIyk5Goa36GMBl84gQceRs/4Zx3kxqCV+JYXE9CmdkpkVrh2K3j5+ysDWfD/kO
|
||||
dTzwu3WHaAwL8d5MJAGQn2i6bTw4UHytrYemS1DdG/0EThCCyAnPmmb8iBkZlSW8
|
||||
6MzVqTrN37yvYWTXk6MwKH50twaX5hzZAlSh9eqRjZLq51DDomO7EumXP90rS5mT
|
||||
QrS+wiYfGQttoZfbh3wl5ZjejgEjx+qrnOH7ABEBAAG0JmRlYi50b3Jwcm9qZWN0
|
||||
Lm9yZyBhcmNoaXZlIHNpZ25pbmcga2V5iQE8BBMBAgAmAhsDBgsJCAcDAgQVAggD
|
||||
BBYCAwECHgECF4AFAlQDRrwFCRSpj0cACgkQ7oy8noht3YnPxwgAp9e7yRer1v1G
|
||||
oywrrfam3afWNy7G0bI5gf98WPrhkgc3capVVDpOe87OaeezeICP6duTE8S5Yurw
|
||||
x+lbcCPZp7Co4uyjAdIjVHAhwGGhpzG34Y8Z6ebCd4z0AElNGpDQpMtKppLnCRRw
|
||||
knuvpKBIn4sxDgsofIg6vo4i8nL5mrIzhDpfbW9NK9lV4KvmvB4T+X5ZzdTkQ0ya
|
||||
1aHtGdMaTtKmOMVk/4ceGRDw65pllGEo4ZQEgGVZ3TmNHidiuShGqiVEbSDGRFEV
|
||||
OUiF9yvR+u6h/9iqULxOoAOfYMuGtatjrZM46d8DR2O1o00nbGHWYaQVqimGd52W
|
||||
rCJghAIMxbkBDQRKoO2QAQgA2uKxSRSKpd2JO1ODUDuxppYacY1JkemxDUEHG31c
|
||||
qCVTuFz4alNyl4I+8pmtX2i+YH7W9ew7uGgjRzPEjTOm8/Zz2ue+eQeroveuo0hy
|
||||
Fa9Y3CxhNMCE3EH4AufdofuCmnUf/W7TzyIvzecrwFPlyZhqWnmxEqu8FaR+jXK9
|
||||
Jsx2Zby/EihNoCwQOWtdv3I4Oi5KBbglxfxE7PmYgo9DYqTmHxmsnPiUE4FYZG26
|
||||
3Ll1ZqkbwW77nwDEl1uh+tjbOu+Y1cKwecWbyVIuY1eKOnzVC88ldVSKxzKOGu37
|
||||
My4z65GTByMQfMBnoZ+FZFGYiCiThj+c8i93DIRzYeOsjQARAQABiQJEBBgBAgAP
|
||||
[... remainder of the base64-encoded, ASCII-armored deb.torproject.org apt signing key ...]
-----END PGP PUBLIC KEY BLOCK-----
EOF

sudo apt-get update
sudo apt-get install tor deb.torproject.org-keyring
${SUDO} apt-get --quiet update
${SUDO} apt-get --quiet --yes install tor deb.torproject.org-keyring

@ -16,7 +16,3 @@ def test_create_introducer(introducer):

def test_create_storage(storage_nodes):
    print("Created {} storage nodes".format(len(storage_nodes)))


def test_create_alice_bob_magicfolder(magic_folder):
    print("Alice and Bob have paired magic-folders")

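# Illustrative aside, not part of the diff: "storage_nodes" and "magic_folder"
# above are pytest fixtures supplied by the integration suite's conftest.py,
# which is not shown here. A minimal, purely hypothetical sketch of how such a
# fixture is declared and injected into a test:
import pytest

@pytest.fixture(scope="session")
def storage_nodes():
    # The real fixture launches actual Tahoe-LAFS storage node processes;
    # this stand-in just returns five placeholder objects so the shape is clear.
    return [object() for _ in range(5)]

def test_create_storage_example(storage_nodes):
    # pytest matches the argument name to the fixture and passes its value in.
    assert len(storage_nodes) == 5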
@ -1,387 +0,0 @@
import sys
import time
import shutil
from os import mkdir, unlink, listdir, utime
from os.path import join, exists, getmtime

import util

import pytest


# tests converted from check_magicfolder_smoke.py
# see "conftest.py" for the fixtures (e.g. "magic_folder")


def test_alice_writes_bob_receives(magic_folder):
    alice_dir, bob_dir = magic_folder

    with open(join(alice_dir, "first_file"), "w") as f:
        f.write("alice wrote this")

    util.await_file_contents(join(bob_dir, "first_file"), "alice wrote this")
    return
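# Illustrative aside, not part of the original file: util.await_file_contents,
# util.await_file_vanishes and util.await_files_exist come from the integration
# suite's util.py, which this diff does not show. A minimal sketch of what a
# polling helper like await_file_contents could look like; the name, timeout
# value and internals are assumptions, not the real implementation:
import time
from os.path import exists

def await_file_contents_sketch(path, contents, timeout=30, error_if=None):
    # Poll until `path` exists with exactly `contents`, failing early if any
    # path in `error_if` appears, and failing after `timeout` seconds.
    start = time.time()
    while time.time() - start < timeout:
        for bad in (error_if or []):
            if exists(bad):
                raise Exception("unwanted file appeared: {}".format(bad))
        if exists(path):
            with open(path, "r") as f:
                if f.read() == contents:
                    return
        time.sleep(1)
    raise Exception("timed out waiting for {}".format(path))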


def test_alice_writes_bob_receives_multiple(magic_folder):
    """
    When Alice does a series of updates, Bob should just receive them
    with no .backup or .conflict files being produced.
    """
    alice_dir, bob_dir = magic_folder

    unwanted_files = [
        join(bob_dir, "multiple.backup"),
        join(bob_dir, "multiple.conflict")
    ]

    # first update
    with open(join(alice_dir, "multiple"), "w") as f:
        f.write("alice wrote this")

    util.await_file_contents(
        join(bob_dir, "multiple"), "alice wrote this",
        error_if=unwanted_files,
    )

    # second update
    with open(join(alice_dir, "multiple"), "w") as f:
        f.write("someone changed their mind")

    util.await_file_contents(
        join(bob_dir, "multiple"), "someone changed their mind",
        error_if=unwanted_files,
    )

    # third update
    with open(join(alice_dir, "multiple"), "w") as f:
        f.write("absolutely final version ship it")

    util.await_file_contents(
        join(bob_dir, "multiple"), "absolutely final version ship it",
        error_if=unwanted_files,
    )

    # fourth update, but both "at once" so one should conflict
    time.sleep(2)
    with open(join(alice_dir, "multiple"), "w") as f:
        f.write("okay one more attempt")
    with open(join(bob_dir, "multiple"), "w") as f:
        f.write("...but just let me add")

    bob_conflict = join(bob_dir, "multiple.conflict")
    alice_conflict = join(alice_dir, "multiple.conflict")

    found = util.await_files_exist([
        bob_conflict,
        alice_conflict,
    ])

    assert len(found) > 0, "Should have found a conflict"
    print("conflict found (as expected)")


def test_alice_writes_bob_receives_old_timestamp(magic_folder):
    alice_dir, bob_dir = magic_folder
    fname = join(alice_dir, "ts_file")
    ts = time.time() - (60 * 60 * 36)  # 36 hours ago

    with open(fname, "w") as f:
        f.write("alice wrote this")
    utime(fname, (time.time(), ts))

    fname = join(bob_dir, "ts_file")
    util.await_file_contents(fname, "alice wrote this")
    # make sure the timestamp is correct
    assert int(getmtime(fname)) == int(ts)
    return
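# Illustrative aside, not part of the original file: os.utime takes an
# (atime, mtime) pair, so the test above leaves the access time current while
# pushing the modification time 36 hours into the past, then checks that the
# receiving side preserved that mtime. A standalone illustration, using a
# hypothetical scratch path:
import os
import time

def backdate_mtime_example(path="/tmp/ts_example"):
    with open(path, "w") as f:
        f.write("example")
    old = time.time() - (60 * 60 * 36)   # 36 hours ago
    os.utime(path, (time.time(), old))   # (atime, mtime)
    assert int(os.path.getmtime(path)) == int(old)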


def test_bob_writes_alice_receives(magic_folder):
    alice_dir, bob_dir = magic_folder

    with open(join(bob_dir, "second_file"), "w") as f:
        f.write("bob wrote this")

    util.await_file_contents(join(alice_dir, "second_file"), "bob wrote this")
    return


def test_alice_deletes(magic_folder):
    # alice writes a file, waits for bob to get it and then deletes it.
    alice_dir, bob_dir = magic_folder

    with open(join(alice_dir, "delfile"), "w") as f:
        f.write("alice wrote this")

    util.await_file_contents(join(bob_dir, "delfile"), "alice wrote this")

    # bob has the file; now alice deletes it
    unlink(join(alice_dir, "delfile"))

    # bob should remove his copy, but preserve a backup
    util.await_file_vanishes(join(bob_dir, "delfile"))
    util.await_file_contents(join(bob_dir, "delfile.backup"), "alice wrote this")
    return


def test_alice_creates_bob_edits(magic_folder):
    alice_dir, bob_dir = magic_folder

    # alice writes a file
    with open(join(alice_dir, "editfile"), "w") as f:
        f.write("alice wrote this")

    util.await_file_contents(join(bob_dir, "editfile"), "alice wrote this")

    # now bob edits it
    with open(join(bob_dir, "editfile"), "w") as f:
        f.write("bob says foo")

    util.await_file_contents(join(alice_dir, "editfile"), "bob says foo")


def test_bob_creates_sub_directory(magic_folder):
    alice_dir, bob_dir = magic_folder

    # bob makes a sub-dir, with a file in it
    mkdir(join(bob_dir, "subdir"))
    with open(join(bob_dir, "subdir", "a_file"), "w") as f:
        f.write("bob wuz here")

    # alice gets it
    util.await_file_contents(join(alice_dir, "subdir", "a_file"), "bob wuz here")

    # now bob deletes it again
    shutil.rmtree(join(bob_dir, "subdir"))

    # alice should delete it as well
    util.await_file_vanishes(join(alice_dir, "subdir", "a_file"))
    # i *think* it's by design that the subdir won't disappear,
    # because a "a_file.backup" should appear...
    util.await_file_contents(join(alice_dir, "subdir", "a_file.backup"), "bob wuz here")


def test_bob_creates_alice_deletes_bob_restores(magic_folder):
    alice_dir, bob_dir = magic_folder

    # bob creates a file
    with open(join(bob_dir, "boom"), "w") as f:
        f.write("bob wrote this")

    util.await_file_contents(
        join(alice_dir, "boom"),
        "bob wrote this"
    )

    # alice deletes it (so bob should as well .. but keep a backup)
    unlink(join(alice_dir, "boom"))
    util.await_file_vanishes(join(bob_dir, "boom"))
    assert exists(join(bob_dir, "boom.backup"))

    # bob restores it, with new contents
    unlink(join(bob_dir, "boom.backup"))
    with open(join(bob_dir, "boom"), "w") as f:
        f.write("bob wrote this again, because reasons")

    # XXX double-check this behavior is correct!

    # alice sees bob's update, but marks it as a conflict (because
    # .. she previously deleted it? does that really make sense)

    util.await_file_contents(
        join(alice_dir, "boom"),
        "bob wrote this again, because reasons",
    )


def test_bob_creates_alice_deletes_alice_restores(magic_folder):
    alice_dir, bob_dir = magic_folder

    # bob creates a file
    with open(join(bob_dir, "boom2"), "w") as f:
        f.write("bob wrote this")

    util.await_file_contents(
        join(alice_dir, "boom2"),
        "bob wrote this"
    )

    # alice deletes it (so bob should as well)
    unlink(join(alice_dir, "boom2"))
    util.await_file_vanishes(join(bob_dir, "boom2"))

    # alice restores it, with new contents
    with open(join(alice_dir, "boom2"), "w") as f:
        f.write("alice re-wrote this again, because reasons")

    util.await_file_contents(
        join(bob_dir, "boom2"),
        "alice re-wrote this again, because reasons"
    )


def test_bob_conflicts_with_alice_fresh(magic_folder):
    # both alice and bob make a file at "the same time".
    alice_dir, bob_dir = magic_folder

    # either alice or bob will "win" by uploading to the DMD first.
    with open(join(bob_dir, 'alpha'), 'w') as f0, open(join(alice_dir, 'alpha'), 'w') as f1:
        f0.write("this is bob's alpha\n")
        f1.write("this is alice's alpha\n")

    # there should be conflicts
    _bob_conflicts_alice_await_conflicts('alpha', alice_dir, bob_dir)


def test_bob_conflicts_with_alice_preexisting(magic_folder):
    # both alice and bob edit a file at "the same time" (similar to
    # above, but the file already exists before the edits)
    alice_dir, bob_dir = magic_folder

    # have bob create the file
    with open(join(bob_dir, 'beta'), 'w') as f:
        f.write("original beta (from bob)\n")
    util.await_file_contents(join(alice_dir, 'beta'), "original beta (from bob)\n")

    # both alice and bob now have a "beta" file, at version 0

    # either alice or bob will "win" by uploading to the DMD first
    # (however, they should both detect a conflict)
    with open(join(bob_dir, 'beta'), 'w') as f:
        f.write("this is bob's beta\n")
    with open(join(alice_dir, 'beta'), 'w') as f:
        f.write("this is alice's beta\n")

    # both alice and bob should see a conflict
    _bob_conflicts_alice_await_conflicts("beta", alice_dir, bob_dir)


def _bob_conflicts_alice_await_conflicts(name, alice_dir, bob_dir):
    """
    shared code between _fresh and _preexisting conflict test
    """
    found = util.await_files_exist(
        [
            join(bob_dir, '{}.conflict'.format(name)),
            join(alice_dir, '{}.conflict'.format(name)),
        ],
    )

    assert len(found) >= 1, "should be at least one conflict"
    assert open(join(bob_dir, name), 'r').read() == "this is bob's {}\n".format(name)
    assert open(join(alice_dir, name), 'r').read() == "this is alice's {}\n".format(name)

    alice_conflict = join(alice_dir, '{}.conflict'.format(name))
    bob_conflict = join(bob_dir, '{}.conflict'.format(name))
    if exists(bob_conflict):
        assert open(bob_conflict, 'r').read() == "this is alice's {}\n".format(name)
    if exists(alice_conflict):
        assert open(alice_conflict, 'r').read() == "this is bob's {}\n".format(name)
|
||||
|
||||
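
# Editor's sketch (not from the original diff): the helper above leans on
# util.await_files_exist to poll until ".conflict" markers show up.  A
# minimal, self-contained version of that polling pattern -- the name
# _await_any_exists and its arguments are illustrative, not the real util
# API -- would look like this:
import os.path
import time

def _await_any_exists(paths, timeout=15):
    """Poll until at least one of `paths` exists, else raise after `timeout`s."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        found = [p for p in paths if os.path.exists(p)]
        if found:
            return found
        time.sleep(1)
    raise RuntimeError("none of {} appeared within {}s".format(paths, timeout))
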
@pytest.inlineCallbacks
|
||||
def test_edmond_uploads_then_restarts(reactor, request, temp_dir, introducer_furl, flog_gatherer, storage_nodes):
|
||||
"""
|
||||
ticket 2880: if a magic-folder client uploads something, then
|
||||
re-starts, a spurious .backup file should not appear
|
||||
"""
|
||||
|
||||
edmond_dir = join(temp_dir, 'edmond')
|
||||
edmond = yield util._create_node(
|
||||
reactor, request, temp_dir, introducer_furl, flog_gatherer,
|
||||
"edmond", web_port="tcp:9985:interface=localhost",
|
||||
storage=False,
|
||||
)
|
||||
|
||||
|
||||
magic_folder = join(temp_dir, 'magic-edmond')
|
||||
mkdir(magic_folder)
|
||||
created = False
|
||||
# create a magic-folder
|
||||
# (how can we know that the grid is ready?)
|
||||
for _ in range(10): # try 10 times
|
||||
try:
|
||||
proto = util._CollectOutputProtocol()
|
||||
transport = reactor.spawnProcess(
|
||||
proto,
|
||||
sys.executable,
|
||||
[
|
||||
sys.executable, '-m', 'allmydata.scripts.runner',
|
||||
'magic-folder', 'create',
|
||||
'--poll-interval', '2',
|
||||
'--basedir', edmond_dir,
|
||||
'magik:',
|
||||
'edmond_magic',
|
||||
magic_folder,
|
||||
]
|
||||
)
|
||||
yield proto.done
|
||||
created = True
|
||||
break
|
||||
except Exception as e:
|
||||
print("failed to create magic-folder: {}".format(e))
|
||||
time.sleep(1)
|
||||
|
||||
assert created, "Didn't create a magic-folder"
|
||||
|
||||
# to actually-start the magic-folder we have to re-start
|
||||
edmond.signalProcess('TERM')
|
||||
yield edmond._protocol.exited
|
||||
time.sleep(1)
|
||||
edmond = yield util._run_node(reactor, edmond._node_dir, request, 'Completed initial Magic Folder scan successfully')
|
||||
|
||||
# add a thing to the magic-folder
|
||||
with open(join(magic_folder, "its_a_file"), "w") as f:
|
||||
f.write("edmond wrote this")
|
||||
|
||||
# fixme, do status-update attempts in a loop below
|
||||
time.sleep(5)
|
||||
|
||||
# let it upload; poll the HTTP magic-folder status API until it is
|
||||
# uploaded
|
||||
from allmydata.scripts.magic_folder_cli import _get_json_for_fragment
|
||||
|
||||
with open(join(edmond_dir, u'private', u'api_auth_token'), 'rb') as f:
|
||||
token = f.read()
|
||||
|
||||
uploaded = False
|
||||
for _ in range(10):
|
||||
options = {
|
||||
"node-url": open(join(edmond_dir, u'node.url'), 'r').read().strip(),
|
||||
}
|
||||
try:
|
||||
magic_data = _get_json_for_fragment(
|
||||
options,
|
||||
'magic_folder?t=json',
|
||||
method='POST',
|
||||
post_args=dict(
|
||||
t='json',
|
||||
name='default',
|
||||
token=token,
|
||||
)
|
||||
)
|
||||
for mf in magic_data:
|
||||
if mf['status'] == u'success' and mf['path'] == u'its_a_file':
|
||||
uploaded = True
|
||||
break
|
||||
except Exception as e:
|
||||
time.sleep(1)
|
||||
|
||||
assert uploaded, "expected to upload 'its_a_file'"
|
||||
|
||||
# re-starting edmond right now would "normally" trigger the 2880 bug
|
||||
|
||||
# kill edmond
|
||||
edmond.signalProcess('TERM')
|
||||
yield edmond._protocol.exited
|
||||
time.sleep(1)
|
||||
edmond = yield util._run_node(reactor, edmond._node_dir, request, 'Completed initial Magic Folder scan successfully')
|
||||
|
||||
# XXX how can we say for sure if we've waited long enough? look at
|
||||
# tail of logs for magic-folder ... somethingsomething?
|
||||
print("waiting 20 seconds to see if a .backup appears")
|
||||
for _ in range(20):
|
||||
assert exists(join(magic_folder, "its_a_file"))
|
||||
assert not exists(join(magic_folder, "its_a_file.backup"))
|
||||
time.sleep(1)
|
@ -6,13 +6,13 @@ from twisted.internet.error import ProcessTerminated
|
||||
|
||||
import util
|
||||
|
||||
import pytest
|
||||
import pytest_twisted
|
||||
|
||||
|
||||
@pytest.inlineCallbacks
|
||||
@pytest_twisted.inlineCallbacks
|
||||
def test_upload_immutable(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request):
|
||||
|
||||
yield util._create_node(
|
||||
edna = yield util._create_node(
|
||||
reactor, request, temp_dir, introducer_furl, flog_gatherer, "edna",
|
||||
web_port="tcp:9983:interface=localhost",
|
||||
storage=False,
|
||||
@ -20,13 +20,10 @@ def test_upload_immutable(reactor, temp_dir, introducer_furl, flog_gatherer, sto
|
||||
happy=7,
|
||||
total=10,
|
||||
)
|
||||
|
||||
util.await_client_ready(edna)
|
||||
|
||||
node_dir = join(temp_dir, 'edna')
|
||||
|
||||
print("waiting 10 seconds unil we're maybe ready")
|
||||
yield task.deferLater(reactor, 10, lambda: None)
|
||||
|
||||
# upload a file, which should fail because we don't have 7
|
||||
# storage servers (but happiness is set to 7)
|
||||
proto = util._CollectOutputProtocol()
|
||||
|
153
integration/test_streaming_logs.py
Normal file
@ -0,0 +1,153 @@
|
||||
from __future__ import (
|
||||
print_function,
|
||||
unicode_literals,
|
||||
absolute_import,
|
||||
division,
|
||||
)
|
||||
|
||||
import json
|
||||
|
||||
from os.path import (
|
||||
join,
|
||||
)
|
||||
from urlparse import (
|
||||
urlsplit,
|
||||
)
|
||||
|
||||
import attr
|
||||
|
||||
from twisted.internet.defer import (
|
||||
Deferred,
|
||||
)
|
||||
from twisted.internet.endpoints import (
|
||||
HostnameEndpoint,
|
||||
)
|
||||
|
||||
import treq
|
||||
|
||||
from autobahn.twisted.websocket import (
|
||||
WebSocketClientFactory,
|
||||
WebSocketClientProtocol,
|
||||
)
|
||||
|
||||
from allmydata.client import (
|
||||
read_config,
|
||||
)
|
||||
from allmydata.web.private import (
|
||||
SCHEME,
|
||||
)
|
||||
from allmydata.util.eliotutil import (
|
||||
inline_callbacks,
|
||||
)
|
||||
|
||||
import pytest_twisted
|
||||
|
||||
def _url_to_endpoint(reactor, url):
|
||||
netloc = urlsplit(url).netloc
|
||||
host, port = netloc.split(":")
|
||||
return HostnameEndpoint(reactor, host, int(port))
|
||||
|
||||
|
||||
class _StreamingLogClientProtocol(WebSocketClientProtocol):
|
||||
def onOpen(self):
|
||||
self.factory.on_open.callback(self)
|
||||
|
||||
def onMessage(self, payload, isBinary):
|
||||
if self.on_message is None:
|
||||
# Already did our job, ignore it
|
||||
return
|
||||
on_message = self.on_message
|
||||
self.on_message = None
|
||||
on_message.callback(payload)
|
||||
|
||||
def onClose(self, wasClean, code, reason):
|
||||
self.on_close.callback(reason)
|
||||
|
||||
|
||||
def _connect_client(reactor, api_auth_token, ws_url):
|
||||
factory = WebSocketClientFactory(
|
||||
url=ws_url,
|
||||
headers={
|
||||
"Authorization": "{} {}".format(SCHEME, api_auth_token),
|
||||
}
|
||||
)
|
||||
factory.protocol = _StreamingLogClientProtocol
|
||||
factory.on_open = Deferred()
|
||||
|
||||
endpoint = _url_to_endpoint(reactor, ws_url)
|
||||
return endpoint.connect(factory)
|
||||
|
||||
|
||||
def _race(left, right):
    """
    Wait for the first result from either of two Deferreds.

    Any result, success or failure, causes the return Deferred to fire.  It
    fires with either a Left or a Right instance depending on whether the left
    or right argument fired first.

    The Deferred that loses the race is cancelled and any result it eventually
    produces is discarded.
    """
    racing = [True]
    def got_result(result, which):
        if racing:
            racing.pop()
            loser = which.pick(left, right)
            loser.cancel()
            finished.callback(which(result))

    finished = Deferred()
    left.addBoth(got_result, Left)
    right.addBoth(got_result, Right)
    return finished


@attr.s
class Left(object):
    value = attr.ib()

    @classmethod
    def pick(cls, left, right):
        return left


@attr.s
class Right(object):
    value = attr.ib()

    @classmethod
    def pick(cls, left, right):
        return right

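
# Editor's usage sketch (not from the original diff): racing a Deferred that
# never fires against one that has already fired yields a Right wrapping the
# winner's result, synchronously in this case.
from twisted.internet.defer import succeed

def _race_example():
    never = Deferred()
    already = succeed("hello")
    results = []
    _race(never, already).addCallback(results.append)
    assert isinstance(results[0], Right)
    assert results[0].value == "hello"
    return results[0]
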
@inline_callbacks
def _test_streaming_logs(reactor, temp_dir, alice):
    cfg = read_config(join(temp_dir, "alice"), "portnum")
    node_url = cfg.get_config_from_file("node.url")
    api_auth_token = cfg.get_private_config("api_auth_token")

    ws_url = node_url.replace("http://", "ws://")
    log_url = ws_url + "private/logs/v1"

    print("Connecting to {}".format(log_url))
    client = yield _connect_client(reactor, api_auth_token, log_url)
    print("Connected.")
    client.on_close = Deferred()
    client.on_message = Deferred()

    # Capture this now before on_message perhaps goes away.
    racing = _race(client.on_close, client.on_message)

    # Provoke _some_ log event.
    yield treq.get(node_url)

    result = yield racing

    assert isinstance(result, Right)
    json.loads(result.value)


@pytest_twisted.inlineCallbacks
def test_streaming_logs(reactor, temp_dir, alice):
    yield _test_streaming_logs(reactor, temp_dir, alice)
@ -5,18 +5,27 @@ import time
|
||||
import shutil
|
||||
from os import mkdir, unlink, listdir
|
||||
from os.path import join, exists
|
||||
from StringIO import StringIO
|
||||
from six.moves import StringIO
|
||||
|
||||
from twisted.internet.protocol import ProcessProtocol
|
||||
from twisted.internet.error import ProcessExitedAlready, ProcessDone
|
||||
from twisted.internet.defer import inlineCallbacks, Deferred
|
||||
|
||||
import pytest
|
||||
import pytest_twisted
|
||||
|
||||
import util
|
||||
|
||||
# see "conftest.py" for the fixtures (e.g. "magic_folder")
|
||||
# see "conftest.py" for the fixtures (e.g. "tor_network")
|
||||
|
||||
@pytest.inlineCallbacks
|
||||
# XXX: Integration tests that involve Tor do not run reliably on
|
||||
# Windows. They are skipped for now, in order to reduce CI noise.
|
||||
#
|
||||
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3347
|
||||
if sys.platform.startswith('win'):
|
||||
pytest.skip('Skipping Tor tests on Windows', allow_module_level=True)
|
||||
|
||||
@pytest_twisted.inlineCallbacks
|
||||
def test_onion_service_storage(reactor, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl):
|
||||
yield _create_anonymous_node(reactor, 'carol', 8008, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl)
|
||||
yield _create_anonymous_node(reactor, 'dave', 8009, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl)
|
||||
@ -62,7 +71,7 @@ def test_onion_service_storage(reactor, request, temp_dir, flog_gatherer, tor_ne
|
||||
assert dave_got == open(gold_path, 'r').read().strip()
|
||||
|
||||
|
||||
@pytest.inlineCallbacks
|
||||
@pytest_twisted.inlineCallbacks
|
||||
def _create_anonymous_node(reactor, name, control_port, request, temp_dir, flog_gatherer, tor_network, introducer_furl):
|
||||
node_dir = join(temp_dir, name)
|
||||
web_port = "tcp:{}:interface=localhost".format(control_port + 2000)
|
||||
|
519
integration/test_web.py
Normal file
@ -0,0 +1,519 @@
|
||||
"""
|
||||
These tests were originally written to achieve some level of
|
||||
coverage for the WebAPI functionality during Python3 porting (there
|
||||
aren't many tests of the Web API, period).
|
||||
|
||||
Most of the tests have cursory asserts and encode 'what the WebAPI did
|
||||
at the time of testing' -- not necessarily a cohesive idea of what the
|
||||
WebAPI *should* do in every situation. It's not clear the latter
|
||||
exists anywhere, however.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import time
|
||||
import shutil
|
||||
import json
|
||||
import urllib2
|
||||
from os import mkdir, unlink, utime
|
||||
from os.path import join, exists, getmtime
|
||||
|
||||
import allmydata.uri
|
||||
|
||||
import util
|
||||
|
||||
import requests
|
||||
import pytest_twisted
|
||||
import html5lib
|
||||
from bs4 import BeautifulSoup
|
||||
|
||||
|
||||
def test_index(alice):
|
||||
"""
|
||||
we can download the index file
|
||||
"""
|
||||
util.web_get(alice, u"")
|
||||
|
||||
|
||||
def test_index_json(alice):
|
||||
"""
|
||||
we can download the index file as json
|
||||
"""
|
||||
data = util.web_get(alice, u"", params={u"t": u"json"})
|
||||
# it should be valid json
|
||||
json.loads(data)
|
||||
|
||||
|
||||
def test_upload_download(alice):
|
||||
"""
|
||||
upload a file, then download it via readcap
|
||||
"""
|
||||
|
||||
FILE_CONTENTS = u"some contents"
|
||||
|
||||
readcap = util.web_post(
|
||||
alice, u"uri",
|
||||
data={
|
||||
u"t": u"upload",
|
||||
u"format": u"mdmf",
|
||||
},
|
||||
files={
|
||||
u"file": FILE_CONTENTS,
|
||||
},
|
||||
)
|
||||
readcap = readcap.strip()
|
||||
|
||||
data = util.web_get(
|
||||
alice, u"uri",
|
||||
params={
|
||||
u"uri": readcap,
|
||||
u"filename": u"boom",
|
||||
}
|
||||
)
|
||||
assert data == FILE_CONTENTS
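
# Editor's sketch (not from the original diff): the same upload/download
# round-trip using `requests` directly against the WebAPI endpoints this test
# exercises; `base` is assumed to be the node's "node.url" value.
import requests

def _upload_then_download(base, contents=u"some contents"):
    resp = requests.post(
        base + u"uri",
        data={u"t": u"upload", u"format": u"mdmf"},
        files={u"file": contents},
    )
    readcap = resp.text.strip()
    resp = requests.get(base + u"uri", params={u"uri": readcap})
    assert resp.text == contents
    return readcap
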
|
||||
|
||||
|
||||
def test_put(alice):
|
||||
"""
|
||||
use PUT to create a file
|
||||
"""
|
||||
|
||||
FILE_CONTENTS = b"added via PUT" * 20
|
||||
|
||||
resp = requests.put(
|
||||
util.node_url(alice.node_dir, u"uri"),
|
||||
data=FILE_CONTENTS,
|
||||
)
|
||||
cap = allmydata.uri.from_string(resp.text.strip().encode('ascii'))
|
||||
cfg = alice.get_config()
|
||||
assert isinstance(cap, allmydata.uri.CHKFileURI)
|
||||
assert cap.size == len(FILE_CONTENTS)
|
||||
assert cap.total_shares == int(cfg.get_config("client", "shares.total"))
|
||||
assert cap.needed_shares == int(cfg.get_config("client", "shares.needed"))
|
||||
|
||||
|
||||
def test_helper_status(storage_nodes):
|
||||
"""
|
||||
successfully GET the /helper_status page
|
||||
"""
|
||||
|
||||
url = util.node_url(storage_nodes[0].node_dir, "helper_status")
|
||||
resp = requests.get(url)
|
||||
assert resp.status_code >= 200 and resp.status_code < 300
|
||||
dom = BeautifulSoup(resp.content, "html5lib")
|
||||
assert unicode(dom.h1.string) == u"Helper Status"
|
||||
|
||||
|
||||
def test_deep_stats(alice):
|
||||
"""
|
||||
create a directory, do deep-stats on it and prove the /operations/
|
||||
URIs work
|
||||
"""
|
||||
resp = requests.post(
|
||||
util.node_url(alice.node_dir, "uri"),
|
||||
params={
|
||||
"format": "sdmf",
|
||||
"t": "mkdir",
|
||||
"redirect_to_result": "true",
|
||||
},
|
||||
)
|
||||
assert resp.status_code >= 200 and resp.status_code < 300
|
||||
|
||||
# when creating a directory, we'll be re-directed to a URL
|
||||
# containing our writecap..
|
||||
uri = urllib2.unquote(resp.url)
|
||||
assert 'URI:DIR2:' in uri
|
||||
dircap = uri[uri.find("URI:DIR2:"):].rstrip('/')
|
||||
dircap_uri = util.node_url(alice.node_dir, "uri/{}".format(urllib2.quote(dircap)))
|
||||
|
||||
# POST a file into this directory
|
||||
FILE_CONTENTS = u"a file in a directory"
|
||||
|
||||
resp = requests.post(
|
||||
dircap_uri,
|
||||
data={
|
||||
u"t": u"upload",
|
||||
u"when_done": u".",
|
||||
},
|
||||
files={
|
||||
u"file": FILE_CONTENTS,
|
||||
},
|
||||
)
|
||||
|
||||
# confirm the file is in the directory
|
||||
resp = requests.get(
|
||||
dircap_uri,
|
||||
params={
|
||||
u"t": u"json",
|
||||
},
|
||||
)
|
||||
d = json.loads(resp.content)
|
||||
k, data = d
|
||||
assert k == u"dirnode"
|
||||
assert len(data['children']) == 1
|
||||
k, child = data['children'].values()[0]
|
||||
assert k == u"filenode"
|
||||
assert child['size'] == len(FILE_CONTENTS)
|
||||
|
||||
# perform deep-stats on it...
|
||||
resp = requests.post(
|
||||
dircap_uri,
|
||||
data={
|
||||
u"t": u"start-deep-stats",
|
||||
u"ophandle": u"something_random",
|
||||
},
|
||||
)
|
||||
assert resp.status_code >= 200 and resp.status_code < 300
|
||||
|
||||
# confirm we get information from the op .. after its done
|
||||
tries = 10
|
||||
while tries > 0:
|
||||
tries -= 1
|
||||
resp = requests.get(
|
||||
util.node_url(alice.node_dir, u"operations/something_random"),
|
||||
)
|
||||
d = json.loads(resp.content)
|
||||
if d['size-literal-files'] == len(FILE_CONTENTS):
|
||||
print("stats completed successfully")
|
||||
break
|
||||
else:
|
||||
print("{} != {}; waiting".format(d['size-literal-files'], len(FILE_CONTENTS)))
|
||||
time.sleep(.5)
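
# Editor's sketch (not from the original diff): the retry loop above
# generalizes to "poll an /operations/<handle> page until a predicate on its
# JSON result holds"; `node_base` and `handle` are assumed inputs.
import json
import time

import requests

def _await_operation(node_base, handle, predicate, tries=10, delay=0.5):
    """Poll an operation handle until `predicate(json_data)` is truthy."""
    for _ in range(tries):
        resp = requests.get(node_base + u"operations/{}".format(handle))
        data = json.loads(resp.content)
        if predicate(data):
            return data
        time.sleep(delay)
    raise RuntimeError("operation '{}' never satisfied the predicate".format(handle))
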
|
||||
|
||||
|
||||
def test_status(alice):
|
||||
"""
|
||||
confirm we get something sensible from /status and the various sub-types
|
||||
"""
|
||||
|
||||
# upload a file
|
||||
# (because of the nature of the integration-tests, we can only
|
||||
# assert things about "our" file because we don't know what other
|
||||
# operations may have happened in the grid before our test runs).
|
||||
|
||||
FILE_CONTENTS = u"all the Important Data of alice\n" * 1200
|
||||
|
||||
resp = requests.put(
|
||||
util.node_url(alice.node_dir, u"uri"),
|
||||
data=FILE_CONTENTS,
|
||||
)
|
||||
cap = resp.text.strip()
|
||||
|
||||
print("Uploaded data, cap={}".format(cap))
|
||||
resp = requests.get(
|
||||
util.node_url(alice.node_dir, u"uri/{}".format(urllib2.quote(cap))),
|
||||
)
|
||||
|
||||
print("Downloaded {} bytes of data".format(len(resp.content)))
|
||||
assert resp.content == FILE_CONTENTS
|
||||
|
||||
resp = requests.get(
|
||||
util.node_url(alice.node_dir, "status"),
|
||||
)
|
||||
dom = html5lib.parse(resp.content)
|
||||
|
||||
hrefs = [
|
||||
a.get('href')
|
||||
for a in dom.iter(u'{http://www.w3.org/1999/xhtml}a')
|
||||
]
|
||||
|
||||
found_upload = False
|
||||
found_download = False
|
||||
for href in hrefs:
|
||||
if href == u"/" or not href:
|
||||
continue
|
||||
resp = requests.get(util.node_url(alice.node_dir, href))
|
||||
if href.startswith(u"/status/up"):
|
||||
assert "File Upload Status" in resp.content
|
||||
if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
|
||||
found_upload = True
|
||||
elif href.startswith(u"/status/down"):
|
||||
assert "File Download Status" in resp.content
|
||||
if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
|
||||
found_download = True
|
||||
|
||||
# download the specialized event information
|
||||
resp = requests.get(
|
||||
util.node_url(alice.node_dir, u"{}/event_json".format(href)),
|
||||
)
|
||||
js = json.loads(resp.content)
|
||||
# there's usually just one "read" operation, but this can handle many ..
|
||||
total_bytes = sum([st['bytes_returned'] for st in js['read']], 0)
|
||||
assert total_bytes == len(FILE_CONTENTS)
|
||||
|
||||
|
||||
assert found_upload, "Failed to find the file we uploaded in the status-page"
|
||||
assert found_download, "Failed to find the file we downloaded in the status-page"
|
||||
|
||||
|
||||
def test_directory_deep_check(alice):
|
||||
"""
|
||||
use deep-check and confirm the result pages work
|
||||
"""
|
||||
|
||||
# create a directory
|
||||
resp = requests.post(
|
||||
util.node_url(alice.node_dir, u"uri"),
|
||||
params={
|
||||
u"t": u"mkdir",
|
||||
u"redirect_to_result": u"true",
|
||||
}
|
||||
)
|
||||
|
||||
# get json information about our directory
|
||||
dircap_url = resp.url
|
||||
resp = requests.get(
|
||||
dircap_url,
|
||||
params={u"t": u"json"},
|
||||
)
|
||||
dir_meta = json.loads(resp.content)
|
||||
|
||||
# upload a file of pangrams into the directory
|
||||
FILE_CONTENTS = u"Sphinx of black quartz, judge my vow.\n" * (2048*10)
|
||||
|
||||
resp = requests.post(
|
||||
dircap_url,
|
||||
params={
|
||||
u"t": u"upload",
|
||||
u"upload-chk": u"upload-chk",
|
||||
},
|
||||
files={
|
||||
u"file": FILE_CONTENTS,
|
||||
}
|
||||
)
|
||||
cap0 = resp.content
|
||||
print("Uploaded data0, cap={}".format(cap0))
|
||||
|
||||
# a different pangram
|
||||
FILE_CONTENTS = u"The five boxing wizards jump quickly.\n" * (2048*10)
|
||||
|
||||
resp = requests.post(
|
||||
dircap_url,
|
||||
params={
|
||||
u"t": u"upload",
|
||||
u"upload-chk": u"upload-chk",
|
||||
},
|
||||
files={
|
||||
u"file": FILE_CONTENTS,
|
||||
}
|
||||
)
|
||||
cap1 = resp.content
|
||||
print("Uploaded data1, cap={}".format(cap1))
|
||||
|
||||
resp = requests.get(
|
||||
util.node_url(alice.node_dir, u"uri/{}".format(urllib2.quote(cap0))),
|
||||
params={u"t": u"info"},
|
||||
)
|
||||
|
||||
def check_repair_data(checkdata):
|
||||
assert checkdata["healthy"] is True
|
||||
assert checkdata["count-happiness"] == 4
|
||||
assert checkdata["count-good-share-hosts"] == 4
|
||||
assert checkdata["count-shares-good"] == 4
|
||||
assert checkdata["count-corrupt-shares"] == 0
|
||||
assert checkdata["list-corrupt-shares"] == []
|
||||
|
||||
# do a "check" (once for HTML, then with JSON for easier asserts)
|
||||
resp = requests.post(
|
||||
dircap_url,
|
||||
params={
|
||||
u"t": u"check",
|
||||
u"return_to": u".",
|
||||
u"verify": u"true",
|
||||
}
|
||||
)
|
||||
resp = requests.post(
|
||||
dircap_url,
|
||||
params={
|
||||
u"t": u"check",
|
||||
u"return_to": u".",
|
||||
u"verify": u"true",
|
||||
u"output": u"JSON",
|
||||
}
|
||||
)
|
||||
check_repair_data(json.loads(resp.content)["results"])
|
||||
|
||||
# "check and repair"
|
||||
resp = requests.post(
|
||||
dircap_url,
|
||||
params={
|
||||
u"t": u"check",
|
||||
u"return_to": u".",
|
||||
u"verify": u"true",
|
||||
u"repair": u"true",
|
||||
}
|
||||
)
|
||||
resp = requests.post(
|
||||
dircap_url,
|
||||
params={
|
||||
u"t": u"check",
|
||||
u"return_to": u".",
|
||||
u"verify": u"true",
|
||||
u"repair": u"true",
|
||||
u"output": u"JSON",
|
||||
}
|
||||
)
|
||||
check_repair_data(json.loads(resp.content)["post-repair-results"]["results"])
|
||||
|
||||
# start a "deep check and repair"
|
||||
resp = requests.post(
|
||||
dircap_url,
|
||||
params={
|
||||
u"t": u"start-deep-check",
|
||||
u"return_to": u".",
|
||||
u"verify": u"on",
|
||||
u"repair": u"on",
|
||||
u"output": u"JSON",
|
||||
u"ophandle": u"deadbeef",
|
||||
}
|
||||
)
|
||||
deepcheck_uri = resp.url
|
||||
|
||||
data = json.loads(resp.content)
|
||||
tries = 10
|
||||
while not data['finished'] and tries > 0:
|
||||
tries -= 1
|
||||
time.sleep(0.5)
|
||||
print("deep-check not finished, reloading")
|
||||
resp = requests.get(deepcheck_uri, params={u"output": "JSON"})
|
||||
data = json.loads(resp.content)
|
||||
print("deep-check finished")
|
||||
assert data[u"stats"][u"count-immutable-files"] == 1
|
||||
assert data[u"stats"][u"count-literal-files"] == 0
|
||||
assert data[u"stats"][u"largest-immutable-file"] == 778240
|
||||
assert data[u"count-objects-checked"] == 2
|
||||
|
||||
# also get the HTML version
|
||||
resp = requests.post(
|
||||
dircap_url,
|
||||
params={
|
||||
u"t": u"start-deep-check",
|
||||
u"return_to": u".",
|
||||
u"verify": u"on",
|
||||
u"repair": u"on",
|
||||
u"ophandle": u"definitely_random",
|
||||
}
|
||||
)
|
||||
deepcheck_uri = resp.url
|
||||
|
||||
# if the operation isn't done, there's an <H2> tag with the
|
||||
# reload link; otherwise there's only an <H1> tag. Wait up to 5
|
||||
# seconds for this to respond properly.
|
||||
for _ in range(5):
|
||||
resp = requests.get(deepcheck_uri)
|
||||
dom = BeautifulSoup(resp.content, "html5lib")
|
||||
if dom.h1 and u'Results' in unicode(dom.h1.string):
|
||||
break
|
||||
if dom.h2 and dom.h2.a and u"Reload" in unicode(dom.h2.a.string):
|
||||
dom = None
|
||||
time.sleep(1)
|
||||
assert dom is not None, "Operation never completed"
|
||||
|
||||
|
||||
def test_storage_info(storage_nodes):
|
||||
"""
|
||||
retrieve and confirm /storage URI for one storage node
|
||||
"""
|
||||
storage0 = storage_nodes[0]
|
||||
|
||||
requests.get(
|
||||
util.node_url(storage0.node_dir, u"storage"),
|
||||
)
|
||||
|
||||
|
||||
def test_storage_info_json(storage_nodes):
|
||||
"""
|
||||
retrieve and confirm /storage?t=json URI for one storage node
|
||||
"""
|
||||
storage0 = storage_nodes[0]
|
||||
|
||||
resp = requests.get(
|
||||
util.node_url(storage0.node_dir, u"storage"),
|
||||
params={u"t": u"json"},
|
||||
)
|
||||
data = json.loads(resp.content)
|
||||
assert data[u"stats"][u"storage_server.reserved_space"] == 1000000000
|
||||
|
||||
|
||||
def test_introducer_info(introducer):
|
||||
"""
|
||||
retrieve and confirm /introducer URI for the introducer
|
||||
"""
|
||||
resp = requests.get(
|
||||
util.node_url(introducer.node_dir, u""),
|
||||
)
|
||||
assert "Introducer" in resp.content
|
||||
|
||||
resp = requests.get(
|
||||
util.node_url(introducer.node_dir, u""),
|
||||
params={u"t": u"json"},
|
||||
)
|
||||
data = json.loads(resp.content)
|
||||
assert "announcement_summary" in data
|
||||
assert "subscription_summary" in data
|
||||
|
||||
|
||||
def test_mkdir_with_children(alice):
|
||||
"""
|
||||
create a directory using ?t=mkdir-with-children
|
||||
"""
|
||||
|
||||
# create a file to put in our directory
|
||||
FILE_CONTENTS = u"some file contents\n" * 500
|
||||
resp = requests.put(
|
||||
util.node_url(alice.node_dir, u"uri"),
|
||||
data=FILE_CONTENTS,
|
||||
)
|
||||
filecap = resp.content.strip()
|
||||
|
||||
# create a (sub) directory to put in our directory
|
||||
resp = requests.post(
|
||||
util.node_url(alice.node_dir, u"uri"),
|
||||
params={
|
||||
u"t": u"mkdir",
|
||||
}
|
||||
)
|
||||
# (we need both the read-write and read-only URIs I guess)
|
||||
dircap = resp.content
|
||||
dircap_obj = allmydata.uri.from_string(dircap)
|
||||
dircap_ro = dircap_obj.get_readonly().to_string()
|
||||
|
||||
# create json information about our directory
|
||||
meta = {
|
||||
"a_file": [
|
||||
"filenode", {
|
||||
"ro_uri": filecap,
|
||||
"metadata": {
|
||||
"ctime": 1202777696.7564139,
|
||||
"mtime": 1202777696.7564139,
|
||||
"tahoe": {
|
||||
"linkcrtime": 1202777696.7564139,
|
||||
"linkmotime": 1202777696.7564139
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"some_subdir": [
|
||||
"dirnode", {
|
||||
"rw_uri": dircap,
|
||||
"ro_uri": dircap_ro,
|
||||
"metadata": {
|
||||
"ctime": 1202778102.7589991,
|
||||
"mtime": 1202778111.2160511,
|
||||
"tahoe": {
|
||||
"linkcrtime": 1202777696.7564139,
|
||||
"linkmotime": 1202777696.7564139
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
# create a new directory with one file and one sub-dir (all-at-once)
|
||||
resp = util.web_post(
|
||||
alice, u"uri",
|
||||
params={u"t": "mkdir-with-children"},
|
||||
data=json.dumps(meta),
|
||||
)
|
||||
assert resp.startswith("URI:DIR2")
|
||||
cap = allmydata.uri.from_string(resp)
|
||||
assert isinstance(cap, allmydata.uri.DirectoryURI)
|
@ -1,20 +1,25 @@
|
||||
import sys
|
||||
import time
|
||||
from os import mkdir
|
||||
import json
|
||||
from os import mkdir, environ
|
||||
from os.path import exists, join
|
||||
from StringIO import StringIO
|
||||
from six.moves import StringIO
|
||||
from functools import partial
|
||||
|
||||
from twisted.internet.defer import Deferred, succeed
|
||||
from twisted.internet.protocol import ProcessProtocol
|
||||
from twisted.internet.error import ProcessExitedAlready, ProcessDone
|
||||
|
||||
import requests
|
||||
|
||||
from allmydata.util.configutil import (
|
||||
get_config,
|
||||
set_config,
|
||||
write_config,
|
||||
)
|
||||
from allmydata import client
|
||||
|
||||
import pytest
|
||||
import pytest_twisted
|
||||
|
||||
|
||||
class _ProcessExitedProtocol(ProcessProtocol):
|
||||
@ -105,7 +110,78 @@ class _MagicTextProtocol(ProcessProtocol):
|
||||
sys.stdout.write(data)
|
||||
|
||||
|
||||
def _cleanup_tahoe_process(tahoe_transport, exited):
|
||||
"""
|
||||
Terminate the given process with a kill signal (SIGKILL on POSIX,
|
||||
TerminateProcess on Windows).
|
||||
|
||||
:param tahoe_transport: The `IProcessTransport` representing the process.
|
||||
:param exited: A `Deferred` which fires when the process has exited.
|
||||
|
||||
:return: After the process has exited.
|
||||
"""
|
||||
try:
|
||||
print("signaling {} with TERM".format(tahoe_transport.pid))
|
||||
tahoe_transport.signalProcess('TERM')
|
||||
print("signaled, blocking on exit")
|
||||
pytest_twisted.blockon(exited)
|
||||
print("exited, goodbye")
|
||||
except ProcessExitedAlready:
|
||||
pass
|
||||
|
||||
|
||||
def _tahoe_runner_optional_coverage(proto, reactor, request, other_args):
|
||||
"""
|
||||
Internal helper. Calls spawnProcess with `-m
|
||||
allmydata.scripts.runner` and `other_args`, optionally inserting a
|
||||
`--coverage` option if the `request` indicates we should.
|
||||
"""
|
||||
if request.config.getoption('coverage'):
|
||||
args = [sys.executable, '-m', 'coverage', 'run', '-m', 'allmydata.scripts.runner', '--coverage']
|
||||
else:
|
||||
args = [sys.executable, '-m', 'allmydata.scripts.runner']
|
||||
args += other_args
|
||||
return reactor.spawnProcess(
|
||||
proto,
|
||||
sys.executable,
|
||||
args,
|
||||
env=environ,
|
||||
)
|
||||
|
||||
|
||||
class TahoeProcess(object):
|
||||
"""
|
||||
A running Tahoe process, with associated information.
|
||||
"""
|
||||
|
||||
def __init__(self, process_transport, node_dir):
|
||||
self._process_transport = process_transport # IProcessTransport instance
|
||||
self._node_dir = node_dir # path
|
||||
|
||||
@property
|
||||
def transport(self):
|
||||
return self._process_transport
|
||||
|
||||
@property
|
||||
def node_dir(self):
|
||||
return self._node_dir
|
||||
|
||||
def get_config(self):
|
||||
return client.read_config(
|
||||
self._node_dir,
|
||||
u"portnum",
|
||||
)
|
||||
|
||||
def __str__(self):
|
||||
return "<TahoeProcess in '{}'>".format(self._node_dir)
|
||||
|
||||
|
||||
def _run_node(reactor, node_dir, request, magic_text):
|
||||
"""
|
||||
Run a tahoe process from its node_dir.
|
||||
|
||||
:returns: a TahoeProcess for this node
|
||||
"""
|
||||
if magic_text is None:
|
||||
magic_text = "client running"
|
||||
protocol = _MagicTextProtocol(magic_text)
|
||||
@ -113,32 +189,29 @@ def _run_node(reactor, node_dir, request, magic_text):
|
||||
# on windows, "tahoe start" means: run forever in the foreground,
|
||||
# but on linux it means daemonize. "tahoe run" is consistent
|
||||
# between platforms.
|
||||
process = reactor.spawnProcess(
|
||||
|
||||
transport = _tahoe_runner_optional_coverage(
|
||||
protocol,
|
||||
sys.executable,
|
||||
(
|
||||
sys.executable, '-m', 'allmydata.scripts.runner',
|
||||
reactor,
|
||||
request,
|
||||
[
|
||||
'--eliot-destination', 'file:{}/logs/eliot.json'.format(node_dir),
|
||||
'run',
|
||||
node_dir,
|
||||
),
|
||||
],
|
||||
)
|
||||
process.exited = protocol.exited
|
||||
transport.exited = protocol.exited
|
||||
|
||||
def cleanup():
|
||||
try:
|
||||
process.signalProcess('TERM')
|
||||
pytest.blockon(protocol.exited)
|
||||
except ProcessExitedAlready:
|
||||
pass
|
||||
request.addfinalizer(cleanup)
|
||||
request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))
|
||||
|
||||
# we return the 'process' ITransport instance
|
||||
# XXX abusing the Deferred; should use .when_magic_seen() or something?
|
||||
# XXX abusing the Deferred; should use .when_magic_seen() pattern
|
||||
|
||||
def got_proto(proto):
|
||||
process._protocol = proto
|
||||
process._node_dir = node_dir
|
||||
return process
|
||||
transport._protocol = proto
|
||||
return TahoeProcess(
|
||||
transport,
|
||||
node_dir,
|
||||
)
|
||||
protocol.magic_seen.addCallback(got_proto)
|
||||
return protocol.magic_seen
|
||||
|
||||
@ -163,7 +236,6 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
|
||||
mkdir(node_dir)
|
||||
done_proto = _ProcessExitedProtocol()
|
||||
args = [
|
||||
sys.executable, '-m', 'allmydata.scripts.runner',
|
||||
'create-node',
|
||||
'--nickname', name,
|
||||
'--introducer', introducer_furl,
|
||||
@ -173,16 +245,13 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
|
||||
'--shares-needed', unicode(needed),
|
||||
'--shares-happy', unicode(happy),
|
||||
'--shares-total', unicode(total),
|
||||
'--helper',
|
||||
]
|
||||
if not storage:
|
||||
args.append('--no-storage')
|
||||
args.append(node_dir)
|
||||
|
||||
reactor.spawnProcess(
|
||||
done_proto,
|
||||
sys.executable,
|
||||
args,
|
||||
)
|
||||
_tahoe_runner_optional_coverage(done_proto, reactor, request, args)
|
||||
created_d = done_proto.done
|
||||
|
||||
def created(_):
|
||||
@ -241,7 +310,7 @@ class FileShouldVanishException(Exception):
|
||||
timeout
|
||||
"""
|
||||
def __init__(self, path, timeout):
|
||||
super(self, FileShouldVanishException).__init__(
|
||||
super(FileShouldVanishException, self).__init__(
|
||||
u"'{}' still exists after {}s".format(path, timeout),
|
||||
)
|
||||
|
||||
@ -287,7 +356,7 @@ def await_files_exist(paths, timeout=15, await_all=False):
|
||||
an Exception is raised
|
||||
"""
|
||||
start_time = time.time()
|
||||
while time.time() - start_time < 15.0:
|
||||
while time.time() - start_time < timeout:
|
||||
print(" waiting for: {}".format(' '.join(paths)))
|
||||
found = [p for p in paths if exists(p)]
|
||||
print("found: {}".format(found))
|
||||
@ -313,3 +382,120 @@ def await_file_vanishes(path, timeout=10):
|
||||
return
|
||||
time.sleep(1)
|
||||
raise FileShouldVanishException(path, timeout)
|
||||
|
||||
|
||||
def cli(request, reactor, node_dir, *argv):
|
||||
"""
|
||||
Run a tahoe CLI subcommand for a given node, optionally running
|
||||
under coverage if '--coverage' was supplied.
|
||||
"""
|
||||
proto = _CollectOutputProtocol()
|
||||
_tahoe_runner_optional_coverage(
|
||||
proto, reactor, request,
|
||||
['--node-directory', node_dir] + list(argv),
|
||||
)
|
||||
return proto.done
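
# Editor's usage sketch (not from the original diff; arguments illustrative):
# a test drives a node's CLI through the helper above from an
# inlineCallbacks-style test function.
@pytest_twisted.inlineCallbacks
def _example_cli_usage(request, reactor, node_dir):
    yield cli(request, reactor, node_dir, 'ls', 'magik:')
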
|
||||
|
||||
|
||||
def node_url(node_dir, uri_fragment):
|
||||
"""
|
||||
Create a fully qualified URL by reading config from `node_dir` and
|
||||
adding the `uri_fragment`
|
||||
"""
|
||||
with open(join(node_dir, "node.url"), "r") as f:
|
||||
base = f.read().strip()
|
||||
url = base + uri_fragment
|
||||
return url
|
||||
|
||||
|
||||
def _check_status(response):
|
||||
"""
|
||||
Check the response code is a 2xx (raise an exception otherwise)
|
||||
"""
|
||||
if response.status_code < 200 or response.status_code >= 300:
|
||||
raise ValueError(
|
||||
"Expected a 2xx code, got {}".format(response.status_code)
|
||||
)
|
||||
|
||||
|
||||
def web_get(tahoe, uri_fragment, **kwargs):
|
||||
"""
|
||||
Make a GET request to the webport of `tahoe` (a `TahoeProcess`,
usually from a fixture, e.g. `alice`). This will look like:
`http://localhost:<webport>/<uri_fragment>`. All `kwargs` are
passed on to `requests.get`.
|
||||
"""
|
||||
url = node_url(tahoe.node_dir, uri_fragment)
|
||||
resp = requests.get(url, **kwargs)
|
||||
_check_status(resp)
|
||||
return resp.content
|
||||
|
||||
|
||||
def web_post(tahoe, uri_fragment, **kwargs):
|
||||
"""
|
||||
Make a POST request to the webport of `tahoe` (a `TahoeProcess`,
usually from a fixture, e.g. `alice`). This will look like:
`http://localhost:<webport>/<uri_fragment>`. All `kwargs` are
passed on to `requests.post`.
|
||||
"""
|
||||
url = node_url(tahoe.node_dir, uri_fragment)
|
||||
resp = requests.post(url, **kwargs)
|
||||
_check_status(resp)
|
||||
return resp.content
|
||||
|
||||
|
||||
def await_client_ready(tahoe, timeout=10, liveness=60*2):
|
||||
"""
|
||||
Uses the status API to wait for a client-type node (in `tahoe`, a
|
||||
`TahoeProcess` instance usually from a fixture e.g. `alice`) to be
|
||||
'ready'. A client is deemed ready if:
|
||||
|
||||
- it answers `http://<node_url>/statistics/?t=json/`
|
||||
- there is at least one storage-server connected
|
||||
- every storage-server has a "last_received_data" and it is
|
||||
within the last `liveness` seconds
|
||||
|
||||
We will try for up to `timeout` seconds for the above conditions
|
||||
to be true. Otherwise, an exception is raised.
|
||||
"""
|
||||
start = time.time()
|
||||
while (time.time() - start) < float(timeout):
|
||||
try:
|
||||
data = web_get(tahoe, u"", params={u"t": u"json"})
|
||||
js = json.loads(data)
|
||||
except Exception as e:
|
||||
print("waiting because '{}'".format(e))
|
||||
time.sleep(1)
|
||||
continue
|
||||
|
||||
if len(js['servers']) == 0:
|
||||
print("waiting because no servers at all")
|
||||
time.sleep(1)
|
||||
continue
|
||||
server_times = [
|
||||
server['last_received_data']
|
||||
for server in js['servers']
|
||||
]
|
||||
# if any times are null/None that server has never been
|
||||
# contacted (so it's down still, probably)
|
||||
if any(t is None for t in server_times):
|
||||
print("waiting because at least one server not contacted")
|
||||
time.sleep(1)
|
||||
continue
|
||||
|
||||
# check that all times are 'recent enough'
|
||||
if any([time.time() - t > liveness for t in server_times]):
|
||||
print("waiting because at least one server too old")
|
||||
time.sleep(1)
|
||||
continue
|
||||
|
||||
# we have a status with at least one server, and all servers
|
||||
# have been contacted recently
|
||||
return True
|
||||
# we only fall out of the loop when we've timed out
|
||||
raise RuntimeError(
|
||||
"Waited {} seconds for {} to be 'ready' but it never was".format(
|
||||
timeout,
|
||||
tahoe,
|
||||
)
|
||||
)
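
# Editor's usage sketch (not from the original diff): fixtures typically gate
# on readiness before running tests against a node, e.g. with a TahoeProcess
# called `alice_process` (illustrative name).
def _example_wait_for_node(alice_process):
    return await_client_ready(alice_process, timeout=30)
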
|
||||
|
@ -1,5 +1,7 @@
|
||||
# -*- python -*-
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
"""Monitor a Tahoe grid, by playing sounds in response to remote events.
|
||||
|
||||
To install:
|
||||
@ -47,20 +49,20 @@ class Listener:
|
||||
|
||||
# messages emitted by the Introducer: client join/leave
|
||||
if message.startswith("introducer: subscription[storage] request"):
|
||||
print "new client"
|
||||
print("new client")
|
||||
self.sound("voice/hooray.aiff")
|
||||
if message.startswith("introducer: unsubscribing"):
|
||||
print "unsubscribe"
|
||||
print("unsubscribe")
|
||||
self.sound("electro/zaptrill-fade.aiff")
|
||||
|
||||
# messages from the helper
|
||||
if message == "file already found in grid":
|
||||
print "already found"
|
||||
print("already found")
|
||||
self.sound("mech/ziplash-high.aiff")
|
||||
#if message == "upload done":
|
||||
if format == "plaintext_hash=%(plaintext_hash)s, SI=%(SI)s, size=%(size)d":
|
||||
size = m.get("size")
|
||||
print "upload done, size", size
|
||||
print("upload done, size", size)
|
||||
self.sound("mech/ziplash-low.aiff")
|
||||
if "fetching " in message:
|
||||
# helper grabbing ciphertext from client
|
||||
@ -90,31 +92,31 @@ class Listener:
|
||||
pass
|
||||
elif format == "excessive reactor delay (%ss)":
|
||||
self.sound("animal/frog-cheep.aiff")
|
||||
print "excessive delay %s: %s" % (m['args'][0], furl)
|
||||
print("excessive delay %s: %s" % (m['args'][0], furl))
|
||||
elif format == "excessive reactor delay (%(delay)ss)":
|
||||
self.sound("animal/frog-cheep.aiff")
|
||||
print "excessive delay %s: %s" % (m['delay'], furl)
|
||||
print("excessive delay %s: %s" % (m['delay'], furl))
|
||||
elif facility == "foolscap.negotiation":
|
||||
if (message == "got offer for an existing connection"
|
||||
or "master told us to use a new connection" in message):
|
||||
print "foolscap: got offer for an existing connection", message, furl
|
||||
print("foolscap: got offer for an existing connection", message, furl)
|
||||
else:
|
||||
#print "foolscap:", message
|
||||
pass
|
||||
elif m['level'] > 30: # SCARY or BAD
|
||||
#self.sound("mech/alarm-bell.aiff")
|
||||
self.sound("environ/thunder-tense.aiff")
|
||||
print m, furl
|
||||
print(m, furl)
|
||||
elif m['level'] == 30: # WEIRD
|
||||
self.sound("mech/glass-breaking.aiff")
|
||||
print m, furl
|
||||
print(m, furl)
|
||||
elif m['level'] > 20: # UNUSUAL or INFREQUENT or CURIOUS
|
||||
self.sound("mech/telephone-ring-old.aiff")
|
||||
print m, furl
|
||||
print(m, furl)
|
||||
|
||||
class BoodleSender(protocol.Protocol):
|
||||
def connectionMade(self):
|
||||
print "connected to boodler"
|
||||
print("connected to boodler")
|
||||
self.factory.listener.boodler = self.transport
|
||||
|
||||
class Bridge(Referenceable):
|
||||
@ -150,7 +152,7 @@ class Monitor(service.MultiService):
|
||||
reactor.connectTCP("localhost", 31863, cf)
|
||||
|
||||
def _got_logpublisher(self, publisher, fn, i, target):
|
||||
print "connected to %s:%d, %s" % (fn, i, target)
|
||||
print("connected to %s:%d, %s" % (fn, i, target))
|
||||
b = Bridge(target, self.listener)
|
||||
publisher.callRemote("subscribe_to_all", b)
|
||||
|
||||
|
@ -4,6 +4,9 @@ VERSION=`sh -c "cat src/allmydata/_version.py | grep verstr | head -n 1 | cut -d
|
||||
PWD=`pwd`
|
||||
TARGET="/Applications/tahoe.app"
|
||||
|
||||
# Clean up any test garbage that might be left over from a recent test run.
|
||||
rm -rvf _trial_temp
|
||||
|
||||
virtualenv osx-venv
|
||||
osx-venv/bin/pip install .
|
||||
|
||||
|
@ -2,13 +2,15 @@
|
||||
|
||||
# This helper script is used with the 'test-desert-island' Makefile target.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import sys
|
||||
|
||||
good = True
|
||||
build_out = sys.argv[1]
|
||||
mode = sys.argv[2]
|
||||
|
||||
print
|
||||
print()
|
||||
|
||||
for line in open(build_out, "r"):
|
||||
if mode == "no-downloads":
|
||||
@ -29,13 +31,13 @@ for line in open(build_out, "r"):
|
||||
# currently don't enforce that stronger requirement.
|
||||
if (line.startswith("Downloading http:") or
|
||||
line.startswith("Downloading https:")):
|
||||
print line,
|
||||
print(line, end=' ')
|
||||
good = False
|
||||
if good:
|
||||
if mode == "no-downloads":
|
||||
print "Good: build did not try to download any files"
|
||||
print("Good: build did not try to download any files")
|
||||
sys.exit(0)
|
||||
else:
|
||||
if mode == "no-downloads":
|
||||
print "Failed: build tried to download files"
|
||||
print("Failed: build tried to download files")
|
||||
sys.exit(1)
|
||||
|
@ -2,6 +2,8 @@
|
||||
# This script generates a table of dependencies in HTML format on stdout.
|
||||
# It expects to be run in the tahoe-lafs-dep-eggs directory.
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import re, os, sys
|
||||
import pkg_resources
|
||||
|
||||
@ -83,27 +85,27 @@ greybgstyle = '; background-color: #E0E0E0'
|
||||
nobgstyle = ''
|
||||
unsupportedstyle = '; color: #C00000'
|
||||
|
||||
print '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">'
|
||||
print '<html>'
|
||||
print '<head>'
|
||||
print ' <meta http-equiv="Content-Type" content="text/html;charset=us-ascii">'
|
||||
print ' <title>Software packages that Tahoe-LAFS depends on</title>'
|
||||
print '</head>'
|
||||
print '<body>'
|
||||
print '<h2>What is this?</h2>'
|
||||
print '<p>See <a href="https://tahoe-lafs.org/trac/tahoe-lafs/browser/docs/quickstart.rst">quickstart.rst</a>, <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Installation">wiki:Installation</a>, and <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CompileError">wiki:CompileError</a>.'
|
||||
print '<h2>Software packages that Tahoe-LAFS depends on</h2>'
|
||||
print
|
||||
print('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">')
|
||||
print('<html>')
|
||||
print('<head>')
|
||||
print(' <meta http-equiv="Content-Type" content="text/html;charset=us-ascii">')
|
||||
print(' <title>Software packages that Tahoe-LAFS depends on</title>')
|
||||
print('</head>')
|
||||
print('<body>')
|
||||
print('<h2>What is this?</h2>')
|
||||
print('<p>See <a href="https://tahoe-lafs.org/trac/tahoe-lafs/browser/docs/quickstart.rst">quickstart.rst</a>, <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/Installation">wiki:Installation</a>, and <a href="https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CompileError">wiki:CompileError</a>.')
|
||||
print('<h2>Software packages that Tahoe-LAFS depends on</h2>')
|
||||
print()
|
||||
for pyver in reversed(sorted(python_versions)):
|
||||
greybackground = False
|
||||
if pyver:
|
||||
print '<p>Packages for Python %s that have compiled C/C++ code:</p>' % (pyver,)
|
||||
print '<table border="1">'
|
||||
print ' <tr>'
|
||||
print ' <th style="background-color: #FFFFD0" width="%d%%"> Platform </th>' % (width,)
|
||||
print('<p>Packages for Python %s that have compiled C/C++ code:</p>' % (pyver,))
|
||||
print('<table border="1">')
|
||||
print(' <tr>')
|
||||
print(' <th style="background-color: #FFFFD0" width="%d%%"> Platform </th>' % (width,))
|
||||
for pkg in sorted(platform_dependent_pkgs):
|
||||
print ' <th style="background-color: #FFE8FF;" width="%d%%"> %s </th>' % (width, pkg)
|
||||
print ' </tr>'
|
||||
print(' <th style="background-color: #FFE8FF;" width="%d%%"> %s </th>' % (width, pkg))
|
||||
print(' </tr>')
|
||||
|
||||
first = True
|
||||
for platform in sorted(matrix[pyver]):
|
||||
@ -122,38 +124,38 @@ for pyver in reversed(sorted(python_versions)):
|
||||
style2 = first and 'border-top: 2px solid #000000' or ''
|
||||
style2 += bgstyle
|
||||
annotated_platform = platform.replace('-', '‑') + (unsupported_python and ' (unsupported)' or '')
|
||||
print ' <tr>'
|
||||
print ' <td style="%s"> %s </td>' % (style1, annotated_platform)
|
||||
print(' <tr>')
|
||||
print(' <td style="%s"> %s </td>' % (style1, annotated_platform))
|
||||
for pkg in sorted(platform_dependent_pkgs):
|
||||
if pkg == 'pywin32' and not platform.startswith('windows'):
|
||||
print ' <td style="border: 0; text-align: center; %s"> n/a </td>' % (style2,)
|
||||
print(' <td style="border: 0; text-align: center; %s"> n/a </td>' % (style2,))
|
||||
else:
|
||||
print ' <td style="%s"> %s</td>' % (style2, file_list(row_files, pkg))
|
||||
print ' </tr>'
|
||||
print(' <td style="%s"> %s</td>' % (style2, file_list(row_files, pkg)))
|
||||
print(' </tr>')
|
||||
first = False
|
||||
|
||||
print '</table>'
|
||||
print
|
||||
print('</table>')
|
||||
print()
|
||||
|
||||
print '<p>Packages that are platform-independent or source-only:</p>'
|
||||
print '<table border="1">'
|
||||
print ' <tr>'
|
||||
print ' <th style="background-color:#FFFFD0;"> Package </th>'
|
||||
print ' <th style="background-color:#FFE8FF;"> All Python versions </th>'
|
||||
print ' </tr>'
|
||||
print('<p>Packages that are platform-independent or source-only:</p>')
|
||||
print('<table border="1">')
|
||||
print(' <tr>')
|
||||
print(' <th style="background-color:#FFFFD0;"> Package </th>')
|
||||
print(' <th style="background-color:#FFE8FF;"> All Python versions </th>')
|
||||
print(' </tr>')
|
||||
|
||||
style1 = 'border-top: 2px solid #000000; background-color:#FFFFF0;'
|
||||
style2 = 'border-top: 2px solid #000000;'
|
||||
m = matrix['']['']
|
||||
for pkg in sorted(platform_independent_pkgs):
|
||||
print ' <tr>'
|
||||
print ' <th style="%s"> %s </th>' % (style1, pkg)
|
||||
print ' <td style="%s"> %s</td>' % (style2, file_list(m, pkg))
|
||||
print ' </tr>'
|
||||
print(' <tr>')
|
||||
print(' <th style="%s"> %s </th>' % (style1, pkg))
|
||||
print(' <td style="%s"> %s</td>' % (style2, file_list(m, pkg)))
|
||||
print(' </tr>')
|
||||
|
||||
print '</table>'
|
||||
print('</table>')
|
||||
|
||||
# The document does validate, but not when it is included at the bottom of a directory listing.
|
||||
#print '<hr>'
|
||||
#print '<a href="http://validator.w3.org/check?uri=referer" target="_blank"><img border="0" src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01 Transitional" height="31" width="88"></a>'
|
||||
print '</body></html>'
|
||||
#print('<hr>')
|
||||
#print('<a href="http://validator.w3.org/check?uri=referer" target="_blank"><img border="0" src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01 Transitional" height="31" width="88"></a>')
|
||||
print('</body></html>')
|
||||
|
@ -1,4 +1,6 @@
|
||||
import sys, os, io
|
||||
from __future__ import print_function
|
||||
|
||||
import sys, os, io, re
|
||||
from twisted.internet import reactor, protocol, task, defer
|
||||
from twisted.python.procutils import which
|
||||
from twisted.python import usage
|
||||
@ -10,6 +12,7 @@ from twisted.python import usage
|
||||
class Options(usage.Options):
|
||||
optParameters = [
|
||||
["warnings", None, None, "file to write warnings into at end of test run"],
|
||||
["package", None, None, "Python package to which to restrict warning collection"]
|
||||
]
|
||||
|
||||
def parseArgs(self, command, *args):
|
||||
@ -17,7 +20,7 @@ class Options(usage.Options):
|
||||
self["args"] = list(args)
|
||||
|
||||
description = """Run as:
|
||||
PYTHONWARNINGS=default::DeprecationWarning python run-deprecations.py [--warnings=STDERRFILE] COMMAND ARGS..
|
||||
PYTHONWARNINGS=default::DeprecationWarning python run-deprecations.py [--warnings=STDERRFILE] [--package=PYTHONPACKAGE ] COMMAND ARGS..
|
||||
"""
|
||||
|
||||
class RunPP(protocol.ProcessProtocol):
|
||||
@ -32,6 +35,34 @@ class RunPP(protocol.ProcessProtocol):
|
||||
rc = reason.value.exitCode
|
||||
self.d.callback((signal, rc))
|
||||
|
||||
|
||||
def make_matcher(options):
|
||||
"""
|
||||
Make a function that matches a line with a relevant deprecation.
|
||||
|
||||
A deprecation warning line looks something like this::
|
||||
|
||||
somepath/foo/bar/baz.py:43: DeprecationWarning: Foo is deprecated, try bar instead.
|
||||
|
||||
Sadly there is no guarantee warnings begin at the beginning of a line
|
||||
since they are written to output without coordination with whatever other
|
||||
Python code is running in the process.
|
||||
|
||||
:return: A one-argument callable that accepts a string and returns
|
||||
``True`` if it contains an interesting warning and ``False``
|
||||
otherwise.
|
||||
"""
|
||||
pattern = r".*\.py[oc]?:\d+:" # (Pending)?DeprecationWarning: .*"
|
||||
if options["package"]:
|
||||
pattern = r".*/{}/".format(
|
||||
re.escape(options["package"]),
|
||||
) + pattern
|
||||
expression = re.compile(pattern)
|
||||
def match(line):
|
||||
return expression.match(line) is not None
|
||||
return match
|
||||
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def run_command(main):
|
||||
config = Options()
|
||||
@ -51,7 +82,7 @@ def run_command(main):
|
||||
pw = os.environ.get("PYTHONWARNINGS")
|
||||
DDW = "default::DeprecationWarning"
|
||||
if pw != DDW:
|
||||
print "note: $PYTHONWARNINGS is '%s', not the expected %s" % (pw, DDW)
|
||||
print("note: $PYTHONWARNINGS is '%s', not the expected %s" % (pw, DDW))
|
||||
sys.stdout.flush()
|
||||
|
||||
pp = RunPP()
|
||||
@ -61,6 +92,8 @@ def run_command(main):
|
||||
reactor.spawnProcess(pp, exe, [exe] + config["args"], env=None)
|
||||
(signal, rc) = yield pp.d
|
||||
|
||||
match = make_matcher(config)
|
||||
|
||||
# maintain ordering, but ignore duplicates (for some reason, either the
|
||||
# 'warnings' module or twisted.python.deprecate isn't quashing them)
|
||||
already = set()
|
||||
@ -73,22 +106,22 @@ def run_command(main):
|
||||
|
||||
pp.stdout.seek(0)
|
||||
for line in pp.stdout.readlines():
|
||||
if "DeprecationWarning" in line:
|
||||
if match(line):
|
||||
add(line) # includes newline
|
||||
|
||||
pp.stderr.seek(0)
|
||||
for line in pp.stderr.readlines():
|
||||
if "DeprecationWarning" in line:
|
||||
if match(line):
|
||||
add(line)
|
||||
|
||||
if warnings:
|
||||
if config["warnings"]:
|
||||
with open(config["warnings"], "wb") as f:
|
||||
print >>f, "".join(warnings)
|
||||
print "ERROR: %d deprecation warnings found" % len(warnings)
|
||||
print("".join(warnings), file=f)
|
||||
print("ERROR: %d deprecation warnings found" % len(warnings))
|
||||
sys.exit(1)
|
||||
|
||||
print "no deprecation warnings"
|
||||
print("no deprecation warnings")
|
||||
if signal:
|
||||
sys.exit(signal)
|
||||
sys.exit(rc)
|
||||
|
@ -1,5 +1,7 @@
|
||||
#! /usr/bin/env python
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import locale, os, platform, subprocess, sys, traceback
|
||||
|
||||
|
||||
@ -9,31 +11,34 @@ def foldlines(s, numlines=None):
|
||||
lines = lines[:numlines]
|
||||
return " ".join(lines).replace("\r", "")
|
||||
|
||||
|
||||
def print_platform():
|
||||
try:
|
||||
import platform
|
||||
out = platform.platform()
|
||||
print "platform:", foldlines(out)
|
||||
print "machine: ", platform.machine()
|
||||
print("platform:", foldlines(out))
|
||||
print("machine: ", platform.machine())
|
||||
if hasattr(platform, 'linux_distribution'):
|
||||
print "linux_distribution:", repr(platform.linux_distribution())
|
||||
print("linux_distribution:", repr(platform.linux_distribution()))
|
||||
except EnvironmentError:
|
||||
sys.stderr.write("\nGot exception using 'platform'. Exception follows\n")
|
||||
traceback.print_exc(file=sys.stderr)
|
||||
sys.stderr.flush()
|
||||
|
||||
|
||||
def print_python_ver():
|
||||
print "python:", foldlines(sys.version)
|
||||
print 'maxunicode: ' + str(sys.maxunicode)
|
||||
print("python:", foldlines(sys.version))
|
||||
print('maxunicode: ' + str(sys.maxunicode))
|
||||
|
||||
|
||||
def print_python_encoding_settings():
|
||||
print 'filesystem.encoding: ' + str(sys.getfilesystemencoding())
|
||||
print 'locale.getpreferredencoding: ' + str(locale.getpreferredencoding())
|
||||
print('filesystem.encoding: ' + str(sys.getfilesystemencoding()))
|
||||
print('locale.getpreferredencoding: ' + str(locale.getpreferredencoding()))
|
||||
try:
|
||||
print 'locale.defaultlocale: ' + str(locale.getdefaultlocale())
|
||||
except ValueError, e:
|
||||
print 'got exception from locale.getdefaultlocale(): ', e
print 'locale.locale: ' + str(locale.getlocale())
print('locale.defaultlocale: ' + str(locale.getdefaultlocale()))
except ValueError as e:
print('got exception from locale.getdefaultlocale(): ', e)
print('locale.locale: ' + str(locale.getlocale()))

def print_stdout(cmdlist, label=None, numlines=None):
try:
@@ -41,23 +46,25 @@ def print_stdout(cmdlist, label=None, numlines=None):
label = cmdlist[0]
res = subprocess.Popen(cmdlist, stdin=open(os.devnull),
stdout=subprocess.PIPE).communicate()[0]
print label + ': ' + foldlines(res, numlines)
except EnvironmentError, e:
print(label + ': ' + foldlines(res.decode('utf-8'), numlines))
except EnvironmentError as e:
if isinstance(e, OSError) and e.errno == 2:
print label + ': no such file or directory'
print(label + ': no such file or directory')
return
sys.stderr.write("\nGot exception invoking '%s'. Exception follows.\n" % (cmdlist[0],))
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()


def print_as_ver():
if os.path.exists('a.out'):
print "WARNING: a file named a.out exists, and getting the version of the 'as' assembler writes to that filename, so I'm not attempting to get the version of 'as'."
print("WARNING: a file named a.out exists, and getting the version of the 'as' assembler "
"writes to that filename, so I'm not attempting to get the version of 'as'.")
return
try:
res = subprocess.Popen(['as', '-version'], stdin=open(os.devnull),
stdout, stderr = subprocess.Popen(['as', '-version'], stdin=open(os.devnull),
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
print 'as: ' + foldlines(res[0]+' '+res[1])
print('as: ' + foldlines(stdout.decode('utf-8') + ' ' + stderr.decode('utf-8')))
if os.path.exists('a.out'):
os.remove('a.out')
except EnvironmentError:
@@ -65,53 +72,56 @@ def print_as_ver():
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()


def print_setuptools_ver():
try:
import pkg_resources
out = str(pkg_resources.require("setuptools"))
print "setuptools:", foldlines(out)
print("setuptools:", foldlines(out))
except (ImportError, EnvironmentError):
sys.stderr.write("\nGot exception using 'pkg_resources' to get the version of setuptools. Exception follows\n")
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
except pkg_resources.DistributionNotFound:
print 'setuptools: DistributionNotFound'
print('setuptools: DistributionNotFound')


def print_py_pkg_ver(pkgname, modulename=None):
if modulename is None:
modulename = pkgname
print
print()
try:
import pkg_resources
out = str(pkg_resources.require(pkgname))
print pkgname + ': ' + foldlines(out)
print(pkgname + ': ' + foldlines(out))
except (ImportError, EnvironmentError):
sys.stderr.write("\nGot exception using 'pkg_resources' to get the version of %s. Exception follows.\n" % (pkgname,))
traceback.print_exc(file=sys.stderr)
sys.stderr.flush()
except pkg_resources.DistributionNotFound:
print pkgname + ': DistributionNotFound'
print(pkgname + ': DistributionNotFound')
try:
__import__(modulename)
except ImportError:
pass
else:
modobj = sys.modules.get(modulename)
print pkgname + ' module: ' + str(modobj)
print(pkgname + ' module: ' + str(modobj))
try:
print pkgname + ' __version__: ' + str(modobj.__version__)
print(pkgname + ' __version__: ' + str(modobj.__version__))
except AttributeError:
pass


print_platform()
print
print()
print_python_ver()
print_stdout(['virtualenv', '--version'])
print_stdout(['tox', '--version'])
print
print()
print_stdout(['locale'])
print_python_encoding_settings()
print
print()
print_stdout(['buildbot', '--version'])
print_stdout(['buildslave', '--version'])
if 'windows' in platform.system().lower():
@@ -133,11 +143,9 @@ print_py_pkg_ver('coverage')
print_py_pkg_ver('cryptography')
print_py_pkg_ver('foolscap')
print_py_pkg_ver('mock')
print_py_pkg_ver('Nevow', 'nevow')
print_py_pkg_ver('pyasn1')
print_py_pkg_ver('pycparser')
print_py_pkg_ver('pycrypto', 'Crypto')
print_py_pkg_ver('pycryptopp')
print_py_pkg_ver('cryptography')
print_py_pkg_ver('pyflakes')
print_py_pkg_ver('pyOpenSSL', 'OpenSSL')
print_py_pkg_ver('six')
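The conversion above pairs the print() function with an explicit decode of the captured subprocess output, since communicate() returns bytes on Python 3. A minimal standalone sketch of that pattern, assuming an arbitrary command and a plain splitlines join in place of the script's foldlines helper:

from __future__ import print_function

import subprocess

def show_version(cmdlist):
    # Run the tool and capture both streams; these come back as bytes on py3.
    try:
        proc = subprocess.Popen(cmdlist, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
    except EnvironmentError as e:
        print('%s: could not run (%s)' % (cmdlist[0], e))
        return
    # Decode before concatenating with str, then collapse to a single line.
    text = (out + b' ' + err).decode('utf-8', 'replace')
    print(cmdlist[0] + ': ' + ' '.join(text.splitlines()))

if __name__ == '__main__':
    show_version(['python', '--version'])  # placeholder command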
@@ -6,9 +6,7 @@ from subprocess import Popen, PIPE
cmd = ["git", "status", "--porcelain"]
p = Popen(cmd, stdout=PIPE)
output = p.communicate()[0]
print output
print(output)
if output == "":
sys.exit(0)
sys.exit(1)

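The fragment above exits non-zero whenever the working tree is dirty. A self-contained sketch of the same check, mirroring the exit-code convention used here (the decode is an addition for Python 3 friendliness, not part of the original):

from __future__ import print_function

import sys
from subprocess import Popen, PIPE

# "git status --porcelain" prints one line per modified or untracked file,
# so empty output means the working tree is clean.
p = Popen(["git", "status", "--porcelain"], stdout=PIPE)
output = p.communicate()[0].decode("utf-8")
print(output)
sys.exit(0 if output == "" else 1)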
@@ -15,7 +15,6 @@

# allmydata-tahoe: 1.10.0.post185.dev0 [2249-deps-and-osx-packaging-1: 76ac53846042d9a4095995be92af66cdc09d5ad0-dirty] (/Applications/tahoe.app/src)
# foolscap: 0.7.0 (/Applications/tahoe.app/support/lib/python2.7/site-packages/foolscap-0.7.0-py2.7.egg)
# pycryptopp: 0.6.0.1206569328141510525648634803928199668821045408958 (/Applications/tahoe.app/support/lib/python2.7/site-packages/pycryptopp-0.6.0.1206569328141510525648634803928199668821045408958-py2.7-macosx-10.9-intel.egg)
# zfec: 1.4.24 (/Applications/tahoe.app/support/lib/python2.7/site-packages/zfec-1.4.24-py2.7-macosx-10.9-intel.egg)
# Twisted: 13.0.0 (/Applications/tahoe.app/support/lib/python2.7/site-packages/Twisted-13.0.0-py2.7-macosx-10.9-intel.egg)
# Nevow: 0.11.1 (/Applications/tahoe.app/support/lib/python2.7/site-packages/Nevow-0.11.1-py2.7.egg)
@@ -23,7 +22,6 @@
# python: 2.7.5 (/usr/bin/python)
# platform: Darwin-13.4.0-x86_64-i386-64bit (None)
# pyOpenSSL: 0.13 (/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python)
# pycrypto: 2.6.1 (/Applications/tahoe.app/support/lib/python2.7/site-packages/pycrypto-2.6.1-py2.7-macosx-10.9-intel.egg)
# pyasn1: 0.1.7 (/Applications/tahoe.app/support/lib/python2.7/site-packages/pyasn1-0.1.7-py2.7.egg)
# mock: 1.0.1 (/Applications/tahoe.app/support/lib/python2.7/site-packages)
# setuptools: 0.6c16dev6 (/Applications/tahoe.app/support/lib/python2.7/site-packages/setuptools-0.6c16dev6.egg)
@@ -31,6 +29,8 @@
# characteristic: 14.1.0 (/Applications/tahoe.app/support/lib/python2.7/site-packages)
# pyasn1-modules: 0.0.5 (/Applications/tahoe.app/support/lib/python2.7/site-packages/pyasn1_modules-0.0.5-py2.7.egg)

from __future__ import print_function

import os, re, shutil, subprocess, sys, tempfile

def test_osx_pkg(pkgfile):
@@ -56,7 +56,7 @@ def test_osx_pkg(pkgfile):
gunzip_process = subprocess.Popen(['gunzip', '-dc'],
stdin=cat_process.stdout,
stdout=subprocess.PIPE)
cpio_process = subprocess.Popen(['cpio', '-i'],
cpio_process = subprocess.Popen(['cpio', '-i', '--verbose'],
stdin=gunzip_process.stdout,
stdout=subprocess.PIPE)
cpio_process.communicate()
@@ -68,6 +68,13 @@ def test_osx_pkg(pkgfile):

rc = callit.wait()
if rc != 0:
print(
"{} failed.\n"
"stdout: {}\n"
"stderr: {}\n".format(
cmd, callit.stdout.read(), callit.stderr.read(),
),
)
raise Exception("FAIL: '%s' returned non-zero exit code: %r" % (" ".join(cmd), rc))
stdouttxt = callit.stdout.read()

@@ -86,9 +93,8 @@ def test_osx_pkg(pkgfile):
if __name__ == '__main__':
pkgs = [fn for fn in os.listdir(".") if fn.endswith("-osx.pkg")]
if len(pkgs) != 1:
print "ERR: unable to find a single .pkg file:", pkgs
print("ERR: unable to find a single .pkg file:", pkgs)
sys.exit(1)
print "Testing %s ..." % pkgs[0]
print("Testing %s ..." % pkgs[0])
test_osx_pkg(pkgs[0])
print "Looks OK!"

print("Looks OK!")
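test_osx_pkg chains processes by handing each child the previous child's stdout. A small sketch of that chaining in isolation, with gzip/gunzip standing in for the cat | gunzip | cpio pipeline above (the payload string is invented for the example):

import subprocess

# First stage: compress some bytes read from stdin.
gzip_proc = subprocess.Popen(['gzip', '-c'], stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE)
# Second stage: decompress, reading directly from the first stage's stdout.
gunzip_proc = subprocess.Popen(['gunzip', '-dc'],
                               stdin=gzip_proc.stdout,
                               stdout=subprocess.PIPE)
gzip_proc.stdin.write(b'hello from a subprocess pipeline\n')
gzip_proc.stdin.close()
# Let the first stage receive SIGPIPE if the downstream reader exits early.
gzip_proc.stdout.close()
out, _ = gunzip_proc.communicate()
print(out)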
@@ -2,6 +2,8 @@

# ./check-debugging.py src

from __future__ import print_function

import sys, re, os

ok = True
@@ -15,8 +17,8 @@ for starting_point in sys.argv[1:]:
lineno = lineno+1
mo = re.search(r"\.setDebugging\(True\)", line)
if mo:
print "Do not use defer.setDebugging(True) in production"
print "First used here: %s:%d" % (fn, lineno)
print("Do not use defer.setDebugging(True) in production")
print("First used here: %s:%d" % (fn, lineno))
sys.exit(1)
print "No cases of defer.setDebugging(True) were found, good!"
print("No cases of defer.setDebugging(True) were found, good!")
sys.exit(0)
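check-debugging.py is essentially a grep with a non-zero exit code on a match. A stripped-down sketch of that shape, assuming a walk over .py files from a directory given on the command line (the helper name and structure are illustrative, not the script's own):

from __future__ import print_function

import os, re, sys

PATTERN = re.compile(r"\.setDebugging\(True\)")

def scan(root):
    for dirpath, dirnames, filenames in os.walk(root):
        for fn in filenames:
            if not fn.endswith(".py"):
                continue
            path = os.path.join(dirpath, fn)
            for lineno, line in enumerate(open(path), start=1):
                if PATTERN.search(line):
                    # %s:%d lets editors jump straight to the offending line.
                    print("%s:%d: forbidden pattern" % (path, lineno))
                    return False
    return True

if __name__ == "__main__":
    sys.exit(0 if scan(sys.argv[1] if len(sys.argv) > 1 else ".") else 1)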
@@ -4,6 +4,8 @@
#
# bin/tahoe @misc/coding_tools/check-interfaces.py

from __future__ import print_function

import os, sys, re, platform

import zope.interface as zi
@@ -44,10 +46,10 @@ def strictly_implements(*interfaces):
for interface in interfaces:
try:
verifyClass(interface, cls)
except Exception, e:
print >>_err, ("%s.%s does not correctly implement %s.%s:\n%s"
except Exception as e:
print("%s.%s does not correctly implement %s.%s:\n%s"
% (cls.__module__, cls.__name__,
interface.__module__, interface.__name__, e))
interface.__module__, interface.__name__, e), file=_err)
else:
_other_modules_with_violations.add(cls.__module__)
return cls
@@ -62,7 +64,7 @@ def check():

if len(sys.argv) >= 2:
if sys.argv[1] == '--help' or len(sys.argv) > 2:
print >>_err, "Usage: check-miscaptures.py [SOURCEDIR]"
print("Usage: check-miscaptures.py [SOURCEDIR]", file=_err)
return
srcdir = sys.argv[1]
else:
@@ -79,26 +81,26 @@ def check():
for fn in filenames:
(basename, ext) = os.path.splitext(fn)
if ext in ('.pyc', '.pyo') and not os.path.exists(os.path.join(dirpath, basename+'.py')):
print >>_err, ("Warning: no .py source file for %r.\n"
% (os.path.join(dirpath, fn),))
print("Warning: no .py source file for %r.\n"
% (os.path.join(dirpath, fn),), file=_err)

if ext == '.py' and not excluded_file_basenames.match(basename):
relpath = os.path.join(dirpath[len(srcdir)+1:], basename)
module = relpath.replace(os.sep, '/').replace('/', '.')
try:
__import__(module)
except ImportError, e:
except ImportError as e:
if not is_windows and (' _win' in str(e) or 'win32' in str(e)):
print >>_err, ("Warning: %r imports a Windows-specific module, so we cannot check it (%s).\n"
% (module, str(e)))
print("Warning: %r imports a Windows-specific module, so we cannot check it (%s).\n"
% (module, str(e)), file=_err)
else:
import traceback
traceback.print_exc(file=_err)
print >>_err
print(file=_err)

others = list(_other_modules_with_violations)
others.sort()
print >>_err, "There were also interface violations in:\n", ", ".join(others), "\n"
print("There were also interface violations in:\n", ", ".join(others), "\n", file=_err)


# Forked from
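The verifyClass call used above comes from zope.interface.verify. A minimal, self-contained illustration of the kind of violation it reports; the interface and class here are invented for the example and are not part of Tahoe-LAFS:

from __future__ import print_function

from zope.interface import Interface, implementer
from zope.interface.verify import verifyClass
from zope.interface.exceptions import Invalid

class IGreeter(Interface):
    def greet(name):
        """Return a greeting for name."""

@implementer(IGreeter)
class BrokenGreeter(object):
    # Claims to provide IGreeter but omits greet(), so verification fails.
    pass

try:
    verifyClass(IGreeter, BrokenGreeter)
except Invalid as e:
    print("%s does not correctly implement %s:\n%s"
          % (BrokenGreeter.__name__, IGreeter.__name__, e))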
@@ -1,5 +1,7 @@
#! /usr/bin/python

from __future__ import print_function

import os, sys, compiler
from compiler.ast import Node, For, While, ListComp, AssName, Name, Lambda, Function

@@ -13,7 +15,7 @@ def check_file(path):
def check_thing(parser, thing):
try:
ast = parser(thing)
except SyntaxError, e:
except SyntaxError as e:
return e
else:
results = []
@@ -133,10 +135,10 @@ def make_result(funcnode, var_name, var_lineno):

def report(out, path, results):
for r in results:
print >>out, path + (":%r %s captures %r assigned at line %d" % r)
print(path + (":%r %s captures %r assigned at line %d" % r), file=out)

def check(sources, out):
class Counts:
class Counts(object):
n = 0
processed_files = 0
suspect_files = 0
@@ -146,7 +148,7 @@ def check(sources, out):
def _process(path):
results = check_file(path)
if isinstance(results, SyntaxError):
print >>out, path + (" NOT ANALYSED due to syntax error: %s" % results)
print(path + (" NOT ANALYSED due to syntax error: %s" % results), file=out)
counts.error_files += 1
else:
report(out, path, results)
@@ -156,7 +158,7 @@ def check(sources, out):
counts.suspect_files += 1

for source in sources:
print >>out, "Checking %s..." % (source,)
print("Checking %s..." % (source,), file=out)
if os.path.isfile(source):
_process(source)
else:
@@ -166,11 +168,11 @@ def check(sources, out):
if ext == '.py':
_process(os.path.join(dirpath, fn))

print >>out, ("%d suspiciously captured variables in %d out of %d file(s)."
% (counts.n, counts.suspect_files, counts.processed_files))
print("%d suspiciously captured variables in %d out of %d file(s)."
% (counts.n, counts.suspect_files, counts.processed_files), file=out)
if counts.error_files > 0:
print >>out, ("%d file(s) not processed due to syntax errors."
% (counts.error_files,))
print("%d file(s) not processed due to syntax errors."
% (counts.error_files,), file=out)
return counts.n
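Note that check-miscaptures.py still depends on the Python-2-only compiler package, so only its print and except syntax is modernized here. A rough sketch of the same parse-and-report step using the standard ast module instead is shown below; this is a substitute technique for illustration only, and the loop count stands in for the script's real miscapture analysis:

from __future__ import print_function

import ast, sys

def check_thing(source, filename="<string>"):
    # Parse the source; return the SyntaxError instead of raising, the way
    # check_thing() above does, so the caller can report it and keep going.
    try:
        tree = ast.parse(source, filename=filename)
    except SyntaxError as e:
        return e
    loops = [n for n in ast.walk(tree) if isinstance(n, (ast.For, ast.While))]
    print("%s: %d loop(s) parsed" % (filename, len(loops)))
    return None

if __name__ == "__main__":
    err = check_thing(open(sys.argv[1]).read(), sys.argv[1])
    if err is not None:
        print("NOT ANALYSED due to syntax error: %s" % err)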

@@ -2,6 +2,8 @@

# ./check-umids.py src

from __future__ import print_function

import sys, re, os

ok = True
@@ -20,13 +22,13 @@ for starting_point in sys.argv[1:]:
umid = mo.group(1)
if umid in umids:
oldfn, oldlineno = umids[umid]
print "%s:%d: duplicate umid '%s'" % (fn, lineno, umid)
print "%s:%d: first used here" % (oldfn, oldlineno)
print("%s:%d: duplicate umid '%s'" % (fn, lineno, umid))
print("%s:%d: first used here" % (oldfn, oldlineno))
ok = False
umids[umid] = (fn,lineno)

if ok:
print "all umids are unique"
print("all umids are unique")
else:
print "some umids were duplicates"
print("some umids were duplicates")
sys.exit(1)

@@ -1,5 +1,7 @@
#!/usr/bin/env python

from __future__ import print_function

import os, sys

from twisted.python import usage
@@ -22,7 +24,7 @@ def check(fn):
line = line[:-1]
if line.rstrip() != line:
# the %s:%d:%d: lets emacs' compile-mode jump to those locations
print "%s:%d:%d: trailing whitespace" % (fn, i+1, len(line)+1)
print("%s:%d:%d: trailing whitespace" % (fn, i+1, len(line)+1))
found[0] = True
f.close()

@@ -21,10 +21,11 @@
# Install 'click' first. I run this with py2, but py3 might work too, if the
# wheels can be built with py3.

from __future__ import print_function, unicode_literals
import os, sys, subprocess, json, tempfile, zipfile, io, re, itertools
from __future__ import unicode_literals, print_function
import os, sys, subprocess, json, tempfile, zipfile, re, itertools
import email.parser
from pprint import pprint
from six.moves import StringIO
import click

all_packages = {} # name -> version
@@ -218,7 +219,7 @@ def scan(name, extra=None, path=""):
path=path+"->%s[%s]" % (dep_name, dep_extra))

def generate_dot():
f = io.StringIO()
f = StringIO()
f.write("digraph {\n")
for name, extra in extras_to_show.keys():
version = all_packages[name]

@@ -1,5 +1,7 @@
#!/usr/bin/env python

from __future__ import print_function

"""
Given a list of nodeids and a 'convergence' file, create a bunch of files
that will (when encoded at k=1,N=1) be uploaded to specific nodeids.
@@ -86,8 +88,8 @@ for line in open(opts["nodeids"], "r").readlines():
nodes[nodeid] = nickname

if opts["k"] != 3 or opts["N"] != 10:
print "note: using non-default k/N requires patching the Tahoe code"
print "src/allmydata/client.py line 55, DEFAULT_ENCODING_PARAMETERS"
print("note: using non-default k/N requires patching the Tahoe code")
print("src/allmydata/client.py line 55, DEFAULT_ENCODING_PARAMETERS")

convergence_file = os.path.expanduser(opts["convergence"])
convergence_s = open(convergence_file, "rb").read().strip()
@@ -109,7 +111,7 @@ def find_share_for_target(target):
while True:
attempts += 1
suffix = base32.b2a(os.urandom(10))
if verbose: print " trying", suffix,
if verbose: print(" trying", suffix, end=' ')
data = prefix + suffix + "\n"
assert len(data) > 55 # no LIT files
# now, what storage index will this get?
@@ -117,11 +119,11 @@ def find_share_for_target(target):
eu = upload.EncryptAnUploadable(u)
d = eu.get_storage_index() # this happens to run synchronously
def _got_si(si, data=data):
if verbose: print "SI", base32.b2a(si),
if verbose: print("SI", base32.b2a(si), end=' ')
peerlist = get_permuted_peers(si)
if peerlist[0] == target:
# great!
if verbose: print " yay!"
if verbose: print(" yay!")
fn = base32.b2a(target)
if nodes[target]:
nickname = nodes[target].replace("/", "_")
@@ -131,7 +133,7 @@ def find_share_for_target(target):
open(fn, "w").write(data)
return True
# nope, must try again
if verbose: print " boo"
if verbose: print(" boo")
return False
d.addCallback(_got_si)
# get sneaky and look inside the Deferred for the synchronous result
@@ -142,10 +144,10 @@ os.mkdir("canaries")
attempts = []
for target in nodes:
target_s = base32.b2a(target)
print "working on", target_s
print("working on", target_s)
attempts.append(find_share_for_target(target))
print "done"
print "%d attempts total, avg %d per target, max %d" % \
(sum(attempts), 1.0* sum(attempts) / len(nodes), max(attempts))
print("done")
print("%d attempts total, avg %d per target, max %d" % \
(sum(attempts), 1.0* sum(attempts) / len(nodes), max(attempts)))
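The io.StringIO to six.moves.StringIO change in graph-deps.py is the usual Python 2/3 text-buffer compromise: io.StringIO on Python 2 accepts only unicode, while six.moves.StringIO picks a buffer type that accepts the native str on both interpreters. A small sketch of that usage, assuming six is installed:

from __future__ import print_function

from six.moves import StringIO

# Writing native-str text works on both Python 2 and Python 3 with this
# buffer; io.StringIO would reject the byte str on Python 2.
buf = StringIO()
buf.write("digraph {\n")
buf.write("}\n")
print(buf.getvalue())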

@@ -1,5 +1,7 @@
#!/usr/bin/env python

from __future__ import print_function

"""Create a short probably-unique string for use as a umid= argument in a
Foolscap log() call, to make it easier to locate the source code that
generated the message. The main text of the log message is frequently
@@ -51,5 +53,5 @@ count = 1
if len(sys.argv) > 1:
count = int(sys.argv[1])
for i in range(count):
print make_id()
print(make_id())

@@ -1,13 +1,15 @@
#!/usr/bin/env python

from __future__ import print_function

from foolscap import Tub, eventual
from twisted.internet import reactor
import sys
import pprint

def oops(f):
print "ERROR"
print f
print("ERROR")
print(f)

def fetch(furl):
t = Tub()

@@ -1,5 +1,7 @@
# -*- python -*-

from __future__ import print_function

from twisted.internet import reactor
import sys

@@ -31,7 +33,7 @@ class CPUWatcherSubscriber(service.MultiService, Referenceable):
tub.connectTo(furl, self.connected)

def connected(self, rref):
print "subscribing"
print("subscribing")
d = rref.callRemote("get_averages")
d.addCallback(self.remote_averages)
d.addErrback(log.err)

@@ -1,5 +1,7 @@
# -*- python -*-

from __future__ import print_function

"""
# run this tool on a linux box in its own directory, with a file named
# 'pids.txt' describing which processes to watch. It will follow CPU usage of
@@ -20,7 +22,6 @@
# built-in graphs on web interface



import pickle, os.path, time, pprint
from twisted.application import internet, service, strports
from twisted.web import server, resource, http
@@ -210,7 +211,7 @@ class CPUWatcher(service.MultiService, resource.Resource, Referenceable):
row.append(self._average_N(pid, avg))
current.append(tuple(row))
self.current = current
print current
print(current)
for ob in self.observers:
eventual.eventually(self.notify, ob)

@@ -2,6 +2,8 @@

# feed this the results of 'tahoe catalog-shares' for all servers

from __future__ import print_function

import sys

chk_encodings = {}
@@ -45,23 +47,23 @@ sdmf_multiple_versions = [(si,lines)
sdmf_multiple_versions.sort()

if chk_multiple_encodings:
print
print "CHK multiple encodings:"
print()
print("CHK multiple encodings:")
for (si,lines) in chk_multiple_encodings:
print " " + si
print(" " + si)
for line in sorted(lines):
print " " + line
print(" " + line)
if sdmf_multiple_encodings:
print
print "SDMF multiple encodings:"
print()
print("SDMF multiple encodings:")
for (si,lines) in sdmf_multiple_encodings:
print " " + si
print(" " + si)
for line in sorted(lines):
print " " + line
print(" " + line)
if sdmf_multiple_versions:
print
print "SDMF multiple versions:"
print()
print("SDMF multiple versions:")
for (si,lines) in sdmf_multiple_versions:
print " " + si
print(" " + si)
for line in sorted(lines):
print " " + line
print(" " + line)

@@ -1,5 +1,7 @@
#! /usr/bin/env python

from __future__ import print_function

from foolscap import Tub
from foolscap.eventual import eventually
import sys
@@ -10,7 +12,7 @@ def go():
d = t.getReference(sys.argv[1])
d.addCallback(lambda rref: rref.callRemote("get_memory_usage"))
def _got(res):
print res
print(res)
reactor.stop()
d.addCallback(_got)

@@ -1,5 +1,7 @@
#!/usr/bin/env python

from __future__ import print_function

import os, sys, re
import urllib
import json
@@ -24,6 +26,6 @@ for (name, avg1, avg5, avg15) in current:

if len(sys.argv) > 1:
if sys.argv[1] == "config":
print configinfo.rstrip()
print(configinfo.rstrip())
sys.exit(0)
print data.rstrip()
print(data.rstrip())

@@ -5,18 +5,20 @@
# is left on all disks across the grid. The plugin should be configured with
# env_url= pointing at the diskwatcher.tac webport.

from __future__ import print_function

import os, sys, urllib, json

if len(sys.argv) > 1 and sys.argv[1] == "config":
print """\
print("""\
graph_title Tahoe Remaining Disk Space
graph_vlabel bytes remaining
graph_category tahoe
graph_info This graph shows the total amount of disk space left available in the grid
disk_left.label disk left
disk_left.draw LINE1"""
disk_left.draw LINE1""")
sys.exit(0)

url = os.environ["url"]
data = json.load(urllib.urlopen(url))["available"]
print "disk_left.value", data
print("disk_left.value", data)

@@ -6,10 +6,12 @@
# used. The plugin should be configured with env_url= pointing at the
# diskwatcher.tac webport.

from __future__ import print_function

import os, sys, urllib, json

if len(sys.argv) > 1 and sys.argv[1] == "config":
print """\
print("""\
graph_title Tahoe Total Disk Space
graph_vlabel bytes
graph_category tahoe
@@ -17,10 +19,10 @@ graph_info This graph shows the total amount of disk space present in the grid,
disk_total.label disk total
disk_total.draw LINE2
disk_used.label disk used
disk_used.draw LINE1"""
disk_used.draw LINE1""")
sys.exit(0)

url = os.environ["url"]
data = json.load(urllib.urlopen(url))
print "disk_total.value", data["total"]
print "disk_used.value", data["used"]
print("disk_total.value", data["total"])
print("disk_used.value", data["used"])

@@ -5,10 +5,12 @@
# is being used per unit time. The plugin should be configured with env_url=
# pointing at the diskwatcher.tac webport.

from __future__ import print_function

import os, sys, urllib, json

if len(sys.argv) > 1 and sys.argv[1] == "config":
print """\
print("""\
graph_title Tahoe Disk Usage Measurement
graph_vlabel bytes per second
graph_category tahoe
@@ -21,7 +23,7 @@ rate_1day.draw LINE1
rate_2wk.label (two week sample)
rate_2wk.draw LINE2
rate_4wk.label (four week sample)
rate_4wk.draw LINE2"""
rate_4wk.draw LINE2""")
sys.exit(0)

url = os.environ["url"]
@@ -31,10 +33,10 @@ data = dict([(name, growth)
for (name, timespan, growth, timeleft) in timespans])
# growth is in bytes per second
if "1hr" in data:
print "rate_1hr.value", data["1hr"]
print("rate_1hr.value", data["1hr"])
if "1day" in data:
print "rate_1day.value", data["1day"]
print("rate_1day.value", data["1day"])
if "2wk" in data:
print "rate_2wk.value", data["2wk"]
print("rate_2wk.value", data["2wk"])
if "4wk" in data:
print "rate_4wk.value", data["4wk"]
print("rate_4wk.value", data["4wk"])

@@ -5,18 +5,20 @@
# used on all disks across the grid. The plugin should be configured with
# env_url= pointing at the diskwatcher.tac webport.

from __future__ import print_function

import os, sys, urllib, json

if len(sys.argv) > 1 and sys.argv[1] == "config":
print """\
print("""\
graph_title Tahoe Total Disk Space Used
graph_vlabel bytes used
graph_category tahoe
graph_info This graph shows the total amount of disk space used across the grid
disk_used.label disk used
disk_used.draw LINE1"""
disk_used.draw LINE1""")
sys.exit(0)

url = os.environ["url"]
data = json.load(urllib.urlopen(url))["used"]
print "disk_used.value", data
print("disk_used.value", data)

@@ -5,10 +5,12 @@
# left before the grid fills up. The plugin should be configured with
# env_url= pointing at the diskwatcher.tac webport.

from __future__ import print_function

import os, sys, urllib, json

if len(sys.argv) > 1 and sys.argv[1] == "config":
print """\
print("""\
graph_title Tahoe Remaining Time Predictor
graph_vlabel days remaining
graph_category tahoe
@@ -20,7 +22,7 @@ days_1day.draw LINE1
days_2wk.label days left (two week sample)
days_2wk.draw LINE2
days_4wk.label days left (four week sample)
days_4wk.draw LINE2"""
days_4wk.draw LINE2""")
sys.exit(0)

url = os.environ["url"]
@@ -32,10 +34,10 @@ data = dict([(name, timeleft)
# timeleft is in seconds
DAY = 24*60*60
if "1hr" in data:
print "days_1hr.value", data["1hr"]/DAY
print("days_1hr.value", data["1hr"]/DAY)
if "1day" in data:
print "days_1day.value", data["1day"]/DAY
print("days_1day.value", data["1day"]/DAY)
if "2wk" in data:
print "days_2wk.value", data["2wk"]/DAY
print("days_2wk.value", data["2wk"]/DAY)
if "4wk" in data:
print "days_4wk.value", data["4wk"]/DAY
print("days_4wk.value", data["4wk"]/DAY)

@@ -1,15 +1,17 @@
#!/usr/bin/env python

from __future__ import print_function

import sys, os.path

if len(sys.argv) > 1 and sys.argv[1] == "config":
print """\
print("""\
graph_title Tahoe File Estimate
graph_vlabel files
graph_category tahoe
graph_info This graph shows the estimated number of files and directories present in the grid
files.label files
files.draw LINE2"""
files.draw LINE2""")
sys.exit(0)

# Edit this to point at some subset of storage directories.
@@ -46,4 +48,4 @@ correction = 1+no_chance
#print "correction", correction

files = unique_strings * (32*32/len(sections)) * correction
print "files.value %d" % int(files)
print("files.value %d" % int(files))

@@ -18,6 +18,8 @@
# env.basedir_NODE3 /path/to/node3
#

from __future__ import print_function

import os, sys

nodedirs = []
@@ -41,7 +43,7 @@ for nodename, basedir in nodedirs:

if len(sys.argv) > 1:
if sys.argv[1] == "config":
print configinfo.rstrip()
print(configinfo.rstrip())
sys.exit(0)

for nodename, basedir in nodedirs:
@@ -52,5 +54,5 @@ for nodename, basedir in nodedirs:
if dirpath == root and "incoming" in dirnames:
dirnames.remove("incoming")
shares += len(filenames)
print "%s.value %d" % (nodename, shares)
print("%s.value %d" % (nodename, shares))

@@ -1,5 +1,7 @@
#!/usr/bin/env python

from __future__ import print_function

import os, sys
import urllib
import json
@@ -15,11 +17,11 @@ fetched.draw LINE2

if len(sys.argv) > 1:
if sys.argv[1] == "config":
print configinfo.rstrip()
print(configinfo.rstrip())
sys.exit(0)

url = os.environ["url"]

data = json.loads(urllib.urlopen(url).read())
print "fetched.value %d" % data["chk_upload_helper.active_uploads"]
print("fetched.value %d" % data["chk_upload_helper.active_uploads"])

@@ -1,5 +1,7 @@
#!/usr/bin/env python

from __future__ import print_function

import os, sys
import urllib
import json
@@ -17,10 +19,10 @@ fetched.min 0

if len(sys.argv) > 1:
if sys.argv[1] == "config":
print configinfo.rstrip()
print(configinfo.rstrip())
sys.exit(0)

url = os.environ["url"]

data = json.loads(urllib.urlopen(url).read())
print "fetched.value %d" % data["chk_upload_helper.fetched_bytes"]
print("fetched.value %d" % data["chk_upload_helper.fetched_bytes"])

@@ -1,5 +1,7 @@
#!/usr/bin/env python

from __future__ import print_function

import os, sys
import urllib
import json
@@ -19,13 +21,13 @@ storage_client.draw LINE2

if len(sys.argv) > 1:
if sys.argv[1] == "config":
print configinfo.rstrip()
print(configinfo.rstrip())
sys.exit(0)

url = os.environ["url"]

data = json.loads(urllib.urlopen(url).read())
print "storage_server.value %d" % data["announcement_summary"]["storage"]
print "storage_hosts.value %d" % data["announcement_distinct_hosts"]["storage"]
print "storage_client.value %d" % data["subscription_summary"]["storage"]
print("storage_server.value %d" % data["announcement_summary"]["storage"])
print("storage_hosts.value %d" % data["announcement_distinct_hosts"]["storage"])
print("storage_client.value %d" % data["subscription_summary"]["storage"])

@@ -4,6 +4,8 @@
# by 'allmydata start', then extracts the amount of memory they consume (both
# VmSize and VmRSS) from /proc

from __future__ import print_function

import os, sys, re

# for testing
@@ -47,7 +49,7 @@ graph_info This graph shows the memory used by specific processes
if f == "VmData":
configinfo += "%s_%s.graph no\n" % (nodename, f)

print configinfo
print(configinfo)
sys.exit(0)

nodestats = {}
@@ -67,4 +69,4 @@ for node,stats in nodestats.items():
for f,value in stats.items():
# TODO: not sure if /proc/%d/status means 1000 or 1024 when it says
# 'kB'
print "%s_%s.value %d" % (node, f, 1024*value)
print("%s_%s.value %d" % (node, f, 1024*value))

@@ -27,10 +27,12 @@
# This plugin should be configured with env_diskwatcher_url= pointing at the
# diskwatcher.tac webport, and env_deepsize_url= pointing at the PHP script.

from __future__ import print_function

import os, sys, urllib, json

if len(sys.argv) > 1 and sys.argv[1] == "config":
print """\
print("""\
graph_title Tahoe Overhead Calculator
graph_vlabel Percentage
graph_category tahoe
@@ -40,7 +42,7 @@ overhead.draw LINE2
inactive.label inactive account usage
inactive.draw LINE1
effective_expansion.label Effective Expansion Factor
effective_expansion.graph no"""
effective_expansion.graph no""")
sys.exit(0)

diskwatcher_url = os.environ["diskwatcher_url"]
@@ -54,12 +56,12 @@ ideal = expansion * deepsize["all"]
overhead = (total - ideal) / ideal
if overhead > 0:
# until all the storage-servers come online, this number will be nonsense
print "overhead.value %f" % (100.0 * overhead)
print("overhead.value %f" % (100.0 * overhead))

# same for this one
effective_expansion = total / deepsize["all"]
print "effective_expansion.value %f" % effective_expansion
print("effective_expansion.value %f" % effective_expansion)

# this value remains valid, though
inactive_savings = (deepsize["all"] - deepsize["active"]) / deepsize["active"]
print "inactive.value %f" % (100.0 * inactive_savings)
print("inactive.value %f" % (100.0 * inactive_savings))

@@ -1,5 +1,7 @@
#!/usr/bin/env python

from __future__ import print_function

import os, sys
import urllib

@@ -14,10 +16,10 @@ space.draw LINE2

if len(sys.argv) > 1:
if sys.argv[1] == "config":
print configinfo.rstrip()
print(configinfo.rstrip())
sys.exit(0)

url = os.environ["url"]

data = int(urllib.urlopen(url).read().strip())
print "space.value %d" % data
print("space.value %d" % data)

@@ -42,6 +42,8 @@
# of course, these URLs must match the webports you have configured into the
# storage nodes.

from __future__ import print_function

import os, sys
import urllib
import json
@@ -78,7 +80,7 @@ for nodename, url in node_urls:

if len(sys.argv) > 1:
if sys.argv[1] == "config":
print configinfo.rstrip()
print(configinfo.rstrip())
sys.exit(0)

for nodename, url in node_urls:
@@ -89,5 +91,5 @@ for nodename, url in node_urls:
p_key = percentile + "_percentile"
key = "storage_server.latencies.%s.%s" % (operation, p_key)
value = data["stats"][key]
print "%s.value %s" % (nodename, value)
print("%s.value %s" % (nodename, value))

@@ -32,6 +32,8 @@
# of course, these URLs must match the webports you have configured into the
# storage nodes.

from __future__ import print_function

import os, sys
import urllib
import json
@@ -64,12 +66,12 @@ for nodename, url in node_urls:

if len(sys.argv) > 1:
if sys.argv[1] == "config":
print configinfo.rstrip()
print(configinfo.rstrip())
sys.exit(0)

for nodename, url in node_urls:
data = json.loads(urllib.urlopen(url).read())
key = "storage_server.%s" % operation
value = data["counters"][key]
print "%s.value %s" % (nodename, value)
print("%s.value %s" % (nodename, value))

@@ -5,6 +5,8 @@
# then extrapolate to guess how many weeks/months/years of storage space we
# have left, and output it to another munin graph

from __future__ import print_function

import sys, os, time
import rrdtool

@@ -82,7 +84,7 @@ def write_to_file(samples):
os.rename(WEBFILE + ".tmp", WEBFILE)

if len(sys.argv) > 1 and sys.argv[1] == "config":
print """\
print("""\
graph_title Tahoe Remaining Space Predictor
graph_vlabel days remaining
graph_category tahoe
@@ -90,17 +92,17 @@ graph_info This graph shows the estimated number of days left until storage spac
days_2wk.label days left (2wk sample)
days_2wk.draw LINE2
days_4wk.label days left (4wk sample)
days_4wk.draw LINE2"""
days_4wk.draw LINE2""")
sys.exit(0)

#rsync_rrd()
samples = {}
remaining_4wk = predict_future("4wk")
if remaining_4wk is not None:
print "days_4wk.value", remaining_4wk
print("days_4wk.value", remaining_4wk)
samples["remaining_4wk"] = remaining_4wk
remaining_2wk = predict_future("2wk")
if remaining_2wk is not None:
print "days_2wk.value", remaining_2wk
print("days_2wk.value", remaining_2wk)
samples["remaining_2wk"] = remaining_2wk
write_to_file(samples)

@@ -1,5 +1,7 @@
#!/usr/bin/env python

from __future__ import print_function

import os
import json
import re
@@ -460,11 +462,11 @@ def main(argv):
value = nodestats['stats'][category].get(statid)
if value is not None:
args = { 'name': name, 'value': value }
print plugin_conf[output_section] % args
print(plugin_conf[output_section] % args)

if len(argv) > 1:
if sys.argv[1] == 'config':
print plugin_conf['configheader']
print(plugin_conf['configheader'])
output_nodes('graph_config', False)
sys.exit(0)

@@ -18,6 +18,8 @@
# Allmydata-tahoe must be installed on the system where this plugin is used,
# since it imports a utility module from allmydata.utils .

from __future__ import print_function

import os, sys
import commands

@@ -44,7 +46,7 @@ for nodename, basedir in nodedirs:

if len(sys.argv) > 1:
if sys.argv[1] == "config":
print configinfo.rstrip()
print(configinfo.rstrip())
sys.exit(0)

for nodename, basedir in nodedirs:
@@ -54,5 +56,5 @@ for nodename, basedir in nodedirs:
sys.exit(rc)
bytes, extra = out.split()
usage = int(bytes)
print "%s.value %d" % (nodename, usage)
print("%s.value %d" % (nodename, usage))
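All of the munin plugins touched above share the same two-mode shape: print a munin config block when invoked with a "config" argument, otherwise print name.value lines for the current sample. A generic sketch of that shape, with a placeholder metric name and data source rather than any real Tahoe-LAFS stats URL:

#!/usr/bin/env python

from __future__ import print_function

import sys

def fetch_value():
    # Placeholder: a real plugin reads a JSON stats URL or a node directory.
    return 12345

if len(sys.argv) > 1 and sys.argv[1] == "config":
    print("""\
graph_title Example Tahoe Metric
graph_vlabel bytes
graph_category tahoe
example.label example metric
example.draw LINE1""")
    sys.exit(0)

print("example.value %d" % fetch_value())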