mirror of
https://github.com/tahoe-lafs/tahoe-lafs.git
synced 2024-12-22 06:17:50 +00:00
Merge remote-tracking branch 'origin/master' into 3283.backdoor-statement-signatures
This commit is contained in:
commit
17dbbe0642
@ -1,95 +0,0 @@
|
|||||||
# adapted from https://packaging.python.org/en/latest/appveyor/
|
|
||||||
|
|
||||||
environment:
|
|
||||||
|
|
||||||
matrix:
|
|
||||||
|
|
||||||
# For Python versions available on Appveyor, see
|
|
||||||
# http://www.appveyor.com/docs/installed-software#python
|
|
||||||
- PYTHON: "C:\\Python27"
|
|
||||||
- PYTHON: "C:\\Python27-x64"
|
|
||||||
# DISTUTILS_USE_SDK: "1"
|
|
||||||
# TOX_TESTENV_PASSENV: "DISTUTILS_USE_SDK INCLUDE LIB"
|
|
||||||
|
|
||||||
install:
|
|
||||||
- |
|
|
||||||
%PYTHON%\python.exe -m pip install -U pip
|
|
||||||
%PYTHON%\python.exe -m pip install wheel tox==3.9.0 virtualenv
|
|
||||||
|
|
||||||
# note:
|
|
||||||
# %PYTHON% has: python.exe
|
|
||||||
# %PYTHON%\Scripts has: pip.exe, tox.exe (and others installed by bare pip)
|
|
||||||
|
|
||||||
# We have a custom "build" system. We don't need MSBuild or whatever.
|
|
||||||
build: off
|
|
||||||
|
|
||||||
# Do not build feature branch with open pull requests. This is documented but
|
|
||||||
# it's not clear it does anything.
|
|
||||||
skip_branch_with_pr: true
|
|
||||||
|
|
||||||
# This, perhaps, is effective.
|
|
||||||
branches:
|
|
||||||
# whitelist
|
|
||||||
only:
|
|
||||||
- 'master'
|
|
||||||
|
|
||||||
skip_commits:
|
|
||||||
files:
|
|
||||||
# The Windows builds are unaffected by news fragments.
|
|
||||||
- 'newsfragments/*'
|
|
||||||
# Also, all this build junk.
|
|
||||||
- '.circleci/*'
|
|
||||||
- '.lgtm.yml'
|
|
||||||
- '.travis.yml'
|
|
||||||
|
|
||||||
# we run from C:\projects\tahoe-lafs
|
|
||||||
|
|
||||||
test_script:
|
|
||||||
# Put your test command here.
|
|
||||||
# Note that you must use the environment variable %PYTHON% to refer to
|
|
||||||
# the interpreter you're using - Appveyor does not do anything special
|
|
||||||
# to put the Python version you want to use on PATH.
|
|
||||||
- |
|
|
||||||
%PYTHON%\Scripts\tox.exe -e coverage
|
|
||||||
%PYTHON%\Scripts\tox.exe -e pyinstaller
|
|
||||||
# To verify that the resultant PyInstaller-generated binary executes
|
|
||||||
# cleanly (i.e., that it terminates with an exit code of 0 and isn't
|
|
||||||
# failing due to import/packaging-related errors, etc.).
|
|
||||||
- dist\Tahoe-LAFS\tahoe.exe --version
|
|
||||||
|
|
||||||
after_test:
|
|
||||||
# This builds the main tahoe wheel, and wheels for all dependencies.
|
|
||||||
# Again, you only need build.cmd if you're building C extensions for
|
|
||||||
# 64-bit Python 3.3/3.4. And you need to use %PYTHON% to get the correct
|
|
||||||
# interpreter. If _trial_temp still exists, the "pip wheel" fails on
|
|
||||||
# _trial_temp\local_dir (not sure why).
|
|
||||||
- |
|
|
||||||
copy _trial_temp\test.log trial_test_log.txt
|
|
||||||
rd /s /q _trial_temp
|
|
||||||
%PYTHON%\python.exe setup.py bdist_wheel
|
|
||||||
%PYTHON%\python.exe -m pip wheel -w dist .
|
|
||||||
- |
|
|
||||||
%PYTHON%\python.exe -m pip install codecov "coverage ~= 4.5"
|
|
||||||
%PYTHON%\python.exe -m coverage xml -o coverage.xml -i
|
|
||||||
%PYTHON%\python.exe -m codecov -X search -X gcov -f coverage.xml
|
|
||||||
|
|
||||||
artifacts:
|
|
||||||
# bdist_wheel puts your built wheel in the dist directory
|
|
||||||
# "pip wheel -w dist ." puts all the dependency wheels there too
|
|
||||||
# this gives us a zipfile with everything
|
|
||||||
- path: 'dist\*'
|
|
||||||
- path: trial_test_log.txt
|
|
||||||
name: Trial test.log
|
|
||||||
- path: eliot.log
|
|
||||||
name: Eliot test log
|
|
||||||
|
|
||||||
on_failure:
|
|
||||||
# Artifacts are not normally uploaded when the job fails. To get the test
|
|
||||||
# logs, we have to push them ourselves.
|
|
||||||
- ps: Push-AppveyorArtifact _trial_temp\test.log -Filename trial.log
|
|
||||||
- ps: Push-AppveyorArtifact eliot.log -Filename eliot.log
|
|
||||||
|
|
||||||
#on_success:
|
|
||||||
# You can use this step to upload your artifacts to a public website.
|
|
||||||
# See Appveyor's documentation for more details. Or you can simply
|
|
||||||
# access your wheels from the Appveyor "artifacts" tab for your build.
|
|
@ -1,5 +1,6 @@
|
|||||||
ARG TAG
|
ARG TAG
|
||||||
FROM centos:${TAG}
|
FROM centos:${TAG}
|
||||||
|
ARG PYTHON_VERSION
|
||||||
|
|
||||||
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
||||||
ENV VIRTUALENV_PATH /tmp/venv
|
ENV VIRTUALENV_PATH /tmp/venv
|
||||||
@ -11,11 +12,11 @@ RUN yum install --assumeyes \
|
|||||||
git \
|
git \
|
||||||
sudo \
|
sudo \
|
||||||
make automake gcc gcc-c++ \
|
make automake gcc gcc-c++ \
|
||||||
python \
|
python${PYTHON_VERSION} \
|
||||||
python-devel \
|
python${PYTHON_VERSION}-devel \
|
||||||
libffi-devel \
|
libffi-devel \
|
||||||
openssl-devel \
|
openssl-devel \
|
||||||
libyaml-devel \
|
libyaml \
|
||||||
/usr/bin/virtualenv \
|
/usr/bin/virtualenv \
|
||||||
net-tools
|
net-tools
|
||||||
|
|
||||||
@ -23,4 +24,4 @@ RUN yum install --assumeyes \
|
|||||||
# *update* this checkout on each job run, saving us more time per-job.
|
# *update* this checkout on each job run, saving us more time per-job.
|
||||||
COPY . ${BUILD_SRC_ROOT}
|
COPY . ${BUILD_SRC_ROOT}
|
||||||
|
|
||||||
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python2.7"
|
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
ARG TAG
|
ARG TAG
|
||||||
FROM debian:${TAG}
|
FROM debian:${TAG}
|
||||||
|
ARG PYTHON_VERSION
|
||||||
|
|
||||||
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
||||||
ENV VIRTUALENV_PATH /tmp/venv
|
ENV VIRTUALENV_PATH /tmp/venv
|
||||||
@ -8,22 +9,22 @@ ENV BUILD_SRC_ROOT /tmp/project
|
|||||||
|
|
||||||
RUN apt-get --quiet update && \
|
RUN apt-get --quiet update && \
|
||||||
apt-get --quiet --yes install \
|
apt-get --quiet --yes install \
|
||||||
git \
|
git \
|
||||||
lsb-release \
|
lsb-release \
|
||||||
sudo \
|
sudo \
|
||||||
build-essential \
|
build-essential \
|
||||||
python2.7 \
|
python${PYTHON_VERSION} \
|
||||||
python2.7-dev \
|
python${PYTHON_VERSION}-dev \
|
||||||
libffi-dev \
|
libffi-dev \
|
||||||
libssl-dev \
|
libssl-dev \
|
||||||
libyaml-dev \
|
libyaml-dev \
|
||||||
virtualenv
|
virtualenv
|
||||||
|
|
||||||
# Get the project source. This is better than it seems. CircleCI will
|
# Get the project source. This is better than it seems. CircleCI will
|
||||||
# *update* this checkout on each job run, saving us more time per-job.
|
# *update* this checkout on each job run, saving us more time per-job.
|
||||||
COPY . ${BUILD_SRC_ROOT}
|
COPY . ${BUILD_SRC_ROOT}
|
||||||
|
|
||||||
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python2.7"
|
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"
|
||||||
|
|
||||||
# Only the integration tests currently need this but it doesn't hurt to always
|
# Only the integration tests currently need this but it doesn't hurt to always
|
||||||
# have it present and it's simpler than building a whole extra image just for
|
# have it present and it's simpler than building a whole extra image just for
|
||||||
|
@ -1,5 +1,6 @@
|
|||||||
ARG TAG
|
ARG TAG
|
||||||
FROM fedora:${TAG}
|
FROM fedora:${TAG}
|
||||||
|
ARG PYTHON_VERSION
|
||||||
|
|
||||||
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
||||||
ENV VIRTUALENV_PATH /tmp/venv
|
ENV VIRTUALENV_PATH /tmp/venv
|
||||||
@ -11,8 +12,8 @@ RUN yum install --assumeyes \
|
|||||||
git \
|
git \
|
||||||
sudo \
|
sudo \
|
||||||
make automake gcc gcc-c++ \
|
make automake gcc gcc-c++ \
|
||||||
python \
|
python${PYTHON_VERSION} \
|
||||||
python-devel \
|
python${PYTHON_VERSION}-devel \
|
||||||
libffi-devel \
|
libffi-devel \
|
||||||
openssl-devel \
|
openssl-devel \
|
||||||
libyaml-devel \
|
libyaml-devel \
|
||||||
@ -23,4 +24,4 @@ RUN yum install --assumeyes \
|
|||||||
# *update* this checkout on each job run, saving us more time per-job.
|
# *update* this checkout on each job run, saving us more time per-job.
|
||||||
COPY . ${BUILD_SRC_ROOT}
|
COPY . ${BUILD_SRC_ROOT}
|
||||||
|
|
||||||
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python2.7"
|
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
FROM pypy:2.7-7.1.1-jessie
|
FROM pypy:2.7-buster
|
||||||
|
|
||||||
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
||||||
ENV VIRTUALENV_PATH /tmp/venv
|
ENV VIRTUALENV_PATH /tmp/venv
|
||||||
|
@ -1,49 +0,0 @@
|
|||||||
ARG TAG
|
|
||||||
FROM vbatts/slackware:${TAG}
|
|
||||||
|
|
||||||
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
|
||||||
ENV VIRTUALENV_PATH /tmp/venv
|
|
||||||
# This will get updated by the CircleCI checkout step.
|
|
||||||
ENV BUILD_SRC_ROOT /tmp/project
|
|
||||||
|
|
||||||
# Be careful with slackpkg. If the package name given doesn't match anything,
|
|
||||||
# slackpkg still claims to succeed but you're totally screwed. Slackware
|
|
||||||
# updates versions of packaged software so including too much version prefix
|
|
||||||
# is a good way to have your install commands suddenly begin not installing
|
|
||||||
# anything.
|
|
||||||
RUN slackpkg update && \
|
|
||||||
slackpkg install \
|
|
||||||
openssh-7 git-2 \
|
|
||||||
ca-certificates \
|
|
||||||
sudo-1 \
|
|
||||||
make-4 \
|
|
||||||
automake-1 \
|
|
||||||
kernel-headers \
|
|
||||||
glibc-2 \
|
|
||||||
binutils-2 \
|
|
||||||
gcc-5 \
|
|
||||||
gcc-g++-5 \
|
|
||||||
python-2 \
|
|
||||||
libffi-3 \
|
|
||||||
libyaml-0 \
|
|
||||||
sqlite-3 \
|
|
||||||
icu4c-56 \
|
|
||||||
libmpc-1 </dev/null && \
|
|
||||||
slackpkg upgrade \
|
|
||||||
openssl-1 </dev/null
|
|
||||||
|
|
||||||
# neither virtualenv nor pip is packaged.
|
|
||||||
# do it the hard way.
|
|
||||||
# and it is extra hard since it is slackware.
|
|
||||||
RUN slackpkg install \
|
|
||||||
cyrus-sasl-2 \
|
|
||||||
curl-7 </dev/null && \
|
|
||||||
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && \
|
|
||||||
python get-pip.py && \
|
|
||||||
pip install virtualenv
|
|
||||||
|
|
||||||
# Get the project source. This is better than it seems. CircleCI will
|
|
||||||
# *update* this checkout on each job run, saving us more time per-job.
|
|
||||||
COPY . ${BUILD_SRC_ROOT}
|
|
||||||
|
|
||||||
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python2.7"
|
|
@ -1,5 +1,6 @@
|
|||||||
ARG TAG
|
ARG TAG
|
||||||
FROM ubuntu:${TAG}
|
FROM ubuntu:${TAG}
|
||||||
|
ARG PYTHON_VERSION
|
||||||
|
|
||||||
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
ENV WHEELHOUSE_PATH /tmp/wheelhouse
|
||||||
ENV VIRTUALENV_PATH /tmp/venv
|
ENV VIRTUALENV_PATH /tmp/venv
|
||||||
@ -13,8 +14,8 @@ RUN apt-get --quiet update && \
|
|||||||
apt-get --quiet --yes install \
|
apt-get --quiet --yes install \
|
||||||
sudo \
|
sudo \
|
||||||
build-essential \
|
build-essential \
|
||||||
python2.7 \
|
python${PYTHON_VERSION} \
|
||||||
python2.7-dev \
|
python${PYTHON_VERSION}-dev \
|
||||||
libffi-dev \
|
libffi-dev \
|
||||||
libssl-dev \
|
libssl-dev \
|
||||||
libyaml-dev \
|
libyaml-dev \
|
||||||
@ -26,4 +27,4 @@ RUN apt-get --quiet update && \
|
|||||||
# *update* this checkout on each job run, saving us more time per-job.
|
# *update* this checkout on each job run, saving us more time per-job.
|
||||||
COPY . ${BUILD_SRC_ROOT}
|
COPY . ${BUILD_SRC_ROOT}
|
||||||
|
|
||||||
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python2.7"
|
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"
|
||||||
|
@ -1,44 +1,86 @@
|
|||||||
# https://circleci.com/docs/2.0/
|
# https://circleci.com/docs/2.0/
|
||||||
|
|
||||||
version: 2
|
# We use version 2.1 of CircleCI's configuration format (the docs are still at
|
||||||
|
# the 2.0 link) in order to have access to Windows executors. This means we
|
||||||
|
# can't use dots in job names anymore. They have a new "parameters" feature
|
||||||
|
# that is supposed to remove the need to have version numbers in job names (the
|
||||||
|
# source of our dots), but switching to that is going to be a bigger refactor:
|
||||||
|
#
|
||||||
|
# https://discuss.circleci.com/t/v2-1-job-name-validation/31123
|
||||||
|
# https://circleci.com/docs/2.0/reusing-config/
|
||||||
|
#
|
||||||
|
version: 2.1
|
||||||
|
|
||||||
workflows:
|
workflows:
|
||||||
version: 2
|
|
||||||
ci:
|
ci:
|
||||||
jobs:
|
jobs:
|
||||||
# Platforms
|
# Start with jobs testing various platforms.
|
||||||
- "debian-9"
|
|
||||||
|
# Every job that pulls a Docker image from Docker Hub needs to provide
|
||||||
|
# credentials for that pull operation to avoid being subjected to
|
||||||
|
# unauthenticated pull limits shared across all of CircleCI. Use this
|
||||||
|
# first job to define a yaml anchor that can be used to supply a
|
||||||
|
# CircleCI job context which makes Docker Hub credentials available in
|
||||||
|
# the environment.
|
||||||
|
#
|
||||||
|
# Contexts are managed in the CircleCI web interface:
|
||||||
|
#
|
||||||
|
# https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts
|
||||||
|
- "debian-9": &DOCKERHUB_CONTEXT
|
||||||
|
context: "dockerhub-auth"
|
||||||
|
|
||||||
- "debian-8":
|
- "debian-8":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
requires:
|
requires:
|
||||||
- "debian-9"
|
- "debian-9"
|
||||||
|
|
||||||
- "ubuntu-18.04"
|
- "ubuntu-20-04":
|
||||||
- "ubuntu-16.04":
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
- "ubuntu-18-04":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
requires:
|
requires:
|
||||||
- "ubuntu-18.04"
|
- "ubuntu-20-04"
|
||||||
|
- "ubuntu-16-04":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
requires:
|
||||||
|
- "ubuntu-20-04"
|
||||||
|
|
||||||
- "fedora-29"
|
- "fedora-29":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
- "fedora-28":
|
- "fedora-28":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
requires:
|
requires:
|
||||||
- "fedora-29"
|
- "fedora-29"
|
||||||
|
|
||||||
- "centos-7"
|
- "centos-8":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
|
||||||
- "slackware-14.2"
|
- "nixos-19-09":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
|
||||||
- "nixos-19.09"
|
# Test against PyPy 2.7
|
||||||
|
- "pypy27-buster":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
|
||||||
# Test against PyPy 2.7/7.1.1
|
# Just one Python 3.6 configuration while the port is in-progress.
|
||||||
- "pypy2.7-7.1"
|
- "python36":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
|
||||||
# Other assorted tasks and configurations
|
# Other assorted tasks and configurations
|
||||||
- "lint"
|
- "lint":
|
||||||
- "pyinstaller"
|
<<: *DOCKERHUB_CONTEXT
|
||||||
- "deprecations"
|
- "pyinstaller":
|
||||||
- "c-locale"
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
- "deprecations":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
- "c-locale":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
# Any locale other than C or UTF-8.
|
# Any locale other than C or UTF-8.
|
||||||
- "another-locale"
|
- "another-locale":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
|
||||||
- "integration":
|
- "integration":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
requires:
|
requires:
|
||||||
# If the unit test suite doesn't pass, don't bother running the
|
# If the unit test suite doesn't pass, don't bother running the
|
||||||
# integration tests.
|
# integration tests.
|
||||||
@ -46,7 +88,8 @@ workflows:
|
|||||||
|
|
||||||
# Generate the underlying data for a visualization to aid with Python 3
|
# Generate the underlying data for a visualization to aid with Python 3
|
||||||
# porting.
|
# porting.
|
||||||
- "build-porting-depgraph"
|
- "build-porting-depgraph":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
|
||||||
images:
|
images:
|
||||||
# Build the Docker images used by the ci jobs. This makes the ci jobs
|
# Build the Docker images used by the ci jobs. This makes the ci jobs
|
||||||
@ -61,21 +104,55 @@ workflows:
|
|||||||
- "master"
|
- "master"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
- "build-image-debian-8"
|
- "build-image-debian-8":
|
||||||
- "build-image-debian-9"
|
<<: *DOCKERHUB_CONTEXT
|
||||||
- "build-image-ubuntu-16.04"
|
- "build-image-debian-9":
|
||||||
- "build-image-ubuntu-18.04"
|
<<: *DOCKERHUB_CONTEXT
|
||||||
- "build-image-fedora-28"
|
- "build-image-ubuntu-16-04":
|
||||||
- "build-image-fedora-29"
|
<<: *DOCKERHUB_CONTEXT
|
||||||
- "build-image-centos-7"
|
- "build-image-ubuntu-18-04":
|
||||||
- "build-image-slackware-14.2"
|
<<: *DOCKERHUB_CONTEXT
|
||||||
- "build-image-pypy-2.7-7.1.1-jessie"
|
- "build-image-ubuntu-20-04":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
- "build-image-fedora-28":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
- "build-image-fedora-29":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
- "build-image-centos-8":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
- "build-image-pypy27-buster":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
- "build-image-python36-ubuntu":
|
||||||
|
<<: *DOCKERHUB_CONTEXT
|
||||||
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
dockerhub-auth-template:
|
||||||
|
# This isn't a real job. It doesn't get scheduled as part of any
|
||||||
|
# workflow. Instead, it's just a place we can hang a yaml anchor to
|
||||||
|
# finish the Docker Hub authentication configuration. Workflow jobs using
|
||||||
|
# the DOCKERHUB_CONTEXT anchor will have access to the environment
|
||||||
|
# variables used here. These variables will allow the Docker Hub image
|
||||||
|
# pull to be authenticated and hopefully avoid hitting and rate limits.
|
||||||
|
docker: &DOCKERHUB_AUTH
|
||||||
|
- image: "null"
|
||||||
|
auth:
|
||||||
|
username: $DOCKERHUB_USERNAME
|
||||||
|
password: $DOCKERHUB_PASSWORD
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- run:
|
||||||
|
name: "CircleCI YAML schema conformity"
|
||||||
|
command: |
|
||||||
|
# This isn't a real command. We have to have something in this
|
||||||
|
# space, though, or the CircleCI yaml schema validator gets angry.
|
||||||
|
# Since this job is never scheduled this step is never run so the
|
||||||
|
# actual value here is irrelevant.
|
||||||
|
|
||||||
lint:
|
lint:
|
||||||
docker:
|
docker:
|
||||||
- image: "circleci/python:2"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "circleci/python:2"
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- "checkout"
|
- "checkout"
|
||||||
@ -92,7 +169,8 @@ jobs:
|
|||||||
|
|
||||||
pyinstaller:
|
pyinstaller:
|
||||||
docker:
|
docker:
|
||||||
- image: "circleci/python:2"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "circleci/python:2"
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- "checkout"
|
- "checkout"
|
||||||
@ -117,7 +195,8 @@ jobs:
|
|||||||
|
|
||||||
debian-9: &DEBIAN
|
debian-9: &DEBIAN
|
||||||
docker:
|
docker:
|
||||||
- image: "tahoelafsci/debian:9"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "tahoelafsci/debian:9-py2.7"
|
||||||
user: "nobody"
|
user: "nobody"
|
||||||
|
|
||||||
environment: &UTF_8_ENVIRONMENT
|
environment: &UTF_8_ENVIRONMENT
|
||||||
@ -140,6 +219,8 @@ jobs:
|
|||||||
# we maintain.
|
# we maintain.
|
||||||
WHEELHOUSE_PATH: &WHEELHOUSE_PATH "/tmp/wheelhouse"
|
WHEELHOUSE_PATH: &WHEELHOUSE_PATH "/tmp/wheelhouse"
|
||||||
PIP_FIND_LINKS: "file:///tmp/wheelhouse"
|
PIP_FIND_LINKS: "file:///tmp/wheelhouse"
|
||||||
|
# Upload the coverage report.
|
||||||
|
UPLOAD_COVERAGE: "yes"
|
||||||
|
|
||||||
# pip cannot install packages if the working directory is not readable.
|
# pip cannot install packages if the working directory is not readable.
|
||||||
# We want to run a lot of steps as nobody instead of as root.
|
# We want to run a lot of steps as nobody instead of as root.
|
||||||
@ -188,26 +269,32 @@ jobs:
|
|||||||
- run: &SUBMIT_COVERAGE
|
- run: &SUBMIT_COVERAGE
|
||||||
name: "Submit coverage results"
|
name: "Submit coverage results"
|
||||||
command: |
|
command: |
|
||||||
/tmp/venv/bin/codecov
|
if [ -n "${UPLOAD_COVERAGE}" ]; then
|
||||||
|
/tmp/venv/bin/codecov
|
||||||
|
fi
|
||||||
|
|
||||||
|
|
||||||
debian-8:
|
debian-8:
|
||||||
<<: *DEBIAN
|
<<: *DEBIAN
|
||||||
docker:
|
docker:
|
||||||
- image: "tahoelafsci/debian:8"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "tahoelafsci/debian:8-py2.7"
|
||||||
user: "nobody"
|
user: "nobody"
|
||||||
|
|
||||||
|
|
||||||
pypy2.7-7.1:
|
pypy27-buster:
|
||||||
<<: *DEBIAN
|
<<: *DEBIAN
|
||||||
docker:
|
docker:
|
||||||
- image: "tahoelafsci/pypy:2.7-7.1.1-jessie"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "tahoelafsci/pypy:buster-py2"
|
||||||
user: "nobody"
|
user: "nobody"
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
<<: *UTF_8_ENVIRONMENT
|
<<: *UTF_8_ENVIRONMENT
|
||||||
TAHOE_LAFS_TOX_ENVIRONMENT: "pypy27-coverage"
|
# We don't do coverage since it makes PyPy far too slow:
|
||||||
ALLOWED_FAILURE: "yes"
|
TAHOE_LAFS_TOX_ENVIRONMENT: "pypy27"
|
||||||
|
# Since we didn't collect it, don't upload it.
|
||||||
|
UPLOAD_COVERAGE: ""
|
||||||
|
|
||||||
|
|
||||||
c-locale:
|
c-locale:
|
||||||
@ -236,6 +323,8 @@ jobs:
|
|||||||
TAHOE_LAFS_TOX_ENVIRONMENT: "deprecations,upcoming-deprecations"
|
TAHOE_LAFS_TOX_ENVIRONMENT: "deprecations,upcoming-deprecations"
|
||||||
# Put the logs somewhere we can report them.
|
# Put the logs somewhere we can report them.
|
||||||
TAHOE_LAFS_WARNINGS_LOG: "/tmp/artifacts/deprecation-warnings.log"
|
TAHOE_LAFS_WARNINGS_LOG: "/tmp/artifacts/deprecation-warnings.log"
|
||||||
|
# The deprecations tox environments don't do coverage measurement.
|
||||||
|
UPLOAD_COVERAGE: ""
|
||||||
|
|
||||||
|
|
||||||
integration:
|
integration:
|
||||||
@ -255,23 +344,50 @@ jobs:
|
|||||||
- run: *RUN_TESTS
|
- run: *RUN_TESTS
|
||||||
|
|
||||||
|
|
||||||
ubuntu-16.04:
|
ubuntu-16-04:
|
||||||
<<: *DEBIAN
|
<<: *DEBIAN
|
||||||
docker:
|
docker:
|
||||||
- image: "tahoelafsci/ubuntu:16.04"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "tahoelafsci/ubuntu:16.04-py2.7"
|
||||||
user: "nobody"
|
user: "nobody"
|
||||||
|
|
||||||
|
|
||||||
ubuntu-18.04:
|
ubuntu-18-04: &UBUNTU_18_04
|
||||||
<<: *DEBIAN
|
<<: *DEBIAN
|
||||||
docker:
|
docker:
|
||||||
- image: "tahoelafsci/ubuntu:18.04"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "tahoelafsci/ubuntu:18.04-py2.7"
|
||||||
user: "nobody"
|
user: "nobody"
|
||||||
|
|
||||||
|
|
||||||
centos-7: &RHEL_DERIV
|
python36:
|
||||||
|
<<: *UBUNTU_18_04
|
||||||
docker:
|
docker:
|
||||||
- image: "tahoelafsci/centos:7"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "tahoelafsci/ubuntu:18.04-py3"
|
||||||
|
user: "nobody"
|
||||||
|
|
||||||
|
environment:
|
||||||
|
<<: *UTF_8_ENVIRONMENT
|
||||||
|
# The default trial args include --rterrors which is incompatible with
|
||||||
|
# this reporter on Python 3. So drop that and just specify the
|
||||||
|
# reporter.
|
||||||
|
TAHOE_LAFS_TRIAL_ARGS: "--reporter=subunitv2-file"
|
||||||
|
TAHOE_LAFS_TOX_ENVIRONMENT: "py36-coverage"
|
||||||
|
|
||||||
|
|
||||||
|
ubuntu-20-04:
|
||||||
|
<<: *DEBIAN
|
||||||
|
docker:
|
||||||
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "tahoelafsci/ubuntu:20.04"
|
||||||
|
user: "nobody"
|
||||||
|
|
||||||
|
|
||||||
|
centos-8: &RHEL_DERIV
|
||||||
|
docker:
|
||||||
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "tahoelafsci/centos:8-py2"
|
||||||
user: "nobody"
|
user: "nobody"
|
||||||
|
|
||||||
environment: *UTF_8_ENVIRONMENT
|
environment: *UTF_8_ENVIRONMENT
|
||||||
@ -293,41 +409,24 @@ jobs:
|
|||||||
fedora-28:
|
fedora-28:
|
||||||
<<: *RHEL_DERIV
|
<<: *RHEL_DERIV
|
||||||
docker:
|
docker:
|
||||||
- image: "tahoelafsci/fedora:28"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "tahoelafsci/fedora:28-py"
|
||||||
user: "nobody"
|
user: "nobody"
|
||||||
|
|
||||||
|
|
||||||
fedora-29:
|
fedora-29:
|
||||||
<<: *RHEL_DERIV
|
<<: *RHEL_DERIV
|
||||||
docker:
|
docker:
|
||||||
- image: "tahoelafsci/fedora:29"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "tahoelafsci/fedora:29-py"
|
||||||
user: "nobody"
|
user: "nobody"
|
||||||
|
|
||||||
|
|
||||||
slackware-14.2:
|
nixos-19-09:
|
||||||
docker:
|
|
||||||
- image: "tahoelafsci/slackware:14.2"
|
|
||||||
user: "nobody"
|
|
||||||
|
|
||||||
environment: *UTF_8_ENVIRONMENT
|
|
||||||
|
|
||||||
# pip cannot install packages if the working directory is not readable.
|
|
||||||
# We want to run a lot of steps as nobody instead of as root.
|
|
||||||
working_directory: "/tmp/project"
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- "checkout"
|
|
||||||
- run: *SETUP_VIRTUALENV
|
|
||||||
- run: *RUN_TESTS
|
|
||||||
- store_test_results: *STORE_TEST_RESULTS
|
|
||||||
- store_artifacts: *STORE_TEST_LOG
|
|
||||||
- store_artifacts: *STORE_OTHER_ARTIFACTS
|
|
||||||
- run: *SUBMIT_COVERAGE
|
|
||||||
|
|
||||||
nixos-19.09:
|
|
||||||
docker:
|
docker:
|
||||||
# Run in a highly Nix-capable environment.
|
# Run in a highly Nix-capable environment.
|
||||||
- image: "nixorg/nix:circleci"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "nixorg/nix:circleci"
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
NIX_PATH: "nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.09-small.tar.gz"
|
NIX_PATH: "nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.09-small.tar.gz"
|
||||||
@ -384,69 +483,35 @@ jobs:
|
|||||||
#
|
#
|
||||||
# https://circleci.com/blog/how-to-build-a-docker-image-on-circleci-2-0/
|
# https://circleci.com/blog/how-to-build-a-docker-image-on-circleci-2-0/
|
||||||
docker:
|
docker:
|
||||||
- image: "docker:17.05.0-ce-git"
|
- <<: *DOCKERHUB_AUTH
|
||||||
|
image: "docker:17.05.0-ce-git"
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
DISTRO: "tahoelafsci/<DISTRO>:foo"
|
DISTRO: "tahoelafsci/<DISTRO>:foo-py2"
|
||||||
TAG: "tahoelafsci/distro:<TAG>"
|
TAG: "tahoelafsci/distro:<TAG>-py2"
|
||||||
|
PYTHON_VERSION: "tahoelafsci/distro:tag-py<PYTHON_VERSION}"
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- "checkout"
|
- "checkout"
|
||||||
- "setup_remote_docker"
|
- "setup_remote_docker"
|
||||||
- run:
|
|
||||||
name: "Get openssl"
|
|
||||||
command: |
|
|
||||||
apk add --no-cache openssl
|
|
||||||
- run:
|
|
||||||
name: "Get Dockerhub secrets"
|
|
||||||
command: |
|
|
||||||
# If you create an encryption key like this:
|
|
||||||
#
|
|
||||||
# openssl enc -aes-256-cbc -k secret -P -md sha256
|
|
||||||
|
|
||||||
# From the output that looks like:
|
|
||||||
#
|
|
||||||
# salt=...
|
|
||||||
# key=...
|
|
||||||
# iv =...
|
|
||||||
#
|
|
||||||
# extract just the value for ``key``.
|
|
||||||
|
|
||||||
# then you can re-generate ``secret-env-cipher`` locally using the
|
|
||||||
# command:
|
|
||||||
#
|
|
||||||
# openssl aes-256-cbc -e -md sha256 -in secret-env-plain -out .circleci/secret-env-cipher -pass env:KEY
|
|
||||||
#
|
|
||||||
# Make sure the key is set as the KEY environment variable in the
|
|
||||||
# CircleCI web interface. You can do this by visiting
|
|
||||||
# <https://circleci.com/gh/tahoe-lafs/tahoe-lafs/edit#env-vars>
|
|
||||||
# after logging in to CircleCI with an account in the tahoe-lafs
|
|
||||||
# CircleCI team.
|
|
||||||
#
|
|
||||||
# Then you can recover the environment plaintext (for example, to
|
|
||||||
# change and re-encrypt it) like just like CircleCI recovers it
|
|
||||||
# here:
|
|
||||||
#
|
|
||||||
openssl aes-256-cbc -d -md sha256 -in .circleci/secret-env-cipher -pass env:KEY >> ~/.env
|
|
||||||
- run:
|
- run:
|
||||||
name: "Log in to Dockerhub"
|
name: "Log in to Dockerhub"
|
||||||
command: |
|
command: |
|
||||||
. ~/.env
|
docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
|
||||||
# TAHOELAFSCI_PASSWORD come from the secret env.
|
|
||||||
docker login -u tahoelafsci -p ${TAHOELAFSCI_PASSWORD}
|
|
||||||
- run:
|
- run:
|
||||||
name: "Build image"
|
name: "Build image"
|
||||||
command: |
|
command: |
|
||||||
docker \
|
docker \
|
||||||
build \
|
build \
|
||||||
--build-arg TAG=${TAG} \
|
--build-arg TAG=${TAG} \
|
||||||
-t tahoelafsci/${DISTRO}:${TAG} \
|
--build-arg PYTHON_VERSION=${PYTHON_VERSION} \
|
||||||
|
-t tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION} \
|
||||||
-f ~/project/.circleci/Dockerfile.${DISTRO} \
|
-f ~/project/.circleci/Dockerfile.${DISTRO} \
|
||||||
~/project/
|
~/project/
|
||||||
- run:
|
- run:
|
||||||
name: "Push image"
|
name: "Push image"
|
||||||
command: |
|
command: |
|
||||||
docker push tahoelafsci/${DISTRO}:${TAG}
|
docker push tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION}
|
||||||
|
|
||||||
|
|
||||||
build-image-debian-8:
|
build-image-debian-8:
|
||||||
@ -455,6 +520,7 @@ jobs:
|
|||||||
environment:
|
environment:
|
||||||
DISTRO: "debian"
|
DISTRO: "debian"
|
||||||
TAG: "8"
|
TAG: "8"
|
||||||
|
PYTHON_VERSION: "2.7"
|
||||||
|
|
||||||
|
|
||||||
build-image-debian-9:
|
build-image-debian-9:
|
||||||
@ -463,30 +529,52 @@ jobs:
|
|||||||
environment:
|
environment:
|
||||||
DISTRO: "debian"
|
DISTRO: "debian"
|
||||||
TAG: "9"
|
TAG: "9"
|
||||||
|
PYTHON_VERSION: "2.7"
|
||||||
|
|
||||||
|
|
||||||
build-image-ubuntu-16.04:
|
build-image-ubuntu-16-04:
|
||||||
<<: *BUILD_IMAGE
|
<<: *BUILD_IMAGE
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
DISTRO: "ubuntu"
|
DISTRO: "ubuntu"
|
||||||
TAG: "16.04"
|
TAG: "16.04"
|
||||||
|
PYTHON_VERSION: "2.7"
|
||||||
|
|
||||||
|
|
||||||
build-image-ubuntu-18.04:
|
build-image-ubuntu-18-04:
|
||||||
<<: *BUILD_IMAGE
|
<<: *BUILD_IMAGE
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
DISTRO: "ubuntu"
|
DISTRO: "ubuntu"
|
||||||
TAG: "18.04"
|
TAG: "18.04"
|
||||||
|
PYTHON_VERSION: "2.7"
|
||||||
|
|
||||||
|
|
||||||
build-image-centos-7:
|
build-image-python36-ubuntu:
|
||||||
|
<<: *BUILD_IMAGE
|
||||||
|
|
||||||
|
environment:
|
||||||
|
DISTRO: "ubuntu"
|
||||||
|
TAG: "18.04"
|
||||||
|
PYTHON_VERSION: "3"
|
||||||
|
|
||||||
|
|
||||||
|
build-image-ubuntu-20-04:
|
||||||
|
<<: *BUILD_IMAGE
|
||||||
|
|
||||||
|
environment:
|
||||||
|
DISTRO: "ubuntu"
|
||||||
|
TAG: "20.04"
|
||||||
|
PYTHON_VERSION: "2.7"
|
||||||
|
|
||||||
|
|
||||||
|
build-image-centos-8:
|
||||||
<<: *BUILD_IMAGE
|
<<: *BUILD_IMAGE
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
DISTRO: "centos"
|
DISTRO: "centos"
|
||||||
TAG: "7"
|
TAG: "8"
|
||||||
|
PYTHON_VERSION: "2"
|
||||||
|
|
||||||
|
|
||||||
build-image-fedora-28:
|
build-image-fedora-28:
|
||||||
@ -495,6 +583,8 @@ jobs:
|
|||||||
environment:
|
environment:
|
||||||
DISTRO: "fedora"
|
DISTRO: "fedora"
|
||||||
TAG: "28"
|
TAG: "28"
|
||||||
|
# The default on Fedora (this version anyway) is still Python 2.
|
||||||
|
PYTHON_VERSION: ""
|
||||||
|
|
||||||
|
|
||||||
build-image-fedora-29:
|
build-image-fedora-29:
|
||||||
@ -505,17 +595,13 @@ jobs:
|
|||||||
TAG: "29"
|
TAG: "29"
|
||||||
|
|
||||||
|
|
||||||
build-image-slackware-14.2:
|
build-image-pypy27-buster:
|
||||||
<<: *BUILD_IMAGE
|
|
||||||
|
|
||||||
environment:
|
|
||||||
DISTRO: "slackware"
|
|
||||||
TAG: "14.2"
|
|
||||||
|
|
||||||
|
|
||||||
build-image-pypy-2.7-7.1.1-jessie:
|
|
||||||
<<: *BUILD_IMAGE
|
<<: *BUILD_IMAGE
|
||||||
|
|
||||||
environment:
|
environment:
|
||||||
DISTRO: "pypy"
|
DISTRO: "pypy"
|
||||||
TAG: "2.7-7.1.1-jessie"
|
TAG: "buster"
|
||||||
|
# We only have Python 2 for PyPy right now so there's no support for
|
||||||
|
# setting up PyPy 3 in the image building toolchain. This value is just
|
||||||
|
# for constructing the right Docker image tag.
|
||||||
|
PYTHON_VERSION: "2"
|
||||||
|
@ -36,8 +36,9 @@ PIP="${BOOTSTRAP_VENV}/bin/pip"
|
|||||||
# Tell pip where it can find any existing wheels.
|
# Tell pip where it can find any existing wheels.
|
||||||
export PIP_FIND_LINKS="file://${WHEELHOUSE_PATH}"
|
export PIP_FIND_LINKS="file://${WHEELHOUSE_PATH}"
|
||||||
|
|
||||||
# Populate the wheelhouse, if necessary.
|
# Populate the wheelhouse, if necessary. zfec 1.5.3 can only be built with a
|
||||||
"${PIP}" \
|
# UTF-8 environment so make sure we have one, at least for this invocation.
|
||||||
|
LANG="en_US.UTF-8" "${PIP}" \
|
||||||
wheel \
|
wheel \
|
||||||
--wheel-dir "${WHEELHOUSE_PATH}" \
|
--wheel-dir "${WHEELHOUSE_PATH}" \
|
||||||
"${PROJECT_ROOT}"[test] \
|
"${PROJECT_ROOT}"[test] \
|
||||||
|
@ -65,9 +65,13 @@ TIMEOUT="timeout --kill-after 1m 15m"
|
|||||||
# Send the output directly to a file because transporting the binary subunit2
|
# Send the output directly to a file because transporting the binary subunit2
|
||||||
# via tox and then scraping it out is hideous and failure prone.
|
# via tox and then scraping it out is hideous and failure prone.
|
||||||
export SUBUNITREPORTER_OUTPUT_PATH="${SUBUNIT2}"
|
export SUBUNITREPORTER_OUTPUT_PATH="${SUBUNIT2}"
|
||||||
export TAHOE_LAFS_TRIAL_ARGS="--reporter=subunitv2-file --rterrors"
|
export TAHOE_LAFS_TRIAL_ARGS="${TAHOE_LAFS_TRIAL_ARGS:---reporter=subunitv2-file --rterrors}"
|
||||||
export PIP_NO_INDEX="1"
|
export PIP_NO_INDEX="1"
|
||||||
|
|
||||||
|
# Make output unbuffered, so progress reports from subunitv2-file get streamed
|
||||||
|
# and notify CircleCI we're still alive.
|
||||||
|
export PYTHONUNBUFFERED=1
|
||||||
|
|
||||||
if [ "${ALLOWED_FAILURE}" = "yes" ]; then
|
if [ "${ALLOWED_FAILURE}" = "yes" ]; then
|
||||||
alternative="true"
|
alternative="true"
|
||||||
else
|
else
|
||||||
@ -81,7 +85,12 @@ ${TIMEOUT} ${BOOTSTRAP_VENV}/bin/tox \
|
|||||||
${TAHOE_LAFS_TOX_ARGS} || "${alternative}"
|
${TAHOE_LAFS_TOX_ARGS} || "${alternative}"
|
||||||
|
|
||||||
if [ -n "${ARTIFACTS}" ]; then
|
if [ -n "${ARTIFACTS}" ]; then
|
||||||
|
if [ ! -e "${SUBUNIT2}" ]; then
|
||||||
|
echo "subunitv2 output file does not exist: ${SUBUNIT2}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Create a junitxml results area.
|
# Create a junitxml results area.
|
||||||
mkdir -p "$(dirname "${JUNITXML}")"
|
mkdir -p "$(dirname "${JUNITXML}")"
|
||||||
${BOOTSTRAP_VENV}/bin/subunit2junitxml < "${SUBUNIT2}" > "${JUNITXML}" || "${alternative}"
|
"${BOOTSTRAP_VENV}"/bin/subunit2junitxml < "${SUBUNIT2}" > "${JUNITXML}" || "${alternative}"
|
||||||
fi
|
fi
|
||||||
|
@ -1 +0,0 @@
|
|||||||
Salted__ •GPÁøÊ)|!÷[©U[‡ûvSÚ,F¿–m:ö š~ÓY[Uú_¸Fx×’¤Ÿ%<25>“4l×Ö»Š8¼œ¹„1öø‰/lƒÌ`nÆ^·Z]óqš¬æ¢&ø°÷£Ý‚‚ß%T¡n
|
|
34
.codecov.yml
Normal file
34
.codecov.yml
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
# Override defaults for codecov.io checks.
|
||||||
|
#
|
||||||
|
# Documentation is at https://docs.codecov.io/docs/codecov-yaml;
|
||||||
|
# reference is at https://docs.codecov.io/docs/codecovyml-reference.
|
||||||
|
#
|
||||||
|
# To validate this file, use:
|
||||||
|
#
|
||||||
|
# curl --data-binary @.codecov.yml https://codecov.io/validate
|
||||||
|
#
|
||||||
|
# Codecov's defaults seem to leave red marks in GitHub CI checks in a
|
||||||
|
# rather arbitrary manner, probably because of non-determinism in
|
||||||
|
# coverage (see https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2891)
|
||||||
|
# and maybe because computers are bad with floating point numbers.
|
||||||
|
|
||||||
|
# Allow coverage percentage a precision of zero decimals, and round to
|
||||||
|
# the nearest number (for example, 89.957 to to 90; 89.497 to 89%).
|
||||||
|
# Coverage above 90% is good, below 80% is bad.
|
||||||
|
coverage:
|
||||||
|
round: nearest
|
||||||
|
range: 80..90
|
||||||
|
precision: 0
|
||||||
|
|
||||||
|
# Aim for a target test coverage of 90% in codecov/project check (do
|
||||||
|
# not allow project coverage to drop below that), and allow
|
||||||
|
# codecov/patch a threshold of 1% (allow coverage in changes to drop
|
||||||
|
# by that much, and no less). That should be good enough for us.
|
||||||
|
status:
|
||||||
|
project:
|
||||||
|
default:
|
||||||
|
target: 90%
|
||||||
|
threshold: 1%
|
||||||
|
patch:
|
||||||
|
default:
|
||||||
|
threshold: 1%
|
15
.coveragerc
15
.coveragerc
@ -10,3 +10,18 @@ omit =
|
|||||||
*/allmydata/_version.py
|
*/allmydata/_version.py
|
||||||
parallel = True
|
parallel = True
|
||||||
branch = True
|
branch = True
|
||||||
|
|
||||||
|
[report]
|
||||||
|
show_missing = True
|
||||||
|
skip_covered = True
|
||||||
|
|
||||||
|
[paths]
|
||||||
|
source =
|
||||||
|
# It looks like this in the checkout
|
||||||
|
src/
|
||||||
|
# It looks like this in the Windows build environment
|
||||||
|
D:/a/tahoe-lafs/tahoe-lafs/.tox/py*-coverage/Lib/site-packages/
|
||||||
|
# Although sometimes it looks like this instead. Also it looks like this on macOS.
|
||||||
|
.tox/py*-coverage/lib/python*/site-packages/
|
||||||
|
# On some Linux CI jobs it looks like this
|
||||||
|
/tmp/tahoe-lafs.tox/py*-coverage/lib/python*/site-packages/
|
||||||
|
183
.github/workflows/ci.yml
vendored
Normal file
183
.github/workflows/ci.yml
vendored
Normal file
@ -0,0 +1,183 @@
|
|||||||
|
name: CI
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- "master"
|
||||||
|
pull_request:
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
|
||||||
|
coverage:
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
os:
|
||||||
|
- macos-latest
|
||||||
|
- windows-latest
|
||||||
|
python-version:
|
||||||
|
- 2.7
|
||||||
|
|
||||||
|
steps:
|
||||||
|
|
||||||
|
# Get vcpython27 on Windows + Python 2.7, to build zfec
|
||||||
|
# extension. See https://chocolatey.org/packages/vcpython27 and
|
||||||
|
# https://github.com/crazy-max/ghaction-chocolatey
|
||||||
|
- name: Install MSVC 9.0 for Python 2.7 [Windows]
|
||||||
|
if: matrix.os == 'windows-latest' && matrix.python-version == '2.7'
|
||||||
|
uses: crazy-max/ghaction-chocolatey@v1
|
||||||
|
with:
|
||||||
|
args: install vcpython27
|
||||||
|
|
||||||
|
- name: Check out Tahoe-LAFS sources
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Fetch all history for all tags and branches
|
||||||
|
run: git fetch --prune --unshallow
|
||||||
|
|
||||||
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
|
uses: actions/setup-python@v1
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
|
||||||
|
- name: Install Python packages
|
||||||
|
run: |
|
||||||
|
pip install --upgrade codecov tox setuptools
|
||||||
|
pip list
|
||||||
|
|
||||||
|
- name: Display tool versions
|
||||||
|
run: python misc/build_helpers/show-tool-versions.py
|
||||||
|
|
||||||
|
- name: Run "tox -e py27-coverage"
|
||||||
|
run: tox -e py27-coverage
|
||||||
|
|
||||||
|
- name: Upload eliot.log in case of failure
|
||||||
|
uses: actions/upload-artifact@v1
|
||||||
|
if: failure()
|
||||||
|
with:
|
||||||
|
name: eliot.log
|
||||||
|
path: eliot.log
|
||||||
|
|
||||||
|
- name: Upload coverage report
|
||||||
|
uses: codecov/codecov-action@v1
|
||||||
|
with:
|
||||||
|
token: abf679b6-e2e6-4b33-b7b5-6cfbd41ee691
|
||||||
|
file: coverage.xml
|
||||||
|
|
||||||
|
integration:
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
os:
|
||||||
|
- macos-latest
|
||||||
|
- windows-latest
|
||||||
|
python-version:
|
||||||
|
- 2.7
|
||||||
|
|
||||||
|
steps:
|
||||||
|
|
||||||
|
- name: Install Tor [Ubuntu]
|
||||||
|
if: matrix.os == 'ubuntu-latest'
|
||||||
|
run: sudo apt install tor
|
||||||
|
|
||||||
|
- name: Install Tor [macOS]
|
||||||
|
if: matrix.os == 'macos-latest'
|
||||||
|
run: brew install tor
|
||||||
|
|
||||||
|
- name: Install Tor [Windows]
|
||||||
|
if: matrix.os == 'windows-latest'
|
||||||
|
uses: crazy-max/ghaction-chocolatey@v1
|
||||||
|
with:
|
||||||
|
args: install tor
|
||||||
|
|
||||||
|
- name: Install MSVC 9.0 for Python 2.7 [Windows]
|
||||||
|
if: matrix.os == 'windows-latest' && matrix.python-version == '2.7'
|
||||||
|
uses: crazy-max/ghaction-chocolatey@v1
|
||||||
|
with:
|
||||||
|
args: install vcpython27
|
||||||
|
|
||||||
|
- name: Check out Tahoe-LAFS sources
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Fetch all history for all tags and branches
|
||||||
|
run: git fetch --prune --unshallow
|
||||||
|
|
||||||
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
|
uses: actions/setup-python@v1
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
|
||||||
|
- name: Install Python packages
|
||||||
|
run: |
|
||||||
|
pip install --upgrade tox
|
||||||
|
pip list
|
||||||
|
|
||||||
|
- name: Display tool versions
|
||||||
|
run: python misc/build_helpers/show-tool-versions.py
|
||||||
|
|
||||||
|
- name: Run "tox -e integration"
|
||||||
|
run: tox -e integration
|
||||||
|
|
||||||
|
- name: Upload eliot.log in case of failure
|
||||||
|
uses: actions/upload-artifact@v1
|
||||||
|
if: failure()
|
||||||
|
with:
|
||||||
|
name: integration.eliot.json
|
||||||
|
path: integration.eliot.json
|
||||||
|
|
||||||
|
packaging:
|
||||||
|
runs-on: ${{ matrix.os }}
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
os:
|
||||||
|
- macos-latest
|
||||||
|
- windows-latest
|
||||||
|
- ubuntu-latest
|
||||||
|
python-version:
|
||||||
|
- 2.7
|
||||||
|
|
||||||
|
steps:
|
||||||
|
|
||||||
|
# Get vcpython27 on Windows + Python 2.7, to build zfec
|
||||||
|
# extension. See https://chocolatey.org/packages/vcpython27 and
|
||||||
|
# https://github.com/crazy-max/ghaction-chocolatey
|
||||||
|
- name: Install MSVC 9.0 for Python 2.7 [Windows]
|
||||||
|
if: matrix.os == 'windows-latest' && matrix.python-version == '2.7'
|
||||||
|
uses: crazy-max/ghaction-chocolatey@v1
|
||||||
|
with:
|
||||||
|
args: install vcpython27
|
||||||
|
|
||||||
|
- name: Check out Tahoe-LAFS sources
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Fetch all history for all tags and branches
|
||||||
|
run: git fetch --prune --unshallow
|
||||||
|
|
||||||
|
- name: Set up Python ${{ matrix.python-version }}
|
||||||
|
uses: actions/setup-python@v1
|
||||||
|
with:
|
||||||
|
python-version: ${{ matrix.python-version }}
|
||||||
|
|
||||||
|
- name: Install Python packages
|
||||||
|
run: |
|
||||||
|
pip install --upgrade tox
|
||||||
|
pip list
|
||||||
|
|
||||||
|
- name: Display tool versions
|
||||||
|
run: python misc/build_helpers/show-tool-versions.py
|
||||||
|
|
||||||
|
- name: Run "tox -e pyinstaller"
|
||||||
|
run: tox -e pyinstaller
|
||||||
|
|
||||||
|
# This step is to ensure there are no packaging/import errors.
|
||||||
|
- name: Test PyInstaller executable
|
||||||
|
run: dist/Tahoe-LAFS/tahoe --version
|
||||||
|
|
||||||
|
- name: Upload PyInstaller package
|
||||||
|
uses: actions/upload-artifact@v2
|
||||||
|
with:
|
||||||
|
name: Tahoe-LAFS-${{ matrix.os }}-Python-${{ matrix.python-version }}
|
||||||
|
path: dist/Tahoe-LAFS-*-*.*
|
9
.gitignore
vendored
9
.gitignore
vendored
@ -1,4 +1,4 @@
|
|||||||
venv
|
venv*
|
||||||
|
|
||||||
# vim swap files
|
# vim swap files
|
||||||
*.swp
|
*.swp
|
||||||
@ -9,6 +9,7 @@ venv
|
|||||||
*~
|
*~
|
||||||
*.DS_Store
|
*.DS_Store
|
||||||
.*.kate-swp
|
.*.kate-swp
|
||||||
|
*.bak
|
||||||
|
|
||||||
/build/
|
/build/
|
||||||
/support/
|
/support/
|
||||||
@ -36,6 +37,7 @@ zope.interface-*.egg
|
|||||||
/tahoe-deps/
|
/tahoe-deps/
|
||||||
/tahoe-deps.tar.gz
|
/tahoe-deps.tar.gz
|
||||||
/.coverage
|
/.coverage
|
||||||
|
/.coverage.*
|
||||||
/.coverage.el
|
/.coverage.el
|
||||||
/coverage-html/
|
/coverage-html/
|
||||||
/miscaptures.txt
|
/miscaptures.txt
|
||||||
@ -43,8 +45,11 @@ zope.interface-*.egg
|
|||||||
/.tox/
|
/.tox/
|
||||||
/docs/_build/
|
/docs/_build/
|
||||||
/coverage.xml
|
/coverage.xml
|
||||||
/smoke_magicfolder/
|
/.pre-commit-config.local.yaml
|
||||||
/.hypothesis/
|
/.hypothesis/
|
||||||
|
/eliot.log
|
||||||
|
/misc/python3/results.xml
|
||||||
|
/misc/python3/results.subunit2
|
||||||
|
|
||||||
# This is the plaintext of the private environment needed for some CircleCI
|
# This is the plaintext of the private environment needed for some CircleCI
|
||||||
# operations. It's never supposed to be checked in.
|
# operations. It's never supposed to be checked in.
|
||||||
|
9
.pre-commit-config.yaml
Normal file
9
.pre-commit-config.yaml
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
repos:
|
||||||
|
- repo: local
|
||||||
|
hooks:
|
||||||
|
- id: codechecks
|
||||||
|
name: codechecks
|
||||||
|
stages: ["push"]
|
||||||
|
entry: "tox -e codechecks"
|
||||||
|
language: system
|
||||||
|
pass_filenames: false
|
77
.travis.yml
77
.travis.yml
@ -1,77 +0,0 @@
|
|||||||
sudo: false
|
|
||||||
language: python
|
|
||||||
cache: pip
|
|
||||||
dist: trusty
|
|
||||||
before_cache:
|
|
||||||
- rm -f $HOME/.cache/pip/log/debug.log
|
|
||||||
git:
|
|
||||||
depth: 1000
|
|
||||||
|
|
||||||
env:
|
|
||||||
global:
|
|
||||||
- TAHOE_LAFS_HYPOTHESIS_PROFILE=ci
|
|
||||||
|
|
||||||
install:
|
|
||||||
# ~/.local/bin is on $PATH by default, but on OS-X, --user puts it elsewhere
|
|
||||||
- if [ "${TRAVIS_OS_NAME}" = "osx" ]; then export PATH=$HOME/Library/Python/2.7/bin:$PATH; fi
|
|
||||||
- if [ "${TRAVIS_OS_NAME}" = "osx" ]; then wget https://bootstrap.pypa.io/get-pip.py && sudo python ./get-pip.py; fi
|
|
||||||
- pip list
|
|
||||||
- if [ "${TRAVIS_OS_NAME}" = "osx" ]; then pip install --user --upgrade codecov tox setuptools; fi
|
|
||||||
- if [ "${TRAVIS_OS_NAME}" = "linux" ]; then pip install --upgrade codecov tox setuptools; fi
|
|
||||||
- echo $PATH; which python; which pip; which tox
|
|
||||||
- python misc/build_helpers/show-tool-versions.py
|
|
||||||
|
|
||||||
script:
|
|
||||||
- |
|
|
||||||
set -eo pipefail
|
|
||||||
if [ "${T}" = "py35" ]; then
|
|
||||||
python3 -m compileall -f -x tahoe-depgraph.py .
|
|
||||||
else
|
|
||||||
tox -e ${T}
|
|
||||||
fi
|
|
||||||
# To verify that the resultant PyInstaller-generated binary executes
|
|
||||||
# cleanly (i.e., that it terminates with an exit code of 0 and isn't
|
|
||||||
# failing due to import/packaging-related errors, etc.).
|
|
||||||
if [ "${T}" = "pyinstaller" ]; then dist/Tahoe-LAFS/tahoe --version; fi
|
|
||||||
|
|
||||||
after_success:
|
|
||||||
- if [ "${T}" = "coverage" ]; then codecov; fi
|
|
||||||
|
|
||||||
notifications:
|
|
||||||
email: false
|
|
||||||
irc:
|
|
||||||
channels: "chat.freenode.net#tahoe-lafs"
|
|
||||||
on_success: always # for testing
|
|
||||||
on_failure: always
|
|
||||||
template:
|
|
||||||
- "%{repository}#%{build_number} [%{branch}: %{commit} by %{author}] %{message}"
|
|
||||||
- "Changes: %{compare_url} | Details: %{build_url}"
|
|
||||||
|
|
||||||
matrix:
|
|
||||||
include:
|
|
||||||
- os: linux
|
|
||||||
python: '2.7'
|
|
||||||
env: T=coverage LANG=en_US.UTF-8
|
|
||||||
- os: linux
|
|
||||||
python: '2.7'
|
|
||||||
env: T=codechecks LANG=en_US.UTF-8
|
|
||||||
- os: linux
|
|
||||||
python: '2.7'
|
|
||||||
env: T=pyinstaller LANG=en_US.UTF-8
|
|
||||||
- os: linux
|
|
||||||
python: '2.7'
|
|
||||||
env: T=py27 LANG=C
|
|
||||||
- os: osx
|
|
||||||
python: '2.7'
|
|
||||||
env: T=py27 LANG=en_US.UTF-8
|
|
||||||
language: generic # "python" is not available on OS-X
|
|
||||||
- os: osx
|
|
||||||
python: '2.7'
|
|
||||||
env: T=pyinstaller LANG=en_US.UTF-8
|
|
||||||
language: generic # "python" is not available on OS-X
|
|
||||||
# this is a "lint" job that checks for python3 compatibility
|
|
||||||
- os: linux
|
|
||||||
python: '3.5'
|
|
||||||
env: T=py35
|
|
||||||
|
|
||||||
fast_finish: true
|
|
116
Makefile
116
Makefile
@ -1,16 +1,68 @@
|
|||||||
|
# Tahoe LFS Development and maintenance tasks
|
||||||
|
#
|
||||||
# NOTE: this Makefile requires GNU make
|
# NOTE: this Makefile requires GNU make
|
||||||
|
|
||||||
|
### Defensive settings for make:
|
||||||
|
# https://tech.davis-hansson.com/p/make/
|
||||||
|
SHELL := bash
|
||||||
|
.ONESHELL:
|
||||||
|
.SHELLFLAGS := -xeu -o pipefail -c
|
||||||
|
.SILENT:
|
||||||
|
.DELETE_ON_ERROR:
|
||||||
|
MAKEFLAGS += --warn-undefined-variables
|
||||||
|
MAKEFLAGS += --no-builtin-rules
|
||||||
|
|
||||||
|
# Local target variables
|
||||||
|
VCS_HOOK_SAMPLES=$(wildcard .git/hooks/*.sample)
|
||||||
|
VCS_HOOKS=$(VCS_HOOK_SAMPLES:%.sample=%)
|
||||||
|
PYTHON=python
|
||||||
|
export PYTHON
|
||||||
|
PYFLAKES=flake8
|
||||||
|
export PYFLAKES
|
||||||
|
VIRTUAL_ENV=./.tox/py27
|
||||||
|
SOURCES=src/allmydata static misc setup.py
|
||||||
|
APPNAME=tahoe-lafs
|
||||||
|
TEST_SUITE=allmydata
|
||||||
|
|
||||||
|
|
||||||
|
# Top-level, phony targets
|
||||||
|
|
||||||
|
.PHONY: default
|
||||||
default:
|
default:
|
||||||
@echo "no default target"
|
@echo "no default target"
|
||||||
|
|
||||||
PYTHON=python
|
.PHONY: install-vcs-hooks
|
||||||
export PYTHON
|
## Install the VCS hooks to run linters on commit and all tests on push
|
||||||
PYFLAKES=pyflakes
|
install-vcs-hooks: .git/hooks/pre-commit .git/hooks/pre-push
|
||||||
export PYFLAKES
|
.PHONY: uninstall-vcs-hooks
|
||||||
|
## Remove the VCS hooks
|
||||||
|
uninstall-vcs-hooks: .tox/create-venvs.log
|
||||||
|
"./$(dir $(<))py36/bin/pre-commit" uninstall || true
|
||||||
|
"./$(dir $(<))py36/bin/pre-commit" uninstall -t pre-push || true
|
||||||
|
|
||||||
SOURCES=src/allmydata static misc setup.py
|
.PHONY: test
|
||||||
APPNAME=tahoe-lafs
|
## Run all tests and code reports
|
||||||
|
test: .tox/create-venvs.log
|
||||||
|
# Run codechecks first since it takes the least time to report issues early.
|
||||||
|
tox --develop -e codechecks
|
||||||
|
# Run all the test environments in parallel to reduce run-time
|
||||||
|
tox --develop -p auto -e 'py27,py36,pypy27'
|
||||||
|
.PHONY: test-venv-coverage
|
||||||
|
## Run all tests with coverage collection and reporting.
|
||||||
|
test-venv-coverage:
|
||||||
|
# Special handling for reporting coverage even when the test run fails
|
||||||
|
rm -f ./.coverage.*
|
||||||
|
test_exit=
|
||||||
|
$(VIRTUAL_ENV)/bin/coverage run -m twisted.trial --rterrors --reporter=timing \
|
||||||
|
$(TEST_SUITE) || test_exit="$$?"
|
||||||
|
$(VIRTUAL_ENV)/bin/coverage combine
|
||||||
|
$(VIRTUAL_ENV)/bin/coverage xml || true
|
||||||
|
$(VIRTUAL_ENV)/bin/coverage report
|
||||||
|
if [ ! -z "$$test_exit" ]; then exit "$$test_exit"; fi
|
||||||
|
.PHONY: test-py3-all
|
||||||
|
## Run all tests under Python 3
|
||||||
|
test-py3-all: .tox/create-venvs.log
|
||||||
|
tox --develop -e py36 allmydata
|
||||||
|
|
||||||
# This is necessary only if you want to automatically produce a new
|
# This is necessary only if you want to automatically produce a new
|
||||||
# _version.py file from the current git history (without doing a build).
|
# _version.py file from the current git history (without doing a build).
|
||||||
@ -18,20 +70,16 @@ APPNAME=tahoe-lafs
|
|||||||
make-version:
|
make-version:
|
||||||
$(PYTHON) ./setup.py update_version
|
$(PYTHON) ./setup.py update_version
|
||||||
|
|
||||||
.built:
|
|
||||||
$(MAKE) build
|
|
||||||
|
|
||||||
src/allmydata/_version.py:
|
|
||||||
$(MAKE) make-version
|
|
||||||
|
|
||||||
# Build OS X pkg packages.
|
# Build OS X pkg packages.
|
||||||
.PHONY: build-osx-pkg test-osx-pkg upload-osx-pkg
|
.PHONY: build-osx-pkg
|
||||||
build-osx-pkg:
|
build-osx-pkg:
|
||||||
misc/build_helpers/build-osx-pkg.sh $(APPNAME)
|
misc/build_helpers/build-osx-pkg.sh $(APPNAME)
|
||||||
|
|
||||||
|
.PHONY: test-osx-pkg
|
||||||
test-osx-pkg:
|
test-osx-pkg:
|
||||||
$(PYTHON) misc/build_helpers/test-osx-pkg.py
|
$(PYTHON) misc/build_helpers/test-osx-pkg.py
|
||||||
|
|
||||||
|
.PHONY: upload-osx-pkg
|
||||||
upload-osx-pkg:
|
upload-osx-pkg:
|
||||||
# [Failure instance: Traceback: <class 'OpenSSL.SSL.Error'>: [('SSL routines', 'ssl3_read_bytes', 'tlsv1 alert unknown ca'), ('SSL routines', 'ssl3_write_bytes', 'ssl handshake failure')]
|
# [Failure instance: Traceback: <class 'OpenSSL.SSL.Error'>: [('SSL routines', 'ssl3_read_bytes', 'tlsv1 alert unknown ca'), ('SSL routines', 'ssl3_write_bytes', 'ssl handshake failure')]
|
||||||
#
|
#
|
||||||
@ -42,35 +90,12 @@ upload-osx-pkg:
|
|||||||
# echo not uploading tahoe-lafs-osx-pkg because this is not trunk but is branch \"${BB_BRANCH}\" ; \
|
# echo not uploading tahoe-lafs-osx-pkg because this is not trunk but is branch \"${BB_BRANCH}\" ; \
|
||||||
# fi
|
# fi
|
||||||
|
|
||||||
.PHONY: smoketest
|
|
||||||
smoketest:
|
|
||||||
-python ./src/allmydata/test/check_magicfolder_smoke.py kill
|
|
||||||
-rm -rf smoke_magicfolder/
|
|
||||||
python ./src/allmydata/test/check_magicfolder_smoke.py
|
|
||||||
|
|
||||||
# code coverage-based testing is disabled temporarily, as we switch to tox.
|
|
||||||
# This will eventually be added to a tox environment. The following comments
|
|
||||||
# and variable settings are retained as notes for that future effort.
|
|
||||||
|
|
||||||
## # code coverage: install the "coverage" package from PyPI, do "make
|
|
||||||
## # test-coverage" to do a unit test run with coverage-gathering enabled, then
|
|
||||||
## # use "make coverage-output" to generate an HTML report. Also see "make
|
|
||||||
## # .coverage.el" and misc/coding_tools/coverage.el for Emacs integration.
|
|
||||||
##
|
|
||||||
## # This might need to be python-coverage on Debian-based distros.
|
|
||||||
## COVERAGE=coverage
|
|
||||||
##
|
|
||||||
## COVERAGEARGS=--branch --source=src/allmydata
|
|
||||||
##
|
|
||||||
## # --include appeared in coverage-3.4
|
|
||||||
## COVERAGE_OMIT=--include '$(CURDIR)/src/allmydata/*' --omit '$(CURDIR)/src/allmydata/test/*'
|
|
||||||
|
|
||||||
|
|
||||||
.PHONY: code-checks
|
.PHONY: code-checks
|
||||||
#code-checks: build version-and-path check-interfaces check-miscaptures -find-trailing-spaces -check-umids pyflakes
|
#code-checks: build version-and-path check-interfaces check-miscaptures -find-trailing-spaces -check-umids pyflakes
|
||||||
code-checks: check-interfaces check-debugging check-miscaptures -find-trailing-spaces -check-umids pyflakes
|
code-checks: check-interfaces check-debugging check-miscaptures -find-trailing-spaces -check-umids pyflakes
|
||||||
|
|
||||||
.PHONY: check-interfaces
|
.PHONY: check-interfaces
|
||||||
|
check-interfaces:
|
||||||
$(PYTHON) misc/coding_tools/check-interfaces.py 2>&1 |tee violations.txt
|
$(PYTHON) misc/coding_tools/check-interfaces.py 2>&1 |tee violations.txt
|
||||||
@echo
|
@echo
|
||||||
|
|
||||||
@ -190,10 +215,11 @@ clean:
|
|||||||
rm -f *.pkg
|
rm -f *.pkg
|
||||||
|
|
||||||
.PHONY: distclean
|
.PHONY: distclean
|
||||||
distclean: clean
|
distclean: clean uninstall-vcs-hooks
|
||||||
rm -rf src/*.egg-info
|
rm -rf src/*.egg-info
|
||||||
rm -f src/allmydata/_version.py
|
rm -f src/allmydata/_version.py
|
||||||
rm -f src/allmydata/_appname.py
|
rm -f src/allmydata/_appname.py
|
||||||
|
rm -rf ./.tox/
|
||||||
|
|
||||||
|
|
||||||
.PHONY: find-trailing-spaces
|
.PHONY: find-trailing-spaces
|
||||||
@ -226,3 +252,15 @@ tarballs: # delegated to tox, so setup.py can update setuptools if needed
|
|||||||
.PHONY: upload-tarballs
|
.PHONY: upload-tarballs
|
||||||
upload-tarballs:
|
upload-tarballs:
|
||||||
@if [ "X${BB_BRANCH}" = "Xmaster" ] || [ "X${BB_BRANCH}" = "X" ]; then for f in dist/*; do flappclient --furlfile ~/.tahoe-tarball-upload.furl upload-file $$f; done ; else echo not uploading tarballs because this is not trunk but is branch \"${BB_BRANCH}\" ; fi
|
@if [ "X${BB_BRANCH}" = "Xmaster" ] || [ "X${BB_BRANCH}" = "X" ]; then for f in dist/*; do flappclient --furlfile ~/.tahoe-tarball-upload.furl upload-file $$f; done ; else echo not uploading tarballs because this is not trunk but is branch \"${BB_BRANCH}\" ; fi
|
||||||
|
|
||||||
|
|
||||||
|
# Real targets
|
||||||
|
|
||||||
|
src/allmydata/_version.py:
|
||||||
|
$(MAKE) make-version
|
||||||
|
|
||||||
|
.tox/create-venvs.log: tox.ini setup.py
|
||||||
|
tox --notest -p all | tee -a "$(@)"
|
||||||
|
|
||||||
|
$(VCS_HOOKS): .tox/create-venvs.log .pre-commit-config.yaml
|
||||||
|
"./$(dir $(<))py36/bin/pre-commit" install --hook-type $(@:.git/hooks/%=%)
|
||||||
|
109
NEWS.rst
109
NEWS.rst
@ -5,6 +5,115 @@ User-Visible Changes in Tahoe-LAFS
|
|||||||
==================================
|
==================================
|
||||||
|
|
||||||
.. towncrier start line
|
.. towncrier start line
|
||||||
|
Release 1.14.0 (2020-03-11)
|
||||||
|
'''''''''''''''''''''''''''
|
||||||
|
|
||||||
|
Features
|
||||||
|
--------
|
||||||
|
|
||||||
|
- Magic-Folders are now supported on macOS. (`#1432 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1432>`_)
|
||||||
|
- Add a "tox -e draftnews" which runs towncrier in draft mode (`#2942 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2942>`_)
|
||||||
|
- Fedora 29 is now tested as part of the project's continuous integration system. (`#2955 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2955>`_)
|
||||||
|
- The Magic-Folder frontend now emits structured, causal logs. This makes it easier for developers to make sense of its behavior and for users to submit useful debugging information alongside problem reports. (`#2972 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2972>`_)
|
||||||
|
- The `tahoe` CLI now accepts arguments for configuring structured logging messages which Tahoe-LAFS is being converted to emit. This change does not introduce any new defaults for on-filesystem logging. (`#2975 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2975>`_)
|
||||||
|
- The web API now publishes streaming Eliot logs via a token-protected WebSocket at /private/logs/v1. (`#3006 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3006>`_)
|
||||||
|
- End-to-end in-memory tests for websocket features (`#3041 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3041>`_)
|
||||||
|
- allmydata.interfaces.IFoolscapStoragePlugin has been introduced, an extension point for customizing the storage protocol. (`#3049 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3049>`_)
|
||||||
|
- Static storage server "announcements" in ``private/servers.yaml`` are now individually logged and ignored if they cannot be interpreted. (`#3051 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3051>`_)
|
||||||
|
- Storage servers can now be configured to load plugins for allmydata.interfaces.IFoolscapStoragePlugin and offer them to clients. (`#3053 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3053>`_)
|
||||||
|
- Storage clients can now be configured to load plugins for allmydata.interfaces.IFoolscapStoragePlugin and use them to negotiate with servers. (`#3054 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3054>`_)
|
||||||
|
- The [storage] configuration section now accepts a boolean *anonymous* item to enable or disable anonymous storage access. The default behavior remains unchanged. (`#3184 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3184>`_)
|
||||||
|
- Enable the helper when creating a node with `tahoe create-node --helper` (`#3235 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3235>`_)
|
||||||
|
|
||||||
|
|
||||||
|
Bug Fixes
|
||||||
|
---------
|
||||||
|
|
||||||
|
- refactor initialization code to be more async-friendly (`#2870 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2870>`_)
|
||||||
|
- Configuration-checking code wasn't being called due to indenting (`#2935 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2935>`_)
|
||||||
|
- refactor configuration handling out of Node into _Config (`#2936 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2936>`_)
|
||||||
|
- "tox -e codechecks" no longer dirties the working tree. (`#2941 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2941>`_)
|
||||||
|
- Updated the Tor release key, used by the integration tests. (`#2944 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2944>`_)
|
||||||
|
- `tahoe backup` no longer fails with an unhandled exception when it encounters a special file (device, fifo) in the backup source. (`#2950 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2950>`_)
|
||||||
|
- Magic-Folders now creates spurious conflict files in fewer cases. In particular, if files are added to the folder while a client is offline, that client will not create conflict files for all those new files when it starts up. (`#2965 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2965>`_)
|
||||||
|
- The confusing and misplaced sub-command group headings in `tahoe --help` output have been removed. (`#2976 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2976>`_)
|
||||||
|
- The Magic-Folder frontend is now more responsive to subtree changes on Windows. (`#2997 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2997>`_)
|
||||||
|
- remove ancient bundled jquery and d3, and the "dowload timeline" feature they support (`#3228 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3228>`_)
|
||||||
|
|
||||||
|
|
||||||
|
Dependency/Installation Changes
|
||||||
|
-------------------------------
|
||||||
|
|
||||||
|
- Tahoe-LAFS no longer makes start-up time assertions about the versions of its dependencies. It is the responsibility of the administrator of the installation to ensure the correct version of dependencies are supplied. (`#2749 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2749>`_)
|
||||||
|
- Tahoe-LAFS now depends on Twisted 16.6 or newer. (`#2957 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2957>`_)
|
||||||
|
|
||||||
|
|
||||||
|
Removed Features
|
||||||
|
----------------
|
||||||
|
|
||||||
|
- "tahoe rm", an old alias for "tahoe unlink", has been removed. (`#1827 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1827>`_)
|
||||||
|
- The direct dependencies on pyutil and zbase32 have been removed. (`#2098 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2098>`_)
|
||||||
|
- Untested and unmaintained code for running Tahoe-LAFS as a Windows service has been removed. (`#2239 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2239>`_)
|
||||||
|
- The redundant "pypywin32" dependency has been removed. (`#2392 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2392>`_)
|
||||||
|
- Fedora 27 is no longer tested as part of the project's continuous integration system. (`#2955 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2955>`_)
|
||||||
|
- "tahoe start", "tahoe daemonize", "tahoe restart", and "tahoe stop" are now deprecated in favor of using "tahoe run", possibly with a third-party process manager. (`#3273 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3273>`_)
|
||||||
|
|
||||||
|
|
||||||
|
Other Changes
|
||||||
|
-------------
|
||||||
|
|
||||||
|
- Tahoe-LAFS now tests for PyPy compatibility on CI. (`#2479 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2479>`_)
|
||||||
|
- Tahoe-LAFS now requires Twisted 18.4.0 or newer. (`#2771 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2771>`_)
|
||||||
|
- Tahoe-LAFS now uses towncrier to maintain the NEWS file. (`#2908 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2908>`_)
|
||||||
|
- The release process document has been updated. (`#2920 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2920>`_)
|
||||||
|
- allmydata.test.test_system.SystemTest is now more reliable with respect to bound address collisions. (`#2933 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2933>`_)
|
||||||
|
- The Tox configuration has been fixed to work around a problem on Windows CI. (`#2956 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2956>`_)
|
||||||
|
- The PyInstaller CI job now works around a pip/pyinstaller incompatibility. (`#2958 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2958>`_)
|
||||||
|
- Some CI jobs for integration tests have been moved from TravisCI to CircleCI. (`#2959 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2959>`_)
|
||||||
|
- Several warnings from a new release of pyflakes have been fixed. (`#2960 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2960>`_)
|
||||||
|
- Some Slackware 14.2 continuous integration problems have been resolved. (`#2961 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2961>`_)
|
||||||
|
- Some macOS continuous integration failures have been fixed. (`#2962 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2962>`_)
|
||||||
|
- The NoNetworkGrid implementation has been somewhat improved. (`#2966 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2966>`_)
|
||||||
|
- A bug in the test suite for the create-alias command has been fixed. (`#2967 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2967>`_)
|
||||||
|
- The integration test suite has been updated to use pytest-twisted instead of deprecated pytest APIs. (`#2968 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2968>`_)
|
||||||
|
- The magic-folder integration test suite now performs more aggressive cleanup of the processes it launches. (`#2969 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2969>`_)
|
||||||
|
- The integration tests now correctly document the `--keep-tempdir` option. (`#2970 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2970>`_)
|
||||||
|
- A misuse of super() in the integration tests has been fixed. (`#2971 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2971>`_)
|
||||||
|
- Several utilities to facilitate the use of the Eliot causal logging library have been introduced. (`#2973 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2973>`_)
|
||||||
|
- The Windows CI configuration has been tweaked. (`#2974 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2974>`_)
|
||||||
|
- The Magic-Folder frontend has had additional logging improvements. (`#2977 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2977>`_)
|
||||||
|
- (`#2981 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2981>`_, `#2982 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2982>`_)
|
||||||
|
- Added a simple sytax checker so that once a file has reached python3 compatibility, it will not regress. (`#3001 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3001>`_)
|
||||||
|
- Converted all uses of the print statement to the print function in the ./misc/ directory. (`#3002 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3002>`_)
|
||||||
|
- The contributor guidelines are now linked from the GitHub pull request creation page. (`#3003 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3003>`_)
|
||||||
|
- Updated the testing code to use the print function instead of the print statement. (`#3008 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3008>`_)
|
||||||
|
- Replaced print statement with print fuction for all tahoe_* scripts. (`#3009 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3009>`_)
|
||||||
|
- Replaced all remaining instances of the print statement with the print function. (`#3010 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3010>`_)
|
||||||
|
- Replace StringIO imports with six.moves. (`#3011 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3011>`_)
|
||||||
|
- Updated all Python files to use PEP-3110 exception syntax for Python3 compatibility. (`#3013 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3013>`_)
|
||||||
|
- Update raise syntax for Python3 compatibility. (`#3014 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3014>`_)
|
||||||
|
- Updated instances of octal literals to use the format 0o123 for Python3 compatibility. (`#3015 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3015>`_)
|
||||||
|
- allmydata.test.no_network, allmydata.test.test_system, and allmydata.test.web.test_introducer are now more reliable with respect to bound address collisions. (`#3016 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3016>`_)
|
||||||
|
- Removed tuple unpacking from function and lambda definitions for Python3 compatibility. (`#3019 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3019>`_)
|
||||||
|
- Updated Python2 long numeric literals for Python3 compatibility. (`#3020 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3020>`_)
|
||||||
|
- CircleCI jobs are now faster as a result of pre-building configured Docker images for the CI jobs. (`#3024 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3024>`_)
|
||||||
|
- Removed used of backticks for "repr" for Python3 compatibility. (`#3027 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3027>`_)
|
||||||
|
- Updated string literal syntax for Python3 compatibility. (`#3028 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3028>`_)
|
||||||
|
- Updated CI to enforce Python3 syntax for entire repo. (`#3030 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3030>`_)
|
||||||
|
- Replaced pycryptopp with cryptography. (`#3031 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3031>`_)
|
||||||
|
- All old-style classes ported to new-style. (`#3042 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3042>`_)
|
||||||
|
- Whitelisted "/bin/mv" as command for codechecks performed by tox. This fixes a current warning and prevents future errors (for tox 4). (`#3043 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3043>`_)
|
||||||
|
- Progress towards Python 3 compatibility is now visible at <https://tahoe-lafs.github.io/tahoe-depgraph/>. (`#3152 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3152>`_)
|
||||||
|
- Collect coverage information from integration tests (`#3234 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3234>`_)
|
||||||
|
- NixOS is now a supported Tahoe-LAFS platform. (`#3266 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3266>`_)
|
||||||
|
|
||||||
|
|
||||||
|
Misc/Other
|
||||||
|
----------
|
||||||
|
|
||||||
|
- `#1893 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1893>`_, `#2266 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2266>`_, `#2283 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2283>`_, `#2766 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2766>`_, `#2980 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2980>`_, `#2985 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2985>`_, `#2986 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2986>`_, `#2987 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2987>`_, `#2988 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2988>`_, `#2989 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2989>`_, `#2990 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2990>`_, `#2991 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2991>`_, `#2992 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2992>`_, `#2995 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2995>`_, `#3000 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3000>`_, `#3004 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3004>`_, `#3005 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3005>`_, `#3007 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3007>`_, `#3012 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3012>`_, `#3017 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3017>`_, `#3021 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3021>`_, `#3023 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3023>`_, `#3025 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3025>`_, `#3026 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3026>`_, `#3029 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3029>`_, `#3036 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3036>`_, `#3038 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3038>`_, `#3048 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3048>`_, `#3086 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3086>`_, `#3097 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3097>`_, `#3111 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3111>`_, `#3118 
<https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3118>`_, `#3119 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3119>`_, `#3227 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3227>`_, `#3229 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3229>`_, `#3232 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3232>`_, `#3233 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3233>`_, `#3237 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3237>`_, `#3238 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3238>`_, `#3239 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3239>`_, `#3240 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3240>`_, `#3242 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3242>`_, `#3243 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3243>`_, `#3245 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3245>`_, `#3246 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3246>`_, `#3248 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3248>`_, `#3250 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3250>`_, `#3252 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3252>`_, `#3255 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3255>`_, `#3256 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3256>`_, `#3259 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3259>`_, `#3261 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3261>`_, `#3262 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3262>`_, `#3263 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3263>`_, `#3264 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3264>`_, `#3265 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3265>`_, `#3267 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3267>`_, `#3268 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3268>`_, `#3271 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3271>`_, `#3272 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3272>`_, `#3274 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3274>`_, `#3275 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3275>`_, `#3276 
<https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3276>`_, `#3279 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3279>`_, `#3281 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3281>`_, `#3282 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3282>`_, `#3285 <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3285>`_
|
||||||
|
|
||||||
|
|
||||||
Release 1.13.0 (05-August-2018)
|
Release 1.13.0 (05-August-2018)
|
||||||
'''''''''''''''''''''''''''''''
|
'''''''''''''''''''''''''''''''
|
||||||
|
|
||||||
|
@ -10,7 +10,8 @@ function correctly, preserving your privacy and security.
|
|||||||
For full documentation, please see
|
For full documentation, please see
|
||||||
http://tahoe-lafs.readthedocs.io/en/latest/ .
|
http://tahoe-lafs.readthedocs.io/en/latest/ .
|
||||||
|
|
||||||
|readthedocs| |travis| |circleci| |codecov|
|
|Contributor Covenant| |readthedocs| |travis| |circleci| |codecov|
|
||||||
|
|
||||||
|
|
||||||
INSTALLING
|
INSTALLING
|
||||||
==========
|
==========
|
||||||
@ -105,3 +106,7 @@ slides.
|
|||||||
.. |codecov| image:: https://codecov.io/github/tahoe-lafs/tahoe-lafs/coverage.svg?branch=master
|
.. |codecov| image:: https://codecov.io/github/tahoe-lafs/tahoe-lafs/coverage.svg?branch=master
|
||||||
:alt: test coverage percentage
|
:alt: test coverage percentage
|
||||||
:target: https://codecov.io/github/tahoe-lafs/tahoe-lafs?branch=master
|
:target: https://codecov.io/github/tahoe-lafs/tahoe-lafs?branch=master
|
||||||
|
|
||||||
|
.. |Contributor Covenant| image:: https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg
|
||||||
|
:alt: code of conduct
|
||||||
|
:target: docs/CODE_OF_CONDUCT.md
|
||||||
|
54
docs/CODE_OF_CONDUCT.md
Normal file
54
docs/CODE_OF_CONDUCT.md
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
# Contributor Code of Conduct
|
||||||
|
|
||||||
|
As contributors and maintainers of this project, and in the interest of
|
||||||
|
fostering an open and welcoming community, we pledge to respect all people who
|
||||||
|
contribute through reporting issues, posting feature requests, updating
|
||||||
|
documentation, submitting pull requests or patches, and other activities.
|
||||||
|
|
||||||
|
We are committed to making participation in this project a harassment-free
|
||||||
|
experience for everyone, regardless of level of experience, gender, gender
|
||||||
|
identity and expression, sexual orientation, disability, personal appearance,
|
||||||
|
body size, race, ethnicity, age, religion, or nationality.
|
||||||
|
|
||||||
|
Examples of unacceptable behavior by participants include:
|
||||||
|
|
||||||
|
* The use of sexualized language or imagery
|
||||||
|
* Personal attacks
|
||||||
|
* Trolling or insulting/derogatory comments
|
||||||
|
* Public or private harassment
|
||||||
|
* Publishing other's private information, such as physical or electronic
|
||||||
|
addresses, without explicit permission
|
||||||
|
* Other unethical or unprofessional conduct
|
||||||
|
|
||||||
|
Project maintainers have the right and responsibility to remove, edit, or
|
||||||
|
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||||
|
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||||
|
permanently any contributor for other behaviors that they deem inappropriate,
|
||||||
|
threatening, offensive, or harmful.
|
||||||
|
|
||||||
|
By adopting this Code of Conduct, project maintainers commit themselves to
|
||||||
|
fairly and consistently applying these principles to every aspect of managing
|
||||||
|
this project. Project maintainers who do not follow or enforce the Code of
|
||||||
|
Conduct may be permanently removed from the project team.
|
||||||
|
|
||||||
|
This Code of Conduct applies both within project spaces and in public spaces
|
||||||
|
when an individual is representing the project or its community.
|
||||||
|
|
||||||
|
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||||
|
reported by contacting a project maintainer (see below). All
|
||||||
|
complaints will be reviewed and investigated and will result in a response that
|
||||||
|
is deemed necessary and appropriate to the circumstances. Maintainers are
|
||||||
|
obligated to maintain confidentiality with regard to the reporter of an
|
||||||
|
incident.
|
||||||
|
|
||||||
|
The following community members have made themselves available for conduct issues:
|
||||||
|
|
||||||
|
- Jean-Paul Calderone (jean-paul at leastauthority dot com)
|
||||||
|
- meejah (meejah at meejah dot ca)
|
||||||
|
|
||||||
|
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
||||||
|
version 1.3.0, available at
|
||||||
|
[http://contributor-covenant.org/version/1/3/0/][version]
|
||||||
|
|
||||||
|
[homepage]: http://contributor-covenant.org
|
||||||
|
[version]: http://contributor-covenant.org/version/1/3/0/
|
@ -163,7 +163,7 @@ from PyPI with ``venv/bin/pip install tahoe-lafs``. After installation, run
|
|||||||
Successfully installed ...
|
Successfully installed ...
|
||||||
|
|
||||||
% venv/bin/tahoe --version
|
% venv/bin/tahoe --version
|
||||||
tahoe-lafs: 1.13.0
|
tahoe-lafs: 1.14.0
|
||||||
foolscap: ...
|
foolscap: ...
|
||||||
|
|
||||||
%
|
%
|
||||||
@ -183,14 +183,14 @@ You can also install directly from the source tarball URL::
|
|||||||
New python executable in ~/venv/bin/python2.7
|
New python executable in ~/venv/bin/python2.7
|
||||||
Installing setuptools, pip, wheel...done.
|
Installing setuptools, pip, wheel...done.
|
||||||
|
|
||||||
% venv/bin/pip install https://tahoe-lafs.org/downloads/tahoe-lafs-1.13.0.tar.bz2
|
% venv/bin/pip install https://tahoe-lafs.org/downloads/tahoe-lafs-1.14.0.tar.bz2
|
||||||
Collecting https://tahoe-lafs.org/downloads/tahoe-lafs-1.13.0.tar.bz2
|
Collecting https://tahoe-lafs.org/downloads/tahoe-lafs-1.14.0.tar.bz2
|
||||||
...
|
...
|
||||||
Installing collected packages: ...
|
Installing collected packages: ...
|
||||||
Successfully installed ...
|
Successfully installed ...
|
||||||
|
|
||||||
% venv/bin/tahoe --version
|
% venv/bin/tahoe --version
|
||||||
tahoe-lafs: 1.13.0
|
tahoe-lafs: 1.14.0
|
||||||
...
|
...
|
||||||
|
|
||||||
Extras
|
Extras
|
||||||
@ -224,7 +224,7 @@ the additional libraries needed to run the unit tests::
|
|||||||
Successfully installed ...
|
Successfully installed ...
|
||||||
|
|
||||||
% venv/bin/tahoe --version
|
% venv/bin/tahoe --version
|
||||||
tahoe-lafs: 1.13.0.post34.dev0
|
tahoe-lafs: 1.14.0.post34.dev0
|
||||||
...
|
...
|
||||||
|
|
||||||
This way, you won't have to re-run the ``pip install`` step each time you
|
This way, you won't have to re-run the ``pip install`` step each time you
|
||||||
@ -273,7 +273,7 @@ result in a "all tests passed" mesage::
|
|||||||
% tox
|
% tox
|
||||||
GLOB sdist-make: ~/tahoe-lafs/setup.py
|
GLOB sdist-make: ~/tahoe-lafs/setup.py
|
||||||
py27 recreate: ~/tahoe-lafs/.tox/py27
|
py27 recreate: ~/tahoe-lafs/.tox/py27
|
||||||
py27 inst: ~/tahoe-lafs/.tox/dist/tahoe-lafs-1.13.0.post8.dev0.zip
|
py27 inst: ~/tahoe-lafs/.tox/dist/tahoe-lafs-1.14.0.post8.dev0.zip
|
||||||
py27 runtests: commands[0] | tahoe --version
|
py27 runtests: commands[0] | tahoe --version
|
||||||
py27 runtests: commands[1] | trial --rterrors allmydata
|
py27 runtests: commands[1] | trial --rterrors allmydata
|
||||||
allmydata.test.test_auth
|
allmydata.test.test_auth
|
||||||
@ -286,7 +286,7 @@ result in a "all tests passed" mesage::
|
|||||||
PASSED (skips=7, expectedFailures=3, successes=1176)
|
PASSED (skips=7, expectedFailures=3, successes=1176)
|
||||||
__________________________ summary ___________________________________
|
__________________________ summary ___________________________________
|
||||||
py27: commands succeeded
|
py27: commands succeeded
|
||||||
congratulations :)
|
congratulations :)
|
||||||
|
|
||||||
Common Problems
|
Common Problems
|
||||||
===============
|
===============
|
||||||
|
@ -82,7 +82,6 @@ Client/server nodes provide one or more of the following services:
|
|||||||
* web-API service
|
* web-API service
|
||||||
* SFTP service
|
* SFTP service
|
||||||
* FTP service
|
* FTP service
|
||||||
* Magic Folder service
|
|
||||||
* helper service
|
* helper service
|
||||||
* storage service.
|
* storage service.
|
||||||
|
|
||||||
@ -719,12 +718,6 @@ SFTP, FTP
|
|||||||
for instructions on configuring these services, and the ``[sftpd]`` and
|
for instructions on configuring these services, and the ``[sftpd]`` and
|
||||||
``[ftpd]`` sections of ``tahoe.cfg``.
|
``[ftpd]`` sections of ``tahoe.cfg``.
|
||||||
|
|
||||||
Magic Folder
|
|
||||||
|
|
||||||
A node running on Linux or Windows can be configured to automatically
|
|
||||||
upload files that are created or changed in a specified local directory.
|
|
||||||
See :doc:`frontends/magic-folder` for details.
|
|
||||||
|
|
||||||
|
|
||||||
Storage Server Configuration
|
Storage Server Configuration
|
||||||
============================
|
============================
|
||||||
|
89
docs/developer-guide.rst
Normal file
89
docs/developer-guide.rst
Normal file
@ -0,0 +1,89 @@
|
|||||||
|
Developer Guide
|
||||||
|
===============
|
||||||
|
|
||||||
|
|
||||||
|
Pre-commit Checks
|
||||||
|
-----------------
|
||||||
|
|
||||||
|
This project is configured for use with `pre-commit`_ to install `VCS/git hooks`_ which
|
||||||
|
perform some static code analysis checks and other code checks to catch common errors
|
||||||
|
before each commit and to run the full self-test suite to find less obvious regressions
|
||||||
|
before each push to a remote.
|
||||||
|
|
||||||
|
For example::
|
||||||
|
|
||||||
|
tahoe-lafs $ make install-vcs-hooks
|
||||||
|
...
|
||||||
|
+ ./.tox//py36/bin/pre-commit install --hook-type pre-commit
|
||||||
|
pre-commit installed at .git/hooks/pre-commit
|
||||||
|
+ ./.tox//py36/bin/pre-commit install --hook-type pre-push
|
||||||
|
pre-commit installed at .git/hooks/pre-push
|
||||||
|
tahoe-lafs $ python -c "import pathlib; pathlib.Path('src/allmydata/tabbed.py').write_text('def foo():\\n\\tpass\\n')"
|
||||||
|
tahoe-lafs $ git add src/allmydata/tabbed.py
|
||||||
|
tahoe-lafs $ git commit -a -m "Add a file that violates flake8"
|
||||||
|
...
|
||||||
|
codechecks...............................................................Failed
|
||||||
|
- hook id: codechecks
|
||||||
|
- exit code: 1
|
||||||
|
|
||||||
|
GLOB sdist-make: ./tahoe-lafs/setup.py
|
||||||
|
codechecks inst-nodeps: ...
|
||||||
|
codechecks installed: ...
|
||||||
|
codechecks run-test-pre: PYTHONHASHSEED='...'
|
||||||
|
codechecks run-test: commands[0] | flake8 src static misc setup.py
|
||||||
|
src/allmydata/tabbed.py:2:1: W191 indentation contains tabs
|
||||||
|
ERROR: InvocationError for command ./tahoe-lafs/.tox/codechecks/bin/flake8 src static misc setup.py (exited with code 1)
|
||||||
|
___________________________________ summary ____________________________________
|
||||||
|
ERROR: codechecks: commands failed
|
||||||
|
...
|
||||||
|
|
||||||
|
To uninstall::
|
||||||
|
|
||||||
|
tahoe-lafs $ make uninstall-vcs-hooks
|
||||||
|
...
|
||||||
|
+ ./.tox/py36/bin/pre-commit uninstall
|
||||||
|
pre-commit uninstalled
|
||||||
|
+ ./.tox/py36/bin/pre-commit uninstall -t pre-push
|
||||||
|
pre-push uninstalled
|
||||||
|
|
||||||
|
Note that running the full self-test suite takes several minutes so expect pushing to
|
||||||
|
take some time. If you can't or don't want to wait for the hooks in some cases, use the
|
||||||
|
``--no-verify`` option to ``$ git commit ...`` or ``$ git push ...``. Alternatively,
|
||||||
|
see the `pre-commit`_ documentation and CLI help output and use the committed
|
||||||
|
`pre-commit configuration`_ as a starting point to write a local, uncommitted
|
||||||
|
``../.pre-commit-config.local.yaml`` configuration to use instead. For example::
|
||||||
|
|
||||||
|
tahoe-lafs $ ./.tox/py36/bin/pre-commit --help
|
||||||
|
tahoe-lafs $ ./.tox/py36/bin/pre-commit instll --help
|
||||||
|
tahoe-lafs $ cp "./.pre-commit-config.yaml" "./.pre-commit-config.local.yaml"
|
||||||
|
tahoe-lafs $ editor "./.pre-commit-config.local.yaml"
|
||||||
|
...
|
||||||
|
tahoe-lafs $ ./.tox/py36/bin/pre-commit install -c "./.pre-commit-config.local.yaml" -t pre-push
|
||||||
|
pre-commit installed at .git/hooks/pre-push
|
||||||
|
tahoe-lafs $ git commit -a -m "Add a file that violates flake8"
|
||||||
|
[3398.pre-commit 29f8f43d2] Add a file that violates flake8
|
||||||
|
1 file changed, 2 insertions(+)
|
||||||
|
create mode 100644 src/allmydata/tabbed.py
|
||||||
|
tahoe-lafs $ git push
|
||||||
|
...
|
||||||
|
codechecks...............................................................Failed
|
||||||
|
- hook id: codechecks
|
||||||
|
- exit code: 1
|
||||||
|
|
||||||
|
GLOB sdist-make: ./tahoe-lafs/setup.py
|
||||||
|
codechecks inst-nodeps: ...
|
||||||
|
codechecks installed: ...
|
||||||
|
codechecks run-test-pre: PYTHONHASHSEED='...'
|
||||||
|
codechecks run-test: commands[0] | flake8 src static misc setup.py
|
||||||
|
src/allmydata/tabbed.py:2:1: W191 indentation contains tabs
|
||||||
|
ERROR: InvocationError for command ./tahoe-lafs/.tox/codechecks/bin/flake8 src static misc setup.py (exited with code 1)
|
||||||
|
___________________________________ summary ____________________________________
|
||||||
|
ERROR: codechecks: commands failed
|
||||||
|
...
|
||||||
|
|
||||||
|
error: failed to push some refs to 'github.com:jaraco/tahoe-lafs.git'
|
||||||
|
|
||||||
|
|
||||||
|
.. _`pre-commit`: https://pre-commit.com
|
||||||
|
.. _`VCS/git hooks`: `pre-commit`_
|
||||||
|
.. _`pre-commit configuration`: ../.pre-commit-config.yaml
|
@ -1,148 +0,0 @@
|
|||||||
.. -*- coding: utf-8-with-signature -*-
|
|
||||||
|
|
||||||
================================
|
|
||||||
Tahoe-LAFS Magic Folder Frontend
|
|
||||||
================================
|
|
||||||
|
|
||||||
1. `Introduction`_
|
|
||||||
2. `Configuration`_
|
|
||||||
3. `Known Issues and Limitations With Magic-Folder`_
|
|
||||||
|
|
||||||
|
|
||||||
Introduction
|
|
||||||
============
|
|
||||||
|
|
||||||
The Magic Folder frontend synchronizes local directories on two or more
|
|
||||||
clients, using a Tahoe-LAFS grid for storage. Whenever a file is created
|
|
||||||
or changed under the local directory of one of the clients, the change is
|
|
||||||
propagated to the grid and then to the other clients.
|
|
||||||
|
|
||||||
The implementation of the "drop-upload" frontend, on which Magic Folder is
|
|
||||||
based, was written as a prototype at the First International Tahoe-LAFS
|
|
||||||
Summit in June 2011. In 2015, with the support of a grant from the
|
|
||||||
`Open Technology Fund`_, it was redesigned and extended to support
|
|
||||||
synchronization between clients. It currently works on Linux and Windows.
|
|
||||||
|
|
||||||
Magic Folder is not currently in as mature a state as the other frontends
|
|
||||||
(web, CLI, SFTP and FTP). This means that you probably should not rely on
|
|
||||||
all changes to files in the local directory to result in successful uploads.
|
|
||||||
There might be (and have been) incompatible changes to how the feature is
|
|
||||||
configured.
|
|
||||||
|
|
||||||
We are very interested in feedback on how well this feature works for you, and
|
|
||||||
suggestions to improve its usability, functionality, and reliability.
|
|
||||||
|
|
||||||
.. _`Open Technology Fund`: https://www.opentech.fund/
|
|
||||||
|
|
||||||
|
|
||||||
Configuration
|
|
||||||
=============
|
|
||||||
|
|
||||||
The Magic Folder frontend runs as part of a gateway node. To set it up, you
|
|
||||||
must use the tahoe magic-folder CLI. For detailed information see our
|
|
||||||
:doc:`Magic-Folder CLI design
|
|
||||||
documentation<../proposed/magic-folder/user-interface-design>`. For a
|
|
||||||
given Magic-Folder collective directory you need to run the ``tahoe
|
|
||||||
magic-folder create`` command. After that the ``tahoe magic-folder invite``
|
|
||||||
command must used to generate an *invite code* for each member of the
|
|
||||||
magic-folder collective. A confidential, authenticated communications channel
|
|
||||||
should be used to transmit the invite code to each member, who will be
|
|
||||||
joining using the ``tahoe magic-folder join`` command.
|
|
||||||
|
|
||||||
These settings are persisted in the ``[magic_folder]`` section of the
|
|
||||||
gateway's ``tahoe.cfg`` file.
|
|
||||||
|
|
||||||
``[magic_folder]``
|
|
||||||
|
|
||||||
``enabled = (boolean, optional)``
|
|
||||||
|
|
||||||
If this is ``True``, Magic Folder will be enabled. The default value is
|
|
||||||
``False``.
|
|
||||||
|
|
||||||
``local.directory = (UTF-8 path)``
|
|
||||||
|
|
||||||
This specifies the local directory to be monitored for new or changed
|
|
||||||
files. If the path contains non-ASCII characters, it should be encoded
|
|
||||||
in UTF-8 regardless of the system's filesystem encoding. Relative paths
|
|
||||||
will be interpreted starting from the node's base directory.
|
|
||||||
|
|
||||||
You should not normally need to set these fields manually because they are
|
|
||||||
set by the ``tahoe magic-folder create`` and/or ``tahoe magic-folder join``
|
|
||||||
commands. Use the ``--help`` option to these commands for more information.
|
|
||||||
|
|
||||||
After setting up a Magic Folder collective and starting or restarting each
|
|
||||||
gateway, you can confirm that the feature is working by copying a file into
|
|
||||||
any local directory, and checking that it appears on other clients.
|
|
||||||
Large files may take some time to appear.
|
|
||||||
|
|
||||||
The 'Operational Statistics' page linked from the Welcome page shows counts
|
|
||||||
of the number of files uploaded, the number of change events currently
|
|
||||||
queued, and the number of failed uploads. The 'Recent Uploads and Downloads'
|
|
||||||
page and the node :doc:`log<../logging>` may be helpful to determine the
|
|
||||||
cause of any failures.
|
|
||||||
|
|
||||||
|
|
||||||
.. _Known Issues in Magic-Folder:
|
|
||||||
|
|
||||||
Known Issues and Limitations With Magic-Folder
|
|
||||||
==============================================
|
|
||||||
|
|
||||||
This feature only works on Linux and Windows. There is a ticket to add
|
|
||||||
support for Mac OS X and BSD-based systems (`#1432`_).
|
|
||||||
|
|
||||||
The only way to determine whether uploads have failed is to look at the
|
|
||||||
'Operational Statistics' page linked from the Welcome page. This only shows
|
|
||||||
a count of failures, not the names of files. Uploads are never retried.
|
|
||||||
|
|
||||||
The Magic Folder frontend performs its uploads sequentially (i.e. it waits
|
|
||||||
until each upload is finished before starting the next), even when there
|
|
||||||
would be enough memory and bandwidth to efficiently perform them in parallel.
|
|
||||||
A Magic Folder upload can occur in parallel with an upload by a different
|
|
||||||
frontend, though. (`#1459`_)
|
|
||||||
|
|
||||||
On Linux, if there are a large number of near-simultaneous file creation or
|
|
||||||
change events (greater than the number specified in the file
|
|
||||||
``/proc/sys/fs/inotify/max_queued_events``), it is possible that some events
|
|
||||||
could be missed. This is fairly unlikely under normal circumstances, because
|
|
||||||
the default value of ``max_queued_events`` in most Linux distributions is
|
|
||||||
16384, and events are removed from this queue immediately without waiting for
|
|
||||||
the corresponding upload to complete. (`#1430`_)
|
|
||||||
|
|
||||||
The Windows implementation might also occasionally miss file creation or
|
|
||||||
change events, due to limitations of the underlying Windows API
|
|
||||||
(ReadDirectoryChangesW). We do not know how likely or unlikely this is.
|
|
||||||
(`#1431`_)
|
|
||||||
|
|
||||||
Some filesystems may not support the necessary change notifications.
|
|
||||||
So, it is recommended for the local directory to be on a directly attached
|
|
||||||
disk-based filesystem, not a network filesystem or one provided by a virtual
|
|
||||||
machine.
|
|
||||||
|
|
||||||
The ``private/magic_folder_dircap`` and ``private/collective_dircap`` files
|
|
||||||
cannot use an alias or path to specify the upload directory. (`#1711`_)
|
|
||||||
|
|
||||||
If a file in the upload directory is changed (actually relinked to a new
|
|
||||||
file), then the old file is still present on the grid, and any other caps
|
|
||||||
to it will remain valid. Eventually it will be possible to use
|
|
||||||
:doc:`../garbage-collection` to reclaim the space used by these files; however
|
|
||||||
currently they are retained indefinitely. (`#2440`_)
|
|
||||||
|
|
||||||
Unicode filenames are supported on both Linux and Windows, but on Linux, the
|
|
||||||
local name of a file must be encoded correctly in order for it to be uploaded.
|
|
||||||
The expected encoding is that printed by
|
|
||||||
``python -c "import sys; print sys.getfilesystemencoding()"``.
|
|
||||||
|
|
||||||
On Windows, local directories with non-ASCII names are not currently working.
|
|
||||||
(`#2219`_)
|
|
||||||
|
|
||||||
On Windows, when a node has Magic Folder enabled, it is unresponsive to Ctrl-C
|
|
||||||
(it can only be killed using Task Manager or similar). (`#2218`_)
|
|
||||||
|
|
||||||
.. _`#1430`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1430
|
|
||||||
.. _`#1431`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1431
|
|
||||||
.. _`#1432`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1432
|
|
||||||
.. _`#1459`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1459
|
|
||||||
.. _`#1711`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1711
|
|
||||||
.. _`#2218`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2218
|
|
||||||
.. _`#2219`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2219
|
|
||||||
.. _`#2440`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2440
|
|
@ -17,13 +17,14 @@ people are Release Maintainers:
|
|||||||
- [ ] all appveyor checks pass
|
- [ ] all appveyor checks pass
|
||||||
- [ ] all buildbot workers pass their checks
|
- [ ] all buildbot workers pass their checks
|
||||||
|
|
||||||
* freeze master branch [0/]
|
* freeze master branch [0/1]
|
||||||
- [ ] announced the freeze of the master branch on IRC (i.e. non-release PRs won't be merged until after release)
|
- [ ] announced the freeze of the master branch on IRC (i.e. non-release PRs won't be merged until after release)
|
||||||
|
|
||||||
* sync documentation [0/7]
|
* sync documentation [0/7]
|
||||||
- [ ] NEWS.rst: summarize user-visible changes, aim for one page of text
|
|
||||||
|
- [ ] NEWS.rst: (run "tox -e news")
|
||||||
- [ ] added final release name and date to top-most item in NEWS.rst
|
- [ ] added final release name and date to top-most item in NEWS.rst
|
||||||
- [ ] updated relnotes.txt
|
- [ ] updated relnotes.txt (change next, last versions; summarize NEWS)
|
||||||
- [ ] updated CREDITS
|
- [ ] updated CREDITS
|
||||||
- [ ] updated docs/known_issues.rst
|
- [ ] updated docs/known_issues.rst
|
||||||
- [ ] docs/INSTALL.rst only points to current tahoe-lafs-X.Y.Z.tar.gz source code file
|
- [ ] docs/INSTALL.rst only points to current tahoe-lafs-X.Y.Z.tar.gz source code file
|
||||||
@ -35,7 +36,7 @@ people are Release Maintainers:
|
|||||||
- [ ] documentation is ready (see above)
|
- [ ] documentation is ready (see above)
|
||||||
- [ ] (Release Maintainer): git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-X.Y.Z" tahoe-lafs-X.Y.Z
|
- [ ] (Release Maintainer): git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-X.Y.Z" tahoe-lafs-X.Y.Z
|
||||||
- [ ] build code locally:
|
- [ ] build code locally:
|
||||||
tox -e py27,codechecks,coverage,deprecations,docs,integration,upcoming-deprecations
|
tox -e py27,codechecks,deprecations,docs,integration,upcoming-deprecations
|
||||||
- [ ] created tarballs (they'll be in dist/ for later comparison)
|
- [ ] created tarballs (they'll be in dist/ for later comparison)
|
||||||
tox -e tarballs
|
tox -e tarballs
|
||||||
- [ ] release version is reporting itself as intended version
|
- [ ] release version is reporting itself as intended version
|
||||||
|
@ -20,11 +20,11 @@ Contents:
|
|||||||
frontends/CLI
|
frontends/CLI
|
||||||
frontends/webapi
|
frontends/webapi
|
||||||
frontends/FTP-and-SFTP
|
frontends/FTP-and-SFTP
|
||||||
frontends/magic-folder
|
|
||||||
frontends/download-status
|
frontends/download-status
|
||||||
|
|
||||||
known_issues
|
known_issues
|
||||||
../.github/CONTRIBUTING
|
../.github/CONTRIBUTING
|
||||||
|
CODE_OF_CONDUCT
|
||||||
|
|
||||||
servers
|
servers
|
||||||
helper
|
helper
|
||||||
@ -37,9 +37,10 @@ Contents:
|
|||||||
expenses
|
expenses
|
||||||
cautions
|
cautions
|
||||||
write_coordination
|
write_coordination
|
||||||
magic-folder-howto
|
|
||||||
backupdb
|
backupdb
|
||||||
|
|
||||||
|
developer-guide
|
||||||
|
|
||||||
anonymity-configuration
|
anonymity-configuration
|
||||||
|
|
||||||
nodekeys
|
nodekeys
|
||||||
|
@ -1,176 +0,0 @@
|
|||||||
.. _magic-folder-howto:
|
|
||||||
|
|
||||||
=========================
|
|
||||||
Magic Folder Set-up Howto
|
|
||||||
=========================
|
|
||||||
|
|
||||||
#. `This document`_
|
|
||||||
#. `Setting up a local test grid`_
|
|
||||||
#. `Setting up Magic Folder`_
|
|
||||||
#. `Testing`_
|
|
||||||
|
|
||||||
|
|
||||||
This document
|
|
||||||
=============
|
|
||||||
|
|
||||||
This is preliminary documentation of how to set up Magic Folder using a test
|
|
||||||
grid on a single Linux or Windows machine, with two clients and one server.
|
|
||||||
It is aimed at a fairly technical audience.
|
|
||||||
|
|
||||||
For an introduction to Magic Folder and how to configure it
|
|
||||||
more generally, see :doc:`frontends/magic-folder`.
|
|
||||||
|
|
||||||
It it possible to adapt these instructions to run the nodes on
|
|
||||||
different machines, to synchronize between three or more clients,
|
|
||||||
to mix Windows and Linux clients, and to use multiple servers
|
|
||||||
(if the Tahoe-LAFS encoding parameters are changed).
|
|
||||||
|
|
||||||
|
|
||||||
Setting up a local test grid
|
|
||||||
============================
|
|
||||||
|
|
||||||
Linux
|
|
||||||
-----
|
|
||||||
|
|
||||||
Run these commands::
|
|
||||||
|
|
||||||
mkdir ../grid
|
|
||||||
bin/tahoe create-introducer ../grid/introducer
|
|
||||||
bin/tahoe start ../grid/introducer
|
|
||||||
export FURL=`cat ../grid/introducer/private/introducer.furl`
|
|
||||||
bin/tahoe create-node --introducer="$FURL" ../grid/server
|
|
||||||
bin/tahoe create-client --introducer="$FURL" ../grid/alice
|
|
||||||
bin/tahoe create-client --introducer="$FURL" ../grid/bob
|
|
||||||
|
|
||||||
|
|
||||||
Windows
|
|
||||||
-------
|
|
||||||
|
|
||||||
Run::
|
|
||||||
|
|
||||||
mkdir ..\grid
|
|
||||||
bin\tahoe create-introducer ..\grid\introducer
|
|
||||||
bin\tahoe start ..\grid\introducer
|
|
||||||
|
|
||||||
Leave the introducer running in that Command Prompt,
|
|
||||||
and in a separate Command Prompt (with the same current
|
|
||||||
directory), run::
|
|
||||||
|
|
||||||
set /p FURL=<..\grid\introducer\private\introducer.furl
|
|
||||||
bin\tahoe create-node --introducer=%FURL% ..\grid\server
|
|
||||||
bin\tahoe create-client --introducer=%FURL% ..\grid\alice
|
|
||||||
bin\tahoe create-client --introducer=%FURL% ..\grid\bob
|
|
||||||
|
|
||||||
|
|
||||||
Both Linux and Windows
|
|
||||||
----------------------
|
|
||||||
|
|
||||||
(Replace ``/`` with ``\`` for Windows paths.)
|
|
||||||
|
|
||||||
Edit ``../grid/alice/tahoe.cfg``, and make the following
|
|
||||||
changes to the ``[node]`` and ``[client]`` sections::
|
|
||||||
|
|
||||||
[node]
|
|
||||||
nickname = alice
|
|
||||||
web.port = tcp:3457:interface=127.0.0.1
|
|
||||||
|
|
||||||
[client]
|
|
||||||
shares.needed = 1
|
|
||||||
shares.happy = 1
|
|
||||||
shares.total = 1
|
|
||||||
|
|
||||||
Edit ``../grid/bob/tahoe.cfg``, and make the following
|
|
||||||
change to the ``[node]`` section, and the same change as
|
|
||||||
above to the ``[client]`` section::
|
|
||||||
|
|
||||||
[node]
|
|
||||||
nickname = bob
|
|
||||||
web.port = tcp:3458:interface=127.0.0.1
|
|
||||||
|
|
||||||
Note that when running nodes on a single machine,
|
|
||||||
unique port numbers must be used for each node (and they
|
|
||||||
must not clash with ports used by other server software).
|
|
||||||
Here we have used the default of 3456 for the server,
|
|
||||||
3457 for alice, and 3458 for bob.
|
|
||||||
|
|
||||||
Now start all of the nodes (the introducer should still be
|
|
||||||
running from above)::
|
|
||||||
|
|
||||||
bin/tahoe start ../grid/server
|
|
||||||
bin/tahoe start ../grid/alice
|
|
||||||
bin/tahoe start ../grid/bob
|
|
||||||
|
|
||||||
On Windows, a separate Command Prompt is needed to run each
|
|
||||||
node.
|
|
||||||
|
|
||||||
Open a web browser on http://127.0.0.1:3457/ and verify that
|
|
||||||
alice is connected to the introducer and one storage server.
|
|
||||||
Then do the same for http://127.0.0.1:3568/ to verify that
|
|
||||||
bob is connected. Leave all of the nodes running for the
|
|
||||||
next stage.
|
|
||||||
|
|
||||||
|
|
||||||
Setting up Magic Folder
|
|
||||||
=======================
|
|
||||||
|
|
||||||
Linux
|
|
||||||
-----
|
|
||||||
|
|
||||||
Run::
|
|
||||||
|
|
||||||
mkdir -p ../local/alice ../local/bob
|
|
||||||
bin/tahoe -d ../grid/alice magic-folder create magic: alice ../local/alice
|
|
||||||
bin/tahoe -d ../grid/alice magic-folder invite magic: bob >invitecode
|
|
||||||
export INVITECODE=`cat invitecode`
|
|
||||||
bin/tahoe -d ../grid/bob magic-folder join "$INVITECODE" ../local/bob
|
|
||||||
|
|
||||||
bin/tahoe restart ../grid/alice
|
|
||||||
bin/tahoe restart ../grid/bob
|
|
||||||
|
|
||||||
Windows
|
|
||||||
-------
|
|
||||||
|
|
||||||
Run::
|
|
||||||
|
|
||||||
mkdir ..\local\alice ..\local\bob
|
|
||||||
bin\tahoe -d ..\grid\alice magic-folder create magic: alice ..\local\alice
|
|
||||||
bin\tahoe -d ..\grid\alice magic-folder invite magic: bob >invitecode
|
|
||||||
set /p INVITECODE=<invitecode
|
|
||||||
bin\tahoe -d ..\grid\bob magic-folder join %INVITECODE% ..\local\bob
|
|
||||||
|
|
||||||
Then close the Command Prompt windows that are running the alice and bob
|
|
||||||
nodes, and open two new ones in which to run::
|
|
||||||
|
|
||||||
bin\tahoe start ..\grid\alice
|
|
||||||
bin\tahoe start ..\grid\bob
|
|
||||||
|
|
||||||
|
|
||||||
Testing
|
|
||||||
=======
|
|
||||||
|
|
||||||
You can now experiment with creating files and directories in
|
|
||||||
``../local/alice`` and ``/local/bob``; any changes should be
|
|
||||||
propagated to the other directory.
|
|
||||||
|
|
||||||
Note that when a file is deleted, the corresponding file in the
|
|
||||||
other directory will be renamed to a filename ending in ``.backup``.
|
|
||||||
Deleting a directory will have no effect.
|
|
||||||
|
|
||||||
For other known issues and limitations, see :ref:`Known Issues in
|
|
||||||
Magic-Folder`.
|
|
||||||
|
|
||||||
As mentioned earlier, it is also possible to run the nodes on
|
|
||||||
different machines, to synchronize between three or more clients,
|
|
||||||
to mix Windows and Linux clients, and to use multiple servers
|
|
||||||
(if the Tahoe-LAFS encoding parameters are changed).
|
|
||||||
|
|
||||||
|
|
||||||
Configuration
|
|
||||||
=============
|
|
||||||
|
|
||||||
There will be a ``[magic_folder]`` section in your ``tahoe.cfg`` file
|
|
||||||
after setting up Magic Folder.
|
|
||||||
|
|
||||||
There is an option you can add to this called ``poll_interval=`` to
|
|
||||||
control how often (in seconds) the Downloader will check for new things
|
|
||||||
to download.
|
|
@ -19,9 +19,7 @@ Invites and Joins
|
|||||||
|
|
||||||
Inside Tahoe-LAFS we are using a channel created using `magic
|
Inside Tahoe-LAFS we are using a channel created using `magic
|
||||||
wormhole`_ to exchange configuration and the secret fURL of the
|
wormhole`_ to exchange configuration and the secret fURL of the
|
||||||
Introducer with new clients. In the future, we would like to make the
|
Introducer with new clients.
|
||||||
Magic Folder (:ref:`Magic Folder HOWTO <magic-folder-howto>`) invites and joins work this way
|
|
||||||
as well.
|
|
||||||
|
|
||||||
This is a two-part process. Alice runs a grid and wishes to have her
|
This is a two-part process. Alice runs a grid and wishes to have her
|
||||||
friend Bob use it as a client. She runs ``tahoe invite bob`` which
|
friend Bob use it as a client. She runs ``tahoe invite bob`` which
|
||||||
|
@ -14,8 +14,4 @@ index only lists the files that are in .rst format.
|
|||||||
:maxdepth: 2
|
:maxdepth: 2
|
||||||
|
|
||||||
leasedb
|
leasedb
|
||||||
magic-folder/filesystem-integration
|
|
||||||
magic-folder/remote-to-local-sync
|
|
||||||
magic-folder/user-interface-design
|
|
||||||
magic-folder/multi-party-conflict-detection
|
|
||||||
http-storage-node-protocol
|
http-storage-node-protocol
|
||||||
|
@ -1,118 +0,0 @@
|
|||||||
Magic Folder local filesystem integration design
|
|
||||||
================================================
|
|
||||||
|
|
||||||
*Scope*
|
|
||||||
|
|
||||||
This document describes how to integrate the local filesystem with Magic
|
|
||||||
Folder in an efficient and reliable manner. For now we ignore Remote to
|
|
||||||
Local synchronization; the design and implementation of this is scheduled
|
|
||||||
for a later time. We also ignore multiple writers for the same Magic
|
|
||||||
Folder, which may or may not be supported in future. The design here will
|
|
||||||
be updated to account for those features in later Objectives. Objective 3
|
|
||||||
may require modifying the database schema or operation, and Objective 5
|
|
||||||
may modify the User interface.
|
|
||||||
|
|
||||||
Tickets on the Tahoe-LAFS trac with the `otf-magic-folder-objective2`_
|
|
||||||
keyword are within the scope of the local filesystem integration for
|
|
||||||
Objective 2.
|
|
||||||
|
|
||||||
.. _otf-magic-folder-objective2: https://tahoe-lafs.org/trac/tahoe-lafs/query?status=!closed&keywords=~otf-magic-folder-objective2
|
|
||||||
|
|
||||||
.. _filesystem_integration-local-scanning-and-database:
|
|
||||||
|
|
||||||
*Local scanning and database*
|
|
||||||
|
|
||||||
When a Magic-Folder-enabled node starts up, it scans all directories
|
|
||||||
under the local directory and adds every file to a first-in first-out
|
|
||||||
"scan queue". When processing the scan queue, redundant uploads are
|
|
||||||
avoided by using the same mechanism the Tahoe backup command uses: we
|
|
||||||
keep track of previous uploads by recording each file's metadata such as
|
|
||||||
size, ``ctime`` and ``mtime``. This information is stored in a database,
|
|
||||||
referred to from now on as the magic folder db. Using this recorded
|
|
||||||
state, we ensure that when Magic Folder is subsequently started, the
|
|
||||||
local directory tree can be scanned quickly by comparing current
|
|
||||||
filesystem metadata with the previously recorded metadata. Each file
|
|
||||||
referenced in the scan queue is uploaded only if its metadata differs at
|
|
||||||
the time it is processed. If a change event is detected for a file that
|
|
||||||
is already queued (and therefore will be processed later), the redundant
|
|
||||||
event is ignored.
|
|
||||||
|
|
||||||
To implement the magic folder db, we will use an SQLite schema that
|
|
||||||
initially is the existing Tahoe-LAFS backup schema. This schema may
|
|
||||||
change in later objectives; this will cause no backward compatibility
|
|
||||||
problems, because this new feature will be developed on a branch that
|
|
||||||
makes no compatibility guarantees. However we will have a separate SQLite
|
|
||||||
database file and separate mutex lock just for Magic Folder. This avoids
|
|
||||||
usability problems related to mutual exclusion. (If a single file and
|
|
||||||
lock were used, a backup would block Magic Folder updates for a long
|
|
||||||
time, and a user would not be able to tell when backups are possible
|
|
||||||
because Magic Folder would acquire a lock at arbitrary times.)
|
|
||||||
|
|
||||||
|
|
||||||
*Eventual consistency property*
|
|
||||||
|
|
||||||
During the process of reading a file in order to upload it, it is not
|
|
||||||
possible to prevent further local writes. Such writes will result in
|
|
||||||
temporary inconsistency (that is, the uploaded file will not reflect
|
|
||||||
what the contents of the local file were at any specific time). Eventual
|
|
||||||
consistency is reached when the queue of pending uploads is empty. That
|
|
||||||
is, a consistent snapshot will be achieved eventually when local writes
|
|
||||||
to the target folder cease for a sufficiently long period of time.
|
|
||||||
|
|
||||||
|
|
||||||
*Detecting filesystem changes*
|
|
||||||
|
|
||||||
For the Linux implementation, we will use the `inotify`_ Linux kernel
|
|
||||||
subsystem to gather events on the local Magic Folder directory tree. This
|
|
||||||
implementation was already present in Tahoe-LAFS 1.9.0, but needs to be
|
|
||||||
changed to gather directory creation and move events, in addition to the
|
|
||||||
events indicating that a file has been written that are gathered by the
|
|
||||||
current code.
|
|
||||||
|
|
||||||
.. _`inotify`: https://en.wikipedia.org/wiki/Inotify
|
|
||||||
|
|
||||||
For the Windows implementation, we will use the ``ReadDirectoryChangesW``
|
|
||||||
Win32 API. The prototype implementation simulates a Python interface to
|
|
||||||
the inotify API in terms of ``ReadDirectoryChangesW``, allowing most of
|
|
||||||
the code to be shared across platforms.
|
|
||||||
|
|
||||||
The alternative of using `NTFS Change Journals`_ for Windows was
|
|
||||||
considered, but appears to be more complicated and does not provide any
|
|
||||||
additional functionality over the scanning approach described above.
|
|
||||||
The Change Journal mechanism is also only available for NTFS filesystems,
|
|
||||||
but FAT32 filesystems are still common in user installations of Windows.
|
|
||||||
|
|
||||||
.. _`NTFS Change Journals`: https://msdn.microsoft.com/en-us/library/aa363803%28VS.85%29.aspx
|
|
||||||
|
|
||||||
When we detect the creation of a new directory below the local Magic
|
|
||||||
Folder directory, we create it in the Tahoe-LAFS filesystem, and also
|
|
||||||
scan the new local directory for new files. This scan is necessary to
|
|
||||||
avoid missing events for creation of files in a new directory before it
|
|
||||||
can be watched, and to correctly handle cases where an existing directory
|
|
||||||
is moved to be under the local Magic Folder directory.
|
|
||||||
|
|
||||||
|
|
||||||
*User interface*
|
|
||||||
|
|
||||||
The Magic Folder local filesystem integration will initially have a
|
|
||||||
provisional configuration file-based interface that may not be ideal from
|
|
||||||
a usability perspective. Creating our local filesystem integration in
|
|
||||||
this manner will allow us to use and test it independently of the rest of
|
|
||||||
the Magic Folder software components. We will focus greater attention on
|
|
||||||
user interface design as a later milestone in our development roadmap.
|
|
||||||
|
|
||||||
The configuration file, ``tahoe.cfg``, must define a target local
|
|
||||||
directory to be synchronized. Provisionally, this configuration will
|
|
||||||
replace the current ``[drop_upload]`` section::
|
|
||||||
|
|
||||||
[magic_folder]
|
|
||||||
enabled = true
|
|
||||||
local.directory = "/home/human"
|
|
||||||
|
|
||||||
When a filesystem directory is first configured for Magic Folder, the user
|
|
||||||
needs to create the remote Tahoe-LAFS directory using ``tahoe mkdir``,
|
|
||||||
and configure the Magic-Folder-enabled node with its URI (e.g. by putting
|
|
||||||
it in a file ``private/magic_folder_dircap``). If there are existing
|
|
||||||
files in the local directory, they will be uploaded as a result of the
|
|
||||||
initial scan described earlier.
|
|
||||||
|
|
@ -1,373 +0,0 @@
|
|||||||
Multi-party Conflict Detection
|
|
||||||
==============================
|
|
||||||
|
|
||||||
The current Magic-Folder remote conflict detection design does not properly detect remote conflicts
|
|
||||||
for groups of three or more parties. This design is specified in the "Fire Dragon" section of this document:
|
|
||||||
https://github.com/tahoe-lafs/tahoe-lafs/blob/2551.wip.2/docs/proposed/magic-folder/remote-to-local-sync.rst#fire-dragons-distinguishing-conflicts-from-overwrites
|
|
||||||
|
|
||||||
This Tahoe-LAFS trac ticket comment outlines a scenario with
|
|
||||||
three parties in which a remote conflict is falsely detected:
|
|
||||||
|
|
||||||
.. _`ticket comment`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2551#comment:22
|
|
||||||
|
|
||||||
|
|
||||||
Summary and definitions
|
|
||||||
=======================
|
|
||||||
|
|
||||||
Abstract file: a file being shared by a Magic Folder.
|
|
||||||
|
|
||||||
Local file: a file in a client's local filesystem corresponding to an abstract file.
|
|
||||||
|
|
||||||
Relative path: the path of an abstract or local file relative to the Magic Folder root.
|
|
||||||
|
|
||||||
Version: a snapshot of an abstract file, with associated metadata, that is uploaded by a Magic Folder client.
|
|
||||||
|
|
||||||
A version is associated with the file's relative path, its contents, and
|
|
||||||
mtime and ctime timestamps. Versions also have a unique identity.
|
|
||||||
|
|
||||||
Follows relation:
|
|
||||||
* If and only if a change to a client's local file at relative path F that results in an upload of version V',
|
|
||||||
was made when the client already had version V of that file, then we say that V' directly follows V.
|
|
||||||
* The follows relation is the irreflexive transitive closure of the "directly follows" relation.
|
|
||||||
|
|
||||||
The follows relation is transitive and acyclic, and therefore defines a DAG called the
|
|
||||||
Version DAG. Different abstract files correspond to disconnected sets of nodes in the Version DAG
|
|
||||||
(in other words there are no "follows" relations between different files).
|
|
||||||
|
|
||||||
The DAG is only ever extended, not mutated.
|
|
||||||
|
|
||||||
The desired behaviour for initially classifying overwrites and conflicts is as follows:
|
|
||||||
|
|
||||||
* if a client Bob currently has version V of a file at relative path F, and it sees a new version V'
|
|
||||||
of that file in another client Alice's DMD, such that V' follows V, then the write of the new version
|
|
||||||
is initially an overwrite and should be to the same filename.
|
|
||||||
* if, in the same situation, V' does not follow V, then the write of the new version should be
|
|
||||||
classified as a conflict.
|
|
||||||
|
|
||||||
The existing :doc:`remote-to-local-sync` document defines when an initial
|
|
||||||
overwrite should be reclassified as a conflict.
|
|
||||||
|
|
||||||
The above definitions completely specify the desired solution of the false
|
|
||||||
conflict behaviour described in the `ticket comment`_. However, they do not give
|
|
||||||
a concrete algorithm to compute the follows relation, or a representation in the
|
|
||||||
Tahoe-LAFS file store of the metadata needed to compute it.
|
|
||||||
|
|
||||||
We will consider two alternative designs, proposed by Leif Ryge and
|
|
||||||
Zooko Wilcox-O'Hearn, that aim to fill this gap.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Leif's Proposal: Magic-Folder "single-file" snapshot design
|
|
||||||
===========================================================
|
|
||||||
|
|
||||||
Abstract
|
|
||||||
--------
|
|
||||||
|
|
||||||
We propose a relatively simple modification to the initial Magic Folder design which
|
|
||||||
adds merkle DAGs of immutable historical snapshots for each file. The full history
|
|
||||||
does not necessarily need to be retained, and the choice of how much history to retain
|
|
||||||
can potentially be made on a per-file basis.
|
|
||||||
|
|
||||||
Motivation:
|
|
||||||
-----------
|
|
||||||
|
|
||||||
no SPOFs, no admins
|
|
||||||
```````````````````
|
|
||||||
|
|
||||||
Additionally, the initial design had two cases of excess authority:
|
|
||||||
|
|
||||||
1. The magic folder administrator (inviter) has everyone's write-caps and is thus essentially "root"
|
|
||||||
2. Each client shares ambient authority and can delete anything or everything and
|
|
||||||
(assuming there is not a conflict) the data will be deleted from all clients. So, each client
|
|
||||||
is effectively "root" too.
|
|
||||||
|
|
||||||
Thus, while it is useful for file synchronization, the initial design is a much less safe place
|
|
||||||
to store data than in a single mutable tahoe directory (because more client computers have the
|
|
||||||
possibility to delete it).
|
|
||||||
|
|
||||||
|
|
||||||
Glossary
|
|
||||||
--------
|
|
||||||
|
|
||||||
- merkle DAG: like a merkle tree but with multiple roots, and with each node potentially having multiple parents
|
|
||||||
- magic folder: a logical directory that can be synchronized between many clients
|
|
||||||
(devices, users, ...) using a Tahoe-LAFS storage grid
|
|
||||||
- client: a Magic-Folder-enabled Tahoe-LAFS client instance that has access to a magic folder
|
|
||||||
- DMD: "distributed mutable directory", a physical Tahoe-LAFS mutable directory.
|
|
||||||
Each client has the write cap to their own DMD, and read caps to all other client's DMDs
|
|
||||||
(as in the original Magic Folder design).
|
|
||||||
- snapshot: a reference to a version of a file; represented as an immutable directory containing
|
|
||||||
an entry called "content" (pointing to the immutable file containing the file's contents),
|
|
||||||
and an entry called "parent0" (pointing to a parent snapshot), and optionally parent1 through
|
|
||||||
parentN pointing at other parents. The Magic Folder snapshot object is conceptually very similar
|
|
||||||
to a git commit object, except for that it is created automatically and it records the history of an
|
|
||||||
individual file rather than an entire repository. Also, commits do not need to have authors
|
|
||||||
(although an author field could be easily added later).
|
|
||||||
- deletion snapshot: immutable directory containing no content entry (only one or more parents)
|
|
||||||
- capability: a Tahoe-LAFS diminishable cryptographic capability
|
|
||||||
- cap: short for capability
|
|
||||||
- conflict: the situation when another client's current snapshot for a file is different than our current snapshot, and is not a descendant of ours.
|
|
||||||
- overwrite: the situation when another client's current snapshot for a file is a (not necessarily direct) descendant of our current snapshot.
|
|
||||||
|
|
||||||
|
|
||||||
Overview
|
|
||||||
--------
|
|
||||||
|
|
||||||
This new design will track the history of each file using "snapshots" which are
|
|
||||||
created at each upload. Each snapshot will specify one or more parent snapshots,
|
|
||||||
forming a directed acyclic graph. A Magic-Folder user's DMD uses a flattened directory
|
|
||||||
hierarchy naming scheme, as in the original design. But, instead of pointing directly
|
|
||||||
at file contents, each file name will link to that user's latest snapshot for that file.
|
|
||||||
|
|
||||||
Inside the dmd there will also be an immutable directory containing the client's subscriptions
|
|
||||||
(read-caps to other clients' dmds).
|
|
||||||
|
|
||||||
Clients periodically poll each other's DMDs. When they see the current snapshot for a file is
|
|
||||||
different than their own current snapshot for that file, they immediately begin downloading its
|
|
||||||
contents and then walk backwards through the DAG from the new snapshot until they find their own
|
|
||||||
snapshot or a common ancestor.
|
|
||||||
|
|
||||||
For the common ancestor search to be efficient, the client will need to keep a local store (in the magic folder db) of all of the snapshots
|
|
||||||
(but not their contents) between the oldest current snapshot of any of their subscriptions and their own current snapshot.
|
|
||||||
See "local cache purging policy" below for more details.
|
|
||||||
|
|
||||||
If the new snapshot is a descendant of the client's existing snapshot, then this update
|
|
||||||
is an "overwrite" - like a git fast-forward. So, when the download of the new file completes it can overwrite
|
|
||||||
the existing local file with the new contents and update its dmd to point at the new snapshot.
|
|
||||||
|
|
||||||
If the new snapshot is not a descendant of the client's current snapshot, then the update is a
|
|
||||||
conflict. The new file is downloaded and named $filename.conflict-$user1,$user2 (including a list
|
|
||||||
of other subscriptions who have that version as their current version).
|
|
||||||
|
|
||||||
Changes to the local .conflict- file are not tracked. When that file disappears
|
|
||||||
(either by deletion, or being renamed) a new snapshot for the conflicting file is
|
|
||||||
created which has two parents - the client's snapshot prior to the conflict, and the
|
|
||||||
new conflicting snapshot. If multiple .conflict files are deleted or renamed in a short
|
|
||||||
period of time, a single conflict-resolving snapshot with more than two parents can be created.
|
|
||||||
|
|
||||||
! I think this behavior will confuse users.
|
|
||||||
|
|
||||||
Tahoe-LAFS snapshot objects
|
|
||||||
---------------------------
|
|
||||||
|
|
||||||
These Tahoe-LAFS snapshot objects only track the history of a single file, not a directory hierarchy.
|
|
||||||
Snapshot objects contain only two field types:
|
|
||||||
- ``Content``: an immutable capability of the file contents (omitted if deletion snapshot)
|
|
||||||
- ``Parent0..N``: immutable capabilities representing parent snapshots
|
|
||||||
|
|
||||||
Therefore in this system an interesting side effect of this Tahoe snapshot object is that there is no
|
|
||||||
snapshot author. The only notion of an identity in the Magic-Folder system is the write capability of the user's DMD.
|
|
||||||
|
|
||||||
The snapshot object is an immutable directory which looks like this:
|
|
||||||
content -> immutable cap to file content
|
|
||||||
parent0 -> immutable cap to a parent snapshot object
|
|
||||||
parent1..N -> more parent snapshots
|
|
||||||
|
|
||||||
|
|
||||||
Snapshot Author Identity
|
|
||||||
------------------------
|
|
||||||
|
|
||||||
Snapshot identity might become an important feature so that bad actors
|
|
||||||
can be recognized and other clients can stop "subscribing" to (polling for) updates from them.
|
|
||||||
|
|
||||||
Perhaps snapshots could be signed by the user's Magic-Folder write key for this purpose? Probably a bad idea to reuse the write-cap key for this. Better to introduce ed25519 identity keys which can (optionally) sign snapshot contents and store the signature as another member of the immutable directory.
|
|
||||||
|
|
||||||
|
|
||||||
Conflict Resolution
|
|
||||||
-------------------
|
|
||||||
|
|
||||||
detection of conflicts
|
|
||||||
``````````````````````
|
|
||||||
|
|
||||||
A Magic-Folder client updates a given file's current snapshot link to a snapshot which is a descendent
|
|
||||||
of the previous snapshot. For a given file, let's say "file1", Alice can detect that Bob's DMD has a "file1"
|
|
||||||
that links to a snapshot which conflicts. Two snapshots conflict if one is not an ancestor of the other.
|
|
||||||
|
|
||||||
|
|
||||||
a possible UI for resolving conflicts
|
|
||||||
`````````````````````````````````````
|
|
||||||
|
|
||||||
If Alice links a conflicting snapshot object for a file named "file1",
|
|
||||||
Bob and Carole will see a file in their Magic-Folder called "file1.conflicted.Alice".
|
|
||||||
Alice conversely will see an additional file called "file1.conflicted.previous".
|
|
||||||
If Alice wishes to resolve the conflict with her new version of the file then
|
|
||||||
she simply deletes the file called "file1.conflicted.previous". If she wants to
|
|
||||||
choose the other version then she moves it into place:
|
|
||||||
|
|
||||||
mv file1.conflicted.previous file1
|
|
||||||
|
|
||||||
|
|
||||||
This scheme works for N number of conflicts. Bob for instance could choose
|
|
||||||
the same resolution for the conflict, like this:
|
|
||||||
|
|
||||||
mv file1.Alice file1
|
|
||||||
|
|
||||||
|
|
||||||
Deletion propagation and eventual Garbage Collection
|
|
||||||
----------------------------------------------------
|
|
||||||
|
|
||||||
When a user deletes a file, this is represented by a link from their DMD file
|
|
||||||
object to a deletion snapshot. Eventually all users will link this deletion
|
|
||||||
snapshot into their DMD. When all users have the link then they locally cache
|
|
||||||
the deletion snapshot and remove the link to that file in their DMD.
|
|
||||||
Deletions can of course be undeleted; this means creating a new snapshot
|
|
||||||
object that specifies itself a descent of the deletion snapshot.
|
|
||||||
|
|
||||||
Clients periodically renew leases to all capabilities recursively linked
|
|
||||||
to in their DMD. Files which are unlinked by ALL the users of a
|
|
||||||
given Magic-Folder will eventually be garbage collected.
|
|
||||||
|
|
||||||
Lease expirey duration must be tuned properly by storage servers such that
|
|
||||||
Garbage Collection does not occur too frequently.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Performance Considerations
|
|
||||||
--------------------------
|
|
||||||
|
|
||||||
local changes
|
|
||||||
`````````````
|
|
||||||
|
|
||||||
Our old scheme requires two remote Tahoe-LAFS operations per local file modification:
|
|
||||||
1. upload new file contents (as an immutable file)
|
|
||||||
2. modify mutable directory (DMD) to link to the immutable file cap
|
|
||||||
|
|
||||||
Our new scheme requires three remote operations:
|
|
||||||
1. upload new file contents (as an immutable file)
|
|
||||||
2. upload immutable directory representing Tahoe-LAFS snapshot object
|
|
||||||
3. modify mutable directory (DMD) to link to the immutable snapshot object
|
|
||||||
|
|
||||||
remote changes
|
|
||||||
``````````````
|
|
||||||
|
|
||||||
Our old scheme requires one remote Tahoe-LAFS operation per remote file modification (not counting the polling of the dmd):
|
|
||||||
1. Download new file content
|
|
||||||
|
|
||||||
Our new scheme requires a minimum of two remote operations (not counting the polling of the dmd) for conflicting downloads, or three remote operations for overwrite downloads:
|
|
||||||
1. Download new snapshot object
|
|
||||||
2. Download the content it points to
|
|
||||||
3. If the download is an overwrite, modify the DMD to indicate that the downloaded version is their current version.
|
|
||||||
|
|
||||||
If the new snapshot is not a direct descendant of our current snapshot or the other party's previous snapshot we saw, we will also need to download more snapshots to determine if it is a conflict or an overwrite. However, those can be done in
|
|
||||||
parallel with the content download since we will need to download the content in either case.
|
|
||||||
|
|
||||||
While the old scheme is obviously more efficient, we think that the properties provided by the new scheme make it worth the additional cost.
|
|
||||||
|
|
||||||
Physical updates to the DMD obviously need to be serialized, so multiple logical updates should be combined when an update is already in progress.
|
|
||||||
|
|
||||||
conflict detection and local caching
|
|
||||||
````````````````````````````````````
|
|
||||||
|
|
||||||
Local caching of snapshots is important for performance.
|
|
||||||
We refer to the client's local snapshot cache as the ``magic-folder db``.
|
|
||||||
|
|
||||||
Conflict detection can be expensive because it may require the client
|
|
||||||
to download many snapshots from the other user's DMD in order to try
|
|
||||||
and find its own current snapshot or a descendant. The cost of scanning
|
|
||||||
the remote DMDs should not be very high unless the client conducting the
|
|
||||||
scan has lots of history to download because of being offline for a long
|
|
||||||
time while many new snapshots were distributed.
|
|
||||||
|
|
||||||
|
|
||||||
local cache purging policy
|
|
||||||
``````````````````````````
|
|
||||||
|
|
||||||
The client's current snapshot for each file should be cached at all times.
|
|
||||||
When all clients' views of a file are synchronized (they all have the same
|
|
||||||
snapshot for that file), no ancestry for that file needs to be cached.
|
|
||||||
When clients' views of a file are *not* synchronized, the most recent
|
|
||||||
common ancestor of all clients' snapshots must be kept cached, as must
|
|
||||||
all intermediate snapshots.
|
|
||||||
|
|
||||||
|
|
||||||
Local Merge Property
|
|
||||||
--------------------
|
|
||||||
|
|
||||||
Bob can in fact, set a pre-existing directory (with files) as his new Magic-Folder directory, resulting
|
|
||||||
in a merge of the Magic-Folder with Bob's local directory. Filename collisions will result in conflicts
|
|
||||||
because Bob's new snapshots are not descendants of the existing Magic-Folder file snapshots.
|
|
||||||
|
|
||||||
|
|
||||||
Example: simultaneous update with four parties:
|
|
||||||
|
|
||||||
1. A, B, C, D are in sync for file "foo" at snapshot X
|
|
||||||
2. A and B simultaneously change the file, creating snapshots XA and XB (both descendants of X).
|
|
||||||
3. C hears about XA first, and D hears about XB first. Both accept an overwrite.
|
|
||||||
4. All four parties hear about the other update they hadn't heard about yet.
|
|
||||||
5. Result:
|
|
||||||
- everyone's local file "foo" has the content pointed to by the snapshot in their DMD's "foo" entry
|
|
||||||
- A and C's DMDs each have the "foo" entry pointing at snapshot XA
|
|
||||||
- B and D's DMDs each have the "foo" entry pointing at snapshot XB
|
|
||||||
- A and C have a local file called foo.conflict-B,D with XB's content
|
|
||||||
- B and D have a local file called foo.conflict-A,C with XA's content
|
|
||||||
|
|
||||||
Later:
|
|
||||||
|
|
||||||
- Everyone ignores the conflict, and continues updating their local "foo", but slowly enough that there are no further conflicts, so that A and C remain in sync with each other, and B and D remain in sync with each other.
|
|
||||||
|
|
||||||
- A and C's foo.conflict-B,D file continues to be updated with the latest version of the file B and D are working on, and vice-versa.
|
|
||||||
|
|
||||||
- A and C edit the file at the same time again, causing a new conflict.
|
|
||||||
|
|
||||||
- Local files are now:
|
|
||||||
|
|
||||||
A: "foo", "foo.conflict-B,D", "foo.conflict-C"
|
|
||||||
|
|
||||||
C: "foo", "foo.conflict-B,D", "foo.conflict-A"
|
|
||||||
|
|
||||||
B and D: "foo", "foo.conflict-A", "foo.conflict-C"
|
|
||||||
|
|
||||||
- Finally, D decides to look at "foo.conflict-A" and "foo.conflict-C", and they manually integrate (or decide to ignore) the differences into their own local file "foo".
|
|
||||||
|
|
||||||
- D deletes their conflict files.
|
|
||||||
|
|
||||||
- D's DMD now points to a snapshot that is a descendant of everyone else's current snapshot, resolving all conflicts.
|
|
||||||
|
|
||||||
- The conflict files on A, B, and C disappear, and everyone's local file "foo" contains D's manually-merged content.
|
|
||||||
|
|
||||||
|
|
||||||
Daira: I think it is too complicated to include multiple nicknames in the .conflict files
|
|
||||||
(e.g. "foo.conflict-B,D"). It should be sufficient to have one file for each other client,
|
|
||||||
reflecting that client's latest version, regardless of who else it conflicts with.
|
|
||||||
|
|
||||||
|
|
||||||
Zooko's Design (as interpreted by Daira)
|
|
||||||
========================================
|
|
||||||
|
|
||||||
A version map is a mapping from client nickname to version number.
|
|
||||||
|
|
||||||
Definition: a version map M' strictly-follows a mapping M iff for every entry c->v
|
|
||||||
in M, there is an entry c->v' in M' such that v' > v.
|
|
||||||
|
|
||||||
|
|
||||||
Each client maintains a 'local version map' and a 'conflict version map' for each file
|
|
||||||
in its magic folder db.
|
|
||||||
If it has never written the file, then the entry for its own nickname in the local version
|
|
||||||
map is zero. The conflict version map only contains entries for nicknames B where
|
|
||||||
"$FILENAME.conflict-$B" exists.
|
|
||||||
|
|
||||||
When a client A uploads a file, it increments the version for its own nickname in its
|
|
||||||
local version map for the file, and includes that map as metadata with its upload.
|
|
||||||
|
|
||||||
A download by client A from client B is an overwrite iff the downloaded version map
|
|
||||||
strictly-follows A's local version map for that file; in this case A replaces its local
|
|
||||||
version map with the downloaded version map. Otherwise it is a conflict, and the
|
|
||||||
download is put into "$FILENAME.conflict-$B"; in this case A's
|
|
||||||
local version map remains unchanged, and the entry B->v taken from the downloaded
|
|
||||||
version map is added to its conflict version map.
|
|
||||||
|
|
||||||
If client A deletes or renames a conflict file "$FILENAME.conflict-$B", then A copies
|
|
||||||
the entry for B from its conflict version map to its local version map, deletes
|
|
||||||
the entry for B in its conflict version map, and performs another upload (with
|
|
||||||
incremented version number) of $FILENAME.
|
|
||||||
|
|
||||||
|
|
||||||
Example:
|
|
||||||
A, B, C = (10, 20, 30) everyone agrees.
|
|
||||||
A updates: (11, 20, 30)
|
|
||||||
B updates: (10, 21, 30)
|
|
||||||
|
|
||||||
C will see either A or B first. Both would be an overwrite, if considered alone.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -1,951 +0,0 @@
|
|||||||
Magic Folder design for remote-to-local sync
|
|
||||||
============================================
|
|
||||||
|
|
||||||
Scope
|
|
||||||
-----
|
|
||||||
|
|
||||||
In this Objective we will design remote-to-local synchronization:
|
|
||||||
|
|
||||||
* How to efficiently determine which objects (files and directories) have
|
|
||||||
to be downloaded in order to bring the current local filesystem into sync
|
|
||||||
with the newly-discovered version of the remote filesystem.
|
|
||||||
* How to distinguish overwrites, in which the remote side was aware of
|
|
||||||
your most recent version and overwrote it with a new version, from
|
|
||||||
conflicts, in which the remote side was unaware of your most recent
|
|
||||||
version when it published its new version. The latter needs to be raised
|
|
||||||
to the user as an issue the user will have to resolve and the former must
|
|
||||||
not bother the user.
|
|
||||||
* How to overwrite the (stale) local versions of those objects with the
|
|
||||||
newly acquired objects, while preserving backed-up versions of those
|
|
||||||
overwritten objects in case the user didn't want this overwrite and wants
|
|
||||||
to recover the old version.
|
|
||||||
|
|
||||||
Tickets on the Tahoe-LAFS trac with the `otf-magic-folder-objective4`_
|
|
||||||
keyword are within the scope of the remote-to-local synchronization
|
|
||||||
design.
|
|
||||||
|
|
||||||
.. _otf-magic-folder-objective4: https://tahoe-lafs.org/trac/tahoe-lafs/query?status=!closed&keywords=~otf-magic-folder-objective4
|
|
||||||
|
|
||||||
|
|
||||||
Glossary
|
|
||||||
''''''''
|
|
||||||
|
|
||||||
Object: a file or directory
|
|
||||||
|
|
||||||
DMD: distributed mutable directory
|
|
||||||
|
|
||||||
Folder: an abstract directory that is synchronized between clients.
|
|
||||||
(A folder is not the same as the directory corresponding to it on
|
|
||||||
any particular client, nor is it the same as a DMD.)
|
|
||||||
|
|
||||||
Collective: the set of clients subscribed to a given Magic Folder.
|
|
||||||
|
|
||||||
Descendant: a direct or indirect child in a directory or folder tree
|
|
||||||
|
|
||||||
Subfolder: a folder that is a descendant of a magic folder
|
|
||||||
|
|
||||||
Subpath: the path from a magic folder to one of its descendants
|
|
||||||
|
|
||||||
Write: a modification to a local filesystem object by a client
|
|
||||||
|
|
||||||
Read: a read from a local filesystem object by a client
|
|
||||||
|
|
||||||
Upload: an upload of a local object to the Tahoe-LAFS file store
|
|
||||||
|
|
||||||
Download: a download from the Tahoe-LAFS file store to a local object
|
|
||||||
|
|
||||||
Pending notification: a local filesystem change that has been detected
|
|
||||||
but not yet processed.
|
|
||||||
|
|
||||||
|
|
||||||
Representing the Magic Folder in Tahoe-LAFS
|
|
||||||
-------------------------------------------
|
|
||||||
|
|
||||||
Unlike the local case where we use inotify or ReadDirectoryChangesW to
|
|
||||||
detect filesystem changes, we have no mechanism to register a monitor for
|
|
||||||
changes to a Tahoe-LAFS directory. Therefore, we must periodically poll
|
|
||||||
for changes.
|
|
||||||
|
|
||||||
An important constraint on the solution is Tahoe-LAFS' ":doc:`write
|
|
||||||
coordination directive<../../write_coordination>`", which prohibits
|
|
||||||
concurrent writes by different storage clients to the same mutable object:
|
|
||||||
|
|
||||||
Tahoe does not provide locking of mutable files and directories. If
|
|
||||||
there is more than one simultaneous attempt to change a mutable file
|
|
||||||
or directory, then an UncoordinatedWriteError may result. This might,
|
|
||||||
in rare cases, cause the file or directory contents to be accidentally
|
|
||||||
deleted. The user is expected to ensure that there is at most one
|
|
||||||
outstanding write or update request for a given file or directory at
|
|
||||||
a time. One convenient way to accomplish this is to make a different
|
|
||||||
file or directory for each person or process that wants to write.
|
|
||||||
|
|
||||||
Since it is a goal to allow multiple users to write to a Magic Folder,
|
|
||||||
if the write coordination directive remains the same as above, then we
|
|
||||||
will not be able to implement the Magic Folder as a single Tahoe-LAFS
|
|
||||||
DMD. In general therefore, we will have multiple DMDs —spread across
|
|
||||||
clients— that together represent the Magic Folder. Each client in a
|
|
||||||
Magic Folder collective polls the other clients' DMDs in order to detect
|
|
||||||
remote changes.
|
|
||||||
|
|
||||||
Six possible designs were considered for the representation of subfolders
|
|
||||||
of the Magic Folder:
|
|
||||||
|
|
||||||
1. All subfolders written by a given Magic Folder client are collapsed
|
|
||||||
into a single client DMD, containing immutable files. The child name of
|
|
||||||
each file encodes the full subpath of that file relative to the Magic
|
|
||||||
Folder.
|
|
||||||
|
|
||||||
2. The DMD tree under a client DMD is a direct copy of the folder tree
|
|
||||||
written by that client to the Magic Folder. Not all subfolders have
|
|
||||||
corresponding DMDs; only those to which that client has written files or
|
|
||||||
child subfolders.
|
|
||||||
|
|
||||||
3. The directory tree under a client DMD is a ``tahoe backup`` structure
|
|
||||||
containing immutable snapshots of the folder tree written by that client
|
|
||||||
to the Magic Folder. As in design 2, only objects written by that client
|
|
||||||
are present.
|
|
||||||
|
|
||||||
4. *Each* client DMD contains an eventually consistent mirror of all
|
|
||||||
files and folders written by *any* Magic Folder client. Thus each client
|
|
||||||
must also copy changes made by other Magic Folder clients to its own
|
|
||||||
client DMD.
|
|
||||||
|
|
||||||
5. *Each* client DMD contains a ``tahoe backup`` structure containing
|
|
||||||
immutable snapshots of all files and folders written by *any* Magic
|
|
||||||
Folder client. Thus each client must also create another snapshot in its
|
|
||||||
own client DMD when changes are made by another client. (It can potentially
|
|
||||||
batch changes, subject to latency requirements.)
|
|
||||||
|
|
||||||
6. The write coordination problem is solved by implementing `two-phase
|
|
||||||
commit`_. Then, the representation consists of a single DMD tree which is
|
|
||||||
written by all clients.
|
|
||||||
|
|
||||||
.. _`two-phase commit`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1755
|
|
||||||
|
|
||||||
Here is a summary of advantages and disadvantages of each design:
|
|
||||||
|
|
||||||
+----------------------------+
|
|
||||||
| Key |
|
|
||||||
+=======+====================+
|
|
||||||
| \+\+ | major advantage |
|
|
||||||
+-------+--------------------+
|
|
||||||
| \+ | minor advantage |
|
|
||||||
+-------+--------------------+
|
|
||||||
| ‒ | minor disadvantage |
|
|
||||||
+-------+--------------------+
|
|
||||||
| ‒ ‒ | major disadvantage |
|
|
||||||
+-------+--------------------+
|
|
||||||
| ‒ ‒ ‒ | showstopper |
|
|
||||||
+-------+--------------------+
|
|
||||||
|
|
||||||
|
|
||||||
123456+: All designs have the property that a recursive add-lease operation
|
|
||||||
starting from a *collective directory* containing all of the client DMDs,
|
|
||||||
will find all of the files and directories used in the Magic Folder
|
|
||||||
representation. Therefore the representation is compatible with :doc:`garbage
|
|
||||||
collection <../../garbage-collection>`, even when a pre-Magic-Folder client
|
|
||||||
does the lease marking.
|
|
||||||
|
|
||||||
123456+: All designs avoid "breaking" pre-Magic-Folder clients that read
|
|
||||||
a directory or file that is part of the representation.
|
|
||||||
|
|
||||||
456++: Only these designs allow a readcap to one of the client
|
|
||||||
directories —or one of their subdirectories— to be directly shared
|
|
||||||
with other Tahoe-LAFS clients (not necessarily Magic Folder clients),
|
|
||||||
so that such a client sees all of the contents of the Magic Folder.
|
|
||||||
Note that this was not a requirement of the OTF proposal, although it
|
|
||||||
is useful.
|
|
||||||
|
|
||||||
135+: A Magic Folder client has only one mutable Tahoe-LAFS object to
|
|
||||||
monitor per other client. This minimizes communication bandwidth for
|
|
||||||
polling, or alternatively the latency possible for a given polling
|
|
||||||
bandwidth.
|
|
||||||
|
|
||||||
1236+: A client does not need to make changes to its own DMD that repeat
|
|
||||||
changes that another Magic Folder client had previously made. This reduces
|
|
||||||
write bandwidth and complexity.
|
|
||||||
|
|
||||||
1‒: If the Magic Folder has many subfolders, their files will all be
|
|
||||||
collapsed into the same DMD, which could get quite large. In practice a
|
|
||||||
single DMD can easily handle the number of files expected to be written
|
|
||||||
by a client, so this is unlikely to be a significant issue.
|
|
||||||
|
|
||||||
123‒ ‒: In these designs, the set of files in a Magic Folder is
|
|
||||||
represented as the union of the files in all client DMDs. However,
|
|
||||||
when a file is modified by more than one client, it will be linked
|
|
||||||
from multiple client DMDs. We therefore need a mechanism, such as a
|
|
||||||
version number or a monotonically increasing timestamp, to determine
|
|
||||||
which copy takes priority.
|
|
||||||
|
|
||||||
35‒ ‒: When a Magic Folder client detects a remote change, it must
|
|
||||||
traverse an immutable directory structure to see what has changed.
|
|
||||||
Completely unchanged subtrees will have the same URI, allowing some of
|
|
||||||
this traversal to be shortcutted.
|
|
||||||
|
|
||||||
24‒ ‒ ‒: When a Magic Folder client detects a remote change, it must
|
|
||||||
traverse a mutable directory structure to see what has changed. This is
|
|
||||||
more complex and less efficient than traversing an immutable structure,
|
|
||||||
because shortcutting is not possible (each DMD retains the same URI even
|
|
||||||
if a descendant object has changed), and because the structure may change
|
|
||||||
while it is being traversed. Also the traversal needs to be robust
|
|
||||||
against cycles, which can only occur in mutable structures.
|
|
||||||
|
|
||||||
45‒ ‒: When a change occurs in one Magic Folder client, it will propagate
|
|
||||||
to all the other clients. Each client will therefore see multiple
|
|
||||||
representation changes for a single logical change to the Magic Folder
|
|
||||||
contents, and must suppress the duplicates. This is particularly
|
|
||||||
problematic for design 4 where it interacts with the preceding issue.
|
|
||||||
|
|
||||||
4‒ ‒ ‒, 5‒ ‒: There is the potential for client DMDs to get "out of sync"
|
|
||||||
with each other, potentially for long periods if errors occur. Thus each
|
|
||||||
client must be able to "repair" its client directory (and its
|
|
||||||
subdirectory structure) concurrently with performing its own writes. This
|
|
||||||
is a significant complexity burden and may introduce failure modes that
|
|
||||||
could not otherwise happen.
|
|
||||||
|
|
||||||
6‒ ‒ ‒: While two-phase commit is a well-established protocol, its
|
|
||||||
application to Tahoe-LAFS requires significant design work, and may still
|
|
||||||
leave some corner cases of the write coordination problem unsolved.
|
|
||||||
|
|
||||||
|
|
||||||
+------------------------------------------------+-----------------------------------------+
|
|
||||||
| Design Property | Designs Proposed |
|
|
||||||
+================================================+======+======+======+======+======+======+
|
|
||||||
| **advantages** | *1* | *2* | *3* | *4* | *5* | *6* |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| Compatible with garbage collection |\+ |\+ |\+ |\+ |\+ |\+ |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| Does not break old clients |\+ |\+ |\+ |\+ |\+ |\+ |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| Allows direct sharing | | | |\+\+ |\+\+ |\+\+ |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| Efficient use of bandwidth |\+ | |\+ | |\+ | |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| No repeated changes |\+ |\+ |\+ | | |\+ |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| **disadvantages** | *1* | *2* | *3* | *4* | *5* | *6* |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| Can result in large DMDs |‒ | | | | | |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| Need version number to determine priority |‒ ‒ |‒ ‒ |‒ ‒ | | | |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| Must traverse immutable directory structure | | |‒ ‒ | |‒ ‒ | |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| Must traverse mutable directory structure | |‒ ‒ ‒ | |‒ ‒ ‒ | | |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| Must suppress duplicate representation changes | | | |‒ ‒ |‒ ‒ | |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| "Out of sync" problem | | | |‒ ‒ ‒ |‒ ‒ | |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
| Unsolved design problems | | | | | |‒ ‒ ‒ |
|
|
||||||
+------------------------------------------------+------+------+------+------+------+------+
|
|
||||||
|
|
||||||
|
|
||||||
Evaluation of designs
|
|
||||||
'''''''''''''''''''''
|
|
||||||
|
|
||||||
Designs 2 and 3 have no significant advantages over design 1, while
|
|
||||||
requiring higher polling bandwidth and greater complexity due to the need
|
|
||||||
to create subdirectories. These designs were therefore rejected.
|
|
||||||
|
|
||||||
Design 4 was rejected due to the out-of-sync problem, which is severe
|
|
||||||
and possibly unsolvable for mutable structures.
|
|
||||||
|
|
||||||
For design 5, the out-of-sync problem is still present but possibly
|
|
||||||
solvable. However, design 5 is substantially more complex, less efficient
|
|
||||||
in bandwidth/latency, and less scalable in number of clients and
|
|
||||||
subfolders than design 1. It only gains over design 1 on the ability to
|
|
||||||
share directory readcaps to the Magic Folder (or subfolders), which was
|
|
||||||
not a requirement. It would be possible to implement this feature in
|
|
||||||
future by switching to design 6.
|
|
||||||
|
|
||||||
For the time being, however, design 6 was considered out-of-scope for
|
|
||||||
this project.
|
|
||||||
|
|
||||||
Therefore, design 1 was chosen. That is:
|
|
||||||
|
|
||||||
All subfolders written by a given Magic Folder client are collapsed
|
|
||||||
into a single client DMD, containing immutable files. The child name
|
|
||||||
of each file encodes the full subpath of that file relative to the
|
|
||||||
Magic Folder.
|
|
||||||
|
|
||||||
Each directory entry in a DMD also stores a version number, so that the
|
|
||||||
latest version of a file is well-defined when it has been modified by
|
|
||||||
multiple clients.
|
|
||||||
|
|
||||||
To enable representing empty directories, a client that creates a
|
|
||||||
directory should link a corresponding zero-length file in its DMD,
|
|
||||||
at a name that ends with the encoded directory separator character.
|
|
||||||
|
|
||||||
We want to enable dynamic configuration of the membership of a Magic
|
|
||||||
Folder collective, without having to reconfigure or restart each client
|
|
||||||
when another client joins. To support this, we have a single collective
|
|
||||||
directory that links to all of the client DMDs, named by their client
|
|
||||||
nicknames. If the collective directory is mutable, then it is possible
|
|
||||||
to change its contents in order to add clients. Note that a client DMD
|
|
||||||
should not be unlinked from the collective directory unless all of its
|
|
||||||
files are first copied to some other client DMD.
|
|
||||||
|
|
||||||
A client needs to be able to write to its own DMD, and read from other DMDs.
|
|
||||||
To be consistent with the `Principle of Least Authority`_, each client's
|
|
||||||
reference to its own DMD is a write capability, whereas its reference
|
|
||||||
to the collective directory is a read capability. The latter transitively
|
|
||||||
grants read access to all of the other client DMDs and the files linked
|
|
||||||
from them, as required.
|
|
||||||
|
|
||||||
.. _`Principle of Least Authority`: http://www.eros-os.org/papers/secnotsep.pdf
|
|
||||||
|
|
||||||
Design and implementation of the user interface for maintaining this
|
|
||||||
DMD structure and configuration will be addressed in Objectives 5 and 6.
|
|
||||||
|
|
||||||
During operation, each client will poll for changes on other clients
|
|
||||||
at a predetermined frequency. On each poll, it will reread the collective
|
|
||||||
directory (to allow for added or removed clients), and then read each
|
|
||||||
client DMD linked from it.
|
|
||||||
|
|
||||||
"Hidden" files, and files with names matching the patterns used for backup,
|
|
||||||
temporary, and conflicted files, will be ignored, i.e. not synchronized
|
|
||||||
in either direction. A file is hidden if it has a filename beginning with
|
|
||||||
"." (on any platform), or has the hidden or system attribute on Windows.
|
|
||||||
|
|
||||||
|
|
||||||
Conflict Detection and Resolution
|
|
||||||
---------------------------------
|
|
||||||
|
|
||||||
The combination of local filesystems and distributed objects is
|
|
||||||
an example of shared state concurrency, which is highly error-prone
|
|
||||||
and can result in race conditions that are complex to analyze.
|
|
||||||
Unfortunately we have no option but to use shared state in this
|
|
||||||
situation.
|
|
||||||
|
|
||||||
We call the resulting design issues "dragons" (as in "Here be dragons"),
|
|
||||||
which as a convenient mnemonic we have named after the classical
|
|
||||||
Greek elements Earth, Fire, Air, and Water.
|
|
||||||
|
|
||||||
Note: all filenames used in the following sections are examples,
|
|
||||||
and the filename patterns we use in the actual implementation may
|
|
||||||
differ. The actual patterns will probably include timestamps, and
|
|
||||||
for conflicted files, the nickname of the client that last changed
|
|
||||||
the file.
|
|
||||||
|
|
||||||
|
|
||||||
Earth Dragons: Collisions between local filesystem operations and downloads
|
|
||||||
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
|
||||||
|
|
||||||
Write/download collisions
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
Suppose that Alice's Magic Folder client is about to write a
|
|
||||||
version of ``foo`` that it has downloaded in response to a remote
|
|
||||||
change.
|
|
||||||
|
|
||||||
The criteria for distinguishing overwrites from conflicts are
|
|
||||||
described later in the `Fire Dragons`_ section. Suppose that the
|
|
||||||
remote change has been initially classified as an overwrite.
|
|
||||||
(As we will see, it may be reclassified in some circumstances.)
|
|
||||||
|
|
||||||
.. _`Fire Dragons`: #fire-dragons-distinguishing-conflicts-from-overwrites
|
|
||||||
|
|
||||||
Note that writing a file that does not already have an entry in the
|
|
||||||
:ref:`magic folder db<filesystem_integration-local-scanning-and-database>` is
|
|
||||||
initially classed as an overwrite.
|
|
||||||
|
|
||||||
A *write/download collision* occurs when another program writes
|
|
||||||
to ``foo`` in the local filesystem, concurrently with the new
|
|
||||||
version being written by the Magic Folder client. We need to
|
|
||||||
ensure that this does not cause data loss, as far as possible.
|
|
||||||
|
|
||||||
An important constraint on the design is that on Windows, it is
|
|
||||||
not possible to rename a file to the same name as an existing
|
|
||||||
file in that directory. Also, on Windows it may not be possible to
|
|
||||||
delete or rename a file that has been opened by another process
|
|
||||||
(depending on the sharing flags specified by that process).
|
|
||||||
Therefore we need to consider carefully how to handle failure
|
|
||||||
conditions.
|
|
||||||
|
|
||||||
In our proposed design, Alice's Magic Folder client follows
|
|
||||||
this procedure for an overwrite in response to a remote change:
|
|
||||||
|
|
||||||
1. Write a temporary file, say ``.foo.tmp``.
|
|
||||||
2. Use the procedure described in the `Fire Dragons`_ section
|
|
||||||
to obtain an initial classification as an overwrite or a
|
|
||||||
conflict. (This takes as input the ``last_downloaded_uri``
|
|
||||||
field from the directory entry of the changed ``foo``.)
|
|
||||||
3. Set the ``mtime`` of the replacement file to be at least *T* seconds
|
|
||||||
before the current local time. Stat the replacement file
|
|
||||||
to obtain its ``mtime`` and ``ctime`` as stored in the local
|
|
||||||
filesystem, and update the file's last-seen statinfo in
|
|
||||||
the magic folder db with this information. (Note that the
|
|
||||||
retrieved ``mtime`` may differ from the one that was set due
|
|
||||||
to rounding.)
|
|
||||||
4. Perform a *file replacement* operation (explained below)
|
|
||||||
with backup filename ``foo.backup``, replaced file ``foo``,
|
|
||||||
and replacement file ``.foo.tmp``. If any step of this
|
|
||||||
operation fails, reclassify as a conflict and stop.
|
|
||||||
|
|
||||||
To reclassify as a conflict, attempt to rename ``.foo.tmp`` to
|
|
||||||
``foo.conflicted``, suppressing errors.
|
|
||||||
|
|
||||||
The implementation of file replacement differs between Unix
|
|
||||||
and Windows. On Unix, it can be implemented as follows:
|
|
||||||
|
|
||||||
* 4a. Stat the replaced path, and set the permissions of the
|
|
||||||
replacement file to be the same as the replaced file,
|
|
||||||
bitwise-or'd with octal 600 (``rw-------``). If the replaced
|
|
||||||
file does not exist, set the permissions according to the
|
|
||||||
user's umask. If there is a directory at the replaced path,
|
|
||||||
fail.
|
|
||||||
* 4b. Attempt to move the replaced file (``foo``) to the
|
|
||||||
backup filename (``foo.backup``). If an ``ENOENT`` error
|
|
||||||
occurs because the replaced file does not exist, ignore this
|
|
||||||
error and continue with steps 4c and 4d.
|
|
||||||
* 4c. Attempt to create a hard link at the replaced filename
|
|
||||||
(``foo``) pointing to the replacement file (``.foo.tmp``).
|
|
||||||
* 4d. Attempt to unlink the replacement file (``.foo.tmp``),
|
|
||||||
suppressing errors.
|
|
||||||
|
|
||||||
Note that, if there is no conflict, the entry for ``foo``
|
|
||||||
recorded in the :ref:`magic folder
|
|
||||||
db<filesystem_integration-local-scanning-and-database>` will
|
|
||||||
reflect the ``mtime`` set in step 3. The move operation in step
|
|
||||||
4b will cause a ``MOVED_FROM`` event for ``foo``, and the link
|
|
||||||
operation in step 4c will cause an ``IN_CREATE`` event for
|
|
||||||
``foo``. However, these events will not trigger an upload,
|
|
||||||
because they are guaranteed to be processed only after the file
|
|
||||||
replacement has finished, at which point the last-seen statinfo
|
|
||||||
recorded in the database entry will exactly match the metadata
|
|
||||||
for the file's inode on disk. (The two hard links — ``foo``
|
|
||||||
and, while it still exists, ``.foo.tmp`` — share the same inode
|
|
||||||
and therefore the same metadata.)
|
|
||||||
|
|
||||||
On Windows, file replacement can be implemented by a call to
|
|
||||||
the `ReplaceFileW`_ API (with the
|
|
||||||
``REPLACEFILE_IGNORE_MERGE_ERRORS`` flag). If an error occurs
|
|
||||||
because the replaced file does not exist, then we ignore this
|
|
||||||
error and attempt to move the replacement file to the replaced
|
|
||||||
file.
|
|
||||||
|
|
||||||
Similar to the Unix case, the `ReplaceFileW`_ operation will
|
|
||||||
cause one or more change notifications for ``foo``. The replaced
|
|
||||||
``foo`` has the same ``mtime`` as the replacement file, and so any
|
|
||||||
such notification(s) will not trigger an unwanted upload.
|
|
||||||
|
|
||||||
.. _`ReplaceFileW`: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365512%28v=vs.85%29.aspx
|
|
||||||
|
|
||||||
To determine whether this procedure adequately protects against data
|
|
||||||
loss, we need to consider what happens if another process attempts to
|
|
||||||
update ``foo``, for example by renaming ``foo.other`` to ``foo``.
|
|
||||||
This requires us to analyze all possible interleavings between the
|
|
||||||
operations performed by the Magic Folder client and the other process.
|
|
||||||
(Note that atomic operations on a directory are totally ordered.)
|
|
||||||
The set of possible interleavings differs between Windows and Unix.
|
|
||||||
|
|
||||||
On Unix, for the case where the replaced file already exists, we have:
|
|
||||||
|
|
||||||
* Interleaving A: the other process' rename precedes our rename in
|
|
||||||
step 4b, and we get an ``IN_MOVED_TO`` event for its rename by
|
|
||||||
step 2. Then we reclassify as a conflict; its changes end up at
|
|
||||||
``foo`` and ours end up at ``foo.conflicted``. This avoids data
|
|
||||||
loss.
|
|
||||||
|
|
||||||
* Interleaving B: its rename precedes ours in step 4b, and we do
|
|
||||||
not get an event for its rename by step 2. Its changes end up at
|
|
||||||
``foo.backup``, and ours end up at ``foo`` after being linked there
|
|
||||||
in step 4c. This avoids data loss.
|
|
||||||
|
|
||||||
* Interleaving C: its rename happens between our rename in step 4b,
|
|
||||||
and our link operation in step 4c of the file replacement. The
|
|
||||||
latter fails with an ``EEXIST`` error because ``foo`` already
|
|
||||||
exists. We reclassify as a conflict; the old version ends up at
|
|
||||||
``foo.backup``, the other process' changes end up at ``foo``, and
|
|
||||||
ours at ``foo.conflicted``. This avoids data loss.
|
|
||||||
|
|
||||||
* Interleaving D: its rename happens after our link in step 4c, and
|
|
||||||
causes an ``IN_MOVED_TO`` event for ``foo``. Its rename also changes
|
|
||||||
the ``mtime`` for ``foo`` so that it is different from the ``mtime``
|
|
||||||
calculated in step 3, and therefore different from the metadata
|
|
||||||
recorded for ``foo`` in the magic folder db. (Assuming no system
|
|
||||||
clock changes, its rename will set an ``mtime`` timestamp
|
|
||||||
corresponding to a time after step 4c, which is after the timestamp
|
|
||||||
*T* seconds before step 4a, provided that *T* seconds is
|
|
||||||
sufficiently greater than the timestamp granularity.) Therefore, an
|
|
||||||
upload will be triggered for ``foo`` after its change, which is
|
|
||||||
correct and avoids data loss.
|
|
||||||
|
|
||||||
If the replaced file did not already exist, an ``ENOENT`` error
|
|
||||||
occurs at step 4b, and we continue with steps 4c and 4d. The other
|
|
||||||
process' rename races with our link operation in step 4c. If the
|
|
||||||
other process wins the race then the effect is similar to
|
|
||||||
Interleaving C, and if we win the race then it is similar to
|
|
||||||
Interleaving D. Either case avoids data loss.
|
|
||||||
|
|
||||||
|
|
||||||
On Windows, the internal implementation of `ReplaceFileW`_ is similar
|
|
||||||
to what we have described above for Unix; it works like this:
|
|
||||||
|
|
||||||
* 4a′. Copy metadata (which does not include ``mtime``) from the
|
|
||||||
replaced file (``foo``) to the replacement file (``.foo.tmp``).
|
|
||||||
|
|
||||||
* 4b′. Attempt to move the replaced file (``foo``) onto the
|
|
||||||
backup filename (``foo.backup``), deleting the latter if it
|
|
||||||
already exists.
|
|
||||||
|
|
||||||
* 4c′. Attempt to move the replacement file (``.foo.tmp``) to the
|
|
||||||
replaced filename (``foo``); fail if the destination already
|
|
||||||
exists.
|
|
||||||
|
|
||||||
Notice that this is essentially the same as the algorithm we use
|
|
||||||
for Unix, but steps 4c and 4d on Unix are combined into a single
|
|
||||||
step 4c′. (If there is a failure at step 4c′ after step 4b′ has
|
|
||||||
completed, the `ReplaceFileW`_ call will fail with return code
|
|
||||||
``ERROR_UNABLE_TO_MOVE_REPLACEMENT_2``. However, it is still
|
|
||||||
preferable to use this API over two `MoveFileExW`_ calls, because
|
|
||||||
it retains the attributes and ACLs of ``foo`` where possible.
|
|
||||||
Also note that if the `ReplaceFileW`_ call fails with
|
|
||||||
``ERROR_FILE_NOT_FOUND`` because the replaced file does not exist,
|
|
||||||
then the replacement operation ignores this error and continues with
|
|
||||||
the equivalent of step 4c′, as on Unix.)
|
|
||||||
|
|
||||||
However, on Windows the other application will not be able to
|
|
||||||
directly rename ``foo.other`` onto ``foo`` (which would fail because
|
|
||||||
the destination already exists); it will have to rename or delete
|
|
||||||
``foo`` first. Without loss of generality, let's say ``foo`` is
|
|
||||||
deleted. This complicates the interleaving analysis, because we
|
|
||||||
have two operations done by the other process interleaving with
|
|
||||||
three done by the magic folder process (rather than one operation
|
|
||||||
interleaving with four as on Unix).
|
|
||||||
|
|
||||||
So on Windows, for the case where the replaced file already exists,
|
|
||||||
we have:
|
|
||||||
|
|
||||||
* Interleaving A′: the other process' deletion of ``foo`` and its
|
|
||||||
rename of ``foo.other`` to ``foo`` both precede our rename in
|
|
||||||
step 4b. We get an event corresponding to its rename by step 2.
|
|
||||||
Then we reclassify as a conflict; its changes end up at ``foo``
|
|
||||||
and ours end up at ``foo.conflicted``. This avoids data loss.
|
|
||||||
|
|
||||||
* Interleaving B′: the other process' deletion of ``foo`` and its
|
|
||||||
rename of ``foo.other`` to ``foo`` both precede our rename in
|
|
||||||
step 4b. We do not get an event for its rename by step 2.
|
|
||||||
Its changes end up at ``foo.backup``, and ours end up at ``foo``
|
|
||||||
after being moved there in step 4c′. This avoids data loss.
|
|
||||||
|
|
||||||
* Interleaving C′: the other process' deletion of ``foo`` precedes
|
|
||||||
our rename of ``foo`` to ``foo.backup`` done by `ReplaceFileW`_,
|
|
||||||
but its rename of ``foo.other`` to ``foo`` does not, so we get
|
|
||||||
an ``ERROR_FILE_NOT_FOUND`` error from `ReplaceFileW`_ indicating
|
|
||||||
that the replaced file does not exist. We ignore this error and
|
|
||||||
attempt to move ``.foo.tmp`` to ``foo``, racing with the other
|
|
||||||
process which is attempting to move ``foo.other`` to ``foo``.
|
|
||||||
If we win the race, then our changes end up at ``foo``, and the
|
|
||||||
other process' move fails. If the other process wins the race,
|
|
||||||
then its changes end up at ``foo``, our move fails, and we
|
|
||||||
reclassify as a conflict, so that our changes end up at
|
|
||||||
``foo.conflicted``. Either possibility avoids data loss.
|
|
||||||
|
|
||||||
* Interleaving D′: the other process' deletion and/or rename happen
|
|
||||||
during the call to `ReplaceFileW`_, causing the latter to fail.
|
|
||||||
There are two subcases:
|
|
||||||
|
|
||||||
* if the error is ``ERROR_UNABLE_TO_MOVE_REPLACEMENT_2``, then
|
|
||||||
``foo`` is renamed to ``foo.backup`` and ``.foo.tmp`` remains
|
|
||||||
at its original name after the call.
|
|
||||||
* for all other errors, ``foo`` and ``.foo.tmp`` both remain at
|
|
||||||
their original names after the call.
|
|
||||||
|
|
||||||
In both subcases, we reclassify as a conflict and rename ``.foo.tmp``
|
|
||||||
to ``foo.conflicted``. This avoids data loss.
|
|
||||||
|
|
||||||
* Interleaving E′: the other process' deletion of ``foo`` and attempt
|
|
||||||
to rename ``foo.other`` to ``foo`` both happen after all internal
|
|
||||||
operations of `ReplaceFileW`_ have completed. This causes deletion
|
|
||||||
and rename events for ``foo`` (which will in practice be merged due
|
|
||||||
to the pending delay, although we don't rely on that for
|
|
||||||
correctness). The rename also changes the ``mtime`` for ``foo`` so
|
|
||||||
that it is different from the ``mtime`` calculated in step 3, and
|
|
||||||
therefore different from the metadata recorded for ``foo`` in the
|
|
||||||
magic folder db. (Assuming no system clock changes, its rename will
|
|
||||||
set an ``mtime`` timestamp corresponding to a time after the
|
|
||||||
internal operations of `ReplaceFileW`_ have completed, which is
|
|
||||||
after the timestamp *T* seconds before `ReplaceFileW`_ is called,
|
|
||||||
provided that *T* seconds is sufficiently greater than the timestamp
|
|
||||||
granularity.) Therefore, an upload will be triggered for ``foo``
|
|
||||||
after its change, which is correct and avoids data loss.
|
|
||||||
|
|
||||||
.. _`MoveFileExW`: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365240%28v=vs.85%29.aspx
|
|
||||||
|
|
||||||
If the replaced file did not already exist, we get an
|
|
||||||
``ERROR_FILE_NOT_FOUND`` error from `ReplaceFileW`_, and attempt to
|
|
||||||
move ``.foo.tmp`` to ``foo``. This is similar to Interleaving C, and
|
|
||||||
either possibility for the resulting race avoids data loss.
|
|
||||||
|
|
||||||
We also need to consider what happens if another process opens ``foo``
|
|
||||||
and writes to it directly, rather than renaming another file onto it:
|
|
||||||
|
|
||||||
* On Unix, open file handles refer to inodes, not paths. If the other
|
|
||||||
process opens ``foo`` before it has been renamed to ``foo.backup``,
|
|
||||||
and then closes the file, changes will have been written to the file
|
|
||||||
at the same inode, even if that inode is now linked at ``foo.backup``.
|
|
||||||
This avoids data loss.
|
|
||||||
|
|
||||||
* On Windows, we have two subcases, depending on whether the sharing
|
|
||||||
flags specified by the other process when it opened its file handle
|
|
||||||
included ``FILE_SHARE_DELETE``. (This flag covers both deletion and
|
|
||||||
rename operations.)
|
|
||||||
|
|
||||||
i. If the sharing flags *do not* allow deletion/renaming, the
|
|
||||||
`ReplaceFileW`_ operation will fail without renaming ``foo``.
|
|
||||||
In this case we will end up with ``foo`` changed by the other
|
|
||||||
process, and the downloaded file still in ``.foo.tmp``.
|
|
||||||
This avoids data loss.
|
|
||||||
|
|
||||||
ii. If the sharing flags *do* allow deletion/renaming, then
|
|
||||||
data loss or corruption may occur. This is unavoidable and
|
|
||||||
can be attributed to the other process making a poor choice of
|
|
||||||
sharing flags (either explicitly if it used `CreateFile`_, or
|
|
||||||
via whichever higher-level API it used).
|
|
||||||
|
|
||||||
.. _`CreateFile`: https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858%28v=vs.85%29.aspx
|
|
||||||
|
|
||||||
Note that it is possible that another process tries to open the file
|
|
||||||
between steps 4b and 4c (or 4b′ and 4c′ on Windows). In this case the
|
|
||||||
open will fail because ``foo`` does not exist. Nevertheless, no data
|
|
||||||
will be lost, and in many cases the user will be able to retry the
|
|
||||||
operation.
|
|
||||||
|
|
||||||
Above we only described the case where the download was initially
|
|
||||||
classified as an overwrite. If it was classed as a conflict, the
|
|
||||||
procedure is the same except that we choose a unique filename
|
|
||||||
for the conflicted file (say, ``foo.conflicted_unique``). We write
|
|
||||||
the new contents to ``.foo.tmp`` and then rename it to
|
|
||||||
``foo.conflicted_unique`` in such a way that the rename will fail
|
|
||||||
if the destination already exists. (On Windows this is a simple
|
|
||||||
rename; on Unix it can be implemented as a link operation followed
|
|
||||||
by an unlink, similar to steps 4c and 4d above.) If this fails
|
|
||||||
because another process wrote ``foo.conflicted_unique`` after we
|
|
||||||
chose the filename, then we retry with a different filename.
|
|
||||||
|
|
||||||
|
|
||||||
Read/download collisions
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
A *read/download collision* occurs when another program reads
|
|
||||||
from ``foo`` in the local filesystem, concurrently with the new
|
|
||||||
version being written by the Magic Folder client. We want to
|
|
||||||
ensure that any successful attempt to read the file by the other
|
|
||||||
program obtains a consistent view of its contents.
|
|
||||||
|
|
||||||
On Unix, the above procedure for writing downloads is sufficient
|
|
||||||
to achieve this. There are three cases:
|
|
||||||
|
|
||||||
* A. The other process opens ``foo`` for reading before it is
|
|
||||||
renamed to ``foo.backup``. Then the file handle will continue to
|
|
||||||
refer to the old file across the rename, and the other process
|
|
||||||
will read the old contents.
|
|
||||||
|
|
||||||
* B. The other process attempts to open ``foo`` after it has been
|
|
||||||
renamed to ``foo.backup``, and before it is linked in step c.
|
|
||||||
The open call fails, which is acceptable.
|
|
||||||
|
|
||||||
* C. The other process opens ``foo`` after it has been linked to
|
|
||||||
the new file. Then it will read the new contents.
|
|
||||||
|
|
||||||
On Windows, the analysis is very similar, but case A′ needs to
|
|
||||||
be split into two subcases, depending on the sharing mode the other
|
|
||||||
process uses when opening the file for reading:
|
|
||||||
|
|
||||||
* A′. The other process opens ``foo`` before the Magic Folder
|
|
||||||
client's attempt to rename ``foo`` to ``foo.backup`` (as part
|
|
||||||
of the implementation of `ReplaceFileW`_). The subcases are:
|
|
||||||
|
|
||||||
i. The other process uses sharing flags that deny deletion and
|
|
||||||
renames. The `ReplaceFileW`_ call fails, and the download is
|
|
||||||
reclassified as a conflict. The downloaded file ends up at
|
|
||||||
``foo.conflicted``, which is correct.
|
|
||||||
|
|
||||||
ii. The other process uses sharing flags that allow deletion
|
|
||||||
and renames. The `ReplaceFileW`_ call succeeds, and the
|
|
||||||
other process reads inconsistent data. This can be attributed
|
|
||||||
to a poor choice of sharing flags by the other process.
|
|
||||||
|
|
||||||
* B′. The other process attempts to open ``foo`` at the point
|
|
||||||
during the `ReplaceFileW`_ call where it does not exist.
|
|
||||||
The open call fails, which is acceptable.
|
|
||||||
|
|
||||||
* C′. The other process opens ``foo`` after it has been linked to
|
|
||||||
the new file. Then it will read the new contents.
|
|
||||||
|
|
||||||
|
|
||||||
For both write/download and read/download collisions, we have
|
|
||||||
considered only interleavings with a single other process, and
|
|
||||||
only the most common possibilities for the other process'
|
|
||||||
interaction with the file. If multiple other processes are
|
|
||||||
involved, or if a process performs operations other than those
|
|
||||||
considered, then we cannot say much about the outcome in general;
|
|
||||||
however, we believe that such cases will be much less common.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
Fire Dragons: Distinguishing conflicts from overwrites
|
|
||||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
|
||||||
|
|
||||||
When synchronizing a file that has changed remotely, the Magic Folder
|
|
||||||
client needs to distinguish between overwrites, in which the remote
|
|
||||||
side was aware of your most recent version (if any) and overwrote it
|
|
||||||
with a new version, and conflicts, in which the remote side was unaware
|
|
||||||
of your most recent version when it published its new version. Those two
|
|
||||||
cases have to be handled differently — the latter needs to be raised
|
|
||||||
to the user as an issue the user will have to resolve and the former
|
|
||||||
must not bother the user.
|
|
||||||
|
|
||||||
For example, suppose that Alice's Magic Folder client sees a change
|
|
||||||
to ``foo`` in Bob's DMD. If the version it downloads from Bob's DMD
|
|
||||||
is "based on" the version currently in Alice's local filesystem at
|
|
||||||
the time Alice's client attempts to write the downloaded file — or if
|
|
||||||
there is no existing version in Alice's local filesystem at that time —
|
|
||||||
then it is an overwrite. Otherwise it is initially classified as a
|
|
||||||
conflict.
|
|
||||||
|
|
||||||
This initial classification is used by the procedure for writing a
|
|
||||||
file described in the `Earth Dragons`_ section above. As explained
|
|
||||||
in that section, we may reclassify an overwrite as a conflict if an
|
|
||||||
error occurs during the write procedure.
|
|
||||||
|
|
||||||
.. _`Earth Dragons`: #earth-dragons-collisions-between-local-filesystem-operations-and-downloads
|
|
||||||
|
|
||||||
In order to implement this policy, we need to specify how the
|
|
||||||
"based on" relation between file versions is recorded and updated.
|
|
||||||
|
|
||||||
We propose to record this information:
|
|
||||||
|
|
||||||
* in the :ref:`magic folder
|
|
||||||
db<filesystem_integration-local-scanning-and-database>`, for
|
|
||||||
local files;
|
|
||||||
* in the Tahoe-LAFS directory metadata, for files stored in the
|
|
||||||
Magic Folder.
|
|
||||||
|
|
||||||
In the magic folder db we will add a *last-downloaded record*,
|
|
||||||
consisting of ``last_downloaded_uri`` and ``last_downloaded_timestamp``
|
|
||||||
fields, for each path stored in the database. Whenever a Magic Folder
|
|
||||||
client downloads a file, it stores the downloaded version's URI and
|
|
||||||
the current local timestamp in this record. Since only immutable
|
|
||||||
files are used, the URI will be an immutable file URI, which is
|
|
||||||
deterministically and uniquely derived from the file contents and
|
|
||||||
the Tahoe-LAFS node's :doc:`convergence secret<../../convergence-secret>`.
|
|
||||||
|
|
||||||
(Note that the last-downloaded record is updated regardless of
|
|
||||||
whether the download is an overwrite or a conflict. The rationale
|
|
||||||
for this is to avoid "conflict loops" between clients, where every
|
|
||||||
new version after the first conflict would be considered as another
|
|
||||||
conflict.)
|
|
||||||
|
|
||||||
Later, in response to a local filesystem change at a given path, the
|
|
||||||
Magic Folder client reads the last-downloaded record associated with
|
|
||||||
that path (if any) from the database and then uploads the current
|
|
||||||
file. When it links the uploaded file into its client DMD, it
|
|
||||||
includes the ``last_downloaded_uri`` field in the metadata of the
|
|
||||||
directory entry, overwriting any existing field of that name. If
|
|
||||||
there was no last-downloaded record associated with the path, this
|
|
||||||
field is omitted.
|
|
||||||
|
|
||||||
Note that ``last_downloaded_uri`` field does *not* record the URI of
|
|
||||||
the uploaded file (which would be redundant); it records the URI of
|
|
||||||
the last download before the local change that caused the upload.
|
|
||||||
The field will be absent if the file has never been downloaded by
|
|
||||||
this client (i.e. if it was created on this client and no change
|
|
||||||
by any other client has been detected).
|
|
||||||
|
|
||||||
A possible refinement also takes into account the
|
|
||||||
``last_downloaded_timestamp`` field from the magic folder db, and
|
|
||||||
compares it to the timestamp of the change that caused the upload
|
|
||||||
(which should be later, assuming no system clock changes).
|
|
||||||
If the duration between these timestamps is very short, then we
|
|
||||||
are uncertain about whether the process on Bob's system that wrote
|
|
||||||
the local file could have taken into account the last download.
|
|
||||||
We can use this information to be conservative about treating
|
|
||||||
changes as conflicts. So, if the duration is less than a configured
|
|
||||||
threshold, we omit the ``last_downloaded_uri`` field from the
|
|
||||||
metadata. This will have the effect of making other clients treat
|
|
||||||
this change as a conflict whenever they already have a copy of the
|
|
||||||
file.
|
|
||||||
|
|
||||||
Conflict/overwrite decision algorithm
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
Now we are ready to describe the algorithm for determining whether a
|
|
||||||
download for the file ``foo`` is an overwrite or a conflict (refining
|
|
||||||
step 2 of the procedure from the `Earth Dragons`_ section).
|
|
||||||
|
|
||||||
Let ``last_downloaded_uri`` be the field of that name obtained from
|
|
||||||
the directory entry metadata for ``foo`` in Bob's DMD (this field
|
|
||||||
may be absent). Then the algorithm is:
|
|
||||||
|
|
||||||
* 2a. Attempt to "stat" ``foo`` to get its *current statinfo* (size
|
|
||||||
in bytes, ``mtime``, and ``ctime``). If Alice has no local copy
|
|
||||||
of ``foo``, classify as an overwrite.
|
|
||||||
|
|
||||||
* 2b. Read the following information for the path ``foo`` from the
|
|
||||||
local magic folder db:
|
|
||||||
|
|
||||||
* the *last-seen statinfo*, if any (this is the size in
|
|
||||||
bytes, ``mtime``, and ``ctime`` stored in the ``local_files``
|
|
||||||
table when the file was last uploaded);
|
|
||||||
* the ``last_uploaded_uri`` field of the ``local_files`` table
|
|
||||||
for this file, which is the URI under which the file was last
|
|
||||||
uploaded.
|
|
||||||
|
|
||||||
* 2c. If any of the following are true, then classify as a conflict:
|
|
||||||
|
|
||||||
* i. there are pending notifications of changes to ``foo``;
|
|
||||||
* ii. the last-seen statinfo is either absent (i.e. there is
|
|
||||||
no entry in the database for this path), or different from the
|
|
||||||
current statinfo;
|
|
||||||
* iii. either ``last_downloaded_uri`` or ``last_uploaded_uri``
|
|
||||||
(or both) are absent, or they are different.
|
|
||||||
|
|
||||||
Otherwise, classify as an overwrite.
|
|
||||||
|
|
||||||
|
|
||||||
Air Dragons: Collisions between local writes and uploads
|
|
||||||
''''''''''''''''''''''''''''''''''''''''''''''''''''''''
|
|
||||||
|
|
||||||
Short of filesystem-specific features on Unix or the `shadow copy service`_
|
|
||||||
on Windows (which is per-volume and therefore difficult to use in this
|
|
||||||
context), there is no way to *read* the whole contents of a file
|
|
||||||
atomically. Therefore, when we read a file in order to upload it, we
|
|
||||||
may read an inconsistent version if it was also being written locally.
|
|
||||||
|
|
||||||
.. _`shadow copy service`: https://technet.microsoft.com/en-us/library/ee923636%28v=ws.10%29.aspx
|
|
||||||
|
|
||||||
A well-behaved application can avoid this problem for its writes:
|
|
||||||
|
|
||||||
* On Unix, if another process modifies a file by renaming a temporary
|
|
||||||
file onto it, then we will consistently read either the old contents
|
|
||||||
or the new contents.
|
|
||||||
* On Windows, if the other process uses sharing flags to deny reads
|
|
||||||
while it is writing a file, then we will consistently read either
|
|
||||||
the old contents or the new contents, unless a sharing error occurs.
|
|
||||||
In the case of a sharing error we should retry later, up to a
|
|
||||||
maximum number of retries.
|
|
||||||
|
|
||||||
In the case of a not-so-well-behaved application writing to a file
|
|
||||||
at the same time we read from it, the magic folder will still be
|
|
||||||
eventually consistent, but inconsistent versions may be visible to
|
|
||||||
other users' clients.
|
|
||||||
|
|
||||||
In Objective 2 we implemented a delay, called the *pending delay*,
|
|
||||||
after the notification of a filesystem change and before the file is
|
|
||||||
read in order to upload it (Tahoe-LAFS ticket `#1440`_). If another
|
|
||||||
change notification occurs within the pending delay time, the delay
|
|
||||||
is restarted. This helps to some extent because it means that if
|
|
||||||
files are written more quickly than the pending delay and less
|
|
||||||
frequently than the pending delay, we shouldn't encounter this
|
|
||||||
inconsistency.
|
|
||||||
|
|
||||||
.. _`#1440`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1440
|
|
||||||
|
|
||||||
The likelihood of inconsistency could be further reduced, even for
|
|
||||||
writes by not-so-well-behaved applications, by delaying the actual
|
|
||||||
upload for a further period —called the *stability delay*— after the
|
|
||||||
file has finished being read. If a notification occurs between the
|
|
||||||
end of the pending delay and the end of the stability delay, then
|
|
||||||
the read would be aborted and the notification requeued.
|
|
||||||
|
|
||||||
This would have the effect of ensuring that no write notifications
|
|
||||||
have been received for the file during a time window that brackets
|
|
||||||
the period when it was being read, with margin before and after
|
|
||||||
this period defined by the pending and stability delays. The delays
|
|
||||||
are intended to account for asynchronous notification of events, and
|
|
||||||
caching in the filesystem.
|
|
||||||
|
|
||||||
Note however that we cannot guarantee that the delays will be long
|
|
||||||
enough to prevent inconsistency in any particular case. Also, the
|
|
||||||
stability delay would potentially affect performance significantly
|
|
||||||
because (unlike the pending delay) it is not overlapped when there
|
|
||||||
are multiple files on the upload queue. This performance impact
|
|
||||||
could be mitigated by uploading files in parallel where possible
|
|
||||||
(Tahoe-LAFS ticket `#1459`_).
|
|
||||||
|
|
||||||
We have not yet decided whether to implement the stability delay, and
|
|
||||||
it is not planned to be implemented for the OTF objective 4 milestone.
|
|
||||||
Ticket `#2431`_ has been opened to track this idea.
|
|
||||||
|
|
||||||
.. _`#1459`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1459
|
|
||||||
.. _`#2431`: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2431
|
|
||||||
|
|
||||||
Note that the situation of both a local process and the Magic Folder
|
|
||||||
client reading a file at the same time cannot cause any inconsistency.
|
|
||||||
|
|
||||||
|
|
||||||
Water Dragons: Handling deletion and renames
|
|
||||||
''''''''''''''''''''''''''''''''''''''''''''
|
|
||||||
|
|
||||||
Deletion of a file
|
|
||||||
~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
When a file is deleted from the filesystem of a Magic Folder client,
|
|
||||||
the most intuitive behavior is for it also to be deleted under that
|
|
||||||
name from other clients. To avoid data loss, the other clients should
|
|
||||||
actually rename their copies to a backup filename.
|
|
||||||
|
|
||||||
It would not be sufficient for a Magic Folder client that deletes
|
|
||||||
a file to implement this simply by removing the directory entry from
|
|
||||||
its DMD. Indeed, the entry may not exist in the client's DMD if it
|
|
||||||
has never previously changed the file.
|
|
||||||
|
|
||||||
Instead, the client links a zero-length file into its DMD and sets
|
|
||||||
``deleted: true`` in the directory entry metadata. Other clients
|
|
||||||
take this as a signal to rename their copies to the backup filename.
|
|
||||||
|
|
||||||
Note that the entry for this zero-length file has a version number as
|
|
||||||
usual, and later versions may restore the file.
|
|
||||||
|
|
||||||
When the downloader deletes a file (or renames it to a filename
|
|
||||||
ending in ``.backup``) in response to a remote change, a local
|
|
||||||
filesystem notification will occur, and we must make sure that this
|
|
||||||
is not treated as a local change. To do this we have the downloader
|
|
||||||
set the ``size`` field in the magic folder db to ``None`` (SQL NULL)
|
|
||||||
just before deleting the file, and suppress notifications for which
|
|
||||||
the local file does not exist, and the recorded ``size`` field is
|
|
||||||
``None``.
|
|
||||||
|
|
||||||
When a Magic Folder client restarts, we can detect files that had
|
|
||||||
been downloaded but were deleted while it was not running, because
|
|
||||||
their paths will have last-downloaded records in the magic folder db
|
|
||||||
with a ``size`` other than ``None``, and without any corresponding
|
|
||||||
local file.
|
|
||||||
|
|
||||||
Deletion of a directory
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~
|
|
||||||
|
|
||||||
Local filesystems (unlike a Tahoe-LAFS filesystem) normally cannot
|
|
||||||
unlink a directory that has any remaining children. Therefore a
|
|
||||||
Magic Folder client cannot delete local copies of directories in
|
|
||||||
general, because they will typically contain backup files. This must
|
|
||||||
be done manually on each client if desired.
|
|
||||||
|
|
||||||
Nevertheless, a Magic Folder client that deletes a directory should
|
|
||||||
set ``deleted: true`` on the metadata entry for the corresponding
|
|
||||||
zero-length file. This avoids the directory being recreated after
|
|
||||||
it has been manually deleted from a client.
|
|
||||||
|
|
||||||
Renaming
|
|
||||||
~~~~~~~~
|
|
||||||
|
|
||||||
It is sufficient to handle renaming of a file by treating it as a
|
|
||||||
deletion and an addition under the new name.
|
|
||||||
|
|
||||||
This also applies to directories, although users may find the
|
|
||||||
resulting behavior unintuitive: all of the files under the old name
|
|
||||||
will be renamed to backup filenames, and a new directory structure
|
|
||||||
created under the new name. We believe this is the best that can be
|
|
||||||
done without imposing unreasonable implementation complexity.
|
|
||||||
|
|
||||||
|
|
||||||
Summary
|
|
||||||
-------
|
|
||||||
|
|
||||||
This completes the design of remote-to-local synchronization.
|
|
||||||
We realize that it may seem very complicated. Anecdotally, proprietary
|
|
||||||
filesystem synchronization designs we are aware of, such as Dropbox,
|
|
||||||
are said to incur similar or greater design complexity.
|
|
@ -1,205 +0,0 @@
|
|||||||
Magic Folder user interface design
|
|
||||||
==================================
|
|
||||||
|
|
||||||
Scope
|
|
||||||
-----
|
|
||||||
|
|
||||||
In this Objective we will design a user interface to allow users to conveniently
|
|
||||||
and securely indicate which folders on some devices should be "magically" linked
|
|
||||||
to which folders on other devices.
|
|
||||||
|
|
||||||
This is a critical usability and security issue for which there is no known perfect
|
|
||||||
solution, but which we believe is amenable to a "good enough" trade-off solution.
|
|
||||||
This document explains the design and justifies its trade-offs in terms of security,
|
|
||||||
usability, and time-to-market.
|
|
||||||
|
|
||||||
Tickets on the Tahoe-LAFS trac with the `otf-magic-folder-objective6`_
|
|
||||||
keyword are within the scope of the user interface design.
|
|
||||||
|
|
||||||
.. _otf-magic-folder-objective6: https://tahoe-lafs.org/trac/tahoe-lafs/query?status=!closed&keywords=~otf-magic-folder-objective6
|
|
||||||
|
|
||||||
Glossary
|
|
||||||
''''''''
|
|
||||||
|
|
||||||
Object: a file or directory
|
|
||||||
|
|
||||||
DMD: distributed mutable directory
|
|
||||||
|
|
||||||
Folder: an abstract directory that is synchronized between clients.
|
|
||||||
(A folder is not the same as the directory corresponding to it on
|
|
||||||
any particular client, nor is it the same as a DMD.)
|
|
||||||
|
|
||||||
Collective: the set of clients subscribed to a given Magic Folder.
|
|
||||||
|
|
||||||
Diminishing: the process of deriving, from an existing capability,
|
|
||||||
another capability that gives less authority (for example, deriving a
|
|
||||||
read cap from a read/write cap).
|
|
||||||
|
|
||||||
|
|
||||||
Design Constraints
|
|
||||||
------------------
|
|
||||||
|
|
||||||
The design of the Tahoe-side representation of a Magic Folder, and the
|
|
||||||
polling mechanism that the Magic Folder clients will use to detect remote
|
|
||||||
changes was discussed in :doc:`remote-to-local-sync<remote-to-local-sync>`,
|
|
||||||
and we will not revisit that here. The assumption made by that design was
|
|
||||||
that each client would be configured with the following information:
|
|
||||||
|
|
||||||
* a write cap to its own *client DMD*.
|
|
||||||
* a read cap to a *collective directory*.
|
|
||||||
|
|
||||||
The collective directory contains links to each client DMD named by the
|
|
||||||
corresponding client's nickname.
|
|
||||||
|
|
||||||
This design was chosen to allow straightforward addition of clients without
|
|
||||||
requiring each existing client to change its configuration.
|
|
||||||
|
|
||||||
Note that each client in a Magic Folder collective has the authority to add,
|
|
||||||
modify or delete any object within the Magic Folder. It is also able to control
|
|
||||||
to some extent whether its writes will be treated by another client as overwrites
|
|
||||||
or as conflicts. However, there is still a reliability benefit to preventing a
|
|
||||||
client from accidentally modifying another client's DMD, or from accidentally
|
|
||||||
modifying the collective directory in a way that would lose data. This motivates
|
|
||||||
ensuring that each client only has access to the caps above, rather than, say,
|
|
||||||
every client having a write cap to the collective directory.
|
|
||||||
|
|
||||||
Another important design constraint is that we cannot violate the :doc:`write
|
|
||||||
coordination directive<../../write_coordination>`; that is, we cannot write to
|
|
||||||
the same mutable directory from multiple clients, even during the setup phase
|
|
||||||
when adding a client.
|
|
||||||
|
|
||||||
Within these constraints, for usability we want to minimize the number of steps
|
|
||||||
required to configure a Magic Folder collective.
|
|
||||||
|
|
||||||
|
|
||||||
Proposed Design
|
|
||||||
---------------
|
|
||||||
|
|
||||||
Three ``tahoe`` subcommands are added::
|
|
||||||
|
|
||||||
tahoe magic-folder create MAGIC: [MY_NICKNAME LOCAL_DIR]
|
|
||||||
|
|
||||||
Create an empty Magic Folder. The MAGIC: local alias is set
|
|
||||||
to a write cap which can be used to refer to this Magic Folder
|
|
||||||
in future ``tahoe magic-folder invite`` commands.
|
|
||||||
|
|
||||||
If MY_NICKNAME and LOCAL_DIR are given, the current client
|
|
||||||
immediately joins the newly created Magic Folder with that
|
|
||||||
nickname and local directory.
|
|
||||||
|
|
||||||
|
|
||||||
tahoe magic-folder invite MAGIC: THEIR_NICKNAME
|
|
||||||
|
|
||||||
Print an "invitation" that can be used to invite another
|
|
||||||
client to join a Magic Folder, with the given nickname.
|
|
||||||
|
|
||||||
The invitation must be sent to the user of the other client
|
|
||||||
over a secure channel (e.g. PGP email, OTR, or ssh).
|
|
||||||
|
|
||||||
This command will normally be run by the same client that
|
|
||||||
created the Magic Folder. However, it may be run by a
|
|
||||||
different client if the ``MAGIC:`` alias is copied to
|
|
||||||
the ``private/aliases`` file of that other client, or if
|
|
||||||
``MAGIC:`` is replaced by the write cap to which it points.
|
|
||||||
|
|
||||||
|
|
||||||
tahoe magic-folder join INVITATION LOCAL_DIR
|
|
||||||
|
|
||||||
Accept an invitation created by ``tahoe magic-folder invite``.
|
|
||||||
The current client joins the specified Magic Folder, which will
|
|
||||||
appear in the local filesystem at the given directory.
|
|
||||||
|
|
||||||
|
|
||||||
There are no commands to remove a client or to revoke an
|
|
||||||
invitation, although those are possible features that could
|
|
||||||
be added in future. (When removing a client, it is necessary
|
|
||||||
to copy each file it added to some other client's DMD, if it
|
|
||||||
is the most recent version of that file.)
|
|
||||||
|
|
||||||
|
|
||||||
Implementation
|
|
||||||
''''''''''''''
|
|
||||||
|
|
||||||
For "``tahoe magic-folder create MAGIC: [MY_NICKNAME LOCAL_DIR]``" :
|
|
||||||
|
|
||||||
1. Run "``tahoe create-alias MAGIC:``".
|
|
||||||
2. If ``MY_NICKNAME`` and ``LOCAL_DIR`` are given, do the equivalent of::
|
|
||||||
|
|
||||||
INVITATION=`tahoe invite-magic-folder MAGIC: MY_NICKNAME`
|
|
||||||
tahoe join-magic-folder INVITATION LOCAL_DIR
|
|
||||||
|
|
||||||
|
|
||||||
For "``tahoe magic-folder invite COLLECTIVE_WRITECAP NICKNAME``" :
|
|
||||||
|
|
||||||
(``COLLECTIVE_WRITECAP`` can, as a special case, be an alias such as ``MAGIC:``.)
|
|
||||||
|
|
||||||
1. Create an empty client DMD. Let its write URI be ``CLIENT_WRITECAP``.
|
|
||||||
2. Diminish ``CLIENT_WRITECAP`` to ``CLIENT_READCAP``, and
|
|
||||||
diminish ``COLLECTIVE_WRITECAP`` to ``COLLECTIVE_READCAP``.
|
|
||||||
3. Run "``tahoe ln CLIENT_READCAP COLLECTIVE_WRITECAP/NICKNAME``".
|
|
||||||
4. Print "``COLLECTIVE_READCAP+CLIENT_WRITECAP``" as the invitation,
|
|
||||||
accompanied by instructions on how to accept the invitation and
|
|
||||||
the need to send it over a secure channel.
|
|
||||||
|
|
||||||
|
|
||||||
For "``tahoe magic-folder join INVITATION LOCAL_DIR``" :
|
|
||||||
|
|
||||||
1. Parse ``INVITATION`` as ``COLLECTIVE_READCAP+CLIENT_WRITECAP``.
|
|
||||||
2. Write ``CLIENT_WRITECAP`` to the file ``magic_folder_dircap``
|
|
||||||
under the client's ``private`` directory.
|
|
||||||
3. Write ``COLLECTIVE_READCAP`` to the file ``collective_dircap``
|
|
||||||
under the client's ``private`` directory.
|
|
||||||
4. Edit the client's ``tahoe.cfg`` to set
|
|
||||||
``[magic_folder] enabled = True`` and
|
|
||||||
``[magic_folder] local.directory = LOCAL_DIR``.
|
|
||||||
|
|
||||||
|
|
||||||
Discussion
|
|
||||||
----------
|
|
||||||
|
|
||||||
The proposed design has a minor violation of the
|
|
||||||
`Principle of Least Authority`_ in order to reduce the number
|
|
||||||
of steps needed. The invoker of "``tahoe magic-folder invite``"
|
|
||||||
creates the client DMD on behalf of the invited client, and
|
|
||||||
could retain its write cap (which is part of the invitation).
|
|
||||||
|
|
||||||
.. _`Principle of Least Authority`: http://www.eros-os.org/papers/secnotsep.pdf
|
|
||||||
|
|
||||||
A possible alternative design would be for the invited client
|
|
||||||
to create its own client DMD, and send it back to the inviter
|
|
||||||
to be linked into the collective directory. However this would
|
|
||||||
require another secure communication and another command
|
|
||||||
invocation per client. Given that, as mentioned earlier, each
|
|
||||||
client in a Magic Folder collective already has the authority
|
|
||||||
to add, modify or delete any object within the Magic Folder,
|
|
||||||
we considered the potential security/reliability improvement
|
|
||||||
here not to be worth the loss of usability.
|
|
||||||
|
|
||||||
We also considered a design where each client had write access to
|
|
||||||
the collective directory. This would arguably be a more serious
|
|
||||||
violation of the Principle of Least Authority than the one above
|
|
||||||
(because all clients would have excess authority rather than just
|
|
||||||
the inviter). In any case, it was not clear how to make such a
|
|
||||||
design satisfy the :doc:`write coordination
|
|
||||||
directive<../../write_coordination>`, because the collective
|
|
||||||
directory would have needed to be written to by multiple clients.
|
|
||||||
|
|
||||||
The reliance on a secure channel to send the invitation to its
|
|
||||||
intended recipient is not ideal, since it may involve additional
|
|
||||||
software such as clients for PGP, OTR, ssh etc. However, we believe
|
|
||||||
that this complexity is necessary rather than incidental, because
|
|
||||||
there must be some way to distinguish the intended recipient from
|
|
||||||
potential attackers who would try to become members of the Magic
|
|
||||||
Folder collective without authorization. By making use of existing
|
|
||||||
channels that have likely already been set up by security-conscious
|
|
||||||
users, we avoid reinventing the wheel or imposing substantial extra
|
|
||||||
implementation costs.
|
|
||||||
|
|
||||||
The length of an invitation will be approximately the combined
|
|
||||||
length of a Tahoe-LAFS read cap and write cap. This is several
|
|
||||||
lines long, but still short enough to be cut-and-pasted successfully
|
|
||||||
if care is taken. Errors in copying the invitation can be detected
|
|
||||||
since Tahoe-LAFS cap URIs are self-authenticating.
|
|
||||||
|
|
||||||
The implementation of the ``tahoe`` subcommands is straightforward
|
|
||||||
and raises no further difficult design issues.
|
|
@ -8,6 +8,10 @@ from os.path import join, exists
|
|||||||
from tempfile import mkdtemp, mktemp
|
from tempfile import mkdtemp, mktemp
|
||||||
from functools import partial
|
from functools import partial
|
||||||
|
|
||||||
|
from foolscap.furl import (
|
||||||
|
decode_furl,
|
||||||
|
)
|
||||||
|
|
||||||
from eliot import (
|
from eliot import (
|
||||||
to_file,
|
to_file,
|
||||||
log_call,
|
log_call,
|
||||||
@ -226,6 +230,16 @@ def introducer_furl(introducer, temp_dir):
|
|||||||
print("Don't see {} yet".format(furl_fname))
|
print("Don't see {} yet".format(furl_fname))
|
||||||
sleep(.1)
|
sleep(.1)
|
||||||
furl = open(furl_fname, 'r').read()
|
furl = open(furl_fname, 'r').read()
|
||||||
|
tubID, location_hints, name = decode_furl(furl)
|
||||||
|
if not location_hints:
|
||||||
|
# If there are no location hints then nothing can ever possibly
|
||||||
|
# connect to it and the only thing that can happen next is something
|
||||||
|
# will hang or time out. So just give up right now.
|
||||||
|
raise ValueError(
|
||||||
|
"Introducer ({!r}) fURL has no location hints!".format(
|
||||||
|
introducer_furl,
|
||||||
|
),
|
||||||
|
)
|
||||||
return furl
|
return furl
|
||||||
|
|
||||||
|
|
||||||
@ -332,11 +346,6 @@ def storage_nodes(reactor, temp_dir, introducer, introducer_furl, flog_gatherer,
|
|||||||
@pytest.fixture(scope='session')
|
@pytest.fixture(scope='session')
|
||||||
@log_call(action_type=u"integration:alice", include_args=[], include_result=False)
|
@log_call(action_type=u"integration:alice", include_args=[], include_result=False)
|
||||||
def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request):
|
def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request):
|
||||||
try:
|
|
||||||
mkdir(join(temp_dir, 'magic-alice'))
|
|
||||||
except OSError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
process = pytest_twisted.blockon(
|
process = pytest_twisted.blockon(
|
||||||
_create_node(
|
_create_node(
|
||||||
reactor, request, temp_dir, introducer_furl, flog_gatherer, "alice",
|
reactor, request, temp_dir, introducer_furl, flog_gatherer, "alice",
|
||||||
@ -351,11 +360,6 @@ def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, requ
|
|||||||
@pytest.fixture(scope='session')
|
@pytest.fixture(scope='session')
|
||||||
@log_call(action_type=u"integration:bob", include_args=[], include_result=False)
|
@log_call(action_type=u"integration:bob", include_args=[], include_result=False)
|
||||||
def bob(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request):
|
def bob(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, request):
|
||||||
try:
|
|
||||||
mkdir(join(temp_dir, 'magic-bob'))
|
|
||||||
except OSError:
|
|
||||||
pass
|
|
||||||
|
|
||||||
process = pytest_twisted.blockon(
|
process = pytest_twisted.blockon(
|
||||||
_create_node(
|
_create_node(
|
||||||
reactor, request, temp_dir, introducer_furl, flog_gatherer, "bob",
|
reactor, request, temp_dir, introducer_furl, flog_gatherer, "bob",
|
||||||
@ -368,98 +372,10 @@ def bob(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, reques
|
|||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope='session')
|
@pytest.fixture(scope='session')
|
||||||
@log_call(action_type=u"integration:alice:invite", include_args=["temp_dir"])
|
@pytest.mark.skipif(sys.platform.startswith('win'),
|
||||||
def alice_invite(reactor, alice, temp_dir, request):
|
'Tor tests are unstable on Windows')
|
||||||
node_dir = join(temp_dir, 'alice')
|
|
||||||
|
|
||||||
with start_action(action_type=u"integration:alice:magic_folder:create"):
|
|
||||||
# FIXME XXX by the time we see "client running" in the logs, the
|
|
||||||
# storage servers aren't "really" ready to roll yet (uploads fairly
|
|
||||||
# consistently fail if we don't hack in this pause...)
|
|
||||||
proto = _CollectOutputProtocol()
|
|
||||||
_tahoe_runner_optional_coverage(
|
|
||||||
proto,
|
|
||||||
reactor,
|
|
||||||
request,
|
|
||||||
[
|
|
||||||
'magic-folder', 'create',
|
|
||||||
'--poll-interval', '2',
|
|
||||||
'--basedir', node_dir, 'magik:', 'alice',
|
|
||||||
join(temp_dir, 'magic-alice'),
|
|
||||||
]
|
|
||||||
)
|
|
||||||
pytest_twisted.blockon(proto.done)
|
|
||||||
|
|
||||||
with start_action(action_type=u"integration:alice:magic_folder:invite") as a:
|
|
||||||
proto = _CollectOutputProtocol()
|
|
||||||
_tahoe_runner_optional_coverage(
|
|
||||||
proto,
|
|
||||||
reactor,
|
|
||||||
request,
|
|
||||||
[
|
|
||||||
'magic-folder', 'invite',
|
|
||||||
'--basedir', node_dir, 'magik:', 'bob',
|
|
||||||
]
|
|
||||||
)
|
|
||||||
pytest_twisted.blockon(proto.done)
|
|
||||||
invite = proto.output.getvalue()
|
|
||||||
a.add_success_fields(invite=invite)
|
|
||||||
|
|
||||||
with start_action(action_type=u"integration:alice:magic_folder:restart"):
|
|
||||||
# before magic-folder works, we have to stop and restart (this is
|
|
||||||
# crappy for the tests -- can we fix it in magic-folder?)
|
|
||||||
try:
|
|
||||||
alice.transport.signalProcess('TERM')
|
|
||||||
pytest_twisted.blockon(alice.transport.exited)
|
|
||||||
except ProcessExitedAlready:
|
|
||||||
pass
|
|
||||||
with start_action(action_type=u"integration:alice:magic_folder:magic-text"):
|
|
||||||
magic_text = 'Completed initial Magic Folder scan successfully'
|
|
||||||
pytest_twisted.blockon(_run_node(reactor, node_dir, request, magic_text))
|
|
||||||
await_client_ready(alice)
|
|
||||||
return invite
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope='session')
|
|
||||||
@log_call(
|
|
||||||
action_type=u"integration:magic_folder",
|
|
||||||
include_args=["alice_invite", "temp_dir"],
|
|
||||||
)
|
|
||||||
def magic_folder(reactor, alice_invite, alice, bob, temp_dir, request):
|
|
||||||
print("pairing magic-folder")
|
|
||||||
bob_dir = join(temp_dir, 'bob')
|
|
||||||
proto = _CollectOutputProtocol()
|
|
||||||
_tahoe_runner_optional_coverage(
|
|
||||||
proto,
|
|
||||||
reactor,
|
|
||||||
request,
|
|
||||||
[
|
|
||||||
'magic-folder', 'join',
|
|
||||||
'--poll-interval', '1',
|
|
||||||
'--basedir', bob_dir,
|
|
||||||
alice_invite,
|
|
||||||
join(temp_dir, 'magic-bob'),
|
|
||||||
]
|
|
||||||
)
|
|
||||||
pytest_twisted.blockon(proto.done)
|
|
||||||
|
|
||||||
# before magic-folder works, we have to stop and restart (this is
|
|
||||||
# crappy for the tests -- can we fix it in magic-folder?)
|
|
||||||
try:
|
|
||||||
print("Sending TERM to Bob")
|
|
||||||
bob.transport.signalProcess('TERM')
|
|
||||||
pytest_twisted.blockon(bob.transport.exited)
|
|
||||||
except ProcessExitedAlready:
|
|
||||||
pass
|
|
||||||
|
|
||||||
magic_text = 'Completed initial Magic Folder scan successfully'
|
|
||||||
pytest_twisted.blockon(_run_node(reactor, bob_dir, request, magic_text))
|
|
||||||
await_client_ready(bob)
|
|
||||||
return (join(temp_dir, 'magic-alice'), join(temp_dir, 'magic-bob'))
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope='session')
|
|
||||||
def chutney(reactor, temp_dir):
|
def chutney(reactor, temp_dir):
|
||||||
|
|
||||||
chutney_dir = join(temp_dir, 'chutney')
|
chutney_dir = join(temp_dir, 'chutney')
|
||||||
mkdir(chutney_dir)
|
mkdir(chutney_dir)
|
||||||
|
|
||||||
@ -478,18 +394,39 @@ def chutney(reactor, temp_dir):
|
|||||||
proto,
|
proto,
|
||||||
'git',
|
'git',
|
||||||
(
|
(
|
||||||
'git', 'clone', '--depth=1',
|
'git', 'clone',
|
||||||
'https://git.torproject.org/chutney.git',
|
'https://git.torproject.org/chutney.git',
|
||||||
chutney_dir,
|
chutney_dir,
|
||||||
),
|
),
|
||||||
env=environ,
|
env=environ,
|
||||||
)
|
)
|
||||||
pytest_twisted.blockon(proto.done)
|
pytest_twisted.blockon(proto.done)
|
||||||
|
|
||||||
|
# XXX: Here we reset Chutney to the last revision known to work
|
||||||
|
# with Python 2, as a workaround for Chutney moving to Python 3.
|
||||||
|
# When this is no longer necessary, we will have to drop this and
|
||||||
|
# add '--depth=1' back to the above 'git clone' subprocess.
|
||||||
|
proto = _DumpOutputProtocol(None)
|
||||||
|
reactor.spawnProcess(
|
||||||
|
proto,
|
||||||
|
'git',
|
||||||
|
(
|
||||||
|
'git', '-C', chutney_dir,
|
||||||
|
'reset', '--hard',
|
||||||
|
'99bd06c7554b9113af8c0877b6eca4ceb95dcbaa'
|
||||||
|
),
|
||||||
|
env=environ,
|
||||||
|
)
|
||||||
|
pytest_twisted.blockon(proto.done)
|
||||||
|
|
||||||
return chutney_dir
|
return chutney_dir
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope='session')
|
@pytest.fixture(scope='session')
|
||||||
|
@pytest.mark.skipif(sys.platform.startswith('win'),
|
||||||
|
reason='Tor tests are unstable on Windows')
|
||||||
def tor_network(reactor, temp_dir, chutney, request):
|
def tor_network(reactor, temp_dir, chutney, request):
|
||||||
|
|
||||||
# this is the actual "chutney" script at the root of a chutney checkout
|
# this is the actual "chutney" script at the root of a chutney checkout
|
||||||
chutney_dir = chutney
|
chutney_dir = chutney
|
||||||
chut = join(chutney_dir, 'chutney')
|
chut = join(chutney_dir, 'chutney')
|
||||||
|
@ -16,7 +16,3 @@ def test_create_introducer(introducer):
|
|||||||
|
|
||||||
def test_create_storage(storage_nodes):
|
def test_create_storage(storage_nodes):
|
||||||
print("Created {} storage nodes".format(len(storage_nodes)))
|
print("Created {} storage nodes".format(len(storage_nodes)))
|
||||||
|
|
||||||
|
|
||||||
def test_create_alice_bob_magicfolder(magic_folder):
|
|
||||||
print("Alice and Bob have paired magic-folders")
|
|
||||||
|
@ -1,462 +0,0 @@
|
|||||||
import sys
|
|
||||||
import time
|
|
||||||
import shutil
|
|
||||||
from os import mkdir, unlink, utime
|
|
||||||
from os.path import join, exists, getmtime
|
|
||||||
|
|
||||||
import util
|
|
||||||
|
|
||||||
import pytest_twisted
|
|
||||||
|
|
||||||
|
|
||||||
# tests converted from check_magicfolder_smoke.py
|
|
||||||
# see "conftest.py" for the fixtures (e.g. "magic_folder")
|
|
||||||
|
|
||||||
def test_eliot_logs_are_written(alice, bob, temp_dir):
|
|
||||||
# The integration test configuration arranges for this logging
|
|
||||||
# configuration. Verify it actually does what we want.
|
|
||||||
#
|
|
||||||
# The alice and bob arguments looks unused but they actually tell pytest
|
|
||||||
# to set up all the magic-folder stuff. The assertions here are about
|
|
||||||
# side-effects of that setup.
|
|
||||||
assert exists(join(temp_dir, "alice", "logs", "eliot.json"))
|
|
||||||
assert exists(join(temp_dir, "bob", "logs", "eliot.json"))
|
|
||||||
|
|
||||||
|
|
||||||
def test_alice_writes_bob_receives(magic_folder):
|
|
||||||
alice_dir, bob_dir = magic_folder
|
|
||||||
|
|
||||||
with open(join(alice_dir, "first_file"), "w") as f:
|
|
||||||
f.write("alice wrote this")
|
|
||||||
|
|
||||||
util.await_file_contents(join(bob_dir, "first_file"), "alice wrote this")
|
|
||||||
return
|
|
||||||
|
|
||||||
|
|
||||||
def test_alice_writes_bob_receives_multiple(magic_folder):
|
|
||||||
"""
|
|
||||||
When Alice does a series of updates, Bob should just receive them
|
|
||||||
with no .backup or .conflict files being produced.
|
|
||||||
"""
|
|
||||||
alice_dir, bob_dir = magic_folder
|
|
||||||
|
|
||||||
unwanted_files = [
|
|
||||||
join(bob_dir, "multiple.backup"),
|
|
||||||
join(bob_dir, "multiple.conflict")
|
|
||||||
]
|
|
||||||
|
|
||||||
# first update
|
|
||||||
with open(join(alice_dir, "multiple"), "w") as f:
|
|
||||||
f.write("alice wrote this")
|
|
||||||
|
|
||||||
util.await_file_contents(
|
|
||||||
join(bob_dir, "multiple"), "alice wrote this",
|
|
||||||
error_if=unwanted_files,
|
|
||||||
)
|
|
||||||
|
|
||||||
# second update
|
|
||||||
with open(join(alice_dir, "multiple"), "w") as f:
|
|
||||||
f.write("someone changed their mind")
|
|
||||||
|
|
||||||
util.await_file_contents(
|
|
||||||
join(bob_dir, "multiple"), "someone changed their mind",
|
|
||||||
error_if=unwanted_files,
|
|
||||||
)
|
|
||||||
|
|
||||||
# third update
|
|
||||||
with open(join(alice_dir, "multiple"), "w") as f:
|
|
||||||
f.write("absolutely final version ship it")
|
|
||||||
|
|
||||||
util.await_file_contents(
|
|
||||||
join(bob_dir, "multiple"), "absolutely final version ship it",
|
|
||||||
error_if=unwanted_files,
|
|
||||||
)
|
|
||||||
|
|
||||||
# forth update, but both "at once" so one should conflict
|
|
||||||
time.sleep(2)
|
|
||||||
with open(join(alice_dir, "multiple"), "w") as f:
|
|
||||||
f.write("okay one more attempt")
|
|
||||||
with open(join(bob_dir, "multiple"), "w") as f:
|
|
||||||
f.write("...but just let me add")
|
|
||||||
|
|
||||||
bob_conflict = join(bob_dir, "multiple.conflict")
|
|
||||||
alice_conflict = join(alice_dir, "multiple.conflict")
|
|
||||||
|
|
||||||
found = util.await_files_exist([
|
|
||||||
bob_conflict,
|
|
||||||
alice_conflict,
|
|
||||||
])
|
|
||||||
|
|
||||||
assert len(found) > 0, "Should have found a conflict"
|
|
||||||
print("conflict found (as expected)")
|
|
||||||
|
|
||||||
|
|
||||||
def test_alice_writes_bob_receives_old_timestamp(magic_folder):
|
|
||||||
alice_dir, bob_dir = magic_folder
|
|
||||||
fname = join(alice_dir, "ts_file")
|
|
||||||
ts = time.time() - (60 * 60 * 36) # 36 hours ago
|
|
||||||
|
|
||||||
with open(fname, "w") as f:
|
|
||||||
f.write("alice wrote this")
|
|
||||||
utime(fname, (time.time(), ts))
|
|
||||||
|
|
||||||
fname = join(bob_dir, "ts_file")
|
|
||||||
util.await_file_contents(fname, "alice wrote this")
|
|
||||||
# make sure the timestamp is correct
|
|
||||||
assert int(getmtime(fname)) == int(ts)
|
|
||||||
return
|
|
||||||
|
|
||||||
|
|
||||||
def test_bob_writes_alice_receives(magic_folder):
|
|
||||||
alice_dir, bob_dir = magic_folder
|
|
||||||
|
|
||||||
with open(join(bob_dir, "second_file"), "w") as f:
|
|
||||||
f.write("bob wrote this")
|
|
||||||
|
|
||||||
util.await_file_contents(join(alice_dir, "second_file"), "bob wrote this")
|
|
||||||
return
|
|
||||||
|
|
||||||
|
|
||||||
def test_alice_deletes(magic_folder):
|
|
||||||
# alice writes a file, waits for bob to get it and then deletes it.
|
|
||||||
alice_dir, bob_dir = magic_folder
|
|
||||||
|
|
||||||
with open(join(alice_dir, "delfile"), "w") as f:
|
|
||||||
f.write("alice wrote this")
|
|
||||||
|
|
||||||
util.await_file_contents(join(bob_dir, "delfile"), "alice wrote this")
|
|
||||||
|
|
||||||
# bob has the file; now alices deletes it
|
|
||||||
unlink(join(alice_dir, "delfile"))
|
|
||||||
|
|
||||||
# bob should remove his copy, but preserve a backup
|
|
||||||
util.await_file_vanishes(join(bob_dir, "delfile"))
|
|
||||||
util.await_file_contents(join(bob_dir, "delfile.backup"), "alice wrote this")
|
|
||||||
return
|
|
||||||
|
|
||||||
|
|
||||||
def test_alice_creates_bob_edits(magic_folder):
|
|
||||||
alice_dir, bob_dir = magic_folder
|
|
||||||
|
|
||||||
# alice writes a file
|
|
||||||
with open(join(alice_dir, "editfile"), "w") as f:
|
|
||||||
f.write("alice wrote this")
|
|
||||||
|
|
||||||
util.await_file_contents(join(bob_dir, "editfile"), "alice wrote this")
|
|
||||||
|
|
||||||
# now bob edits it
|
|
||||||
with open(join(bob_dir, "editfile"), "w") as f:
|
|
||||||
f.write("bob says foo")
|
|
||||||
|
|
||||||
util.await_file_contents(join(alice_dir, "editfile"), "bob says foo")
|
|
||||||
|
|
||||||
|
|
||||||
def test_bob_creates_sub_directory(magic_folder):
|
|
||||||
alice_dir, bob_dir = magic_folder
|
|
||||||
|
|
||||||
# bob makes a sub-dir, with a file in it
|
|
||||||
mkdir(join(bob_dir, "subdir"))
|
|
||||||
with open(join(bob_dir, "subdir", "a_file"), "w") as f:
|
|
||||||
f.write("bob wuz here")
|
|
||||||
|
|
||||||
# alice gets it
|
|
||||||
util.await_file_contents(join(alice_dir, "subdir", "a_file"), "bob wuz here")
|
|
||||||
|
|
||||||
# now bob deletes it again
|
|
||||||
shutil.rmtree(join(bob_dir, "subdir"))
|
|
||||||
|
|
||||||
# alice should delete it as well
|
|
||||||
util.await_file_vanishes(join(alice_dir, "subdir", "a_file"))
|
|
||||||
# i *think* it's by design that the subdir won't disappear,
|
|
||||||
# because a "a_file.backup" should appear...
|
|
||||||
util.await_file_contents(join(alice_dir, "subdir", "a_file.backup"), "bob wuz here")
|
|
||||||
|
|
||||||
|
|
||||||
def test_bob_creates_alice_deletes_bob_restores(magic_folder):
|
|
||||||
alice_dir, bob_dir = magic_folder
|
|
||||||
|
|
||||||
# bob creates a file
|
|
||||||
with open(join(bob_dir, "boom"), "w") as f:
|
|
||||||
f.write("bob wrote this")
|
|
||||||
|
|
||||||
util.await_file_contents(
|
|
||||||
join(alice_dir, "boom"),
|
|
||||||
"bob wrote this"
|
|
||||||
)
|
|
||||||
|
|
||||||
# alice deletes it (so bob should as well .. but keep a backup)
|
|
||||||
unlink(join(alice_dir, "boom"))
|
|
||||||
util.await_file_vanishes(join(bob_dir, "boom"))
|
|
||||||
assert exists(join(bob_dir, "boom.backup"))
|
|
||||||
|
|
||||||
# bob restore it, with new contents
|
|
||||||
unlink(join(bob_dir, "boom.backup"))
|
|
||||||
with open(join(bob_dir, "boom"), "w") as f:
|
|
||||||
f.write("bob wrote this again, because reasons")
|
|
||||||
|
|
||||||
# XXX double-check this behavior is correct!
|
|
||||||
|
|
||||||
# alice sees bob's update, but marks it as a conflict (because
|
|
||||||
# .. she previously deleted it? does that really make sense)
|
|
||||||
|
|
||||||
util.await_file_contents(
|
|
||||||
join(alice_dir, "boom"),
|
|
||||||
"bob wrote this again, because reasons",
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def test_bob_creates_alice_deletes_alice_restores(magic_folder):
|
|
||||||
alice_dir, bob_dir = magic_folder
|
|
||||||
|
|
||||||
# bob creates a file
|
|
||||||
with open(join(bob_dir, "boom2"), "w") as f:
|
|
||||||
f.write("bob wrote this")
|
|
||||||
|
|
||||||
util.await_file_contents(
|
|
||||||
join(alice_dir, "boom2"),
|
|
||||||
"bob wrote this"
|
|
||||||
)
|
|
||||||
|
|
||||||
# alice deletes it (so bob should as well)
|
|
||||||
unlink(join(alice_dir, "boom2"))
|
|
||||||
util.await_file_vanishes(join(bob_dir, "boom2"))
|
|
||||||
|
|
||||||
# alice restore it, with new contents
|
|
||||||
with open(join(alice_dir, "boom2"), "w") as f:
|
|
||||||
f.write("alice re-wrote this again, because reasons")
|
|
||||||
|
|
||||||
util.await_file_contents(
|
|
||||||
join(bob_dir, "boom2"),
|
|
||||||
"alice re-wrote this again, because reasons"
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def test_bob_conflicts_with_alice_fresh(magic_folder):
|
|
||||||
# both alice and bob make a file at "the same time".
|
|
||||||
alice_dir, bob_dir = magic_folder
|
|
||||||
|
|
||||||
# either alice or bob will "win" by uploading to the DMD first.
|
|
||||||
with open(join(bob_dir, 'alpha'), 'w') as f0, open(join(alice_dir, 'alpha'), 'w') as f1:
|
|
||||||
f0.write("this is bob's alpha\n")
|
|
||||||
f1.write("this is alice's alpha\n")
|
|
||||||
|
|
||||||
# there should be conflicts
|
|
||||||
_bob_conflicts_alice_await_conflicts('alpha', alice_dir, bob_dir)
|
|
||||||
|
|
||||||
|
|
||||||
def test_bob_conflicts_with_alice_preexisting(magic_folder):
|
|
||||||
# both alice and bob edit a file at "the same time" (similar to
|
|
||||||
# above, but the file already exists before the edits)
|
|
||||||
alice_dir, bob_dir = magic_folder
|
|
||||||
|
|
||||||
# have bob create the file
|
|
||||||
with open(join(bob_dir, 'beta'), 'w') as f:
|
|
||||||
f.write("original beta (from bob)\n")
|
|
||||||
util.await_file_contents(join(alice_dir, 'beta'), "original beta (from bob)\n")
|
|
||||||
|
|
||||||
# both alice and bob now have a "beta" file, at version 0
|
|
||||||
|
|
||||||
# either alice or bob will "win" by uploading to the DMD first
|
|
||||||
# (however, they should both detect a conflict)
|
|
||||||
with open(join(bob_dir, 'beta'), 'w') as f:
|
|
||||||
f.write("this is bob's beta\n")
|
|
||||||
with open(join(alice_dir, 'beta'), 'w') as f:
|
|
||||||
f.write("this is alice's beta\n")
|
|
||||||
|
|
||||||
# both alice and bob should see a conflict
|
|
||||||
_bob_conflicts_alice_await_conflicts("beta", alice_dir, bob_dir)
|
|
||||||
|
|
||||||
|
|
||||||
def _bob_conflicts_alice_await_conflicts(name, alice_dir, bob_dir):
|
|
||||||
"""
|
|
||||||
shared code between _fresh and _preexisting conflict test
|
|
||||||
"""
|
|
||||||
found = util.await_files_exist(
|
|
||||||
[
|
|
||||||
join(bob_dir, '{}.conflict'.format(name)),
|
|
||||||
join(alice_dir, '{}.conflict'.format(name)),
|
|
||||||
],
|
|
||||||
)
|
|
||||||
|
|
||||||
assert len(found) >= 1, "should be at least one conflict"
|
|
||||||
assert open(join(bob_dir, name), 'r').read() == "this is bob's {}\n".format(name)
|
|
||||||
assert open(join(alice_dir, name), 'r').read() == "this is alice's {}\n".format(name)
|
|
||||||
|
|
||||||
alice_conflict = join(alice_dir, '{}.conflict'.format(name))
|
|
||||||
bob_conflict = join(bob_dir, '{}.conflict'.format(name))
|
|
||||||
if exists(bob_conflict):
|
|
||||||
assert open(bob_conflict, 'r').read() == "this is alice's {}\n".format(name)
|
|
||||||
if exists(alice_conflict):
|
|
||||||
assert open(alice_conflict, 'r').read() == "this is bob's {}\n".format(name)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest_twisted.inlineCallbacks
|
|
||||||
def test_edmond_uploads_then_restarts(reactor, request, temp_dir, introducer_furl, flog_gatherer, storage_nodes):
|
|
||||||
"""
|
|
||||||
ticket 2880: if a magic-folder client uploads something, then
|
|
||||||
re-starts a spurious .backup file should not appear
|
|
||||||
"""
|
|
||||||
|
|
||||||
edmond_dir = join(temp_dir, 'edmond')
|
|
||||||
edmond = yield util._create_node(
|
|
||||||
reactor, request, temp_dir, introducer_furl, flog_gatherer,
|
|
||||||
"edmond", web_port="tcp:9985:interface=localhost",
|
|
||||||
storage=False,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
magic_folder = join(temp_dir, 'magic-edmond')
|
|
||||||
mkdir(magic_folder)
|
|
||||||
created = False
|
|
||||||
# create a magic-folder
|
|
||||||
# (how can we know that the grid is ready?)
|
|
||||||
for _ in range(10): # try 10 times
|
|
||||||
try:
|
|
||||||
proto = util._CollectOutputProtocol()
|
|
||||||
transport = reactor.spawnProcess(
|
|
||||||
proto,
|
|
||||||
sys.executable,
|
|
||||||
[
|
|
||||||
sys.executable, '-m', 'allmydata.scripts.runner',
|
|
||||||
'magic-folder', 'create',
|
|
||||||
'--poll-interval', '2',
|
|
||||||
'--basedir', edmond_dir,
|
|
||||||
'magik:',
|
|
||||||
'edmond_magic',
|
|
||||||
magic_folder,
|
|
||||||
]
|
|
||||||
)
|
|
||||||
yield proto.done
|
|
||||||
created = True
|
|
||||||
break
|
|
||||||
except Exception as e:
|
|
||||||
print("failed to create magic-folder: {}".format(e))
|
|
||||||
time.sleep(1)
|
|
||||||
|
|
||||||
assert created, "Didn't create a magic-folder"
|
|
||||||
|
|
||||||
# to actually-start the magic-folder we have to re-start
|
|
||||||
edmond.transport.signalProcess('TERM')
|
|
||||||
yield edmond.transport.exited
|
|
||||||
edmond = yield util._run_node(reactor, edmond.node_dir, request, 'Completed initial Magic Folder scan successfully')
|
|
||||||
util.await_client_ready(edmond)
|
|
||||||
|
|
||||||
# add a thing to the magic-folder
|
|
||||||
with open(join(magic_folder, "its_a_file"), "w") as f:
|
|
||||||
f.write("edmond wrote this")
|
|
||||||
|
|
||||||
# fixme, do status-update attempts in a loop below
|
|
||||||
time.sleep(5)
|
|
||||||
|
|
||||||
# let it upload; poll the HTTP magic-folder status API until it is
|
|
||||||
# uploaded
|
|
||||||
from allmydata.scripts.magic_folder_cli import _get_json_for_fragment
|
|
||||||
|
|
||||||
with open(join(edmond_dir, u'private', u'api_auth_token'), 'rb') as f:
|
|
||||||
token = f.read()
|
|
||||||
|
|
||||||
uploaded = False
|
|
||||||
for _ in range(10):
|
|
||||||
options = {
|
|
||||||
"node-url": open(join(edmond_dir, u'node.url'), 'r').read().strip(),
|
|
||||||
}
|
|
||||||
try:
|
|
||||||
magic_data = _get_json_for_fragment(
|
|
||||||
options,
|
|
||||||
'magic_folder?t=json',
|
|
||||||
method='POST',
|
|
||||||
post_args=dict(
|
|
||||||
t='json',
|
|
||||||
name='default',
|
|
||||||
token=token,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
for mf in magic_data:
|
|
||||||
if mf['status'] == u'success' and mf['path'] == u'its_a_file':
|
|
||||||
uploaded = True
|
|
||||||
break
|
|
||||||
except Exception as e:
|
|
||||||
time.sleep(1)
|
|
||||||
|
|
||||||
assert uploaded, "expected to upload 'its_a_file'"
|
|
||||||
|
|
||||||
# re-starting edmond right now would "normally" trigger the 2880 bug
|
|
||||||
|
|
||||||
# kill edmond
|
|
||||||
edmond.transport.signalProcess('TERM')
|
|
||||||
yield edmond.transport.exited
|
|
||||||
time.sleep(1)
|
|
||||||
edmond = yield util._run_node(reactor, edmond.node_dir, request, 'Completed initial Magic Folder scan successfully')
|
|
||||||
util.await_client_ready(edmond)
|
|
||||||
|
|
||||||
# XXX how can we say for sure if we've waited long enough? look at
|
|
||||||
# tail of logs for magic-folder ... somethingsomething?
|
|
||||||
print("waiting 20 seconds to see if a .backup appears")
|
|
||||||
for _ in range(20):
|
|
||||||
assert exists(join(magic_folder, "its_a_file"))
|
|
||||||
assert not exists(join(magic_folder, "its_a_file.backup"))
|
|
||||||
time.sleep(1)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest_twisted.inlineCallbacks
|
|
||||||
def test_alice_adds_files_while_bob_is_offline(reactor, request, temp_dir, magic_folder):
|
|
||||||
"""
|
|
||||||
Alice can add new files to a magic folder while Bob is offline. When Bob
|
|
||||||
comes back online his copy is updated to reflect the new files.
|
|
||||||
"""
|
|
||||||
alice_magic_dir, bob_magic_dir = magic_folder
|
|
||||||
alice_node_dir = join(temp_dir, "alice")
|
|
||||||
bob_node_dir = join(temp_dir, "bob")
|
|
||||||
|
|
||||||
# Take Bob offline.
|
|
||||||
yield util.cli(request, reactor, bob_node_dir, "stop")
|
|
||||||
|
|
||||||
# Create a couple files in Alice's local directory.
|
|
||||||
some_files = list(
|
|
||||||
(name * 3) + ".added-while-offline"
|
|
||||||
for name
|
|
||||||
in "xyz"
|
|
||||||
)
|
|
||||||
for name in some_files:
|
|
||||||
with open(join(alice_magic_dir, name), "w") as f:
|
|
||||||
f.write(name + " some content")
|
|
||||||
|
|
||||||
good = False
|
|
||||||
for i in range(15):
|
|
||||||
status = yield util.magic_folder_cli(request, reactor, alice_node_dir, "status")
|
|
||||||
good = status.count(".added-while-offline (36 B): good, version=0") == len(some_files) * 2
|
|
||||||
if good:
|
|
||||||
# We saw each file as having a local good state and a remote good
|
|
||||||
# state. That means we're ready to involve Bob.
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
time.sleep(1.0)
|
|
||||||
|
|
||||||
assert good, (
|
|
||||||
"Timed out waiting for good Alice state. Last status:\n{}".format(status)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Start Bob up again
|
|
||||||
magic_text = 'Completed initial Magic Folder scan successfully'
|
|
||||||
yield util._run_node(reactor, bob_node_dir, request, magic_text)
|
|
||||||
|
|
||||||
yield util.await_files_exist(
|
|
||||||
list(
|
|
||||||
join(bob_magic_dir, name)
|
|
||||||
for name
|
|
||||||
in some_files
|
|
||||||
),
|
|
||||||
await_all=True,
|
|
||||||
)
|
|
||||||
# Let it settle. It would be nicer to have a readable status output we
|
|
||||||
# could query. Parsing the current text format is more than I want to
|
|
||||||
# deal with right now.
|
|
||||||
time.sleep(1.0)
|
|
||||||
conflict_files = list(name + ".conflict" for name in some_files)
|
|
||||||
assert all(
|
|
||||||
list(
|
|
||||||
not exists(join(bob_magic_dir, name))
|
|
||||||
for name
|
|
||||||
in conflict_files
|
|
||||||
),
|
|
||||||
)
|
|
@ -53,7 +53,12 @@ class _StreamingLogClientProtocol(WebSocketClientProtocol):
|
|||||||
self.factory.on_open.callback(self)
|
self.factory.on_open.callback(self)
|
||||||
|
|
||||||
def onMessage(self, payload, isBinary):
|
def onMessage(self, payload, isBinary):
|
||||||
self.on_message.callback(payload)
|
if self.on_message is None:
|
||||||
|
# Already did our job, ignore it
|
||||||
|
return
|
||||||
|
on_message = self.on_message
|
||||||
|
self.on_message = None
|
||||||
|
on_message.callback(payload)
|
||||||
|
|
||||||
def onClose(self, wasClean, code, reason):
|
def onClose(self, wasClean, code, reason):
|
||||||
self.on_close.callback(reason)
|
self.on_close.callback(reason)
|
||||||
@ -131,10 +136,13 @@ def _test_streaming_logs(reactor, temp_dir, alice):
|
|||||||
client.on_close = Deferred()
|
client.on_close = Deferred()
|
||||||
client.on_message = Deferred()
|
client.on_message = Deferred()
|
||||||
|
|
||||||
|
# Capture this now before on_message perhaps goes away.
|
||||||
|
racing = _race(client.on_close, client.on_message)
|
||||||
|
|
||||||
# Provoke _some_ log event.
|
# Provoke _some_ log event.
|
||||||
yield treq.get(node_url)
|
yield treq.get(node_url)
|
||||||
|
|
||||||
result = yield _race(client.on_close, client.on_message)
|
result = yield racing
|
||||||
|
|
||||||
assert isinstance(result, Right)
|
assert isinstance(result, Right)
|
||||||
json.loads(result.value)
|
json.loads(result.value)
|
||||||
|
@ -10,11 +10,20 @@ from six.moves import StringIO
|
|||||||
from twisted.internet.protocol import ProcessProtocol
|
from twisted.internet.protocol import ProcessProtocol
|
||||||
from twisted.internet.error import ProcessExitedAlready, ProcessDone
|
from twisted.internet.error import ProcessExitedAlready, ProcessDone
|
||||||
from twisted.internet.defer import inlineCallbacks, Deferred
|
from twisted.internet.defer import inlineCallbacks, Deferred
|
||||||
|
|
||||||
|
import pytest
|
||||||
import pytest_twisted
|
import pytest_twisted
|
||||||
|
|
||||||
import util
|
import util
|
||||||
|
|
||||||
# see "conftest.py" for the fixtures (e.g. "magic_folder")
|
# see "conftest.py" for the fixtures (e.g. "tor_network")
|
||||||
|
|
||||||
|
# XXX: Integration tests that involve Tor do not run reliably on
|
||||||
|
# Windows. They are skipped for now, in order to reduce CI noise.
|
||||||
|
#
|
||||||
|
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3347
|
||||||
|
if sys.platform.startswith('win'):
|
||||||
|
pytest.skip('Skipping Tor tests on Windows', allow_module_level=True)
|
||||||
|
|
||||||
@pytest_twisted.inlineCallbacks
|
@pytest_twisted.inlineCallbacks
|
||||||
def test_onion_service_storage(reactor, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl):
|
def test_onion_service_storage(reactor, request, temp_dir, flog_gatherer, tor_network, tor_introducer_furl):
|
||||||
|
@ -219,23 +219,21 @@ def test_status(alice):
|
|||||||
found_upload = False
|
found_upload = False
|
||||||
found_download = False
|
found_download = False
|
||||||
for href in hrefs:
|
for href in hrefs:
|
||||||
if href.startswith(u"/") or not href:
|
if href == u"/" or not href:
|
||||||
continue
|
continue
|
||||||
resp = requests.get(
|
resp = requests.get(util.node_url(alice.node_dir, href))
|
||||||
util.node_url(alice.node_dir, u"status/{}".format(href)),
|
if href.startswith(u"/status/up"):
|
||||||
)
|
|
||||||
if href.startswith(u'up'):
|
|
||||||
assert "File Upload Status" in resp.content
|
assert "File Upload Status" in resp.content
|
||||||
if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
|
if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
|
||||||
found_upload = True
|
found_upload = True
|
||||||
elif href.startswith(u'down'):
|
elif href.startswith(u"/status/down"):
|
||||||
assert "File Download Status" in resp.content
|
assert "File Download Status" in resp.content
|
||||||
if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
|
if "Total Size: {}".format(len(FILE_CONTENTS)) in resp.content:
|
||||||
found_download = True
|
found_download = True
|
||||||
|
|
||||||
# download the specialized event information
|
# download the specialized event information
|
||||||
resp = requests.get(
|
resp = requests.get(
|
||||||
util.node_url(alice.node_dir, u"status/{}/event_json".format(href)),
|
util.node_url(alice.node_dir, u"{}/event_json".format(href)),
|
||||||
)
|
)
|
||||||
js = json.loads(resp.content)
|
js = json.loads(resp.content)
|
||||||
# there's usually just one "read" operation, but this can handle many ..
|
# there's usually just one "read" operation, but this can handle many ..
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
import sys
|
import sys
|
||||||
import time
|
import time
|
||||||
import json
|
import json
|
||||||
from os import mkdir
|
from os import mkdir, environ
|
||||||
from os.path import exists, join
|
from os.path import exists, join
|
||||||
from six.moves import StringIO
|
from six.moves import StringIO
|
||||||
from functools import partial
|
from functools import partial
|
||||||
@ -145,6 +145,7 @@ def _tahoe_runner_optional_coverage(proto, reactor, request, other_args):
|
|||||||
proto,
|
proto,
|
||||||
sys.executable,
|
sys.executable,
|
||||||
args,
|
args,
|
||||||
|
env=environ,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -498,7 +499,3 @@ def await_client_ready(tahoe, timeout=10, liveness=60*2):
|
|||||||
tahoe,
|
tahoe,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def magic_folder_cli(request, reactor, node_dir, *argv):
|
|
||||||
return cli(request, reactor, node_dir, "magic-folder", *argv)
|
|
||||||
|
@ -156,6 +156,6 @@ for pkg in sorted(platform_independent_pkgs):
|
|||||||
print('</table>')
|
print('</table>')
|
||||||
|
|
||||||
# The document does validate, but not when it is included at the bottom of a directory listing.
|
# The document does validate, but not when it is included at the bottom of a directory listing.
|
||||||
#print '<hr>'
|
#print('<hr>')
|
||||||
#print '<a href="http://validator.w3.org/check?uri=referer" target="_blank"><img border="0" src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01 Transitional" height="31" width="88"></a>'
|
#print('<a href="http://validator.w3.org/check?uri=referer" target="_blank"><img border="0" src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01 Transitional" height="31" width="88"></a>')
|
||||||
print('</body></html>')
|
print('</body></html>')
|
||||||
|
@ -143,7 +143,6 @@ print_py_pkg_ver('coverage')
|
|||||||
print_py_pkg_ver('cryptography')
|
print_py_pkg_ver('cryptography')
|
||||||
print_py_pkg_ver('foolscap')
|
print_py_pkg_ver('foolscap')
|
||||||
print_py_pkg_ver('mock')
|
print_py_pkg_ver('mock')
|
||||||
print_py_pkg_ver('Nevow', 'nevow')
|
|
||||||
print_py_pkg_ver('pyasn1')
|
print_py_pkg_ver('pyasn1')
|
||||||
print_py_pkg_ver('pycparser')
|
print_py_pkg_ver('pycparser')
|
||||||
print_py_pkg_ver('cryptography')
|
print_py_pkg_ver('cryptography')
|
||||||
|
@ -1,5 +1,7 @@
|
|||||||
#! /usr/bin/python
|
#! /usr/bin/python
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
import math
|
import math
|
||||||
from allmydata.util import statistics
|
from allmydata.util import statistics
|
||||||
from numpy import array, matrix, dot
|
from numpy import array, matrix, dot
|
||||||
@ -72,11 +74,11 @@ class ReliabilityModel(object):
|
|||||||
|
|
||||||
repair = self.build_repair_matrix(k, N, R)
|
repair = self.build_repair_matrix(k, N, R)
|
||||||
|
|
||||||
#print "DECAY:", decay
|
#print("DECAY:", decay)
|
||||||
#print "OLD-POST-REPAIR:", old_post_repair
|
#print("OLD-POST-REPAIR:", old_post_repair)
|
||||||
#print "NEW-POST-REPAIR:", decay * repair
|
#print("NEW-POST-REPAIR:", decay * repair)
|
||||||
#print "REPAIR:", repair
|
#print("REPAIR:", repair)
|
||||||
#print "DIFF:", (old_post_repair - decay * repair)
|
#print("DIFF:", (old_post_repair - decay * repair))
|
||||||
|
|
||||||
START = array([0]*N + [1])
|
START = array([0]*N + [1])
|
||||||
DEAD = array([1]*k + [0]*(1+N-k))
|
DEAD = array([1]*k + [0]*(1+N-k))
|
||||||
@ -85,9 +87,9 @@ class ReliabilityModel(object):
|
|||||||
[N-i for i in range(k, R)] +
|
[N-i for i in range(k, R)] +
|
||||||
[0]*(1+N-R))
|
[0]*(1+N-R))
|
||||||
assert REPAIR_newshares.shape[0] == N+1
|
assert REPAIR_newshares.shape[0] == N+1
|
||||||
#print "START", START
|
#print("START", START)
|
||||||
#print "REPAIRp", REPAIRp
|
#print("REPAIRp", REPAIRp)
|
||||||
#print "REPAIR_newshares", REPAIR_newshares
|
#print("REPAIR_newshares", REPAIR_newshares)
|
||||||
|
|
||||||
unmaintained_state = START
|
unmaintained_state = START
|
||||||
maintained_state = START
|
maintained_state = START
|
||||||
@ -141,15 +143,15 @@ class ReliabilityModel(object):
|
|||||||
# return "%dy.%dm" % (int(seconds/YEAR), int( (seconds%YEAR)/MONTH))
|
# return "%dy.%dm" % (int(seconds/YEAR), int( (seconds%YEAR)/MONTH))
|
||||||
#needed_repairs_total = sum(needed_repairs)
|
#needed_repairs_total = sum(needed_repairs)
|
||||||
#needed_new_shares_total = sum(needed_new_shares)
|
#needed_new_shares_total = sum(needed_new_shares)
|
||||||
#print "at 2y:"
|
#print("at 2y:")
|
||||||
#print " unmaintained", unmaintained_state
|
#print(" unmaintained", unmaintained_state)
|
||||||
#print " maintained", maintained_state
|
#print(" maintained", maintained_state)
|
||||||
#print " number of repairs", needed_repairs_total
|
#print(" number of repairs", needed_repairs_total)
|
||||||
#print " new shares generated", needed_new_shares_total
|
#print(" new shares generated", needed_new_shares_total)
|
||||||
#repair_rate_inv = report_span / needed_repairs_total
|
#repair_rate_inv = report_span / needed_repairs_total
|
||||||
#print " avg repair rate: once every %s" % yandm(repair_rate_inv)
|
#print(" avg repair rate: once every %s" % yandm(repair_rate_inv))
|
||||||
#print " avg repair download: one share every %s" % yandm(repair_rate_inv/k)
|
#print(" avg repair download: one share every %s" % yandm(repair_rate_inv/k))
|
||||||
#print " avg repair upload: one share every %s" % yandm(report_span / needed_new_shares_total)
|
#print(" avg repair upload: one share every %s" % yandm(report_span / needed_new_shares_total))
|
||||||
|
|
||||||
return report
|
return report
|
||||||
|
|
||||||
|
@ -1,3 +1,4 @@
|
|||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
import unittest
|
import unittest
|
||||||
from allmydata import provisioning
|
from allmydata import provisioning
|
||||||
@ -99,7 +100,7 @@ class Reliability(unittest.TestCase):
|
|||||||
self.failUnlessEqual(len(r.samples), 20)
|
self.failUnlessEqual(len(r.samples), 20)
|
||||||
|
|
||||||
last_row = r.samples[-1]
|
last_row = r.samples[-1]
|
||||||
#print last_row
|
#print(last_row)
|
||||||
(when, unmaintained_shareprobs, maintained_shareprobs,
|
(when, unmaintained_shareprobs, maintained_shareprobs,
|
||||||
P_repaired_last_check_period,
|
P_repaired_last_check_period,
|
||||||
cumulative_number_of_repairs,
|
cumulative_number_of_repairs,
|
||||||
|
53
misc/python3/Makefile
Normal file
53
misc/python3/Makefile
Normal file
@ -0,0 +1,53 @@
|
|||||||
|
# Python 3 porting targets
|
||||||
|
#
|
||||||
|
# NOTE: this Makefile requires GNU make
|
||||||
|
|
||||||
|
### Defensive settings for make:
|
||||||
|
# https://tech.davis-hansson.com/p/make/
|
||||||
|
SHELL := bash
|
||||||
|
.ONESHELL:
|
||||||
|
.SHELLFLAGS := -xeu -o pipefail -c
|
||||||
|
.SILENT:
|
||||||
|
.DELETE_ON_ERROR:
|
||||||
|
MAKEFLAGS += --warn-undefined-variables
|
||||||
|
MAKEFLAGS += --no-builtin-rules
|
||||||
|
|
||||||
|
|
||||||
|
# Top-level, phony targets
|
||||||
|
|
||||||
|
.PHONY: default
|
||||||
|
default:
|
||||||
|
@echo "no default target"
|
||||||
|
|
||||||
|
.PHONY: test-py3-all-before
|
||||||
|
## Log the output of running all tests under Python 3 before changes
|
||||||
|
test-py3-all-before: ../../.tox/make-test-py3-all-old.log
|
||||||
|
.PHONY: test-py3-all-diff
|
||||||
|
## Compare the output of running all tests under Python 3 after changes
|
||||||
|
test-py3-all-diff: ../../.tox/make-test-py3-all.diff
|
||||||
|
|
||||||
|
|
||||||
|
# Real targets
|
||||||
|
|
||||||
|
# Gauge the impact of changes on Python 3 compatibility
|
||||||
|
# Compare the output from running all tests under Python 3 before and after changes.
|
||||||
|
# Before changes:
|
||||||
|
# `$ rm -f .tox/make-test-py3-all-*.log && make .tox/make-test-py3-all-old.log`
|
||||||
|
# After changes:
|
||||||
|
# `$ make .tox/make-test-py3-all.diff`
|
||||||
|
$(foreach side,old new,../../.tox/make-test-py3-all-$(side).log):
|
||||||
|
cd "../../"
|
||||||
|
tox --develop --notest -e py36-coverage
|
||||||
|
(make VIRTUAL_ENV=./.tox/py36-coverage TEST_SUITE=allmydata \
|
||||||
|
test-venv-coverage || true) | \
|
||||||
|
sed -E 's/\([0-9]+\.[0-9]{3} secs\)/(#.### secs)/' | \
|
||||||
|
tee "./misc/python3/$(@)"
|
||||||
|
../../.tox/make-test-py3-all.diff: ../../.tox/make-test-py3-all-new.log
|
||||||
|
(diff -u "$(<:%-new.log=%-old.log)" "$(<)" || true) | tee "$(@)"
|
||||||
|
|
||||||
|
# Locate modules that are candidates for naively converting `unicode` -> `str`.
|
||||||
|
# List all Python source files that reference `unicode` but don't reference `str`
|
||||||
|
../../.tox/py3-unicode-no-str.ls:
|
||||||
|
cd "../../"
|
||||||
|
find src -type f -iname '*.py' -exec grep -l -E '\Wunicode\W' '{}' ';' | \
|
||||||
|
xargs grep -L '\Wstr\W' | xargs ls -ld | tee "./misc/python3/$(@)"
|
43
misc/python3/audit-dict-for-loops.py
Normal file
43
misc/python3/audit-dict-for-loops.py
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
"""
|
||||||
|
The following code is valid in Python 2:
|
||||||
|
|
||||||
|
for x in my_dict.keys():
|
||||||
|
if something(x):
|
||||||
|
del my_dict[x]
|
||||||
|
|
||||||
|
But broken in Python 3.
|
||||||
|
|
||||||
|
One solution is:
|
||||||
|
|
||||||
|
for x in list(my_dict.keys()):
|
||||||
|
if something(x):
|
||||||
|
del my_dict[x]
|
||||||
|
|
||||||
|
Some but not all code in Tahoe has been changed to that. In other cases, the code was left unchanged since there was no `del`.
|
||||||
|
|
||||||
|
However, some mistakes may have slept through.
|
||||||
|
|
||||||
|
To help catch cases that were incorrectly ported, this script runs futurize on all ported modules, which should convert it into the `list()` form.
|
||||||
|
You can then look at git diffs to see if any of the impacted would be buggy without the newly added `list()`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
from subprocess import check_call
|
||||||
|
|
||||||
|
from allmydata.util import _python3
|
||||||
|
|
||||||
|
|
||||||
|
def fix_potential_issue():
|
||||||
|
for module in _python3.PORTED_MODULES + _python3.PORTED_TEST_MODULES:
|
||||||
|
filename = "src/" + module.replace(".", "/") + ".py"
|
||||||
|
if not os.path.exists(filename):
|
||||||
|
# Package, probably
|
||||||
|
filename = "src/" + module.replace(".", "/") + "/__init__.py"
|
||||||
|
check_call(["futurize", "-f", "lib2to3.fixes.fix_dict", "-w", filename])
|
||||||
|
print(
|
||||||
|
"All loops converted. Check diff to see if there are any that need to be commitedd."
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
fix_potential_issue()
|
@ -60,7 +60,8 @@ class mymf(modulefinder.ModuleFinder):
|
|||||||
self._depgraph[last_caller.__name__].add(fqname)
|
self._depgraph[last_caller.__name__].add(fqname)
|
||||||
return r
|
return r
|
||||||
|
|
||||||
def load_module(self, fqname, fp, pathname, (suffix, mode, type)):
|
def load_module(self, fqname, fp, pathname, additional_info):
|
||||||
|
(suffix, mode, type) = additional_info
|
||||||
r = modulefinder.ModuleFinder.load_module(
|
r = modulefinder.ModuleFinder.load_module(
|
||||||
self, fqname, fp, pathname, (suffix, mode, type))
|
self, fqname, fp, pathname, (suffix, mode, type))
|
||||||
if r is not None:
|
if r is not None:
|
||||||
@ -71,7 +72,7 @@ class mymf(modulefinder.ModuleFinder):
|
|||||||
return {
|
return {
|
||||||
'depgraph': {
|
'depgraph': {
|
||||||
name: dict.fromkeys(deps, 1)
|
name: dict.fromkeys(deps, 1)
|
||||||
for name, deps in self._depgraph.iteritems()},
|
for name, deps in self._depgraph.items()},
|
||||||
'types': self._types,
|
'types': self._types,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -101,20 +102,25 @@ def main(target):
|
|||||||
filepath = path
|
filepath = path
|
||||||
moduleNames.append(reflect.filenameToModuleName(filepath))
|
moduleNames.append(reflect.filenameToModuleName(filepath))
|
||||||
|
|
||||||
with tempfile.NamedTemporaryFile() as tmpfile:
|
with tempfile.NamedTemporaryFile("w") as tmpfile:
|
||||||
for moduleName in moduleNames:
|
for moduleName in moduleNames:
|
||||||
tmpfile.write('import %s\n' % moduleName)
|
tmpfile.write('import %s\n' % moduleName)
|
||||||
tmpfile.flush()
|
tmpfile.flush()
|
||||||
mf.run_script(tmpfile.name)
|
mf.run_script(tmpfile.name)
|
||||||
|
|
||||||
with open('tahoe-deps.json', 'wb') as outfile:
|
with open('tahoe-deps.json', 'w') as outfile:
|
||||||
json_dump(mf.as_json(), outfile)
|
json_dump(mf.as_json(), outfile)
|
||||||
outfile.write('\n')
|
outfile.write('\n')
|
||||||
|
|
||||||
ported_modules_path = os.path.join(target, "src", "allmydata", "ported-modules.txt")
|
ported_modules_path = os.path.join(target, "src", "allmydata", "util", "_python3.py")
|
||||||
with open(ported_modules_path) as ported_modules:
|
with open(ported_modules_path) as f:
|
||||||
port_status = dict.fromkeys((line.strip() for line in ported_modules), "ported")
|
ported_modules = {}
|
||||||
with open('tahoe-ported.json', 'wb') as outfile:
|
exec(f.read(), ported_modules, ported_modules)
|
||||||
|
port_status = dict.fromkeys(
|
||||||
|
ported_modules["PORTED_MODULES"] + ported_modules["PORTED_TEST_MODULES"],
|
||||||
|
"ported"
|
||||||
|
)
|
||||||
|
with open('tahoe-ported.json', 'w') as outfile:
|
||||||
json_dump(port_status, outfile)
|
json_dump(port_status, outfile)
|
||||||
outfile.write('\n')
|
outfile.write('\n')
|
||||||
|
|
||||||
|
@ -74,7 +74,7 @@ class B(object):
|
|||||||
count += 1
|
count += 1
|
||||||
inline = self.inf.readline()
|
inline = self.inf.readline()
|
||||||
|
|
||||||
# print self.stats
|
# print(self.stats)
|
||||||
|
|
||||||
benchutil.print_bench_footer(UNITS_PER_SECOND=1000000)
|
benchutil.print_bench_footer(UNITS_PER_SECOND=1000000)
|
||||||
print("(microseconds)")
|
print("(microseconds)")
|
||||||
|
@ -89,9 +89,9 @@ def scan(root):
|
|||||||
num_files = 0
|
num_files = 0
|
||||||
num_dirs = 0
|
num_dirs = 0
|
||||||
for absroot, dirs, files in os.walk(root):
|
for absroot, dirs, files in os.walk(root):
|
||||||
#print absroot
|
#print(absroot)
|
||||||
#print " %d files" % len(files)
|
#print(" %d files" % len(files))
|
||||||
#print " %d subdirs" % len(dirs)
|
#print(" %d subdirs" % len(dirs))
|
||||||
num_files += len(files)
|
num_files += len(files)
|
||||||
num_dirs += len(dirs)
|
num_dirs += len(dirs)
|
||||||
stringsize = len(''.join(files) + ''.join(dirs))
|
stringsize = len(''.join(files) + ''.join(dirs))
|
||||||
|
@ -146,8 +146,8 @@ def calculate(K, K1, K2, q_max, L_hash, trees):
|
|||||||
lg_q = lg(q_cand)
|
lg_q = lg(q_cand)
|
||||||
lg_pforge = [lg_px[x] + (lg_q*x - lg_K2)*q_cand for x in xrange(1, j)]
|
lg_pforge = [lg_px[x] + (lg_q*x - lg_K2)*q_cand for x in xrange(1, j)]
|
||||||
if max(lg_pforge) < -L_hash + lg(j) and lg_px[j-1] + 1.0 < -L_hash:
|
if max(lg_pforge) < -L_hash + lg(j) and lg_px[j-1] + 1.0 < -L_hash:
|
||||||
#print "K = %d, K1 = %d, K2 = %d, L_hash = %d, lg_K2 = %.3f, q = %d, lg_pforge_1 = %.3f, lg_pforge_2 = %.3f, lg_pforge_3 = %.3f" \
|
#print("K = %d, K1 = %d, K2 = %d, L_hash = %d, lg_K2 = %.3f, q = %d, lg_pforge_1 = %.3f, lg_pforge_2 = %.3f, lg_pforge_3 = %.3f"
|
||||||
# % (K, K1, K2, L_hash, lg_K2, q, lg_pforge_1, lg_pforge_2, lg_pforge_3)
|
# % (K, K1, K2, L_hash, lg_K2, q, lg_pforge_1, lg_pforge_2, lg_pforge_3))
|
||||||
q = q_cand
|
q = q_cand
|
||||||
break
|
break
|
||||||
|
|
||||||
@ -268,7 +268,7 @@ def search():
|
|||||||
trees[y] = (h, c_y, (dau, tri))
|
trees[y] = (h, c_y, (dau, tri))
|
||||||
|
|
||||||
#for x in xrange(1, K_max+1):
|
#for x in xrange(1, K_max+1):
|
||||||
# print x, trees[x]
|
# print(x, trees[x])
|
||||||
|
|
||||||
candidates = []
|
candidates = []
|
||||||
progress = 0
|
progress = 0
|
||||||
|
@ -130,8 +130,8 @@ class Ring(object):
|
|||||||
# used is actual per-server ciphertext
|
# used is actual per-server ciphertext
|
||||||
usedpf = [1.0*u/numfiles for u in used]
|
usedpf = [1.0*u/numfiles for u in used]
|
||||||
# usedpf is actual per-server-per-file ciphertext
|
# usedpf is actual per-server-per-file ciphertext
|
||||||
#print "min/max usage: %s/%s" % (abbreviate_space(used[-1]),
|
#print("min/max usage: %s/%s" % (abbreviate_space(used[-1]),
|
||||||
# abbreviate_space(used[0]))
|
# abbreviate_space(used[0])))
|
||||||
avg_usage_per_file = avg_space_per_file/len(self.servers)
|
avg_usage_per_file = avg_space_per_file/len(self.servers)
|
||||||
# avg_usage_per_file is expected per-server-per-file ciphertext
|
# avg_usage_per_file is expected per-server-per-file ciphertext
|
||||||
spreadpf = usedpf[0] - usedpf[-1]
|
spreadpf = usedpf[0] - usedpf[-1]
|
||||||
@ -146,7 +146,7 @@ class Ring(object):
|
|||||||
abbreviate_space(avg_usage_per_file) ), end=' ')
|
abbreviate_space(avg_usage_per_file) ), end=' ')
|
||||||
print("spread-pf: %s (%.2f%%)" % (
|
print("spread-pf: %s (%.2f%%)" % (
|
||||||
abbreviate_space(spreadpf), 100.0*spreadpf/avg_usage_per_file), end=' ')
|
abbreviate_space(spreadpf), 100.0*spreadpf/avg_usage_per_file), end=' ')
|
||||||
#print "average_usage:", abbreviate_space(average_usagepf)
|
#print("average_usage:", abbreviate_space(average_usagepf))
|
||||||
print("stddev: %s (%.2f%%)" % (abbreviate_space(std_deviation),
|
print("stddev: %s (%.2f%%)" % (abbreviate_space(std_deviation),
|
||||||
100.0*sd_of_total))
|
100.0*sd_of_total))
|
||||||
if self.SHOW_MINMAX:
|
if self.SHOW_MINMAX:
|
||||||
@ -176,14 +176,14 @@ def do_run(ring, opts):
|
|||||||
for filenum in count(0):
|
for filenum in count(0):
|
||||||
#used = list(reversed(sorted([s.used for s in ring.servers])))
|
#used = list(reversed(sorted([s.used for s in ring.servers])))
|
||||||
#used = [s.used for s in ring.servers]
|
#used = [s.used for s in ring.servers]
|
||||||
#print used
|
#print(used)
|
||||||
si = myhash(fileseed+str(filenum)).hexdigest()
|
si = myhash(fileseed+str(filenum)).hexdigest()
|
||||||
filesize = make_up_a_file_size(si)
|
filesize = make_up_a_file_size(si)
|
||||||
sharesize = filesize / opts["k"]
|
sharesize = filesize / opts["k"]
|
||||||
if filenum%4000==0 and filenum > 1:
|
if filenum%4000==0 and filenum > 1:
|
||||||
ring.dump_usage(filenum, avg_space_per_file)
|
ring.dump_usage(filenum, avg_space_per_file)
|
||||||
servers = ring.servers_for_si(si)
|
servers = ring.servers_for_si(si)
|
||||||
#print ring.show_servers(servers[:opts["N"]])
|
#print(ring.show_servers(servers[:opts["N"]]))
|
||||||
remaining_shares = opts["N"]
|
remaining_shares = opts["N"]
|
||||||
index = 0
|
index = 0
|
||||||
server_was_full = False
|
server_was_full = False
|
||||||
|
@ -59,7 +59,7 @@ def go(permutedpeerlist):
|
|||||||
server.full_at_tick = tick
|
server.full_at_tick = tick
|
||||||
fullservers += 1
|
fullservers += 1
|
||||||
if fullservers == len(servers):
|
if fullservers == len(servers):
|
||||||
# print "Couldn't place share -- all servers full. Stopping."
|
# print("Couldn't place share -- all servers full. Stopping.")
|
||||||
return (servers, doubled_up_shares)
|
return (servers, doubled_up_shares)
|
||||||
|
|
||||||
i += 1
|
i += 1
|
||||||
|
@ -96,9 +96,9 @@ class Sizes(object):
|
|||||||
# means storing (and eventually transmitting) more hashes. This
|
# means storing (and eventually transmitting) more hashes. This
|
||||||
# count includes all the low-level share hashes and the root.
|
# count includes all the low-level share hashes and the root.
|
||||||
hash_nodes = (num_leaves*k - 1) / (k - 1)
|
hash_nodes = (num_leaves*k - 1) / (k - 1)
|
||||||
#print "hash_depth", d
|
#print("hash_depth", d)
|
||||||
#print "num_leaves", num_leaves
|
#print("num_leaves", num_leaves)
|
||||||
#print "hash_nodes", hash_nodes
|
#print("hash_nodes", hash_nodes)
|
||||||
# the storage overhead is this
|
# the storage overhead is this
|
||||||
self.share_storage_overhead = 32 * (hash_nodes - 1)
|
self.share_storage_overhead = 32 * (hash_nodes - 1)
|
||||||
# the transmission overhead is smaller: if we actually transmit
|
# the transmission overhead is smaller: if we actually transmit
|
||||||
|
@ -1 +0,0 @@
|
|||||||
Magic-Folders are now supported on macOS.
|
|
1
newsfragments/1792.feature
Normal file
1
newsfragments/1792.feature
Normal file
@ -0,0 +1 @@
|
|||||||
|
PyPy is now a supported platform.
|
@ -1 +0,0 @@
|
|||||||
"tahoe rm", an old alias for "tahoe unlink", has been removed.
|
|
@ -1 +0,0 @@
|
|||||||
The direct dependencies on pyutil and zbase32 have been removed.
|
|
@ -1 +0,0 @@
|
|||||||
The redundant "pypywin32" dependency has been removed.
|
|
@ -1 +0,0 @@
|
|||||||
Tahoe-LAFS now tests for PyPy compatibility on CI.
|
|
@ -1 +0,0 @@
|
|||||||
Tahoe-LAFS no longer makes start-up time assertions about the versions of its dependencies. It is the responsibility of the administrator of the installation to ensure the correct version of dependencies are supplied.
|
|
1
newsfragments/2755.other
Normal file
1
newsfragments/2755.other
Normal file
@ -0,0 +1 @@
|
|||||||
|
The Tahoe-LAFS project has adopted a formal code of conduct.
|
@ -1 +0,0 @@
|
|||||||
Tahoe-LAFS now requires Twisted 18.4.0 or newer.
|
|
@ -1 +0,0 @@
|
|||||||
refactor initialization code to be more async-friendly
|
|
@ -1 +0,0 @@
|
|||||||
Tahoe-LAFS now uses towncrier to maintain the NEWS file.
|
|
@ -1 +0,0 @@
|
|||||||
The release process document has been updated.
|
|
@ -1 +0,0 @@
|
|||||||
allmydata.test.test_system.SystemTest is now more reliable with respect to bound address collisions.
|
|
@ -1 +0,0 @@
|
|||||||
Configuration-checking code wasn't being called due to indenting
|
|
@ -1 +0,0 @@
|
|||||||
refactor configuration handling out of Node into _Config
|
|
@ -1 +0,0 @@
|
|||||||
"tox -e codechecks" no longer dirties the working tree.
|
|
@ -1 +0,0 @@
|
|||||||
Add a "tox -e draftnews" which runs towncrier in draft mode
|
|
@ -1 +0,0 @@
|
|||||||
Updated the Tor release key, used by the integration tests.
|
|
@ -1 +0,0 @@
|
|||||||
`tahoe backup` no longer fails with an unhandled exception when it encounters a special file (device, fifo) in the backup source.
|
|
@ -1 +0,0 @@
|
|||||||
Fedora 29 is now tested as part of the project's continuous integration system.
|
|
@ -1 +0,0 @@
|
|||||||
Fedora 27 is no longer tested as part of the project's continuous integration system.
|
|
@ -1 +0,0 @@
|
|||||||
The Tox configuration has been fixed to work around a problem on Windows CI.
|
|
@ -1 +0,0 @@
|
|||||||
Tahoe-LAFS now depends on Twisted 16.6 or newer.
|
|
@ -1 +0,0 @@
|
|||||||
The PyInstaller CI job now works around a pip/pyinstaller incompatibility.
|
|
@ -1 +0,0 @@
|
|||||||
Some CI jobs for integration tests have been moved from TravisCI to CircleCI.
|
|
@ -1 +0,0 @@
|
|||||||
Several warnings from a new release of pyflakes have been fixed.
|
|
@ -1 +0,0 @@
|
|||||||
Some Slackware 14.2 continuous integration problems have been resolved.
|
|
@ -1 +0,0 @@
|
|||||||
Some macOS continuous integration failures have been fixed.
|
|
@ -1 +0,0 @@
|
|||||||
Magic-Folders now creates spurious conflict files in fewer cases. In particular, if files are added to the folder while a client is offline, that client will not create conflict files for all those new files when it starts up.
|
|
@ -1 +0,0 @@
|
|||||||
The NoNetworkGrid implementation has been somewhat improved.
|
|
@ -1 +0,0 @@
|
|||||||
A bug in the test suite for the create-alias command has been fixed.
|
|
@ -1 +0,0 @@
|
|||||||
The integration test suite has been updated to use pytest-twisted instead of deprecated pytest APIs.
|
|
@ -1 +0,0 @@
|
|||||||
The magic-folder integration test suite now performs more aggressive cleanup of the processes it launches.
|
|
@ -1 +0,0 @@
|
|||||||
The integration tests now correctly document the `--keep-tempdir` option.
|
|
@ -1 +0,0 @@
|
|||||||
A misuse of super() in the integration tests has been fixed.
|
|
@ -1 +0,0 @@
|
|||||||
The Magic-Folder frontend now emits structured, causal logs. This makes it easier for developers to make sense of its behavior and for users to submit useful debugging information alongside problem reports.
|
|
@ -1 +0,0 @@
|
|||||||
Several utilities to facilitate the use of the Eliot causal logging library have been introduced.
|
|
@ -1 +0,0 @@
|
|||||||
The Windows CI configuration has been tweaked.
|
|
@ -1 +0,0 @@
|
|||||||
The `tahoe` CLI now accepts arguments for configuring structured logging messages which Tahoe-LAFS is being converted to emit. This change does not introduce any new defaults for on-filesystem logging.
|
|
@ -1 +0,0 @@
|
|||||||
The confusing and misplaced sub-command group headings in `tahoe --help` output have been removed.
|
|
@ -1 +0,0 @@
|
|||||||
The Magic-Folder frontend has had additional logging improvements.
|
|
@ -1 +0,0 @@
|
|||||||
The Magic-Folder frontend is now more responsive to subtree changes on Windows.
|
|
@ -1 +0,0 @@
|
|||||||
Added a simple sytax checker so that once a file has reached python3 compatibility, it will not regress.
|
|
@ -1 +0,0 @@
|
|||||||
Converted all uses of the print statement to the print function in the ./misc/ directory.
|
|
@ -1 +0,0 @@
|
|||||||
The contributor guidelines are now linked from the GitHub pull request creation page.
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user