Merge remote-tracking branch 'origin/master' into 3373.happinessutil-python-3

Itamar Turner-Trauring 2020-08-17 10:45:12 -04:00
commit 81ba354357
40 changed files with 1417 additions and 1349 deletions

View File

@ -1,5 +1,6 @@
ARG TAG
FROM centos:${TAG}
ARG PYTHON_VERSION
ENV WHEELHOUSE_PATH /tmp/wheelhouse
ENV VIRTUALENV_PATH /tmp/venv
@ -11,8 +12,8 @@ RUN yum install --assumeyes \
git \
sudo \
make automake gcc gcc-c++ \
python2 \
python2-devel \
python${PYTHON_VERSION} \
python${PYTHON_VERSION}-devel \
libffi-devel \
openssl-devel \
libyaml \
@ -23,4 +24,4 @@ RUN yum install --assumeyes \
# *update* this checkout on each job run, saving us more time per-job.
COPY . ${BUILD_SRC_ROOT}
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python2.7"
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"

View File

@ -1,5 +1,6 @@
ARG TAG
FROM debian:${TAG}
ARG PYTHON_VERSION
ENV WHEELHOUSE_PATH /tmp/wheelhouse
ENV VIRTUALENV_PATH /tmp/venv
@ -8,22 +9,22 @@ ENV BUILD_SRC_ROOT /tmp/project
RUN apt-get --quiet update && \
apt-get --quiet --yes install \
git \
lsb-release \
git \
lsb-release \
sudo \
build-essential \
python2.7 \
python2.7-dev \
libffi-dev \
libssl-dev \
libyaml-dev \
virtualenv
build-essential \
python${PYTHON_VERSION} \
python${PYTHON_VERSION}-dev \
libffi-dev \
libssl-dev \
libyaml-dev \
virtualenv
# Get the project source. This is better than it seems. CircleCI will
# *update* this checkout on each job run, saving us more time per-job.
COPY . ${BUILD_SRC_ROOT}
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python2.7"
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"
# Only the integration tests currently need this but it doesn't hurt to always
# have it present and it's simpler than building a whole extra image just for

View File

@ -1,5 +1,6 @@
ARG TAG
FROM fedora:${TAG}
ARG PYTHON_VERSION
ENV WHEELHOUSE_PATH /tmp/wheelhouse
ENV VIRTUALENV_PATH /tmp/venv
@ -11,8 +12,8 @@ RUN yum install --assumeyes \
git \
sudo \
make automake gcc gcc-c++ \
python \
python-devel \
python${PYTHON_VERSION} \
python${PYTHON_VERSION}-devel \
libffi-devel \
openssl-devel \
libyaml-devel \
@ -23,4 +24,4 @@ RUN yum install --assumeyes \
# *update* this checkout on each job run, saving us more time per-job.
COPY . ${BUILD_SRC_ROOT}
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python2.7"
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"

View File

@ -1,5 +1,6 @@
ARG TAG
FROM ubuntu:${TAG}
ARG PYTHON_VERSION
ENV WHEELHOUSE_PATH /tmp/wheelhouse
ENV VIRTUALENV_PATH /tmp/venv
@ -13,8 +14,8 @@ RUN apt-get --quiet update && \
apt-get --quiet --yes install \
sudo \
build-essential \
python2.7 \
python2.7-dev \
python${PYTHON_VERSION} \
python${PYTHON_VERSION}-dev \
libffi-dev \
libssl-dev \
libyaml-dev \
@ -26,4 +27,4 @@ RUN apt-get --quiet update && \
# *update* this checkout on each job run, saving us more time per-job.
COPY . ${BUILD_SRC_ROOT}
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python2.7"
RUN "${BUILD_SRC_ROOT}"/.circleci/prepare-image.sh "${WHEELHOUSE_PATH}" "${VIRTUALENV_PATH}" "${BUILD_SRC_ROOT}" "python${PYTHON_VERSION}"

View File

@ -31,6 +31,9 @@ workflows:
# Test against PyPy 2.7
- "pypy2.7-buster"
# Just one Python 3.6 configuration while the port is in-progress.
- "python3.6"
# Other assorted tasks and configurations
- "lint"
- "pyinstaller"
@ -71,6 +74,7 @@ workflows:
- "build-image-fedora-29"
- "build-image-centos-8"
- "build-image-pypy-2.7-buster"
- "build-image-python36-ubuntu"
jobs:
@ -118,7 +122,7 @@ jobs:
debian-9: &DEBIAN
docker:
- image: "tahoelafsci/debian:9"
- image: "tahoelafsci/debian:9-py2.7"
user: "nobody"
environment: &UTF_8_ENVIRONMENT
@ -195,14 +199,14 @@ jobs:
debian-8:
<<: *DEBIAN
docker:
- image: "tahoelafsci/debian:8"
- image: "tahoelafsci/debian:8-py2.7"
user: "nobody"
pypy2.7-buster:
<<: *DEBIAN
docker:
- image: "tahoelafsci/pypy:2.7-buster"
- image: "tahoelafsci/pypy:buster-py2"
user: "nobody"
environment:
@ -258,17 +262,32 @@ jobs:
ubuntu-16.04:
<<: *DEBIAN
docker:
- image: "tahoelafsci/ubuntu:16.04"
- image: "tahoelafsci/ubuntu:16.04-py2.7"
user: "nobody"
ubuntu-18.04:
ubuntu-18.04: &UBUNTU_18_04
<<: *DEBIAN
docker:
- image: "tahoelafsci/ubuntu:18.04"
- image: "tahoelafsci/ubuntu:18.04-py2.7"
user: "nobody"
python3.6:
<<: *UBUNTU_18_04
docker:
- image: "tahoelafsci/ubuntu:18.04-py3"
user: "nobody"
environment:
<<: *UTF_8_ENVIRONMENT
# The default trial args include --rterrors which is incompatible with
# this reporter on Python 3. So drop that and just specify the
# reporter.
TAHOE_LAFS_TRIAL_ARGS: "--reporter=subunitv2-file"
TAHOE_LAFS_TOX_ENVIRONMENT: "py36"
ubuntu-20.04:
<<: *DEBIAN
docker:
@ -278,7 +297,7 @@ jobs:
centos-8: &RHEL_DERIV
docker:
- image: "tahoelafsci/centos:8"
- image: "tahoelafsci/centos:8-py2"
user: "nobody"
environment: *UTF_8_ENVIRONMENT
@ -300,14 +319,14 @@ jobs:
fedora-28:
<<: *RHEL_DERIV
docker:
- image: "tahoelafsci/fedora:28"
- image: "tahoelafsci/fedora:28-py"
user: "nobody"
fedora-29:
<<: *RHEL_DERIV
docker:
- image: "tahoelafsci/fedora:29"
- image: "tahoelafsci/fedora:29-py"
user: "nobody"
@ -374,8 +393,9 @@ jobs:
- image: "docker:17.05.0-ce-git"
environment:
DISTRO: "tahoelafsci/<DISTRO>:foo"
TAG: "tahoelafsci/distro:<TAG>"
DISTRO: "tahoelafsci/<DISTRO>:foo-py2"
TAG: "tahoelafsci/distro:<TAG>-py2"
PYTHON_VERSION: "tahoelafsci/distro:tag-py<PYTHON_VERSION}"
steps:
- "checkout"
@ -427,13 +447,14 @@ jobs:
docker \
build \
--build-arg TAG=${TAG} \
-t tahoelafsci/${DISTRO}:${TAG} \
--build-arg PYTHON_VERSION=${PYTHON_VERSION} \
-t tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION} \
-f ~/project/.circleci/Dockerfile.${DISTRO} \
~/project/
- run:
name: "Push image"
command: |
docker push tahoelafsci/${DISTRO}:${TAG}
docker push tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION}
build-image-debian-8:
@ -442,6 +463,7 @@ jobs:
environment:
DISTRO: "debian"
TAG: "8"
PYTHON_VERSION: "2.7"
build-image-debian-9:
@ -450,6 +472,7 @@ jobs:
environment:
DISTRO: "debian"
TAG: "9"
PYTHON_VERSION: "2.7"
build-image-ubuntu-16.04:
@ -458,6 +481,7 @@ jobs:
environment:
DISTRO: "ubuntu"
TAG: "16.04"
PYTHON_VERSION: "2.7"
build-image-ubuntu-18.04:
@ -466,6 +490,16 @@ jobs:
environment:
DISTRO: "ubuntu"
TAG: "18.04"
PYTHON_VERSION: "2.7"
build-image-python36-ubuntu:
<<: *BUILD_IMAGE
environment:
DISTRO: "ubuntu"
TAG: "18.04"
PYTHON_VERSION: "3"
build-image-ubuntu-20.04:
@ -482,6 +516,7 @@ jobs:
environment:
DISTRO: "centos"
TAG: "8"
PYTHON_VERSION: "2"
build-image-fedora-28:
@ -490,6 +525,8 @@ jobs:
environment:
DISTRO: "fedora"
TAG: "28"
# The default on Fedora (this version anyway) is still Python 2.
PYTHON_VERSION: ""
build-image-fedora-29:
@ -505,4 +542,8 @@ jobs:
environment:
DISTRO: "pypy"
TAG: "2.7-buster"
TAG: "buster"
# We only have Python 2 for PyPy right now so there's no support for
# setting up PyPy 3 in the image building toolchain. This value is just
# for constructing the right Docker image tag.
PYTHON_VERSION: "2"

View File

@ -36,8 +36,9 @@ PIP="${BOOTSTRAP_VENV}/bin/pip"
# Tell pip where it can find any existing wheels.
export PIP_FIND_LINKS="file://${WHEELHOUSE_PATH}"
# Populate the wheelhouse, if necessary.
"${PIP}" \
# Populate the wheelhouse, if necessary. zfec 1.5.3 can only be built with a
# UTF-8 environment so make sure we have one, at least for this invocation.
LANG="en_US.UTF-8" "${PIP}" \
wheel \
--wheel-dir "${WHEELHOUSE_PATH}" \
"${PROJECT_ROOT}"[test] \

View File

@ -65,7 +65,7 @@ TIMEOUT="timeout --kill-after 1m 15m"
# Send the output directly to a file because transporting the binary subunit2
# via tox and then scraping it out is hideous and failure prone.
export SUBUNITREPORTER_OUTPUT_PATH="${SUBUNIT2}"
export TAHOE_LAFS_TRIAL_ARGS="--reporter=subunitv2-file --rterrors"
export TAHOE_LAFS_TRIAL_ARGS="${TAHOE_LAFS_TRIAL_ARGS:---reporter=subunitv2-file --rterrors}"
export PIP_NO_INDEX="1"
if [ "${ALLOWED_FAILURE}" = "yes" ]; then
@ -81,7 +81,12 @@ ${TIMEOUT} ${BOOTSTRAP_VENV}/bin/tox \
${TAHOE_LAFS_TOX_ARGS} || "${alternative}"
if [ -n "${ARTIFACTS}" ]; then
if [ ! -e "${SUBUNIT2}" ]; then
echo "subunitv2 output file does not exist: ${SUBUNIT2}"
exit 1
fi
# Create a junitxml results area.
mkdir -p "$(dirname "${JUNITXML}")"
${BOOTSTRAP_VENV}/bin/subunit2junitxml < "${SUBUNIT2}" > "${JUNITXML}" || "${alternative}"
"${BOOTSTRAP_VENV}"/bin/subunit2junitxml < "${SUBUNIT2}" > "${JUNITXML}" || "${alternative}"
fi
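
The TAHOE_LAFS_TRIAL_ARGS line above changes from a fixed value to the shell's ${VAR:-default} expansion, so a job can override the trial arguments through its environment (the new python3.6 CircleCI job does exactly that to drop --rterrors) while every other job keeps the old default. A rough Python analogue of that "use the override if set and non-empty, otherwise fall back" behaviour, purely as an illustration:

import os

trial_args = (
    os.environ.get("TAHOE_LAFS_TRIAL_ARGS")
    or "--reporter=subunitv2-file --rterrors"  # the previous hard-coded default
)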

View File

@ -1,40 +0,0 @@
sudo: false
language: python
cache: pip
dist: xenial
before_cache:
- rm -f $HOME/.cache/pip/log/debug.log
git:
depth: 1000
env:
global:
- TAHOE_LAFS_HYPOTHESIS_PROFILE=ci
install:
- pip install --upgrade tox setuptools virtualenv
- echo $PATH; which python; which pip; which tox
- python misc/build_helpers/show-tool-versions.py
script:
- |
set -eo pipefail
tox -e ${T}
notifications:
email: false
irc:
channels: "chat.freenode.net#tahoe-lafs"
on_success: always # for testing
on_failure: always
template:
- "%{repository}#%{build_number} [%{branch}: %{commit} by %{author}] %{message}"
- "Changes: %{compare_url} | Details: %{build_url}"
matrix:
include:
- os: linux
python: '3.6'
env: T=py36
fast_finish: true

View File

@ -1,160 +0,0 @@
allmydata.test.mutable.test_exceptions.Exceptions.test_repr
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_1s
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_25s
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_day
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_future_5_minutes
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_hours
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_month
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_year
allmydata.test.test_abbreviate.Abbreviate.test_parse_space
allmydata.test.test_abbreviate.Abbreviate.test_space
allmydata.test.test_abbreviate.Abbreviate.test_time
allmydata.test.test_base32.Base32.test_a2b
allmydata.test.test_base32.Base32.test_a2b_b2a_match_Pythons
allmydata.test.test_base32.Base32.test_b2a
allmydata.test.test_base32.Base32.test_b2a_or_none
allmydata.test.test_base62.Base62.test_ende_0x00
allmydata.test.test_base62.Base62.test_ende_0x000000
allmydata.test.test_base62.Base62.test_ende_0x01
allmydata.test.test_base62.Base62.test_ende_0x0100
allmydata.test.test_base62.Base62.test_ende_0x010000
allmydata.test.test_base62.Base62.test_ende_longrandstr
allmydata.test.test_base62.Base62.test_ende_randstr
allmydata.test.test_base62.Base62.test_known_values
allmydata.test.test_base62.Base62.test_num_octets_that_encode_to_this_many_chars
allmydata.test.test_base62.Base62.test_odd_sizes
allmydata.test.test_base62.Base62.test_roundtrip
allmydata.test.test_crypto.TestEd25519.test_deserialize_private_not_bytes
allmydata.test.test_crypto.TestEd25519.test_deserialize_public_not_bytes
allmydata.test.test_crypto.TestEd25519.test_key_serialization
allmydata.test.test_crypto.TestEd25519.test_sign_invalid_pubkey
allmydata.test.test_crypto.TestEd25519.test_signature_data_not_bytes
allmydata.test.test_crypto.TestEd25519.test_signature_not_bytes
allmydata.test.test_crypto.TestEd25519.test_signed_data_not_bytes
allmydata.test.test_crypto.TestEd25519.test_verify_invalid_pubkey
allmydata.test.test_crypto.TestRegression.test_aes_no_iv_process_long_input
allmydata.test.test_crypto.TestRegression.test_aes_no_iv_process_short_input
allmydata.test.test_crypto.TestRegression.test_aes_with_iv_process_long_input
allmydata.test.test_crypto.TestRegression.test_aes_with_iv_process_short_input
allmydata.test.test_crypto.TestRegression.test_decode_ed15519_keypair
allmydata.test.test_crypto.TestRegression.test_decode_rsa_keypair
allmydata.test.test_crypto.TestRegression.test_encrypt_data_not_bytes
allmydata.test.test_crypto.TestRegression.test_incorrect_iv_size
allmydata.test.test_crypto.TestRegression.test_iv_not_bytes
allmydata.test.test_crypto.TestRegression.test_key_incorrect_size
allmydata.test.test_crypto.TestRegression.test_old_start_up_test
allmydata.test.test_crypto.TestRsa.test_keys
allmydata.test.test_crypto.TestRsa.test_sign_invalid_pubkey
allmydata.test.test_crypto.TestRsa.test_verify_invalid_pubkey
allmydata.test.test_crypto.TestUtil.test_remove_prefix_bad
allmydata.test.test_crypto.TestUtil.test_remove_prefix_entire_string
allmydata.test.test_crypto.TestUtil.test_remove_prefix_good
allmydata.test.test_crypto.TestUtil.test_remove_prefix_partial
allmydata.test.test_crypto.TestUtil.test_remove_prefix_zero
allmydata.test.test_deferredutil.DeferredUtilTests.test_failure
allmydata.test.test_deferredutil.DeferredUtilTests.test_gather_results
allmydata.test.test_deferredutil.DeferredUtilTests.test_success
allmydata.test.test_deferredutil.DeferredUtilTests.test_wait_for_delayed_calls
allmydata.test.test_dictutil.DictUtil.test_auxdict
allmydata.test.test_dictutil.DictUtil.test_dict_of_sets
allmydata.test.test_happiness.Happiness.test_100
allmydata.test.test_happiness.Happiness.test_calc_happy
allmydata.test.test_happiness.Happiness.test_everything_broken
allmydata.test.test_happiness.Happiness.test_hypothesis0
allmydata.test.test_happiness.Happiness.test_hypothesis_0
allmydata.test.test_happiness.Happiness.test_hypothesis_1
allmydata.test.test_happiness.Happiness.test_placement_1
allmydata.test.test_happiness.Happiness.test_placement_simple
allmydata.test.test_happiness.Happiness.test_redistribute
allmydata.test.test_happiness.Happiness.test_unhappy
allmydata.test.test_happiness.HappinessUploadUtils.test_residual_0
allmydata.test.test_happiness.HappinessUploadUtils.test_trivial_flow_graph
allmydata.test.test_happiness.HappinessUploadUtils.test_trivial_maximum_graph
allmydata.test.test_happiness.HappinessUtilTests.test_merge_servers
allmydata.test.test_happiness.HappinessUtilTests.test_servers_of_happiness_utility_function
allmydata.test.test_happiness.HappinessUtilTests.test_shares_by_server
allmydata.test.test_happiness.PlacementTests.test_hypothesis_unhappy
allmydata.test.test_happiness.PlacementTests.test_more_hypothesis
allmydata.test.test_hashtree.Complete.test_create
allmydata.test.test_hashtree.Complete.test_dump
allmydata.test.test_hashtree.Complete.test_needed_hashes
allmydata.test.test_hashtree.Incomplete.test_check
allmydata.test.test_hashtree.Incomplete.test_create
allmydata.test.test_hashtree.Incomplete.test_depth_of
allmydata.test.test_hashtree.Incomplete.test_large
allmydata.test.test_hashtree.Incomplete.test_needed_hashes
allmydata.test.test_hashutil.HashUtilTests.test_chk
allmydata.test.test_hashutil.HashUtilTests.test_hashers
allmydata.test.test_hashutil.HashUtilTests.test_known_answers
allmydata.test.test_hashutil.HashUtilTests.test_random_key
allmydata.test.test_hashutil.HashUtilTests.test_sha256d
allmydata.test.test_hashutil.HashUtilTests.test_sha256d_truncated
allmydata.test.test_hashutil.HashUtilTests.test_timing_safe_compare
allmydata.test.test_humanreadable.HumanReadable.test_repr
allmydata.test.test_iputil.GcUtil.test_gc_after_allocations
allmydata.test.test_iputil.GcUtil.test_release_delays_gc
allmydata.test.test_iputil.ListAddresses.test_get_local_ip_for
allmydata.test.test_iputil.ListAddresses.test_list_async
allmydata.test.test_iputil.ListAddresses.test_list_async_mock_cygwin
allmydata.test.test_iputil.ListAddresses.test_list_async_mock_ifconfig
allmydata.test.test_iputil.ListAddresses.test_list_async_mock_ip_addr
allmydata.test.test_iputil.ListAddresses.test_list_async_mock_route
allmydata.test.test_iputil.ListenOnUsed.test_random_port
allmydata.test.test_iputil.ListenOnUsed.test_specific_port
allmydata.test.test_log.Log.test_default_facility
allmydata.test.test_log.Log.test_err
allmydata.test.test_log.Log.test_grandparent_id
allmydata.test.test_log.Log.test_no_prefix
allmydata.test.test_log.Log.test_numming
allmydata.test.test_log.Log.test_parent_id
allmydata.test.test_log.Log.test_with_bytes_prefix
allmydata.test.test_log.Log.test_with_prefix
allmydata.test.test_netstring.Netstring.test_encode
allmydata.test.test_netstring.Netstring.test_extra
allmydata.test.test_netstring.Netstring.test_nested
allmydata.test.test_netstring.Netstring.test_split
allmydata.test.test_observer.Observer.test_lazy_oneshot
allmydata.test.test_observer.Observer.test_observerlist
allmydata.test.test_observer.Observer.test_oneshot
allmydata.test.test_observer.Observer.test_oneshot_fireagain
allmydata.test.test_pipeline.Pipeline.test_basic
allmydata.test.test_pipeline.Pipeline.test_errors
allmydata.test.test_pipeline.Pipeline.test_errors2
allmydata.test.test_python3.Python3PortingEffortTests.test_finished_porting
allmydata.test.test_python3.Python3PortingEffortTests.test_ported_modules_distinct
allmydata.test.test_python3.Python3PortingEffortTests.test_ported_modules_exist
allmydata.test.test_spans.ByteSpans.test_basic
allmydata.test.test_spans.ByteSpans.test_large
allmydata.test.test_spans.ByteSpans.test_math
allmydata.test.test_spans.ByteSpans.test_overlap
allmydata.test.test_spans.ByteSpans.test_random
allmydata.test.test_spans.StringSpans.test_basic
allmydata.test.test_spans.StringSpans.test_random
allmydata.test.test_spans.StringSpans.test_test
allmydata.test.test_statistics.Statistics.test_binomial_coeff
allmydata.test.test_statistics.Statistics.test_binomial_distribution_pmf
allmydata.test.test_statistics.Statistics.test_convolve
allmydata.test.test_statistics.Statistics.test_find_k
allmydata.test.test_statistics.Statistics.test_pr_backup_file_loss
allmydata.test.test_statistics.Statistics.test_pr_file_loss
allmydata.test.test_statistics.Statistics.test_repair_cost
allmydata.test.test_statistics.Statistics.test_repair_count_pmf
allmydata.test.test_statistics.Statistics.test_survival_pmf
allmydata.test.test_time_format.TimeFormat.test_epoch
allmydata.test.test_time_format.TimeFormat.test_epoch_in_London
allmydata.test.test_time_format.TimeFormat.test_format_delta
allmydata.test.test_time_format.TimeFormat.test_format_time
allmydata.test.test_time_format.TimeFormat.test_format_time_y2038
allmydata.test.test_time_format.TimeFormat.test_iso_utc
allmydata.test.test_time_format.TimeFormat.test_parse_date
allmydata.test.test_time_format.TimeFormat.test_parse_duration
allmydata.test.test_version.CheckRequirement.test_cross_check
allmydata.test.test_version.CheckRequirement.test_cross_check_unparseable_versions
allmydata.test.test_version.CheckRequirement.test_extract_openssl_version
allmydata.test.test_version.CheckRequirement.test_packages_from_pkg_resources
allmydata.test.test_version.T.test_report_import_error
allmydata.test.test_version.VersionTestCase.test_basic_versions
allmydata.test.test_version.VersionTestCase.test_comparison
allmydata.test.test_version.VersionTestCase.test_from_parts
allmydata.test.test_version.VersionTestCase.test_irrational_versions
allmydata.test.test_version.VersionTestCase.test_suggest_normalized_version

View File

@ -1,409 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Ratchet up passing tests, or ratchet down failing tests.
Usage:
ratchet.py <"up" or "down"> <junitxml file path> <tracking file path>
This script helps when you expect a large test suite to fail spectacularly in
some environment, and you want to gradually improve the situation with minimal
impact to forward development of the same codebase for other environments. The
initial and primary use case is porting from Python 2 to Python 3.
The idea is to emit JUnit XML from your test runner, and then invoke ratchet.py
to consume this XML output and operate on a so-called "tracking" file. When
ratcheting up passing tests, the tracking file will contain a list of tests,
one per line, that passed. When ratcheting down, the tracking file contains a
list of failing tests. On each subsequent run, ratchet.py will compare the
prior results in the tracking file with the new results in the XML, and will
report on both welcome and unwelcome changes. It will modify the tracking file
in the case of welcome changes, and therein lies the ratcheting.
The exit codes are:
0 - no changes observed
1 - changes observed, whether welcome or unwelcome
2 - invocation error
If <junitxml file path> does not exist, you'll get a FileNotFoundError:
>>> _test('up', None, None) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
FileNotFoundError: ...
If <tracking file path> does not exist, that's fine:
>>> _test('up', '1', None)
Some tests not required to pass did:
c0.t
Conveniently, they have been added to `<tracking_path>` for you. Perhaps commit that?
Eep! 0 test(s) were required to pass, but instead 1 did. 🐭
Same if you're ratcheting down:
>>> _test('down', '1', None)
All and only tests expected to fail did. 💃
If the test run has the same output as last time, it's all good:
>>> _test('up', '01001110', '01001110')
All and only tests required to pass did. 💃
>>> _test('down', '01001110', '10110001')
All and only tests expected to fail did. 💃
If there's a welcome change, that's noted:
>>> _test('up', '0101', '0100')
Some tests not required to pass did:
c3.t
Conveniently, they have been added to `<tracking_path>` for you. Perhaps commit that?
Eep! 1 test(s) were required to pass, but instead 2 did. 🐭
>>> _test('down', '0011', '1110')
Some tests expected to fail didn't:
c2.t
Conveniently, they have been removed from `<tracking_path>` for you. Perhaps commit that?
Eep! 3 test(s) were expected to fail, but instead 2 did. 🐭
And if there is an unwelcome change, that is noted as well:
>>> _test('up', '1101', '1111')
Some tests required to pass didn't:
c2.t
Eep! 4 test(s) were required to pass, but instead 3 did. 🐭
>>> _test('down', '0000', '1101')
Some tests not expected to fail did:
c2.t
Eep! 3 test(s) were expected to fail, but instead 4 did. 🐭
And if there are both welcome and unwelcome changes, they are both noted:
>>> _test('up', '1101', '1011')
Some tests not required to pass did:
c1.t
Conveniently, they have been added to `<tracking_path>` for you. Perhaps commit that?
Some tests required to pass didn't:
c2.t
Eep! 3 test(s) were required to pass, but instead 3 did. 🐭
>>> _test('down', '0100', '1100')
Some tests not expected to fail did:
c2.t
c3.t
Some tests expected to fail didn't:
c1.t
Conveniently, they have been removed from `<tracking_path>` for you. Perhaps commit that?
Eep! 2 test(s) were expected to fail, but instead 3 did. 🐭
To test ratchet.py itself:
python3 -m doctest ratchet.py
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import io
import os
import re
import sys
import tempfile
import xml.etree.ElementTree as Etree
class JUnitXMLFile(object):
'''Represent a file containing test results in JUnit XML format.
>>> eg = _mktemp_junitxml('0100111')
>>> results = JUnitXMLFile(eg.name).parse()
>>> results.failed
['c0.t', 'c2.t', 'c3.t']
>>> results.passed
['c1.t', 'c4.t', 'c5.t', 'c6.t']
'''
def __init__(self, filepath):
self.filepath = filepath
self.failed = []
self.failed_aggregates = {}
self.stderr_output = []
self.passed = []
self._tree = None
def parse(self):
if self._tree:
raise RuntimeError('already parsed')
self._tree = Etree.parse(self.filepath)
for testcase in self._tree.findall('testcase'):
self.process_testcase(testcase)
return self
def process_testcase(self, case):
key = self.case_key(case)
# look at children but throw away stderr output
nonpassing = [c for c in case if not c.tag == 'system-err']
n = len(nonpassing)
if n > 1:
raise RuntimeError(f'multiple results for {key}: {nonpassing}')
elif n == 1:
result = nonpassing.pop()
self.failed.append(key)
message = result.get('message')
self.failed_aggregates.setdefault(message, []).append(key)
else:
self.passed.append(key)
@staticmethod
def case_key(case):
return f'{case.get("classname")}.{case.get("name")}'
def report(self, details=False):
for k, v in sorted(
self.failed_aggregates.items(),
key = lambda i: len(i[1]),
reverse=True):
print(f'# {k}')
for t in v:
print(f' - {t}')
def load_previous_results(txt):
try:
previous_results = open(txt).read()
except FileNotFoundError:
previous_results = ''
parsed = set()
for line in previous_results.splitlines():
if not line or line.startswith('#'):
continue
parsed.add(line)
return parsed
def print_tests(tests):
for test in sorted(tests):
print(' ', test)
def ratchet_up_passing(tracking_path, tests):
try:
old = set(open(tracking_path, 'r'))
except FileNotFoundError:
old = set()
new = set(t + '\n' for t in tests)
merged = sorted(old | new)
open(tracking_path, 'w+').writelines(merged)
def ratchet_down_failing(tracking_path, tests):
new = set(t + '\n' for t in tests)
open(tracking_path, 'w+').writelines(sorted(new))
def main(direction, junitxml_path, tracking_path):
'''Takes a string indicating which direction to ratchet, "up" or "down,"
and two paths, one to test-runner output in JUnit XML format, the other to
a file tracking test results (one test case dotted name per line). Walk the
former looking for the latter, and react appropriately.
>>> inp = _mktemp_junitxml('0100111')
>>> out = _mktemp_tracking('0000000')
>>> _test_main('up', inp.name, out.name)
Some tests not required to pass did:
c1.t
c4.t
c5.t
c6.t
Conveniently, they have been added to `<tracking_path>` for you. Perhaps commit that?
Eep! 0 test(s) were required to pass, but instead 4 did. 🐭
'''
results = JUnitXMLFile(junitxml_path).parse()
if tracking_path == '...':
# Shortcut to aid in debugging XML parsing issues.
results.report()
return
previous = load_previous_results(tracking_path)
current = set(results.passed if direction == 'up' else results.failed)
subjunctive = {'up': 'required to pass', 'down': 'expected to fail'}[direction]
ratchet = None
too_many = current - previous
if too_many:
print(f'Some tests not {subjunctive} did:')
print_tests(too_many)
if direction == 'up':
# Too many passing tests is good -- let's do more of those!
ratchet_up_passing(tracking_path, current)
print(f'Conveniently, they have been added to `{tracking_path}` for you. Perhaps commit that?')
not_enough = previous - current
if not_enough:
print(f'Some tests {subjunctive} didn\'t:')
print_tests(not_enough)
if direction == 'down':
# Not enough failing tests is good -- let's do more of those!
ratchet_down_failing(tracking_path, current)
print(f'Conveniently, they have been removed from `{tracking_path}` for you. Perhaps commit that?')
if too_many or not_enough:
print(f'Eep! {len(previous)} test(s) were {subjunctive}, but instead {len(current)} did. 🐭')
return 1
print(f'All and only tests {subjunctive} did. 💃')
return 0
# When called as an executable ...
if __name__ == '__main__':
try:
direction, junitxml_path, tracking_path = sys.argv[1:4]
if direction not in ('up', 'down'):
raise ValueError
except ValueError:
doc = '\n'.join(__doc__.splitlines()[:6])
doc = re.sub(' ratchet.py', f' {sys.argv[0]}', doc)
print(doc, file=sys.stderr)
exit_code = 2
else:
exit_code = main(direction, junitxml_path, tracking_path)
sys.exit(exit_code)
# Helpers for when called under doctest ...
def _test(*a):
return _test_main(*_mk(*a))
def _test_main(direction, junitxml, tracking):
'''Takes a string 'up' or 'down' and paths to (or open file objects for)
the JUnit XML and tracking files to use for this test run. Captures and
emits stdout (slightly modified) for inspection via doctest.'''
junitxml_path = junitxml.name if hasattr(junitxml, 'name') else junitxml
tracking_path = tracking.name if hasattr(tracking, 'name') else tracking
old_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
main(direction, junitxml_path, tracking_path)
finally:
sys.stdout.seek(0)
out = sys.stdout.read()
out = re.sub('`.*?`', '`<tracking_path>`', out).strip()
sys.stdout = old_stdout
print(out)
class _PotentialFile(object):
'''Represent a file that we are able to create but which doesn't exist yet,
and which, if we create it, will be automatically torn down when the test
run is over.'''
def __init__(self, filename):
self.d = tempfile.TemporaryDirectory()
self.name = os.path.join(self.d.name, filename)
def _mk(direction, spec_junitxml, spec_tracking):
'''Takes a string 'up' or 'down' and two bit strings specifying the state
of the JUnit XML results file and the tracking file to set up for this test
case. Returns the direction (unharmed) and two file-ish objects.
If a spec string is None the corresponding return value will be a
_PotentialFile object, which has a .name attribute (like a true file
object) that points to a file that does not exist, but could.
The reason not to simply return the path in all cases is that the file
objects are actually temporary file objects that destroy the underlying
file when they go out of scope, and we want to keep the underlying file
around until the end of the test run.'''
if None not in(spec_junitxml, spec_tracking):
if len(spec_junitxml) != len(spec_tracking):
raise ValueError(f'if both given, must be the same length: `{spec_junitxml}` and `{spec_tracking}`')
if spec_junitxml is None:
junitxml_fp = _PotentialFile('results.xml')
else:
junitxml_fp = _mktemp_junitxml(spec_junitxml)
if spec_tracking is None:
tracking_fp = _PotentialFile('tracking')
else:
tracking_fp = _mktemp_tracking(spec_tracking)
return direction, junitxml_fp, tracking_fp
def _mktemp_junitxml(spec):
'''Test helper to generate a raw JUnit XML file.
>>> fp = _mktemp_junitxml('00101')
>>> open(fp.name).read()[:11]
'<testsuite>'
'''
fp = tempfile.NamedTemporaryFile()
fp.write(b'<testsuite>')
passed = '''\
<testcase classname="c{i}" name="t"></testcase>
'''
failed = '''\
<testcase classname="c{i}" name="t">
<failure>Traceback (most recent call last):
File "/foo/bar/baz/buz.py", line 1, in &lt;module>
NameError: name 'heck' is not defined
</failure>
</testcase>
'''
i = 0
for c in spec:
if c == '0':
out = failed
elif c == '1':
out = passed
else:
raise ValueError(f'bad c: `{c}`')
fp.write(out.format(i=i).encode('utf8'))
i += 1
fp.write(b'</testsuite>')
fp.flush()
return fp
def _mktemp_tracking(spec):
'''Test helper to prefabricate a tracking file.
>>> fp = _mktemp_tracking('01101')
>>> print(open(fp.name).read()[:-1])
c1.t
c2.t
c4.t
'''
fp = tempfile.NamedTemporaryFile()
i = 0
for c in spec:
if c == '0':
pass
elif c == '1':
fp.write(f'c{i}.t\n'.encode('utf8'))
else:
raise ValueError(f'bad c: `{c}`')
i += 1
fp.flush()
return fp
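
Stripped of the JUnit XML parsing and the doctest scaffolding, the ratcheting idea the removed script implemented is a small set comparison: load the tracked baseline, compare it with the current run, report regressions, and rewrite the baseline only when the change is in the welcome direction. A condensed restatement of that core for the "up" direction (my own sketch, not the deleted code):

def ratchet_up(current_passing, tracking_path):
    """Return 0 if nothing changed relative to the baseline, 1 otherwise."""
    try:
        with open(tracking_path) as f:
            required = {line.strip() for line in f
                        if line.strip() and not line.startswith("#")}
    except FileNotFoundError:
        required = set()
    newly_passing = current_passing - required  # welcome: record them in the baseline
    regressions = required - current_passing    # unwelcome: report, but leave the baseline alone
    if newly_passing:
        with open(tracking_path, "w") as f:
            f.writelines(sorted(t + "\n" for t in required | current_passing))
    return 1 if (newly_passing or regressions) else 0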

View File

@ -1,37 +0,0 @@
#!/usr/bin/env bash
set -euxo pipefail
tracking_filename="ratchet-passing"
# Start somewhere predictable.
cd "$(dirname $0)"
base=$(pwd)
# Actually, though, trial outputs some things that are only gitignored in the project root.
cd "../.."
# Since both of the next calls are expected to exit non-0, relax our guard.
set +e
SUBUNITREPORTER_OUTPUT_PATH="$base/results.subunit2" trial --reporter subunitv2-file allmydata
subunit2junitxml < "$base/results.subunit2" > "$base/results.xml"
set -e
# Okay, now we're clear.
cd "$base"
# Make sure ratchet.py itself is clean.
python3 -m doctest ratchet.py
# Now see about Tahoe-LAFS (also expected to fail) ...
set +e
python3 ratchet.py up results.xml "$tracking_filename"
code=$?
set -e
# Emit a diff of the tracking file, to aid in the situation where changes are
# not discovered until CI (where TERM might be `dumb`).
if [ $TERM = 'dumb' ]; then
export TERM=ansi
fi
git diff "$tracking_filename"
exit $code

newsfragments/3316.minor Normal file
View File

@ -0,0 +1 @@
Port checker result pages' rendering from nevow to twisted web templates.

newsfragments/3336.minor Normal file
View File

newsfragments/3358.minor Normal file
View File

View File

@ -0,0 +1 @@

newsfragments/3376.minor Normal file
View File

newsfragments/3380.minor Normal file
View File

View File

@ -741,7 +741,7 @@ class _Client(node.Node, pollmixin.PollMixin):
private_key_str = self.config.get_or_create_private_config("node.privkey", _make_key)
private_key, public_key = ed25519.signing_keypair_from_string(private_key_str)
public_key_str = ed25519.string_from_verifying_key(public_key)
self.config.write_config_file("node.pubkey", public_key_str + "\n")
self.config.write_config_file("node.pubkey", public_key_str + "\n", "w")
self._node_private_key = private_key
self._node_public_key = public_key

View File

@ -1,5 +1,5 @@
"""Directory Node implementation."""
import time, unicodedata
import time
from zope.interface import implementer
from twisted.internet import defer
@ -18,7 +18,7 @@ from allmydata.check_results import DeepCheckResults, \
DeepCheckAndRepairResults
from allmydata.monitor import Monitor
from allmydata.util import hashutil, base32, log
from allmydata.util.encodingutil import quote_output
from allmydata.util.encodingutil import quote_output, normalize
from allmydata.util.assertutil import precondition
from allmydata.util.netstring import netstring, split_netstring
from allmydata.util.consumer import download_to_data
@ -101,12 +101,6 @@ def update_metadata(metadata, new_metadata, now):
return metadata
# 'x' at the end of a variable name indicates that it holds a Unicode string that may not
# be NFC-normalized.
def normalize(namex):
return unicodedata.normalize('NFC', namex)
# TODO: {Deleter,MetadataSetter,Adder}.modify all start by unpacking the
# contents and end by repacking them. It might be better to apply them to
# the unpacked contents.
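
The normalize() helper moves from dirnode into allmydata.util.encodingutil; as the removed definition above shows, its job is to collapse a Unicode name to its NFC (composed) form so that canonically equivalent filenames compare equal. A quick standalone illustration of what that normalization does, using the same lumière example as the encodingutil tests later in this diff:

import unicodedata

decomposed = u"lumie\u0300re"                        # 'e' followed by a combining grave accent (NFD form)
composed = unicodedata.normalize("NFC", decomposed)  # what normalize() returns
assert composed == u"lumi\u00e8re"                   # single precomposed e-grave code point
assert decomposed != composed                        # without normalization the two spellings differ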

View File

@ -20,6 +20,10 @@ import signal
from twisted.internet import defer, reactor
from twisted.python import failure
from twisted.trial import unittest
from ..util.assertutil import precondition
from ..util.encodingutil import unicode_platform, get_filesystem_encoding
class TimezoneMixin(object):
@ -90,3 +94,21 @@ class ShouldFailMixin(object):
(which, expected_failure, res))
d.addBoth(done)
return d
class ReallyEqualMixin(object):
def failUnlessReallyEqual(self, a, b, msg=None):
self.assertEqual(a, b, msg)
self.assertEqual(type(a), type(b), "a :: %r, b :: %r, %r" % (a, b, msg))
def skip_if_cannot_represent_filename(u):
precondition(isinstance(u, unicode))
enc = get_filesystem_encoding()
if not unicode_platform():
try:
u.encode(enc)
except UnicodeEncodeError:
raise unittest.SkipTest("A non-ASCII filename could not be encoded on this platform.")

View File

@ -8,22 +8,17 @@ from twisted.internet import reactor, defer
from twisted.trial import unittest
from ..util.assertutil import precondition
from allmydata.util.encodingutil import (unicode_platform, get_filesystem_encoding,
get_io_encoding)
from ..scripts import runner
from .common_py3 import SignalMixin, ShouldFailMixin # noqa
from allmydata.util.encodingutil import get_io_encoding
from future.utils import PY2
if PY2: # XXX this is a hack that makes some tests pass on Python3, remove
# in the future
from ..scripts import runner
# Imported for backwards compatibility:
from .common_py3 import (
SignalMixin, skip_if_cannot_represent_filename, ReallyEqualMixin, ShouldFailMixin
)
def skip_if_cannot_represent_filename(u):
precondition(isinstance(u, unicode))
enc = get_filesystem_encoding()
if not unicode_platform():
try:
u.encode(enc)
except UnicodeEncodeError:
raise unittest.SkipTest("A non-ASCII filename could not be encoded on this platform.")
def skip_if_cannot_represent_argv(u):
precondition(isinstance(u, unicode))
try:
@ -83,12 +78,6 @@ def flip_one_bit(s, offset=0, size=None):
return result
class ReallyEqualMixin(object):
def failUnlessReallyEqual(self, a, b, msg=None):
self.assertEqual(a, b, msg)
self.assertEqual(type(a), type(b), "a :: %r, b :: %r, %r" % (a, b, msg))
class StallMixin(object):
def stall(self, res=None, delay=1):
d = defer.Deferred()
@ -160,3 +149,11 @@ except ImportError:
os.chmod(path, stat.S_IWRITE | stat.S_IEXEC | stat.S_IREAD)
make_readonly = _make_readonly
make_accessible = _make_accessible
__all__ = [
"make_readonly", "make_accessible", "TestMixin", "ShouldFailMixin",
"StallMixin", "skip_if_cannot_represent_argv", "run_cli", "parse_cli",
"DevNullDictionary", "insecurerandstr", "flip_bit", "flip_one_bit",
"SignalMixin", "skip_if_cannot_represent_filename", "ReallyEqualMixin"
]

View File

@ -0,0 +1,37 @@
"""
This module defines the subset of the full test suite which is expected to
pass on Python 3 in a way which makes that suite discoverable by trial.
This module has been ported to Python 3.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from future.utils import PY2
if PY2:
from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
from twisted.python.reflect import (
namedModule,
)
from twisted.trial.runner import (
TestLoader,
)
from twisted.trial.unittest import (
TestSuite,
)
from allmydata.util._python3 import (
PORTED_TEST_MODULES,
)
def testSuite():
loader = TestLoader()
return TestSuite(list(
loader.loadModule(namedModule(module))
for module
in PORTED_TEST_MODULES
))

View File

@ -1,10 +1,25 @@
import json
import os.path, shutil
from bs4 import BeautifulSoup
from twisted.trial import unittest
from twisted.internet import defer
from nevow.inevow import IRequest
from zope.interface import implementer
from twisted.web.server import Request
from twisted.web.test.requesthelper import DummyChannel
from twisted.web.template import flattenString
from allmydata import check_results, uri
from allmydata import uri as tahoe_uri
from allmydata.interfaces import (
IServer,
ICheckResults,
ICheckAndRepairResults,
)
from allmydata.util import base32
from allmydata.web import check_results as web_check_results
from allmydata.storage_client import StorageFarmBroker, NativeStorageServer
@ -12,18 +27,115 @@ from allmydata.storage.server import storage_index_to_dir
from allmydata.monitor import Monitor
from allmydata.test.no_network import GridTestMixin
from allmydata.immutable.upload import Data
from allmydata.test.common_web import WebRenderingMixin
from allmydata.mutable.publish import MutableData
from .common import (
EMPTY_CLIENT_CONFIG,
)
from .web.common import (
assert_soup_has_favicon,
assert_soup_has_tag_with_content,
)
class FakeClient(object):
def get_storage_broker(self):
return self.storage_broker
class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
@implementer(IRequest)
class TestRequest(Request, object):
"""
A minimal Request class to use in tests.
XXX: We have to have this class because `common.get_arg()` expects
a `nevow.inevow.IRequest`, which `twisted.web.server.Request`
isn't. The request needs to have `args`, `fields`, `prepath`, and
`postpath` properties so that `allmydata.web.common.get_arg()`
won't complain.
"""
def __init__(self, args=None, fields=None):
super(TestRequest, self).__init__(DummyChannel())
self.args = args or {}
self.fields = fields or {}
self.prepath = [b""]
self.postpath = [b""]
@implementer(IServer)
class FakeServer(object):
def get_name(self):
return "fake name"
def get_longname(self):
return "fake longname"
def get_nickname(self):
return "fake nickname"
@implementer(ICheckResults)
class FakeCheckResults(object):
def __init__(self, si=None,
healthy=False, recoverable=False,
summary="fake summary"):
self._storage_index = si
self._is_healthy = healthy
self._is_recoverable = recoverable
self._summary = summary
def get_storage_index(self):
return self._storage_index
def get_storage_index_string(self):
return base32.b2a_or_none(self._storage_index)
def is_healthy(self):
return self._is_healthy
def is_recoverable(self):
return self._is_recoverable
def get_summary(self):
return self._summary
def get_corrupt_shares(self):
# returns a list of (IServer, storage_index, sharenum)
return [(FakeServer(), "<fake-si>", 0)]
@implementer(ICheckAndRepairResults)
class FakeCheckAndRepairResults(object):
def __init__(self, si=None,
repair_attempted=False,
repair_success=False):
self._storage_index = si
self._repair_attempted = repair_attempted
self._repair_success = repair_success
def get_storage_index(self):
return self._storage_index
def get_pre_repair_results(self):
return FakeCheckResults()
def get_post_repair_results(self):
return FakeCheckResults()
def get_repair_attempted(self):
return self._repair_attempted
def get_repair_successful(self):
return self._repair_success
class WebResultsRendering(unittest.TestCase):
@staticmethod
def remove_tags(html):
return BeautifulSoup(html).get_text(separator=" ")
def create_fake_client(self):
sb = StorageFarmBroker(True, None, EMPTY_CLIENT_CONFIG)
@ -51,34 +163,31 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
c.storage_broker = sb
return c
def render_json(self, page):
d = self.render1(page, args={"output": ["json"]})
return d
def render_json(self, resource):
return resource.render(TestRequest(args={"output": ["json"]}))
def render_element(self, element, args=None):
d = flattenString(TestRequest(args), element)
return unittest.TestCase().successResultOf(d)
def test_literal(self):
lcr = web_check_results.LiteralCheckResultsRendererElement()
html = self.render_element(lcr)
self.failUnlessIn("Literal files are always healthy", html)
html = self.render_element(lcr, args={"return_to": ["FOOURL"]})
self.failUnlessIn("Literal files are always healthy", html)
self.failUnlessIn('<a href="FOOURL">Return to file.</a>', html)
c = self.create_fake_client()
lcr = web_check_results.LiteralCheckResultsRenderer(c)
d = self.render1(lcr)
def _check(html):
s = self.remove_tags(html)
self.failUnlessIn("Literal files are always healthy", s)
d.addCallback(_check)
d.addCallback(lambda ignored:
self.render1(lcr, args={"return_to": ["FOOURL"]}))
def _check_return_to(html):
s = self.remove_tags(html)
self.failUnlessIn("Literal files are always healthy", s)
self.failUnlessIn('<a href="FOOURL">Return to file.</a>',
html)
d.addCallback(_check_return_to)
d.addCallback(lambda ignored: self.render_json(lcr))
def _check_json(js):
j = json.loads(js)
self.failUnlessEqual(j["storage-index"], "")
self.failUnlessEqual(j["results"]["healthy"], True)
d.addCallback(_check_json)
return d
js = self.render_json(lcr)
j = json.loads(js)
self.failUnlessEqual(j["storage-index"], "")
self.failUnlessEqual(j["results"]["healthy"], True)
def test_check(self):
c = self.create_fake_client()
@ -108,8 +217,8 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
healthy=True, recoverable=True,
summary="groovy",
**data)
w = web_check_results.CheckResultsRenderer(c, cr)
html = self.render2(w)
w = web_check_results.CheckResultsRendererElement(c, cr)
html = self.render_element(w)
s = self.remove_tags(html)
self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
self.failUnlessIn("Healthy : groovy", s)
@ -120,14 +229,14 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
self.failUnlessIn("Wrong Shares: 0", s)
self.failUnlessIn("Recoverable Versions: 1", s)
self.failUnlessIn("Unrecoverable Versions: 0", s)
self.failUnlessIn("Good Shares (sorted in share order): Share ID Nickname Node ID shareid1 peer-0 00000000 peer-f ffffffff", s)
self.failUnlessIn("Good Shares (sorted in share order): Share ID Nickname Node ID shareid1 peer-0 00000000 peer-f ffffffff", s)
cr = check_results.CheckResults(u, u.get_storage_index(),
healthy=False, recoverable=True,
summary="ungroovy",
**data)
w = web_check_results.CheckResultsRenderer(c, cr)
html = self.render2(w)
w = web_check_results.CheckResultsRendererElement(c, cr)
html = self.render_element(w)
s = self.remove_tags(html)
self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
self.failUnlessIn("Not Healthy! : ungroovy", s)
@ -138,22 +247,23 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
healthy=False, recoverable=False,
summary="rather dead",
**data)
w = web_check_results.CheckResultsRenderer(c, cr)
html = self.render2(w)
w = web_check_results.CheckResultsRendererElement(c, cr)
html = self.render_element(w)
s = self.remove_tags(html)
self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
self.failUnlessIn("Not Recoverable! : rather dead", s)
self.failUnlessIn("Corrupt shares: Share ID Nickname Node ID sh#2 peer-0 00000000", s)
self.failUnlessIn("Corrupt shares: Share ID Nickname Node ID sh#2 peer-0 00000000", s)
html = self.render2(w)
html = self.render_element(w)
s = self.remove_tags(html)
self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated
self.failUnlessIn("Not Recoverable! : rather dead", s)
html = self.render2(w, args={"return_to": ["FOOURL"]})
html = self.render_element(w, args={"return_to": ["FOOURL"]})
self.failUnlessIn('<a href="FOOURL">Return to file/directory.</a>',
html)
w = web_check_results.CheckResultsRenderer(c, cr)
d = self.render_json(w)
def _check_json(jdata):
j = json.loads(jdata)
@ -178,15 +288,15 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
'recoverable': False,
}
self.failUnlessEqual(j["results"], expected)
d.addCallback(_check_json)
d.addCallback(lambda ignored: self.render1(w))
_check_json(d)
w = web_check_results.CheckResultsRendererElement(c, cr)
d = self.render_element(w)
def _check(html):
s = self.remove_tags(html)
self.failUnlessIn("File Check Results for SI=2k6avp", s)
self.failUnlessIn("Not Recoverable! : rather dead", s)
d.addCallback(_check)
return d
_check(html)
def test_check_and_repair(self):
c = self.create_fake_client()
@ -244,8 +354,8 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
crr.post_repair_results = post_cr
crr.repair_attempted = False
w = web_check_results.CheckAndRepairResultsRenderer(c, crr)
html = self.render2(w)
w = web_check_results.CheckAndRepairResultsRendererElement(c, crr)
html = self.render_element(w)
s = self.remove_tags(html)
self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
@ -256,7 +366,7 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
crr.repair_attempted = True
crr.repair_successful = True
html = self.render2(w)
html = self.render_element(w)
s = self.remove_tags(html)
self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
@ -271,7 +381,7 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
summary="better",
**data)
crr.post_repair_results = post_cr
html = self.render2(w)
html = self.render_element(w)
s = self.remove_tags(html)
self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
@ -286,7 +396,7 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
summary="worse",
**data)
crr.post_repair_results = post_cr
html = self.render2(w)
html = self.render_element(w)
s = self.remove_tags(html)
self.failUnlessIn("File Check-And-Repair Results for SI=2k6avp", s)
@ -294,24 +404,218 @@ class WebResultsRendering(unittest.TestCase, WebRenderingMixin):
self.failUnlessIn("Repair unsuccessful", s)
self.failUnlessIn("Post-Repair Checker Results:", s)
d = self.render_json(w)
def _got_json(data):
j = json.loads(data)
self.failUnlessEqual(j["repair-attempted"], True)
self.failUnlessEqual(j["storage-index"],
"2k6avpjga3dho3zsjo6nnkt7n4")
self.failUnlessEqual(j["pre-repair-results"]["summary"], "illing")
self.failUnlessEqual(j["post-repair-results"]["summary"], "worse")
d.addCallback(_got_json)
w = web_check_results.CheckAndRepairResultsRenderer(c, crr)
j = json.loads(self.render_json(w))
self.failUnlessEqual(j["repair-attempted"], True)
self.failUnlessEqual(j["storage-index"],
"2k6avpjga3dho3zsjo6nnkt7n4")
self.failUnlessEqual(j["pre-repair-results"]["summary"], "illing")
self.failUnlessEqual(j["post-repair-results"]["summary"], "worse")
w = web_check_results.CheckAndRepairResultsRenderer(c, None)
j = json.loads(self.render_json(w))
self.failUnlessEqual(j["repair-attempted"], False)
self.failUnlessEqual(j["storage-index"], "")
def test_deep_check_renderer(self):
status = check_results.DeepCheckResults("fake-root-si")
status.add_check(
FakeCheckResults("<unhealthy/unrecoverable>", False, False),
(u"fake", u"unhealthy", u"unrecoverable")
)
status.add_check(
FakeCheckResults("<healthy/recoverable>", True, True),
(u"fake", u"healthy", u"recoverable")
)
status.add_check(
FakeCheckResults("<healthy/unrecoverable>", True, False),
(u"fake", u"healthy", u"unrecoverable")
)
status.add_check(
FakeCheckResults("<unhealthy/unrecoverable>", False, True),
(u"fake", u"unhealthy", u"recoverable")
)
monitor = Monitor()
monitor.set_status(status)
elem = web_check_results.DeepCheckResultsRendererElement(monitor)
doc = self.render_element(elem)
soup = BeautifulSoup(doc, 'html5lib')
assert_soup_has_favicon(self, soup)
assert_soup_has_tag_with_content(
self, soup, u"title",
u"Tahoe-LAFS - Deep Check Results"
)
assert_soup_has_tag_with_content(
self, soup, u"h1",
"Deep-Check Results for root SI="
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Objects Checked: 4"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Objects Healthy: 2"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Objects Unhealthy: 2"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Objects Unrecoverable: 2"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Corrupt Shares: 4"
)
assert_soup_has_tag_with_content(
self, soup, u"h2",
u"Files/Directories That Had Problems:"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"fake/unhealthy/recoverable: fake summary"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"fake/unhealthy/unrecoverable: fake summary"
)
assert_soup_has_tag_with_content(
self, soup, u"h2",
u"Servers on which corrupt shares were found"
)
assert_soup_has_tag_with_content(
self, soup, u"h2",
u"Corrupt Shares"
)
assert_soup_has_tag_with_content(
self, soup, u"h2",
u"All Results"
)
def test_deep_check_and_repair_renderer(self):
status = check_results.DeepCheckAndRepairResults("")
status.add_check_and_repair(
FakeCheckAndRepairResults("attempted/success", True, True),
(u"attempted", u"success")
)
status.add_check_and_repair(
FakeCheckAndRepairResults("attempted/failure", True, False),
(u"attempted", u"failure")
)
status.add_check_and_repair(
FakeCheckAndRepairResults("unattempted/failure", False, False),
(u"unattempted", u"failure")
)
monitor = Monitor()
monitor.set_status(status)
elem = web_check_results.DeepCheckAndRepairResultsRendererElement(monitor)
doc = self.render_element(elem)
soup = BeautifulSoup(doc, 'html5lib')
assert_soup_has_favicon(self, soup)
assert_soup_has_tag_with_content(
self, soup, u"title",
u"Tahoe-LAFS - Deep Check Results"
)
assert_soup_has_tag_with_content(
self, soup, u"h1",
u"Deep-Check-And-Repair Results for root SI="
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Objects Checked: 3"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Objects Healthy (before repair): 0"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Objects Unhealthy (before repair): 3"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Corrupt Shares (before repair): 3"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Repairs Attempted: 2"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Repairs Successful: 1"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
"Repairs Unsuccessful: 1"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Objects Healthy (after repair): 0"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Objects Unhealthy (after repair): 3"
)
assert_soup_has_tag_with_content(
self, soup, u"li",
u"Corrupt Shares (after repair): 3"
)
assert_soup_has_tag_with_content(
self, soup, u"h2",
u"Files/Directories That Had Problems:"
)
assert_soup_has_tag_with_content(
self, soup, u"h2",
u"Files/Directories That Still Have Problems:"
)
assert_soup_has_tag_with_content(
self, soup, u"h2",
u"Servers on which corrupt shares were found"
)
assert_soup_has_tag_with_content(
self, soup, u"h2",
u"Remaining Corrupt Shares"
)
w2 = web_check_results.CheckAndRepairResultsRenderer(c, None)
d.addCallback(lambda ignored: self.render_json(w2))
def _got_lit_results(data):
j = json.loads(data)
self.failUnlessEqual(j["repair-attempted"], False)
self.failUnlessEqual(j["storage-index"], "")
d.addCallback(_got_lit_results)
return d
class BalancingAct(GridTestMixin, unittest.TestCase):
# test for #1115 regarding the 'count-good-share-hosts' metric

View File

@ -1,4 +1,14 @@
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future.utils import PY2, PY3
if PY2:
# We don't import str because omg way too ambiguous in this context.
from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, max, min # noqa: F401
from past.builtins import unicode
lumiere_nfc = u"lumi\u00E8re"
Artonwall_nfc = u"\u00C4rtonwall.mp3"
@ -43,8 +53,10 @@ if __name__ == "__main__":
for fname in TEST_FILENAMES:
open(os.path.join(tmpdir, fname), 'w').close()
# Use Unicode API under Windows or MacOS X
if sys.platform in ('win32', 'darwin'):
# On Python 2, listing directories returns unicode under Windows or
# MacOS X if the input is unicode. On Python 3, it always returns
# Unicode.
if PY2 and sys.platform in ('win32', 'darwin'):
dirlist = os.listdir(unicode(tmpdir))
else:
dirlist = os.listdir(tmpdir)
@ -59,20 +71,22 @@ if __name__ == "__main__":
import os, sys, locale
from unittest import skipIf
from twisted.trial import unittest
from twisted.python.filepath import FilePath
from allmydata.test.common_util import ReallyEqualMixin
from allmydata.test.common_py3 import (
ReallyEqualMixin, skip_if_cannot_represent_filename,
)
from allmydata.util import encodingutil, fileutil
from allmydata.util.encodingutil import argv_to_unicode, unicode_to_url, \
unicode_to_output, quote_output, quote_path, quote_local_unicode_path, \
quote_filepath, unicode_platform, listdir_unicode, FilenameEncodingError, \
get_io_encoding, get_filesystem_encoding, to_str, from_utf8_or_none, _reload, \
to_filepath, extend_filepath, unicode_from_filepath, unicode_segments_from
from allmydata.dirnode import normalize
from .common_util import skip_if_cannot_represent_filename
to_filepath, extend_filepath, unicode_from_filepath, unicode_segments_from, \
unicode_to_argv
from twisted.python import usage
@ -90,7 +104,7 @@ class EncodingUtilErrors(ReallyEqualMixin, unittest.TestCase):
mock_stdout.encoding = 'cp65001'
_reload()
self.failUnlessReallyEqual(get_io_encoding(), 'utf-8')
self.assertEqual(get_io_encoding(), 'utf-8')
mock_stdout.encoding = 'koi8-r'
expected = sys.platform == "win32" and 'utf-8' or 'koi8-r'
@ -122,7 +136,7 @@ class EncodingUtilErrors(ReallyEqualMixin, unittest.TestCase):
preferredencoding = None
_reload()
self.failUnlessReallyEqual(get_io_encoding(), 'utf-8')
self.assertEqual(get_io_encoding(), 'utf-8')
def test_argv_to_unicode(self):
encodingutil.io_encoding = 'utf-8'
@ -150,6 +164,7 @@ class EncodingUtilErrors(ReallyEqualMixin, unittest.TestCase):
# The following tests apply only to platforms that don't store filenames as
# Unicode entities on the filesystem.
class EncodingUtilNonUnicodePlatform(unittest.TestCase):
@skipIf(PY3, "Python 3 is always Unicode, regardless of OS.")
def setUp(self):
# Mock sys.platform because unicode_platform() uses it
self.original_platform = sys.platform
@ -211,7 +226,7 @@ class EncodingUtil(ReallyEqualMixin):
self.failUnlessReallyEqual(argv_to_unicode(argv), argu)
def test_unicode_to_url(self):
self.failUnless(unicode_to_url(lumiere_nfc), "lumi\xc3\xa8re")
self.failUnless(unicode_to_url(lumiere_nfc), b"lumi\xc3\xa8re")
def test_unicode_to_output(self):
if 'argv' not in dir(self):
@ -224,7 +239,18 @@ class EncodingUtil(ReallyEqualMixin):
_reload()
self.failUnlessReallyEqual(unicode_to_output(lumiere_nfc), self.argv)
def test_unicode_platform(self):
@skipIf(PY3, "Python 2 only.")
def test_unicode_to_argv_py2(self):
"""unicode_to_argv() converts to bytes on Python 2."""
self.assertEqual(unicode_to_argv("abc"), u"abc".encode(self.io_encoding))
@skipIf(PY2, "Python 3 only.")
def test_unicode_to_argv_py3(self):
"""unicode_to_argv() is noop on Python 3."""
self.assertEqual(unicode_to_argv("abc"), "abc")
@skipIf(PY3, "Python 3 only.")
def test_unicode_platform_py2(self):
matrix = {
'linux2': False,
'linux3': False,
@ -236,6 +262,11 @@ class EncodingUtil(ReallyEqualMixin):
_reload()
self.failUnlessReallyEqual(unicode_platform(), matrix[self.platform])
@skipIf(PY2, "Python 3 isn't Python 2.")
def test_unicode_platform_py3(self):
_reload()
self.failUnlessReallyEqual(unicode_platform(), True)
def test_listdir_unicode(self):
if 'dirlist' not in dir(self):
return
@ -248,7 +279,14 @@ class EncodingUtil(ReallyEqualMixin):
% (self.filesystem_encoding,))
def call_os_listdir(path):
return self.dirlist
if PY2:
return self.dirlist
else:
# Python 3 always lists unicode filenames:
return [d.decode(self.filesystem_encoding) if isinstance(d, bytes)
else d
for d in self.dirlist]
self.patch(os, 'listdir', call_os_listdir)
def call_sys_getfilesystemencoding():
@ -258,7 +296,7 @@ class EncodingUtil(ReallyEqualMixin):
_reload()
filenames = listdir_unicode(u'/dummy')
self.failUnlessEqual(set([normalize(fname) for fname in filenames]),
self.failUnlessEqual(set([encodingutil.normalize(fname) for fname in filenames]),
set(TEST_FILENAMES))
@ -278,12 +316,16 @@ class StdlibUnicode(unittest.TestCase):
fn = lumiere_nfc + u'/' + lumiere_nfc + u'.txt'
open(fn, 'wb').close()
self.failUnless(os.path.exists(fn))
self.failUnless(os.path.exists(os.path.join(os.getcwdu(), fn)))
if PY2:
getcwdu = os.getcwdu
else:
getcwdu = os.getcwd
self.failUnless(os.path.exists(os.path.join(getcwdu(), fn)))
filenames = listdir_unicode(lumiere_nfc)
# We only require that the listing includes a filename that is canonically equivalent
# to lumiere_nfc (on Mac OS X, it will be the NFD equivalent).
self.failUnlessIn(lumiere_nfc + ".txt", set([normalize(fname) for fname in filenames]))
self.failUnlessIn(lumiere_nfc + u".txt", set([encodingutil.normalize(fname) for fname in filenames]))
expanded = fileutil.expanduser(u"~/" + lumiere_nfc)
self.failIfIn(u"~", expanded)
@ -314,59 +356,70 @@ class QuoteOutput(ReallyEqualMixin, unittest.TestCase):
self.failUnlessReallyEqual(quote_output(inp, encoding=enc, quotemarks=False, quote_newlines=quote_newlines), out2)
if out[0:2] == 'b"':
pass
elif isinstance(inp, str):
self.failUnlessReallyEqual(quote_output(unicode(inp), encoding=enc, quote_newlines=quote_newlines), out)
self.failUnlessReallyEqual(quote_output(unicode(inp), encoding=enc, quotemarks=False, quote_newlines=quote_newlines), out2)
elif isinstance(inp, bytes):
try:
unicode_inp = inp.decode("utf-8")
except UnicodeDecodeError:
# Some things decode on Python 2, but not Python 3...
return
self.failUnlessReallyEqual(quote_output(unicode_inp, encoding=enc, quote_newlines=quote_newlines), out)
self.failUnlessReallyEqual(quote_output(unicode_inp, encoding=enc, quotemarks=False, quote_newlines=quote_newlines), out2)
else:
self.failUnlessReallyEqual(quote_output(inp.encode('utf-8'), encoding=enc, quote_newlines=quote_newlines), out)
self.failUnlessReallyEqual(quote_output(inp.encode('utf-8'), encoding=enc, quotemarks=False, quote_newlines=quote_newlines), out2)
try:
bytes_inp = inp.encode('utf-8')
except UnicodeEncodeError:
# Some things encode on Python 2, but not Python 3, e.g.
# surrogates like u"\uDC00\uD800"...
return
self.failUnlessReallyEqual(quote_output(bytes_inp, encoding=enc, quote_newlines=quote_newlines), out)
self.failUnlessReallyEqual(quote_output(bytes_inp, encoding=enc, quotemarks=False, quote_newlines=quote_newlines), out2)
def _test_quote_output_all(self, enc):
def check(inp, out, optional_quotes=False, quote_newlines=None):
self._check(inp, out, enc, optional_quotes, quote_newlines)
# optional single quotes
check("foo", "'foo'", True)
check("\\", "'\\'", True)
check("$\"`", "'$\"`'", True)
check("\n", "'\n'", True, quote_newlines=False)
check(b"foo", b"'foo'", True)
check(b"\\", b"'\\'", True)
check(b"$\"`", b"'$\"`'", True)
check(b"\n", b"'\n'", True, quote_newlines=False)
# mandatory single quotes
check("\"", "'\"'")
check(b"\"", b"'\"'")
# double quotes
check("'", "\"'\"")
check("\n", "\"\\x0a\"", quote_newlines=True)
check("\x00", "\"\\x00\"")
check(b"'", b"\"'\"")
check(b"\n", b"\"\\x0a\"", quote_newlines=True)
check(b"\x00", b"\"\\x00\"")
# invalid Unicode and astral planes
check(u"\uFDD0\uFDEF", "\"\\ufdd0\\ufdef\"")
check(u"\uDC00\uD800", "\"\\udc00\\ud800\"")
check(u"\uDC00\uD800\uDC00", "\"\\udc00\\U00010000\"")
check(u"\uD800\uDC00", "\"\\U00010000\"")
check(u"\uD800\uDC01", "\"\\U00010001\"")
check(u"\uD801\uDC00", "\"\\U00010400\"")
check(u"\uDBFF\uDFFF", "\"\\U0010ffff\"")
check(u"'\uDBFF\uDFFF", "\"'\\U0010ffff\"")
check(u"\"\uDBFF\uDFFF", "\"\\\"\\U0010ffff\"")
check(u"\uFDD0\uFDEF", b"\"\\ufdd0\\ufdef\"")
check(u"\uDC00\uD800", b"\"\\udc00\\ud800\"")
check(u"\uDC00\uD800\uDC00", b"\"\\udc00\\U00010000\"")
check(u"\uD800\uDC00", b"\"\\U00010000\"")
check(u"\uD800\uDC01", b"\"\\U00010001\"")
check(u"\uD801\uDC00", b"\"\\U00010400\"")
check(u"\uDBFF\uDFFF", b"\"\\U0010ffff\"")
check(u"'\uDBFF\uDFFF", b"\"'\\U0010ffff\"")
check(u"\"\uDBFF\uDFFF", b"\"\\\"\\U0010ffff\"")
# invalid UTF-8
check("\xFF", "b\"\\xff\"")
check("\x00\"$\\`\x80\xFF", "b\"\\x00\\\"\\$\\\\\\`\\x80\\xff\"")
check(b"\xFF", b"b\"\\xff\"")
check(b"\x00\"$\\`\x80\xFF", b"b\"\\x00\\\"\\$\\\\\\`\\x80\\xff\"")
def test_quote_output_ascii(self, enc='ascii'):
def check(inp, out, optional_quotes=False, quote_newlines=None):
self._check(inp, out, enc, optional_quotes, quote_newlines)
self._test_quote_output_all(enc)
check(u"\u00D7", "\"\\xd7\"")
check(u"'\u00D7", "\"'\\xd7\"")
check(u"\"\u00D7", "\"\\\"\\xd7\"")
check(u"\u2621", "\"\\u2621\"")
check(u"'\u2621", "\"'\\u2621\"")
check(u"\"\u2621", "\"\\\"\\u2621\"")
check(u"\n", "'\n'", True, quote_newlines=False)
check(u"\n", "\"\\x0a\"", quote_newlines=True)
check(u"\u00D7", b"\"\\xd7\"")
check(u"'\u00D7", b"\"'\\xd7\"")
check(u"\"\u00D7", b"\"\\\"\\xd7\"")
check(u"\u2621", b"\"\\u2621\"")
check(u"'\u2621", b"\"'\\u2621\"")
check(u"\"\u2621", b"\"\\\"\\u2621\"")
check(u"\n", b"'\n'", True, quote_newlines=False)
check(u"\n", b"\"\\x0a\"", quote_newlines=True)
def test_quote_output_latin1(self, enc='latin1'):
def check(inp, out, optional_quotes=False, quote_newlines=None):
@ -411,43 +464,43 @@ def win32_other(win32, other):
class QuotePaths(ReallyEqualMixin, unittest.TestCase):
def test_quote_path(self):
self.failUnlessReallyEqual(quote_path([u'foo', u'bar']), "'foo/bar'")
self.failUnlessReallyEqual(quote_path([u'foo', u'bar'], quotemarks=True), "'foo/bar'")
self.failUnlessReallyEqual(quote_path([u'foo', u'bar'], quotemarks=False), "foo/bar")
self.failUnlessReallyEqual(quote_path([u'foo', u'\nbar']), '"foo/\\x0abar"')
self.failUnlessReallyEqual(quote_path([u'foo', u'\nbar'], quotemarks=True), '"foo/\\x0abar"')
self.failUnlessReallyEqual(quote_path([u'foo', u'\nbar'], quotemarks=False), '"foo/\\x0abar"')
self.failUnlessReallyEqual(quote_path([u'foo', u'bar']), b"'foo/bar'")
self.failUnlessReallyEqual(quote_path([u'foo', u'bar'], quotemarks=True), b"'foo/bar'")
self.failUnlessReallyEqual(quote_path([u'foo', u'bar'], quotemarks=False), b"foo/bar")
self.failUnlessReallyEqual(quote_path([u'foo', u'\nbar']), b'"foo/\\x0abar"')
self.failUnlessReallyEqual(quote_path([u'foo', u'\nbar'], quotemarks=True), b'"foo/\\x0abar"')
self.failUnlessReallyEqual(quote_path([u'foo', u'\nbar'], quotemarks=False), b'"foo/\\x0abar"')
self.failUnlessReallyEqual(quote_local_unicode_path(u"\\\\?\\C:\\foo"),
win32_other("'C:\\foo'", "'\\\\?\\C:\\foo'"))
win32_other(b"'C:\\foo'", b"'\\\\?\\C:\\foo'"))
self.failUnlessReallyEqual(quote_local_unicode_path(u"\\\\?\\C:\\foo", quotemarks=True),
win32_other("'C:\\foo'", "'\\\\?\\C:\\foo'"))
win32_other(b"'C:\\foo'", b"'\\\\?\\C:\\foo'"))
self.failUnlessReallyEqual(quote_local_unicode_path(u"\\\\?\\C:\\foo", quotemarks=False),
win32_other("C:\\foo", "\\\\?\\C:\\foo"))
win32_other(b"C:\\foo", b"\\\\?\\C:\\foo"))
self.failUnlessReallyEqual(quote_local_unicode_path(u"\\\\?\\UNC\\foo\\bar"),
win32_other("'\\\\foo\\bar'", "'\\\\?\\UNC\\foo\\bar'"))
win32_other(b"'\\\\foo\\bar'", b"'\\\\?\\UNC\\foo\\bar'"))
self.failUnlessReallyEqual(quote_local_unicode_path(u"\\\\?\\UNC\\foo\\bar", quotemarks=True),
win32_other("'\\\\foo\\bar'", "'\\\\?\\UNC\\foo\\bar'"))
win32_other(b"'\\\\foo\\bar'", b"'\\\\?\\UNC\\foo\\bar'"))
self.failUnlessReallyEqual(quote_local_unicode_path(u"\\\\?\\UNC\\foo\\bar", quotemarks=False),
win32_other("\\\\foo\\bar", "\\\\?\\UNC\\foo\\bar"))
win32_other(b"\\\\foo\\bar", b"\\\\?\\UNC\\foo\\bar"))
def test_quote_filepath(self):
foo_bar_fp = FilePath(win32_other(u'C:\\foo\\bar', u'/foo/bar'))
self.failUnlessReallyEqual(quote_filepath(foo_bar_fp),
win32_other("'C:\\foo\\bar'", "'/foo/bar'"))
win32_other(b"'C:\\foo\\bar'", b"'/foo/bar'"))
self.failUnlessReallyEqual(quote_filepath(foo_bar_fp, quotemarks=True),
win32_other("'C:\\foo\\bar'", "'/foo/bar'"))
win32_other(b"'C:\\foo\\bar'", b"'/foo/bar'"))
self.failUnlessReallyEqual(quote_filepath(foo_bar_fp, quotemarks=False),
win32_other("C:\\foo\\bar", "/foo/bar"))
win32_other(b"C:\\foo\\bar", b"/foo/bar"))
if sys.platform == "win32":
foo_longfp = FilePath(u'\\\\?\\C:\\foo')
self.failUnlessReallyEqual(quote_filepath(foo_longfp),
"'C:\\foo'")
b"'C:\\foo'")
self.failUnlessReallyEqual(quote_filepath(foo_longfp, quotemarks=True),
"'C:\\foo'")
b"'C:\\foo'")
self.failUnlessReallyEqual(quote_filepath(foo_longfp, quotemarks=False),
"C:\\foo")
b"C:\\foo")
class FilePaths(ReallyEqualMixin, unittest.TestCase):
@ -501,23 +554,23 @@ class FilePaths(ReallyEqualMixin, unittest.TestCase):
class UbuntuKarmicUTF8(EncodingUtil, unittest.TestCase):
uname = 'Linux korn 2.6.31-14-generic #48-Ubuntu SMP Fri Oct 16 14:05:01 UTC 2009 x86_64'
argv = 'lumi\xc3\xa8re'
argv = b'lumi\xc3\xa8re'
platform = 'linux2'
filesystem_encoding = 'UTF-8'
io_encoding = 'UTF-8'
dirlist = ['test_file', '\xc3\x84rtonwall.mp3', 'Blah blah.txt']
dirlist = [b'test_file', b'\xc3\x84rtonwall.mp3', b'Blah blah.txt']
class UbuntuKarmicLatin1(EncodingUtil, unittest.TestCase):
uname = 'Linux korn 2.6.31-14-generic #48-Ubuntu SMP Fri Oct 16 14:05:01 UTC 2009 x86_64'
argv = 'lumi\xe8re'
argv = b'lumi\xe8re'
platform = 'linux2'
filesystem_encoding = 'ISO-8859-1'
io_encoding = 'ISO-8859-1'
dirlist = ['test_file', 'Blah blah.txt', '\xc4rtonwall.mp3']
dirlist = [b'test_file', b'Blah blah.txt', b'\xc4rtonwall.mp3']
class Windows(EncodingUtil, unittest.TestCase):
uname = 'Windows XP 5.1.2600 x86 x86 Family 15 Model 75 Stepping 2, AuthenticAMD'
argv = 'lumi\xc3\xa8re'
argv = b'lumi\xc3\xa8re'
platform = 'win32'
filesystem_encoding = 'mbcs'
io_encoding = 'utf-8'
@ -525,7 +578,7 @@ class Windows(EncodingUtil, unittest.TestCase):
class MacOSXLeopard(EncodingUtil, unittest.TestCase):
uname = 'Darwin g5.local 9.8.0 Darwin Kernel Version 9.8.0: Wed Jul 15 16:57:01 PDT 2009; root:xnu-1228.15.4~1/RELEASE_PPC Power Macintosh powerpc'
output = 'lumi\xc3\xa8re'
output = b'lumi\xc3\xa8re'
platform = 'darwin'
filesystem_encoding = 'utf-8'
io_encoding = 'UTF-8'
@ -548,14 +601,14 @@ class OpenBSD(EncodingUtil, unittest.TestCase):
class TestToFromStr(ReallyEqualMixin, unittest.TestCase):
def test_to_str(self):
self.failUnlessReallyEqual(to_str("foo"), "foo")
self.failUnlessReallyEqual(to_str("lumi\xc3\xa8re"), "lumi\xc3\xa8re")
self.failUnlessReallyEqual(to_str("\xFF"), "\xFF") # passes through invalid UTF-8 -- is this what we want?
self.failUnlessReallyEqual(to_str(u"lumi\u00E8re"), "lumi\xc3\xa8re")
self.failUnlessReallyEqual(to_str(b"foo"), b"foo")
self.failUnlessReallyEqual(to_str(b"lumi\xc3\xa8re"), b"lumi\xc3\xa8re")
self.failUnlessReallyEqual(to_str(b"\xFF"), b"\xFF") # passes through invalid UTF-8 -- is this what we want?
self.failUnlessReallyEqual(to_str(u"lumi\u00E8re"), b"lumi\xc3\xa8re")
self.failUnlessReallyEqual(to_str(None), None)
def test_from_utf8_or_none(self):
self.failUnlessRaises(AssertionError, from_utf8_or_none, u"foo")
self.failUnlessReallyEqual(from_utf8_or_none("lumi\xc3\xa8re"), u"lumi\u00E8re")
self.failUnlessReallyEqual(from_utf8_or_none(b"lumi\xc3\xa8re"), u"lumi\u00E8re")
self.failUnlessReallyEqual(from_utf8_or_none(None), None)
self.failUnlessRaises(UnicodeDecodeError, from_utf8_or_none, "\xFF")
self.failUnlessRaises(UnicodeDecodeError, from_utf8_or_none, b"\xFF")

View File

@ -1,5 +1,15 @@
from __future__ import print_function
"""
Ported to Python3.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
import six
import os, time, sys
import yaml
@ -19,7 +29,7 @@ if six.PY3:
class IDLib(unittest.TestCase):
def test_nodeid_b2a(self):
self.failUnlessEqual(idlib.nodeid_b2a("\x00"*20), "a"*32)
self.failUnlessEqual(idlib.nodeid_b2a(b"\x00"*20), "a"*32)
class MyList(list):
@ -85,10 +95,10 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
basedir = "util/FileUtil/test_write_atomically"
fileutil.make_dirs(basedir)
fn = os.path.join(basedir, "here")
fileutil.write_atomically(fn, "one")
self.failUnlessEqual(fileutil.read(fn), "one")
fileutil.write_atomically(fn, "two", mode="") # non-binary
self.failUnlessEqual(fileutil.read(fn), "two")
fileutil.write_atomically(fn, b"one", "b")
self.failUnlessEqual(fileutil.read(fn), b"one")
fileutil.write_atomically(fn, u"two", mode="") # non-binary
self.failUnlessEqual(fileutil.read(fn), b"two")
def test_rename(self):
basedir = "util/FileUtil/test_rename"
@ -111,20 +121,20 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
self.failUnlessRaises(OSError, fileutil.rename_no_overwrite, source_path, dest_path)
# when only dest exists
fileutil.write(dest_path, "dest")
fileutil.write(dest_path, b"dest")
self.failUnlessRaises(OSError, fileutil.rename_no_overwrite, source_path, dest_path)
self.failUnlessEqual(fileutil.read(dest_path), "dest")
self.failUnlessEqual(fileutil.read(dest_path), b"dest")
# when both exist
fileutil.write(source_path, "source")
fileutil.write(source_path, b"source")
self.failUnlessRaises(OSError, fileutil.rename_no_overwrite, source_path, dest_path)
self.failUnlessEqual(fileutil.read(source_path), "source")
self.failUnlessEqual(fileutil.read(dest_path), "dest")
self.failUnlessEqual(fileutil.read(source_path), b"source")
self.failUnlessEqual(fileutil.read(dest_path), b"dest")
# when only source exists
os.remove(dest_path)
fileutil.rename_no_overwrite(source_path, dest_path)
self.failUnlessEqual(fileutil.read(dest_path), "source")
self.failUnlessEqual(fileutil.read(dest_path), b"source")
self.failIf(os.path.exists(source_path))
def test_replace_file(self):
@ -138,21 +148,21 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
self.failUnlessRaises(fileutil.ConflictError, fileutil.replace_file, replaced_path, replacement_path)
# when only replaced exists
fileutil.write(replaced_path, "foo")
fileutil.write(replaced_path, b"foo")
self.failUnlessRaises(fileutil.ConflictError, fileutil.replace_file, replaced_path, replacement_path)
self.failUnlessEqual(fileutil.read(replaced_path), "foo")
self.failUnlessEqual(fileutil.read(replaced_path), b"foo")
# when both replaced and replacement exist
fileutil.write(replacement_path, "bar")
fileutil.write(replacement_path, b"bar")
fileutil.replace_file(replaced_path, replacement_path)
self.failUnlessEqual(fileutil.read(replaced_path), "bar")
self.failUnlessEqual(fileutil.read(replaced_path), b"bar")
self.failIf(os.path.exists(replacement_path))
# when only replacement exists
os.remove(replaced_path)
fileutil.write(replacement_path, "bar")
fileutil.write(replacement_path, b"bar")
fileutil.replace_file(replaced_path, replacement_path)
self.failUnlessEqual(fileutil.read(replaced_path), "bar")
self.failUnlessEqual(fileutil.read(replaced_path), b"bar")
self.failIf(os.path.exists(replacement_path))
def test_du(self):
@ -170,13 +180,15 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
self.failUnlessEqual(10+11+12+13, used)
def test_abspath_expanduser_unicode(self):
self.failUnlessRaises(AssertionError, fileutil.abspath_expanduser_unicode, "bytestring")
self.failUnlessRaises(AssertionError, fileutil.abspath_expanduser_unicode, b"bytestring")
saved_cwd = os.path.normpath(os.getcwdu())
saved_cwd = os.path.normpath(os.getcwd())
if PY2:
saved_cwd = saved_cwd.decode("utf8")
abspath_cwd = fileutil.abspath_expanduser_unicode(u".")
abspath_cwd_notlong = fileutil.abspath_expanduser_unicode(u".", long_path=False)
self.failUnless(isinstance(saved_cwd, unicode), saved_cwd)
self.failUnless(isinstance(abspath_cwd, unicode), abspath_cwd)
self.failUnless(isinstance(saved_cwd, str), saved_cwd)
self.failUnless(isinstance(abspath_cwd, str), abspath_cwd)
if sys.platform == "win32":
self.failUnlessReallyEqual(abspath_cwd, fileutil.to_windows_long_path(saved_cwd))
else:
@ -237,10 +249,10 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
os.chdir(cwd)
for upath in (u'', u'fuu', u'f\xf9\xf9', u'/fuu', u'U:\\', u'~'):
uabspath = fileutil.abspath_expanduser_unicode(upath)
self.failUnless(isinstance(uabspath, unicode), uabspath)
self.failUnless(isinstance(uabspath, str), uabspath)
uabspath_notlong = fileutil.abspath_expanduser_unicode(upath, long_path=False)
self.failUnless(isinstance(uabspath_notlong, unicode), uabspath_notlong)
self.failUnless(isinstance(uabspath_notlong, str), uabspath_notlong)
finally:
os.chdir(saved_cwd)
@ -293,9 +305,9 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
fileutil.remove(long_path)
self.addCleanup(_cleanup)
fileutil.write(long_path, "test")
fileutil.write(long_path, b"test")
self.failUnless(os.path.exists(long_path))
self.failUnlessEqual(fileutil.read(long_path), "test")
self.failUnlessEqual(fileutil.read(long_path), b"test")
_cleanup()
self.failIf(os.path.exists(long_path))
@ -353,7 +365,7 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
# create a file
f = os.path.join(basedir, "1.txt")
fileutil.write(f, "a"*10)
fileutil.write(f, b"a"*10)
fileinfo = fileutil.get_pathinfo(f)
self.failUnlessTrue(fileinfo.isfile)
self.failUnlessTrue(fileinfo.exists)
@ -381,7 +393,7 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
fileutil.make_dirs(basedir)
f = os.path.join(basedir, "1.txt")
fileutil.write(f, "a"*10)
fileutil.write(f, b"a"*10)
# create a symlink pointing to 1.txt
slname = os.path.join(basedir, "linkto1.txt")
@ -394,7 +406,7 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
def test_encrypted_tempfile(self):
f = EncryptedTemporaryFile()
f.write("foobar")
f.write(b"foobar")
f.close()
@ -409,7 +421,7 @@ class PollMixinTests(unittest.TestCase):
def test_PollMixin_False_then_True(self):
i = iter([False, True])
d = self.pm.poll(check_f=i.next,
d = self.pm.poll(check_f=lambda: next(i),
pollinterval=0.1)
return d
@ -454,6 +466,6 @@ class YAML(unittest.TestCase):
def test_convert(self):
data = yaml.safe_dump(["str", u"unicode", u"\u1234nicode"])
back = yamlutil.safe_load(data)
self.failUnlessEqual(type(back[0]), unicode)
self.failUnlessEqual(type(back[1]), unicode)
self.failUnlessEqual(type(back[2]), unicode)
self.assertIsInstance(back[0], str)
self.assertIsInstance(back[1], str)
self.assertIsInstance(back[2], str)

View File

@ -5,8 +5,6 @@ unknown_rwcap = u"lafs://from_the_future_rw_\u263A".encode('utf-8')
unknown_rocap = u"ro.lafs://readonly_from_the_future_ro_\u263A".encode('utf-8')
unknown_immcap = u"imm.lafs://immutable_from_the_future_imm_\u263A".encode('utf-8')
FAVICON_MARKUP = '<link href="/icon.png" rel="shortcut icon" />'
def assert_soup_has_favicon(testcase, soup):
"""

View File

@ -21,7 +21,12 @@ from allmydata.mutable import publish
from .. import common_util as testutil
from ..common import WebErrorMixin, ShouldFailMixin
from ..no_network import GridTestMixin
from .common import unknown_rwcap, unknown_rocap, unknown_immcap, FAVICON_MARKUP
from .common import (
assert_soup_has_favicon,
unknown_immcap,
unknown_rocap,
unknown_rwcap,
)
DIR_HTML_TAG = '<html lang="en">'
@ -92,7 +97,9 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
def _got_html_good(res):
self.failUnlessIn("Healthy", res)
self.failIfIn("Not Healthy", res)
self.failUnlessIn(FAVICON_MARKUP, res)
soup = BeautifulSoup(res, 'html5lib')
assert_soup_has_favicon(self, soup)
d.addCallback(_got_html_good)
d.addCallback(self.CHECK, "good", "t=check&return_to=somewhere")
def _got_html_good_return_to(res):
@ -235,7 +242,9 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
self.failUnlessIn("Healthy", res)
self.failIfIn("Not Healthy", res)
self.failUnlessIn("No repair necessary", res)
self.failUnlessIn(FAVICON_MARKUP, res)
soup = BeautifulSoup(res, 'html5lib')
assert_soup_has_favicon(self, soup)
d.addCallback(_got_html_good)
d.addCallback(self.CHECK, "sick", "t=check&repair=true")

View File

@ -54,6 +54,9 @@ from .common import (
assert_soup_has_tag_with_attributes,
assert_soup_has_tag_with_content,
assert_soup_has_tag_with_attributes_and_content,
unknown_rwcap,
unknown_rocap,
unknown_immcap,
)
from allmydata.interfaces import IMutableFileNode, SDMF_VERSION, MDMF_VERSION
@ -65,7 +68,6 @@ from ..common_web import (
Error,
)
from allmydata.client import _Client, SecretHolder
from .common import unknown_rwcap, unknown_rocap, unknown_immcap, FAVICON_MARKUP
# create a fake uploader/downloader, and a couple of fake dirnodes, then
# create a webserver that works against them
@ -3262,13 +3264,15 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi
res = yield self.get_operation_results(None, "123", "html")
self.failUnlessIn("Objects Checked: <span>11</span>", res)
self.failUnlessIn("Objects Healthy: <span>11</span>", res)
self.failUnlessIn(FAVICON_MARKUP, res)
soup = BeautifulSoup(res, 'html5lib')
assert_soup_has_favicon(self, soup)
res = yield self.GET("/operations/123/")
# should be the same as without the slash
self.failUnlessIn("Objects Checked: <span>11</span>", res)
self.failUnlessIn("Objects Healthy: <span>11</span>", res)
self.failUnlessIn(FAVICON_MARKUP, res)
soup = BeautifulSoup(res, 'html5lib')
assert_soup_has_favicon(self, soup)
yield self.shouldFail2(error.Error, "one", "404 Not Found",
"No detailed results for SI bogus",
@ -3318,7 +3322,8 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi
self.failUnlessIn("Objects Unhealthy (after repair): <span>0</span>", res)
self.failUnlessIn("Corrupt Shares (after repair): <span>0</span>", res)
self.failUnlessIn(FAVICON_MARKUP, res)
soup = BeautifulSoup(res, 'html5lib')
assert_soup_has_favicon(self, soup)
d.addCallback(_check_html)
return d

View File

@ -1,6 +1,15 @@
"""
Track the port to Python 3.
The two easiest ways to run the part of the test suite which is expected to
pass on Python 3 are::
$ tox -e py36
and::
$ trial allmydata.test.python3_tests
This module has been ported to Python 3.
"""
@ -30,7 +39,9 @@ PORTED_MODULES = [
"allmydata.util.base32",
"allmydata.util.base62",
"allmydata.util.deferredutil",
"allmydata.util.fileutil",
"allmydata.util.dictutil",
"allmydata.util.encodingutil",
"allmydata.util.gcutil",
"allmydata.util.happinessutil",
"allmydata.util.hashutil",
@ -55,6 +66,7 @@ PORTED_TEST_MODULES = [
"allmydata.test.test_crypto",
"allmydata.test.test_deferredutil",
"allmydata.test.test_dictutil",
"allmydata.test.test_encodingutil",
"allmydata.test.test_happiness",
"allmydata.test.test_hashtree",
"allmydata.test.test_hashutil",
@ -68,10 +80,6 @@ PORTED_TEST_MODULES = [
"allmydata.test.test_spans",
"allmydata.test.test_statistics",
"allmydata.test.test_time_format",
"allmydata.test.test_util",
"allmydata.test.test_version",
]
if __name__ == '__main__':
from subprocess import check_call
check_call(["trial"] + PORTED_TEST_MODULES)

View File

@ -1,9 +1,26 @@
"""
Functions used to convert inputs from whatever encoding used in the system to
unicode and back.
Ported to Python 3.
Once Python 2 support is dropped, most of this module will be obsolete, since
Unicode is the default everywhere in Python 3.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.utils import PY2, PY3, native_str
if PY2:
# We omit str() because that seems too tricky to get right.
from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, max, min # noqa: F401
from past.builtins import unicode
import sys, os, re, locale
import unicodedata
from allmydata.util.assertutil import precondition, _assert
from twisted.python import usage
@ -62,13 +79,14 @@ def _reload():
check_encoding(io_encoding)
is_unicode_platform = sys.platform in ["win32", "darwin"]
is_unicode_platform = PY3 or sys.platform in ["win32", "darwin"]
# Despite the Unicode-mode FilePath support added to Twisted in
# <https://twistedmatrix.com/trac/ticket/7805>, we can't yet use
# Unicode-mode FilePaths with INotify on non-Windows platforms
# due to <https://twistedmatrix.com/trac/ticket/7928>.
use_unicode_filepath = sys.platform == "win32"
# due to <https://twistedmatrix.com/trac/ticket/7928>. Supposedly
# 7928 is fixed, though...
use_unicode_filepath = PY3 or sys.platform == "win32"
_reload()
@ -89,7 +107,10 @@ def argv_to_unicode(s):
"""
Decode given argv element to unicode. If this fails, raise a UsageError.
"""
precondition(isinstance(s, str), s)
if isinstance(s, unicode):
return s
precondition(isinstance(s, bytes), s)
try:
return unicode(s, io_encoding)
@ -114,18 +135,22 @@ def unicode_to_argv(s, mangle=False):
If the argument is to be passed to a different process, then the 'mangle' argument
should be true; on Windows, this uses a mangled encoding that will be reversed by
code in runner.py.
On Python 3, just return the string unchanged, since argv is unicode.
"""
precondition(isinstance(s, unicode), s)
if PY3:
return s
if mangle and sys.platform == "win32":
# This must be the same as 'mangle' in bin/tahoe-script.template.
return str(re.sub(u'[^\\x20-\\x7F]', lambda m: u'\x7F%x;' % (ord(m.group(0)),), s))
return bytes(re.sub(u'[^\\x20-\\x7F]', lambda m: u'\x7F%x;' % (ord(m.group(0)),), s), io_encoding)
else:
return s.encode(io_encoding)
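For orientation while reading this hunk (not part of the patch): a minimal sketch of how the two argv helpers now line up, assuming io_encoding was detected as UTF-8.
# Illustrative sketch only -- assumes io_encoding is UTF-8.
from allmydata.util.encodingutil import argv_to_unicode, unicode_to_argv

argv_to_unicode(b"lumi\xc3\xa8re")   # Python 2: bytes decoded via io_encoding -> u"lumi\u00e8re"
argv_to_unicode(u"lumi\u00e8re")     # already unicode: returned unchanged
unicode_to_argv(u"abc")              # Python 2: b"abc" (encoded); Python 3: u"abc" (no-op)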
def unicode_to_url(s):
"""
Encode an unicode object used in an URL.
Encode an unicode object used in an URL to bytes.
"""
# According to RFC 2718, non-ascii characters in URLs must be UTF-8 encoded.
@ -134,19 +159,19 @@ def unicode_to_url(s):
#precondition(isinstance(s, unicode), s)
#return s.encode('utf-8')
def to_str(s):
if s is None or isinstance(s, str):
def to_str(s): # TODO rename to to_bytes
if s is None or isinstance(s, bytes):
return s
return s.encode('utf-8')
def from_utf8_or_none(s):
precondition(isinstance(s, (NoneType, str)), s)
precondition(isinstance(s, bytes) or s is None, s)
if s is None:
return s
return s.decode('utf-8')
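As exercised by TestToFromStr in test_util.py above, each helper's direction at this stage of the port (illustrative sketch only):
# Illustrative sketch only; expected values mirror TestToFromStr.
from allmydata.util.encodingutil import to_str, from_utf8_or_none

to_str(u"lumi\u00E8re")                # b"lumi\xc3\xa8re" -- despite the name, returns bytes
to_str(b"\xFF")                        # b"\xFF" -- bytes pass through, even invalid UTF-8
to_str(None)                           # None
from_utf8_or_none(b"lumi\xc3\xa8re")   # u"lumi\u00E8re"
from_utf8_or_none(None)                # None
from_utf8_or_none(b"\xFF")             # raises UnicodeDecodeError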
PRINTABLE_ASCII = re.compile(r'^[\n\r\x20-\x7E]*$', re.DOTALL)
PRINTABLE_8BIT = re.compile(r'^[\n\r\x20-\x7E\x80-\xFF]*$', re.DOTALL)
PRINTABLE_ASCII = re.compile(br'^[\n\r\x20-\x7E]*$', re.DOTALL)
PRINTABLE_8BIT = re.compile(br'^[\n\r\x20-\x7E\x80-\xFF]*$', re.DOTALL)
def is_printable_ascii(s):
return PRINTABLE_ASCII.search(s) is not None
@ -160,14 +185,14 @@ def unicode_to_output(s):
try:
out = s.encode(io_encoding)
except (UnicodeEncodeError, UnicodeDecodeError):
raise UnicodeEncodeError(io_encoding, s, 0, 0,
"A string could not be encoded as %s for output to the terminal:\n%r" %
(io_encoding, repr(s)))
raise UnicodeEncodeError(native_str(io_encoding), s, 0, 0,
native_str("A string could not be encoded as %s for output to the terminal:\n%r" %
(io_encoding, repr(s))))
if PRINTABLE_8BIT.search(out) is None:
raise UnicodeEncodeError(io_encoding, s, 0, 0,
"A string encoded as %s for output to the terminal contained unsafe bytes:\n%r" %
(io_encoding, repr(s)))
raise UnicodeEncodeError(native_str(io_encoding), s, 0, 0,
native_str("A string encoded as %s for output to the terminal contained unsafe bytes:\n%r" %
(io_encoding, repr(s))))
return out
@ -188,14 +213,17 @@ def _unicode_escape(m, quote_newlines):
else:
return u'\\x%02x' % (codepoint,)
def _str_escape(m, quote_newlines):
def _str_escape(m, quote_newlines): # TODO rename to _bytes_escape
"""
Takes a re match on bytes, the result is escaped bytes of group(0).
"""
c = m.group(0)
if c == '"' or c == '$' or c == '`' or c == '\\':
return '\\' + c
elif c == '\n' and not quote_newlines:
if c == b'"' or c == b'$' or c == b'`' or c == b'\\':
return b'\\' + c
elif c == b'\n' and not quote_newlines:
return c
else:
return '\\x%02x' % (ord(c),)
return b'\\x%02x' % (ord(c),)
MUST_DOUBLE_QUOTE_NL = re.compile(u'[^\\x20-\\x26\\x28-\\x7E\u00A0-\uD7FF\uE000-\uFDCF\uFDF0-\uFFFC]', re.DOTALL)
MUST_DOUBLE_QUOTE = re.compile(u'[^\\n\\x20-\\x26\\x28-\\x7E\u00A0-\uD7FF\uE000-\uFDCF\uFDF0-\uFFFC]', re.DOTALL)
@ -205,7 +233,7 @@ ESCAPABLE_UNICODE = re.compile(u'([\uD800-\uDBFF][\uDC00-\uDFFF])|' # valid sur
u'[^ !#\\x25-\\x5B\\x5D-\\x5F\\x61-\\x7E\u00A0-\uD7FF\uE000-\uFDCF\uFDF0-\uFFFC]',
re.DOTALL)
ESCAPABLE_8BIT = re.compile( r'[^ !#\x25-\x5B\x5D-\x5F\x61-\x7E]', re.DOTALL)
ESCAPABLE_8BIT = re.compile( br'[^ !#\x25-\x5B\x5D-\x5F\x61-\x7E]', re.DOTALL)
def quote_output(s, quotemarks=True, quote_newlines=None, encoding=None):
"""
@ -221,32 +249,32 @@ def quote_output(s, quotemarks=True, quote_newlines=None, encoding=None):
If not explicitly given, quote_newlines is True when quotemarks is True.
"""
precondition(isinstance(s, (str, unicode)), s)
precondition(isinstance(s, (bytes, unicode)), s)
if quote_newlines is None:
quote_newlines = quotemarks
if isinstance(s, str):
if isinstance(s, bytes):
try:
s = s.decode('utf-8')
except UnicodeDecodeError:
return 'b"%s"' % (ESCAPABLE_8BIT.sub(lambda m: _str_escape(m, quote_newlines), s),)
return b'b"%s"' % (ESCAPABLE_8BIT.sub(lambda m: _str_escape(m, quote_newlines), s),)
must_double_quote = quote_newlines and MUST_DOUBLE_QUOTE_NL or MUST_DOUBLE_QUOTE
if must_double_quote.search(s) is None:
try:
out = s.encode(encoding or io_encoding)
if quotemarks or out.startswith('"'):
return "'%s'" % (out,)
if quotemarks or out.startswith(b'"'):
return b"'%s'" % (out,)
else:
return out
except (UnicodeDecodeError, UnicodeEncodeError):
pass
escaped = ESCAPABLE_UNICODE.sub(lambda m: _unicode_escape(m, quote_newlines), s)
return '"%s"' % (escaped.encode(encoding or io_encoding, 'backslashreplace'),)
return b'"%s"' % (escaped.encode(encoding or io_encoding, 'backslashreplace'),)
def quote_path(path, quotemarks=True):
return quote_output("/".join(map(to_str, path)), quotemarks=quotemarks, quote_newlines=True)
return quote_output(b"/".join(map(to_str, path)), quotemarks=quotemarks, quote_newlines=True)
def quote_local_unicode_path(path, quotemarks=True):
precondition(isinstance(path, unicode), path)
@ -275,7 +303,7 @@ def extend_filepath(fp, segments):
return fp
def to_filepath(path):
precondition(isinstance(path, unicode if use_unicode_filepath else basestring),
precondition(isinstance(path, unicode if use_unicode_filepath else (bytes, unicode)),
path=path)
if isinstance(path, unicode) and not use_unicode_filepath:
@ -290,7 +318,7 @@ def to_filepath(path):
return FilePath(path)
def _decode(s):
precondition(isinstance(s, basestring), s=s)
precondition(isinstance(s, (bytes, unicode)), s=s)
if isinstance(s, bytes):
return s.decode(filesystem_encoding)
@ -356,3 +384,9 @@ def listdir_unicode(path):
def listdir_filepath(fp):
return listdir_unicode(unicode_from_filepath(fp))
# 'x' at the end of a variable name indicates that it holds a Unicode string that may not
# be NFC-normalized.
def normalize(namex):
return unicodedata.normalize('NFC', namex)

View File

@ -1,9 +1,19 @@
from __future__ import print_function
"""
Ported to Python3.
Futz with files like a pro.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from future.utils import PY2
if PY2:
# open is not here because we want to use native strings on Py2
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
import sys, os, stat, tempfile, time, binascii
import six
from collections import namedtuple
@ -253,6 +263,9 @@ def move_into_place(source, dest):
os.rename(source, dest)
def write_atomically(target, contents, mode="b"):
assert (
isinstance(contents, bytes) and "b" in mode or
isinstance(contents, str) and "t" in mode or mode == ""), (type(contents), mode)
with open(target+".tmp", "w"+mode) as f:
f.write(contents)
move_into_place(target+".tmp", target)
@ -277,7 +290,7 @@ def put_file(path, inf):
outf.write(data)
def precondition_abspath(path):
if not isinstance(path, unicode):
if not isinstance(path, str):
raise AssertionError("an abspath must be a Unicode string")
if sys.platform == "win32":
@ -309,7 +322,7 @@ def abspath_expanduser_unicode(path, base=None, long_path=True):
abspath_expanduser_unicode.
On Windows, the result will be a long path unless long_path is given as False.
"""
if not isinstance(path, unicode):
if not isinstance(path, str):
raise AssertionError("paths must be Unicode strings")
if base is not None and long_path:
precondition_abspath(base)
@ -330,7 +343,10 @@ def abspath_expanduser_unicode(path, base=None, long_path=True):
if not os.path.isabs(path):
if base is None:
path = os.path.join(os.getcwdu(), path)
cwd = os.getcwd()
if PY2:
cwd = cwd.decode('utf8')
path = os.path.join(cwd, path)
else:
path = os.path.join(base, path)
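A usage sketch (not part of the patch) matching the assertions in FileUtil.test_abspath_expanduser_unicode above; after the port, str here means text on both Pythons:
# Illustrative sketch only.
from allmydata.util import fileutil

p = fileutil.abspath_expanduser_unicode(u"~/data")
assert isinstance(p, str)                           # always a text path, never bytes
fileutil.abspath_expanduser_unicode(b"bytestring")  # AssertionError: paths must be Unicode strings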
@ -415,7 +431,7 @@ ERROR_ENVVAR_NOT_FOUND = 203
def windows_getenv(name):
# Based on <http://stackoverflow.com/questions/2608200/problems-with-umlauts-in-python-appdata-environvent-variable/2608368#2608368>,
# with improved error handling. Returns None if there is no environment variable of the given name.
if not isinstance(name, unicode):
if not isinstance(name, str):
raise AssertionError("name must be Unicode")
n = GetEnvironmentVariableW(name, None, 0)

View File

@ -20,10 +20,9 @@ from future.utils import PY2
if PY2:
from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
from functools import reduce
from allmydata.util.mathutil import round_sigfigs
import math
from functools import reduce
import sys
def pr_file_loss(p_list, k):

View File

@ -1,4 +1,4 @@
<html xmlns:n="http://nevow.com/ns/nevow/0.1">
<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">
<head>
<title>Tahoe-LAFS - Check Results</title>
<link href="/tahoe.css" rel="stylesheet" type="text/css"/>
@ -7,17 +7,17 @@
</head>
<body>
<h1>File Check-And-Repair Results for SI=<span n:render="storage_index" /></h1>
<h1>File Check-And-Repair Results for SI=<span t:render="storage_index" /></h1>
<div n:render="summary" />
<div t:render="summary" />
<div n:render="repair_results" />
<div t:render="repair_results" />
<div n:render="post_repair_results" />
<div t:render="post_repair_results" />
<div n:render="maybe_pre_repair_results" />
<div t:render="maybe_pre_repair_results" />
<div n:render="return" />
<div t:render="return_to" />
</body>
</html>

View File

@ -1,4 +1,4 @@
<html xmlns:n="http://nevow.com/ns/nevow/0.1">
<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">
<head>
<title>Tahoe-LAFS - Check Results</title>
<link href="/tahoe.css" rel="stylesheet" type="text/css"/>
@ -7,17 +7,17 @@
</head>
<body>
<h1>File Check Results for SI=<span n:render="storage_index" /></h1>
<h1>File Check Results for SI=<span t:render="storage_index" /></h1>
<div>
<span n:render="summary" />
<span t:render="summary" />
</div>
<div n:render="repair" />
<div t:render="repair" />
<div n:render="results" />
<div t:render="results" />
<div n:render="return" />
<div t:render="return_to" />
</body>
</html>
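The n:render to t:render switch in these templates pairs with the twisted.web.template Element classes defined in check_results.py below. Roughly, and with hypothetical names (CheckResultsExample and render_check_results are not in the patch):
# Simplified sketch of the pattern; the real classes follow in check_results.py.
from twisted.python.filepath import FilePath
from twisted.web.template import Element, XMLFile, renderer, renderElement

class CheckResultsExample(Element):
    # Each t:render="name" attribute in the template resolves to the
    # @renderer method of the same name on this Element.
    loader = XMLFile(FilePath(__file__).sibling("check-results.xhtml"))

    @renderer
    def summary(self, request, tag):
        return tag("Healthy")

def render_check_results(request):
    # renderElement renders the template into the Twisted web request.
    return renderElement(request, CheckResultsExample())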

View File

@ -1,12 +1,35 @@
import time
import json
from nevow import rend, inevow, tags as T
from twisted.web import http, html
from allmydata.web.common import getxmlfile, get_arg, get_root, WebError
from twisted.web import (
http,
html,
)
from twisted.python.filepath import FilePath
from twisted.web.template import (
Element,
XMLFile,
renderer,
renderElement,
tags,
)
from allmydata.web.common import (
get_arg,
get_root,
WebError,
MultiFormatResource,
SlotsSequenceElement,
)
from allmydata.web.operations import ReloadMixin
from allmydata.interfaces import ICheckAndRepairResults, ICheckResults
from allmydata.util import base32, dictutil
from allmydata.interfaces import (
ICheckAndRepairResults,
ICheckResults,
)
from allmydata.util import (
base32,
dictutil,
)
def json_check_counts(r):
@ -64,53 +87,64 @@ def json_check_and_repair_results(r):
return data
class ResultsBase(object):
# self.client must point to the Client, so we can get nicknames and
# self._client must point to the Client, so we can get nicknames and
# determine the permuted peer order
def _join_pathstring(self, path):
"""
:param tuple path: a path represented by a tuple, such as
``(u'some', u'dir', u'file')``.
:return: a string joined by path separators, such as
``u'some/dir/file'``.
"""
if path:
pathstring = "/".join(self._html(path))
else:
pathstring = "<root>"
return pathstring
def _render_results(self, ctx, cr):
def _render_results(self, req, cr):
assert ICheckResults(cr)
c = self.client
c = self._client
sb = c.get_storage_broker()
r = []
def add(name, value):
r.append(T.li[name + ": ", value])
r.append(tags.li(name + ": ", value))
add("Report", tags.pre("\n".join(self._html(cr.get_report()))))
add("Report", T.pre["\n".join(self._html(cr.get_report()))])
add("Share Counts",
"need %d-of-%d, have %d" % (cr.get_encoding_needed(),
cr.get_encoding_expected(),
cr.get_share_counter_good()))
add("Happiness Level", cr.get_happiness())
add("Hosts with good shares", cr.get_host_counter_good_shares())
add("Happiness Level", str(cr.get_happiness()))
add("Hosts with good shares", str(cr.get_host_counter_good_shares()))
if cr.get_corrupt_shares():
badsharemap = []
for (s, si, shnum) in cr.get_corrupt_shares():
d = T.tr[T.td["sh#%d" % shnum],
T.td[T.div(class_="nickname")[s.get_nickname()],
T.div(class_="nodeid")[T.tt[s.get_name()]]],
]
d = tags.tr(tags.td("sh#%d" % shnum),
tags.td(tags.div(s.get_nickname(), class_="nickname"),
tags.div(tags.tt(s.get_name()), class_="nodeid")),)
badsharemap.append(d)
add("Corrupt shares", T.table()[
T.tr[T.th["Share ID"],
T.th(class_="nickname-and-peerid")[T.div["Nickname"], T.div(class_="nodeid")["Node ID"]]],
badsharemap])
add("Corrupt shares",
tags.table(
tags.tr(tags.th("Share ID"),
tags.th((tags.div("Nickname"), tags.div("Node ID", class_="nodeid")), class_="nickname-and-peerid")),
badsharemap))
else:
add("Corrupt shares", "none")
add("Wrong Shares", cr.get_share_counter_wrong())
add("Wrong Shares", str(cr.get_share_counter_wrong()))
sharemap_data = []
shares_on_server = dictutil.DictOfSets()
# FIXME: The two tables below contain nickname-and-nodeid table column markup which is duplicated with each other, introducer.xhtml, and deep-check-results.xhtml. All of these (and any other presentations of nickname-and-nodeid) should be combined.
# FIXME: The two tables below contain nickname-and-nodeid
# table column markup which is duplicated with each other,
# introducer.xhtml, and deep-check-results.xhtml. All of these
# (and any other presentations of nickname-and-nodeid) should be combined.
for shareid in sorted(cr.get_sharemap().keys()):
servers = sorted(cr.get_sharemap()[shareid],
@ -119,19 +153,20 @@ class ResultsBase(object):
shares_on_server.add(s, shareid)
shareid_s = ""
if i == 0:
shareid_s = shareid
d = T.tr[T.td[shareid_s],
T.td[T.div(class_="nickname")[s.get_nickname()],
T.div(class_="nodeid")[T.tt[s.get_name()]]]
]
shareid_s = str(shareid)
d = tags.tr(tags.td(shareid_s),
tags.td(tags.div(s.get_nickname(), class_="nickname"),
tags.div(tags.tt(s.get_name()), class_="nodeid")))
sharemap_data.append(d)
add("Good Shares (sorted in share order)",
T.table()[T.tr[T.th["Share ID"], T.th(class_="nickname-and-peerid")[T.div["Nickname"], T.div(class_="nodeid")["Node ID"]]],
sharemap_data])
tags.table(tags.tr(tags.th("Share ID"),
tags.th(tags.div("Nickname"),
tags.div("Node ID", class_="nodeid"), class_="nickname-and-peerid")),
sharemap_data))
add("Recoverable Versions", cr.get_version_counter_recoverable())
add("Unrecoverable Versions", cr.get_version_counter_unrecoverable())
add("Recoverable Versions", str(cr.get_version_counter_recoverable()))
add("Unrecoverable Versions", str(cr.get_version_counter_unrecoverable()))
# this table is sorted by permuted order
permuted_servers = [s
@ -144,20 +179,23 @@ class ResultsBase(object):
for s in permuted_servers:
shareids = list(shares_on_server.get(s, []))
shareids.reverse()
shareids_s = [ T.tt[shareid, " "] for shareid in sorted(shareids) ]
d = T.tr[T.td[T.div(class_="nickname")[s.get_nickname()],
T.div(class_="nodeid")[T.tt[s.get_name()]]],
T.td[shareids_s],
]
shareids_s = [tags.tt(str(shareid), " ") for shareid in sorted(shareids)]
d = tags.tr(tags.td(tags.div(s.get_nickname(), class_="nickname"),
tags.div(tags.tt(s.get_name()), class_="nodeid")),
tags.td(shareids_s), )
servermap.append(d)
num_shares_left -= len(shareids)
if not num_shares_left:
break
add("Share Balancing (servers in permuted order)",
T.table()[T.tr[T.th(class_="nickname-and-peerid")[T.div["Nickname"], T.div(class_="nodeid")["Node ID"]], T.th["Share IDs"]],
servermap])
return T.ul[r]
add("Share Balancing (servers in permuted order)",
tags.table(tags.tr(tags.th(tags.div("Nickname"),
tags.div("Node ID", class_="nodeid"), class_="nickname-and-peerid"),
tags.th("Share IDs")),
servermap))
return tags.ul(r)
def _html(self, s):
if isinstance(s, (str, unicode)):
@ -165,91 +203,114 @@ class ResultsBase(object):
assert isinstance(s, (list, tuple))
return [html.escape(w) for w in s]
def want_json(self, ctx):
output = get_arg(inevow.IRequest(ctx), "output", "").lower()
if output.lower() == "json":
return True
return False
def _render_si_link(self, ctx, storage_index):
def _render_si_link(self, req, storage_index):
si_s = base32.b2a(storage_index)
req = inevow.IRequest(ctx)
ophandle = req.prepath[-1]
target = "%s/operations/%s/%s" % (get_root(ctx), ophandle, si_s)
output = get_arg(ctx, "output")
target = "%s/operations/%s/%s" % (get_root(req), ophandle, si_s)
output = get_arg(req, "output")
if output:
target = target + "?output=%s" % output
return T.a(href=target)[si_s]
return tags.a(si_s, href=target)
class LiteralCheckResultsRenderer(rend.Page, ResultsBase):
docFactory = getxmlfile("literal-check-results.xhtml")
class LiteralCheckResultsRenderer(MultiFormatResource, ResultsBase):
formatArgument = "output"
def __init__(self, client):
self.client = client
rend.Page.__init__(self, client)
"""
:param allmydata.interfaces.IStatsProducer client: stats provider.
"""
super(LiteralCheckResultsRenderer, self).__init__()
self._client = client
def renderHTTP(self, ctx):
if self.want_json(ctx):
return self.json(ctx)
return rend.Page.renderHTTP(self, ctx)
def render_HTML(self, req):
return renderElement(req, LiteralCheckResultsRendererElement())
def json(self, ctx):
inevow.IRequest(ctx).setHeader("content-type", "text/plain")
def render_JSON(self, req):
req.setHeader("content-type", "text/plain")
data = json_check_results(None)
return json.dumps(data, indent=1) + "\n"
def render_return(self, ctx, data):
req = inevow.IRequest(ctx)
class LiteralCheckResultsRendererElement(Element):
loader = XMLFile(FilePath(__file__).sibling("literal-check-results.xhtml"))
def __init__(self):
super(LiteralCheckResultsRendererElement, self).__init__()
@renderer
def return_to(self, req, tag):
return_to = get_arg(req, "return_to", None)
if return_to:
return T.div[T.a(href=return_to)["Return to file."]]
return tags.div(tags.a("Return to file.", href=return_to))
return ""
class CheckerBase(object):
def renderHTTP(self, ctx):
if self.want_json(ctx):
return self.json(ctx)
return rend.Page.renderHTTP(self, ctx)
@renderer
def storage_index(self, req, tag):
return self._results.get_storage_index_string()
def render_storage_index(self, ctx, data):
return self.r.get_storage_index_string()
def render_return(self, ctx, data):
req = inevow.IRequest(ctx)
@renderer
def return_to(self, req, tag):
return_to = get_arg(req, "return_to", None)
if return_to:
return T.div[T.a(href=return_to)["Return to file/directory."]]
return tags.div(tags.a("Return to file/directory.", href=return_to))
return ""
class CheckResultsRenderer(CheckerBase, rend.Page, ResultsBase):
docFactory = getxmlfile("check-results.xhtml")
class CheckResultsRenderer(MultiFormatResource):
formatArgument = "output"
def __init__(self, client, results):
self.client = client
self.r = ICheckResults(results)
rend.Page.__init__(self, results)
"""
:param allmydata.interfaces.IStatsProducer client: stats provider.
:param allmydata.interfaces.ICheckResults results: results of check/verify operation.
"""
super(CheckResultsRenderer, self).__init__()
self._client = client
self._results = ICheckResults(results)
def json(self, ctx):
inevow.IRequest(ctx).setHeader("content-type", "text/plain")
data = json_check_results(self.r)
def render_HTML(self, req):
return renderElement(req, CheckResultsRendererElement(self._client, self._results))
def render_JSON(self, req):
req.setHeader("content-type", "text/plain")
data = json_check_results(self._results)
return json.dumps(data, indent=1) + "\n"
def render_summary(self, ctx, data):
class CheckResultsRendererElement(Element, CheckerBase, ResultsBase):
loader = XMLFile(FilePath(__file__).sibling("check-results.xhtml"))
def __init__(self, client, results):
super(CheckResultsRendererElement, self).__init__()
self._client = client
self._results = results
@renderer
def summary(self, req, tag):
results = []
if data.is_healthy():
if self._results.is_healthy():
results.append("Healthy")
elif data.is_recoverable():
elif self._results.is_recoverable():
results.append("Not Healthy!")
else:
results.append("Not Recoverable!")
results.append(" : ")
results.append(self._html(data.get_summary()))
return ctx.tag[results]
results.append(self._html(self._results.get_summary()))
return tag(results)
def render_repair(self, ctx, data):
if data.is_healthy():
@renderer
def repair(self, req, tag):
if self._results.is_healthy():
return ""
#repair = T.form(action=".", method="post",
# enctype="multipart/form-data")[
# T.fieldset[
@ -258,30 +319,52 @@ class CheckResultsRenderer(CheckerBase, rend.Page, ResultsBase):
# T.input(type="submit", value="Repair"),
# ]]
#return ctx.tag[repair]
return "" # repair button disabled until we make it work correctly,
# see #622 for details
def render_results(self, ctx, data):
cr = self._render_results(ctx, data)
return ctx.tag[cr]
@renderer
def results(self, req, tag):
cr = self._render_results(req, self._results)
return tag(cr)
class CheckAndRepairResultsRenderer(CheckerBase, rend.Page, ResultsBase):
docFactory = getxmlfile("check-and-repair-results.xhtml")
class CheckAndRepairResultsRenderer(MultiFormatResource):
formatArgument = "output"
def __init__(self, client, results):
self.client = client
self.r = None
"""
:param allmydata.interfaces.IStatsProducer client: stats provider.
:param allmydata.interfaces.ICheckResults results: check/verify results.
"""
super(CheckAndRepairResultsRenderer, self).__init__()
self._client = client
self._results = None
if results:
self.r = ICheckAndRepairResults(results)
rend.Page.__init__(self, results)
self._results = ICheckAndRepairResults(results)
def json(self, ctx):
inevow.IRequest(ctx).setHeader("content-type", "text/plain")
data = json_check_and_repair_results(self.r)
def render_HTML(self, req):
elem = CheckAndRepairResultsRendererElement(self._client, self._results)
return renderElement(req, elem)
def render_JSON(self, req):
req.setHeader("content-type", "text/plain")
data = json_check_and_repair_results(self._results)
return json.dumps(data, indent=1) + "\n"
def render_summary(self, ctx, data):
cr = data.get_post_repair_results()
class CheckAndRepairResultsRendererElement(Element, CheckerBase, ResultsBase):
loader = XMLFile(FilePath(__file__).sibling("check-and-repair-results.xhtml"))
def __init__(self, client, results):
super(CheckAndRepairResultsRendererElement, self).__init__()
self._client = client
self._results = results
@renderer
def summary(self, req, tag):
cr = self._results.get_post_repair_results()
results = []
if cr.is_healthy():
results.append("Healthy")
@ -291,35 +374,44 @@ class CheckAndRepairResultsRenderer(CheckerBase, rend.Page, ResultsBase):
results.append("Not Recoverable!")
results.append(" : ")
results.append(self._html(cr.get_summary()))
return ctx.tag[results]
return tag(results)
def render_repair_results(self, ctx, data):
if data.get_repair_attempted():
if data.get_repair_successful():
return ctx.tag["Repair successful"]
@renderer
def repair_results(self, req, tag):
if self._results.get_repair_attempted():
if self._results.get_repair_successful():
return tag("Repair successful")
else:
return ctx.tag["Repair unsuccessful"]
return ctx.tag["No repair necessary"]
return tag("Repair unsuccessful")
return tag("No repair necessary")
def render_post_repair_results(self, ctx, data):
cr = self._render_results(ctx, data.get_post_repair_results())
return ctx.tag[T.div["Post-Repair Checker Results:"], cr]
@renderer
def post_repair_results(self, req, tag):
cr = self._render_results(req, self._results.get_post_repair_results())
return tag(tags.div("Post-Repair Checker Results:"), cr)
def render_maybe_pre_repair_results(self, ctx, data):
if data.get_repair_attempted():
cr = self._render_results(ctx, data.get_pre_repair_results())
return ctx.tag[T.div["Pre-Repair Checker Results:"], cr]
@renderer
def maybe_pre_repair_results(self, req, tag):
if self._results.get_repair_attempted():
cr = self._render_results(req, self._results.get_pre_repair_results())
return tag(tags.div("Pre-Repair Checker Results:"), cr)
return ""
class DeepCheckResultsRenderer(rend.Page, ResultsBase, ReloadMixin):
docFactory = getxmlfile("deep-check-results.xhtml")
class DeepCheckResultsRenderer(MultiFormatResource):
formatArgument = "output"
def __init__(self, client, monitor):
self.client = client
"""
:param allmydata.interfaces.IStatsProducer client: stats provider.
:param allmydata.monitor.IMonitor monitor: status, progress, and cancellation provider.
"""
super(DeepCheckResultsRenderer, self).__init__()
self._client = client
self.monitor = monitor
def childFactory(self, ctx, name):
def getChild(self, name, req):
if not name:
return self
# /operation/$OPHANDLE/$STORAGEINDEX provides detailed information
@ -327,19 +419,18 @@ class DeepCheckResultsRenderer(rend.Page, ResultsBase, ReloadMixin):
si = base32.a2b(name)
r = self.monitor.get_status()
try:
return CheckResultsRenderer(self.client,
return CheckResultsRenderer(self._client,
r.get_results_for_storage_index(si))
except KeyError:
raise WebError("No detailed results for SI %s" % html.escape(name),
http.NOT_FOUND)
def renderHTTP(self, ctx):
if self.want_json(ctx):
return self.json(ctx)
return rend.Page.renderHTTP(self, ctx)
def render_HTML(self, req):
elem = DeepCheckResultsRendererElement(self.monitor)
return renderElement(req, elem)
def json(self, ctx):
inevow.IRequest(ctx).setHeader("content-type", "text/plain")
def render_JSON(self, req):
req.setHeader("content-type", "text/plain")
data = {}
data["finished"] = self.monitor.is_finished()
res = self.monitor.get_status()
@ -361,116 +452,170 @@ class DeepCheckResultsRenderer(rend.Page, ResultsBase, ReloadMixin):
data["stats"] = res.get_stats()
return json.dumps(data, indent=1) + "\n"
def render_root_storage_index(self, ctx, data):
class DeepCheckResultsRendererElement(Element, ResultsBase, ReloadMixin):
loader = XMLFile(FilePath(__file__).sibling("deep-check-results.xhtml"))
def __init__(self, monitor):
super(DeepCheckResultsRendererElement, self).__init__()
self.monitor = monitor
@renderer
def root_storage_index(self, req, tag):
if not self.monitor.get_status():
return ""
return self.monitor.get_status().get_root_storage_index_string()
def data_objects_checked(self, ctx, data):
return self.monitor.get_status().get_counters()["count-objects-checked"]
def data_objects_healthy(self, ctx, data):
return self.monitor.get_status().get_counters()["count-objects-healthy"]
def data_objects_unhealthy(self, ctx, data):
return self.monitor.get_status().get_counters()["count-objects-unhealthy"]
def data_objects_unrecoverable(self, ctx, data):
return self.monitor.get_status().get_counters()["count-objects-unrecoverable"]
def _get_monitor_counter(self, name):
if not self.monitor.get_status():
return ""
return str(self.monitor.get_status().get_counters().get(name))
def data_count_corrupt_shares(self, ctx, data):
return self.monitor.get_status().get_counters()["count-corrupt-shares"]
@renderer
def objects_checked(self, req, tag):
return self._get_monitor_counter("count-objects-checked")
def render_problems_p(self, ctx, data):
c = self.monitor.get_status().get_counters()
if c["count-objects-unhealthy"]:
return ctx.tag
@renderer
def objects_healthy(self, req, tag):
return self._get_monitor_counter("count-objects-healthy")
@renderer
def objects_unhealthy(self, req, tag):
return self._get_monitor_counter("count-objects-unhealthy")
@renderer
def objects_unrecoverable(self, req, tag):
return self._get_monitor_counter("count-objects-unrecoverable")
@renderer
def count_corrupt_shares(self, req, tag):
return self._get_monitor_counter("count-corrupt-shares")
@renderer
def problems_p(self, req, tag):
if self._get_monitor_counter("count-objects-unhealthy"):
return tag
return ""
def data_problems(self, ctx, data):
@renderer
def problems(self, req, tag):
all_objects = self.monitor.get_status().get_all_results()
problems = []
for path in sorted(all_objects.keys()):
cr = all_objects[path]
assert ICheckResults.providedBy(cr)
if not cr.is_healthy():
yield path, cr
summary_text = ""
summary = cr.get_summary()
if summary:
summary_text = ": " + summary
summary_text += " [SI: %s]" % cr.get_storage_index_string()
problems.append({
# Not sure self._join_pathstring(path) is the
# right thing to use here.
"problem": self._join_pathstring(path) + self._html(summary_text),
})
def render_problem(self, ctx, data):
path, cr = data
summary_text = ""
summary = cr.get_summary()
if summary:
summary_text = ": " + summary
summary_text += " [SI: %s]" % cr.get_storage_index_string()
return ctx.tag[self._join_pathstring(path), self._html(summary_text)]
return SlotsSequenceElement(tag, problems)
def render_servers_with_corrupt_shares_p(self, ctx, data):
if self.monitor.get_status().get_counters()["count-corrupt-shares"]:
return ctx.tag
@renderer
def servers_with_corrupt_shares_p(self, req, tag):
if self._get_monitor_counter("count-corrupt-shares"):
return tag
return ""
def data_servers_with_corrupt_shares(self, ctx, data):
@renderer
def servers_with_corrupt_shares(self, req, tag):
servers = [s
for (s, storage_index, sharenum)
in self.monitor.get_status().get_corrupt_shares()]
servers.sort(key=lambda s: s.get_longname())
return servers
def render_server_problem(self, ctx, server):
data = [server.get_name()]
nickname = server.get_nickname()
if nickname:
data.append(" (%s)" % self._html(nickname))
return ctx.tag[data]
problems = []
for server in servers:
name = [server.get_name()]
nickname = server.get_nickname()
if nickname:
name.append(" (%s)" % self._html(nickname))
problems.append({"problem": name})
def render_corrupt_shares_p(self, ctx, data):
if self.monitor.get_status().get_counters()["count-corrupt-shares"]:
return ctx.tag
return SlotsSequenceElement(tag, problems)
@renderer
def corrupt_shares_p(self, req, tag):
if self._get_monitor_counter("count-corrupt-shares"):
return tag
return ""
def data_corrupt_shares(self, ctx, data):
return self.monitor.get_status().get_corrupt_shares()
def render_share_problem(self, ctx, data):
server, storage_index, sharenum = data
nickname = server.get_nickname()
ctx.fillSlots("serverid", server.get_name())
if nickname:
ctx.fillSlots("nickname", self._html(nickname))
ctx.fillSlots("si", self._render_si_link(ctx, storage_index))
ctx.fillSlots("shnum", str(sharenum))
return ctx.tag
def render_return(self, ctx, data):
req = inevow.IRequest(ctx)
@renderer
def corrupt_shares(self, req, tag):
shares = self.monitor.get_status().get_corrupt_shares()
problems = []
for share in shares:
server, storage_index, sharenum = share
nickname = server.get_nickname()
problem = {
"serverid": server.get_name(),
"nickname": self._html(nickname),
"si": self._render_si_link(req, storage_index),
"shnum": str(sharenum),
}
problems.append(problem)
return SlotsSequenceElement(tag, problems)
@renderer
def return_to(self, req, tag):
return_to = get_arg(req, "return_to", None)
if return_to:
return T.div[T.a(href=return_to)["Return to file/directory."]]
return tags.div(tags.a("Return to file/directory.", href=return_to))
return ""
def data_all_objects(self, ctx, data):
r = self.monitor.get_status().get_all_results()
for path in sorted(r.keys()):
yield (path, r[path])
@renderer
def all_objects(self, req, tag):
results = self.monitor.get_status().get_all_results()
objects = []
def render_object(self, ctx, data):
path, r = data
ctx.fillSlots("path", self._join_pathstring(path))
ctx.fillSlots("healthy", str(r.is_healthy()))
ctx.fillSlots("recoverable", str(r.is_recoverable()))
storage_index = r.get_storage_index()
ctx.fillSlots("storage_index", self._render_si_link(ctx, storage_index))
ctx.fillSlots("summary", self._html(r.get_summary()))
return ctx.tag
for path in sorted(results.keys()):
result = results.get(path)
storage_index = result.get_storage_index()
object = {
"path": self._join_pathstring(path),
"healthy": str(result.is_healthy()),
"recoverable": str(result.is_recoverable()),
"storage_index": self._render_si_link(req, storage_index),
"summary": self._html(result.get_summary()),
}
objects.append(object)
def render_runtime(self, ctx, data):
req = inevow.IRequest(ctx)
runtime = time.time() - req.processing_started_timestamp
return ctx.tag["runtime: %s seconds" % runtime]
return SlotsSequenceElement(tag, objects)
class DeepCheckAndRepairResultsRenderer(rend.Page, ResultsBase, ReloadMixin):
docFactory = getxmlfile("deep-check-and-repair-results.xhtml")
@renderer
def runtime(self, req, tag):
runtime = 'unknown'
if hasattr(req, 'processing_started_timestamp'):
runtime = time.time() - req.processing_started_timestamp
return tag("runtime: %s seconds" % runtime)
class DeepCheckAndRepairResultsRenderer(MultiFormatResource):
formatArgument = "output"
def __init__(self, client, monitor):
self.client = client
"""
:param allmydata.interfaces.IStatsProducer client: stats provider.
:param allmydata.monitor.IMonitor monitor: status, progress, and cancellation provider.
"""
super(DeepCheckAndRepairResultsRenderer, self).__init__()
self._client = client
self.monitor = monitor
def childFactory(self, ctx, name):
def getChild(self, name, req):
if not name:
return self
# /operation/$OPHANDLE/$STORAGEINDEX provides detailed information
@@ -479,18 +624,17 @@ class DeepCheckAndRepairResultsRenderer(rend.Page, ResultsBase, ReloadMixin):
s = self.monitor.get_status()
try:
results = s.get_results_for_storage_index(si)
return CheckAndRepairResultsRenderer(self.client, results)
return CheckAndRepairResultsRenderer(self._client, results)
except KeyError:
raise WebError("No detailed results for SI %s" % html.escape(name),
http.NOT_FOUND)
def renderHTTP(self, ctx):
if self.want_json(ctx):
return self.json(ctx)
return rend.Page.renderHTTP(self, ctx)
def render_HTML(self, req):
elem = DeepCheckAndRepairResultsRendererElement(self.monitor)
return renderElement(req, elem)
def json(self, ctx):
inevow.IRequest(ctx).setHeader("content-type", "text/plain")
def render_JSON(self, req):
req.setHeader("content-type", "text/plain")
res = self.monitor.get_status()
data = {}
data["finished"] = self.monitor.is_finished()
@@ -531,119 +675,132 @@ class DeepCheckAndRepairResultsRenderer(rend.Page, ResultsBase, ReloadMixin):
data["stats"] = res.get_stats()
return json.dumps(data, indent=1) + "\n"
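
The render_HTML/render_JSON pair above follows the MultiFormatResource convention of selecting a render_* method from the query argument named by formatArgument (here "output"). A rough sketch of that dispatch, for illustration only and not the allmydata.web.common implementation:

# Illustrative only: shows the ?output= dispatch convention, not the
# actual MultiFormatResource code in allmydata.web.common.
from twisted.web.resource import Resource

class FormatDispatchSketch(Resource):
    formatArgument = "output"

    def render_GET(self, req):
        # req.args maps bytes keys to lists of bytes values in twisted.web.
        arg = req.args.get(self.formatArgument.encode("ascii"), [b""])[0]
        if arg.lower() == b"json":
            return self.render_JSON(req)
        return self.render_HTML(req)

    def render_HTML(self, req):
        return b"<html><body>demo</body></html>"

    def render_JSON(self, req):
        req.setHeader("content-type", "text/plain")
        return b"{}\n"
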
def render_root_storage_index(self, ctx, data):
return self.monitor.get_status().get_root_storage_index_string()
def data_objects_checked(self, ctx, data):
return self.monitor.get_status().get_counters()["count-objects-checked"]
class DeepCheckAndRepairResultsRendererElement(DeepCheckResultsRendererElement):
"""
The page generated here shares several elements with the "deep check
results" page; hence the code reuse.
"""
def data_objects_healthy(self, ctx, data):
return self.monitor.get_status().get_counters()["count-objects-healthy-pre-repair"]
def data_objects_unhealthy(self, ctx, data):
return self.monitor.get_status().get_counters()["count-objects-unhealthy-pre-repair"]
def data_corrupt_shares(self, ctx, data):
return self.monitor.get_status().get_counters()["count-corrupt-shares-pre-repair"]
loader = XMLFile(FilePath(__file__).sibling("deep-check-and-repair-results.xhtml"))
def data_repairs_attempted(self, ctx, data):
return self.monitor.get_status().get_counters()["count-repairs-attempted"]
def data_repairs_successful(self, ctx, data):
return self.monitor.get_status().get_counters()["count-repairs-successful"]
def data_repairs_unsuccessful(self, ctx, data):
return self.monitor.get_status().get_counters()["count-repairs-unsuccessful"]
def __init__(self, monitor):
super(DeepCheckAndRepairResultsRendererElement, self).__init__(monitor)
self.monitor = monitor
def data_objects_healthy_post(self, ctx, data):
return self.monitor.get_status().get_counters()["count-objects-healthy-post-repair"]
def data_objects_unhealthy_post(self, ctx, data):
return self.monitor.get_status().get_counters()["count-objects-unhealthy-post-repair"]
def data_corrupt_shares_post(self, ctx, data):
return self.monitor.get_status().get_counters()["count-corrupt-shares-post-repair"]
@renderer
def objects_healthy(self, req, tag):
return self._get_monitor_counter("count-objects-healthy-pre-repair")
def render_pre_repair_problems_p(self, ctx, data):
c = self.monitor.get_status().get_counters()
if c["count-objects-unhealthy-pre-repair"]:
return ctx.tag
@renderer
def objects_unhealthy(self, req, tag):
return self._get_monitor_counter("count-objects-unhealthy-pre-repair")
@renderer
def corrupt_shares(self, req, tag):
return self._get_monitor_counter("count-corrupt-shares-pre-repair")
@renderer
def repairs_attempted(self, req, tag):
return self._get_monitor_counter("count-repairs-attempted")
@renderer
def repairs_successful(self, req, tag):
return self._get_monitor_counter("count-repairs-successful")
@renderer
def repairs_unsuccessful(self, req, tag):
return self._get_monitor_counter("count-repairs-unsuccessful")
@renderer
def objects_healthy_post(self, req, tag):
return self._get_monitor_counter("count-objects-healthy-post-repair")
@renderer
def objects_unhealthy_post(self, req, tag):
return self._get_monitor_counter("count-objects-unhealthy-post-repair")
@renderer
def corrupt_shares_post(self, req, tag):
return self._get_monitor_counter("count-corrupt-shares-post-repair")
@renderer
def pre_repair_problems_p(self, req, tag):
if self._get_monitor_counter("count-objects-unhealthy-pre-repair"):
return tag
return ""
def data_pre_repair_problems(self, ctx, data):
@renderer
def pre_repair_problems(self, req, tag):
all_objects = self.monitor.get_status().get_all_results()
problems = []
for path in sorted(all_objects.keys()):
r = all_objects[path]
assert ICheckAndRepairResults.providedBy(r)
cr = r.get_pre_repair_results()
if not cr.is_healthy():
yield path, cr
problem = self._join_pathstring(path), ": ", self._html(cr.get_summary())
problems.append({"problem": problem})
def render_problem(self, ctx, data):
path, cr = data
return ctx.tag[self._join_pathstring(path), ": ",
self._html(cr.get_summary())]
return SlotsSequenceElement(tag, problems)
def render_post_repair_problems_p(self, ctx, data):
c = self.monitor.get_status().get_counters()
if (c["count-objects-unhealthy-post-repair"]
or c["count-corrupt-shares-post-repair"]):
return ctx.tag
@renderer
def post_repair_problems_p(self, req, tag):
if (self._get_monitor_counter("count-objects-unhealthy-post-repair")
or self._get_monitor_counter("count-corrupt-shares-post-repair")):
return tag
return ""
def data_post_repair_problems(self, ctx, data):
@renderer
def post_repair_problems(self, req, tag):
all_objects = self.monitor.get_status().get_all_results()
problems = []
for path in sorted(all_objects.keys()):
r = all_objects[path]
assert ICheckAndRepairResults.providedBy(r)
cr = r.get_post_repair_results()
if not cr.is_healthy():
yield path, cr
problem = self._join_pathstring(path), ": ", self._html(cr.get_summary())
problems.append({"problem": problem})
def render_servers_with_corrupt_shares_p(self, ctx, data):
if self.monitor.get_status().get_counters()["count-corrupt-shares-pre-repair"]:
return ctx.tag
return ""
def data_servers_with_corrupt_shares(self, ctx, data):
return [] # TODO
def render_server_problem(self, ctx, data):
pass
return SlotsSequenceElement(tag, problems)
def render_remaining_corrupt_shares_p(self, ctx, data):
if self.monitor.get_status().get_counters()["count-corrupt-shares-post-repair"]:
return ctx.tag
return ""
def data_post_repair_corrupt_shares(self, ctx, data):
return [] # TODO
def render_share_problem(self, ctx, data):
pass
def render_return(self, ctx, data):
req = inevow.IRequest(ctx)
return_to = get_arg(req, "return_to", None)
if return_to:
return T.div[T.a(href=return_to)["Return to file/directory."]]
@renderer
def remaining_corrupt_shares_p(self, req, tag):
if self._get_monitor_counter("count-corrupt-shares-post-repair"):
return tag
return ""
def data_all_objects(self, ctx, data):
r = self.monitor.get_status().get_all_results()
for path in sorted(r.keys()):
yield (path, r[path])
@renderer
def post_repair_corrupt_shares(self, req, tag):
# TODO: this was not implemented before porting to
# twisted.web.template; leaving it as such.
#
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3371
corrupt = [{"share":"unimplemented"}]
return SlotsSequenceElement(tag, corrupt)
def render_object(self, ctx, data):
path, r = data
ctx.fillSlots("path", self._join_pathstring(path))
ctx.fillSlots("healthy_pre_repair",
str(r.get_pre_repair_results().is_healthy()))
ctx.fillSlots("recoverable_pre_repair",
str(r.get_pre_repair_results().is_recoverable()))
ctx.fillSlots("healthy_post_repair",
str(r.get_post_repair_results().is_healthy()))
storage_index = r.get_storage_index()
ctx.fillSlots("storage_index",
self._render_si_link(ctx, storage_index))
ctx.fillSlots("summary",
self._html(r.get_pre_repair_results().get_summary()))
return ctx.tag
@renderer
def all_objects(self, req, tag):
results = {}
if self.monitor.get_status():
results = self.monitor.get_status().get_all_results()
objects = []
for path in sorted(results.keys()):
result = results[path]
storage_index = result.get_storage_index()
obj = {
"path": self._join_pathstring(path),
"healthy_pre_repair": str(result.get_pre_repair_results().is_healthy()),
"recoverable_pre_repair": str(result.get_pre_repair_results().is_recoverable()),
"healthy_post_repair": str(result.get_post_repair_results().is_healthy()),
"storage_index": self._render_si_link(req, storage_index),
"summary": self._html(result.get_pre_repair_results().get_summary()),
}
objects.append(obj)
return SlotsSequenceElement(tag, objects)
def render_runtime(self, ctx, data):
req = inevow.IRequest(ctx)
runtime = time.time() - req.processing_started_timestamp
return ctx.tag["runtime: %s seconds" % runtime]

View File

@@ -1,95 +1,106 @@
<html xmlns:n="http://nevow.com/ns/nevow/0.1">
<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">
<head>
<title>Tahoe-LAFS - Deep Check Results</title>
<link href="/tahoe.css" rel="stylesheet" type="text/css"/>
<link href="/icon.png" rel="shortcut icon" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta n:render="refresh" />
<meta t:render="refresh" />
</head>
<body>
<h1>Deep-Check-And-Repair Results for root
SI=<span n:render="root_storage_index" /></h1>
SI=<span t:render="root_storage_index" /></h1>
<h2 n:render="reload" />
<h2 t:render="reload" />
<p>Counters:</p>
<ul>
<li>Objects Checked: <span n:render="data" n:data="objects_checked" /></li>
<li>Objects Checked: <span><t:transparent t:render="objects_checked" /></span></li>
<li>Objects Healthy (before repair): <span n:render="data" n:data="objects_healthy" /></li>
<li>Objects Unhealthy (before repair): <span n:render="data" n:data="objects_unhealthy" /></li>
<li>Corrupt Shares (before repair): <span n:render="data" n:data="corrupt_shares" /></li>
<li>Objects Healthy (before repair): <span><t:transparent t:render="objects_healthy" /></span></li>
<li>Objects Unhealthy (before repair): <span><t:transparent t:render="objects_unhealthy" /></span></li>
<li>Corrupt Shares (before repair): <span><t:transparent t:render="corrupt_shares" /></span></li>
<li>Repairs Attempted: <span n:render="data" n:data="repairs_attempted" /></li>
<li>Repairs Successful: <span n:render="data" n:data="repairs_successful" /></li>
<li>Repairs Unsuccessful: <span n:render="data" n:data="repairs_unsuccessful" /></li>
<li>Repairs Attempted: <span><t:transparent t:render="repairs_attempted" /></span></li>
<li>Repairs Successful: <span><t:transparent t:render="repairs_successful" /></span></li>
<li>Repairs Unsuccessful: <span><t:transparent t:render="repairs_unsuccessful" /></span></li>
<li>Objects Healthy (after repair): <span n:render="data" n:data="objects_healthy_post" /></li>
<li>Objects Unhealthy (after repair): <span n:render="data" n:data="objects_unhealthy_post" /></li>
<li>Corrupt Shares (after repair): <span n:render="data" n:data="corrupt_shares_post" /></li>
<li>Objects Healthy (after repair): <span><t:transparent t:render="objects_healthy_post" /></span></li>
<li>Objects Unhealthy (after repair): <span><t:transparent t:render="objects_unhealthy_post" /></span></li>
<li>Corrupt Shares (after repair): <span><t:transparent t:render="corrupt_shares_post" /></span></li>
</ul>
<div n:render="pre_repair_problems_p">
<div t:render="pre_repair_problems_p">
<h2>Files/Directories That Had Problems:</h2>
<ul n:render="sequence" n:data="pre_repair_problems">
<li n:pattern="item" n:render="problem"/>
<li n:pattern="empty">None</li>
<ul t:render="pre_repair_problems">
<li t:render="item">
<t:slot name="problem" />
</li>
<li t:render="empty">None</li>
</ul>
</div>
<div n:render="post_repair_problems_p">
<div t:render="post_repair_problems_p">
<h2>Files/Directories That Still Have Problems:</h2>
<ul n:render="sequence" n:data="post_repair_problems">
<li n:pattern="item" n:render="problem"/>
<li n:pattern="empty">None</li>
<ul t:render="post_repair_problems">
<li t:render="item">
<t:slot name="problem" />
</li>
<li t:render="empty">None</li>
</ul>
</div>
<div n:render="servers_with_corrupt_shares_p">
<div t:render="servers_with_corrupt_shares_p">
<h2>Servers on which corrupt shares were found</h2>
<ul n:render="sequence" n:data="servers_with_corrupt_shares">
<li n:pattern="item" n:render="server_problem"/>
<li n:pattern="empty">None</li>
<ul t:render="servers_with_corrupt_shares">
<li t:render="item">
<t:slot name="problem" />
</li>
<li t:render="empty">None</li>
</ul>
</div>
<div n:render="remaining_corrupt_shares_p">
<div t:render="remaining_corrupt_shares_p">
<h2>Remaining Corrupt Shares</h2>
<p>These shares need to be manually inspected and removed.</p>
<ul n:render="sequence" n:data="post_repair_corrupt_shares">
<li n:pattern="item" n:render="share_problem"/>
<li n:pattern="empty">None</li>
<ul t:render="post_repair_corrupt_shares">
<li t:render="item">
<t:slot name="share" />
</li>
<li t:render="empty">None</li>
</ul>
</div>
<div n:render="return" />
<div t:render="return_to" />
<div>
<table n:render="sequence" n:data="all_objects">
<tr n:pattern="header">
<td>Relative Path</td>
<td>Healthy Pre-Repair</td>
<td>Recoverable Pre-Repair</td>
<td>Healthy Post-Repair</td>
<td>Storage Index</td>
<td>Summary</td>
<table t:render="all_objects">
<tr t:render="header">
<th>Relative Path</th>
<th>Healthy Pre-Repair</th>
<th>Recoverable Pre-Repair</th>
<th>Healthy Post-Repair</th>
<th>Storage Index</th>
<th>Summary</th>
</tr>
<tr n:pattern="item" n:render="object">
<td><n:slot name="path"/></td>
<td><n:slot name="healthy_pre_repair"/></td>
<td><n:slot name="recoverable_pre_repair"/></td>
<td><n:slot name="healthy_post_repair"/></td>
<td><n:slot name="storage_index"/></td>
<td><n:slot name="summary"/></td>
<tr t:render="item">
<td><t:slot name="path"/></td>
<td><t:slot name="healthy_pre_repair"/></td>
<td><t:slot name="recoverable_pre_repair"/></td>
<td><t:slot name="healthy_post_repair"/></td>
<td><t:slot name="storage_index"/></td>
<td><t:slot name="summary"/></td>
</tr>
<tr t:render="empty">
<td>Nothing to report yet.</td>
</tr>
</table>
</div>
<div n:render="runtime" />
<div t:render="runtime" />
</body>
</html>
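
The header/item/empty rows in this template are driven by the list of slot-name-to-value dicts that the Python renderers hand to SlotsSequenceElement. A rough approximation of what such a sequence helper does, for illustration rather than the actual allmydata.web.common implementation:

# Approximation only: repeat the "item" row once per dict, keep "header"
# as-is, and show "empty" only when there is nothing to list.
from twisted.web.template import Element, TagLoader, renderer

class SequenceSketch(Element):
    def __init__(self, tag, items):
        super(SequenceSketch, self).__init__(loader=TagLoader(tag))
        self._items = items

    @renderer
    def header(self, request, tag):
        return tag

    @renderer
    def item(self, request, tag):
        # Each dict's keys name the <t:slot>s inside the row being cloned.
        return [tag.clone().fillSlots(**entry) for entry in self._items]

    @renderer
    def empty(self, request, tag):
        return tag if not self._items else ""
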

View File

@@ -1,87 +1,93 @@
<html xmlns:n="http://nevow.com/ns/nevow/0.1">
<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">
<head>
<title>Tahoe-LAFS - Deep Check Results</title>
<link href="/tahoe.css" rel="stylesheet" type="text/css"/>
<link href="/icon.png" rel="shortcut icon" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta n:render="refresh" />
<meta t:render="refresh" />
</head>
<body>
<h1>Deep-Check Results for root SI=<span n:render="root_storage_index" /></h1>
<h1>Deep-Check Results for root SI=<span t:render="root_storage_index" /></h1>
<h2 n:render="reload" />
<h2 t:render="reload" />
<p>Counters:</p>
<ul>
<li>Objects Checked: <span n:render="data" n:data="objects_checked" /></li>
<li>Objects Healthy: <span n:render="data" n:data="objects_healthy" /></li>
<li>Objects Unhealthy: <span n:render="data" n:data="objects_unhealthy" /></li>
<li>Objects Unrecoverable: <span n:render="data" n:data="objects_unrecoverable" /></li>
<li>Corrupt Shares: <span n:render="data" n:data="count_corrupt_shares" /></li>
<li>Objects Checked: <span><t:transparent t:render="objects_checked" /></span></li>
<li>Objects Healthy: <span><t:transparent t:render="objects_healthy" /></span></li>
<li>Objects Unhealthy: <span><t:transparent t:render="objects_unhealthy" /></span></li>
<li>Objects Unrecoverable: <span><t:transparent t:render="objects_unrecoverable" /></span></li>
<li>Corrupt Shares: <span><t:transparent t:render="count_corrupt_shares" /></span></li>
</ul>
<div n:render="problems_p">
<div t:render="problems_p">
<h2>Files/Directories That Had Problems:</h2>
<ul n:render="sequence" n:data="problems">
<li n:pattern="item" n:render="problem"/>
<li n:pattern="empty">None</li>
<ul t:render="problems">
<li t:render="item">
<t:slot name="problem" />
</li>
<li t:render="empty">None</li>
</ul>
</div>
<div n:render="servers_with_corrupt_shares_p">
<div t:render="servers_with_corrupt_shares_p">
<h2>Servers on which corrupt shares were found</h2>
<ul n:render="sequence" n:data="servers_with_corrupt_shares">
<li n:pattern="item" n:render="server_problem"/>
<li n:pattern="empty">None</li>
<ul t:render="servers_with_corrupt_shares">
<li t:render="item">
<t:slot name="problem" />
</li>
<li t:render="empty">None</li>
</ul>
</div>
<div n:render="corrupt_shares_p">
<div t:render="corrupt_shares_p">
<h2>Corrupt Shares</h2>
<p>If repair fails, these shares need to be manually inspected and removed.</p>
<table n:render="sequence" n:data="corrupt_shares">
<tr n:pattern="header">
<td>Server</td>
<td>Server Nickname</td>
<td>Storage Index</td>
<td>Share Number</td>
<table t:render="corrupt_shares">
<tr t:render="header">
<th>Server</th>
<th>Server Nickname</th>
<th>Storage Index</th>
<th>Share Number</th>
</tr>
<tr n:pattern="item" n:render="share_problem">
<td><n:slot name="serverid"/></td>
<td><n:slot name="nickname"/></td>
<td><n:slot name="si"/></td>
<td><n:slot name="shnum"/></td>
<tr t:render="item">
<td><t:slot name="serverid"/></td>
<td><t:slot name="nickname"/></td>
<td><t:slot name="si"/></td>
<td><t:slot name="shnum"/></td>
</tr>
</table>
</div>
<div n:render="return" />
<div t:render="return_to" />
<div>
<h2>All Results</h2>
<table n:render="sequence" n:data="all_objects">
<tr n:pattern="header">
<td>Relative Path</td>
<td>Healthy</td>
<td>Recoverable</td>
<td>Storage Index</td>
<td>Summary</td>
<table t:render="all_objects">
<tr t:render="header">
<th>Relative Path</th>
<th>Healthy</th>
<th>Recoverable</th>
<th>Storage Index</th>
<th>Summary</th>
</tr>
<tr n:pattern="item" n:render="object">
<td><n:slot name="path"/></td>
<td><n:slot name="healthy"/></td>
<td><n:slot name="recoverable"/></td>
<td><tt><n:slot name="storage_index"/></tt></td>
<td><n:slot name="summary"/></td>
<tr t:render="item">
<td><t:slot name="path"/></td>
<td><t:slot name="healthy"/></td>
<td><t:slot name="recoverable"/></td>
<td><tt><t:slot name="storage_index"/></tt></td>
<td><t:slot name="summary"/></td>
</tr>
<tr t:render="empty">
<td>Nothing to report yet.</td>
</tr>
</table>
</div>
<div n:render="runtime" />
<div t:render="runtime" />
</body>
</html>

View File

@@ -261,7 +261,7 @@ class MoreInfoElement(Element):
@renderer
def deep_check_form(self, req, tag):
ophandle = base32.b2a(os.urandom(16))
deep_check = T.form(action=".", method="post",
deep_check = T.form(action=req.path, method="post",
enctype="multipart/form-data")(
T.fieldset(
T.input(type="hidden", name="t", value="start-deep-check"),
@@ -287,7 +287,7 @@ class MoreInfoElement(Element):
@renderer
def deep_size_form(self, req, tag):
ophandle = base32.b2a(os.urandom(16))
deep_size = T.form(action=".", method="post",
deep_size = T.form(action=req.path, method="post",
enctype="multipart/form-data")(
T.fieldset(
T.input(type="hidden", name="t", value="start-deep-size"),
@@ -300,7 +300,7 @@ class MoreInfoElement(Element):
@renderer
def deep_stats_form(self, req, tag):
ophandle = base32.b2a(os.urandom(16))
deep_stats = T.form(action=".", method="post",
deep_stats = T.form(action=req.path, method="post",
enctype="multipart/form-data")(
T.fieldset(
T.input(type="hidden", name="t", value="start-deep-stats"),
@@ -313,7 +313,7 @@ class MoreInfoElement(Element):
@renderer
def manifest_form(self, req, tag):
ophandle = base32.b2a(os.urandom(16))
manifest = T.form(action=".", method="post",
manifest = T.form(action=req.path, method="post",
enctype="multipart/form-data")(
T.fieldset(
T.input(type="hidden", name="t", value="start-manifest"),

View File

@@ -1,4 +1,4 @@
<html xmlns:n="http://nevow.com/ns/nevow/0.1">
<html xmlns:t="http://twistedmatrix.com/ns/twisted.web.template/0.1">
<head>
<title>Tahoe-LAFS - Check Results</title>
<link href="/tahoe.css" rel="stylesheet" type="text/css"/>
@@ -11,7 +11,7 @@
<div>Literal files are always healthy: their data is contained in the URI</div>
<div n:render="return" />
<div t:render="return_to" />
</body>
</html>

View File

@@ -49,9 +49,8 @@ commands =
tahoe --version
[testenv:py36]
# On macOS, git inside of ratchet.sh needs $HOME.
passenv = HOME
commands = {toxinidir}/misc/python3/ratchet.sh
commands =
trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors} {posargs:allmydata.test.python3_tests}
[testenv:integration]
setenv =