diff --git a/.circleci/config.yml b/.circleci/config.yml index afa3fafa1..b00bcdcec 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -29,7 +29,7 @@ workflows: - "debian-9": &DOCKERHUB_CONTEXT context: "dockerhub-auth" - - "debian-8": + - "debian-10": <<: *DOCKERHUB_CONTEXT requires: - "debian-9" @@ -86,9 +86,7 @@ workflows: # integration tests. - "debian-9" - # Generate the underlying data for a visualization to aid with Python 3 - # porting. - - "build-porting-depgraph": + - "typechecks": <<: *DOCKERHUB_CONTEXT images: @@ -104,7 +102,7 @@ workflows: - "master" jobs: - - "build-image-debian-8": + - "build-image-debian-10": <<: *DOCKERHUB_CONTEXT - "build-image-debian-9": <<: *DOCKERHUB_CONTEXT @@ -210,7 +208,7 @@ jobs: # filenames and argv). LANG: "en_US.UTF-8" # Select a tox environment to run for this job. - TAHOE_LAFS_TOX_ENVIRONMENT: "py27-coverage" + TAHOE_LAFS_TOX_ENVIRONMENT: "py27" # Additional arguments to pass to tox. TAHOE_LAFS_TOX_ARGS: "" # The path in which test artifacts will be placed. @@ -220,7 +218,7 @@ jobs: WHEELHOUSE_PATH: &WHEELHOUSE_PATH "/tmp/wheelhouse" PIP_FIND_LINKS: "file:///tmp/wheelhouse" # Upload the coverage report. - UPLOAD_COVERAGE: "yes" + UPLOAD_COVERAGE: "" # pip cannot install packages if the working directory is not readable. # We want to run a lot of steps as nobody instead of as root. @@ -274,11 +272,11 @@ jobs: fi - debian-8: + debian-10: <<: *DEBIAN docker: - <<: *DOCKERHUB_AUTH - image: "tahoelafsci/debian:8-py2.7" + image: "tahoelafsci/debian:10-py2.7" user: "nobody" @@ -373,7 +371,7 @@ jobs: # this reporter on Python 3. So drop that and just specify the # reporter. TAHOE_LAFS_TRIAL_ARGS: "--reporter=subunitv2-file" - TAHOE_LAFS_TOX_ENVIRONMENT: "py36-coverage" + TAHOE_LAFS_TOX_ENVIRONMENT: "py36" ubuntu-20-04: @@ -448,32 +446,17 @@ jobs: # them in parallel. nix-build --cores 3 --max-jobs 2 nix/ - # Generate up-to-date data for the dependency graph visualizer. - build-porting-depgraph: - # Get a system in which we can easily install Tahoe-LAFS and all its - # dependencies. The dependency graph analyzer works by executing the code. - # It's Python, what do you expect? - <<: *DEBIAN + typechecks: + docker: + - <<: *DOCKERHUB_AUTH + image: "tahoelafsci/ubuntu:18.04-py3" steps: - "checkout" - - - add_ssh_keys: - fingerprints: - # Jean-Paul Calderone (CircleCI depgraph key) - # This lets us push to tahoe-lafs/tahoe-depgraph in the next step. - - "86:38:18:a7:c0:97:42:43:18:46:55:d6:21:b0:5f:d4" - - run: - name: "Setup Python Environment" + name: "Validate Types" command: | - /tmp/venv/bin/pip install -e /tmp/project - - - run: - name: "Generate dependency graph data" - command: | - . /tmp/venv/bin/activate - ./misc/python3/depgraph.sh + /tmp/venv/bin/tox -e typechecks build-image: &BUILD_IMAGE # This is a template for a job to build a Docker image that has as much of @@ -514,12 +497,12 @@ jobs: docker push tahoelafsci/${DISTRO}:${TAG}-py${PYTHON_VERSION} - build-image-debian-8: + build-image-debian-10: <<: *BUILD_IMAGE environment: DISTRO: "debian" - TAG: "8" + TAG: "10" PYTHON_VERSION: "2.7" diff --git a/.codecov.yml b/.codecov.yml deleted file mode 100644 index df1eb5e01..000000000 --- a/.codecov.yml +++ /dev/null @@ -1,42 +0,0 @@ -# Override defaults for codecov.io checks. -# -# Documentation is at https://docs.codecov.io/docs/codecov-yaml; -# reference is at https://docs.codecov.io/docs/codecovyml-reference. 
-# -# To validate this file, use: -# -# curl --data-binary @.codecov.yml https://codecov.io/validate -# -# Codecov's defaults seem to leave red marks in GitHub CI checks in a -# rather arbitrary manner, probably because of non-determinism in -# coverage (see https://tahoe-lafs.org/trac/tahoe-lafs/ticket/2891) -# and maybe because computers are bad with floating point numbers. - -# Allow coverage percentage a precision of zero decimals, and round to -# the nearest number (for example, 89.957 to to 90; 89.497 to 89%). -# Coverage above 90% is good, below 80% is bad. -coverage: - round: nearest - range: 80..90 - precision: 0 - - # Aim for a target test coverage of 90% in codecov/project check (do - # not allow project coverage to drop below that), and allow - # codecov/patch a threshold of 1% (allow coverage in changes to drop - # by that much, and no less). That should be good enough for us. - status: - project: - default: - target: 90% - threshold: 1% - patch: - default: - threshold: 1% - - -codecov: - # This is a public repository so supposedly we don't "need" to use an upload - # token. However, using one makes sure that CI jobs running against forked - # repositories have coverage uploaded to the right place in codecov so - # their reports aren't incomplete. - token: "abf679b6-e2e6-4b33-b7b5-6cfbd41ee691" diff --git a/.github/CONTRIBUTING.rst b/.github/CONTRIBUTING.rst index c8f5093f1..b59385aa4 100644 --- a/.github/CONTRIBUTING.rst +++ b/.github/CONTRIBUTING.rst @@ -17,4 +17,4 @@ Examples of contributions include: * `Patch reviews `_ Before authoring or reviewing a patch, -please familiarize yourself with the `coding standard `_. +please familiarize yourself with the `Coding Standards `_ and the `Contributor Code of Conduct <../docs/CODE_OF_CONDUCT.md>`_. diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fd5049104..cae14c013 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,17 +30,37 @@ jobs: with: args: install vcpython27 + # See https://github.com/actions/checkout. A fetch-depth of 0 + # fetches all tags and branches. - name: Check out Tahoe-LAFS sources uses: actions/checkout@v2 - - - name: Fetch all history for all tags and branches - run: git fetch --prune --unshallow + with: + fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v1 with: python-version: ${{ matrix.python-version }} + # To use pip caching with GitHub Actions in an OS-independent + # manner, we need `pip cache dir` command, which became + # available since pip v20.1+. At the time of writing this, + # GitHub Actions offers pip v20.3.3 for both ubuntu-latest and + # windows-latest, and pip v20.3.1 for macos-latest. + - name: Get pip cache directory + id: pip-cache + run: | + echo "::set-output name=dir::$(pip cache dir)" + + # See https://github.com/actions/cache + - name: Use pip cache + uses: actions/cache@v2 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }} + restore-keys: | + ${{ runner.os }}-pip- + - name: Install Python packages run: | pip install --upgrade codecov tox setuptools @@ -59,11 +79,110 @@ jobs: name: eliot.log path: eliot.log - - name: Upload coverage report - uses: codecov/codecov-action@v1 - with: - token: abf679b6-e2e6-4b33-b7b5-6cfbd41ee691 - file: coverage.xml + # Upload this job's coverage data to Coveralls. While there is a GitHub + # Action for this, as of Jan 2021 it does not support Python coverage + # files - only lcov files. 
Therefore, we use coveralls-python, the + # coveralls.io-supplied Python reporter, for this. + - name: "Report Coverage to Coveralls" + run: | + pip install coveralls + python -m coveralls + env: + # Some magic value required for some magic reason. + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + # Help coveralls identify our project. + COVERALLS_REPO_TOKEN: "JPf16rLB7T2yjgATIxFzTsEgMdN1UNq6o" + # Every source of coverage reports needs a unique "flag name". + # Construct one by smashing a few variables from the matrix together + # here. + COVERALLS_FLAG_NAME: "run-${{ matrix.os }}-${{ matrix.python-version }}" + # Mark the data as just one piece of many because we have more than + # one instance of this job (Windows, macOS) which collects and + # reports coverage. This is necessary to cause Coveralls to merge + # multiple coverage results into a single report. Note the merge + # only happens when we "finish" a particular build, as identified by + # its "build_num" (aka "service_number"). + COVERALLS_PARALLEL: true + + # Tell Coveralls that we're done reporting coverage data. Since we're using + # the "parallel" mode where more than one coverage data file is merged into + # a single report, we have to tell Coveralls when we've uploaded all of the + # data files. This does it. We make sure it runs last by making it depend + # on *all* of the coverage-collecting jobs. + finish-coverage-report: + # There happens to just be one coverage-collecting job at the moment. If + # the coverage reports are broken and someone added more + # coverage-collecting jobs to this workflow but didn't update this, that's + # why. + needs: + - "coverage" + runs-on: "ubuntu-latest" + steps: + - name: "Check out Tahoe-LAFS sources" + uses: "actions/checkout@v2" + + - name: "Finish Coveralls Reporting" + run: | + # coveralls-python does have a `--finish` option but it doesn't seem + # to work, at least for us. + # https://github.com/coveralls-clients/coveralls-python/issues/248 + # + # But all it does is this simple POST so we can just send it + # ourselves. The only hard part is guessing what the POST + # parameters mean. And I've done that for you already. + # + # Since the build is done I'm going to guess that "done" is a fine + # value for status. + # + # That leaves "build_num". The coveralls documentation gives some + # hints about it. It suggests using $CIRCLE_WORKFLOW_ID if your job + # is on CircleCI. CircleCI documentation says this about + # CIRCLE_WORKFLOW_ID: + # + # Observation of the coveralls.io web interface, logs from the + # coveralls command in action, and experimentation suggests the + # value for PRs is something more like: + # + # <git revision hash>-PR-<pull request number> + # + # For branches, it's just the git branch tip hash. + + # For pull requests, refs/pull/<PR number>/merge was just checked out + # by the checkout step so HEAD will refer to the right revision. For branches, HEAD + # is also the tip of the branch. + REV=$(git rev-parse HEAD) + + # We can get the PR number from the "context". + # + # https://docs.github.com/en/free-pro-team@latest/developers/webhooks-and-events/webhook-events-and-payloads#pull_request + # + # (via ). + # + # If this is a pull request, `github.event` is a `pull_request` + # structure which has `number` right in it. + # + # If this is a push, `github.event` is a `push` instead but we only + # need the revision to construct the build_num.
+ + PR=${{ github.event.number }} + + if [ "${PR}" = "" ]; then + BUILD_NUM=$REV + else + BUILD_NUM=$REV-PR-$PR + fi + REPO_NAME=$GITHUB_REPOSITORY + + curl \ + -k \ + https://coveralls.io/webhook?repo_token=$COVERALLS_REPO_TOKEN \ + -d \ + "payload[build_num]=$BUILD_NUM&payload[status]=done&payload[repo_name]=$REPO_NAME" + env: + # Some magic value required for some magic reason. + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + # Help coveralls identify our project. + COVERALLS_REPO_TOKEN: "JPf16rLB7T2yjgATIxFzTsEgMdN1UNq6o" integration: runs-on: ${{ matrix.os }} @@ -103,15 +222,27 @@ jobs: - name: Check out Tahoe-LAFS sources uses: actions/checkout@v2 - - - name: Fetch all history for all tags and branches - run: git fetch --prune --unshallow + with: + fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v1 with: python-version: ${{ matrix.python-version }} + - name: Get pip cache directory + id: pip-cache + run: | + echo "::set-output name=dir::$(pip cache dir)" + + - name: Use pip cache + uses: actions/cache@v2 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }} + restore-keys: | + ${{ runner.os }}-pip- + - name: Install Python packages run: | pip install --upgrade tox @@ -155,15 +286,27 @@ jobs: - name: Check out Tahoe-LAFS sources uses: actions/checkout@v2 - - - name: Fetch all history for all tags and branches - run: git fetch --prune --unshallow + with: + fetch-depth: 0 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v1 with: python-version: ${{ matrix.python-version }} + - name: Get pip cache directory + id: pip-cache + run: | + echo "::set-output name=dir::$(pip cache dir)" + + - name: Use pip cache + uses: actions/cache@v2 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-pip-${{ hashFiles('**/setup.py') }} + restore-keys: | + ${{ runner.os }}-pip- + - name: Install Python packages run: | pip install --upgrade tox diff --git a/CREDITS b/CREDITS index 1394d87d8..07ac1e476 100644 --- a/CREDITS +++ b/CREDITS @@ -206,4 +206,9 @@ D: various bug-fixes and features N: Viktoriia Savchuk W: https://twitter.com/viktoriiasvchk -D: Developer community focused improvements on the README file. \ No newline at end of file +D: Developer community focused improvements on the README file. + +N: Lukas Pirl +E: tahoe@lukas-pirl.de +W: http://lukas-pirl.de +D: Buildslaves (Debian, Fedora, CentOS; 2016-2021) diff --git a/README.rst b/README.rst index 98150ed27..b1f6d2563 100644 --- a/README.rst +++ b/README.rst @@ -6,7 +6,7 @@ Free and Open decentralized data store `Tahoe-LAFS `__ (Tahoe Least-Authority File Store) is the first free software / open-source storage technology that distributes your data across multiple servers. Even if some servers fail or are taken over by an attacker, the entire file store continues to function correctly, preserving your privacy and security. -|Contributor Covenant| |readthedocs| |travis| |circleci| |codecov| +|Contributor Covenant| |readthedocs| |travis| |circleci| |coveralls| Table of contents @@ -125,9 +125,9 @@ See `TGPPL.PDF `__ for why the TGPPL ex .. |circleci| image:: https://circleci.com/gh/tahoe-lafs/tahoe-lafs.svg?style=svg :target: https://circleci.com/gh/tahoe-lafs/tahoe-lafs -.. |codecov| image:: https://codecov.io/github/tahoe-lafs/tahoe-lafs/coverage.svg?branch=master - :alt: test coverage percentage - :target: https://codecov.io/github/tahoe-lafs/tahoe-lafs?branch=master +.. 
|coveralls| image:: https://coveralls.io/repos/github/tahoe-lafs/tahoe-lafs/badge.svg + :alt: code coverage + :target: https://coveralls.io/github/tahoe-lafs/tahoe-lafs .. |Contributor Covenant| image:: https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg :alt: code of conduct diff --git a/docs/INSTALL.rst b/docs/INSTALL.rst index 3a724b790..e47d87bd6 100644 --- a/docs/INSTALL.rst +++ b/docs/INSTALL.rst @@ -173,7 +173,9 @@ from PyPI with ``venv/bin/pip install tahoe-lafs``. After installation, run Install From a Source Tarball ----------------------------- -You can also install directly from the source tarball URL:: +You can also install directly from the source tarball URL. To verify +signatures, first see verifying_signatures_ and replace the URL in the +following instructions with the local filename. % virtualenv venv New python executable in ~/venv/bin/python2.7 @@ -189,6 +191,40 @@ You can also install directly from the source tarball URL:: tahoe-lafs: 1.14.0 ... +.. _verifying_signatures: + +Verifying Signatures +-------------------- + +First download the source tarball and then any signatures. There are several +developers who are able to produce signatures for a release. A release may +have multiple signatures. All should be valid and you should confirm at least +one of them (ideally, confirm all). + +This statement, signed by the existing Tahoe release-signing key, attests to +those developers authorized to sign a Tahoe release: + +.. include:: developer-release-signatures + :code: + +Signatures are made available beside the release. So for example, a release +like ``https://tahoe-lafs.org/downloads/tahoe-lafs-1.16.0.tar.bz2`` might +have signatures ``tahoe-lafs-1.16.0.tar.bz2.meejah.asc`` and +``tahoe-lafs-1.16.0.tar.bz2.warner.asc``. + +To verify the signatures using GnuPG:: + + % gpg --verify tahoe-lafs-1.16.0.tar.bz2.meejah.asc tahoe-lafs-1.16.0.tar.bz2 + gpg: Signature made XXX + gpg: using RSA key 9D5A2BD5688ECB889DEBCD3FC2602803128069A7 + gpg: Good signature from "meejah " [full] + % gpg --verify tahoe-lafs-1.16.0.tar.bz2.warner.asc tahoe-lafs-1.16.0.tar.bz2 + gpg: Signature made XXX + gpg: using RSA key 967EFE06699872411A77DF36D43B4C9C73225AAF + gpg: Good signature from "Brian Warner " [full] + + + Extras ------ diff --git a/docs/README.md b/docs/README.txt similarity index 100% rename from docs/README.md rename to docs/README.txt diff --git a/docs/about.rst b/docs/about.rst index 626792d6b..120abb079 100644 --- a/docs/about.rst +++ b/docs/about.rst @@ -67,12 +67,12 @@ Here's how it works: A "storage grid" is made up of a number of storage servers. A storage server has direct attached storage (typically one or more hard disks). A "gateway" communicates with storage nodes, and uses them to provide access to the -grid over protocols such as HTTP(S), SFTP or FTP. +grid over protocols such as HTTP(S) and SFTP. Note that you can find "client" used to refer to gateway nodes (which act as a client to storage servers), and also to processes or programs connecting to a gateway node and performing operations on the grid -- for example, a CLI -command, Web browser, SFTP client, or FTP client. +command, Web browser, or SFTP client. 
Users do not rely on storage servers to provide *confidentiality* nor *integrity* for their data -- instead all of the data is encrypted and diff --git a/docs/conf.py b/docs/conf.py index 34ddd1bd4..612c324a3 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -28,7 +28,7 @@ import os # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = [] +extensions = ['recommonmark'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] @@ -36,7 +36,7 @@ templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ['.rst', '.md'] # The encoding of source files. #source_encoding = 'utf-8-sig' diff --git a/docs/configuration.rst b/docs/configuration.rst index 2c0746ba2..93c9aa0f1 100644 --- a/docs/configuration.rst +++ b/docs/configuration.rst @@ -81,7 +81,6 @@ Client/server nodes provide one or more of the following services: * web-API service * SFTP service -* FTP service * helper service * storage service. @@ -708,12 +707,12 @@ CLI file store, uploading/downloading files, and creating/running Tahoe nodes. See :doc:`frontends/CLI` for details. -SFTP, FTP +SFTP - Tahoe can also run both SFTP and FTP servers, and map a username/password + Tahoe can also run SFTP servers, and map a username/password pair to a top-level Tahoe directory. See :doc:`frontends/FTP-and-SFTP` - for instructions on configuring these services, and the ``[sftpd]`` and - ``[ftpd]`` sections of ``tahoe.cfg``. + for instructions on configuring this service, and the ``[sftpd]`` + section of ``tahoe.cfg``. Storage Server Configuration diff --git a/docs/contributing.rst b/docs/contributing.rst new file mode 100644 index 000000000..15e1b6432 --- /dev/null +++ b/docs/contributing.rst @@ -0,0 +1 @@ +.. include:: ../.github/CONTRIBUTING.rst diff --git a/docs/developer-release-signatures b/docs/developer-release-signatures new file mode 100644 index 000000000..1b55641d9 --- /dev/null +++ b/docs/developer-release-signatures @@ -0,0 +1,42 @@ +-----BEGIN PGP SIGNED MESSAGE----- +Hash: SHA512 + + +January 20, 2021 + +Any of the following core Tahoe contributers may sign a release. Each +release MUST be signed by at least one developer but MAY have +additional signatures. Each developer independently produces a +signature which is made available beside Tahoe releases after 1.15.0 + +This statement is signed by the existing Tahoe release key. Any future +such statements may be signed by it OR by any two developers (for +example, to add or remove developers from the list). 
+ +meejah +0xC2602803128069A7 +9D5A 2BD5 688E CB88 9DEB CD3F C260 2803 1280 69A7 +https://meejah.ca/meejah.asc + +jean-paul calderone (exarkun) +0xE27B085EDEAA4B1B +96B9 C5DA B2EA 9EB6 7941 9DB7 E27B 085E DEAA 4B1B +https://twistedmatrix.com/~exarkun/E27B085EDEAA4B1B.asc + +brian warner (lothar) +0x863333C265497810 +5810 F125 7F8C F753 7753 895A 8633 33C2 6549 7810 +https://www.lothar.com/warner-gpg.html + + +-----BEGIN PGP SIGNATURE----- + +iQEzBAEBCgAdFiEE405i0G0Oac/KQXn/veDTHWhmanoFAmAHIyIACgkQveDTHWhm +anqhqQf/YSbMXL+gwFhAZsjX39EVlbr/Ik7WPPkJW7v1oHybTnwFpFIc52COU1x/ +sqRfk4OyYtz9IBgOPXoWgXu9R4qdK6vYKxEsekcGT9C5l0OyDz8YWXEWgbGK5mvI +aEub9WucD8r2uOQnnW6DtznFuEpvOjtf/+2BU767+bvLsbViW88ocbuLfCqLdOgD +WZT9j3M+Y2Dc56DAJzP/4fkrUSVIofZStYp5u9HBjburgcYIp0g/cyc4xXRoi6Mp +lFTRFv3MIjmoamzSQseoIgP6fi8QRqPrffPrsyqAp+06mJnPhxxFqxtO/ZErmpSa ++BGrLBxdWa8IF9U1A4Fs5nuAzAKMEg== +=E9J+ +-----END PGP SIGNATURE----- diff --git a/docs/frontends/FTP-and-SFTP.rst b/docs/frontends/FTP-and-SFTP.rst index dc348af34..ee6371812 100644 --- a/docs/frontends/FTP-and-SFTP.rst +++ b/docs/frontends/FTP-and-SFTP.rst @@ -1,22 +1,21 @@ .. -*- coding: utf-8-with-signature -*- -================================= -Tahoe-LAFS SFTP and FTP Frontends -================================= +======================== +Tahoe-LAFS SFTP Frontend +======================== -1. `SFTP/FTP Background`_ +1. `SFTP Background`_ 2. `Tahoe-LAFS Support`_ 3. `Creating an Account File`_ 4. `Running An Account Server (accounts.url)`_ 5. `Configuring SFTP Access`_ -6. `Configuring FTP Access`_ -7. `Dependencies`_ -8. `Immutable and Mutable Files`_ -9. `Known Issues`_ +6. `Dependencies`_ +7. `Immutable and Mutable Files`_ +8. `Known Issues`_ -SFTP/FTP Background -=================== +SFTP Background +=============== FTP is the venerable internet file-transfer protocol, first developed in 1971. The FTP server usually listens on port 21. A separate connection is @@ -33,20 +32,18 @@ Both FTP and SFTP were developed assuming a UNIX-like server, with accounts and passwords, octal file modes (user/group/other, read/write/execute), and ctime/mtime timestamps. -We recommend SFTP over FTP, because the protocol is better, and the server -implementation in Tahoe-LAFS is more complete. See `Known Issues`_, below, -for details. +Previous versions of Tahoe-LAFS supported FTP, but now only the superior SFTP +frontend is supported. See `Known Issues`_, below, for details on the +limitations of SFTP. Tahoe-LAFS Support ================== All Tahoe-LAFS client nodes can run a frontend SFTP server, allowing regular SFTP clients (like ``/usr/bin/sftp``, the ``sshfs`` FUSE plugin, and many -others) to access the file store. They can also run an FTP server, so FTP -clients (like ``/usr/bin/ftp``, ``ncftp``, and others) can too. These -frontends sit at the same level as the web-API interface. +others) to access the file store. -Since Tahoe-LAFS does not use user accounts or passwords, the SFTP/FTP +Since Tahoe-LAFS does not use user accounts or passwords, the SFTP servers must be configured with a way to first authenticate a user (confirm that a prospective client has a legitimate claim to whatever authorities we might grant a particular user), and second to decide what directory cap @@ -173,39 +170,6 @@ clients and with the sshfs filesystem, see wiki:SftpFrontend_ .. 
_wiki:SftpFrontend: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/SftpFrontend -Configuring FTP Access -====================== - -To enable the FTP server with an accounts file, add the following lines to -the BASEDIR/tahoe.cfg file:: - - [ftpd] - enabled = true - port = tcp:8021:interface=127.0.0.1 - accounts.file = private/accounts - -The FTP server will listen on the given port number and on the loopback -interface only. The "accounts.file" pathname will be interpreted relative to -the node's BASEDIR. - -To enable the FTP server with an account server instead, provide the URL of -that server in an "accounts.url" directive:: - - [ftpd] - enabled = true - port = tcp:8021:interface=127.0.0.1 - accounts.url = https://example.com/login - -You can provide both accounts.file and accounts.url, although it probably -isn't very useful except for testing. - -FTP provides no security, and so your password or caps could be eavesdropped -if you connect to the FTP server remotely. The examples above include -":interface=127.0.0.1" in the "port" option, which causes the server to only -accept connections from localhost. - -Public key authentication is not supported for FTP. - Dependencies ============ @@ -216,7 +180,7 @@ separately: debian puts it in the "python-twisted-conch" package. Immutable and Mutable Files =========================== -All files created via SFTP (and FTP) are immutable files. However, files can +All files created via SFTP are immutable files. However, files can only be created in writeable directories, which allows the directory entry to be relinked to a different file. Normally, when the path of an immutable file is opened for writing by SFTP, the directory entry is relinked to another @@ -256,18 +220,3 @@ See also wiki:SftpFrontend_. .. _ticket #1059: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1059 .. _ticket #1089: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1089 - -Known Issues in the FTP Frontend --------------------------------- - -Mutable files are not supported by the FTP frontend (`ticket #680`_). - -Non-ASCII filenames are not supported by FTP (`ticket #682`_). - -The FTP frontend sometimes fails to report errors, for example if an upload -fails because it does meet the "servers of happiness" threshold (`ticket -#1081`_). - -.. _ticket #680: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/680 -.. _ticket #682: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/682 -.. _ticket #1081: https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1081 diff --git a/docs/frontends/webapi.rst b/docs/frontends/webapi.rst index 99fa44979..77ce11974 100644 --- a/docs/frontends/webapi.rst +++ b/docs/frontends/webapi.rst @@ -2032,10 +2032,11 @@ potential for surprises when the file store structure is changed. Tahoe-LAFS provides a mutable file store, but the ways that the store can change are limited. 
The only things that can change are: - * the mapping from child names to child objects inside mutable directories - (by adding a new child, removing an existing child, or changing an - existing child to point to a different object) - * the contents of mutable files + +* the mapping from child names to child objects inside mutable directories + (by adding a new child, removing an existing child, or changing an + existing child to point to a different object) +* the contents of mutable files Obviously if you query for information about the file store and then act to change it (such as by getting a listing of the contents of a mutable @@ -2157,7 +2158,7 @@ When modifying the file, be careful to update it atomically, otherwise a request may arrive while the file is only halfway written, and the partial file may be incorrectly parsed. -The blacklist is applied to all access paths (including SFTP, FTP, and CLI +The blacklist is applied to all access paths (including SFTP and CLI operations), not just the web-API. The blacklist also applies to directories. If a directory is blacklisted, the gateway will refuse access to both that directory and any child files/directories underneath it, when accessed via diff --git a/docs/helper.rst b/docs/helper.rst index 0fcdf4601..55d302cac 100644 --- a/docs/helper.rst +++ b/docs/helper.rst @@ -122,7 +122,7 @@ Who should consider using a Helper? * clients who experience problems with TCP connection fairness: if other programs or machines in the same home are getting less than their fair share of upload bandwidth. If the connection is being shared fairly, then - a Tahoe upload that is happening at the same time as a single FTP upload + a Tahoe upload that is happening at the same time as a single SFTP upload should get half the bandwidth. * clients who have been given the helper.furl by someone who is running a Helper and is willing to let them use it diff --git a/docs/index.rst b/docs/index.rst index 3d0a41302..60a3aa5d4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -23,8 +23,9 @@ Contents: frontends/download-status known_issues - ../.github/CONTRIBUTING + contributing CODE_OF_CONDUCT + release-checklist servers helper diff --git a/docs/known_issues.rst b/docs/known_issues.rst index e040ffaf6..98bd1b35d 100644 --- a/docs/known_issues.rst +++ b/docs/known_issues.rst @@ -23,7 +23,7 @@ Known Issues in Tahoe-LAFS v1.10.3, released 30-Mar-2016 * `Disclosure of file through embedded hyperlinks or JavaScript in that file`_ * `Command-line arguments are leaked to other local users`_ * `Capabilities may be leaked to web browser phishing filter / "safe browsing" servers`_ - * `Known issues in the FTP and SFTP frontends`_ + * `Known issues in the SFTP frontend`_ * `Traffic analysis based on sizes of files/directories, storage indices, and timing`_ * `Privacy leak via Google Chart API link in map-update timing web page`_ @@ -213,8 +213,8 @@ To disable the filter in Chrome: ---- -Known issues in the FTP and SFTP frontends ------------------------------------------- +Known issues in the SFTP frontend +--------------------------------- These are documented in :doc:`frontends/FTP-and-SFTP` and on `the SftpFrontend page`_ on the wiki. diff --git a/docs/release-checklist.rst b/docs/release-checklist.rst index be32aea6c..75ab74bb1 100644 --- a/docs/release-checklist.rst +++ b/docs/release-checklist.rst @@ -40,23 +40,31 @@ Create Branch and Apply Updates - Create a branch for release-candidates (e.g. 
`XXXX.release-1.15.0.rc0`) - run `tox -e news` to produce a new NEWS.txt file (this does a commit) - create the news for the release + - newsfragments/.minor - commit it + - manually fix NEWS.txt + - proper title for latest release ("Release 1.15.0" instead of "Release ...post1432") - double-check date (maybe release will be in the future) - spot-check the release notes (these come from the newsfragments files though so don't do heavy editing) - commit these changes + - update "relnotes.txt" + - update all mentions of 1.14.0 -> 1.15.0 - update "previous release" statement and date - summarize major changes - commit it + - update "CREDITS" + - are there any new contributors in this release? - one way: git log release-1.14.0.. | grep Author | sort | uniq - commit it + - update "docs/known_issues.rst" if appropriate - update "docs/INSTALL.rst" references to the new release - Push the branch to github @@ -82,25 +90,36 @@ they will need to evaluate which contributors' signatures they trust. - (all steps above are completed) - sign the release + - git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-1.15.0rc0" tahoe-lafs-1.15.0rc0 - (replace the key-id above with your own) + - build all code locally - these should all pass: + - tox -e py27,codechecks,docs,integration + - these can fail (ideally they should not of course): + - tox -e deprecations,upcoming-deprecations + - build tarballs + - tox -e tarballs - confirm it at least exists: - ls dist/ | grep 1.15.0rc0 + - inspect and test the tarballs + - install each in a fresh virtualenv - run `tahoe` command + - when satisfied, sign the tarballs: - - gpg --pinentry=loopback --armor --sign dist/tahoe_lafs-1.15.0rc0-py2-none-any.whl - - gpg --pinentry=loopback --armor --sign dist/tahoe_lafs-1.15.0rc0.tar.bz2 - - gpg --pinentry=loopback --armor --sign dist/tahoe_lafs-1.15.0rc0.tar.gz - - gpg --pinentry=loopback --armor --sign dist/tahoe_lafs-1.15.0rc0.zip + + - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0-py2-none-any.whl + - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.tar.bz2 + - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.tar.gz + - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.zip Privileged Contributor @@ -118,6 +137,12 @@ Did anyone contribute a hack since the last release? If so, then https://tahoe-lafs.org/hacktahoelafs/ needs to be updated. +Sign Git Tag +```````````` + +- git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-X.Y.Z" tahoe-lafs-X.Y.Z + + Upload Artifacts ```````````````` @@ -129,6 +154,7 @@ need to be uploaded to https://tahoe-lafs.org in `~source/downloads` https://tahoe-lafs.org/downloads/ on the Web. - scp dist/*1.15.0* username@tahoe-lafs.org:/home/source/downloads - the following developers have access to do this: + - exarkun - meejah - warner @@ -137,8 +163,9 @@ For the actual release, the tarball and signature files need to be uploaded to PyPI as well. - how to do this? -- (original guide says only "twine upload dist/*") +- (original guide says only `twine upload dist/*`) - the following developers have access to do this: + - warner - exarkun (partial?) - meejah (partial?) diff --git a/docs/running.rst b/docs/running.rst index 6d82a97f2..82b0443f9 100644 --- a/docs/running.rst +++ b/docs/running.rst @@ -207,10 +207,10 @@ create a new directory and lose the capability to it, then you cannot access that directory ever again. 
-The SFTP and FTP frontends -------------------------- +The SFTP frontend +----------------- -You can access your Tahoe-LAFS grid via any SFTP_ or FTP_ client. See +You can access your Tahoe-LAFS grid via any SFTP_ client. See :doc:`frontends/FTP-and-SFTP` for how to set this up. On most Unix platforms, you can also use SFTP to plug Tahoe-LAFS into your computer's local filesystem via ``sshfs``, but see the `FAQ about performance @@ -220,7 +220,6 @@ The SftpFrontend_ page on the wiki has more information about using SFTP with Tahoe-LAFS. .. _SFTP: https://en.wikipedia.org/wiki/SSH_file_transfer_protocol -.. _FTP: https://en.wikipedia.org/wiki/File_Transfer_Protocol .. _FAQ about performance problems: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/FAQ#Q23_FUSE .. _SftpFrontend: https://tahoe-lafs.org/trac/tahoe-lafs/wiki/SftpFrontend diff --git a/integration/conftest.py b/integration/conftest.py index f37ec9353..533cbdb67 100644 --- a/integration/conftest.py +++ b/integration/conftest.py @@ -7,6 +7,7 @@ from os import mkdir, listdir, environ from os.path import join, exists from tempfile import mkdtemp, mktemp from functools import partial +from json import loads from foolscap.furl import ( decode_furl, @@ -37,6 +38,10 @@ from util import ( _tahoe_runner_optional_coverage, await_client_ready, TahoeProcess, + cli, + _run_node, + generate_ssh_key, + block_with_timeout, ) @@ -152,7 +157,7 @@ def flog_gatherer(reactor, temp_dir, flog_binary, request): ) print("Waiting for flogtool to complete") try: - pytest_twisted.blockon(flog_protocol.done) + block_with_timeout(flog_protocol.done, reactor) except ProcessTerminated as e: print("flogtool exited unexpectedly: {}".format(str(e))) print("Flogtool completed") @@ -293,7 +298,7 @@ log_gatherer.furl = {log_furl} def cleanup(): try: transport.signalProcess('TERM') - pytest_twisted.blockon(protocol.exited) + block_with_timeout(protocol.exited, reactor) except ProcessExitedAlready: pass request.addfinalizer(cleanup) @@ -347,8 +352,50 @@ def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, requ reactor, request, temp_dir, introducer_furl, flog_gatherer, "alice", web_port="tcp:9980:interface=localhost", storage=False, + # We're going to kill this ourselves, so no need for finalizer to + # do it: + finalize=False, ) ) + await_client_ready(process) + + # 1. Create a new RW directory cap: + cli(process, "create-alias", "test") + rwcap = loads(cli(process, "list-aliases", "--json"))["test"]["readwrite"] + + # 2. Enable SFTP on the node: + host_ssh_key_path = join(process.node_dir, "private", "ssh_host_rsa_key") + accounts_path = join(process.node_dir, "private", "accounts") + with open(join(process.node_dir, "tahoe.cfg"), "a") as f: + f.write("""\ +[sftpd] +enabled = true +port = tcp:8022:interface=127.0.0.1 +host_pubkey_file = {ssh_key_path}.pub +host_privkey_file = {ssh_key_path} +accounts.file = {accounts_path} +""".format(ssh_key_path=host_ssh_key_path, accounts_path=accounts_path)) + generate_ssh_key(host_ssh_key_path) + + # 3. Add an SFTP access file with username/password and SSH key auth. + + # The client SSH key would normally live elsewhere (typically ~/.ssh), but + # for convenience's sake we put it inside the node directory for testing. + client_ssh_key_path = join(process.node_dir, "private", "ssh_client_rsa_key") + generate_ssh_key(client_ssh_key_path) + # Pub key format is "ssh-rsa <key> <comment>". We want the key.
ssh_public_key = open(client_ssh_key_path + ".pub").read().strip().split()[1] + with open(accounts_path, "w") as f: + f.write("""\ +alice password {rwcap} + +alice2 ssh-rsa {ssh_public_key} {rwcap} +""".format(rwcap=rwcap, ssh_public_key=ssh_public_key)) + + # 4. Restart the node with new SFTP config. + process.kill() + pytest_twisted.blockon(_run_node(reactor, process.node_dir, request, None)) + await_client_ready(process) return process @@ -490,7 +537,13 @@ def tor_network(reactor, temp_dir, chutney, request): path=join(chutney_dir), env=env, ) - pytest_twisted.blockon(proto.done) + try: + block_with_timeout(proto.done, reactor) + except ProcessTerminated: + # If this doesn't exit cleanly, that's fine, that shouldn't fail + # the test suite. + pass + request.addfinalizer(cleanup) return chut diff --git a/integration/test_sftp.py b/integration/test_sftp.py new file mode 100644 index 000000000..6171c7413 --- /dev/null +++ b/integration/test_sftp.py @@ -0,0 +1,162 @@ +""" +It's possible to create/rename/delete files and directories in Tahoe-LAFS using +SFTP. + +These tests use Paramiko, rather than Twisted's Conch, because: + + 1. It's a different implementation, so we're not testing Conch against + itself. + + 2. Its API is much simpler to use. +""" + +from __future__ import unicode_literals +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + +from posixpath import join +from stat import S_ISDIR + +from paramiko import SSHClient +from paramiko.client import AutoAddPolicy +from paramiko.sftp_client import SFTPClient +from paramiko.ssh_exception import AuthenticationException +from paramiko.rsakey import RSAKey + +import pytest + +from .util import generate_ssh_key, run_in_thread + + +def connect_sftp(connect_args={"username": "alice", "password": "password"}): + """Create an SFTP client.""" + client = SSHClient() + client.set_missing_host_key_policy(AutoAddPolicy) + client.connect("localhost", port=8022, look_for_keys=False, + allow_agent=False, **connect_args) + sftp = SFTPClient.from_transport(client.get_transport()) + + def rmdir(path, delete_root=True): + for f in sftp.listdir_attr(path=path): + childpath = join(path, f.filename) + if S_ISDIR(f.st_mode): + rmdir(childpath) + else: + sftp.remove(childpath) + if delete_root: + sftp.rmdir(path) + + # Delete any files left over from previous tests :( + rmdir("/", delete_root=False) + + return sftp + + +@run_in_thread +def test_bad_account_password_ssh_key(alice, tmpdir): + """ + Can't log in with an unknown username, wrong password, or wrong SSH pub key.
+ """ + # Wrong password, wrong username: + for u, p in [("alice", "wrong"), ("someuser", "password")]: + with pytest.raises(AuthenticationException): + connect_sftp(connect_args={ + "username": u, "password": p, + }) + + another_key = join(str(tmpdir), "ssh_key") + generate_ssh_key(another_key) + good_key = RSAKey(filename=join(alice.node_dir, "private", "ssh_client_rsa_key")) + bad_key = RSAKey(filename=another_key) + + # Wrong key: + with pytest.raises(AuthenticationException): + connect_sftp(connect_args={ + "username": "alice2", "pkey": bad_key, + }) + + # Wrong username: + with pytest.raises(AuthenticationException): + connect_sftp(connect_args={ + "username": "someoneelse", "pkey": good_key, + }) + + +@run_in_thread +def test_ssh_key_auth(alice): + """It's possible to login authenticating with SSH public key.""" + key = RSAKey(filename=join(alice.node_dir, "private", "ssh_client_rsa_key")) + sftp = connect_sftp(connect_args={ + "username": "alice2", "pkey": key + }) + assert sftp.listdir() == [] + + +@run_in_thread +def test_read_write_files(alice): + """It's possible to upload and download files.""" + sftp = connect_sftp() + with sftp.file("myfile", "wb") as f: + f.write(b"abc") + f.write(b"def") + + with sftp.file("myfile", "rb") as f: + assert f.read(4) == b"abcd" + assert f.read(2) == b"ef" + assert f.read(1) == b"" + + +@run_in_thread +def test_directories(alice): + """ + It's possible to create, list directories, and create and remove files in + them. + """ + sftp = connect_sftp() + assert sftp.listdir() == [] + + sftp.mkdir("childdir") + assert sftp.listdir() == ["childdir"] + + with sftp.file("myfile", "wb") as f: + f.write(b"abc") + assert sorted(sftp.listdir()) == ["childdir", "myfile"] + + sftp.chdir("childdir") + assert sftp.listdir() == [] + + with sftp.file("myfile2", "wb") as f: + f.write(b"def") + assert sftp.listdir() == ["myfile2"] + + sftp.chdir(None) # root + with sftp.file("childdir/myfile2", "rb") as f: + assert f.read() == b"def" + + sftp.remove("myfile") + assert sftp.listdir() == ["childdir"] + + sftp.rmdir("childdir") + assert sftp.listdir() == [] + + +@run_in_thread +def test_rename(alice): + """Directories and files can be renamed.""" + sftp = connect_sftp() + sftp.mkdir("dir") + + filepath = join("dir", "file") + with sftp.file(filepath, "wb") as f: + f.write(b"abc") + + sftp.rename(filepath, join("dir", "file2")) + sftp.rename("dir", "dir2") + + with sftp.file(join("dir2", "file2"), "rb") as f: + assert f.read() == b"abc" diff --git a/integration/test_web.py b/integration/test_web.py index fe2137ff3..aab11412f 100644 --- a/integration/test_web.py +++ b/integration/test_web.py @@ -127,12 +127,12 @@ def test_deep_stats(alice): dircap_uri, data={ u"t": u"upload", - u"when_done": u".", }, files={ u"file": FILE_CONTENTS, }, ) + resp.raise_for_status() # confirm the file is in the directory resp = requests.get( @@ -175,6 +175,7 @@ def test_deep_stats(alice): time.sleep(.5) +@util.run_in_thread def test_status(alice): """ confirm we get something sensible from /status and the various sub-types diff --git a/integration/util.py b/integration/util.py index eed073225..256fd68c1 100644 --- a/integration/util.py +++ b/integration/util.py @@ -5,6 +5,7 @@ from os import mkdir, environ from os.path import exists, join from six.moves import StringIO from functools import partial +from subprocess import check_output from twisted.python.filepath import ( FilePath, @@ -12,9 +13,13 @@ from twisted.python.filepath import ( from twisted.internet.defer import Deferred, succeed 
from twisted.internet.protocol import ProcessProtocol from twisted.internet.error import ProcessExitedAlready, ProcessDone +from twisted.internet.threads import deferToThread import requests +from paramiko.rsakey import RSAKey +from boltons.funcutils import wraps + from allmydata.util.configutil import ( get_config, set_config, @@ -25,6 +30,12 @@ from allmydata import client import pytest_twisted +def block_with_timeout(deferred, reactor, timeout=120): + """Block until the Deferred has a result, but time out instead of waiting forever.""" + deferred.addTimeout(timeout, reactor) + return pytest_twisted.blockon(deferred) + + class _ProcessExitedProtocol(ProcessProtocol): """ Internal helper that .callback()s on self.done when the process @@ -123,11 +134,12 @@ def _cleanup_tahoe_process(tahoe_transport, exited): :return: After the process has exited. """ + from twisted.internet import reactor try: print("signaling {} with TERM".format(tahoe_transport.pid)) tahoe_transport.signalProcess('TERM') print("signaled, blocking on exit") - pytest_twisted.blockon(exited) + block_with_timeout(exited, reactor) print("exited, goodbye") except ProcessExitedAlready: pass @@ -175,11 +187,15 @@ class TahoeProcess(object): u"portnum", ) + def kill(self): + """Kill the process, block until it's done.""" + _cleanup_tahoe_process(self.transport, self.transport.exited) + def __str__(self): return "<TahoeProcess in '{}'>".format(self._node_dir) -def _run_node(reactor, node_dir, request, magic_text): +def _run_node(reactor, node_dir, request, magic_text, finalize=True): """ Run a tahoe process from its node_dir. @@ -203,7 +219,8 @@ def _run_node(reactor, node_dir, request, magic_text): ) transport.exited = protocol.exited - request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited)) + if finalize: + request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited)) # XXX abusing the Deferred; should use .when_magic_seen() pattern @@ -222,7 +239,8 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam magic_text=None, needed=2, happy=3, - total=4): + total=4, + finalize=True): """ Helper to create a single node, run it and return the instance spawnProcess returned (ITransport) @@ -270,7 +288,7 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam d = Deferred() d.callback(None) d.addCallback(lambda _: created_d) - d.addCallback(lambda _: _run_node(reactor, node_dir, request, magic_text)) + d.addCallback(lambda _: _run_node(reactor, node_dir, request, magic_text, finalize=finalize)) return d @@ -390,17 +408,13 @@ def await_file_vanishes(path, timeout=10): raise FileShouldVanishException(path, timeout) -def cli(request, reactor, node_dir, *argv): +def cli(node, *argv): """ - Run a tahoe CLI subcommand for a given node, optionally running - under coverage if '--coverage' was supplied. + Run a tahoe CLI subcommand for a given node in a blocking manner, returning + the output.
""" - proto = _CollectOutputProtocol() - _tahoe_runner_optional_coverage( - proto, reactor, request, - ['--node-directory', node_dir] + list(argv), - ) - return proto.done + arguments = ["tahoe", '--node-directory', node.node_dir] + return check_output(arguments + list(argv)) def node_url(node_dir, uri_fragment): @@ -505,3 +519,36 @@ def await_client_ready(tahoe, timeout=10, liveness=60*2): tahoe, ) ) + + +def generate_ssh_key(path): + """Create a new SSH private/public key pair.""" + key = RSAKey.generate(2048) + key.write_private_key_file(path) + with open(path + ".pub", "wb") as f: + f.write(b"%s %s" % (key.get_name(), key.get_base64())) + + +def run_in_thread(f): + """Decorator for integration tests that runs code in a thread. + + Because we're using pytest_twisted, tests that rely on the reactor are + expected to return a Deferred and use async APIs so the reactor can run. + + In the case of the integration test suite, it launches nodes in the + background using Twisted APIs. The nodes stdout and stderr is read via + Twisted code. If the reactor doesn't run, reads don't happen, and + eventually the buffers fill up, and the nodes block when they try to flush + logs. + + We can switch to Twisted APIs (treq instead of requests etc.), but + sometimes it's easier or expedient to just have a blocking test. So this + decorator allows you to run the test in a thread, and the reactor can keep + running in the main thread. + + See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3597 for tracking bug. + """ + @wraps(f) + def test(*args, **kwargs): + return deferToThread(lambda: f(*args, **kwargs)) + return test diff --git a/misc/operations_helpers/provisioning/provisioning.py b/misc/operations_helpers/provisioning/provisioning.py index 37acd16d2..d6dfc4cd7 100644 --- a/misc/operations_helpers/provisioning/provisioning.py +++ b/misc/operations_helpers/provisioning/provisioning.py @@ -46,7 +46,7 @@ class ProvisioningTool(rend.Page): req = inevow.IRequest(ctx) def getarg(name, astype=int): - if req.method != "POST": + if req.method != b"POST": return None if name in req.fields: return astype(req.fields[name].value) diff --git a/mypy.ini b/mypy.ini new file mode 100644 index 000000000..01cbb57a8 --- /dev/null +++ b/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +ignore_missing_imports = True +plugins=mypy_zope:plugin diff --git a/newsfragments/3326.installation b/newsfragments/3326.installation new file mode 100644 index 000000000..2a3a64e32 --- /dev/null +++ b/newsfragments/3326.installation @@ -0,0 +1 @@ +Debian 8 support has been replaced with Debian 10 support. diff --git a/newsfragments/3384.minor b/newsfragments/3384.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3385.minor b/newsfragments/3385.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3399.feature b/newsfragments/3399.feature new file mode 100644 index 000000000..d30a91679 --- /dev/null +++ b/newsfragments/3399.feature @@ -0,0 +1 @@ +Added 'typechecks' environment for tox running mypy and performing static typechecks. 
diff --git a/newsfragments/3529.minor b/newsfragments/3529.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3534.minor b/newsfragments/3534.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3536.minor b/newsfragments/3536.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3552.minor b/newsfragments/3552.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3564.minor b/newsfragments/3564.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3565.minor b/newsfragments/3565.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3566.minor b/newsfragments/3566.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3567.minor b/newsfragments/3567.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3568.minor b/newsfragments/3568.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3572.minor b/newsfragments/3572.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3574.minor b/newsfragments/3574.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3575.minor b/newsfragments/3575.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3576.minor b/newsfragments/3576.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3577.minor b/newsfragments/3577.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3578.minor b/newsfragments/3578.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3579.minor b/newsfragments/3579.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3580.minor b/newsfragments/3580.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3582.minor b/newsfragments/3582.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3583.removed b/newsfragments/3583.removed new file mode 100644 index 000000000..a3fce48be --- /dev/null +++ b/newsfragments/3583.removed @@ -0,0 +1 @@ +FTP is no longer supported by Tahoe-LAFS. Please use the SFTP support instead. \ No newline at end of file diff --git a/newsfragments/3584.bugfix b/newsfragments/3584.bugfix new file mode 100644 index 000000000..faf57713b --- /dev/null +++ b/newsfragments/3584.bugfix @@ -0,0 +1 @@ +The SFTP frontend, which was previously broken, has been fixed; public key auth in particular should now work more consistently. \ No newline at end of file diff --git a/newsfragments/3587.minor b/newsfragments/3587.minor new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/newsfragments/3587.minor @@ -0,0 +1 @@ + diff --git a/newsfragments/3588.incompat b/newsfragments/3588.incompat new file mode 100644 index 000000000..402ae8479 --- /dev/null +++ b/newsfragments/3588.incompat @@ -0,0 +1 @@ +The Tahoe command line now always uses UTF-8 to decode its arguments, regardless of locale. diff --git a/newsfragments/3588.minor b/newsfragments/3588.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3589.minor b/newsfragments/3589.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3590.bugfix b/newsfragments/3590.bugfix new file mode 100644 index 000000000..aa504a5e3 --- /dev/null +++ b/newsfragments/3590.bugfix @@ -0,0 +1 @@ +Fixed an issue where redirecting old-style URIs (/uri/?uri=...) didn't work.
\ No newline at end of file diff --git a/newsfragments/3591.minor b/newsfragments/3591.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3592.minor b/newsfragments/3592.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3593.minor b/newsfragments/3593.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3594.minor b/newsfragments/3594.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3595.minor b/newsfragments/3595.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3596.minor b/newsfragments/3596.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3599.minor b/newsfragments/3599.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3600.minor b/newsfragments/3600.minor new file mode 100644 index 000000000..e69de29bb diff --git a/newsfragments/3612.minor b/newsfragments/3612.minor new file mode 100644 index 000000000..e69de29bb diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix index 148763c2f..f3ccf950d 100644 --- a/nix/tahoe-lafs.nix +++ b/nix/tahoe-lafs.nix @@ -23,17 +23,12 @@ python.pkgs.buildPythonPackage rec { # This list is over-zealous because it's more work to disable individual # tests with in a module. - # test_system is a lot of integration-style tests that do a lot of real - # networking between many processes. They sometimes fail spuriously. - rm src/allmydata/test/test_system.py - # Many of these tests don't properly skip when i2p or tor dependencies are # not supplied (and we are not supplying them). rm src/allmydata/test/test_i2p_provider.py rm src/allmydata/test/test_connections.py rm src/allmydata/test/cli/test_create.py rm src/allmydata/test/test_client.py - rm src/allmydata/test/test_runner.py ''; diff --git a/setup.py b/setup.py index c26805684..5dc68d367 100644 --- a/setup.py +++ b/setup.py @@ -63,12 +63,8 @@ install_requires = [ # version of cryptography will *really* be installed. "cryptography >= 2.6", - # * We need Twisted 10.1.0 for the FTP frontend in order for - # Twisted's FTP server to support asynchronous close. # * The SFTP frontend depends on Twisted 11.0.0 to fix the SSH server # rekeying bug - # * The FTP frontend depends on Twisted >= 11.1.0 for - # filepath.Permissions # * The SFTP frontend and manhole depend on the conch extra. However, we # can't explicitly declare that without an undesirable dependency on gmpy, # as explained in ticket #2740. @@ -111,7 +107,9 @@ install_requires = [ # Eliot is contemplating dropping Python 2 support. Stick to a version we # know works on Python 2.7. - "eliot ~= 1.7", + "eliot ~= 1.7 ; python_version < '3.0'", + # On Python 3, we want a new enough version to support custom JSON encoders. + "eliot >= 1.13.0 ; python_version > '3.0'", # Pyrsistent 0.17.0 (which we use by way of Eliot) has dropped # Python 2 entirely; stick to the version known to work for us. @@ -383,10 +381,7 @@ setup(name="tahoe-lafs", # also set in __init__.py # this version from time to time, but we will do it # intentionally. "pyflakes == 2.2.0", - # coverage 5.0 breaks the integration tests in some opaque way. - # This probably needs to be addressed in a more permanent way - # eventually... 
- "coverage ~= 4.5", + "coverage ~= 5.0", "mock", "tox", "pytest", @@ -400,6 +395,8 @@ setup(name="tahoe-lafs", # also set in __init__.py "html5lib", "junitxml", "tenacity", + "paramiko", + "pytest-timeout", ] + tor_requires + i2p_requires, "tor": tor_requires, "i2p": i2p_requires, diff --git a/src/allmydata/__init__.py b/src/allmydata/__init__.py index 15d5fb240..3157c8c80 100644 --- a/src/allmydata/__init__.py +++ b/src/allmydata/__init__.py @@ -14,7 +14,9 @@ __all__ = [ __version__ = "unknown" try: - from allmydata._version import __version__ + # type ignored as it fails in CI + # (https://app.circleci.com/pipelines/github/tahoe-lafs/tahoe-lafs/1647/workflows/60ae95d4-abe8-492c-8a03-1ad3b9e42ed3/jobs/40972) + from allmydata._version import __version__ # type: ignore except ImportError: # We're running in a tree that hasn't run update_version, and didn't # come with a _version.py, so we don't know what our version is. @@ -24,7 +26,9 @@ except ImportError: full_version = "unknown" branch = "unknown" try: - from allmydata._version import full_version, branch + # type ignored as it fails in CI + # (https://app.circleci.com/pipelines/github/tahoe-lafs/tahoe-lafs/1647/workflows/60ae95d4-abe8-492c-8a03-1ad3b9e42ed3/jobs/40972) + from allmydata._version import full_version, branch # type: ignore except ImportError: # We're running in a tree that hasn't run update_version, and didn't # come with a _version.py, so we don't know what our full version or diff --git a/src/allmydata/blacklist.py b/src/allmydata/blacklist.py index 89ee81a96..b7e1d0956 100644 --- a/src/allmydata/blacklist.py +++ b/src/allmydata/blacklist.py @@ -1,3 +1,14 @@ +""" +Ported to Python 3. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 import os @@ -34,10 +45,10 @@ class Blacklist(object): try: if self.last_mtime is None or current_mtime > self.last_mtime: self.entries.clear() - with open(self.blacklist_fn, "r") as f: + with open(self.blacklist_fn, "rb") as f: for line in f: line = line.strip() - if not line or line.startswith("#"): + if not line or line.startswith(b"#"): continue si_s, reason = line.split(None, 1) si = base32.a2b(si_s) # must be valid base32 diff --git a/src/allmydata/client.py b/src/allmydata/client.py index 75e20951b..f5e603490 100644 --- a/src/allmydata/client.py +++ b/src/allmydata/client.py @@ -86,12 +86,6 @@ _client_config = configutil.ValidConfiguration( "shares.total", "storage.plugins", ), - "ftpd": ( - "accounts.file", - "accounts.url", - "enabled", - "port", - ), "storage": ( "debug_discard", "enabled", @@ -656,7 +650,6 @@ class _Client(node.Node, pollmixin.PollMixin): raise ValueError("config error: helper is enabled, but tub " "is not listening ('tub.port=' is empty)") self.init_helper() - self.init_ftp_server() self.init_sftp_server() # If the node sees an exit_trigger file, it will poll every second to see @@ -714,7 +707,7 @@ class _Client(node.Node, pollmixin.PollMixin): def get_long_nodeid(self): # this matches what IServer.get_longname() says about us elsewhere vk_string = ed25519.string_from_verifying_key(self._node_public_key) - return remove_prefix(vk_string, "pub-") + return remove_prefix(vk_string, b"pub-") def get_long_tubid(self): return idlib.nodeid_b2a(self.nodeid) 
@@ -898,10 +891,6 @@ class _Client(node.Node, pollmixin.PollMixin): if helper_furl in ("None", ""): helper_furl = None - # FURLs need to be bytes: - if helper_furl is not None: - helper_furl = helper_furl.encode("utf-8") - DEP = self.encoding_params DEP["k"] = int(self.config.get_config("client", "shares.needed", DEP["k"])) DEP["n"] = int(self.config.get_config("client", "shares.total", DEP["n"])) @@ -1036,18 +1025,6 @@ class _Client(node.Node, pollmixin.PollMixin): ) ws.setServiceParent(self) - def init_ftp_server(self): - if self.config.get_config("ftpd", "enabled", False, boolean=True): - accountfile = self.config.get_config("ftpd", "accounts.file", None) - if accountfile: - accountfile = self.config.get_config_path(accountfile) - accounturl = self.config.get_config("ftpd", "accounts.url", None) - ftp_portstr = self.config.get_config("ftpd", "port", "8021") - - from allmydata.frontends import ftpd - s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr) - s.setServiceParent(self) - def init_sftp_server(self): if self.config.get_config("sftpd", "enabled", False, boolean=True): accountfile = self.config.get_config("sftpd", "accounts.file", None) diff --git a/src/allmydata/codec.py b/src/allmydata/codec.py index a4baab4b6..19345959e 100644 --- a/src/allmydata/codec.py +++ b/src/allmydata/codec.py @@ -57,6 +57,10 @@ class CRSEncoder(object): return defer.succeed((shares, desired_share_ids)) + def encode_proposal(self, data, desired_share_ids=None): + raise NotImplementedError() + + @implementer(ICodecDecoder) class CRSDecoder(object): diff --git a/src/allmydata/deep_stats.py b/src/allmydata/deep_stats.py index c18adb5be..bfb43ebae 100644 --- a/src/allmydata/deep_stats.py +++ b/src/allmydata/deep_stats.py @@ -1,4 +1,15 @@ -"""Implementation of the deep stats class.""" +"""Implementation of the deep stats class. + +Ported to Python 3. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 import math @@ -13,7 +24,7 @@ from allmydata.util import mathutil class DeepStats(object): """Deep stats object. - Holds results of the deep-stats opetation. + Holds results of the deep-stats operation. Used for json generation in the API.""" # Json API version. 
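Several of the files touched in this change gain the same future-based compatibility header. Roughly, it makes the Python 3 builtin names behave the same way under Python 2; a sketch of what that buys, assuming the future library is installed:

    from __future__ import unicode_literals

    from future.utils import PY2
    if PY2:
        from future.builtins import str, bytes  # noqa: F401

    # With the backported builtins, text-vs-bytes checks read the same
    # on both interpreters: str means text, bytes means raw bytes.
    def is_text(value):
        return isinstance(value, str) and not isinstance(value, bytes)

    assert is_text("ported")      # a unicode literal on both versions
    assert not is_text(b"raw")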
@@ -121,7 +132,7 @@ class DeepStats(object): h[bucket] += 1 def get_results(self): - """Returns deep-stats resutls.""" + """Returns deep-stats results.""" stats = self.stats.copy() for key in self.histograms: h = self.histograms[key] diff --git a/src/allmydata/dirnode.py b/src/allmydata/dirnode.py index fd6a9cc8c..6871b94c7 100644 --- a/src/allmydata/dirnode.py +++ b/src/allmydata/dirnode.py @@ -18,7 +18,6 @@ import time from zope.interface import implementer from twisted.internet import defer from foolscap.api import fireEventually -import json from allmydata.crypto import aes from allmydata.deep_stats import DeepStats @@ -31,7 +30,7 @@ from allmydata.interfaces import IFilesystemNode, IDirectoryNode, IFileNode, \ from allmydata.check_results import DeepCheckResults, \ DeepCheckAndRepairResults from allmydata.monitor import Monitor -from allmydata.util import hashutil, base32, log +from allmydata.util import hashutil, base32, log, jsonbytes as json from allmydata.util.encodingutil import quote_output, normalize from allmydata.util.assertutil import precondition from allmydata.util.netstring import netstring, split_netstring @@ -569,7 +568,7 @@ class DirectoryNode(object): d = self.get_child_and_metadata(childnamex) return d - def set_uri(self, namex, writecap, readcap, metadata=None, overwrite=True): + def set_uri(self, namex, writecap, readcap=None, metadata=None, overwrite=True): precondition(isinstance(writecap, (bytes, type(None))), writecap) precondition(isinstance(readcap, (bytes, type(None))), readcap) diff --git a/src/allmydata/frontends/auth.py b/src/allmydata/frontends/auth.py index 1bd481321..de406d604 100644 --- a/src/allmydata/frontends/auth.py +++ b/src/allmydata/frontends/auth.py @@ -4,8 +4,8 @@ from zope.interface import implementer from twisted.web.client import getPage from twisted.internet import defer from twisted.cred import error, checkers, credentials -from twisted.conch import error as conch_error from twisted.conch.ssh import keys +from twisted.conch.checkers import SSHPublicKeyChecker, InMemorySSHKeyDB from allmydata.util import base32 from allmydata.util.fileutil import abspath_expanduser_unicode @@ -29,7 +29,7 @@ class AccountFileChecker(object): def __init__(self, client, accountfile): self.client = client self.passwords = {} - self.pubkeys = {} + pubkeys = {} self.rootcaps = {} with open(abspath_expanduser_unicode(accountfile), "r") as f: for line in f: @@ -40,12 +40,14 @@ class AccountFileChecker(object): if passwd.startswith("ssh-"): bits = rest.split() keystring = " ".join([passwd] + bits[:-1]) + key = keys.Key.fromString(keystring) rootcap = bits[-1] - self.pubkeys[name] = keystring + pubkeys[name] = [key] else: self.passwords[name] = passwd rootcap = rest self.rootcaps[name] = rootcap + self._pubkeychecker = SSHPublicKeyChecker(InMemorySSHKeyDB(pubkeys)) def _avatarId(self, username): return FTPAvatarID(username, self.rootcaps[username]) @@ -57,11 +59,9 @@ class AccountFileChecker(object): def requestAvatarId(self, creds): if credentials.ISSHPrivateKey.providedBy(creds): - # Re-using twisted.conch.checkers.SSHPublicKeyChecker here, rather - # than re-implementing all of the ISSHPrivateKey checking logic, - # would be better. That would require Twisted 14.1.0 or newer, - # though. 
- return self._checkKey(creds) + d = defer.maybeDeferred(self._pubkeychecker.requestAvatarId, creds) + d.addCallback(self._avatarId) + return d elif credentials.IUsernameHashedPassword.providedBy(creds): return self._checkPassword(creds) elif credentials.IUsernamePassword.providedBy(creds): @@ -86,28 +86,6 @@ class AccountFileChecker(object): d.addCallback(self._cbPasswordMatch, str(creds.username)) return d - def _checkKey(self, creds): - """ - Determine whether some key-based credentials correctly authenticates a - user. - - Returns a Deferred that fires with the username if so or with an - UnauthorizedLogin failure otherwise. - """ - - # Is the public key indicated by the given credentials allowed to - # authenticate the username in those credentials? - if creds.blob == self.pubkeys.get(creds.username): - if creds.signature is None: - return defer.fail(conch_error.ValidPublicKey()) - - # Is the signature in the given credentials the correct - # signature for the data in those credentials? - key = keys.Key.fromString(creds.blob) - if key.verify(creds.signature, creds.sigData): - return defer.succeed(self._avatarId(creds.username)) - - return defer.fail(error.UnauthorizedLogin()) @implementer(checkers.ICredentialsChecker) class AccountURLChecker(object): diff --git a/src/allmydata/frontends/ftpd.py b/src/allmydata/frontends/ftpd.py deleted file mode 100644 index 0b18df85b..000000000 --- a/src/allmydata/frontends/ftpd.py +++ /dev/null @@ -1,340 +0,0 @@ -from six import ensure_str - -from types import NoneType - -from zope.interface import implementer -from twisted.application import service, strports -from twisted.internet import defer -from twisted.internet.interfaces import IConsumer -from twisted.cred import portal -from twisted.python import filepath -from twisted.protocols import ftp - -from allmydata.interfaces import IDirectoryNode, ExistingChildError, \ - NoSuchChildError -from allmydata.immutable.upload import FileHandle -from allmydata.util.fileutil import EncryptedTemporaryFile -from allmydata.util.assertutil import precondition - -@implementer(ftp.IReadFile) -class ReadFile(object): - def __init__(self, node): - self.node = node - def send(self, consumer): - d = self.node.read(consumer) - return d # when consumed - -@implementer(IConsumer) -class FileWriter(object): - - def registerProducer(self, producer, streaming): - if not streaming: - raise NotImplementedError("Non-streaming producer not supported.") - # we write the data to a temporary file, since Tahoe can't do - # streaming upload yet. - self.f = EncryptedTemporaryFile() - return None - - def unregisterProducer(self): - # the upload actually happens in WriteFile.close() - pass - - def write(self, data): - self.f.write(data) - -@implementer(ftp.IWriteFile) -class WriteFile(object): - - def __init__(self, parent, childname, convergence): - self.parent = parent - self.childname = childname - self.convergence = convergence - - def receive(self): - self.c = FileWriter() - return defer.succeed(self.c) - - def close(self): - u = FileHandle(self.c.f, self.convergence) - d = self.parent.add_file(self.childname, u) - return d - - -class NoParentError(Exception): - pass - -# filepath.Permissions was added in Twisted-11.1.0, which we require. Twisted -# <15.0.0 expected an int, and only does '&' on it. Twisted >=15.0.0 expects -# a filepath.Permissions. This satisfies both. 
- -class IntishPermissions(filepath.Permissions): - def __init__(self, statModeInt): - self._tahoe_statModeInt = statModeInt - filepath.Permissions.__init__(self, statModeInt) - def __and__(self, other): - return self._tahoe_statModeInt & other - -@implementer(ftp.IFTPShell) -class Handler(object): - def __init__(self, client, rootnode, username, convergence): - self.client = client - self.root = rootnode - self.username = username - self.convergence = convergence - - def makeDirectory(self, path): - d = self._get_root(path) - d.addCallback(lambda root_and_path: - self._get_or_create_directories(root_and_path[0], root_and_path[1])) - return d - - def _get_or_create_directories(self, node, path): - if not IDirectoryNode.providedBy(node): - # unfortunately it is too late to provide the name of the - # blocking directory in the error message. - raise ftp.FileExistsError("cannot create directory because there " - "is a file in the way") - if not path: - return defer.succeed(node) - d = node.get(path[0]) - def _maybe_create(f): - f.trap(NoSuchChildError) - return node.create_subdirectory(path[0]) - d.addErrback(_maybe_create) - d.addCallback(self._get_or_create_directories, path[1:]) - return d - - def _get_parent(self, path): - # fire with (parentnode, childname) - path = [unicode(p) for p in path] - if not path: - raise NoParentError - childname = path[-1] - d = self._get_root(path) - def _got_root(root_and_path): - (root, path) = root_and_path - if not path: - raise NoParentError - return root.get_child_at_path(path[:-1]) - d.addCallback(_got_root) - def _got_parent(parent): - return (parent, childname) - d.addCallback(_got_parent) - return d - - def _remove_thing(self, path, must_be_directory=False, must_be_file=False): - d = defer.maybeDeferred(self._get_parent, path) - def _convert_error(f): - f.trap(NoParentError) - raise ftp.PermissionDeniedError("cannot delete root directory") - d.addErrback(_convert_error) - def _got_parent(parent_and_childname): - (parent, childname) = parent_and_childname - d = parent.get(childname) - def _got_child(child): - if must_be_directory and not IDirectoryNode.providedBy(child): - raise ftp.IsNotADirectoryError("rmdir called on a file") - if must_be_file and IDirectoryNode.providedBy(child): - raise ftp.IsADirectoryError("rmfile called on a directory") - return parent.delete(childname) - d.addCallback(_got_child) - d.addErrback(self._convert_error) - return d - d.addCallback(_got_parent) - return d - - def removeDirectory(self, path): - return self._remove_thing(path, must_be_directory=True) - - def removeFile(self, path): - return self._remove_thing(path, must_be_file=True) - - def rename(self, fromPath, toPath): - # the target directory must already exist - d = self._get_parent(fromPath) - def _got_from_parent(fromparent_and_childname): - (fromparent, childname) = fromparent_and_childname - d = self._get_parent(toPath) - d.addCallback(lambda toparent_and_tochildname: - fromparent.move_child_to(childname, - toparent_and_tochildname[0], toparent_and_tochildname[1], - overwrite=False)) - return d - d.addCallback(_got_from_parent) - d.addErrback(self._convert_error) - return d - - def access(self, path): - # we allow access to everything that exists. We are required to raise - # an error for paths that don't exist: FTP clients (at least ncftp) - # uses this to decide whether to mkdir or not. 
- d = self._get_node_and_metadata_for_path(path) - d.addErrback(self._convert_error) - d.addCallback(lambda res: None) - return d - - def _convert_error(self, f): - if f.check(NoSuchChildError): - childname = f.value.args[0].encode("utf-8") - msg = "'%s' doesn't exist" % childname - raise ftp.FileNotFoundError(msg) - if f.check(ExistingChildError): - msg = f.value.args[0].encode("utf-8") - raise ftp.FileExistsError(msg) - return f - - def _get_root(self, path): - # return (root, remaining_path) - path = [unicode(p) for p in path] - if path and path[0] == "uri": - d = defer.maybeDeferred(self.client.create_node_from_uri, - str(path[1])) - d.addCallback(lambda root: (root, path[2:])) - else: - d = defer.succeed((self.root,path)) - return d - - def _get_node_and_metadata_for_path(self, path): - d = self._get_root(path) - def _got_root(root_and_path): - (root,path) = root_and_path - if path: - return root.get_child_and_metadata_at_path(path) - else: - return (root,{}) - d.addCallback(_got_root) - return d - - def _populate_row(self, keys, childnode_and_metadata): - (childnode, metadata) = childnode_and_metadata - values = [] - isdir = bool(IDirectoryNode.providedBy(childnode)) - for key in keys: - if key == "size": - if isdir: - value = 0 - else: - value = childnode.get_size() or 0 - elif key == "directory": - value = isdir - elif key == "permissions": - # Twisted-14.0.2 (and earlier) expected an int, and used it - # in a rendering function that did (mode & NUMBER). - # Twisted-15.0.0 expects a - # twisted.python.filepath.Permissions , and calls its - # .shorthand() method. This provides both. - value = IntishPermissions(0o600) - elif key == "hardlinks": - value = 1 - elif key == "modified": - # follow sftpd convention (i.e. linkmotime in preference to mtime) - if "linkmotime" in metadata.get("tahoe", {}): - value = metadata["tahoe"]["linkmotime"] - else: - value = metadata.get("mtime", 0) - elif key == "owner": - value = self.username - elif key == "group": - value = self.username - else: - value = "??" 
- values.append(value) - return values - - def stat(self, path, keys=()): - # for files only, I think - d = self._get_node_and_metadata_for_path(path) - def _render(node_and_metadata): - (node, metadata) = node_and_metadata - assert not IDirectoryNode.providedBy(node) - return self._populate_row(keys, (node,metadata)) - d.addCallback(_render) - d.addErrback(self._convert_error) - return d - - def list(self, path, keys=()): - # the interface claims that path is a list of unicodes, but in - # practice it is not - d = self._get_node_and_metadata_for_path(path) - def _list(node_and_metadata): - (node, metadata) = node_and_metadata - if IDirectoryNode.providedBy(node): - return node.list() - return { path[-1]: (node, metadata) } # need last-edge metadata - d.addCallback(_list) - def _render(children): - results = [] - for (name, childnode) in children.iteritems(): - # the interface claims that the result should have a unicode - # object as the name, but it fails unless you give it a - # bytestring - results.append( (name.encode("utf-8"), - self._populate_row(keys, childnode) ) ) - return results - d.addCallback(_render) - d.addErrback(self._convert_error) - return d - - def openForReading(self, path): - d = self._get_node_and_metadata_for_path(path) - d.addCallback(lambda node_and_metadata: ReadFile(node_and_metadata[0])) - d.addErrback(self._convert_error) - return d - - def openForWriting(self, path): - path = [unicode(p) for p in path] - if not path: - raise ftp.PermissionDeniedError("cannot STOR to root directory") - childname = path[-1] - d = self._get_root(path) - def _got_root(root_and_path): - (root, path) = root_and_path - if not path: - raise ftp.PermissionDeniedError("cannot STOR to root directory") - return root.get_child_at_path(path[:-1]) - d.addCallback(_got_root) - def _got_parent(parent): - return WriteFile(parent, childname, self.convergence) - d.addCallback(_got_parent) - return d - -from allmydata.frontends.auth import AccountURLChecker, AccountFileChecker, NeedRootcapLookupScheme - - -@implementer(portal.IRealm) -class Dispatcher(object): - def __init__(self, client): - self.client = client - - def requestAvatar(self, avatarID, mind, interface): - assert interface == ftp.IFTPShell - rootnode = self.client.create_node_from_uri(avatarID.rootcap) - convergence = self.client.convergence - s = Handler(self.client, rootnode, avatarID.username, convergence) - def logout(): pass - return (interface, s, None) - - -class FTPServer(service.MultiService): - def __init__(self, client, accountfile, accounturl, ftp_portstr): - precondition(isinstance(accountfile, (unicode, NoneType)), accountfile) - service.MultiService.__init__(self) - - r = Dispatcher(client) - p = portal.Portal(r) - - if accountfile: - c = AccountFileChecker(self, accountfile) - p.registerChecker(c) - if accounturl: - c = AccountURLChecker(self, accounturl) - p.registerChecker(c) - if not accountfile and not accounturl: - # we could leave this anonymous, with just the /uri/CAP form - raise NeedRootcapLookupScheme("must provide some translation") - - f = ftp.FTPFactory(p) - # strports requires a native string. - ftp_portstr = ensure_str(ftp_portstr) - s = strports.service(ftp_portstr, f) - s.setServiceParent(self) diff --git a/src/allmydata/frontends/sftpd.py b/src/allmydata/frontends/sftpd.py index b25ac0270..bc7196de6 100644 --- a/src/allmydata/frontends/sftpd.py +++ b/src/allmydata/frontends/sftpd.py @@ -1,6 +1,17 @@ +""" +Ported to Python 3. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + import six -import heapq, traceback, array, stat, struct -from types import NoneType +import heapq, traceback, stat, struct from stat import S_IFREG, S_IFDIR from time import time, strftime, localtime @@ -45,6 +56,17 @@ from allmydata.util.log import NOISY, OPERATIONAL, WEIRD, \ if six.PY3: long = int + +def createSFTPError(errorCode, errorMessage): + """ + SFTPError that can accept both Unicode and bytes. + + Twisted expects _native_ strings for the SFTPError message, but we often do + Unicode by default even on Python 2. + """ + return SFTPError(errorCode, six.ensure_str(errorMessage)) + + def eventually_callback(d): return lambda res: eventually(d.callback, res) @@ -53,9 +75,9 @@ def eventually_errback(d): def _utf8(x): - if isinstance(x, unicode): - return x.encode('utf-8') if isinstance(x, str): + return x.encode('utf-8') + if isinstance(x, bytes): return x return repr(x) @@ -64,7 +86,7 @@ def _to_sftp_time(t): """SFTP times are unsigned 32-bit integers representing UTC seconds (ignoring leap seconds) since the Unix epoch, January 1 1970 00:00 UTC. A Tahoe time is the corresponding float.""" - return long(t) & long(0xFFFFFFFF) + return int(t) & int(0xFFFFFFFF) def _convert_error(res, request): @@ -73,7 +95,7 @@ def _convert_error(res, request): if not isinstance(res, Failure): logged_res = res - if isinstance(res, str): logged_res = "" % (len(res),) + if isinstance(res, (bytes, str)): logged_res = "" % (len(res),) logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL) return res @@ -92,10 +114,10 @@ def _convert_error(res, request): raise err if err.check(NoSuchChildError): childname = _utf8(err.value.args[0]) - raise SFTPError(FX_NO_SUCH_FILE, childname) + raise createSFTPError(FX_NO_SUCH_FILE, childname) if err.check(NotWriteableError) or err.check(ChildOfWrongTypeError): msg = _utf8(err.value.args[0]) - raise SFTPError(FX_PERMISSION_DENIED, msg) + raise createSFTPError(FX_PERMISSION_DENIED, msg) if err.check(ExistingChildError): # Versions of SFTP after v3 (which is what twisted.conch implements) # define a specific error code for this case: FX_FILE_ALREADY_EXISTS. @@ -104,16 +126,16 @@ def _convert_error(res, request): # to translate the error to the equivalent of POSIX EEXIST, which is # necessary for some picky programs (such as gedit). msg = _utf8(err.value.args[0]) - raise SFTPError(FX_FAILURE, msg) + raise createSFTPError(FX_FAILURE, msg) if err.check(NotImplementedError): - raise SFTPError(FX_OP_UNSUPPORTED, _utf8(err.value)) + raise createSFTPError(FX_OP_UNSUPPORTED, _utf8(err.value)) if err.check(EOFError): - raise SFTPError(FX_EOF, "end of file reached") + raise createSFTPError(FX_EOF, "end of file reached") if err.check(defer.FirstError): _convert_error(err.value.subFailure, request) # We assume that the error message is not anonymity-sensitive. - raise SFTPError(FX_FAILURE, _utf8(err.value)) + raise createSFTPError(FX_FAILURE, _utf8(err.value)) def _repr_flags(flags): @@ -146,7 +168,7 @@ def _lsLine(name, attrs): # Since we now depend on Twisted v10.1, consider calling Twisted's version. 
mode = st_mode - perms = array.array('c', '-'*10) + perms = ["-"] * 10 ft = stat.S_IFMT(mode) if stat.S_ISDIR(ft): perms[0] = 'd' elif stat.S_ISREG(ft): perms[0] = '-' @@ -165,7 +187,7 @@ def _lsLine(name, attrs): if mode&stat.S_IXOTH: perms[9] = 'x' # suid/sgid never set - l = perms.tostring() + l = "".join(perms) l += str(st_nlink).rjust(5) + ' ' un = str(st_uid) l += un.ljust(9) @@ -182,6 +204,7 @@ def _lsLine(name, attrs): l += strftime("%b %d %Y ", localtime(st_mtime)) else: l += strftime("%b %d %H:%M ", localtime(st_mtime)) + l = l.encode("utf-8") l += name return l @@ -223,7 +246,7 @@ def _populate_attrs(childnode, metadata, size=None): if childnode and size is None: size = childnode.get_size() if size is not None: - _assert(isinstance(size, (int, long)) and not isinstance(size, bool), size=size) + _assert(isinstance(size, int) and not isinstance(size, bool), size=size) attrs['size'] = size perms = S_IFREG | 0o666 @@ -255,7 +278,7 @@ def _attrs_to_metadata(attrs): for key in attrs: if key == "mtime" or key == "ctime" or key == "createtime": - metadata[key] = long(attrs[key]) + metadata[key] = int(attrs[key]) elif key.startswith("ext_"): metadata[key] = str(attrs[key]) @@ -267,7 +290,7 @@ def _attrs_to_metadata(attrs): def _direntry_for(filenode_or_parent, childname, filenode=None): - precondition(isinstance(childname, (unicode, NoneType)), childname=childname) + precondition(isinstance(childname, (str, type(None))), childname=childname) if childname is None: filenode_or_parent = filenode @@ -275,7 +298,7 @@ def _direntry_for(filenode_or_parent, childname, filenode=None): if filenode_or_parent: rw_uri = filenode_or_parent.get_write_uri() if rw_uri and childname: - return rw_uri + "/" + childname.encode('utf-8') + return rw_uri + b"/" + childname.encode('utf-8') else: return rw_uri @@ -327,7 +350,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): if size < self.current_size or size < self.downloaded: self.f.truncate(size) if size > self.current_size: - self.overwrite(self.current_size, "\x00" * (size - self.current_size)) + self.overwrite(self.current_size, b"\x00" * (size - self.current_size)) self.current_size = size # make the invariant self.download_size <= self.current_size be true again @@ -335,7 +358,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): self.download_size = size if self.downloaded >= self.download_size: - self.download_done("size changed") + self.download_done(b"size changed") def registerProducer(self, p, streaming): if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY) @@ -410,21 +433,21 @@ class OverwriteableFileConsumer(PrefixingLogMixin): milestone = end while len(self.milestones) > 0: - (next, d) = self.milestones[0] - if next > milestone: + (next_, d) = self.milestones[0] + if next_ > milestone: return - if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY) + if noisy: self.log("MILESTONE %r %r" % (next_, d), level=NOISY) heapq.heappop(self.milestones) - eventually_callback(d)("reached") + eventually_callback(d)(b"reached") if milestone >= self.download_size: - self.download_done("reached download size") + self.download_done(b"reached download size") def overwrite(self, offset, data): if noisy: self.log(".overwrite(%r, )" % (offset, len(data)), level=NOISY) if self.is_closed: self.log("overwrite called on a closed OverwriteableFileConsumer", level=WEIRD) - raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle") + raise createSFTPError(FX_BAD_MESSAGE, "cannot write to a closed file 
handle") if offset > self.current_size: # Normally writing at an offset beyond the current end-of-file @@ -435,7 +458,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): # the gap between the current EOF and the offset. self.f.seek(self.current_size) - self.f.write("\x00" * (offset - self.current_size)) + self.f.write(b"\x00" * (offset - self.current_size)) start = self.current_size else: self.f.seek(offset) @@ -455,7 +478,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY) if self.is_closed: self.log("read called on a closed OverwriteableFileConsumer", level=WEIRD) - raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") + raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") # Note that the overwrite method is synchronous. When a write request is processed # (e.g. a writeChunk request on the async queue of GeneralSFTPFile), overwrite will @@ -509,7 +532,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): return d def download_done(self, res): - _assert(isinstance(res, (str, Failure)), res=res) + _assert(isinstance(res, (bytes, Failure)), res=res) # Only the first call to download_done counts, but we log subsequent calls # (multiple calls are normal). if self.done_status is not None: @@ -526,8 +549,8 @@ class OverwriteableFileConsumer(PrefixingLogMixin): eventually_callback(self.done)(None) while len(self.milestones) > 0: - (next, d) = self.milestones[0] - if noisy: self.log("MILESTONE FINISH %r %r %r" % (next, d, res), level=NOISY) + (next_, d) = self.milestones[0] + if noisy: self.log("MILESTONE FINISH %r %r %r" % (next_, d, res), level=NOISY) heapq.heappop(self.milestones) # The callback means that the milestone has been reached if # it is ever going to be. Note that the file may have been @@ -541,7 +564,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin): self.f.close() except Exception as e: self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD) - self.download_done("closed") + self.download_done(b"closed") return self.done_status def unregisterProducer(self): @@ -565,7 +588,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath) if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY) - precondition(isinstance(userpath, str) and IFileNode.providedBy(filenode), + precondition(isinstance(userpath, bytes) and IFileNode.providedBy(filenode), userpath=userpath, filenode=filenode) self.filenode = filenode self.metadata = metadata @@ -577,7 +600,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): self.log(request, level=OPERATIONAL) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") return defer.execute(_closed) d = defer.Deferred() @@ -594,7 +617,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): # i.e. we respond with an EOF error iff offset is already at EOF. 
if offset >= len(data): - eventually_errback(d)(Failure(SFTPError(FX_EOF, "read at or past end of file"))) + eventually_errback(d)(Failure(createSFTPError(FX_EOF, "read at or past end of file"))) else: eventually_callback(d)(data[offset:offset+length]) # truncated if offset+length > len(data) return data @@ -605,7 +628,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): def writeChunk(self, offset, data): self.log(".writeChunk(%r, ) denied" % (offset, len(data)), level=OPERATIONAL) - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) def close(self): @@ -619,7 +642,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): self.log(request, level=OPERATIONAL) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle") return defer.execute(_closed) d = defer.execute(_populate_attrs, self.filenode, self.metadata) @@ -628,7 +651,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin): def setAttrs(self, attrs): self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL) - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) @@ -649,7 +672,7 @@ class GeneralSFTPFile(PrefixingLogMixin): if noisy: self.log(".__init__(%r, %r = %r, %r, )" % (userpath, flags, _repr_flags(flags), close_notify), level=NOISY) - precondition(isinstance(userpath, str), userpath=userpath) + precondition(isinstance(userpath, bytes), userpath=userpath) self.userpath = userpath self.flags = flags self.close_notify = close_notify @@ -668,11 +691,11 @@ class GeneralSFTPFile(PrefixingLogMixin): # not be set before then. self.consumer = None - def open(self, parent=None, childname=None, filenode=None, metadata=None): + def open(self, parent=None, childname=None, filenode=None, metadata=None): # noqa: F811 self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" % (parent, childname, filenode, metadata), level=OPERATIONAL) - precondition(isinstance(childname, (unicode, NoneType)), childname=childname) + precondition(isinstance(childname, (str, type(None))), childname=childname) precondition(filenode is None or IFileNode.providedBy(filenode), filenode=filenode) precondition(not self.closed, sftpfile=self) @@ -689,7 +712,7 @@ class GeneralSFTPFile(PrefixingLogMixin): if (self.flags & FXF_TRUNC) or not filenode: # We're either truncating or creating the file, so we don't need the old contents. self.consumer = OverwriteableFileConsumer(0, tempfile_maker) - self.consumer.download_done("download not needed") + self.consumer.download_done(b"download not needed") else: self.async_.addCallback(lambda ignored: filenode.get_best_readable_version()) @@ -703,7 +726,7 @@ class GeneralSFTPFile(PrefixingLogMixin): d = version.read(self.consumer, 0, None) def _finished(res): if not isinstance(res, Failure): - res = "download finished" + res = b"download finished" self.consumer.download_done(res) d.addBoth(_finished) # It is correct to drop d here. 
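The createSFTPError wrapper used throughout these hunks leans on six.ensure_str, which always returns the interpreter's native str. A small standalone illustration (not from the patch):

    import six

    for message in [b"cannot read from a closed file handle",
                    u"cannot read from a closed file handle"]:
        native = six.ensure_str(message)
        # On Python 2 this encodes unicode to the native byte str;
        # on Python 3 it decodes bytes to the native text str.
        assert isinstance(native, str)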
@@ -723,7 +746,7 @@ class GeneralSFTPFile(PrefixingLogMixin): def rename(self, new_userpath, new_parent, new_childname): self.log(".rename(%r, %r, %r)" % (new_userpath, new_parent, new_childname), level=OPERATIONAL) - precondition(isinstance(new_userpath, str) and isinstance(new_childname, unicode), + precondition(isinstance(new_userpath, bytes) and isinstance(new_childname, str), new_userpath=new_userpath, new_childname=new_childname) self.userpath = new_userpath self.parent = new_parent @@ -751,11 +774,11 @@ class GeneralSFTPFile(PrefixingLogMixin): self.log(request, level=OPERATIONAL) if not (self.flags & FXF_READ): - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading") return defer.execute(_denied) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle") return defer.execute(_closed) d = defer.Deferred() @@ -773,11 +796,11 @@ class GeneralSFTPFile(PrefixingLogMixin): self.log(".writeChunk(%r, )" % (offset, len(data)), level=OPERATIONAL) if not (self.flags & FXF_WRITE): - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle") return defer.execute(_closed) self.has_changed = True @@ -893,7 +916,7 @@ class GeneralSFTPFile(PrefixingLogMixin): self.log(request, level=OPERATIONAL) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle") return defer.execute(_closed) # Optimization for read-only handles, when we already know the metadata. 
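The permission and closed-handle checks in these hunks all follow the same Twisted idiom: raise inside a thunk and hand it to defer.execute, so even a synchronous failure reaches the caller as an errback. A standalone sketch (the RuntimeError stands in for the SFTP error types):

    from twisted.internet import defer

    def _denied():
        raise RuntimeError("file handle was not opened for writing")

    d = defer.execute(_denied)
    # d has already fired with a Failure wrapping the RuntimeError,
    # so callers handle it exactly like an asynchronous error.
    d.addErrback(lambda failure: print(failure.getErrorMessage()))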
@@ -917,16 +940,16 @@ class GeneralSFTPFile(PrefixingLogMixin): self.log(request, level=OPERATIONAL) if not (self.flags & FXF_WRITE): - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing") return defer.execute(_denied) if self.closed: - def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle") + def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle") return defer.execute(_closed) size = attrs.get("size", None) - if size is not None and (not isinstance(size, (int, long)) or size < 0): - def _bad(): raise SFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer") + if size is not None and (not isinstance(size, int) or size < 0): + def _bad(): raise createSFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer") return defer.execute(_bad) d = defer.Deferred() @@ -1012,7 +1035,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def logout(self): self.log(".logout()", level=OPERATIONAL) - for files in self._heisenfiles.itervalues(): + for files in self._heisenfiles.values(): for f in files: f.abandon() @@ -1039,7 +1062,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): request = "._abandon_any_heisenfiles(%r, %r)" % (userpath, direntry) self.log(request, level=OPERATIONAL) - precondition(isinstance(userpath, str), userpath=userpath) + precondition(isinstance(userpath, bytes), userpath=userpath) # First we synchronously mark all heisenfiles matching the userpath or direntry # as abandoned, and remove them from the two heisenfile dicts. Then we .sync() @@ -1088,8 +1111,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): (from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite)) self.log(request, level=OPERATIONAL) - precondition((isinstance(from_userpath, str) and isinstance(from_childname, unicode) and - isinstance(to_userpath, str) and isinstance(to_childname, unicode)), + precondition((isinstance(from_userpath, bytes) and isinstance(from_childname, str) and + isinstance(to_userpath, bytes) and isinstance(to_childname, str)), from_userpath=from_userpath, from_childname=from_childname, to_userpath=to_userpath, to_childname=to_childname) if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY) @@ -1118,7 +1141,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): # does not mean that they were not committed; it is used to determine whether # a NoSuchChildError from the rename attempt should be suppressed). If overwrite # is False and there were already heisenfiles at the destination userpath or - # direntry, we return a Deferred that fails with SFTPError(FX_PERMISSION_DENIED). + # direntry, we return a Deferred that fails with createSFTPError(FX_PERMISSION_DENIED). 
from_direntry = _direntry_for(from_parent, from_childname) to_direntry = _direntry_for(to_parent, to_childname) @@ -1127,7 +1150,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): (from_direntry, to_direntry, len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY) if not overwrite and (to_userpath in self._heisenfiles or to_direntry in all_heisenfiles): - def _existing(): raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath) + def _existing(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8")) if noisy: self.log("existing", level=NOISY) return defer.execute(_existing) @@ -1161,7 +1184,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): request = "._update_attrs_for_heisenfiles(%r, %r, %r)" % (userpath, direntry, attrs) self.log(request, level=OPERATIONAL) - _assert(isinstance(userpath, str) and isinstance(direntry, str), + _assert(isinstance(userpath, bytes) and isinstance(direntry, bytes), userpath=userpath, direntry=direntry) files = [] @@ -1194,7 +1217,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): request = "._sync_heisenfiles(%r, %r, ignore=%r)" % (userpath, direntry, ignore) self.log(request, level=OPERATIONAL) - _assert(isinstance(userpath, str) and isinstance(direntry, (str, NoneType)), + _assert(isinstance(userpath, bytes) and isinstance(direntry, (bytes, type(None))), userpath=userpath, direntry=direntry) files = [] @@ -1219,7 +1242,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def _remove_heisenfile(self, userpath, parent, childname, file_to_remove): if noisy: self.log("._remove_heisenfile(%r, %r, %r, %r)" % (userpath, parent, childname, file_to_remove), level=NOISY) - _assert(isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)), + _assert(isinstance(userpath, bytes) and isinstance(childname, (str, type(None))), userpath=userpath, childname=childname) direntry = _direntry_for(parent, childname) @@ -1246,8 +1269,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): (existing_file, userpath, flags, _repr_flags(flags), parent, childname, filenode, metadata), level=NOISY) - _assert((isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)) and - (metadata is None or 'no-write' in metadata)), + _assert((isinstance(userpath, bytes) and isinstance(childname, (str, type(None))) and + (metadata is None or 'no-write' in metadata)), userpath=userpath, childname=childname, metadata=metadata) writing = (flags & (FXF_WRITE | FXF_CREAT)) != 0 @@ -1280,17 +1303,17 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): if not (flags & (FXF_READ | FXF_WRITE)): def _bad_readwrite(): - raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set") + raise createSFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set") return defer.execute(_bad_readwrite) if (flags & FXF_EXCL) and not (flags & FXF_CREAT): def _bad_exclcreat(): - raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT") + raise createSFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT") return defer.execute(_bad_exclcreat) path = self._path_from_string(pathstring) if not path: - def _emptypath(): raise SFTPError(FX_NO_SUCH_FILE, "path cannot be empty") + def _emptypath(): raise createSFTPError(FX_NO_SUCH_FILE, "path cannot be empty") return defer.execute(_emptypath) # The combination 
of flags is potentially valid. @@ -1349,20 +1372,20 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def _got_root(root_and_path): (root, path) = root_and_path if root.is_unknown(): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open an unknown cap (or child of an unknown object). " "Upgrading the gateway to a later Tahoe-LAFS version may help") if not path: # case 1 if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY) if not IFileNode.providedBy(root): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a directory cap") if (flags & FXF_WRITE) and root.is_readonly(): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot write to a non-writeable filecap without a parent directory") if flags & FXF_EXCL: - raise SFTPError(FX_FAILURE, + raise createSFTPError(FX_FAILURE, "cannot create a file exclusively when it already exists") # The file does not need to be added to all_heisenfiles, because it is not @@ -1389,7 +1412,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def _got_parent(parent): if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY) if parent.is_unknown(): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a child of an unknown object. " "Upgrading the gateway to a later Tahoe-LAFS version may help") @@ -1404,13 +1427,13 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): # which is consistent with what might happen on a POSIX filesystem. if parent_readonly: - raise SFTPError(FX_FAILURE, + raise createSFTPError(FX_FAILURE, "cannot create a file exclusively when the parent directory is read-only") # 'overwrite=False' ensures failure if the link already exists. # FIXME: should use a single call to set_uri and return (child, metadata) (#1035) - zero_length_lit = "URI:LIT:" + zero_length_lit = b"URI:LIT:" if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" % (parent, zero_length_lit, childname), level=NOISY) d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit, @@ -1436,14 +1459,14 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): metadata['no-write'] = _no_write(parent_readonly, filenode, current_metadata) if filenode.is_unknown(): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open an unknown cap. 
Upgrading the gateway " "to a later Tahoe-LAFS version may help") if not IFileNode.providedBy(filenode): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a directory as if it were a file") if (flags & FXF_WRITE) and metadata['no-write']: - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot open a non-writeable file for writing") return self._make_file(file, userpath, flags, parent=parent, childname=childname, @@ -1453,10 +1476,10 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): f.trap(NoSuchChildError) if not (flags & FXF_CREAT): - raise SFTPError(FX_NO_SUCH_FILE, + raise createSFTPError(FX_NO_SUCH_FILE, "the file does not exist, and was not opened with the creation (CREAT) flag") if parent_readonly: - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot create a file when the parent directory is read-only") return self._make_file(file, userpath, flags, parent=parent, childname=childname) @@ -1495,9 +1518,9 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): (to_parent, to_childname) = to_pair if from_childname is None: - raise SFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI") + raise createSFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI") if to_childname is None: - raise SFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI") + raise createSFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI") # # "It is an error if there already exists a file with the name specified @@ -1512,7 +1535,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): d2.addCallback(lambda ign: to_parent.get(to_childname)) def _expect_fail(res): if not isinstance(res, Failure): - raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath) + raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8")) # It is OK if we fail for errors other than NoSuchChildError, since that probably # indicates some problem accessing the destination directory. @@ -1537,7 +1560,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): if not isinstance(err, Failure) or (renamed and err.check(NoSuchChildError)): return None if not overwrite and err.check(ExistingChildError): - raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath) + raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8")) return err d3.addBoth(_check) @@ -1555,7 +1578,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): path = self._path_from_string(pathstring) metadata = _attrs_to_metadata(attrs) if 'no-write' in metadata: - def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only") + def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only") return defer.execute(_denied) d = self._get_root(path) @@ -1567,7 +1590,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def _get_or_create_directories(self, node, path, metadata): if not IDirectoryNode.providedBy(node): # TODO: provide the name of the blocking file in the error message. 
- def _blocked(): raise SFTPError(FX_FAILURE, "cannot create directory because there + def _blocked(): raise createSFTPError(FX_FAILURE, "cannot create directory because there "is a file in the way") # close enough return defer.execute(_blocked) @@ -1605,7 +1628,7 @@ def _got_parent(parent_and_childname): (parent, childname) = parent_and_childname if childname is None: - raise SFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI") + raise createSFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI") direntry = _direntry_for(parent, childname) d2 = defer.succeed(False) @@ -1636,18 +1659,18 @@ d.addCallback(_got_parent_or_node) def _list(dirnode): if dirnode.is_unknown(): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot list an unknown cap as a directory. Upgrading the gateway " "to a later Tahoe-LAFS version may help") if not IDirectoryNode.providedBy(dirnode): - raise SFTPError(FX_PERMISSION_DENIED, + raise createSFTPError(FX_PERMISSION_DENIED, "cannot list a file as if it were a directory") d2 = dirnode.list() def _render(children): parent_readonly = dirnode.is_readonly() results = [] - for filename, (child, metadata) in children.iteritems(): + for filename, (child, metadata) in list(children.items()): # The file size may be cached or absent. metadata['no-write'] = _no_write(parent_readonly, child, metadata) attrs = _populate_attrs(child, metadata) @@ -1727,7 +1750,7 @@ if "size" in attrs: # this would require us to download and re-upload the truncated/extended # file contents - def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported") + def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "setAttrs with size attribute unsupported") return defer.execute(_unsupported) path = self._path_from_string(pathstring) @@ -1744,7 +1767,7 @@ if childname is None: if updated_heisenfiles: return None - raise SFTPError(FX_NO_SUCH_FILE, userpath) + raise createSFTPError(FX_NO_SUCH_FILE, userpath) else: desired_metadata = _attrs_to_metadata(attrs) if noisy: self.log("desired_metadata = %r" % (desired_metadata,), level=NOISY) @@ -1767,7 +1790,7 @@ def readLink(self, pathstring): self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL) - def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "readLink") + def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "readLink") return defer.execute(_unsupported) def makeLink(self, linkPathstring, targetPathstring): @@ -1776,7 +1799,7 @@ # If this is implemented, note the reversal of arguments described in point 7 of # .
- def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "makeLink") + def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "makeLink") return defer.execute(_unsupported) def extendedRequest(self, extensionName, extensionData): @@ -1785,8 +1808,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): # We implement the three main OpenSSH SFTP extensions; see # - if extensionName == 'posix-rename@openssh.com': - def _bad(): raise SFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request") + if extensionName == b'posix-rename@openssh.com': + def _bad(): raise createSFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request") if 4 > len(extensionData): return defer.execute(_bad) (fromPathLen,) = struct.unpack('>L', extensionData[0:4]) @@ -1803,11 +1826,11 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): # an error, or an FXP_EXTENDED_REPLY. But it happens to do the right thing # (respond with an FXP_STATUS message) if we return a Failure with code FX_OK. def _succeeded(ign): - raise SFTPError(FX_OK, "request succeeded") + raise createSFTPError(FX_OK, "request succeeded") d.addCallback(_succeeded) return d - if extensionName == 'statvfs@openssh.com' or extensionName == 'fstatvfs@openssh.com': + if extensionName == b'statvfs@openssh.com' or extensionName == b'fstatvfs@openssh.com': # f_bsize and f_frsize should be the same to avoid a bug in 'df' return defer.succeed(struct.pack('>11Q', 1024, # uint64 f_bsize /* file system block size */ @@ -1823,7 +1846,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): 65535, # uint64 f_namemax /* maximum filename length */ )) - def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "unsupported %r request " % + def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "unsupported %r request " % (extensionName, len(extensionData))) return defer.execute(_unsupported) @@ -1838,29 +1861,29 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin): def _path_from_string(self, pathstring): if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY) - _assert(isinstance(pathstring, str), pathstring=pathstring) + _assert(isinstance(pathstring, bytes), pathstring=pathstring) # The home directory is the root directory. - pathstring = pathstring.strip("/") - if pathstring == "" or pathstring == ".": + pathstring = pathstring.strip(b"/") + if pathstring == b"" or pathstring == b".": path_utf8 = [] else: - path_utf8 = pathstring.split("/") + path_utf8 = pathstring.split(b"/") # # "Servers SHOULD interpret a path name component ".." as referring to # the parent directory, and "." as referring to the current directory." path = [] for p_utf8 in path_utf8: - if p_utf8 == "..": + if p_utf8 == b"..": # ignore excess .. 
components at the root if len(path) > 0: path = path[:-1] - elif p_utf8 != ".": + elif p_utf8 != b".": try: p = p_utf8.decode('utf-8', 'strict') except UnicodeError: - raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8") + raise createSFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8") path.append(p) if noisy: self.log(" PATH %r" % (path,), level=NOISY) @@ -1979,9 +2002,9 @@ class SFTPServer(service.MultiService): def __init__(self, client, accountfile, accounturl, sftp_portstr, pubkey_file, privkey_file): - precondition(isinstance(accountfile, (unicode, NoneType)), accountfile) - precondition(isinstance(pubkey_file, unicode), pubkey_file) - precondition(isinstance(privkey_file, unicode), privkey_file) + precondition(isinstance(accountfile, (str, type(None))), accountfile) + precondition(isinstance(pubkey_file, str), pubkey_file) + precondition(isinstance(privkey_file, str), privkey_file) service.MultiService.__init__(self) r = Dispatcher(client) @@ -2012,5 +2035,5 @@ class SFTPServer(service.MultiService): f = SSHFactory() f.portal = p - s = strports.service(sftp_portstr, f) + s = strports.service(six.ensure_str(sftp_portstr), f) s.setServiceParent(self) diff --git a/src/allmydata/immutable/downloader/finder.py b/src/allmydata/immutable/downloader/finder.py index 6d222bc73..4f6d1aa14 100644 --- a/src/allmydata/immutable/downloader/finder.py +++ b/src/allmydata/immutable/downloader/finder.py @@ -9,6 +9,7 @@ from __future__ import unicode_literals from future.utils import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 +from six import ensure_str import time now = time.time @@ -98,7 +99,7 @@ class ShareFinder(object): # internal methods def loop(self): - pending_s = ",".join([rt.server.get_name() + pending_s = ",".join([ensure_str(rt.server.get_name()) for rt in self.pending_requests]) # sort? self.log(format="ShareFinder loop: running=%(running)s" " hungry=%(hungry)s, pending=%(pending)s", diff --git a/src/allmydata/immutable/encode.py b/src/allmydata/immutable/encode.py index 9351df501..a9835b6b8 100644 --- a/src/allmydata/immutable/encode.py +++ b/src/allmydata/immutable/encode.py @@ -255,11 +255,11 @@ class Encoder(object): # captures the slot, not the value #d.addCallback(lambda res: self.do_segment(i)) # use this form instead: - d.addCallback(lambda res, i=i: self._encode_segment(i)) + d.addCallback(lambda res, i=i: self._encode_segment(i, is_tail=False)) d.addCallback(self._send_segment, i) d.addCallback(self._turn_barrier) last_segnum = self.num_segments - 1 - d.addCallback(lambda res: self._encode_tail_segment(last_segnum)) + d.addCallback(lambda res: self._encode_segment(last_segnum, is_tail=True)) d.addCallback(self._send_segment, last_segnum) d.addCallback(self._turn_barrier) @@ -317,8 +317,24 @@ class Encoder(object): dl.append(d) return self._gather_responses(dl) - def _encode_segment(self, segnum): - codec = self._codec + def _encode_segment(self, segnum, is_tail): + """ + Encode one segment of input into the configured number of shares. + + :param segnum: Ostensibly, the number of the segment to encode. In + reality, this parameter is ignored and the *next* segment is + encoded and returned. + + :param bool is_tail: ``True`` if this is the last segment, ``False`` + otherwise. + + :return: A ``Deferred`` which fires with a two-tuple. 
The first + element is a list of string-y objects representing the encoded + segment data for one of the shares. The second element is a list + of integers giving the share numbers of the shares in the first + element. + """ + codec = self._tail_codec if is_tail else self._codec start = time.time() # the ICodecEncoder API wants to receive a total of self.segment_size @@ -350,9 +366,11 @@ class Encoder(object): # footprint to 430KiB at the expense of more hash-tree overhead. d = self._gather_data(self.required_shares, input_piece_size, - crypttext_segment_hasher) + crypttext_segment_hasher, allow_short=is_tail) def _done_gathering(chunks): for c in chunks: + # If is_tail then a short trailing chunk will have been padded + # by _gather_data assert len(c) == input_piece_size self._crypttext_hashes.append(crypttext_segment_hasher.digest()) # during this call, we hit 5*segsize memory @@ -365,31 +383,6 @@ class Encoder(object): d.addCallback(_done) return d - def _encode_tail_segment(self, segnum): - - start = time.time() - codec = self._tail_codec - input_piece_size = codec.get_block_size() - - crypttext_segment_hasher = hashutil.crypttext_segment_hasher() - - d = self._gather_data(self.required_shares, input_piece_size, - crypttext_segment_hasher, allow_short=True) - def _done_gathering(chunks): - for c in chunks: - # a short trailing chunk will have been padded by - # _gather_data - assert len(c) == input_piece_size - self._crypttext_hashes.append(crypttext_segment_hasher.digest()) - return codec.encode(chunks) - d.addCallback(_done_gathering) - def _done(res): - elapsed = time.time() - start - self._times["cumulative_encoding"] += elapsed - return res - d.addCallback(_done) - return d - def _gather_data(self, num_chunks, input_chunk_size, crypttext_segment_hasher, allow_short=False): diff --git a/src/allmydata/immutable/literal.py b/src/allmydata/immutable/literal.py index 68db478f3..6ed5571b9 100644 --- a/src/allmydata/immutable/literal.py +++ b/src/allmydata/immutable/literal.py @@ -19,7 +19,7 @@ from twisted.protocols import basic from allmydata.interfaces import IImmutableFileNode, ICheckable from allmydata.uri import LiteralFileURI -@implementer(IImmutableFileNode, ICheckable) + class _ImmutableFileNodeBase(object): def get_write_uri(self): @@ -56,6 +56,7 @@ class _ImmutableFileNodeBase(object): return not self == other +@implementer(IImmutableFileNode, ICheckable) class LiteralFileNode(_ImmutableFileNodeBase): def __init__(self, filecap): diff --git a/src/allmydata/immutable/offloaded.py b/src/allmydata/immutable/offloaded.py index d574b980d..2d2c5c1f5 100644 --- a/src/allmydata/immutable/offloaded.py +++ b/src/allmydata/immutable/offloaded.py @@ -141,7 +141,7 @@ class CHKCheckerAndUEBFetcher(object): @implementer(interfaces.RICHKUploadHelper) -class CHKUploadHelper(Referenceable, upload.CHKUploader): +class CHKUploadHelper(Referenceable, upload.CHKUploader): # type: ignore # warner/foolscap#78 """I am the helper-server -side counterpart to AssistedUploader. I handle peer selection, encoding, and share pushing. I read ciphertext from the remote AssistedUploader. @@ -499,10 +499,13 @@ class LocalCiphertextReader(AskUntilSuccessMixin): # ??. I'm not sure if it makes sense to forward the close message. 
return self.call("close") + # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3561 + def set_upload_status(self, upload_status): + raise NotImplementedError @implementer(interfaces.RIHelper, interfaces.IStatsProducer) -class Helper(Referenceable): +class Helper(Referenceable): # type: ignore # warner/foolscap#78 """ :ivar dict[bytes, CHKUploadHelper] _active_uploads: For any uploads which have been started but not finished, a mapping from storage index to the diff --git a/src/allmydata/immutable/upload.py b/src/allmydata/immutable/upload.py index e77cbb30b..46e01184f 100644 --- a/src/allmydata/immutable/upload.py +++ b/src/allmydata/immutable/upload.py @@ -11,20 +11,32 @@ from future.utils import PY2, native_str if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 from past.builtins import long, unicode +from six import ensure_str + +try: + from typing import List +except ImportError: + pass import os, time, weakref, itertools + +import attr + from zope.interface import implementer from twisted.python import failure from twisted.internet import defer from twisted.application import service -from foolscap.api import Referenceable, Copyable, RemoteCopy, fireEventually +from foolscap.api import Referenceable, Copyable, RemoteCopy from allmydata.crypto import aes from allmydata.util.hashutil import file_renewal_secret_hash, \ file_cancel_secret_hash, bucket_renewal_secret_hash, \ bucket_cancel_secret_hash, plaintext_hasher, \ storage_index_hash, plaintext_segment_hasher, convergence_hasher -from allmydata.util.deferredutil import timeout_call +from allmydata.util.deferredutil import ( + timeout_call, + until, +) from allmydata import hashtree, uri from allmydata.storage.server import si_b2a from allmydata.immutable import encode @@ -385,6 +397,9 @@ class PeerSelector(object): ) return self.happiness_mappings + def add_peers(self, peerids=None): + raise NotImplementedError + class _QueryStatistics(object): @@ -896,13 +911,45 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin): raise UploadUnhappinessError(msg) +@attr.s +class _Accum(object): + """ + Accumulate up to some known amount of ciphertext. + + :ivar remaining: The number of bytes still expected. + :ivar ciphertext: The bytes accumulated so far. + """ + remaining = attr.ib(validator=attr.validators.instance_of(int)) # type: int + ciphertext = attr.ib(default=attr.Factory(list)) # type: List[bytes] + + def extend(self, + size, # type: int + ciphertext, # type: List[bytes] + ): + """ + Accumulate some more ciphertext. + + :param size: The amount of data the new ciphertext represents towards + the goal. This may be more than the actual size of the given + ciphertext if the source has run out of data. + + :param ciphertext: The new ciphertext to accumulate. + """ + self.remaining -= size + self.ciphertext.extend(ciphertext) + + @implementer(IEncryptedUploadable) class EncryptAnUploadable(object): """This is a wrapper that takes an IUploadable and provides IEncryptedUploadable.""" CHUNKSIZE = 50*1024 - def __init__(self, original, log_parent=None, progress=None): + def __init__(self, original, log_parent=None, progress=None, chunk_size=None): + """ + :param chunk_size: The number of bytes to read from the uploadable at a + time, or None for some default. 
+ """ precondition(original.default_params_set, "set_default_encoding_parameters not called on %r before wrapping with EncryptAnUploadable" % (original,)) self.original = IUploadable(original) @@ -916,6 +963,8 @@ class EncryptAnUploadable(object): self._ciphertext_bytes_read = 0 self._status = None self._progress = progress + if chunk_size is not None: + self.CHUNKSIZE = chunk_size def set_upload_status(self, upload_status): self._status = IUploadStatus(upload_status) @@ -1022,47 +1071,53 @@ class EncryptAnUploadable(object): # and size d.addCallback(lambda ignored: self.get_size()) d.addCallback(lambda ignored: self._get_encryptor()) - # then fetch and encrypt the plaintext. The unusual structure here - # (passing a Deferred *into* a function) is needed to avoid - # overflowing the stack: Deferreds don't optimize out tail recursion. - # We also pass in a list, to which _read_encrypted will append - # ciphertext. - ciphertext = [] - d2 = defer.Deferred() - d.addCallback(lambda ignored: - self._read_encrypted(length, ciphertext, hash_only, d2)) - d.addCallback(lambda ignored: d2) + + accum = _Accum(length) + + def action(): + """ + Read some bytes into the accumulator. + """ + return self._read_encrypted(accum, hash_only) + + def condition(): + """ + Check to see if the accumulator has all the data. + """ + return accum.remaining == 0 + + d.addCallback(lambda ignored: until(action, condition)) + d.addCallback(lambda ignored: accum.ciphertext) return d - def _read_encrypted(self, remaining, ciphertext, hash_only, fire_when_done): - if not remaining: - fire_when_done.callback(ciphertext) - return None + def _read_encrypted(self, + ciphertext_accum, # type: _Accum + hash_only, # type: bool + ): + # type: (...) -> defer.Deferred + """ + Read the next chunk of plaintext, encrypt it, and extend the accumulator + with the resulting ciphertext. + """ # tolerate large length= values without consuming a lot of RAM by # reading just a chunk (say 50kB) at a time. This only really matters # when hash_only==True (i.e. resuming an interrupted upload), since # that's the case where we will be skipping over a lot of data. - size = min(remaining, self.CHUNKSIZE) - remaining = remaining - size + size = min(ciphertext_accum.remaining, self.CHUNKSIZE) + # read a chunk of plaintext.. d = defer.maybeDeferred(self.original.read, size) - # N.B.: if read() is synchronous, then since everything else is - # actually synchronous too, we'd blow the stack unless we stall for a - # tick. Once you accept a Deferred from IUploadable.read(), you must - # be prepared to have it fire immediately too. - d.addCallback(fireEventually) def _good(plaintext): # and encrypt it.. # o/' over the fields we go, hashing all the way, sHA! sHA! sHA! o/' ct = self._hash_and_encrypt_plaintext(plaintext, hash_only) - ciphertext.extend(ct) - self._read_encrypted(remaining, ciphertext, hash_only, - fire_when_done) - def _err(why): - fire_when_done.errback(why) + # Intentionally tell the accumulator about the expected size, not + # the actual size. If we run out of data we still want remaining + # to drop otherwise it will never reach 0 and the loop will never + # end. 
+ ciphertext_accum.extend(size, ct) d.addCallback(_good) - d.addErrback(_err) - return None + return d def _hash_and_encrypt_plaintext(self, data, hash_only): assert isinstance(data, (tuple, list)), type(data) @@ -1423,7 +1478,7 @@ class LiteralUploader(object): return self._status @implementer(RIEncryptedUploadable) -class RemoteEncryptedUploadable(Referenceable): +class RemoteEncryptedUploadable(Referenceable): # type: ignore # warner/foolscap#78 def __init__(self, encrypted_uploadable, upload_status): self._eu = IEncryptedUploadable(encrypted_uploadable) @@ -1825,7 +1880,7 @@ class Uploader(service.MultiService, log.PrefixingLogMixin): def startService(self): service.MultiService.startService(self) if self._helper_furl: - self.parent.tub.connectTo(self._helper_furl, + self.parent.tub.connectTo(ensure_str(self._helper_furl), self._got_helper) def _got_helper(self, helper): diff --git a/src/allmydata/interfaces.py b/src/allmydata/interfaces.py index 6d0938dd5..0dd5ddc83 100644 --- a/src/allmydata/interfaces.py +++ b/src/allmydata/interfaces.py @@ -681,7 +681,7 @@ class IURI(Interface): passing into init_from_string.""" -class IVerifierURI(Interface, IURI): +class IVerifierURI(IURI): def init_from_string(uri): """Accept a string (as created by my to_string() method) and populate this instance with its data. I am not normally called directly, @@ -748,7 +748,7 @@ class IProgress(Interface): "Current amount of progress (in percentage)" ) - def set_progress(self, value): + def set_progress(value): """ Sets the current amount of progress. @@ -756,7 +756,7 @@ class IProgress(Interface): set_progress_total. """ - def set_progress_total(self, value): + def set_progress_total(value): """ Sets the total amount of expected progress @@ -859,12 +859,6 @@ class IPeerSelector(Interface): peer selection begins. """ - def confirm_share_allocation(peerid, shnum): - """ - Confirm that an allocated peer=>share pairing has been - successfully established. - """ - def add_peers(peerids=set): """ Update my internal state to include the peers in peerids as @@ -1824,11 +1818,6 @@ class IEncoder(Interface): willing to receive data. """ - def set_size(size): - """Specify the number of bytes that will be encoded. This must be - peformed before get_serialized_params() can be called. - """ - def set_encrypted_uploadable(u): """Provide a source of encrypted upload data. 'u' must implement IEncryptedUploadable. 
diff --git a/src/allmydata/introducer/client.py b/src/allmydata/introducer/client.py index fa1e1efe8..07f8a5f7a 100644 --- a/src/allmydata/introducer/client.py +++ b/src/allmydata/introducer/client.py @@ -11,12 +11,12 @@ if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 from past.builtins import long -from six import ensure_text +from six import ensure_text, ensure_str import time from zope.interface import implementer from twisted.application import service -from foolscap.api import Referenceable, eventually +from foolscap.api import Referenceable from allmydata.interfaces import InsufficientVersionError from allmydata.introducer.interfaces import IIntroducerClient, \ RIIntroducerSubscriberClient_v2 @@ -24,6 +24,9 @@ from allmydata.introducer.common import sign_to_foolscap, unsign_from_foolscap,\ get_tubid_string_from_ann from allmydata.util import log, yamlutil, connection_status from allmydata.util.rrefutil import add_version_to_remote_reference +from allmydata.util.observer import ( + ObserverList, +) from allmydata.crypto.error import BadSignature from allmydata.util.assertutil import precondition @@ -39,8 +42,6 @@ class IntroducerClient(service.Service, Referenceable): nickname, my_version, oldest_supported, sequencer, cache_filepath): self._tub = tub - if isinstance(introducer_furl, str): - introducer_furl = introducer_furl.encode("utf-8") self.introducer_furl = introducer_furl assert isinstance(nickname, str) @@ -64,8 +65,7 @@ class IntroducerClient(service.Service, Referenceable): self._publisher = None self._since = None - self._local_subscribers = [] # (servicename,cb,args,kwargs) tuples - self._subscribed_service_names = set() + self._local_subscribers = {} # {servicename: ObserverList} self._subscriptions = set() # requests we've actually sent # _inbound_announcements remembers one announcement per @@ -96,7 +96,7 @@ class IntroducerClient(service.Service, Referenceable): def startService(self): service.Service.startService(self) self._introducer_error = None - rc = self._tub.connectTo(self.introducer_furl, self._got_introducer) + rc = self._tub.connectTo(ensure_str(self.introducer_furl), self._got_introducer) self._introducer_reconnector = rc def connect_failed(failure): self.log("Initial Introducer connection failed: perhaps it's down", @@ -178,22 +178,22 @@ class IntroducerClient(service.Service, Referenceable): kwargs["facility"] = "tahoe.introducer.client" return log.msg(*args, **kwargs) - def subscribe_to(self, service_name, cb, *args, **kwargs): - self._local_subscribers.append( (service_name,cb,args,kwargs) ) - self._subscribed_service_names.add(service_name) + def subscribe_to(self, service_name, callback, *args, **kwargs): + obs = self._local_subscribers.setdefault(service_name, ObserverList()) + obs.subscribe(lambda key_s, ann: callback(key_s, ann, *args, **kwargs)) self._maybe_subscribe() for index,(ann,key_s,when) in list(self._inbound_announcements.items()): precondition(isinstance(key_s, bytes), key_s) servicename = index[0] if servicename == service_name: - eventually(cb, key_s, ann, *args, **kwargs) + obs.notify(key_s, ann) def _maybe_subscribe(self): if not self._publisher: self.log("want to subscribe, but no introducer yet", level=log.NOISY) return - for service_name in self._subscribed_service_names: + for service_name in self._local_subscribers: if service_name in self._subscriptions: continue self._subscriptions.add(service_name) @@ 
-272,7 +272,7 @@ class IntroducerClient(service.Service, Referenceable): precondition(isinstance(key_s, bytes), key_s) self._debug_counts["inbound_announcement"] += 1 service_name = str(ann["service-name"]) - if service_name not in self._subscribed_service_names: + if service_name not in self._local_subscribers: self.log("announcement for a service we don't care about [%s]" % (service_name,), level=log.UNUSUAL, umid="dIpGNA") self._debug_counts["wrong_service"] += 1 @@ -343,9 +343,9 @@ class IntroducerClient(service.Service, Referenceable): def _deliver_announcements(self, key_s, ann): precondition(isinstance(key_s, bytes), key_s) service_name = str(ann["service-name"]) - for (service_name2,cb,args,kwargs) in self._local_subscribers: - if service_name2 == service_name: - eventually(cb, key_s, ann, *args, **kwargs) + obs = self._local_subscribers.get(service_name) + if obs is not None: + obs.notify(key_s, ann) def connection_status(self): assert self.running # startService builds _introducer_reconnector diff --git a/src/allmydata/introducer/interfaces.py b/src/allmydata/introducer/interfaces.py index 9f08f1943..24fd3945f 100644 --- a/src/allmydata/introducer/interfaces.py +++ b/src/allmydata/introducer/interfaces.py @@ -73,7 +73,7 @@ class IIntroducerClient(Interface): publish their services to the rest of the world, and I help them learn about services available on other nodes.""" - def publish(service_name, ann, signing_key=None): + def publish(service_name, ann, signing_key): """Publish the given announcement dictionary (which must be JSON-serializable), plus some additional keys, to the world. @@ -83,8 +83,7 @@ class IIntroducerClient(Interface): the signing_key, if present, otherwise it is derived from the 'anonymous-storage-FURL' key. - If signing_key= is set to an instance of SigningKey, it will be - used to sign the announcement.""" + signing_key (a SigningKey) will be used to sign the announcement.""" def subscribe_to(service_name, callback, *args, **kwargs): """Call this if you will eventually want to use services with the diff --git a/src/allmydata/introducer/server.py b/src/allmydata/introducer/server.py index 237c30315..339c5a0ac 100644 --- a/src/allmydata/introducer/server.py +++ b/src/allmydata/introducer/server.py @@ -15,6 +15,12 @@ from past.builtins import long from six import ensure_text import time, os.path, textwrap + +try: + from typing import Any, Dict, Union +except ImportError: + pass + from zope.interface import implementer from twisted.application import service from twisted.internet import defer @@ -147,10 +153,12 @@ class IntroducerService(service.MultiService, Referenceable): name = "introducer" # v1 is the original protocol, added in 1.0 (but only advertised starting # in 1.3), removed in 1.12. 
v2 is the new signed protocol, added in 1.10 - VERSION = { #"http://allmydata.org/tahoe/protocols/introducer/v1": { }, + # TODO: reconcile bytes/str for keys + VERSION = { + #"http://allmydata.org/tahoe/protocols/introducer/v1": { }, b"http://allmydata.org/tahoe/protocols/introducer/v2": { }, b"application-version": allmydata.__full_version__.encode("utf-8"), - } + } # type: Dict[Union[bytes, str], Any] def __init__(self): service.MultiService.__init__(self) diff --git a/src/allmydata/mutable/filenode.py b/src/allmydata/mutable/filenode.py index 5afc84dec..39e8b76be 100644 --- a/src/allmydata/mutable/filenode.py +++ b/src/allmydata/mutable/filenode.py @@ -564,7 +564,7 @@ class MutableFileNode(object): return d - def upload(self, new_contents, servermap): + def upload(self, new_contents, servermap, progress=None): """ I overwrite the contents of the best recoverable version of this mutable file with new_contents, using servermap instead of @@ -951,7 +951,7 @@ class MutableFileVersion(object): return self._servermap.size_of_version(self._version) - def download_to_data(self, fetch_privkey=False, progress=None): + def download_to_data(self, fetch_privkey=False, progress=None): # type: ignore # fixme """ I return a Deferred that fires with the contents of this readable object as a byte string. @@ -1205,3 +1205,7 @@ class MutableFileVersion(object): self._servermap, mode=mode) return u.update() + + # https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3562 + def get_servermap(self): + raise NotImplementedError diff --git a/src/allmydata/node.py b/src/allmydata/node.py index c5433c33c..2f340f860 100644 --- a/src/allmydata/node.py +++ b/src/allmydata/node.py @@ -23,6 +23,11 @@ from base64 import b32decode, b32encode from errno import ENOENT, EPERM from warnings import warn +try: + from typing import Union +except ImportError: + pass + import attr # On Python 2 this will be the backported package. @@ -273,6 +278,11 @@ def _error_about_old_config_files(basedir, generated_files): raise e +def ensure_text_and_abspath_expanduser_unicode(basedir): + # type: (Union[bytes, str]) -> str + return abspath_expanduser_unicode(ensure_text(basedir)) + + @attr.s class _Config(object): """ @@ -300,8 +310,8 @@ class _Config(object): config = attr.ib(validator=attr.validators.instance_of(configparser.ConfigParser)) portnum_fname = attr.ib() _basedir = attr.ib( - converter=lambda basedir: abspath_expanduser_unicode(ensure_text(basedir)), - ) + converter=ensure_text_and_abspath_expanduser_unicode, + ) # type: str config_path = attr.ib( validator=attr.validators.optional( attr.validators.instance_of(FilePath), @@ -669,8 +679,8 @@ def create_connection_handlers(config, i2p_provider, tor_provider): # create that handler, so hints which want it will be ignored. 
handlers = { "tcp": _make_tcp_handler(), - "tor": tor_provider.get_tor_handler(), - "i2p": i2p_provider.get_i2p_handler(), + "tor": tor_provider.get_client_endpoint(), + "i2p": i2p_provider.get_client_endpoint(), } log.msg( format="built Foolscap connection handlers for: %(known_handlers)s", @@ -927,7 +937,6 @@ class Node(service.MultiService): """ NODETYPE = "unknown NODETYPE" CERTFILE = "node.pem" - GENERATED_FILES = [] def __init__(self, config, main_tub, control_tub, i2p_provider, tor_provider): """ diff --git a/src/allmydata/scripts/admin.py b/src/allmydata/scripts/admin.py index e472ffd8c..50dde9e43 100644 --- a/src/allmydata/scripts/admin.py +++ b/src/allmydata/scripts/admin.py @@ -1,5 +1,10 @@ from __future__ import print_function +try: + from allmydata.scripts.types_ import SubCommands +except ImportError: + pass + from twisted.python import usage from allmydata.scripts.common import BaseOptions @@ -79,8 +84,8 @@ def do_admin(options): subCommands = [ - ["admin", None, AdminCommand, "admin subcommands: use 'tahoe admin' for a list"], - ] + ("admin", None, AdminCommand, "admin subcommands: use 'tahoe admin' for a list"), + ] # type: SubCommands dispatch = { "admin": do_admin, diff --git a/src/allmydata/scripts/cli.py b/src/allmydata/scripts/cli.py index 379e1d212..6c5641b41 100644 --- a/src/allmydata/scripts/cli.py +++ b/src/allmydata/scripts/cli.py @@ -1,6 +1,12 @@ from __future__ import print_function import os.path, re, fnmatch + +try: + from allmydata.scripts.types_ import SubCommands, Parameters +except ImportError: + pass + from twisted.python import usage from allmydata.scripts.common import get_aliases, get_default_nodedir, \ DEFAULT_ALIAS, BaseOptions @@ -19,7 +25,7 @@ class FileStoreOptions(BaseOptions): "This overrides the URL found in the --node-directory ."], ["dir-cap", None, None, "Specify which dirnode URI should be used as the 'tahoe' alias."] - ] + ] # type: Parameters def postOptions(self): self["quiet"] = self.parent["quiet"] @@ -455,25 +461,25 @@ class DeepCheckOptions(FileStoreOptions): Optionally repair any problems found.""" subCommands = [ - ["mkdir", None, MakeDirectoryOptions, "Create a new directory."], - ["add-alias", None, AddAliasOptions, "Add a new alias cap."], - ["create-alias", None, CreateAliasOptions, "Create a new alias cap."], - ["list-aliases", None, ListAliasesOptions, "List all alias caps."], - ["ls", None, ListOptions, "List a directory."], - ["get", None, GetOptions, "Retrieve a file from the grid."], - ["put", None, PutOptions, "Upload a file into the grid."], - ["cp", None, CpOptions, "Copy one or more files or directories."], - ["unlink", None, UnlinkOptions, "Unlink a file or directory on the grid."], - ["mv", None, MvOptions, "Move a file within the grid."], - ["ln", None, LnOptions, "Make an additional link to an existing file or directory."], - ["backup", None, BackupOptions, "Make target dir look like local dir."], - ["webopen", None, WebopenOptions, "Open a web browser to a grid file or directory."], - ["manifest", None, ManifestOptions, "List all files/directories in a subtree."], - ["stats", None, StatsOptions, "Print statistics about all files/directories in a subtree."], - ["check", None, CheckOptions, "Check a single file or directory."], - ["deep-check", None, DeepCheckOptions, "Check all files/directories reachable from a starting point."], - ["status", None, TahoeStatusCommand, "Various status information."], - ] + ("mkdir", None, MakeDirectoryOptions, "Create a new directory."), + ("add-alias", None, AddAliasOptions, 
"Add a new alias cap."), + ("create-alias", None, CreateAliasOptions, "Create a new alias cap."), + ("list-aliases", None, ListAliasesOptions, "List all alias caps."), + ("ls", None, ListOptions, "List a directory."), + ("get", None, GetOptions, "Retrieve a file from the grid."), + ("put", None, PutOptions, "Upload a file into the grid."), + ("cp", None, CpOptions, "Copy one or more files or directories."), + ("unlink", None, UnlinkOptions, "Unlink a file or directory on the grid."), + ("mv", None, MvOptions, "Move a file within the grid."), + ("ln", None, LnOptions, "Make an additional link to an existing file or directory."), + ("backup", None, BackupOptions, "Make target dir look like local dir."), + ("webopen", None, WebopenOptions, "Open a web browser to a grid file or directory."), + ("manifest", None, ManifestOptions, "List all files/directories in a subtree."), + ("stats", None, StatsOptions, "Print statistics about all files/directories in a subtree."), + ("check", None, CheckOptions, "Check a single file or directory."), + ("deep-check", None, DeepCheckOptions, "Check all files/directories reachable from a starting point."), + ("status", None, TahoeStatusCommand, "Various status information."), + ] # type: SubCommands def mkdir(options): from allmydata.scripts import tahoe_mkdir @@ -495,7 +501,7 @@ def list_aliases(options): rc = tahoe_add_alias.list_aliases(options) return rc -def list(options): +def list_(options): from allmydata.scripts import tahoe_ls rc = tahoe_ls.list(options) return rc @@ -581,7 +587,7 @@ dispatch = { "add-alias": add_alias, "create-alias": create_alias, "list-aliases": list_aliases, - "ls": list, + "ls": list_, "get": get, "put": put, "cp": cp, diff --git a/src/allmydata/scripts/common.py b/src/allmydata/scripts/common.py index 106dad3f2..d73344274 100644 --- a/src/allmydata/scripts/common.py +++ b/src/allmydata/scripts/common.py @@ -4,6 +4,12 @@ import os, sys, urllib, textwrap import codecs from os.path import join +try: + from typing import Optional + from .types_ import Parameters +except ImportError: + pass + from yaml import ( safe_dump, ) @@ -41,8 +47,8 @@ class BaseOptions(usage.Options): def opt_version(self): raise usage.UsageError("--version not allowed on subcommands") - description = None - description_unwrapped = None + description = None # type: Optional[str] + description_unwrapped = None # type: Optional[str] def __str__(self): width = int(os.environ.get('COLUMNS', '80')) @@ -65,7 +71,7 @@ class BasedirOptions(BaseOptions): optParameters = [ ["basedir", "C", None, "Specify which Tahoe base directory should be used. [default: %s]" % quote_local_unicode_path(_default_nodedir)], - ] + ] # type: Parameters def parseArgs(self, basedir=None): # This finds the node-directory option correctly even if we are in a subcommand. @@ -102,7 +108,7 @@ class NoDefaultBasedirOptions(BasedirOptions): optParameters = [ ["basedir", "C", None, "Specify which Tahoe base directory should be used."], - ] + ] # type: Parameters # This is overridden in order to ensure we get a "Wrong number of arguments." # error when more than one argument is given. 
diff --git a/src/allmydata/scripts/create_node.py b/src/allmydata/scripts/create_node.py index ac17cf445..0f507f518 100644 --- a/src/allmydata/scripts/create_node.py +++ b/src/allmydata/scripts/create_node.py @@ -3,6 +3,11 @@ from __future__ import print_function import os import json +try: + from allmydata.scripts.types_ import SubCommands +except ImportError: + pass + from twisted.internet import reactor, defer from twisted.python.usage import UsageError from twisted.python.filepath import ( @@ -492,10 +497,10 @@ def create_introducer(config): subCommands = [ - ["create-node", None, CreateNodeOptions, "Create a node that acts as a client, server or both."], - ["create-client", None, CreateClientOptions, "Create a client node (with storage initially disabled)."], - ["create-introducer", None, CreateIntroducerOptions, "Create an introducer node."], -] + ("create-node", None, CreateNodeOptions, "Create a node that acts as a client, server or both."), + ("create-client", None, CreateClientOptions, "Create a client node (with storage initially disabled)."), + ("create-introducer", None, CreateIntroducerOptions, "Create an introducer node."), +] # type: SubCommands dispatch = { "create-node": create_node, diff --git a/src/allmydata/scripts/debug.py b/src/allmydata/scripts/debug.py index fd3f2b87c..550c37fde 100644 --- a/src/allmydata/scripts/debug.py +++ b/src/allmydata/scripts/debug.py @@ -1,5 +1,12 @@ from __future__ import print_function +try: + from allmydata.scripts.types_ import SubCommands +except ImportError: + pass + +from future.utils import bchr + # do not import any allmydata modules at this level. Do that from inside # individual functions instead. import struct, time, os, sys @@ -905,7 +912,7 @@ def corrupt_share(options): f = open(fn, "rb+") f.seek(offset) d = f.read(1) - d = chr(ord(d) ^ 0x01) + d = bchr(ord(d) ^ 0x01) f.seek(offset) f.write(d) f.close() @@ -920,7 +927,7 @@ def corrupt_share(options): f.seek(m.DATA_OFFSET) data = f.read(2000) # make sure this slot contains an SMDF share - assert data[0] == b"\x00", "non-SDMF mutable shares not supported" + assert data[0:1] == b"\x00", "non-SDMF mutable shares not supported" f.close() (version, ig_seqnum, ig_roothash, ig_IV, ig_k, ig_N, ig_segsize, @@ -1051,8 +1058,8 @@ def do_debug(options): subCommands = [ - ["debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list."], - ] + ("debug", None, DebugCommand, "debug subcommands: use 'tahoe debug' for a list."), + ] # type: SubCommands dispatch = { "debug": do_debug, diff --git a/src/allmydata/scripts/runner.py b/src/allmydata/scripts/runner.py index 6d3696d9b..e83b2b38d 100644 --- a/src/allmydata/scripts/runner.py +++ b/src/allmydata/scripts/runner.py @@ -4,6 +4,11 @@ import os, sys from six.moves import StringIO import six +try: + from allmydata.scripts.types_ import SubCommands +except ImportError: + pass + from twisted.python import usage from twisted.internet import defer, task, threads @@ -40,8 +45,8 @@ _control_node_dispatch = { } process_control_commands = [ - ["run", None, tahoe_run.RunOptions, "run a node without daemonizing"], -] + ("run", None, tahoe_run.RunOptions, "run a node without daemonizing"), +] # type: SubCommands class Options(usage.Options): @@ -98,7 +103,7 @@ class Options(usage.Options): create_dispatch = {} for module in (create_node,): - create_dispatch.update(module.dispatch) + create_dispatch.update(module.dispatch) # type: ignore def parse_options(argv, config=None): if not config: diff --git a/src/allmydata/scripts/tahoe_invite.py 
b/src/allmydata/scripts/tahoe_invite.py
index dbc84d0ea..884536ec2 100644
--- a/src/allmydata/scripts/tahoe_invite.py
+++ b/src/allmydata/scripts/tahoe_invite.py
@@ -2,6 +2,11 @@ from __future__ import print_function

 import json

+try:
+    from allmydata.scripts.types_ import SubCommands
+except ImportError:
+    pass
+
 from twisted.python import usage
 from twisted.internet import defer, reactor

@@ -103,7 +108,7 @@ def invite(options):

 subCommands = [
     ("invite", None, InviteOptions, "Invite a new node to this grid"),
-]
+] # type: SubCommands

 dispatch = {
     "invite": invite,
diff --git a/src/allmydata/scripts/types_.py b/src/allmydata/scripts/types_.py
new file mode 100644
index 000000000..3937cb803
--- /dev/null
+++ b/src/allmydata/scripts/types_.py
@@ -0,0 +1,12 @@
+from typing import List, Tuple, Type, Sequence, Any
+from allmydata.scripts.common import BaseOptions
+
+
+# Historically, subcommands were implemented as lists, but due to a
+# [design constraint in mypy](https://stackoverflow.com/a/52559625/70170),
+# a Tuple is required.
+SubCommand = Tuple[str, None, Type[BaseOptions], str]
+
+SubCommands = List[SubCommand]
+
+Parameters = List[Sequence[Any]]
diff --git a/src/allmydata/stats.py b/src/allmydata/stats.py
index 18b22c30a..91205a93c 100644
--- a/src/allmydata/stats.py
+++ b/src/allmydata/stats.py
@@ -1,11 +1,16 @@
+"""
+Ported to Python 3.
+"""
+from __future__ import absolute_import
+from __future__ import division
 from __future__ import print_function
+from __future__ import unicode_literals

-import time
-
-# Python 2 compatibility
 from future.utils import PY2
 if PY2:
-    from future.builtins import str # noqa: F401
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
+
+import time

 from twisted.application import service
 from twisted.application.internet import TimerService
@@ -18,7 +23,7 @@ from allmydata.interfaces import IStatsProducer
 @implementer(IStatsProducer)
 class CPUUsageMonitor(service.MultiService):
     HISTORY_LENGTH = 15
-    POLL_INTERVAL = 60
+    POLL_INTERVAL = 60 # type: float

     def __init__(self):
         service.MultiService.__init__(self)
diff --git a/src/allmydata/storage/crawler.py b/src/allmydata/storage/crawler.py
index 24042c38b..f13f7cb99 100644
--- a/src/allmydata/storage/crawler.py
+++ b/src/allmydata/storage/crawler.py
@@ -19,7 +19,7 @@ import os, time, struct
 try:
     import cPickle as pickle
 except ImportError:
-    import pickle
+    import pickle # type: ignore
 from twisted.internet import reactor
 from twisted.application import service
 from allmydata.storage.common import si_b2a
diff --git a/src/allmydata/storage/immutable.py b/src/allmydata/storage/immutable.py
index 778c0ddf8..4b60d79f1 100644
--- a/src/allmydata/storage/immutable.py
+++ b/src/allmydata/storage/immutable.py
@@ -202,7 +202,7 @@ class ShareFile(object):


 @implementer(RIBucketWriter)
-class BucketWriter(Referenceable):
+class BucketWriter(Referenceable): # type: ignore # warner/foolscap#78

     def __init__(self, ss, incominghome, finalhome, max_size, lease_info, canary):
         self.ss = ss
@@ -301,7 +301,7 @@ class BucketWriter(Referenceable):


 @implementer(RIBucketReader)
-class BucketReader(Referenceable):
+class BucketReader(Referenceable): # type: ignore # warner/foolscap#78

     def __init__(self, ss, sharefname, storage_index=None, shnum=None):
         self.ss = ss
diff --git a/src/allmydata/storage/server.py b/src/allmydata/storage/server.py
index 8a8138f26..5f2ef3ac2 100644
--- a/src/allmydata/storage/server.py
+++ b/src/allmydata/storage/server.py @@ -581,7 +581,7 @@ class StorageServer(service.MultiService, Referenceable): for share in six.viewvalues(shares): share.add_or_renew_lease(lease_info) - def slot_testv_and_readv_and_writev( + def slot_testv_and_readv_and_writev( # type: ignore # warner/foolscap#78 self, storage_index, secrets, diff --git a/src/allmydata/storage_client.py b/src/allmydata/storage_client.py index 294a2d215..eb1572dcb 100644 --- a/src/allmydata/storage_client.py +++ b/src/allmydata/storage_client.py @@ -464,6 +464,7 @@ class StorageFarmBroker(service.MultiService): @implementer(IDisplayableServer) class StubServer(object): def __init__(self, serverid): + assert isinstance(serverid, bytes) self.serverid = serverid # binary tubid def get_serverid(self): return self.serverid diff --git a/src/allmydata/test/__init__.py b/src/allmydata/test/__init__.py index abbde919f..19c046eca 100644 --- a/src/allmydata/test/__init__.py +++ b/src/allmydata/test/__init__.py @@ -113,4 +113,5 @@ if sys.platform == "win32": initialize() from eliot import to_file -to_file(open("eliot.log", "w")) +from allmydata.util.jsonbytes import BytesJSONEncoder +to_file(open("eliot.log", "w"), encoder=BytesJSONEncoder) diff --git a/src/allmydata/test/_win_subprocess.py b/src/allmydata/test/_win_subprocess.py new file mode 100644 index 000000000..fe6960c73 --- /dev/null +++ b/src/allmydata/test/_win_subprocess.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- + +## Copyright (C) 2021 Valentin Lab +## +## Redistribution and use in source and binary forms, with or without +## modification, are permitted provided that the following conditions +## are met: +## +## 1. Redistributions of source code must retain the above copyright +## notice, this list of conditions and the following disclaimer. +## +## 2. Redistributions in binary form must reproduce the above +## copyright notice, this list of conditions and the following +## disclaimer in the documentation and/or other materials provided +## with the distribution. +## +## 3. Neither the name of the copyright holder nor the names of its +## contributors may be used to endorse or promote products derived +## from this software without specific prior written permission. +## +## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +## FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +## COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +## INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +## (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +## HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +## STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +## ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +## OF THE POSSIBILITY OF SUCH DAMAGE. 
+## + +## issue: https://bugs.python.org/issue19264 + +# See allmydata/windows/fixups.py +import sys +assert sys.platform == "win32" + +import os +import ctypes +import subprocess +import _subprocess +from ctypes import byref, windll, c_char_p, c_wchar_p, c_void_p, \ + Structure, sizeof, c_wchar, WinError +from ctypes.wintypes import BYTE, WORD, LPWSTR, BOOL, DWORD, LPVOID, \ + HANDLE + + +## +## Types +## + +CREATE_UNICODE_ENVIRONMENT = 0x00000400 +LPCTSTR = c_char_p +LPTSTR = c_wchar_p +LPSECURITY_ATTRIBUTES = c_void_p +LPBYTE = ctypes.POINTER(BYTE) + +class STARTUPINFOW(Structure): + _fields_ = [ + ("cb", DWORD), ("lpReserved", LPWSTR), + ("lpDesktop", LPWSTR), ("lpTitle", LPWSTR), + ("dwX", DWORD), ("dwY", DWORD), + ("dwXSize", DWORD), ("dwYSize", DWORD), + ("dwXCountChars", DWORD), ("dwYCountChars", DWORD), + ("dwFillAtrribute", DWORD), ("dwFlags", DWORD), + ("wShowWindow", WORD), ("cbReserved2", WORD), + ("lpReserved2", LPBYTE), ("hStdInput", HANDLE), + ("hStdOutput", HANDLE), ("hStdError", HANDLE), + ] + +LPSTARTUPINFOW = ctypes.POINTER(STARTUPINFOW) + + +class PROCESS_INFORMATION(Structure): + _fields_ = [ + ("hProcess", HANDLE), ("hThread", HANDLE), + ("dwProcessId", DWORD), ("dwThreadId", DWORD), + ] + +LPPROCESS_INFORMATION = ctypes.POINTER(PROCESS_INFORMATION) + + +class DUMMY_HANDLE(ctypes.c_void_p): + + def __init__(self, *a, **kw): + super(DUMMY_HANDLE, self).__init__(*a, **kw) + self.closed = False + + def Close(self): + if not self.closed: + windll.kernel32.CloseHandle(self) + self.closed = True + + def __int__(self): + return self.value + + +CreateProcessW = windll.kernel32.CreateProcessW +CreateProcessW.argtypes = [ + LPCTSTR, LPTSTR, LPSECURITY_ATTRIBUTES, + LPSECURITY_ATTRIBUTES, BOOL, DWORD, LPVOID, LPCTSTR, + LPSTARTUPINFOW, LPPROCESS_INFORMATION, +] +CreateProcessW.restype = BOOL + + +## +## Patched functions/classes +## + +def CreateProcess(executable, args, _p_attr, _t_attr, + inherit_handles, creation_flags, env, cwd, + startup_info): + """Create a process supporting unicode executable and args for win32 + + Python implementation of CreateProcess using CreateProcessW for Win32 + + """ + + si = STARTUPINFOW( + dwFlags=startup_info.dwFlags, + wShowWindow=startup_info.wShowWindow, + cb=sizeof(STARTUPINFOW), + ## XXXvlab: not sure of the casting here to ints. 
+ hStdInput=int(startup_info.hStdInput), + hStdOutput=int(startup_info.hStdOutput), + hStdError=int(startup_info.hStdError), + ) + + wenv = None + if env is not None: + ## LPCWSTR seems to be c_wchar_p, so let's say CWSTR is c_wchar + env = (unicode("").join([ + unicode("%s=%s\0") % (k, v) + for k, v in env.items()])) + unicode("\0") + wenv = (c_wchar * len(env))() + wenv.value = env + + pi = PROCESS_INFORMATION() + creation_flags |= CREATE_UNICODE_ENVIRONMENT + + if CreateProcessW(executable, args, None, None, + inherit_handles, creation_flags, + wenv, cwd, byref(si), byref(pi)): + return (DUMMY_HANDLE(pi.hProcess), DUMMY_HANDLE(pi.hThread), + pi.dwProcessId, pi.dwThreadId) + raise WinError() + + +class Popen(subprocess.Popen): + """This superseeds Popen and corrects a bug in cPython 2.7 implem""" + + def _execute_child(self, args, executable, preexec_fn, close_fds, + cwd, env, universal_newlines, + startupinfo, creationflags, shell, to_close, + p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite): + """Code from part of _execute_child from Python 2.7 (9fbb65e) + + There are only 2 little changes concerning the construction of + the the final string in shell mode: we preempt the creation of + the command string when shell is True, because original function + will try to encode unicode args which we want to avoid to be able to + sending it as-is to ``CreateProcess``. + + """ + if not isinstance(args, subprocess.types.StringTypes): + args = subprocess.list2cmdline(args) + + if startupinfo is None: + startupinfo = subprocess.STARTUPINFO() + if shell: + startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW + startupinfo.wShowWindow = _subprocess.SW_HIDE + comspec = os.environ.get("COMSPEC", unicode("cmd.exe")) + args = unicode('{} /c "{}"').format(comspec, args) + if (_subprocess.GetVersion() >= 0x80000000 or + os.path.basename(comspec).lower() == "command.com"): + w9xpopen = self._find_w9xpopen() + args = unicode('"%s" %s') % (w9xpopen, args) + creationflags |= _subprocess.CREATE_NEW_CONSOLE + + cp = _subprocess.CreateProcess + _subprocess.CreateProcess = CreateProcess + try: + super(Popen, self)._execute_child( + args, executable, + preexec_fn, close_fds, cwd, env, universal_newlines, + startupinfo, creationflags, False, to_close, p2cread, + p2cwrite, c2pread, c2pwrite, errread, errwrite, + ) + finally: + _subprocess.CreateProcess = cp diff --git a/src/allmydata/test/check_load.py b/src/allmydata/test/check_load.py index 4058ddf77..21576ea3a 100644 --- a/src/allmydata/test/check_load.py +++ b/src/allmydata/test/check_load.py @@ -37,6 +37,11 @@ a mean of 10kB and a max of 100MB, so filesize=min(int(1.0/random(.0002)),1e8) import os, sys, httplib, binascii import urllib, json, random, time, urlparse +try: + from typing import Dict +except ImportError: + pass + # Python 2 compatibility from future.utils import PY2 if PY2: @@ -49,13 +54,13 @@ if sys.argv[1] == "--stats": DELAY = 10 MAXSAMPLES = 6 totals = [] - last_stats = {} + last_stats = {} # type: Dict[str, float] while True: - stats = {} + stats = {} # type: Dict[str, float] for sf in statsfiles: for line in open(sf, "r").readlines(): - name, value = line.split(":") - value = int(value.strip()) + name, str_value = line.split(":") + value = int(str_value.strip()) if name not in stats: stats[name] = 0 stats[name] += float(value) diff --git a/src/allmydata/test/check_memory.py b/src/allmydata/test/check_memory.py index 6ec90eeae..268d77451 100644 --- a/src/allmydata/test/check_memory.py +++ b/src/allmydata/test/check_memory.py @@ 
-508,13 +508,13 @@ if __name__ == '__main__': mode = "upload" if len(sys.argv) > 1: mode = sys.argv[1] - if sys.maxint == 2147483647: + if sys.maxsize == 2147483647: bits = "32" - elif sys.maxint == 9223372036854775807: + elif sys.maxsize == 9223372036854775807: bits = "64" else: bits = "?" - print("%s-bit system (sys.maxint=%d)" % (bits, sys.maxint)) + print("%s-bit system (sys.maxsize=%d)" % (bits, sys.maxsize)) # put the logfile and stats.out in _test_memory/ . These stick around. # put the nodes and other files in _test_memory/test/ . These are # removed each time we run. diff --git a/src/allmydata/test/cli/common.py b/src/allmydata/test/cli/common.py index bf175de44..f1c48d1af 100644 --- a/src/allmydata/test/cli/common.py +++ b/src/allmydata/test/cli/common.py @@ -1,4 +1,5 @@ -from ...util.encodingutil import unicode_to_argv +from six import ensure_str + from ...scripts import runner from ..common_util import ReallyEqualMixin, run_cli, run_cli_unicode @@ -45,6 +46,12 @@ class CLITestMixin(ReallyEqualMixin): # client_num is used to execute client CLI commands on a specific # client. client_num = kwargs.pop("client_num", 0) - client_dir = unicode_to_argv(self.get_clientdir(i=client_num)) + # If we were really going to launch a child process then + # `unicode_to_argv` would be the right thing to do here. However, + # we're just going to call some Python functions directly and those + # Python functions want native strings. So ignore the requirements + # for passing arguments to another process and make sure this argument + # is a native string. + client_dir = ensure_str(self.get_clientdir(i=client_num)) nodeargs = [ b"--node-directory", client_dir ] return run_cli(verb, *args, nodeargs=nodeargs, **kwargs) diff --git a/src/allmydata/test/cli/test_alias.py b/src/allmydata/test/cli/test_alias.py index 72b634608..07f42b29d 100644 --- a/src/allmydata/test/cli/test_alias.py +++ b/src/allmydata/test/cli/test_alias.py @@ -99,22 +99,6 @@ class ListAlias(GridTestMixin, CLITestMixin, unittest.TestCase): ) - def test_list_latin_1(self): - """ - An alias composed of all Latin-1-encodeable code points can be created - when the active encoding is Latin-1. - - This is very similar to ``test_list_utf_8`` but the assumption of - UTF-8 is nearly ubiquitous and explicitly exercising the codepaths - with a UTF-8-incompatible encoding helps flush out unintentional UTF-8 - assumptions. 
- """ - return self._check_create_alias( - u"taho\N{LATIN SMALL LETTER E WITH ACUTE}", - encoding="latin-1", - ) - - def test_list_utf_8(self): """ An alias composed of all UTF-8-encodeable code points can be created when diff --git a/src/allmydata/test/cli/test_put.py b/src/allmydata/test/cli/test_put.py index 08a66f98d..3392e67b4 100644 --- a/src/allmydata/test/cli/test_put.py +++ b/src/allmydata/test/cli/test_put.py @@ -7,7 +7,7 @@ from allmydata.scripts.common import get_aliases from allmydata.scripts import cli from ..no_network import GridTestMixin from ..common_util import skip_if_cannot_represent_filename -from allmydata.util.encodingutil import get_io_encoding, unicode_to_argv +from allmydata.util.encodingutil import get_io_encoding from allmydata.util.fileutil import abspath_expanduser_unicode from .common import CLITestMixin @@ -46,21 +46,21 @@ class Put(GridTestMixin, CLITestMixin, unittest.TestCase): self.basedir = "cli/Put/unlinked_immutable_from_file" self.set_up_grid(oneshare=True) - rel_fn = os.path.join(self.basedir, "DATAFILE") - abs_fn = unicode_to_argv(abspath_expanduser_unicode(unicode(rel_fn))) + rel_fn = unicode(os.path.join(self.basedir, "DATAFILE")) + abs_fn = abspath_expanduser_unicode(rel_fn) # we make the file small enough to fit in a LIT file, for speed fileutil.write(rel_fn, "short file") - d = self.do_cli("put", rel_fn) + d = self.do_cli_unicode(u"put", [rel_fn]) def _uploaded(args): (rc, out, err) = args readcap = out self.failUnless(readcap.startswith("URI:LIT:"), readcap) self.readcap = readcap d.addCallback(_uploaded) - d.addCallback(lambda res: self.do_cli("put", "./" + rel_fn)) + d.addCallback(lambda res: self.do_cli_unicode(u"put", [u"./" + rel_fn])) d.addCallback(lambda rc_stdout_stderr: self.failUnlessReallyEqual(rc_stdout_stderr[1], self.readcap)) - d.addCallback(lambda res: self.do_cli("put", abs_fn)) + d.addCallback(lambda res: self.do_cli_unicode(u"put", [abs_fn])) d.addCallback(lambda rc_stdout_stderr: self.failUnlessReallyEqual(rc_stdout_stderr[1], self.readcap)) # we just have to assume that ~ is handled properly diff --git a/src/allmydata/test/common.py b/src/allmydata/test/common.py index b2fe8cac8..230bca648 100644 --- a/src/allmydata/test/common.py +++ b/src/allmydata/test/common.py @@ -9,10 +9,15 @@ __all__ = [ "flush_logged_errors", "skip", "skipIf", + + # Selected based on platform and re-exported for convenience. + "Popen", + "PIPE", ] -from past.builtins import chr as byteschr +from past.builtins import chr as byteschr, unicode +import sys import os, random, struct import six import tempfile @@ -101,6 +106,21 @@ from .eliotutil import ( ) from .common_util import ShouldFailMixin # noqa: F401 +if sys.platform == "win32": + # Python 2.7 doesn't have good options for launching a process with + # non-ASCII in its command line. So use this alternative that does a + # better job. However, only use it on Windows because it doesn't work + # anywhere else. 
+ from ._win_subprocess import ( + Popen, + ) +else: + from subprocess import ( + Popen, + ) +from subprocess import ( + PIPE, +) TEST_RSA_KEY_SIZE = 522 @@ -406,7 +426,7 @@ class DummyProducer(object): pass @implementer(IImmutableFileNode) -class FakeCHKFileNode(object): +class FakeCHKFileNode(object): # type: ignore # incomplete implementation """I provide IImmutableFileNode, but all of my data is stored in a class-level dictionary.""" @@ -432,7 +452,7 @@ class FakeCHKFileNode(object): return self.storage_index def check(self, monitor, verify=False, add_lease=False): - s = StubServer("\x00"*20) + s = StubServer(b"\x00"*20) r = CheckResults(self.my_uri, self.storage_index, healthy=True, recoverable=True, count_happiness=10, @@ -544,7 +564,7 @@ def create_chk_filenode(contents, all_contents): @implementer(IMutableFileNode, ICheckable) -class FakeMutableFileNode(object): +class FakeMutableFileNode(object): # type: ignore # incomplete implementation """I provide IMutableFileNode, but all of my data is stored in a class-level dictionary.""" @@ -566,12 +586,12 @@ class FakeMutableFileNode(object): self.file_types[self.storage_index] = version initial_contents = self._get_initial_contents(contents) data = initial_contents.read(initial_contents.get_size()) - data = "".join(data) + data = b"".join(data) self.all_contents[self.storage_index] = data return defer.succeed(self) def _get_initial_contents(self, contents): if contents is None: - return MutableData("") + return MutableData(b"") if IMutableUploadable.providedBy(contents): return contents @@ -625,7 +645,7 @@ class FakeMutableFileNode(object): def raise_error(self): pass def get_writekey(self): - return "\x00"*16 + return b"\x00"*16 def get_size(self): return len(self.all_contents[self.storage_index]) def get_current_size(self): @@ -644,7 +664,7 @@ class FakeMutableFileNode(object): return self.file_types[self.storage_index] def check(self, monitor, verify=False, add_lease=False): - s = StubServer("\x00"*20) + s = StubServer(b"\x00"*20) r = CheckResults(self.my_uri, self.storage_index, healthy=True, recoverable=True, count_happiness=10, @@ -655,7 +675,7 @@ class FakeMutableFileNode(object): count_recoverable_versions=1, count_unrecoverable_versions=0, servers_responding=[s], - sharemap={"seq1-abcd-sh0": [s]}, + sharemap={b"seq1-abcd-sh0": [s]}, count_wrong_shares=0, list_corrupt_shares=[], count_corrupt_shares=0, @@ -709,7 +729,7 @@ class FakeMutableFileNode(object): def overwrite(self, new_contents): assert not self.is_readonly() new_data = new_contents.read(new_contents.get_size()) - new_data = "".join(new_data) + new_data = b"".join(new_data) self.all_contents[self.storage_index] = new_data return defer.succeed(None) def modify(self, modifier): @@ -740,7 +760,7 @@ class FakeMutableFileNode(object): def update(self, data, offset): assert not self.is_readonly() def modifier(old, servermap, first_time): - new = old[:offset] + "".join(data.read(data.get_size())) + new = old[:offset] + b"".join(data.read(data.get_size())) new += old[len(new):] return new return self.modify(modifier) @@ -825,13 +845,18 @@ class WebErrorMixin(object): code=None, substring=None, response_substring=None, callable=None, *args, **kwargs): # returns a Deferred with the response body - assert substring is None or isinstance(substring, str) + if isinstance(substring, bytes): + substring = unicode(substring, "ascii") + if isinstance(response_substring, unicode): + response_substring = response_substring.encode("ascii") + assert substring is None or 
isinstance(substring, unicode) + assert response_substring is None or isinstance(response_substring, bytes) assert callable def _validate(f): if code is not None: - self.failUnlessEqual(f.value.status, str(code), which) + self.failUnlessEqual(f.value.status, b"%d" % code, which) if substring: - code_string = str(f) + code_string = unicode(f) self.failUnless(substring in code_string, "%s: substring '%s' not in '%s'" % (which, substring, code_string)) @@ -854,6 +879,8 @@ class WebErrorMixin(object): body = yield response.content() self.assertEquals(response.code, code) if response_substring is not None: + if isinstance(response_substring, unicode): + response_substring = response_substring.encode("utf-8") self.assertIn(response_substring, body) returnValue(body) diff --git a/src/allmydata/test/common_util.py b/src/allmydata/test/common_util.py index 8885e067e..70cc851f6 100644 --- a/src/allmydata/test/common_util.py +++ b/src/allmydata/test/common_util.py @@ -1,5 +1,9 @@ from __future__ import print_function +from future.utils import PY2, native_str, bchr, binary_type +from future.builtins import str as future_str +from past.builtins import unicode + import os import time import signal @@ -20,9 +24,6 @@ from twisted.trial import unittest from ..util.assertutil import precondition from ..scripts import runner from allmydata.util.encodingutil import unicode_platform, get_filesystem_encoding, get_io_encoding -# Imported for backwards compatibility: -from future.utils import bord, bchr, binary_type -from past.builtins import unicode def skip_if_cannot_represent_filename(u): @@ -51,24 +52,23 @@ def _getvalue(io): return io.read() -def run_cli_bytes(verb, *args, **kwargs): +def run_cli_native(verb, *args, **kwargs): """ - Run a Tahoe-LAFS CLI command specified as bytes. + Run a Tahoe-LAFS CLI command specified as bytes (on Python 2) or Unicode + (on Python 3); basically, it accepts a native string. Most code should prefer ``run_cli_unicode`` which deals with all the - necessary encoding considerations. This helper still exists so that novel - misconfigurations can be explicitly tested (for example, receiving UTF-8 - bytes when the system encoding claims to be ASCII). + necessary encoding considerations. - :param bytes verb: The command to run. For example, ``b"create-node"``. + :param native_str verb: The command to run. For example, ``"create-node"``. - :param [bytes] args: The arguments to pass to the command. For example, - ``(b"--hostname=localhost",)``. + :param [native_str] args: The arguments to pass to the command. For example, + ``("--hostname=localhost",)``. - :param [bytes] nodeargs: Extra arguments to pass to the Tahoe executable + :param [native_str] nodeargs: Extra arguments to pass to the Tahoe executable before ``verb``. - :param bytes stdin: Text to pass to the command via stdin. + :param native_str stdin: Text to pass to the command via stdin. :param NoneType|str encoding: The name of an encoding which stdout and stderr will be configured to use. 
    ``None`` means stdout and stderr
@@ -78,8 +78,8 @@
     nodeargs = kwargs.pop("nodeargs", [])
     encoding = kwargs.pop("encoding", None)
     precondition(
-        all(isinstance(arg, bytes) for arg in [verb] + nodeargs + list(args)),
-        "arguments to run_cli must be bytes -- convert using unicode_to_argv",
+        all(isinstance(arg, native_str) for arg in [verb] + nodeargs + list(args)),
+        "arguments to run_cli must be a native string -- convert using unicode_to_argv",
         verb=verb,
         args=args,
         nodeargs=nodeargs,
@@ -148,15 +148,19 @@ def run_cli_unicode(verb, argv, nodeargs=None, stdin=None, encoding=None):
     if nodeargs is None:
         nodeargs = []
     precondition(
-        all(isinstance(arg, unicode) for arg in [verb] + nodeargs + argv),
+        all(isinstance(arg, future_str) for arg in [verb] + nodeargs + argv),
         "arguments to run_cli_unicode must be unicode",
         verb=verb,
         nodeargs=nodeargs,
         argv=argv,
     )
     codec = encoding or "ascii"
-    encode = lambda t: None if t is None else t.encode(codec)
-    d = run_cli_bytes(
+    if PY2:
+        encode = lambda t: None if t is None else t.encode(codec)
+    else:
+        # On Python 3 command-line parsing expects Unicode!
+        encode = lambda t: t
+    d = run_cli_native(
         encode(verb),
         nodeargs=list(encode(arg) for arg in nodeargs),
         stdin=encode(stdin),
@@ -174,7 +178,7 @@ def run_cli_unicode(verb, argv, nodeargs=None, stdin=None, encoding=None):
     return d


-run_cli = run_cli_bytes
+run_cli = run_cli_native


 def parse_cli(*argv):
@@ -190,13 +194,12 @@ def insecurerandstr(n):
     return b''.join(map(bchr, map(randrange, [0]*n, [256]*n)))

 def flip_bit(good, which):
-    # TODO Probs need to update with bchr/bord as with flip_one_bit, below.
-    # flip the low-order bit of good[which]
+    """Flip the low-order bit of good[which]."""
     if which == -1:
-        pieces = good[:which], good[-1:], ""
+        pieces = good[:which], good[-1:], b""
     else:
         pieces = good[:which], good[which:which+1], good[which+1:]
-    return pieces[0] + chr(ord(pieces[1]) ^ 0x01) + pieces[2]
+    return pieces[0] + bchr(ord(pieces[1]) ^ 0x01) + pieces[2]

 def flip_one_bit(s, offset=0, size=None):
     """ flip one random bit of the string s, in a byte greater than or equal to offset and less
@@ -205,7 +208,7 @@ def flip_one_bit(s, offset=0, size=None):
     if size is None:
         size=len(s)-offset
     i = randrange(offset, offset+size)
-    result = s[:i] + bchr(bord(s[i])^(0x01<<randrange(0, 8))) + s[i+1:]
+    result = s[:i] + bchr(ord(s[i:i+1])^(0x01<<randrange(0, 8))) + s[i+1:]
     return result
diff --git a/src/allmydata/test/eliotutil.py b/src/allmydata/test/eliotutil.py
--- a/src/allmydata/test/eliotutil.py
+++ b/src/allmydata/test/eliotutil.py
@@ ... @@
 # Python 2 compatibility
 # Can't use `builtins.str` because it's not JSON-encodeable:
 # `exceptions.ValueError: <class 'future.types.newstr.newstr'> is not JSON-encodeable`
 from past.builtins import unicode as str
-from future.utils import PY3
+from future.utils import PY2
+from six import ensure_text

 __all__ = [
     "RUN_TEST",
     "EliotLoggedRunTest",
-    "eliot_logged_test",
 ]

+try:
+    from typing import Callable
+except ImportError:
+    pass
+
 from functools import (
-    wraps, partial,
+    wraps,
 )

 import attr

+from zope.interface import (
+    implementer,
+)
+
 from eliot import (
     ActionType,
     Field,
+    MemoryLogger,
+    ILogger,
+)
+from eliot.testing import (
+    swap_logger,
+    check_for_errors,
 )
-from eliot.testing import capture_logging

-from twisted.internet.defer import (
-    maybeDeferred,
+from twisted.python.monkey import (
+    MonkeyPatcher,
 )

+from ..util.jsonbytes import BytesJSONEncoder
+
+
 _NAME = Field.for_types(
     u"name",
     [str],
@@ -45,84 +62,12 @@ RUN_TEST = ActionType(
 )


-def eliot_logged_test(f):
-    """
-    Decorate a test method to run in a dedicated Eliot action context.
-
-    The action will finish after the test is done (after the returned Deferred
-    fires, if a Deferred is returned). It will note the name of the test
-    being run.
-
-    All messages emitted by the test will be validated. They will still be
-    delivered to the global logger.
- """ - # A convenient, mutable container into which nested functions can write - # state to be shared among them. - class storage(object): - pass - - @wraps(f) - def run_and_republish(self, *a, **kw): - # Unfortunately the only way to get at the global/default logger... - # This import is delayed here so that we get the *current* default - # logger at the time the decorated function is run. - from eliot._output import _DEFAULT_LOGGER as default_logger - - def republish(): - # This is called as a cleanup function after capture_logging has - # restored the global/default logger to its original state. We - # can now emit messages that go to whatever global destinations - # are installed. - - # storage.logger.serialize() seems like it would make more sense - # than storage.logger.messages here. However, serialize() - # explodes, seemingly as a result of double-serializing the logged - # messages. I don't understand this. - for msg in storage.logger.messages: - default_logger.write(msg) - - # And now that we've re-published all of the test's messages, we - # can finish the test's action. - storage.action.finish() - - @capture_logging(None) - def run(self, logger): - # Record the MemoryLogger for later message extraction. - storage.logger = logger - # Give the test access to the logger as well. It would be just - # fine to pass this as a keyword argument to `f` but implementing - # that now will give me conflict headaches so I'm not doing it. - self.eliot_logger = logger - return f(self, *a, **kw) - - # Arrange for all messages written to the memory logger that - # `capture_logging` installs to be re-written to the global/default - # logger so they might end up in a log file somewhere, if someone - # wants. This has to be done in a cleanup function (or later) because - # capture_logging restores the original logger in a cleanup function. - # We install our cleanup function here, before we call run, so that it - # runs *after* the cleanup function capture_logging installs (cleanup - # functions are a stack). - self.addCleanup(republish) - - # Begin an action that should comprise all messages from the decorated - # test method. - with RUN_TEST(name=self.id()).context() as action: - # When the test method Deferred fires, the RUN_TEST action is - # done. However, we won't have re-published the MemoryLogger - # messages into the global/default logger when this Deferred - # fires. So we need to delay finishing the action until that has - # happened. Record the action so we can do that. - storage.action = action - - # Support both Deferred-returning and non-Deferred-returning - # tests. - d = maybeDeferred(run, self) - - # Let the test runner do its thing. - return d - - return run_and_republish +# On Python 3, we want to use our custom JSON encoder when validating messages +# can be encoded to JSON: +if PY2: + _memory_logger = MemoryLogger +else: + _memory_logger = lambda: MemoryLogger(encoder=BytesJSONEncoder) @attr.s @@ -163,13 +108,91 @@ class EliotLoggedRunTest(object): def id(self): return self.case.id() - @eliot_logged_test - def run(self, result=None): - # Workaround for https://github.com/itamarst/eliot/issues/456 - if PY3: - self.case.eliot_logger._validate_message = lambda *args, **kwargs: None - return self._run_tests_with_factory( - self.case, - self.handlers, - self.last_resort, - ).run(result) + def run(self, result): + """ + Run the test case in the context of a distinct Eliot action. + + The action will finish after the test is done. It will note the name of + the test being run. 
+ + All messages emitted by the test will be validated. They will still be + delivered to the global logger. + """ + # The idea here is to decorate the test method itself so that all of + # the extra logic happens at the point where test/application logic is + # expected to be. This `run` method is more like test infrastructure + # and things do not go well when we add too much extra behavior here. + # For example, exceptions raised here often just kill the whole + # runner. + patcher = MonkeyPatcher() + + # So, grab the test method. + name = self.case._testMethodName + original = getattr(self.case, name) + decorated = with_logging(ensure_text(self.case.id()), original) + patcher.addPatch(self.case, name, decorated) + try: + # Patch it in + patcher.patch() + # Then use the rest of the machinery to run it. + return self._run_tests_with_factory( + self.case, + self.handlers, + self.last_resort, + ).run(result) + finally: + # Clean up the patching for idempotency or something. + patcher.restore() + + +def with_logging( + test_id, # type: str + test_method, # type: Callable +): + """ + Decorate a test method with additional log-related behaviors. + + 1. The test method will run in a distinct Eliot action. + 2. Typed log messages will be validated. + 3. Logged tracebacks will be added as errors. + + :param test_id: The full identifier of the test being decorated. + :param test_method: The method itself. + """ + @wraps(test_method) + def run_with_logging(*args, **kwargs): + validating_logger = _memory_logger() + original = swap_logger(None) + try: + swap_logger(_TwoLoggers(original, validating_logger)) + with RUN_TEST(name=test_id): + try: + return test_method(*args, **kwargs) + finally: + check_for_errors(validating_logger) + finally: + swap_logger(original) + return run_with_logging + + +@implementer(ILogger) +class _TwoLoggers(object): + """ + Log to two loggers. + + A single logger can have multiple destinations so this isn't typically a + useful thing to do. However, MemoryLogger has inline validation instead + of destinations. That means this *is* useful to simultaneously write to + the normal places and validate all written log messages. 
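The machinery above is easier to follow in miniature. A condensed, hypothetical sketch of the pattern ``with_logging`` implements, reusing the same Eliot helpers (``swap_logger``, ``check_for_errors``) and the ``_TwoLoggers`` fan-out defined in this patch::

    from eliot import MemoryLogger
    from eliot.testing import swap_logger, check_for_errors

    def run_validated(test_id, test_method):
        # Send every message both to the real global logger and to a
        # MemoryLogger, whose write() validates messages inline.
        validating = MemoryLogger()
        original = swap_logger(None)
        swap_logger(_TwoLoggers(original, validating))
        try:
            with RUN_TEST(name=test_id):
                return test_method()
        finally:
            swap_logger(original)
            check_for_errors(validating)  # raise if anything was invalid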
+ """ + def __init__(self, a, b): + """ + :param ILogger a: One logger + :param ILogger b: Another logger + """ + self._a = a # type: ILogger + self._b = b # type: ILogger + + def write(self, dictionary, serializer=None): + self._a.write(dictionary, serializer) + self._b.write(dictionary, serializer) diff --git a/src/allmydata/test/no_network.py b/src/allmydata/test/no_network.py index 59ab807bb..cbea0dfcd 100644 --- a/src/allmydata/test/no_network.py +++ b/src/allmydata/test/no_network.py @@ -24,6 +24,7 @@ from future.utils import PY2 if PY2: from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 from past.builtins import unicode +from six import ensure_text import os from base64 import b32encode @@ -67,7 +68,7 @@ class Marker(object): fireNow = partial(defer.succeed, None) -@implementer(IRemoteReference) +@implementer(IRemoteReference) # type: ignore # warner/foolscap#79 class LocalWrapper(object): """ A ``LocalWrapper`` presents the remote reference interface to a local @@ -212,9 +213,12 @@ class NoNetworkServer(object): return _StorageServer(lambda: self.rref) def get_version(self): return self.rref.version + def start_connecting(self, trigger_cb): + raise NotImplementedError + @implementer(IStorageBroker) -class NoNetworkStorageBroker(object): +class NoNetworkStorageBroker(object): # type: ignore # missing many methods def get_servers_for_psi(self, peer_selection_index): def _permuted(server): seed = server.get_permutation_seed() @@ -258,7 +262,7 @@ def create_no_network_client(basedir): return defer.succeed(client) -class _NoNetworkClient(_Client): +class _NoNetworkClient(_Client): # type: ignore # tahoe-lafs/ticket/3573 """ Overrides all _Client networking functionality to do nothing. """ @@ -614,8 +618,7 @@ class GridTestMixin(object): method="GET", clientnum=0, **kwargs): # if return_response=True, this fires with (data, statuscode, # respheaders) instead of just data. 
- assert not isinstance(urlpath, unicode) - url = self.client_baseurls[clientnum] + urlpath + url = self.client_baseurls[clientnum] + ensure_text(urlpath) response = yield treq.request(method, url, persistent=False, allow_redirects=followRedirect, diff --git a/src/allmydata/test/storage_plugin.py b/src/allmydata/test/storage_plugin.py index 4a1f84531..17ec89078 100644 --- a/src/allmydata/test/storage_plugin.py +++ b/src/allmydata/test/storage_plugin.py @@ -47,8 +47,9 @@ class RIDummy(RemoteInterface): """ - -@implementer(IFoolscapStoragePlugin) +# type ignored due to missing stubs for Twisted +# https://twistedmatrix.com/trac/ticket/9717 +@implementer(IFoolscapStoragePlugin) # type: ignore @attr.s class DummyStorage(object): name = attr.ib() @@ -107,7 +108,7 @@ class GetCounter(Resource, object): @implementer(RIDummy) @attr.s(frozen=True) -class DummyStorageServer(object): +class DummyStorageServer(object): # type: ignore # warner/foolscap#78 get_anonymous_storage_server = attr.ib() def remote_just_some_method(self): @@ -116,7 +117,7 @@ class DummyStorageServer(object): @implementer(IStorageServer) @attr.s -class DummyStorageClient(object): +class DummyStorageClient(object): # type: ignore # incomplete implementation get_rref = attr.ib() configuration = attr.ib() announcement = attr.ib() diff --git a/src/allmydata/test/test_checker.py b/src/allmydata/test/test_checker.py index 936d270ae..f56ecd089 100644 --- a/src/allmydata/test/test_checker.py +++ b/src/allmydata/test/test_checker.py @@ -62,7 +62,7 @@ class FakeClient(object): @implementer(IServer) -class FakeServer(object): +class FakeServer(object): # type: ignore # incomplete implementation def get_name(self): return "fake name" @@ -75,7 +75,7 @@ class FakeServer(object): @implementer(ICheckResults) -class FakeCheckResults(object): +class FakeCheckResults(object): # type: ignore # incomplete implementation def __init__(self, si=None, healthy=False, recoverable=False, @@ -106,7 +106,7 @@ class FakeCheckResults(object): @implementer(ICheckAndRepairResults) -class FakeCheckAndRepairResults(object): +class FakeCheckAndRepairResults(object): # type: ignore # incomplete implementation def __init__(self, si=None, repair_attempted=False, @@ -173,7 +173,7 @@ class WebResultsRendering(unittest.TestCase): return c def render_json(self, resource): - return self.successResultOf(render(resource, {"output": ["json"]})) + return self.successResultOf(render(resource, {b"output": [b"json"]})) def render_element(self, element, args=None): if args is None: @@ -186,7 +186,7 @@ class WebResultsRendering(unittest.TestCase): html = self.render_element(lcr) self.failUnlessIn(b"Literal files are always healthy", html) - html = self.render_element(lcr, args={"return_to": ["FOOURL"]}) + html = self.render_element(lcr, args={b"return_to": [b"FOOURL"]}) self.failUnlessIn(b"Literal files are always healthy", html) self.failUnlessIn(b'Return to file.', html) @@ -269,7 +269,7 @@ class WebResultsRendering(unittest.TestCase): self.failUnlessIn("File Check Results for SI=2k6avp", s) # abbreviated self.failUnlessIn("Not Recoverable! 
: rather dead", s) - html = self.render_element(w, args={"return_to": ["FOOURL"]}) + html = self.render_element(w, args={b"return_to": [b"FOOURL"]}) self.failUnlessIn(b'Return to file/directory.', html) diff --git a/src/allmydata/test/test_client.py b/src/allmydata/test/test_client.py index 342fe4af1..63a5ceaaa 100644 --- a/src/allmydata/test/test_client.py +++ b/src/allmydata/test/test_client.py @@ -51,7 +51,6 @@ from allmydata.nodemaker import ( NodeMaker, ) from allmydata.node import OldConfigError, UnescapedHashError, create_node_dir -from allmydata.frontends.auth import NeedRootcapLookupScheme from allmydata import client from allmydata.storage_client import ( StorageClientConfig, @@ -424,88 +423,8 @@ class Basic(testutil.ReallyEqualMixin, unittest.TestCase): expected = fileutil.abspath_expanduser_unicode(u"relative", abs_basedir) self.failUnlessReallyEqual(w.staticdir, expected) - # TODO: also test config options for SFTP. - - @defer.inlineCallbacks - def test_ftp_create(self): - """ - configuration for sftpd results in it being started - """ - root = FilePath(self.mktemp()) - root.makedirs() - accounts = root.child(b"sftp-accounts") - accounts.touch() - - data = FilePath(__file__).sibling(b"data") - privkey = data.child(b"openssh-rsa-2048.txt") - pubkey = data.child(b"openssh-rsa-2048.pub.txt") - - basedir = u"client.Basic.test_ftp_create" - create_node_dir(basedir, "testing") - with open(os.path.join(basedir, "tahoe.cfg"), "w") as f: - f.write(( - '[sftpd]\n' - 'enabled = true\n' - 'accounts.file = {}\n' - 'host_pubkey_file = {}\n' - 'host_privkey_file = {}\n' - ).format(accounts.path, pubkey.path, privkey.path)) - - client_node = yield client.create_client( - basedir, - ) - sftp = client_node.getServiceNamed("frontend:sftp") - self.assertIs(sftp.parent, client_node) - - - @defer.inlineCallbacks - def test_ftp_auth_keyfile(self): - """ - ftpd accounts.file is parsed properly - """ - basedir = u"client.Basic.test_ftp_auth_keyfile" - os.mkdir(basedir) - fileutil.write(os.path.join(basedir, "tahoe.cfg"), - (BASECONFIG + - "[ftpd]\n" - "enabled = true\n" - "port = tcp:0:interface=127.0.0.1\n" - "accounts.file = private/accounts\n")) - os.mkdir(os.path.join(basedir, "private")) - fileutil.write(os.path.join(basedir, "private", "accounts"), "\n") - c = yield client.create_client(basedir) # just make sure it can be instantiated - del c - - @defer.inlineCallbacks - def test_ftp_auth_url(self): - """ - ftpd accounts.url is parsed properly - """ - basedir = u"client.Basic.test_ftp_auth_url" - os.mkdir(basedir) - fileutil.write(os.path.join(basedir, "tahoe.cfg"), - (BASECONFIG + - "[ftpd]\n" - "enabled = true\n" - "port = tcp:0:interface=127.0.0.1\n" - "accounts.url = http://0.0.0.0/\n")) - c = yield client.create_client(basedir) # just make sure it can be instantiated - del c - - @defer.inlineCallbacks - def test_ftp_auth_no_accountfile_or_url(self): - """ - ftpd requires some way to look up accounts - """ - basedir = u"client.Basic.test_ftp_auth_no_accountfile_or_url" - os.mkdir(basedir) - fileutil.write(os.path.join(basedir, "tahoe.cfg"), - (BASECONFIG + - "[ftpd]\n" - "enabled = true\n" - "port = tcp:0:interface=127.0.0.1\n")) - with self.assertRaises(NeedRootcapLookupScheme): - yield client.create_client(basedir) + # TODO: also test config options for SFTP. See Git history for deleted FTP + # tests that could be used as basis for these tests. 
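With the FTP frontend gone, the natural follow-up for that TODO is an SFTP analogue of the deleted ``test_ftp_create`` (which, despite its name, exercised sftpd). A hypothetical, untested sketch of such a method on ``Basic``, assuming the ``openssh-rsa-2048`` data files still ship with the test suite::

    @defer.inlineCallbacks
    def test_sftp_create(self):
        """
        Configuration for sftpd results in the SFTP frontend being started.
        """
        root = FilePath(self.mktemp())
        root.makedirs()
        accounts = root.child(b"sftp-accounts")
        accounts.touch()

        data = FilePath(__file__).sibling(b"data")
        privkey = data.child(b"openssh-rsa-2048.txt")
        pubkey = data.child(b"openssh-rsa-2048.pub.txt")

        basedir = u"client.Basic.test_sftp_create"
        create_node_dir(basedir, "testing")
        with open(os.path.join(basedir, "tahoe.cfg"), "w") as f:
            f.write((
                '[sftpd]\n'
                'enabled = true\n'
                'accounts.file = {}\n'
                'host_pubkey_file = {}\n'
                'host_privkey_file = {}\n'
            ).format(accounts.path, pubkey.path, privkey.path))

        client_node = yield client.create_client(basedir)
        sftp = client_node.getServiceNamed("frontend:sftp")
        self.assertIs(sftp.parent, client_node)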
@defer.inlineCallbacks def _storage_dir_test(self, basedir, storage_path, expected_path): diff --git a/src/allmydata/test/test_connections.py b/src/allmydata/test/test_connections.py index 30aac8446..7a24ac794 100644 --- a/src/allmydata/test/test_connections.py +++ b/src/allmydata/test/test_connections.py @@ -1,149 +1,69 @@ -import os -import mock + from twisted.trial import unittest -from twisted.internet import reactor, endpoints, defer -from twisted.internet.interfaces import IStreamClientEndpoint +from twisted.internet import reactor + from foolscap.connections import tcp + +from testtools.matchers import ( + MatchesDict, + IsInstance, + Equals, +) + from ..node import PrivacyError, config_from_string from ..node import create_connection_handlers from ..node import create_main_tub from ..util.i2p_provider import create as create_i2p_provider from ..util.tor_provider import create as create_tor_provider +from .common import ( + SyncTestCase, + ConstantAddresses, +) + BASECONFIG = "" -class TCP(unittest.TestCase): - - def test_default(self): +class CreateConnectionHandlersTests(SyncTestCase): + """ + Tests for the Foolscap connection handlers return by + ``create_connection_handlers``. + """ + def test_foolscap_handlers(self): + """ + ``create_connection_handlers`` returns a Foolscap connection handlers + dictionary mapping ``"tcp"`` to + ``foolscap.connections.tcp.DefaultTCP``, ``"tor"`` to the supplied Tor + provider's handler, and ``"i2p"`` to the supplied I2P provider's + handler. + """ config = config_from_string( "fake.port", "no-basedir", BASECONFIG, ) - _, foolscap_handlers = create_connection_handlers(config, mock.Mock(), mock.Mock()) - self.assertIsInstance( - foolscap_handlers['tcp'], - tcp.DefaultTCP, + tor_endpoint = object() + tor = ConstantAddresses(handler=tor_endpoint) + i2p_endpoint = object() + i2p = ConstantAddresses(handler=i2p_endpoint) + _, foolscap_handlers = create_connection_handlers( + config, + i2p, + tor, + ) + self.assertThat( + foolscap_handlers, + MatchesDict({ + "tcp": IsInstance(tcp.DefaultTCP), + "i2p": Equals(i2p_endpoint), + "tor": Equals(tor_endpoint), + }), ) class Tor(unittest.TestCase): - def test_disabled(self): - config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG + "[tor]\nenabled = false\n", - ) - tor_provider = create_tor_provider(reactor, config) - h = tor_provider.get_tor_handler() - self.assertEqual(h, None) - - def test_unimportable(self): - with mock.patch("allmydata.util.tor_provider._import_tor", - return_value=None): - config = config_from_string("fake.port", "no-basedir", BASECONFIG) - tor_provider = create_tor_provider(reactor, config) - h = tor_provider.get_tor_handler() - self.assertEqual(h, None) - - def test_default(self): - h1 = mock.Mock() - with mock.patch("foolscap.connections.tor.default_socks", - return_value=h1) as f: - - config = config_from_string("fake.port", "no-basedir", BASECONFIG) - tor_provider = create_tor_provider(reactor, config) - h = tor_provider.get_tor_handler() - self.assertEqual(f.mock_calls, [mock.call()]) - self.assertIdentical(h, h1) - - def _do_test_launch(self, executable): - # the handler is created right away - config = BASECONFIG+"[tor]\nlaunch = true\n" - if executable: - config += "tor.executable = %s\n" % executable - h1 = mock.Mock() - with mock.patch("foolscap.connections.tor.control_endpoint_maker", - return_value=h1) as f: - - config = config_from_string("fake.port", ".", config) - tp = create_tor_provider("reactor", config) - h = tp.get_tor_handler() - - private_dir = 
config.get_config_path("private") - exp = mock.call(tp._make_control_endpoint, - takes_status=True) - self.assertEqual(f.mock_calls, [exp]) - self.assertIdentical(h, h1) - - # later, when Foolscap first connects, Tor should be launched - reactor = "reactor" - tcp = object() - tcep = object() - launch_tor = mock.Mock(return_value=defer.succeed(("ep_desc", tcp))) - cfs = mock.Mock(return_value=tcep) - with mock.patch("allmydata.util.tor_provider._launch_tor", launch_tor): - with mock.patch("allmydata.util.tor_provider.clientFromString", cfs): - d = tp._make_control_endpoint(reactor, - update_status=lambda status: None) - cep = self.successResultOf(d) - launch_tor.assert_called_with(reactor, executable, - os.path.abspath(private_dir), - tp._txtorcon) - cfs.assert_called_with(reactor, "ep_desc") - self.assertIs(cep, tcep) - - def test_launch(self): - self._do_test_launch(None) - - def test_launch_executable(self): - self._do_test_launch("/special/tor") - - def test_socksport_unix_endpoint(self): - h1 = mock.Mock() - with mock.patch("foolscap.connections.tor.socks_endpoint", - return_value=h1) as f: - config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG + "[tor]\nsocks.port = unix:/var/lib/fw-daemon/tor_socks.socket\n", - ) - tor_provider = create_tor_provider(reactor, config) - h = tor_provider.get_tor_handler() - self.assertTrue(IStreamClientEndpoint.providedBy(f.mock_calls[0][1][0])) - self.assertIdentical(h, h1) - - def test_socksport_endpoint(self): - h1 = mock.Mock() - with mock.patch("foolscap.connections.tor.socks_endpoint", - return_value=h1) as f: - config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG + "[tor]\nsocks.port = tcp:127.0.0.1:1234\n", - ) - tor_provider = create_tor_provider(reactor, config) - h = tor_provider.get_tor_handler() - self.assertTrue(IStreamClientEndpoint.providedBy(f.mock_calls[0][1][0])) - self.assertIdentical(h, h1) - - def test_socksport_endpoint_otherhost(self): - h1 = mock.Mock() - with mock.patch("foolscap.connections.tor.socks_endpoint", - return_value=h1) as f: - config = config_from_string( - "no-basedir", - "fake.port", - BASECONFIG + "[tor]\nsocks.port = tcp:otherhost:1234\n", - ) - tor_provider = create_tor_provider(reactor, config) - h = tor_provider.get_tor_handler() - self.assertTrue(IStreamClientEndpoint.providedBy(f.mock_calls[0][1][0])) - self.assertIdentical(h, h1) - def test_socksport_bad_endpoint(self): config = config_from_string( "fake.port", @@ -176,73 +96,8 @@ class Tor(unittest.TestCase): str(ctx.exception) ) - def test_controlport(self): - h1 = mock.Mock() - with mock.patch("foolscap.connections.tor.control_endpoint", - return_value=h1) as f: - config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG + "[tor]\ncontrol.port = tcp:localhost:1234\n", - ) - tor_provider = create_tor_provider(reactor, config) - h = tor_provider.get_tor_handler() - self.assertEqual(len(f.mock_calls), 1) - ep = f.mock_calls[0][1][0] - self.assertIsInstance(ep, endpoints.TCP4ClientEndpoint) - self.assertIdentical(h, h1) - class I2P(unittest.TestCase): - def test_disabled(self): - config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG + "[i2p]\nenabled = false\n", - ) - i2p_provider = create_i2p_provider(None, config) - h = i2p_provider.get_i2p_handler() - self.assertEqual(h, None) - - def test_unimportable(self): - config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG, - ) - with mock.patch("allmydata.util.i2p_provider._import_i2p", - return_value=None): - 
i2p_provider = create_i2p_provider(reactor, config) - h = i2p_provider.get_i2p_handler() - self.assertEqual(h, None) - - def test_default(self): - config = config_from_string("fake.port", "no-basedir", BASECONFIG) - h1 = mock.Mock() - with mock.patch("foolscap.connections.i2p.default", - return_value=h1) as f: - i2p_provider = create_i2p_provider(reactor, config) - h = i2p_provider.get_i2p_handler() - self.assertEqual(f.mock_calls, [mock.call(reactor, keyfile=None)]) - self.assertIdentical(h, h1) - - def test_samport(self): - config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG + "[i2p]\nsam.port = tcp:localhost:1234\n", - ) - h1 = mock.Mock() - with mock.patch("foolscap.connections.i2p.sam_endpoint", - return_value=h1) as f: - i2p_provider = create_i2p_provider(reactor, config) - h = i2p_provider.get_i2p_handler() - - self.assertEqual(len(f.mock_calls), 1) - ep = f.mock_calls[0][1][0] - self.assertIsInstance(ep, endpoints.TCP4ClientEndpoint) - self.assertIdentical(h, h1) - def test_samport_and_launch(self): config = config_from_string( "no-basedir", @@ -258,82 +113,6 @@ class I2P(unittest.TestCase): str(ctx.exception) ) - def test_launch(self): - config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG + "[i2p]\nlaunch = true\n", - ) - h1 = mock.Mock() - with mock.patch("foolscap.connections.i2p.launch", - return_value=h1) as f: - i2p_provider = create_i2p_provider(reactor, config) - h = i2p_provider.get_i2p_handler() - exp = mock.call(i2p_configdir=None, i2p_binary=None) - self.assertEqual(f.mock_calls, [exp]) - self.assertIdentical(h, h1) - - def test_launch_executable(self): - config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG + "[i2p]\nlaunch = true\n" + "i2p.executable = i2p\n", - ) - h1 = mock.Mock() - with mock.patch("foolscap.connections.i2p.launch", - return_value=h1) as f: - i2p_provider = create_i2p_provider(reactor, config) - h = i2p_provider.get_i2p_handler() - exp = mock.call(i2p_configdir=None, i2p_binary="i2p") - self.assertEqual(f.mock_calls, [exp]) - self.assertIdentical(h, h1) - - def test_launch_configdir(self): - config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG + "[i2p]\nlaunch = true\n" + "i2p.configdir = cfg\n", - ) - h1 = mock.Mock() - with mock.patch("foolscap.connections.i2p.launch", - return_value=h1) as f: - i2p_provider = create_i2p_provider(reactor, config) - h = i2p_provider.get_i2p_handler() - exp = mock.call(i2p_configdir="cfg", i2p_binary=None) - self.assertEqual(f.mock_calls, [exp]) - self.assertIdentical(h, h1) - - def test_launch_configdir_and_executable(self): - config = config_from_string( - "no-basedir", - "fake.port", - BASECONFIG + "[i2p]\nlaunch = true\n" + - "i2p.executable = i2p\n" + "i2p.configdir = cfg\n", - ) - h1 = mock.Mock() - with mock.patch("foolscap.connections.i2p.launch", - return_value=h1) as f: - i2p_provider = create_i2p_provider(reactor, config) - h = i2p_provider.get_i2p_handler() - exp = mock.call(i2p_configdir="cfg", i2p_binary="i2p") - self.assertEqual(f.mock_calls, [exp]) - self.assertIdentical(h, h1) - - def test_configdir(self): - config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG + "[i2p]\ni2p.configdir = cfg\n", - ) - h1 = mock.Mock() - with mock.patch("foolscap.connections.i2p.local_i2p", - return_value=h1) as f: - i2p_provider = create_i2p_provider(None, config) - h = i2p_provider.get_i2p_handler() - - self.assertEqual(f.mock_calls, [mock.call("cfg")]) - self.assertIdentical(h, h1) - class 
Connections(unittest.TestCase): def setUp(self): @@ -341,7 +120,11 @@ class Connections(unittest.TestCase): self.config = config_from_string("fake.port", self.basedir, BASECONFIG) def test_default(self): - default_connection_handlers, _ = create_connection_handlers(self.config, mock.Mock(), mock.Mock()) + default_connection_handlers, _ = create_connection_handlers( + self.config, + ConstantAddresses(handler=object()), + ConstantAddresses(handler=object()), + ) self.assertEqual(default_connection_handlers["tcp"], "tcp") self.assertEqual(default_connection_handlers["tor"], "tor") self.assertEqual(default_connection_handlers["i2p"], "i2p") @@ -352,23 +135,39 @@ class Connections(unittest.TestCase): "no-basedir", BASECONFIG + "[connections]\ntcp = tor\n", ) - default_connection_handlers, _ = create_connection_handlers(config, mock.Mock(), mock.Mock()) + default_connection_handlers, _ = create_connection_handlers( + config, + ConstantAddresses(handler=object()), + ConstantAddresses(handler=object()), + ) self.assertEqual(default_connection_handlers["tcp"], "tor") self.assertEqual(default_connection_handlers["tor"], "tor") self.assertEqual(default_connection_handlers["i2p"], "i2p") def test_tor_unimportable(self): - with mock.patch("allmydata.util.tor_provider._import_tor", - return_value=None): - self.config = config_from_string( - "fake.port", - "no-basedir", - BASECONFIG + "[connections]\ntcp = tor\n", + """ + If the configuration calls for substituting Tor for TCP and + ``foolscap.connections.tor`` is not importable then + ``create_connection_handlers`` raises ``ValueError`` with a message + explaining this makes Tor unusable. + """ + self.config = config_from_string( + "fake.port", + "no-basedir", + BASECONFIG + "[connections]\ntcp = tor\n", + ) + tor_provider = create_tor_provider( + reactor, + self.config, + import_tor=lambda: None, + ) + with self.assertRaises(ValueError) as ctx: + default_connection_handlers, _ = create_connection_handlers( + self.config, + i2p_provider=ConstantAddresses(handler=object()), + tor_provider=tor_provider, ) - with self.assertRaises(ValueError) as ctx: - tor_provider = create_tor_provider(reactor, self.config) - default_connection_handlers, _ = create_connection_handlers(self.config, mock.Mock(), tor_provider) self.assertEqual( str(ctx.exception), "'tahoe.cfg [connections] tcp='" @@ -383,7 +182,11 @@ class Connections(unittest.TestCase): BASECONFIG + "[connections]\ntcp = unknown\n", ) with self.assertRaises(ValueError) as ctx: - create_connection_handlers(config, mock.Mock(), mock.Mock()) + create_connection_handlers( + config, + ConstantAddresses(handler=object()), + ConstantAddresses(handler=object()), + ) self.assertIn("'tahoe.cfg [connections] tcp='", str(ctx.exception)) self.assertIn("uses unknown handler type 'unknown'", str(ctx.exception)) @@ -393,7 +196,11 @@ class Connections(unittest.TestCase): "no-basedir", BASECONFIG + "[connections]\ntcp = disabled\n", ) - default_connection_handlers, _ = create_connection_handlers(config, mock.Mock(), mock.Mock()) + default_connection_handlers, _ = create_connection_handlers( + config, + ConstantAddresses(handler=object()), + ConstantAddresses(handler=object()), + ) self.assertEqual(default_connection_handlers["tcp"], None) self.assertEqual(default_connection_handlers["tor"], "tor") self.assertEqual(default_connection_handlers["i2p"], "i2p") @@ -408,7 +215,11 @@ class Privacy(unittest.TestCase): ) with self.assertRaises(PrivacyError) as ctx: - create_connection_handlers(config, mock.Mock(), mock.Mock()) + 
create_connection_handlers( + config, + ConstantAddresses(handler=object()), + ConstantAddresses(handler=object()), + ) self.assertEqual( str(ctx.exception), @@ -423,7 +234,11 @@ class Privacy(unittest.TestCase): BASECONFIG + "[connections]\ntcp = disabled\n" + "[node]\nreveal-IP-address = false\n", ) - default_connection_handlers, _ = create_connection_handlers(config, mock.Mock(), mock.Mock()) + default_connection_handlers, _ = create_connection_handlers( + config, + ConstantAddresses(handler=object()), + ConstantAddresses(handler=object()), + ) self.assertEqual(default_connection_handlers["tcp"], None) def test_tub_location_auto(self): @@ -434,7 +249,14 @@ class Privacy(unittest.TestCase): ) with self.assertRaises(PrivacyError) as ctx: - create_main_tub(config, {}, {}, {}, mock.Mock(), mock.Mock()) + create_main_tub( + config, + tub_options={}, + default_connection_handlers={}, + foolscap_connection_handlers={}, + i2p_provider=ConstantAddresses(), + tor_provider=ConstantAddresses(), + ) self.assertEqual( str(ctx.exception), "tub.location uses AUTO", diff --git a/src/allmydata/test/test_deferredutil.py b/src/allmydata/test/test_deferredutil.py index 6ebc93556..2a155089f 100644 --- a/src/allmydata/test/test_deferredutil.py +++ b/src/allmydata/test/test_deferredutil.py @@ -74,3 +74,58 @@ class DeferredUtilTests(unittest.TestCase, deferredutil.WaitForDelayedCallsMixin d = defer.succeed(None) d.addBoth(self.wait_for_delayed_calls) return d + + +class UntilTests(unittest.TestCase): + """ + Tests for ``deferredutil.until``. + """ + def test_exception(self): + """ + If the action raises an exception, the ``Deferred`` returned by ``until`` + fires with a ``Failure``. + """ + self.assertFailure( + deferredutil.until(lambda: 1/0, lambda: True), + ZeroDivisionError, + ) + + def test_stops_on_condition(self): + """ + The action is called repeatedly until ``condition`` returns ``True``. + """ + calls = [] + def action(): + calls.append(None) + + def condition(): + return len(calls) == 3 + + self.assertIs( + self.successResultOf( + deferredutil.until(action, condition), + ), + None, + ) + self.assertEqual(3, len(calls)) + + def test_waits_for_deferred(self): + """ + If the action returns a ``Deferred`` then it is called again when the + ``Deferred`` fires. 
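For orientation, a hypothetical reference implementation of ``deferredutil.until`` that satisfies all three behaviors these tests pin down (synchronous exceptions become a failed ``Deferred``, the condition is checked after every call, and a ``Deferred`` result is waited on before the next call)::

    from twisted.internet import defer

    @defer.inlineCallbacks
    def until(action, condition):
        while True:
            # maybeDeferred turns a plain or raising action into a Deferred,
            # so a pending result suspends the loop until it fires.
            yield defer.maybeDeferred(action)
            if condition():
                break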
+ """ + counter = [0] + r1 = defer.Deferred() + r2 = defer.Deferred() + results = [r1, r2] + def action(): + counter[0] += 1 + return results.pop(0) + + def condition(): + return False + + deferredutil.until(action, condition) + self.assertEqual([1], counter) + r1.callback(None) + self.assertEqual([2], counter) diff --git a/src/allmydata/test/test_dirnode.py b/src/allmydata/test/test_dirnode.py index 1c265492b..8e5e59b46 100644 --- a/src/allmydata/test/test_dirnode.py +++ b/src/allmydata/test/test_dirnode.py @@ -1561,7 +1561,7 @@ class Packing(testutil.ReallyEqualMixin, unittest.TestCase): kids, fn.get_writekey(), deep_immutable=True) @implementer(IMutableFileNode) -class FakeMutableFile(object): +class FakeMutableFile(object): # type: ignore # incomplete implementation counter = 0 def __init__(self, initial_contents=b""): data = self._get_initial_contents(initial_contents) @@ -1622,7 +1622,7 @@ class FakeNodeMaker(NodeMaker): def create_mutable_file(self, contents=b"", keysize=None, version=None): return defer.succeed(FakeMutableFile(contents)) -class FakeClient2(_Client): +class FakeClient2(_Client): # type: ignore # tahoe-lafs/ticket/3573 def __init__(self): self.nodemaker = FakeNodeMaker(None, None, None, None, None, diff --git a/src/allmydata/test/test_eliotutil.py b/src/allmydata/test/test_eliotutil.py index 1a8c2f801..aca677323 100644 --- a/src/allmydata/test/test_eliotutil.py +++ b/src/allmydata/test/test_eliotutil.py @@ -18,17 +18,25 @@ if PY2: from sys import stdout import logging +from unittest import ( + skip, +) + from fixtures import ( TempDir, ) from testtools import ( TestCase, ) +from testtools import ( + TestResult, +) from testtools.matchers import ( Is, IsInstance, MatchesStructure, Equals, + HasLength, AfterPreprocessing, ) from testtools.twistedsupport import ( @@ -38,12 +46,16 @@ from testtools.twistedsupport import ( from eliot import ( Message, + MessageType, + fields, FileDestination, + MemoryLogger, ) from eliot.twisted import DeferredContext from eliot.testing import ( capture_logging, assertHasAction, + swap_logger, ) from twisted.internet.defer import ( @@ -57,11 +69,14 @@ from ..util.eliotutil import ( _parse_destination_description, _EliotLogging, ) +from ..util.jsonbytes import BytesJSONEncoder + from .common import ( SyncTestCase, AsyncTestCase, ) + class EliotLoggedTestTests(AsyncTestCase): def test_returns_none(self): Message.log(hello="world") @@ -94,7 +109,7 @@ class ParseDestinationDescriptionTests(SyncTestCase): reactor = object() self.assertThat( _parse_destination_description("file:-")(reactor), - Equals(FileDestination(stdout)), + Equals(FileDestination(stdout, encoder=BytesJSONEncoder)), ) @@ -170,6 +185,62 @@ class EliotLoggingTests(TestCase): ), ) + def test_validation_failure(self): + """ + If a test emits a log message that fails validation then an error is added + to the result. + """ + # Make sure we preserve the original global Eliot state. + original = swap_logger(MemoryLogger()) + self.addCleanup(lambda: swap_logger(original)) + + class ValidationFailureProbe(SyncTestCase): + def test_bad_message(self): + # This message does not validate because "Hello" is not an + # int. + MSG = MessageType("test:eliotutil", fields(foo=int)) + MSG(foo="Hello").write() + + result = TestResult() + case = ValidationFailureProbe("test_bad_message") + case.run(result) + + self.assertThat( + result.errors, + HasLength(1), + ) + + def test_skip_cleans_up(self): + """ + After a skipped test the global Eliot logging state is restored. 
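The failure mode exercised by ``test_validation_failure`` can be reproduced in isolation; a typed message whose field value does not match the declared type only blows up once the ``MemoryLogger`` is checked::

    from eliot import MessageType, MemoryLogger, fields

    MSG = MessageType("test:eliotutil", fields(foo=int))

    logger = MemoryLogger()
    MSG(foo="Hello").write(logger)  # wrong type: "Hello" is not an int
    logger.validate()               # raises; the runner reports it as an error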
+ """ + # Save the logger that's active before we do anything so that we can + # restore it later. Also install another logger so we can compare it + # to the active logger later. + expected = MemoryLogger() + original = swap_logger(expected) + + # Restore it, whatever else happens. + self.addCleanup(lambda: swap_logger(original)) + + class SkipProbe(SyncTestCase): + @skip("It's a skip test.") + def test_skipped(self): + pass + + case = SkipProbe("test_skipped") + case.run() + + # Retrieve the logger that's active now that the skipped test is done + # so we can check it against the expected value. + actual = swap_logger(MemoryLogger()) + self.assertThat( + actual, + Is(expected), + ) + + + class LogCallDeferredTests(TestCase): """ Tests for ``log_call_deferred``. diff --git a/src/allmydata/test/test_encodingutil.py b/src/allmydata/test/test_encodingutil.py index cbc9143b7..f7987d466 100644 --- a/src/allmydata/test/test_encodingutil.py +++ b/src/allmydata/test/test_encodingutil.py @@ -70,7 +70,7 @@ if __name__ == "__main__": sys.exit(0) -import os, sys, locale +import os, sys from unittest import skipIf from twisted.trial import unittest @@ -81,99 +81,28 @@ from allmydata.test.common_util import ( ReallyEqualMixin, skip_if_cannot_represent_filename, ) from allmydata.util import encodingutil, fileutil -from allmydata.util.encodingutil import argv_to_unicode, unicode_to_url, \ +from allmydata.util.encodingutil import unicode_to_url, \ unicode_to_output, quote_output, quote_path, quote_local_unicode_path, \ quote_filepath, unicode_platform, listdir_unicode, FilenameEncodingError, \ - get_io_encoding, get_filesystem_encoding, to_bytes, from_utf8_or_none, _reload, \ + get_filesystem_encoding, to_bytes, from_utf8_or_none, _reload, \ to_filepath, extend_filepath, unicode_from_filepath, unicode_segments_from, \ unicode_to_argv -from twisted.python import usage - class MockStdout(object): pass -class EncodingUtilErrors(ReallyEqualMixin, unittest.TestCase): - def test_get_io_encoding(self): - mock_stdout = MockStdout() - self.patch(sys, 'stdout', mock_stdout) - - mock_stdout.encoding = 'UTF-8' - _reload() - self.failUnlessReallyEqual(get_io_encoding(), 'utf-8') - - mock_stdout.encoding = 'cp65001' - _reload() - self.assertEqual(get_io_encoding(), 'utf-8') - - mock_stdout.encoding = 'koi8-r' - expected = sys.platform == "win32" and 'utf-8' or 'koi8-r' - _reload() - self.failUnlessReallyEqual(get_io_encoding(), expected) - - mock_stdout.encoding = 'nonexistent_encoding' - if sys.platform == "win32": - _reload() - self.failUnlessReallyEqual(get_io_encoding(), 'utf-8') - else: - self.failUnlessRaises(AssertionError, _reload) - - def test_get_io_encoding_not_from_stdout(self): - preferredencoding = 'koi8-r' - def call_locale_getpreferredencoding(): - return preferredencoding - self.patch(locale, 'getpreferredencoding', call_locale_getpreferredencoding) - mock_stdout = MockStdout() - self.patch(sys, 'stdout', mock_stdout) - - expected = sys.platform == "win32" and 'utf-8' or 'koi8-r' - _reload() - self.failUnlessReallyEqual(get_io_encoding(), expected) - - mock_stdout.encoding = None - _reload() - self.failUnlessReallyEqual(get_io_encoding(), expected) - - preferredencoding = None - _reload() - self.assertEqual(get_io_encoding(), 'utf-8') - - def test_argv_to_unicode(self): - encodingutil.io_encoding = 'utf-8' - self.failUnlessRaises(usage.UsageError, - argv_to_unicode, - lumiere_nfc.encode('latin1')) - - @skipIf(PY3, "Python 2 only.") - def test_unicode_to_output(self): - encodingutil.io_encoding = 
'koi8-r' - self.failUnlessRaises(UnicodeEncodeError, unicode_to_output, lumiere_nfc) - - def test_no_unicode_normalization(self): - # Pretend to run on a Unicode platform. - # listdir_unicode normalized to NFC in 1.7beta, but now doesn't. - - def call_os_listdir(path): - return [Artonwall_nfd] - self.patch(os, 'listdir', call_os_listdir) - self.patch(sys, 'platform', 'darwin') - - _reload() - self.failUnlessReallyEqual(listdir_unicode(u'/dummy'), [Artonwall_nfd]) - - # The following tests apply only to platforms that don't store filenames as # Unicode entities on the filesystem. class EncodingUtilNonUnicodePlatform(unittest.TestCase): @skipIf(PY3, "Python 3 is always Unicode, regardless of OS.") def setUp(self): - # Mock sys.platform because unicode_platform() uses it - self.original_platform = sys.platform - sys.platform = 'linux' + # Make sure everything goes back to the way it was at the end of the + # test. + self.addCleanup(_reload) - def tearDown(self): - sys.platform = self.original_platform - _reload() + # Mock sys.platform because unicode_platform() uses it. Cleanups run + # in reverse order so we do this second so it gets undone first. + self.patch(sys, "platform", "linux") def test_listdir_unicode(self): # What happens if latin1-encoded filenames are encountered on an UTF-8 @@ -206,25 +135,8 @@ class EncodingUtilNonUnicodePlatform(unittest.TestCase): class EncodingUtil(ReallyEqualMixin): def setUp(self): - self.original_platform = sys.platform - sys.platform = self.platform - - def tearDown(self): - sys.platform = self.original_platform - _reload() - - def test_argv_to_unicode(self): - if 'argv' not in dir(self): - return - - mock_stdout = MockStdout() - mock_stdout.encoding = self.io_encoding - self.patch(sys, 'stdout', mock_stdout) - - argu = lumiere_nfc - argv = self.argv - _reload() - self.failUnlessReallyEqual(argv_to_unicode(argv), argu) + self.addCleanup(_reload) + self.patch(sys, "platform", self.platform) def test_unicode_to_url(self): self.failUnless(unicode_to_url(lumiere_nfc), b"lumi\xc3\xa8re") @@ -245,15 +157,19 @@ class EncodingUtil(ReallyEqualMixin): def test_unicode_to_output_py3(self): self.failUnlessReallyEqual(unicode_to_output(lumiere_nfc), lumiere_nfc) - @skipIf(PY3, "Python 2 only.") - def test_unicode_to_argv_py2(self): - """unicode_to_argv() converts to bytes on Python 2.""" - self.assertEqual(unicode_to_argv("abc"), u"abc".encode(self.io_encoding)) + def test_unicode_to_argv(self): + """ + unicode_to_argv() returns its unicode argument on Windows and Python 2 and + converts to bytes using UTF-8 elsewhere. 
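The branch in the assertions below is the behavior contract; a hypothetical sketch of the helper for orientation (the real one lives in ``allmydata.util.encodingutil``, and the encoding parameter here is an assumption)::

    import sys
    from future.utils import PY3

    def unicode_to_argv_sketch(u, io_encoding="utf-8"):
        # argv values stay unicode on Python 3 and on Windows; POSIX
        # Python 2 expects bytes in the configured I/O encoding.
        if PY3 or sys.platform == "win32":
            return u
        return u.encode(io_encoding)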
+ """ + result = unicode_to_argv(lumiere_nfc) + if PY3 or self.platform == "win32": + expected_value = lumiere_nfc + else: + expected_value = lumiere_nfc.encode(self.io_encoding) - @skipIf(PY2, "Python 3 only.") - def test_unicode_to_argv_py3(self): - """unicode_to_argv() is noop on Python 3.""" - self.assertEqual(unicode_to_argv("abc"), "abc") + self.assertIsInstance(result, type(expected_value)) + self.assertEqual(result, expected_value) @skipIf(PY3, "Python 3 only.") def test_unicode_platform_py2(self): @@ -463,13 +379,6 @@ class QuoteOutput(ReallyEqualMixin, unittest.TestCase): check(u"\n", u"\"\\x0a\"", quote_newlines=True) def test_quote_output_default(self): - self.patch(encodingutil, 'io_encoding', 'ascii') - self.test_quote_output_ascii(None) - - self.patch(encodingutil, 'io_encoding', 'latin1') - self.test_quote_output_latin1(None) - - self.patch(encodingutil, 'io_encoding', 'utf-8') self.test_quote_output_utf8(None) @@ -581,14 +490,6 @@ class UbuntuKarmicUTF8(EncodingUtil, unittest.TestCase): io_encoding = 'UTF-8' dirlist = [b'test_file', b'\xc3\x84rtonwall.mp3', b'Blah blah.txt'] -class UbuntuKarmicLatin1(EncodingUtil, unittest.TestCase): - uname = 'Linux korn 2.6.31-14-generic #48-Ubuntu SMP Fri Oct 16 14:05:01 UTC 2009 x86_64' - argv = b'lumi\xe8re' - platform = 'linux2' - filesystem_encoding = 'ISO-8859-1' - io_encoding = 'ISO-8859-1' - dirlist = [b'test_file', b'Blah blah.txt', b'\xc4rtonwall.mp3'] - class Windows(EncodingUtil, unittest.TestCase): uname = 'Windows XP 5.1.2600 x86 x86 Family 15 Model 75 Step ping 2, AuthenticAMD' argv = b'lumi\xc3\xa8re' @@ -605,20 +506,6 @@ class MacOSXLeopard(EncodingUtil, unittest.TestCase): io_encoding = 'UTF-8' dirlist = [u'A\u0308rtonwall.mp3', u'Blah blah.txt', u'test_file'] -class MacOSXLeopard7bit(EncodingUtil, unittest.TestCase): - uname = 'Darwin g5.local 9.8.0 Darwin Kernel Version 9.8.0: Wed Jul 15 16:57:01 PDT 2009; root:xnu-1228.15.4~1/RELEASE_PPC Power Macintosh powerpc' - platform = 'darwin' - filesystem_encoding = 'utf-8' - io_encoding = 'US-ASCII' - dirlist = [u'A\u0308rtonwall.mp3', u'Blah blah.txt', u'test_file'] - -class OpenBSD(EncodingUtil, unittest.TestCase): - uname = 'OpenBSD 4.1 GENERIC#187 i386 Intel(R) Celeron(R) CPU 2.80GHz ("GenuineIntel" 686-class)' - platform = 'openbsd4' - filesystem_encoding = '646' - io_encoding = '646' - # Oops, I cannot write filenames containing non-ascii characters - class TestToFromStr(ReallyEqualMixin, unittest.TestCase): def test_to_bytes(self): diff --git a/src/allmydata/test/test_ftp.py b/src/allmydata/test/test_ftp.py deleted file mode 100644 index 4eddef440..000000000 --- a/src/allmydata/test/test_ftp.py +++ /dev/null @@ -1,106 +0,0 @@ - -from twisted.trial import unittest - -from allmydata.frontends import ftpd -from allmydata.immutable import upload -from allmydata.mutable import publish -from allmydata.test.no_network import GridTestMixin -from allmydata.test.common_util import ReallyEqualMixin - -class Handler(GridTestMixin, ReallyEqualMixin, unittest.TestCase): - """ - This is a no-network unit test of ftpd.Handler and the abstractions - it uses. 
- """ - - FALL_OF_BERLIN_WALL = 626644800 - TURN_OF_MILLENIUM = 946684800 - - def _set_up(self, basedir, num_clients=1, num_servers=10): - self.basedir = "ftp/" + basedir - self.set_up_grid(num_clients=num_clients, num_servers=num_servers, - oneshare=True) - - self.client = self.g.clients[0] - self.username = "alice" - self.convergence = "" - - d = self.client.create_dirnode() - def _created_root(node): - self.root = node - self.root_uri = node.get_uri() - self.handler = ftpd.Handler(self.client, self.root, self.username, - self.convergence) - d.addCallback(_created_root) - return d - - def _set_metadata(self, name, metadata): - """Set metadata for `name', avoiding MetadataSetter's timestamp reset - behavior.""" - def _modifier(old_contents, servermap, first_time): - children = self.root._unpack_contents(old_contents) - children[name] = (children[name][0], metadata) - return self.root._pack_contents(children) - - return self.root._node.modify(_modifier) - - def _set_up_tree(self): - # add immutable file at root - immutable = upload.Data("immutable file contents", None) - d = self.root.add_file(u"immutable", immutable) - - # `mtime' and `linkmotime' both set - md_both = {'mtime': self.FALL_OF_BERLIN_WALL, - 'tahoe': {'linkmotime': self.TURN_OF_MILLENIUM}} - d.addCallback(lambda _: self._set_metadata(u"immutable", md_both)) - - # add link to root from root - d.addCallback(lambda _: self.root.set_node(u"loop", self.root)) - - # `mtime' set, but no `linkmotime' - md_just_mtime = {'mtime': self.FALL_OF_BERLIN_WALL, 'tahoe': {}} - d.addCallback(lambda _: self._set_metadata(u"loop", md_just_mtime)) - - # add mutable file at root - mutable = publish.MutableData("mutable file contents") - d.addCallback(lambda _: self.client.create_mutable_file(mutable)) - d.addCallback(lambda node: self.root.set_node(u"mutable", node)) - - # neither `mtime' nor `linkmotime' set - d.addCallback(lambda _: self._set_metadata(u"mutable", {})) - - return d - - def _compareDirLists(self, actual, expected): - actual_list = sorted(actual) - expected_list = sorted(expected) - - self.failUnlessReallyEqual(len(actual_list), len(expected_list), - "%r is wrong length, expecting %r" % ( - actual_list, expected_list)) - for (a, b) in zip(actual_list, expected_list): - (name, meta) = a - (expected_name, expected_meta) = b - self.failUnlessReallyEqual(name, expected_name) - self.failUnlessReallyEqual(meta, expected_meta) - - def test_list(self): - keys = ("size", "directory", "permissions", "hardlinks", "modified", - "owner", "group", "unexpected") - d = self._set_up("list") - - d.addCallback(lambda _: self._set_up_tree()) - d.addCallback(lambda _: self.handler.list("", keys=keys)) - - expected_root = [ - ('loop', - [0, True, ftpd.IntishPermissions(0o600), 1, self.FALL_OF_BERLIN_WALL, 'alice', 'alice', '??']), - ('immutable', - [23, False, ftpd.IntishPermissions(0o600), 1, self.TURN_OF_MILLENIUM, 'alice', 'alice', '??']), - ('mutable', - # timestamp should be 0 if no timestamp metadata is present - [0, False, ftpd.IntishPermissions(0o600), 1, 0, 'alice', 'alice', '??'])] - - d.addCallback(lambda root: self._compareDirLists(root, expected_root)) - - return d diff --git a/src/allmydata/test/test_hashutil.py b/src/allmydata/test/test_hashutil.py index abcd4f0fb..482e79c0b 100644 --- a/src/allmydata/test/test_hashutil.py +++ b/src/allmydata/test/test_hashutil.py @@ -102,9 +102,71 @@ class HashUtilTests(unittest.TestCase): got_a = base32.b2a(got) self.failUnlessEqual(got_a, expected_a) - def test_known_answers(self): - # assert 
backwards compatibility
+    def test_storage_index_hash_known_answers(self):
+        """
+        Verify backwards compatibility by comparing ``storage_index_hash`` outputs
+        for some well-known (to us) inputs.
+        """
+        # This is a marginal case.  b"" is not a valid aes 128 key.  The
+        # implementation does nothing to avoid producing a result for it,
+        # though.
         self._testknown(hashutil.storage_index_hash, b"qb5igbhcc5esa6lwqorsy7e6am", b"")
+
+        # This is a little bit more realistic though clearly this is a poor key choice.
+        self._testknown(hashutil.storage_index_hash, b"wvggbrnrezdpa5yayrgiw5nzja", b"x" * 16)
+
+        # Here's a much more realistic key that I generated by reading some
+        # bytes from /dev/urandom.  I computed the expected hash value twice.
+        # First using hashlib.sha256 and then with sha256sum(1).  The input
+        # string given to the hash function was "43:<tag>,<key>"
+        # in each case.
+        self._testknown(
+            hashutil.storage_index_hash,
+            b"aarbseqqrpsfowduchcjbonscq",
+            base32.a2b(b"2ckv3dfzh6rgjis6ogfqhyxnzy"),
+        )
+
+    def test_convergence_hasher_tag(self):
+        """
+        ``_convergence_hasher_tag`` constructs the convergence hasher tag from a
+        unique prefix, the required, total, and segment size parameters, and a
+        convergence secret.
+        """
+        self.assertEqual(
+            b"allmydata_immutable_content_to_key_with_added_secret_v1+"
+            b"16:\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42\x42,"
+            b"9:3,10,1024,",
+            hashutil._convergence_hasher_tag(
+                k=3,
+                n=10,
+                segsize=1024,
+                convergence=b"\x42" * 16,
+            ),
+        )
+
+    def test_convergence_hasher_out_of_bounds(self):
+        """
+        ``_convergence_hasher_tag`` raises ``ValueError`` if k or n is not between
+        1 and 256 inclusive or if k is greater than n.
+        """
+        segsize = 1024
+        secret = b"\x42" * 16
+        for bad_k in (0, 2, 257):
+            with self.assertRaises(ValueError):
+                hashutil._convergence_hasher_tag(
+                    k=bad_k, n=1, segsize=segsize, convergence=secret,
+                )
+        for bad_n in (0, 1, 257):
+            with self.assertRaises(ValueError):
+                hashutil._convergence_hasher_tag(
+                    k=2, n=bad_n, segsize=segsize, convergence=secret,
+                )
+
+    def test_known_answers(self):
+        """
+        Verify backwards compatibility by comparing hash outputs for some
+        well-known (to us) inputs.
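The ``43:`` in that comment is netstring framing: the tag is length-prefixed in exactly the same way as the ``16:...,`` (convergence secret) and ``9:3,10,1024,`` (encoding parameters) pieces in the expected value above. A quick self-contained check; the exact storage-index tag string is an assumption inferred from its 43-byte length::

    def netstring(s):
        # Length-prefixed framing, as in allmydata.util.netstring.
        return b"%d:%s," % (len(s), s)

    tag = b"allmydata_immutable_key_to_storage_index_v1"  # assumed, 43 bytes
    assert len(tag) == 43
    assert netstring(b"3,10,1024") == b"9:3,10,1024,"
    assert netstring(b"\x42" * 16).startswith(b"16:")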
+ """ self._testknown(hashutil.block_hash, b"msjr5bh4evuh7fa3zw7uovixfbvlnstr5b65mrerwfnvjxig2jvq", b"") self._testknown(hashutil.uri_extension_hash, b"wthsu45q7zewac2mnivoaa4ulh5xvbzdmsbuyztq2a5fzxdrnkka", b"") self._testknown(hashutil.plaintext_hash, b"5lz5hwz3qj3af7n6e3arblw7xzutvnd3p3fjsngqjcb7utf3x3da", b"") diff --git a/src/allmydata/test/test_helper.py b/src/allmydata/test/test_helper.py index 65c07135a..3faffbe0d 100644 --- a/src/allmydata/test/test_helper.py +++ b/src/allmydata/test/test_helper.py @@ -19,6 +19,12 @@ from functools import ( ) import attr +try: + from typing import List + from allmydata.introducer.client import IntroducerClient +except ImportError: + pass + from twisted.internet import defer from twisted.trial import unittest from twisted.application import service @@ -125,7 +131,7 @@ class FakeCHKCheckerAndUEBFetcher(object): )) class FakeClient(service.MultiService): - introducer_clients = [] + introducer_clients = [] # type: List[IntroducerClient] DEFAULT_ENCODING_PARAMETERS = {"k":25, "happy": 75, "n": 100, diff --git a/src/allmydata/test/test_i2p_provider.py b/src/allmydata/test/test_i2p_provider.py index a724b300e..37f2333f5 100644 --- a/src/allmydata/test/test_i2p_provider.py +++ b/src/allmydata/test/test_i2p_provider.py @@ -277,6 +277,20 @@ class Provider(unittest.TestCase): i2p.local_i2p.assert_called_with("configdir") self.assertIs(h, handler) + def test_handler_launch_executable(self): + i2p = mock.Mock() + handler = object() + i2p.launch = mock.Mock(return_value=handler) + reactor = object() + + with mock_i2p(i2p): + p = i2p_provider.create(reactor, + FakeConfig(launch=True, + **{"i2p.executable": "myi2p"})) + h = p.get_i2p_handler() + self.assertIs(h, handler) + i2p.launch.assert_called_with(i2p_configdir=None, i2p_binary="myi2p") + def test_handler_default(self): i2p = mock.Mock() handler = object() diff --git a/src/allmydata/test/test_introducer.py b/src/allmydata/test/test_introducer.py index b14b66ffb..0475d3f6c 100644 --- a/src/allmydata/test/test_introducer.py +++ b/src/allmydata/test/test_introducer.py @@ -15,7 +15,12 @@ from six import ensure_binary, ensure_text import os, re, itertools from base64 import b32decode import json -from mock import Mock, patch +from operator import ( + setitem, +) +from functools import ( + partial, +) from testtools.matchers import ( Is, @@ -84,7 +89,8 @@ class Node(testutil.SignalMixin, testutil.ReallyEqualMixin, AsyncTestCase): def test_introducer_clients_unloadable(self): """ - Error if introducers.yaml exists but we can't read it + ``create_introducer_clients`` raises ``EnvironmentError`` if + ``introducers.yaml`` exists but we can't read it. 
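The guarded ``typing`` import plus comment-style annotation seen in ``test_helper.py`` above is the standard trick for annotating code that must still run on interpreters without ``typing``: the import only matters to the type checker. In miniature::

    try:
        from typing import List  # noqa: F401 -- used only in type comments
    except ImportError:
        pass

    class Example(object):
        names = []  # type: List[str]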
""" basedir = u"introducer.IntroducerNode.test_introducer_clients_unloadable" os.mkdir(basedir) @@ -94,17 +100,10 @@ class Node(testutil.SignalMixin, testutil.ReallyEqualMixin, AsyncTestCase): f.write(u'---\n') os.chmod(yaml_fname, 0o000) self.addCleanup(lambda: os.chmod(yaml_fname, 0o700)) - # just mocking the yaml failure, as "yamlutil.safe_load" only - # returns None on some platforms for unreadable files - with patch("allmydata.client.yamlutil") as p: - p.safe_load = Mock(return_value=None) - - fake_tub = Mock() - config = read_config(basedir, "portnum") - - with self.assertRaises(EnvironmentError): - create_introducer_clients(config, fake_tub) + config = read_config(basedir, "portnum") + with self.assertRaises(EnvironmentError): + create_introducer_clients(config, Tub()) @defer.inlineCallbacks def test_furl(self): @@ -1037,23 +1036,53 @@ class Signatures(SyncTestCase): unsign_from_foolscap, (bad_msg, sig, b"v999-key")) def test_unsigned_announcement(self): - ed25519.verifying_key_from_string(b"pub-v0-wodst6ly4f7i7akt2nxizsmmy2rlmer6apltl56zctn67wfyu5tq") - mock_tub = Mock() + """ + An incorrectly signed announcement is not delivered to subscribers. + """ + private_key, public_key = ed25519.create_signing_keypair() + public_key_str = ed25519.string_from_verifying_key(public_key) + ic = IntroducerClient( - mock_tub, + Tub(), "pb://", u"fake_nick", "0.0.0", "1.2.3", (0, u"i am a nonce"), - "invalid", + FilePath(self.mktemp()), + ) + received = {} + ic.subscribe_to("good-stuff", partial(setitem, received)) + + # Deliver a good message to prove our test code is valid. + ann = {"service-name": "good-stuff", "payload": "hello"} + ann_t = sign_to_foolscap(ann, private_key) + ic.got_announcements([ann_t]) + + self.assertEqual( + {public_key_str[len("pub-"):]: ann}, + received, + ) + received.clear() + + # Now deliver one without a valid signature and observe that it isn't + # delivered to the subscriber. + ann = {"service-name": "good-stuff", "payload": "bad stuff"} + (msg, sig, key) = sign_to_foolscap(ann, private_key) + # Drop a base32 word from the middle of the key to invalidate the + # signature. + sig_a = bytearray(sig) + sig_a[20:22] = [] + sig = bytes(sig_a) + ann_t = (msg, sig, key) + ic.got_announcements([ann_t]) + + # The received announcements dict should remain empty because we + # should not receive the announcement with the invalid signature. + self.assertEqual( + {}, + received, ) - self.assertEqual(0, ic._debug_counts["inbound_announcement"]) - ic.got_announcements([ - (b"message", b"v0-aaaaaaa", b"v0-wodst6ly4f7i7akt2nxizsmmy2rlmer6apltl56zctn67wfyu5tq") - ]) - # we should have rejected this announcement due to a bad signature - self.assertEqual(0, ic._debug_counts["inbound_announcement"]) # add tests of StorageFarmBroker: if it receives duplicate announcements, it diff --git a/src/allmydata/test/test_json_metadata.py b/src/allmydata/test/test_json_metadata.py index 75d4e1567..a0cb9c142 100644 --- a/src/allmydata/test/test_json_metadata.py +++ b/src/allmydata/test/test_json_metadata.py @@ -1,3 +1,14 @@ +""" +Ported to Python 3. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 from twisted.trial.unittest import TestCase diff --git a/src/allmydata/test/test_node.py b/src/allmydata/test/test_node.py index 1e0f3020c..e44fd5743 100644 --- a/src/allmydata/test/test_node.py +++ b/src/allmydata/test/test_node.py @@ -564,7 +564,7 @@ class TestMissingPorts(unittest.TestCase): config = config_from_string(self.basedir, "portnum", config_data) with self.assertRaises(PortAssignmentRequired): _tub_portlocation(config, None, None) - test_listen_on_zero_with_host.todo = native_str( + test_listen_on_zero_with_host.todo = native_str( # type: ignore "https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3563" ) diff --git a/src/allmydata/test/test_observer.py b/src/allmydata/test/test_observer.py index 0db13db58..134876be3 100644 --- a/src/allmydata/test/test_observer.py +++ b/src/allmydata/test/test_observer.py @@ -101,3 +101,56 @@ class Observer(unittest.TestCase): d.addCallback(_step2) d.addCallback(_check2) return d + + def test_observer_list_reentrant(self): + """ + ``ObserverList`` is reentrant. + """ + observed = [] + + def observer_one(): + obs.unsubscribe(observer_one) + + def observer_two(): + observed.append(None) + + obs = observer.ObserverList() + obs.subscribe(observer_one) + obs.subscribe(observer_two) + obs.notify() + + self.assertEqual([None], observed) + + def test_observer_list_observer_errors(self): + """ + An error in an earlier observer does not prevent notification from being + delivered to a later observer. + """ + observed = [] + + def observer_one(): + raise Exception("Some problem here") + + def observer_two(): + observed.append(None) + + obs = observer.ObserverList() + obs.subscribe(observer_one) + obs.subscribe(observer_two) + obs.notify() + + self.assertEqual([None], observed) + self.assertEqual(1, len(self.flushLoggedErrors(Exception))) + + def test_observer_list_propagate_keyboardinterrupt(self): + """ + ``KeyboardInterrupt`` escapes ``ObserverList.notify``. 
+ """ + def observer_one(): + raise KeyboardInterrupt() + + obs = observer.ObserverList() + obs.subscribe(observer_one) + + with self.assertRaises(KeyboardInterrupt): + obs.notify() diff --git a/src/allmydata/test/test_python2_regressions.py b/src/allmydata/test/test_python2_regressions.py index 84484f1cf..fc9ebe17a 100644 --- a/src/allmydata/test/test_python2_regressions.py +++ b/src/allmydata/test/test_python2_regressions.py @@ -14,8 +14,11 @@ from testtools.matchers import ( ) BLACKLIST = { + "allmydata.scripts.types_", "allmydata.test.check_load", + "allmydata.test._win_subprocess", "allmydata.windows.registry", + "allmydata.windows.fixups", } diff --git a/src/allmydata/test/test_python3.py b/src/allmydata/test/test_python3.py index 80242f8a2..c1f0e83d6 100644 --- a/src/allmydata/test/test_python3.py +++ b/src/allmydata/test/test_python3.py @@ -44,7 +44,7 @@ class Python3PortingEffortTests(SynchronousTestCase): ), ), ) - test_finished_porting.todo = native_str( + test_finished_porting.todo = native_str( # type: ignore "https://tahoe-lafs.org/trac/tahoe-lafs/milestone/Support%20Python%203 should be completed", ) diff --git a/src/allmydata/test/test_runner.py b/src/allmydata/test/test_runner.py index 2c8e65fff..6ed9a65c6 100644 --- a/src/allmydata/test/test_runner.py +++ b/src/allmydata/test/test_runner.py @@ -11,6 +11,9 @@ import six from testtools import ( skipUnless, ) +from eliot import ( + log_call, +) from twisted.trial import unittest @@ -25,26 +28,29 @@ from twisted.python.runtime import ( platform, ) from allmydata.util import fileutil, pollmixin -from allmydata.util.encodingutil import unicode_to_argv, unicode_to_output +from allmydata.util.encodingutil import unicode_to_argv, get_filesystem_encoding from allmydata.test import common_util import allmydata from allmydata.scripts.runner import ( parse_options, ) -from .common_util import parse_cli, run_cli +from .common import ( + PIPE, + Popen, +) +from .common_util import ( + parse_cli, + run_cli, +) from .cli_node_api import ( CLINodeAPI, Expect, on_stdout, on_stdout_and_stderr, ) -from ._twisted_9607 import ( - getProcessOutputAndValue, -) from ..util.eliotutil import ( inline_callbacks, - log_call_deferred, ) from .common import ( SyncTestCase, @@ -67,24 +73,6 @@ srcfile = allmydata.__file__ rootdir = get_root_from_file(srcfile) -class RunBinTahoeMixin(object): - @log_call_deferred(action_type="run-bin-tahoe") - def run_bintahoe(self, args, stdin=None, python_options=[], env=None): - command = sys.executable - argv = python_options + ["-m", "allmydata.scripts.runner"] + args - - if env is None: - env = os.environ - - d = getProcessOutputAndValue(command, argv, env, stdinBytes=stdin) - def fix_signal(result): - # Mirror subprocess.Popen.returncode structure - (out, err, signal) = result - return (out, err, -signal) - d.addErrback(fix_signal) - return d - - class ParseOptionsTests(SyncTestCase): """ Tests for ``parse_options``. @@ -106,75 +94,92 @@ class ParseOptionsTests(SyncTestCase): ) -class BinTahoe(common_util.SignalMixin, unittest.TestCase, RunBinTahoeMixin): +@log_call(action_type="run-bin-tahoe") +def run_bintahoe(extra_argv, python_options=None): + """ + Run the main Tahoe entrypoint in a child process with the given additional + arguments. + + :param [unicode] extra_argv: More arguments for the child process argv. + + :return: A three-tuple of stdout (unicode), stderr (unicode), and the + child process "returncode" (int). 
+    """
+    argv = [sys.executable.decode(get_filesystem_encoding())]
+    if python_options is not None:
+        argv.extend(python_options)
+    argv.extend([u"-m", u"allmydata.scripts.runner"])
+    argv.extend(extra_argv)
+    argv = list(unicode_to_argv(arg) for arg in argv)
+    p = Popen(argv, stdout=PIPE, stderr=PIPE)
+    out = p.stdout.read().decode("utf-8")
+    err = p.stderr.read().decode("utf-8")
+    returncode = p.wait()
+    return (out, err, returncode)
+
+
+class BinTahoe(common_util.SignalMixin, unittest.TestCase):
     def test_unicode_arguments_and_output(self):
+        """
+        The runner script receives unmangled non-ASCII values in argv.
+        """
         tricky = u"\u2621"
-        try:
-            tricky_arg = unicode_to_argv(tricky, mangle=True)
-            tricky_out = unicode_to_output(tricky)
-        except UnicodeEncodeError:
-            raise unittest.SkipTest("A non-ASCII argument/output could not be encoded on this platform.")
+        out, err, returncode = run_bintahoe([tricky])
+        self.assertEqual(returncode, 1)
+        self.assertIn(u"Unknown command: " + tricky, out)

-        d = self.run_bintahoe([tricky_arg])
-        def _cb(res):
-            out, err, rc_or_sig = res
-            self.failUnlessEqual(rc_or_sig, 1, str(res))
-            self.failUnlessIn("Unknown command: "+tricky_out, out)
-        d.addCallback(_cb)
-        return d
+    def test_with_python_options(self):
+        """
+        Additional options for the Python interpreter don't prevent the runner
+        script from receiving the arguments meant for it.
+        """
+        # This seems like a redundant test for someone else's functionality
+        # but on Windows we parse the whole command line string ourselves so
+        # we have to have our own implementation of skipping these options.

-    def test_run_with_python_options(self):
-        # -t is a harmless option that warns about tabs.
-        d = self.run_bintahoe(["--version"], python_options=["-t"])
-        def _cb(res):
-            out, err, rc_or_sig = res
-            self.assertEqual(rc_or_sig, 0, str(res))
-            self.assertTrue(out.startswith(allmydata.__appname__ + '/'), str(res))
-        d.addCallback(_cb)
-        return d
+        # -t is a harmless option that warns about tabs so we can add it
+        # without impacting other behavior noticeably.
+        out, err, returncode = run_bintahoe([u"--version"], python_options=[u"-t"])
+        self.assertEqual(returncode, 0)
+        self.assertTrue(out.startswith(allmydata.__appname__ + '/'))

-    @inlineCallbacks
     def test_help_eliot_destinations(self):
-        out, err, rc_or_sig = yield self.run_bintahoe(["--help-eliot-destinations"])
-        self.assertIn("\tfile:", out)
-        self.assertEqual(rc_or_sig, 0)
+        out, err, returncode = run_bintahoe([u"--help-eliot-destinations"])
+        self.assertIn(u"\tfile:", out)
+        self.assertEqual(returncode, 0)

-    @inlineCallbacks
     def test_eliot_destination(self):
-        out, err, rc_or_sig = yield self.run_bintahoe([
+        out, err, returncode = run_bintahoe([
             # Proves little but maybe more than nothing.
-            "--eliot-destination=file:-",
+            u"--eliot-destination=file:-",
             # Throw in *some* command or the process exits with error, making
             # it difficult for us to see if the previous arg was accepted or
             # not.
- "--help", + u"--help", ]) - self.assertEqual(rc_or_sig, 0) + self.assertEqual(returncode, 0) - @inlineCallbacks def test_unknown_eliot_destination(self): - out, err, rc_or_sig = yield self.run_bintahoe([ - "--eliot-destination=invalid:more", + out, err, returncode = run_bintahoe([ + u"--eliot-destination=invalid:more", ]) - self.assertEqual(1, rc_or_sig) - self.assertIn("Unknown destination description", out) - self.assertIn("invalid:more", out) + self.assertEqual(1, returncode) + self.assertIn(u"Unknown destination description", out) + self.assertIn(u"invalid:more", out) - @inlineCallbacks def test_malformed_eliot_destination(self): - out, err, rc_or_sig = yield self.run_bintahoe([ - "--eliot-destination=invalid", + out, err, returncode = run_bintahoe([ + u"--eliot-destination=invalid", ]) - self.assertEqual(1, rc_or_sig) - self.assertIn("must be formatted like", out) + self.assertEqual(1, returncode) + self.assertIn(u"must be formatted like", out) - @inlineCallbacks def test_escape_in_eliot_destination(self): - out, err, rc_or_sig = yield self.run_bintahoe([ - "--eliot-destination=file:@foo", + out, err, returncode = run_bintahoe([ + u"--eliot-destination=file:@foo", ]) - self.assertEqual(1, rc_or_sig) - self.assertIn("Unsupported escape character", out) + self.assertEqual(1, returncode) + self.assertIn(u"Unsupported escape character", out) class CreateNode(unittest.TestCase): @@ -284,8 +289,7 @@ class CreateNode(unittest.TestCase): ) -class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin, - RunBinTahoeMixin): +class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin): """ exercise "tahoe run" for both introducer and client node, by spawning "tahoe run" as a subprocess. This doesn't get us line-level coverage, but @@ -305,18 +309,18 @@ class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin, The introducer furl is stable across restarts. """ basedir = self.workdir("test_introducer") - c1 = os.path.join(basedir, "c1") + c1 = os.path.join(basedir, u"c1") tahoe = CLINodeAPI(reactor, FilePath(c1)) self.addCleanup(tahoe.stop_and_wait) - out, err, rc_or_sig = yield self.run_bintahoe([ - "--quiet", - "create-introducer", - "--basedir", c1, - "--hostname", "127.0.0.1", + out, err, returncode = run_bintahoe([ + u"--quiet", + u"create-introducer", + u"--basedir", c1, + u"--hostname", u"127.0.0.1", ]) - self.assertEqual(rc_or_sig, 0) + self.assertEqual(returncode, 0) # This makes sure that node.url is written, which allows us to # detect when the introducer restarts in _node_has_restarted below. @@ -384,18 +388,18 @@ class RunNode(common_util.SignalMixin, unittest.TestCase, pollmixin.PollMixin, 3) Verify that the pid file is removed after SIGTERM (on POSIX). """ basedir = self.workdir("test_client") - c1 = os.path.join(basedir, "c1") + c1 = os.path.join(basedir, u"c1") tahoe = CLINodeAPI(reactor, FilePath(c1)) # Set this up right now so we don't forget later. self.addCleanup(tahoe.cleanup) - out, err, rc_or_sig = yield self.run_bintahoe([ - "--quiet", "create-node", "--basedir", c1, - "--webport", "0", - "--hostname", "localhost", + out, err, returncode = run_bintahoe([ + u"--quiet", u"create-node", u"--basedir", c1, + u"--webport", u"0", + u"--hostname", u"localhost", ]) - self.failUnlessEqual(rc_or_sig, 0) + self.failUnlessEqual(returncode, 0) # Check that the --webport option worked. 
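# (Editor's sketch, not part of the patch: the create-node invocation above,
# reduced to its essentials. `run_bintahoe` is the helper defined earlier in
# this file; the basedir value here is hypothetical.)
#
#     out, err, returncode = run_bintahoe([
#         u"--quiet", u"create-node", u"--basedir", u"/tmp/demo-node",
#         u"--webport", u"0", u"--hostname", u"localhost",
#     ])
#     assert returncode == 0, err
#
# Per the comment above, the chosen webport setting is recorded in the node's
# tahoe.cfg, which the next line reads back so the test can verify it.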
config = fileutil.read(tahoe.config_file.path) diff --git a/src/allmydata/test/test_sftp.py b/src/allmydata/test/test_sftp.py index b6f1fbc8a..2214e4e5b 100644 --- a/src/allmydata/test/test_sftp.py +++ b/src/allmydata/test/test_sftp.py @@ -1,4 +1,14 @@ +""" +Ported to Python 3. +""" from __future__ import print_function +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 import re, struct, traceback, time, calendar from stat import S_IFREG, S_IFDIR @@ -9,18 +19,15 @@ from twisted.python.failure import Failure from twisted.internet.error import ProcessDone, ProcessTerminated from allmydata.util import deferredutil -conch_interfaces = None -sftp = None -sftpd = None - try: from twisted.conch import interfaces as conch_interfaces from twisted.conch.ssh import filetransfer as sftp from allmydata.frontends import sftpd except ImportError as e: + conch_interfaces = sftp = sftpd = None # type: ignore conch_unavailable_reason = e else: - conch_unavailable_reason = None + conch_unavailable_reason = None # type: ignore from allmydata.interfaces import IDirectoryNode, ExistingChildError, NoSuchChildError from allmydata.mutable.common import NotWriteableError @@ -76,7 +83,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas return d def _set_up_tree(self): - u = publish.MutableData("mutable file contents") + u = publish.MutableData(b"mutable file contents") d = self.client.create_mutable_file(u) d.addCallback(lambda node: self.root.set_node(u"mutable", node)) def _created_mutable(n): @@ -92,33 +99,33 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas self.readonly_uri = n.get_uri() d.addCallback(_created_readonly) - gross = upload.Data("0123456789" * 101, None) + gross = upload.Data(b"0123456789" * 101, None) d.addCallback(lambda ign: self.root.add_file(u"gro\u00DF", gross)) def _created_gross(n): self.gross = n self.gross_uri = n.get_uri() d.addCallback(_created_gross) - small = upload.Data("0123456789", None) + small = upload.Data(b"0123456789", None) d.addCallback(lambda ign: self.root.add_file(u"small", small)) def _created_small(n): self.small = n self.small_uri = n.get_uri() d.addCallback(_created_small) - small2 = upload.Data("Small enough for a LIT too", None) + small2 = upload.Data(b"Small enough for a LIT too", None) d.addCallback(lambda ign: self.root.add_file(u"small2", small2)) def _created_small2(n): self.small2 = n self.small2_uri = n.get_uri() d.addCallback(_created_small2) - empty_litdir_uri = "URI:DIR2-LIT:" + empty_litdir_uri = b"URI:DIR2-LIT:" # contains one child which is itself also LIT: - tiny_litdir_uri = "URI:DIR2-LIT:gqytunj2onug64tufqzdcosvkjetutcjkq5gw4tvm5vwszdgnz5hgyzufqydulbshj5x2lbm" + tiny_litdir_uri = b"URI:DIR2-LIT:gqytunj2onug64tufqzdcosvkjetutcjkq5gw4tvm5vwszdgnz5hgyzufqydulbshj5x2lbm" - unknown_uri = "x-tahoe-crazy://I_am_from_the_future." + unknown_uri = b"x-tahoe-crazy://I_am_from_the_future." 
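# (Editor's sketch, not part of the patch.) The b"" literals above appear to
# follow the porting convention this file exercises: cap URIs are handled as
# bytes while directory entry names stay text. A minimal illustration:
uri = b"URI:DIR2-LIT:"       # cap side of the API: bytes on Py2 and Py3
name = u"empty_lit_dir"      # child-name side of the API: text
assert isinstance(uri, bytes)
assert not isinstance(name, bytes)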
d.addCallback(lambda ign: self.root._create_and_validate_node(None, empty_litdir_uri, name=u"empty_lit_dir")) def _created_empty_lit_dir(n): @@ -154,55 +161,55 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas version = self.handler.gotVersion(3, {}) self.failUnless(isinstance(version, dict)) - self.failUnlessReallyEqual(self.handler._path_from_string(""), []) - self.failUnlessReallyEqual(self.handler._path_from_string("/"), []) - self.failUnlessReallyEqual(self.handler._path_from_string("."), []) - self.failUnlessReallyEqual(self.handler._path_from_string("//"), []) - self.failUnlessReallyEqual(self.handler._path_from_string("/."), []) - self.failUnlessReallyEqual(self.handler._path_from_string("/./"), []) - self.failUnlessReallyEqual(self.handler._path_from_string("foo"), [u"foo"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/foo"), [u"foo"]) - self.failUnlessReallyEqual(self.handler._path_from_string("foo/"), [u"foo"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/foo/"), [u"foo"]) - self.failUnlessReallyEqual(self.handler._path_from_string("foo/bar"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/foo/bar"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("foo/bar//"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/foo/bar//"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("foo/./bar"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("./foo/./bar"), [u"foo", u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("foo/../bar"), [u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/foo/../bar"), [u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("../bar"), [u"bar"]) - self.failUnlessReallyEqual(self.handler._path_from_string("/../bar"), [u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b""), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/"), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"."), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"//"), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/."), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/./"), []) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo"), [u"foo"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo"), [u"foo"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/"), [u"foo"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/"), [u"foo"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/bar"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/bar"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/bar//"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/bar//"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/./bar"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"./foo/./bar"), [u"foo", u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"foo/../bar"), [u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"/foo/../bar"), [u"bar"]) + self.failUnlessReallyEqual(self.handler._path_from_string(b"../bar"), [u"bar"]) + 
self.failUnlessReallyEqual(self.handler._path_from_string(b"/../bar"), [u"bar"]) - self.failUnlessReallyEqual(self.handler.realPath(""), "/") - self.failUnlessReallyEqual(self.handler.realPath("/"), "/") - self.failUnlessReallyEqual(self.handler.realPath("."), "/") - self.failUnlessReallyEqual(self.handler.realPath("//"), "/") - self.failUnlessReallyEqual(self.handler.realPath("/."), "/") - self.failUnlessReallyEqual(self.handler.realPath("/./"), "/") - self.failUnlessReallyEqual(self.handler.realPath("foo"), "/foo") - self.failUnlessReallyEqual(self.handler.realPath("/foo"), "/foo") - self.failUnlessReallyEqual(self.handler.realPath("foo/"), "/foo") - self.failUnlessReallyEqual(self.handler.realPath("/foo/"), "/foo") - self.failUnlessReallyEqual(self.handler.realPath("foo/bar"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("/foo/bar"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("foo/bar//"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("/foo/bar//"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("foo/./bar"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("./foo/./bar"), "/foo/bar") - self.failUnlessReallyEqual(self.handler.realPath("foo/../bar"), "/bar") - self.failUnlessReallyEqual(self.handler.realPath("/foo/../bar"), "/bar") - self.failUnlessReallyEqual(self.handler.realPath("../bar"), "/bar") - self.failUnlessReallyEqual(self.handler.realPath("/../bar"), "/bar") + self.failUnlessReallyEqual(self.handler.realPath(b""), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"/"), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"."), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"//"), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"/."), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"/./"), b"/") + self.failUnlessReallyEqual(self.handler.realPath(b"foo"), b"/foo") + self.failUnlessReallyEqual(self.handler.realPath(b"/foo"), b"/foo") + self.failUnlessReallyEqual(self.handler.realPath(b"foo/"), b"/foo") + self.failUnlessReallyEqual(self.handler.realPath(b"/foo/"), b"/foo") + self.failUnlessReallyEqual(self.handler.realPath(b"foo/bar"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"/foo/bar"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"foo/bar//"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"/foo/bar//"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"foo/./bar"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"./foo/./bar"), b"/foo/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"foo/../bar"), b"/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"/foo/../bar"), b"/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"../bar"), b"/bar") + self.failUnlessReallyEqual(self.handler.realPath(b"/../bar"), b"/bar") d.addCallback(_check) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "_path_from_string invalid UTF-8", - self.handler._path_from_string, "\xFF")) + self.handler._path_from_string, b"\xFF")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "realPath invalid UTF-8", - self.handler.realPath, "\xFF")) + self.handler.realPath, b"\xFF")) return d @@ -243,10 +250,10 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "readLink link", - self.handler.readLink, "link")) 
+ self.handler.readLink, b"link")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "makeLink link file", - self.handler.makeLink, "link", "file")) + self.handler.makeLink, b"link", b"file")) return d @@ -277,64 +284,64 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openDirectory small", - self.handler.openDirectory, "small")) + self.handler.openDirectory, b"small")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openDirectory unknown", - self.handler.openDirectory, "unknown")) + self.handler.openDirectory, b"unknown")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openDirectory nodir", - self.handler.openDirectory, "nodir")) + self.handler.openDirectory, b"nodir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openDirectory nodir/nodir", - self.handler.openDirectory, "nodir/nodir")) + self.handler.openDirectory, b"nodir/nodir")) gross = u"gro\u00DF".encode("utf-8") expected_root = [ - ('empty_lit_dir', r'dr-xr-xr-x .* 0 .* empty_lit_dir$', {'permissions': S_IFDIR | 0o555}), - (gross, r'-rw-rw-rw- .* 1010 .* '+gross+'$', {'permissions': S_IFREG | 0o666, 'size': 1010}), + (b'empty_lit_dir', br'dr-xr-xr-x .* 0 .* empty_lit_dir$', {'permissions': S_IFDIR | 0o555}), + (gross, br'-rw-rw-rw- .* 1010 .* '+gross+b'$', {'permissions': S_IFREG | 0o666, 'size': 1010}), # The fall of the Berlin wall may have been on 9th or 10th November 1989 depending on the gateway's timezone. #('loop', r'drwxrwxrwx .* 0 Nov (09|10) 1989 loop$', {'permissions': S_IFDIR | 0777}), - ('loop', r'drwxrwxrwx .* 0 .* loop$', {'permissions': S_IFDIR | 0o777}), - ('mutable', r'-rw-rw-rw- .* 0 .* mutable$', {'permissions': S_IFREG | 0o666}), - ('readonly', r'-r--r--r-- .* 0 .* readonly$', {'permissions': S_IFREG | 0o444}), - ('small', r'-rw-rw-rw- .* 10 .* small$', {'permissions': S_IFREG | 0o666, 'size': 10}), - ('small2', r'-rw-rw-rw- .* 26 .* small2$', {'permissions': S_IFREG | 0o666, 'size': 26}), - ('tiny_lit_dir', r'dr-xr-xr-x .* 0 .* tiny_lit_dir$', {'permissions': S_IFDIR | 0o555}), - ('unknown', r'\?--------- .* 0 .* unknown$', {'permissions': 0}), + (b'loop', br'drwxrwxrwx .* 0 .* loop$', {'permissions': S_IFDIR | 0o777}), + (b'mutable', br'-rw-rw-rw- .* 0 .* mutable$', {'permissions': S_IFREG | 0o666}), + (b'readonly', br'-r--r--r-- .* 0 .* readonly$', {'permissions': S_IFREG | 0o444}), + (b'small', br'-rw-rw-rw- .* 10 .* small$', {'permissions': S_IFREG | 0o666, 'size': 10}), + (b'small2', br'-rw-rw-rw- .* 26 .* small2$', {'permissions': S_IFREG | 0o666, 'size': 26}), + (b'tiny_lit_dir', br'dr-xr-xr-x .* 0 .* tiny_lit_dir$', {'permissions': S_IFDIR | 0o555}), + (b'unknown', br'\?--------- .* 0 .* unknown$', {'permissions': 0}), ] - d.addCallback(lambda ign: self.handler.openDirectory("")) + d.addCallback(lambda ign: self.handler.openDirectory(b"")) d.addCallback(lambda res: self._compareDirLists(res, expected_root)) - d.addCallback(lambda ign: self.handler.openDirectory("loop")) + d.addCallback(lambda ign: self.handler.openDirectory(b"loop")) d.addCallback(lambda res: self._compareDirLists(res, expected_root)) - d.addCallback(lambda ign: self.handler.openDirectory("loop/loop")) + d.addCallback(lambda ign: self.handler.openDirectory(b"loop/loop")) d.addCallback(lambda res: self._compareDirLists(res, expected_root)) - d.addCallback(lambda ign: 
self.handler.openDirectory("empty_lit_dir")) + d.addCallback(lambda ign: self.handler.openDirectory(b"empty_lit_dir")) d.addCallback(lambda res: self._compareDirLists(res, [])) # The UTC epoch may either be in Jan 1 1970 or Dec 31 1969 depending on the gateway's timezone. expected_tiny_lit = [ - ('short', r'-r--r--r-- .* 8 (Jan 01 1970|Dec 31 1969) short$', {'permissions': S_IFREG | 0o444, 'size': 8}), + (b'short', br'-r--r--r-- .* 8 (Jan 01 1970|Dec 31 1969) short$', {'permissions': S_IFREG | 0o444, 'size': 8}), ] - d.addCallback(lambda ign: self.handler.openDirectory("tiny_lit_dir")) + d.addCallback(lambda ign: self.handler.openDirectory(b"tiny_lit_dir")) d.addCallback(lambda res: self._compareDirLists(res, expected_tiny_lit)) - d.addCallback(lambda ign: self.handler.getAttrs("small", True)) + d.addCallback(lambda ign: self.handler.getAttrs(b"small", True)) d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) - d.addCallback(lambda ign: self.handler.setAttrs("small", {})) + d.addCallback(lambda ign: self.handler.setAttrs(b"small", {})) d.addCallback(lambda res: self.failUnlessReallyEqual(res, None)) - d.addCallback(lambda ign: self.handler.getAttrs("small", True)) + d.addCallback(lambda ign: self.handler.getAttrs(b"small", True)) d.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "setAttrs size", - self.handler.setAttrs, "small", {'size': 0})) + self.handler.setAttrs, b"small", {'size': 0})) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {})) @@ -346,53 +353,53 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "openFile small 0 bad", - self.handler.openFile, "small", 0, {})) + self.handler.openFile, b"small", 0, {})) # attempting to open a non-existent file should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile nofile READ nosuch", - self.handler.openFile, "nofile", sftp.FXF_READ, {})) + self.handler.openFile, b"nofile", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile nodir/file READ nosuch", - self.handler.openFile, "nodir/file", sftp.FXF_READ, {})) + self.handler.openFile, b"nodir/file", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown READ denied", - self.handler.openFile, "unknown", sftp.FXF_READ, {})) + self.handler.openFile, b"unknown", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown/file READ denied", - self.handler.openFile, "unknown/file", sftp.FXF_READ, {})) + self.handler.openFile, b"unknown/file", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir READ denied", - self.handler.openFile, "tiny_lit_dir", sftp.FXF_READ, {})) + self.handler.openFile, b"tiny_lit_dir", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown uri READ denied", - self.handler.openFile, "uri/"+self.unknown_uri, sftp.FXF_READ, {})) + self.handler.openFile, b"uri/"+self.unknown_uri, sftp.FXF_READ, {})) d.addCallback(lambda ign: 
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir uri READ denied", - self.handler.openFile, "uri/"+self.tiny_lit_dir_uri, sftp.FXF_READ, {})) + self.handler.openFile, b"uri/"+self.tiny_lit_dir_uri, sftp.FXF_READ, {})) # FIXME: should be FX_NO_SUCH_FILE? d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile noexist uri READ denied", - self.handler.openFile, "uri/URI:noexist", sftp.FXF_READ, {})) + self.handler.openFile, b"uri/URI:noexist", sftp.FXF_READ, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile invalid UTF-8 uri READ denied", - self.handler.openFile, "uri/URI:\xFF", sftp.FXF_READ, {})) + self.handler.openFile, b"uri/URI:\xFF", sftp.FXF_READ, {})) # reading an existing file should succeed - d.addCallback(lambda ign: self.handler.openFile("small", sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"small", sftp.FXF_READ, {})) def _read_small(rf): d2 = rf.readChunk(0, 10) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.readChunk(2, 6)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "234567")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"234567")) d2.addCallback(lambda ign: rf.readChunk(1, 0)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) d2.addCallback(lambda ign: rf.readChunk(8, 4)) # read that starts before EOF is OK - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "89")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"89")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF (0-byte)", @@ -407,12 +414,12 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d2.addCallback(lambda ign: rf.getAttrs()) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) - d2.addCallback(lambda ign: self.handler.getAttrs("small", followLinks=0)) + d2.addCallback(lambda ign: self.handler.getAttrs(b"small", followLinks=0)) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 10})) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "writeChunk on read-only handle denied", - rf.writeChunk, 0, "a")) + rf.writeChunk, 0, b"a")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "setAttrs on read-only handle denied", rf.setAttrs, {})) @@ -435,16 +442,16 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.handler.openFile(gross, sftp.FXF_READ, {})) def _read_gross(rf): d2 = rf.readChunk(0, 10) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.readChunk(2, 6)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "234567")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"234567")) d2.addCallback(lambda ign: rf.readChunk(1, 0)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) d2.addCallback(lambda ign: rf.readChunk(1008, 4)) # read that starts before 
EOF is OK - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "89")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"89")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF (0-byte)", @@ -464,7 +471,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "writeChunk on read-only handle denied", - rf.writeChunk, 0, "a")) + rf.writeChunk, 0, b"a")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "setAttrs on read-only handle denied", rf.setAttrs, {})) @@ -483,37 +490,37 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(_read_gross) # reading an existing small file via uri/ should succeed - d.addCallback(lambda ign: self.handler.openFile("uri/"+self.small_uri, sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.small_uri, sftp.FXF_READ, {})) def _read_small_uri(rf): d2 = rf.readChunk(0, 10) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_small_uri) # repeat for a large file - d.addCallback(lambda ign: self.handler.openFile("uri/"+self.gross_uri, sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.gross_uri, sftp.FXF_READ, {})) def _read_gross_uri(rf): d2 = rf.readChunk(0, 10) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_gross_uri) # repeat for a mutable file - d.addCallback(lambda ign: self.handler.openFile("uri/"+self.mutable_uri, sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.mutable_uri, sftp.FXF_READ, {})) def _read_mutable_uri(rf): d2 = rf.readChunk(0, 100) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable file contents")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable file contents")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_mutable_uri) # repeat for a file within a directory referenced by URI - d.addCallback(lambda ign: self.handler.openFile("uri/"+self.tiny_lit_dir_uri+"/short", sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.tiny_lit_dir_uri+b"/short", sftp.FXF_READ, {})) def _read_short(rf): d2 = rf.readChunk(0, 100) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "The end.")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"The end.")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_read_short) @@ -521,7 +528,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # check that failed downloads cause failed reads. Note that this # trashes the grid (by deleting all shares), so this must be at the # end of the test function. 
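        # (Editor's note: per the comment above, self.g.nuke_from_orbit()
        # deletes every share held by the in-process test grid, so any read
        # that must fetch from the grid afterwards fails; that is why this
        # check has to be the final step of the test.)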
- d.addCallback(lambda ign: self.handler.openFile("uri/"+self.gross_uri, sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"uri/"+self.gross_uri, sftp.FXF_READ, {})) def _read_broken(rf): d2 = defer.succeed(None) d2.addCallback(lambda ign: self.g.nuke_from_orbit()) @@ -542,10 +549,10 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # The check at the end of openFile_read tested this for large files, # but it trashed the grid in the process, so this needs to be a # separate test. - small = upload.Data("0123456789"*10, None) + small = upload.Data(b"0123456789"*10, None) d = self._set_up("openFile_read_error") d.addCallback(lambda ign: self.root.add_file(u"small", small)) - d.addCallback(lambda n: self.handler.openFile("/uri/"+n.get_uri(), sftp.FXF_READ, {})) + d.addCallback(lambda n: self.handler.openFile(b"/uri/"+n.get_uri(), sftp.FXF_READ, {})) def _read_broken(rf): d2 = defer.succeed(None) d2.addCallback(lambda ign: self.g.nuke_from_orbit()) @@ -569,106 +576,106 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # '' is an invalid filename d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile '' WRITE|CREAT|TRUNC nosuch", - self.handler.openFile, "", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) + self.handler.openFile, b"", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) # TRUNC is not valid without CREAT if the file does not already exist d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile newfile WRITE|TRUNC nosuch", - self.handler.openFile, "newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {})) + self.handler.openFile, b"newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {})) # EXCL is not valid without CREAT d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "openFile small WRITE|EXCL bad", - self.handler.openFile, "small", sftp.FXF_WRITE | sftp.FXF_EXCL, {})) + self.handler.openFile, b"small", sftp.FXF_WRITE | sftp.FXF_EXCL, {})) # cannot write to an existing directory d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir WRITE denied", - self.handler.openFile, "tiny_lit_dir", sftp.FXF_WRITE, {})) + self.handler.openFile, b"tiny_lit_dir", sftp.FXF_WRITE, {})) # cannot write to an existing unknown d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown WRITE denied", - self.handler.openFile, "unknown", sftp.FXF_WRITE, {})) + self.handler.openFile, b"unknown", sftp.FXF_WRITE, {})) # cannot create a child of an unknown d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile unknown/newfile WRITE|CREAT denied", - self.handler.openFile, "unknown/newfile", + self.handler.openFile, b"unknown/newfile", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) # cannot write to a new file in an immutable directory d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/newfile WRITE|CREAT|TRUNC denied", - self.handler.openFile, "tiny_lit_dir/newfile", + self.handler.openFile, b"tiny_lit_dir/newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) # cannot write to an existing immutable file in an immutable directory (with or without CREAT and EXCL) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/short WRITE denied", - self.handler.openFile, "tiny_lit_dir/short", sftp.FXF_WRITE, {})) + 
self.handler.openFile, b"tiny_lit_dir/short", sftp.FXF_WRITE, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir/short WRITE|CREAT denied", - self.handler.openFile, "tiny_lit_dir/short", + self.handler.openFile, b"tiny_lit_dir/short", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) # cannot write to a mutable file via a readonly cap (by path or uri) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile readonly WRITE denied", - self.handler.openFile, "readonly", sftp.FXF_WRITE, {})) + self.handler.openFile, b"readonly", sftp.FXF_WRITE, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile readonly uri WRITE denied", - self.handler.openFile, "uri/"+self.readonly_uri, sftp.FXF_WRITE, {})) + self.handler.openFile, b"uri/"+self.readonly_uri, sftp.FXF_WRITE, {})) # cannot create a file with the EXCL flag if it already exists d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile small WRITE|CREAT|EXCL failure", - self.handler.openFile, "small", + self.handler.openFile, b"small", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile mutable WRITE|CREAT|EXCL failure", - self.handler.openFile, "mutable", + self.handler.openFile, b"mutable", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile mutable uri WRITE|CREAT|EXCL failure", - self.handler.openFile, "uri/"+self.mutable_uri, + self.handler.openFile, b"uri/"+self.mutable_uri, sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "openFile tiny_lit_dir/short WRITE|CREAT|EXCL failure", - self.handler.openFile, "tiny_lit_dir/short", + self.handler.openFile, b"tiny_lit_dir/short", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) # cannot write to an immutable file if we don't have its parent (with or without CREAT, TRUNC, or EXCL) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE denied", - self.handler.openFile, "uri/"+self.small_uri, sftp.FXF_WRITE, {})) + self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT denied", - self.handler.openFile, "uri/"+self.small_uri, + self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE | sftp.FXF_CREAT, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT|TRUNC denied", - self.handler.openFile, "uri/"+self.small_uri, + self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile small uri WRITE|CREAT|EXCL denied", - self.handler.openFile, "uri/"+self.small_uri, + self.handler.openFile, b"uri/"+self.small_uri, sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) # test creating a new file with truncation and extension d.addCallback(lambda ign: - self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) + self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {})) def _write(wf): - d2 = wf.writeChunk(0, "0123456789") + d2 = wf.writeChunk(0, b"0123456789") d2.addCallback(lambda 
res: self.failUnlessReallyEqual(res, None)) - d2.addCallback(lambda ign: wf.writeChunk(8, "0123")) - d2.addCallback(lambda ign: wf.writeChunk(13, "abc")) + d2.addCallback(lambda ign: wf.writeChunk(8, b"0123")) + d2.addCallback(lambda ign: wf.writeChunk(13, b"abc")) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 16})) - d2.addCallback(lambda ign: self.handler.getAttrs("newfile", followLinks=0)) + d2.addCallback(lambda ign: self.handler.getAttrs(b"newfile", followLinks=0)) d2.addCallback(lambda attrs: self._compareAttributes(attrs, {'permissions': S_IFREG | 0o666, 'size': 16})) d2.addCallback(lambda ign: wf.setAttrs({})) @@ -688,7 +695,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d2.addCallback(lambda ign: wf.setAttrs({'size': 17})) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17)) - d2.addCallback(lambda ign: self.handler.getAttrs("newfile", followLinks=0)) + d2.addCallback(lambda ign: self.handler.getAttrs(b"newfile", followLinks=0)) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17)) d2.addCallback(lambda ign: @@ -699,7 +706,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "writeChunk on closed file bad", - wf.writeChunk, 0, "a")) + wf.writeChunk, 0, b"a")) d2.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "setAttrs on closed file bad", wf.setAttrs, {'size': 0})) @@ -709,77 +716,77 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(_write) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123\x00a\x00\x00\x00")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345670123\x00a\x00\x00\x00")) # test APPEND flag, and also replacing an existing file ("newfile" created by the previous test) d.addCallback(lambda ign: - self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC | sftp.FXF_APPEND, {})) def _write_append(wf): - d2 = wf.writeChunk(0, "0123456789") - d2.addCallback(lambda ign: wf.writeChunk(8, "0123")) + d2 = wf.writeChunk(0, b"0123456789") + d2.addCallback(lambda ign: wf.writeChunk(8, b"0123")) d2.addCallback(lambda ign: wf.setAttrs({'size': 17})) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 17)) - d2.addCallback(lambda ign: wf.writeChunk(0, "z")) + d2.addCallback(lambda ign: wf.writeChunk(0, b"z")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_append) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234567890123\x00\x00\x00z")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234567890123\x00\x00\x00z")) # test WRITE | TRUNC without CREAT, when the file already exists # This is invalid according to section 6.3 of the SFTP spec, but required for interoperability, # since POSIX does allow O_WRONLY | O_TRUNC. 
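# (Editor's sketch, not part of the patch: the interoperability case described
# in the comment above, written out as a flag translation. The FXF_* constants
# come from twisted.conch.ssh.filetransfer, imported as `sftp` in this file.)
import os
from twisted.conch.ssh import filetransfer as sftp
# A POSIX client may open with O_WRONLY | O_TRUNC and no O_CREAT; the SFTP
# gateway sees that as FXF_WRITE | FXF_TRUNC without FXF_CREAT, which section
# 6.3 of the SFTP spec disallows but this handler accepts for compatibility.
posix_flags = os.O_WRONLY | os.O_TRUNC
sftp_flags = sftp.FXF_WRITE | sftp.FXF_TRUNC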
d.addCallback(lambda ign: - self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {})) + self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {})) def _write_trunc(wf): - d2 = wf.writeChunk(0, "01234") + d2 = wf.writeChunk(0, b"01234") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_trunc) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234")) # test WRITE | TRUNC with permissions: 0 d.addCallback(lambda ign: - self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {'permissions': 0})) + self.handler.openFile(b"newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {'permissions': 0})) d.addCallback(_write_trunc) d.addCallback(lambda ign: self.root.get(u"newfile")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234")) d.addCallback(lambda ign: self.root.get_metadata_for(u"newfile")) d.addCallback(lambda metadata: self.failIf(metadata.get('no-write', False), metadata)) # test EXCL flag d.addCallback(lambda ign: - self.handler.openFile("excl", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"excl", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC | sftp.FXF_EXCL, {})) def _write_excl(wf): d2 = self.root.get(u"excl") d2.addCallback(lambda node: download_to_data(node)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) - d2.addCallback(lambda ign: wf.writeChunk(0, "0123456789")) + d2.addCallback(lambda ign: wf.writeChunk(0, b"0123456789")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_excl) d.addCallback(lambda ign: self.root.get(u"excl")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) # test that writing a zero-length file with EXCL only updates the directory once d.addCallback(lambda ign: - self.handler.openFile("zerolength", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"zerolength", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) def _write_excl_zerolength(wf): d2 = self.root.get(u"zerolength") d2.addCallback(lambda node: download_to_data(node)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) # FIXME: no API to get the best version number exists (fix as part of #993) """ @@ -796,84 +803,84 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(_write_excl_zerolength) d.addCallback(lambda ign: self.root.get(u"zerolength")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) # test WRITE | CREAT | EXCL | APPEND d.addCallback(lambda ign: - self.handler.openFile("exclappend", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"exclappend", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL | sftp.FXF_APPEND, {})) def _write_excl_append(wf): d2 = self.root.get(u"exclappend") d2.addCallback(lambda node: download_to_data(node)) - d2.addCallback(lambda data: 
self.failUnlessReallyEqual(data, "")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"")) - d2.addCallback(lambda ign: wf.writeChunk(10, "0123456789")) - d2.addCallback(lambda ign: wf.writeChunk(5, "01234")) + d2.addCallback(lambda ign: wf.writeChunk(10, b"0123456789")) + d2.addCallback(lambda ign: wf.writeChunk(5, b"01234")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_excl_append) d.addCallback(lambda ign: self.root.get(u"exclappend")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345678901234")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345678901234")) # test WRITE | CREAT | APPEND when the file does not already exist d.addCallback(lambda ign: - self.handler.openFile("creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_APPEND, {})) def _write_creat_append_new(wf): - d2 = wf.writeChunk(10, "0123456789") - d2.addCallback(lambda ign: wf.writeChunk(5, "01234")) + d2 = wf.writeChunk(10, b"0123456789") + d2.addCallback(lambda ign: wf.writeChunk(5, b"01234")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_append_new) d.addCallback(lambda ign: self.root.get(u"creatappend")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345678901234")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345678901234")) # ... and when it does exist d.addCallback(lambda ign: - self.handler.openFile("creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT | + self.handler.openFile(b"creatappend", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_APPEND, {})) def _write_creat_append_existing(wf): - d2 = wf.writeChunk(5, "01234") + d2 = wf.writeChunk(5, b"01234") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_append_existing) d.addCallback(lambda ign: self.root.get(u"creatappend")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234567890123401234")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"01234567890123401234")) # test WRITE | CREAT without TRUNC, when the file does not already exist d.addCallback(lambda ign: - self.handler.openFile("newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) + self.handler.openFile(b"newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_creat_new(wf): - d2 = wf.writeChunk(0, "0123456789") + d2 = wf.writeChunk(0, b"0123456789") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_new) d.addCallback(lambda ign: self.root.get(u"newfile2")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) # ... 
and when it does exist d.addCallback(lambda ign: - self.handler.openFile("newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) + self.handler.openFile(b"newfile2", sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_creat_existing(wf): - d2 = wf.writeChunk(0, "abcde") + d2 = wf.writeChunk(0, b"abcde") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_existing) d.addCallback(lambda ign: self.root.get(u"newfile2")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcde56789")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcde56789")) d.addCallback(lambda ign: self.root.set_node(u"mutable2", self.mutable)) # test writing to a mutable file d.addCallback(lambda ign: - self.handler.openFile("mutable", sftp.FXF_WRITE, {})) + self.handler.openFile(b"mutable", sftp.FXF_WRITE, {})) def _write_mutable(wf): - d2 = wf.writeChunk(8, "new!") + d2 = wf.writeChunk(8, b"new!") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_mutable) @@ -884,30 +891,30 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas self.failUnlessReallyEqual(node.get_uri(), self.mutable_uri) return node.download_best_version() d.addCallback(_check_same_file) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable new! contents")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable new! contents")) # ... and with permissions, which should be ignored d.addCallback(lambda ign: - self.handler.openFile("mutable", sftp.FXF_WRITE, {'permissions': 0})) + self.handler.openFile(b"mutable", sftp.FXF_WRITE, {'permissions': 0})) d.addCallback(_write_mutable) d.addCallback(lambda ign: self.root.get(u"mutable")) d.addCallback(_check_same_file) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable new! contents")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable new! contents")) # ... and with a setAttrs call that diminishes the parent link to read-only, first by path d.addCallback(lambda ign: - self.handler.openFile("mutable", sftp.FXF_WRITE, {})) + self.handler.openFile(b"mutable", sftp.FXF_WRITE, {})) def _write_mutable_setattr(wf): - d2 = wf.writeChunk(8, "read-only link from parent") + d2 = wf.writeChunk(8, b"read-only link from parent") - d2.addCallback(lambda ign: self.handler.setAttrs("mutable", {'permissions': 0o444})) + d2.addCallback(lambda ign: self.handler.setAttrs(b"mutable", {'permissions': 0o444})) d2.addCallback(lambda ign: self.root.get(u"mutable")) d2.addCallback(lambda node: self.failUnless(node.is_readonly())) d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o666)) - d2.addCallback(lambda ign: self.handler.getAttrs("mutable", followLinks=0)) + d2.addCallback(lambda ign: self.handler.getAttrs(b"mutable", followLinks=0)) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o444)) d2.addCallback(lambda ign: wf.close()) @@ -921,13 +928,13 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas self.failUnlessReallyEqual(node.get_storage_index(), self.mutable.get_storage_index()) return node.download_best_version() d.addCallback(_check_readonly_file) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable read-only link from parent")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable read-only link from parent")) # ... 
and then by handle d.addCallback(lambda ign: - self.handler.openFile("mutable2", sftp.FXF_WRITE, {})) + self.handler.openFile(b"mutable2", sftp.FXF_WRITE, {})) def _write_mutable2_setattr(wf): - d2 = wf.writeChunk(7, "2") + d2 = wf.writeChunk(7, b"2") d2.addCallback(lambda ign: wf.setAttrs({'permissions': 0o444, 'size': 8})) @@ -937,7 +944,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d2.addCallback(lambda ign: wf.getAttrs()) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o444)) - d2.addCallback(lambda ign: self.handler.getAttrs("mutable2", followLinks=0)) + d2.addCallback(lambda ign: self.handler.getAttrs(b"mutable2", followLinks=0)) d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0o666)) d2.addCallback(lambda ign: wf.close()) @@ -945,55 +952,55 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(_write_mutable2_setattr) d.addCallback(lambda ign: self.root.get(u"mutable2")) d.addCallback(_check_readonly_file) # from above - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable2")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"mutable2")) # test READ | WRITE without CREAT or TRUNC d.addCallback(lambda ign: - self.handler.openFile("small", sftp.FXF_READ | sftp.FXF_WRITE, {})) + self.handler.openFile(b"small", sftp.FXF_READ | sftp.FXF_WRITE, {})) def _read_write(rwf): - d2 = rwf.writeChunk(8, "0123") + d2 = rwf.writeChunk(8, b"0123") # test immediate read starting after the old end-of-file d2.addCallback(lambda ign: rwf.readChunk(11, 1)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "3")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"3")) d2.addCallback(lambda ign: rwf.readChunk(0, 100)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345670123")) d2.addCallback(lambda ign: rwf.close()) return d2 d.addCallback(_read_write) d.addCallback(lambda ign: self.root.get(u"small")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"012345670123")) # test WRITE and rename while still open d.addCallback(lambda ign: - self.handler.openFile("small", sftp.FXF_WRITE, {})) + self.handler.openFile(b"small", sftp.FXF_WRITE, {})) def _write_rename(wf): - d2 = wf.writeChunk(0, "abcd") - d2.addCallback(lambda ign: self.handler.renameFile("small", "renamed")) - d2.addCallback(lambda ign: wf.writeChunk(4, "efgh")) + d2 = wf.writeChunk(0, b"abcd") + d2.addCallback(lambda ign: self.handler.renameFile(b"small", b"renamed")) + d2.addCallback(lambda ign: wf.writeChunk(4, b"efgh")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_rename) d.addCallback(lambda ign: self.root.get(u"renamed")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcdefgh0123")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcdefgh0123")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "rename small while open", "small", self.root.get, u"small")) # test WRITE | CREAT | EXCL and rename while still open d.addCallback(lambda ign: - self.handler.openFile("newexcl", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) + self.handler.openFile(b"newexcl", 
sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_EXCL, {})) def _write_creat_excl_rename(wf): - d2 = wf.writeChunk(0, "abcd") - d2.addCallback(lambda ign: self.handler.renameFile("newexcl", "renamedexcl")) - d2.addCallback(lambda ign: wf.writeChunk(4, "efgh")) + d2 = wf.writeChunk(0, b"abcd") + d2.addCallback(lambda ign: self.handler.renameFile(b"newexcl", b"renamedexcl")) + d2.addCallback(lambda ign: wf.writeChunk(4, b"efgh")) d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_creat_excl_rename) d.addCallback(lambda ign: self.root.get(u"renamedexcl")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcdefgh")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcdefgh")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "rename newexcl while open", "newexcl", self.root.get, u"newexcl")) @@ -1002,21 +1009,21 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas def _open_and_rename_race(ign): slow_open = defer.Deferred() reactor.callLater(1, slow_open.callback, None) - d2 = self.handler.openFile("new", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open) + d2 = self.handler.openFile(b"new", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open) # deliberate race between openFile and renameFile - d3 = self.handler.renameFile("new", "new2") + d3 = self.handler.renameFile(b"new", b"new2") d3.addErrback(lambda err: self.fail("renameFile failed: %r" % (err,))) return d2 d.addCallback(_open_and_rename_race) def _write_rename_race(wf): - d2 = wf.writeChunk(0, "abcd") + d2 = wf.writeChunk(0, b"abcd") d2.addCallback(lambda ign: wf.close()) return d2 d.addCallback(_write_rename_race) d.addCallback(lambda ign: self.root.get(u"new2")) d.addCallback(lambda node: download_to_data(node)) - d.addCallback(lambda data: self.failUnlessReallyEqual(data, "abcd")) + d.addCallback(lambda data: self.failUnlessReallyEqual(data, b"abcd")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "rename new while open", "new", self.root.get, u"new")) @@ -1027,7 +1034,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas gross = u"gro\u00DF".encode("utf-8") d.addCallback(lambda ign: self.handler.openFile(gross, sftp.FXF_READ | sftp.FXF_WRITE, {})) def _read_write_broken(rwf): - d2 = rwf.writeChunk(0, "abcdefghij") + d2 = rwf.writeChunk(0, b"abcdefghij") d2.addCallback(lambda ign: self.g.nuke_from_orbit()) # reading should fail (reliably if we read past the written chunk) @@ -1051,57 +1058,57 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nofile", - self.handler.removeFile, "nofile")) + self.handler.removeFile, b"nofile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nofile", - self.handler.removeFile, "nofile")) + self.handler.removeFile, b"nofile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeFile nodir/file", - self.handler.removeFile, "nodir/file")) + self.handler.removeFile, b"nodir/file")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removefile ''", - self.handler.removeFile, "")) + self.handler.removeFile, b"")) # removing a directory should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "removeFile tiny_lit_dir", - self.handler.removeFile, "tiny_lit_dir")) + 
self.handler.removeFile, b"tiny_lit_dir")) # removing a file should succeed d.addCallback(lambda ign: self.root.get(u"gro\u00DF")) d.addCallback(lambda ign: self.handler.removeFile(u"gro\u00DF".encode('utf-8'))) d.addCallback(lambda ign: - self.shouldFail(NoSuchChildError, "removeFile gross", "gro\\xdf", + self.shouldFail(NoSuchChildError, "removeFile gross", "gro", self.root.get, u"gro\u00DF")) # removing an unknown should succeed d.addCallback(lambda ign: self.root.get(u"unknown")) - d.addCallback(lambda ign: self.handler.removeFile("unknown")) + d.addCallback(lambda ign: self.handler.removeFile(b"unknown")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeFile unknown", "unknown", self.root.get, u"unknown")) # removing a link to an open file should not prevent it from being read - d.addCallback(lambda ign: self.handler.openFile("small", sftp.FXF_READ, {})) + d.addCallback(lambda ign: self.handler.openFile(b"small", sftp.FXF_READ, {})) def _remove_and_read_small(rf): - d2 = self.handler.removeFile("small") + d2 = self.handler.removeFile(b"small") d2.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeFile small", "small", self.root.get, u"small")) d2.addCallback(lambda ign: rf.readChunk(0, 10)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rf.close()) return d2 d.addCallback(_remove_and_read_small) # removing a link to a created file should prevent it from being created - d.addCallback(lambda ign: self.handler.openFile("tempfile", sftp.FXF_READ | sftp.FXF_WRITE | + d.addCallback(lambda ign: self.handler.openFile(b"tempfile", sftp.FXF_READ | sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_remove(rwf): - d2 = rwf.writeChunk(0, "0123456789") - d2.addCallback(lambda ign: self.handler.removeFile("tempfile")) + d2 = rwf.writeChunk(0, b"0123456789") + d2.addCallback(lambda ign: self.handler.removeFile(b"tempfile")) d2.addCallback(lambda ign: rwf.readChunk(0, 10)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rwf.close()) return d2 d.addCallback(_write_remove) @@ -1110,14 +1117,14 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas self.root.get, u"tempfile")) # ... 
even if the link is renamed while open - d.addCallback(lambda ign: self.handler.openFile("tempfile2", sftp.FXF_READ | sftp.FXF_WRITE | + d.addCallback(lambda ign: self.handler.openFile(b"tempfile2", sftp.FXF_READ | sftp.FXF_WRITE | sftp.FXF_CREAT, {})) def _write_rename_remove(rwf): - d2 = rwf.writeChunk(0, "0123456789") - d2.addCallback(lambda ign: self.handler.renameFile("tempfile2", "tempfile3")) - d2.addCallback(lambda ign: self.handler.removeFile("tempfile3")) + d2 = rwf.writeChunk(0, b"0123456789") + d2.addCallback(lambda ign: self.handler.renameFile(b"tempfile2", b"tempfile3")) + d2.addCallback(lambda ign: self.handler.removeFile(b"tempfile3")) d2.addCallback(lambda ign: rwf.readChunk(0, 10)) - d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789")) + d2.addCallback(lambda data: self.failUnlessReallyEqual(data, b"0123456789")) d2.addCallback(lambda ign: rwf.close()) return d2 d.addCallback(_write_rename_remove) @@ -1138,13 +1145,13 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory nodir", - self.handler.removeDirectory, "nodir")) + self.handler.removeDirectory, b"nodir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory nodir/nodir", - self.handler.removeDirectory, "nodir/nodir")) + self.handler.removeDirectory, b"nodir/nodir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "removeDirectory ''", - self.handler.removeDirectory, "")) + self.handler.removeDirectory, b"")) # removing a file should fail d.addCallback(lambda ign: @@ -1153,14 +1160,14 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # removing a directory should succeed d.addCallback(lambda ign: self.root.get(u"tiny_lit_dir")) - d.addCallback(lambda ign: self.handler.removeDirectory("tiny_lit_dir")) + d.addCallback(lambda ign: self.handler.removeDirectory(b"tiny_lit_dir")) d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, "removeDirectory tiny_lit_dir", "tiny_lit_dir", self.root.get, u"tiny_lit_dir")) # removing an unknown should succeed d.addCallback(lambda ign: self.root.get(u"unknown")) - d.addCallback(lambda ign: self.handler.removeDirectory("unknown")) + d.addCallback(lambda ign: self.handler.removeDirectory(b"unknown")) d.addCallback(lambda err: self.shouldFail(NoSuchChildError, "removeDirectory unknown", "unknown", self.root.get, u"unknown")) @@ -1176,58 +1183,58 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # renaming a non-existent file should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile nofile newfile", - self.handler.renameFile, "nofile", "newfile")) + self.handler.renameFile, b"nofile", b"newfile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile '' newfile", - self.handler.renameFile, "", "newfile")) + self.handler.renameFile, b"", b"newfile")) # renaming a file to a non-existent path should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small nodir/small", - self.handler.renameFile, "small", "nodir/small")) + self.handler.renameFile, b"small", b"nodir/small")) # renaming a file to an invalid UTF-8 name should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small invalid", - self.handler.renameFile, "small", "\xFF")) + 
self.handler.renameFile, b"small", b"\xFF")) # renaming a file to or from an URI should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small from uri", - self.handler.renameFile, "uri/"+self.small_uri, "new")) + self.handler.renameFile, b"uri/"+self.small_uri, b"new")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile small to uri", - self.handler.renameFile, "small", "uri/fake_uri")) + self.handler.renameFile, b"small", b"uri/fake_uri")) # renaming a file onto an existing file, directory or unknown should fail # The SFTP spec isn't clear about what error should be returned, but sshfs depends on # it being FX_PERMISSION_DENIED. d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small small2", - self.handler.renameFile, "small", "small2")) + self.handler.renameFile, b"small", b"small2")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small tiny_lit_dir", - self.handler.renameFile, "small", "tiny_lit_dir")) + self.handler.renameFile, b"small", b"tiny_lit_dir")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small unknown", - self.handler.renameFile, "small", "unknown")) + self.handler.renameFile, b"small", b"unknown")) # renaming a file onto a heisenfile should fail, even if the open hasn't completed def _rename_onto_heisenfile_race(wf): slow_open = defer.Deferred() reactor.callLater(1, slow_open.callback, None) - d2 = self.handler.openFile("heisenfile", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open) + d2 = self.handler.openFile(b"heisenfile", sftp.FXF_WRITE | sftp.FXF_CREAT, {}, delay=slow_open) # deliberate race between openFile and renameFile d3 = self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "renameFile small heisenfile", - self.handler.renameFile, "small", "heisenfile") + self.handler.renameFile, b"small", b"heisenfile") d2.addCallback(lambda wf: wf.close()) return deferredutil.gatherResults([d2, d3]) d.addCallback(_rename_onto_heisenfile_race) # renaming a file to a correct path should succeed - d.addCallback(lambda ign: self.handler.renameFile("small", "new_small")) + d.addCallback(lambda ign: self.handler.renameFile(b"small", b"new_small")) d.addCallback(lambda ign: self.root.get(u"new_small")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) @@ -1238,12 +1245,12 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.gross_uri)) # renaming a directory to a correct path should succeed - d.addCallback(lambda ign: self.handler.renameFile("tiny_lit_dir", "new_tiny_lit_dir")) + d.addCallback(lambda ign: self.handler.renameFile(b"tiny_lit_dir", b"new_tiny_lit_dir")) d.addCallback(lambda ign: self.root.get(u"new_tiny_lit_dir")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.tiny_lit_dir_uri)) # renaming an unknown to a correct path should succeed - d.addCallback(lambda ign: self.handler.renameFile("unknown", "new_unknown")) + d.addCallback(lambda ign: self.handler.renameFile(b"unknown", b"new_unknown")) d.addCallback(lambda ign: self.root.get(u"new_unknown")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.unknown_uri)) @@ -1256,7 +1263,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas extData = (struct.pack('>L', 
len(fromPathstring)) + fromPathstring + struct.pack('>L', len(toPathstring)) + toPathstring) - d2 = self.handler.extendedRequest('posix-rename@openssh.com', extData) + d2 = self.handler.extendedRequest(b'posix-rename@openssh.com', extData) def _check(res): res.trap(sftp.SFTPError) if res.value.code == sftp.FX_OK: @@ -1276,44 +1283,44 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas # POSIX-renaming a non-existent file should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix nofile newfile", - _renameFile, "nofile", "newfile")) + _renameFile, b"nofile", b"newfile")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix '' newfile", - _renameFile, "", "newfile")) + _renameFile, b"", b"newfile")) # POSIX-renaming a file to a non-existent path should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small nodir/small", - _renameFile, "small", "nodir/small")) + _renameFile, b"small", b"nodir/small")) # POSIX-renaming a file to an invalid UTF-8 name should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small invalid", - _renameFile, "small", "\xFF")) + _renameFile, b"small", b"\xFF")) # POSIX-renaming a file to or from an URI should fail d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small from uri", - _renameFile, "uri/"+self.small_uri, "new")) + _renameFile, b"uri/"+self.small_uri, b"new")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "renameFile_posix small to uri", - _renameFile, "small", "uri/fake_uri")) + _renameFile, b"small", b"uri/fake_uri")) # POSIX-renaming a file onto an existing file, directory or unknown should succeed - d.addCallback(lambda ign: _renameFile("small", "small2")) + d.addCallback(lambda ign: _renameFile(b"small", b"small2")) d.addCallback(lambda ign: self.root.get(u"small2")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) - d.addCallback(lambda ign: _renameFile("small2", "loop2")) + d.addCallback(lambda ign: _renameFile(b"small2", b"loop2")) d.addCallback(lambda ign: self.root.get(u"loop2")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) - d.addCallback(lambda ign: _renameFile("loop2", "unknown2")) + d.addCallback(lambda ign: _renameFile(b"loop2", b"unknown2")) d.addCallback(lambda ign: self.root.get(u"unknown2")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) # POSIX-renaming a file to a correct new path should succeed - d.addCallback(lambda ign: _renameFile("unknown2", "new_small")) + d.addCallback(lambda ign: _renameFile(b"unknown2", b"new_small")) d.addCallback(lambda ign: self.root.get(u"new_small")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri)) @@ -1324,12 +1331,12 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.gross_uri)) # POSIX-renaming a directory to a correct path should succeed - d.addCallback(lambda ign: _renameFile("tiny_lit_dir", "new_tiny_lit_dir")) + d.addCallback(lambda ign: _renameFile(b"tiny_lit_dir", b"new_tiny_lit_dir")) d.addCallback(lambda ign: self.root.get(u"new_tiny_lit_dir")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.tiny_lit_dir_uri)) # 
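
# The ``extData`` payload built in ``_renameFile`` above is two SSH-style
# strings: each path prefixed by its length as a 4-byte big-endian integer.
# The same packing as a standalone helper:
import struct

def pack_posix_rename(from_path, to_path):
    """Build the payload for the posix-rename@openssh.com extension."""
    return (struct.pack('>L', len(from_path)) + from_path +
            struct.pack('>L', len(to_path)) + to_path)

assert pack_posix_rename(b"a", b"bc") == b"\x00\x00\x00\x01a\x00\x00\x00\x02bc"
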
POSIX-renaming an unknown to a correct path should succeed - d.addCallback(lambda ign: _renameFile("unknown", "new_unknown")) + d.addCallback(lambda ign: _renameFile(b"unknown", b"new_unknown")) d.addCallback(lambda ign: self.root.get(u"new_unknown")) d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.unknown_uri)) @@ -1342,7 +1349,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self._set_up_tree()) # making a directory at a correct path should succeed - d.addCallback(lambda ign: self.handler.makeDirectory("newdir", {'ext_foo': 'bar', 'ctime': 42})) + d.addCallback(lambda ign: self.handler.makeDirectory(b"newdir", {'ext_foo': 'bar', 'ctime': 42})) d.addCallback(lambda ign: self.root.get_child_and_metadata(u"newdir")) def _got(child_and_metadata): @@ -1358,7 +1365,7 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(_got) # making intermediate directories should also succeed - d.addCallback(lambda ign: self.handler.makeDirectory("newparent/newchild", {})) + d.addCallback(lambda ign: self.handler.makeDirectory(b"newparent/newchild", {})) d.addCallback(lambda ign: self.root.get(u"newparent")) def _got_newparent(newparent): @@ -1374,17 +1381,17 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "makeDirectory invalid UTF-8", - self.handler.makeDirectory, "\xFF", {})) + self.handler.makeDirectory, b"\xFF", {})) # should fail because there is an existing file "small" d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_FAILURE, "makeDirectory small", - self.handler.makeDirectory, "small", {})) + self.handler.makeDirectory, b"small", {})) # directories cannot be created read-only via SFTP d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "makeDirectory newdir2 permissions:0444 denied", - self.handler.makeDirectory, "newdir2", + self.handler.makeDirectory, b"newdir2", {'permissions': 0o444})) d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {})) @@ -1464,24 +1471,24 @@ class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCas def test_extendedRequest(self): d = self._set_up("extendedRequest") - d.addCallback(lambda ign: self.handler.extendedRequest("statvfs@openssh.com", "/")) + d.addCallback(lambda ign: self.handler.extendedRequest(b"statvfs@openssh.com", b"/")) def _check(res): - self.failUnless(isinstance(res, str)) + self.failUnless(isinstance(res, bytes)) self.failUnlessEqual(len(res), 8*11) d.addCallback(_check) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "extendedRequest foo bar", - self.handler.extendedRequest, "foo", "bar")) + self.handler.extendedRequest, b"foo", b"bar")) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest posix-rename@openssh.com invalid 1", - self.handler.extendedRequest, 'posix-rename@openssh.com', '')) + self.handler.extendedRequest, b'posix-rename@openssh.com', b'')) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest posix-rename@openssh.com invalid 2", - self.handler.extendedRequest, 'posix-rename@openssh.com', '\x00\x00\x00\x01')) + self.handler.extendedRequest, b'posix-rename@openssh.com', b'\x00\x00\x00\x01')) d.addCallback(lambda ign: self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "extendedRequest 
posix-rename@openssh.com invalid 3", - self.handler.extendedRequest, 'posix-rename@openssh.com', '\x00\x00\x00\x01_\x00\x00\x00\x01')) + self.handler.extendedRequest, b'posix-rename@openssh.com', b'\x00\x00\x00\x01_\x00\x00\x00\x01')) return d diff --git a/src/allmydata/test/test_stats.py b/src/allmydata/test/test_stats.py index 3ee495927..e56f9d444 100644 --- a/src/allmydata/test/test_stats.py +++ b/src/allmydata/test/test_stats.py @@ -1,3 +1,14 @@ +""" +Ported to Python 3. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 from twisted.trial import unittest from twisted.application import service diff --git a/src/allmydata/test/test_storage_client.py b/src/allmydata/test/test_storage_client.py index 18caccc5d..8500d6bff 100644 --- a/src/allmydata/test/test_storage_client.py +++ b/src/allmydata/test/test_storage_client.py @@ -105,7 +105,8 @@ from allmydata.interfaces import ( SOME_FURL = "pb://abcde@nowhere/fake" -class NativeStorageServerWithVersion(NativeStorageServer): + +class NativeStorageServerWithVersion(NativeStorageServer): # type: ignore # tahoe-lafs/ticket/3573 def __init__(self, version): # note: these instances won't work for anything other than # get_available_space() because we don't upcall @@ -457,7 +458,8 @@ class StoragePluginWebPresence(AsyncTestCase): self.storage_plugin = u"tahoe-lafs-dummy-v1" from twisted.internet import reactor - _, port_endpoint = self.port_assigner.assign(reactor) + _, webport_endpoint = self.port_assigner.assign(reactor) + tubport_location, tubport_endpoint = self.port_assigner.assign(reactor) tempdir = TempDir() self.useFixture(tempdir) @@ -468,8 +470,12 @@ class StoragePluginWebPresence(AsyncTestCase): "web": "1", }, node_config={ - "tub.location": "127.0.0.1:1", - "web.port": ensure_text(port_endpoint), + # We don't really need the main Tub listening but if we + # disable it then we also have to disable storage (because + # config validation policy). + "tub.port": tubport_endpoint, + "tub.location": tubport_location, + "web.port": ensure_text(webport_endpoint), }, storage_plugin=self.storage_plugin, basedir=self.basedir, @@ -564,7 +570,7 @@ class SpyEndpoint(object): return d -@implementer(IConnectionHintHandler) +@implementer(IConnectionHintHandler) # type: ignore # warner/foolscap#78 @attr.s class SpyHandler(object): """ diff --git a/src/allmydata/test/test_storage_web.py b/src/allmydata/test/test_storage_web.py index ca0cd85fc..b3f5fac98 100644 --- a/src/allmydata/test/test_storage_web.py +++ b/src/allmydata/test/test_storage_web.py @@ -70,7 +70,7 @@ def renderJSON(resource): """ Render a JSON from the given resource. """ - return render(resource, {"t": ["json"]}) + return render(resource, {b"t": [b"json"]}) class MyBucketCountingCrawler(BucketCountingCrawler): def finished_prefix(self, cycle, prefix): diff --git a/src/allmydata/test/test_system.py b/src/allmydata/test/test_system.py index 33e55bd3b..bf115f127 100644 --- a/src/allmydata/test/test_system.py +++ b/src/allmydata/test/test_system.py @@ -1,7 +1,22 @@ +""" +Ported to Python 3, partially: test_filesystem* will be done in a future round. 
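
# Every module ported in this change gains the same compatibility header:
# on Python 2 the future.builtins import rebinds the named builtins so that
# ``str`` is text, ``bytes`` is a distinct type, and so on. A minimal
# template of the pattern:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

from future.utils import PY2
if PY2:
    from future.builtins import bytes, dict, list, object, range, str  # noqa: F401
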
+""" from __future__ import print_function +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals + +from future.utils import PY2, PY3 +if PY2: + # Don't import bytes since it causes issues on (so far unported) modules on Python 2. + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, dict, list, object, range, max, min, str # noqa: F401 + +from past.builtins import chr as byteschr, long +from six import ensure_text, ensure_str import os, re, sys, time, json from functools import partial +from unittest import skipIf from bs4 import BeautifulSoup @@ -36,24 +51,69 @@ from twisted.python.filepath import ( FilePath, ) +from ._twisted_9607 import ( + getProcessOutputAndValue, +) + from .common import ( TEST_RSA_KEY_SIZE, SameProcessStreamEndpointAssigner, ) -from .common_web import do_http, Error +from .common_web import do_http as do_http_bytes, Error from .web.common import ( assert_soup_has_tag_with_attributes ) # TODO: move this to common or common_util -from allmydata.test.test_runner import RunBinTahoeMixin from . import common_util as testutil -from .common_util import run_cli +from .common_util import run_cli_unicode from ..scripts.common import ( write_introducer, ) -LARGE_DATA = """ +class RunBinTahoeMixin(object): + def run_bintahoe(self, args, stdin=None, python_options=[], env=None): + # test_runner.run_bintahoe has better unicode support but doesn't + # support env yet and is also synchronous. If we could get rid of + # this in favor of that, though, it would probably be an improvement. + command = sys.executable + argv = python_options + ["-m", "allmydata.scripts.runner"] + args + + if env is None: + env = os.environ + + d = getProcessOutputAndValue(command, argv, env, stdinBytes=stdin) + def fix_signal(result): + # Mirror subprocess.Popen.returncode structure + (out, err, signal) = result + return (out, err, -signal) + d.addErrback(fix_signal) + return d + + +def run_cli(*args, **kwargs): + """ + Run a Tahoe-LAFS CLI utility, but inline. + + Version of run_cli_unicode() that takes any kind of string, and the + command-line args inline instead of as verb + list. + + Backwards compatible version so we don't have to change all the tests that + expected this API. + """ + nodeargs = [ensure_text(a) for a in kwargs.pop("nodeargs", [])] + kwargs["nodeargs"] = nodeargs + return run_cli_unicode( + ensure_text(args[0]), [ensure_text(a) for a in args[1:]], **kwargs) + + +def do_http(*args, **kwargs): + """Wrapper for do_http() that returns Unicode.""" + return do_http_bytes(*args, **kwargs).addCallback( + lambda b: str(b, "utf-8")) + + +LARGE_DATA = b""" This is some data to publish to the remote grid.., which needs to be large enough to not fit inside a LIT uri. """ @@ -627,9 +687,9 @@ def flush_but_dont_ignore(res): def _render_config(config): """ - Convert a ``dict`` of ``dict`` of ``bytes`` to an ini-format string. + Convert a ``dict`` of ``dict`` of ``unicode`` to an ini-format string. """ - return "\n\n".join(list( + return u"\n\n".join(list( _render_config_section(k, v) for (k, v) in config.items() @@ -637,20 +697,20 @@ def _render_config(config): def _render_config_section(heading, values): """ - Convert a ``bytes`` heading and a ``dict`` of ``bytes`` to an ini-format - section as ``bytes``. + Convert a ``unicode`` heading and a ``dict`` of ``unicode`` to an ini-format + section as ``unicode``. 
""" - return "[{}]\n{}\n".format( + return u"[{}]\n{}\n".format( heading, _render_section_values(values) ) def _render_section_values(values): """ - Convert a ``dict`` of ``bytes`` to the body of an ini-format section as - ``bytes``. + Convert a ``dict`` of ``unicode`` to the body of an ini-format section as + ``unicode``. """ - return "\n".join(list( - "{} = {}".format(k, v) + return u"\n".join(list( + u"{} = {}".format(k, v) for (k, v) in sorted(values.items()) )) @@ -753,7 +813,7 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): self.helper_furl = helper_furl if self.numclients >= 4: - with open(os.path.join(basedirs[3], 'tahoe.cfg'), 'ab+') as f: + with open(os.path.join(basedirs[3], 'tahoe.cfg'), 'a+') as f: f.write( "[client]\n" "helper.furl = {}\n".format(helper_furl) @@ -796,8 +856,6 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): def setconf(config, which, section, feature, value): if which in feature_matrix.get((section, feature), {which}): - if isinstance(value, unicode): - value = value.encode("utf-8") config.setdefault(section, {})[feature] = value setnode = partial(setconf, config, which, "node") @@ -870,7 +928,7 @@ class SystemTestMixin(pollmixin.PollMixin, testutil.StallMixin): config = "[client]\n" if helper_furl: config += "helper.furl = %s\n" % helper_furl - basedir.child("tahoe.cfg").setContent(config) + basedir.child("tahoe.cfg").setContent(config.encode("utf-8")) private = basedir.child("private") private.makedirs() write_introducer( @@ -980,12 +1038,12 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): def test_upload_and_download_convergent(self): self.basedir = "system/SystemTest/test_upload_and_download_convergent" - return self._test_upload_and_download(convergence="some convergence string") + return self._test_upload_and_download(convergence=b"some convergence string") def _test_upload_and_download(self, convergence): # we use 4000 bytes of data, which will result in about 400k written # to disk among all our simulated nodes - DATA = "Some data to upload\n" * 200 + DATA = b"Some data to upload\n" * 200 d = self.set_up_nodes() def _check_connections(res): for c in self.clients: @@ -993,7 +1051,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): all_peerids = c.get_storage_broker().get_all_serverids() self.failUnlessEqual(len(all_peerids), self.numclients) sb = c.storage_broker - permuted_peers = sb.get_servers_for_psi("a") + permuted_peers = sb.get_servers_for_psi(b"a") self.failUnlessEqual(len(permuted_peers), self.numclients) d.addCallback(_check_connections) @@ -1016,7 +1074,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): theuri = results.get_uri() log.msg("upload finished: uri is %s" % (theuri,)) self.uri = theuri - assert isinstance(self.uri, str), self.uri + assert isinstance(self.uri, bytes), self.uri self.cap = uri.from_string(self.uri) self.n = self.clients[1].create_node_from_uri(self.uri) d.addCallback(_upload_done) @@ -1050,17 +1108,17 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): d.addCallback(lambda ign: n.read(MemoryConsumer(), offset=1, size=4)) def _read_portion_done(mc): - self.failUnlessEqual("".join(mc.chunks), DATA[1:1+4]) + self.failUnlessEqual(b"".join(mc.chunks), DATA[1:1+4]) d.addCallback(_read_portion_done) d.addCallback(lambda ign: n.read(MemoryConsumer(), offset=2, size=None)) def _read_tail_done(mc): - self.failUnlessEqual("".join(mc.chunks), DATA[2:]) + 
self.failUnlessEqual(b"".join(mc.chunks), DATA[2:]) d.addCallback(_read_tail_done) d.addCallback(lambda ign: n.read(MemoryConsumer(), size=len(DATA)+1000)) def _read_too_much(mc): - self.failUnlessEqual("".join(mc.chunks), DATA) + self.failUnlessEqual(b"".join(mc.chunks), DATA) d.addCallback(_read_too_much) return d @@ -1110,7 +1168,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): return connected d.addCallback(lambda ign: self.poll(_has_helper)) - HELPER_DATA = "Data that needs help to upload" * 1000 + HELPER_DATA = b"Data that needs help to upload" * 1000 def _upload_with_helper(res): u = upload.Data(HELPER_DATA, convergence=convergence) d = self.extra_node.upload(u) @@ -1144,7 +1202,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): d.addCallback(fireEventually) def _upload_resumable(res): - DATA = "Data that needs help to upload and gets interrupted" * 1000 + DATA = b"Data that needs help to upload and gets interrupted" * 1000 u1 = CountingDataUploadable(DATA, convergence=convergence) u2 = CountingDataUploadable(DATA, convergence=convergence) @@ -1266,7 +1324,9 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): s = stats["stats"] self.failUnlessEqual(s["storage_server.accepting_immutable_shares"], 1) c = stats["counters"] - self.failUnless("storage_server.allocate" in c) + # Probably this should be Unicode eventually? But we haven't ported + # stats code yet. + self.failUnless(b"storage_server.allocate" in c) d.addCallback(_grab_stats) return d @@ -1287,7 +1347,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): assert pieces[-5].startswith("client") client_num = int(pieces[-5][-1]) storage_index_s = pieces[-1] - storage_index = si_a2b(storage_index_s) + storage_index = si_a2b(storage_index_s.encode("ascii")) for sharename in filenames: shnum = int(sharename) filename = os.path.join(dirpath, sharename) @@ -1320,7 +1380,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): elif which == "signature": signature = self.flip_bit(signature) elif which == "share_hash_chain": - nodenum = share_hash_chain.keys()[0] + nodenum = list(share_hash_chain.keys())[0] share_hash_chain[nodenum] = self.flip_bit(share_hash_chain[nodenum]) elif which == "block_hash_tree": block_hash_tree[-1] = self.flip_bit(block_hash_tree[-1]) @@ -1343,11 +1403,11 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): def test_mutable(self): self.basedir = "system/SystemTest/test_mutable" - DATA = "initial contents go here." # 25 bytes % 3 != 0 + DATA = b"initial contents go here." 
# 25 bytes % 3 != 0 DATA_uploadable = MutableData(DATA) - NEWDATA = "new contents yay" + NEWDATA = b"new contents yay" NEWDATA_uploadable = MutableData(NEWDATA) - NEWERDATA = "this is getting old" + NEWERDATA = b"this is getting old" NEWERDATA_uploadable = MutableData(NEWERDATA) d = self.set_up_nodes() @@ -1396,7 +1456,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): self.failUnless(" share_hash_chain: " in output) self.failUnless(" block_hash_tree: 1 nodes\n" in output) expected = (" verify-cap: URI:SSK-Verifier:%s:" % - base32.b2a(storage_index)) + str(base32.b2a(storage_index), "ascii")) self.failUnless(expected in output) except unittest.FailTest: print() @@ -1475,7 +1535,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): for (client_num, storage_index, filename, shnum) in shares ]) assert len(where) == 10 # this test is designed for 3-of-10 - for shnum, filename in where.items(): + for shnum, filename in list(where.items()): # shares 7,8,9 are left alone. read will check # (share_hash_chain, block_hash_tree, share_data). New # seqnum+R pairs will trigger a check of (seqnum, R, IV, @@ -1525,9 +1585,9 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): def _check_empty_file(res): # make sure we can create empty files, this usually screws up the # segsize math - d1 = self.clients[2].create_mutable_file(MutableData("")) + d1 = self.clients[2].create_mutable_file(MutableData(b"")) d1.addCallback(lambda newnode: newnode.download_best_version()) - d1.addCallback(lambda res: self.failUnlessEqual("", res)) + d1.addCallback(lambda res: self.failUnlessEqual(b"", res)) return d1 d.addCallback(_check_empty_file) @@ -1550,7 +1610,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): return d def flip_bit(self, good): - return good[:-1] + chr(ord(good[-1]) ^ 0x01) + return good[:-1] + byteschr(ord(good[-1:]) ^ 0x01) def mangle_uri(self, gooduri): # change the key, which changes the storage index, which means we'll @@ -1571,6 +1631,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): # the key, which should cause the download to fail the post-download # plaintext_hash check. + @skipIf(PY3, "Python 3 web support hasn't happened yet.") def test_filesystem(self): self.basedir = "system/SystemTest/test_filesystem" self.data = LARGE_DATA @@ -1632,7 +1693,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): d1.addCallback(self.log, "publish finished") def _stash_uri(filenode): self.uri = filenode.get_uri() - assert isinstance(self.uri, str), (self.uri, filenode) + assert isinstance(self.uri, bytes), (self.uri, filenode) d1.addCallback(_stash_uri) return d1 d.addCallback(_made_subdir1) @@ -1650,7 +1711,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): return res def _do_publish_private(self, res): - self.smalldata = "sssh, very secret stuff" + self.smalldata = b"sssh, very secret stuff" ut = upload.Data(self.smalldata, convergence=None) d = self.clients[0].create_dirnode() d.addCallback(self.log, "GOT private directory") @@ -1737,7 +1798,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mkdir(nope)", None, dirnode.create_subdirectory, u"nope")) d1.addCallback(self.log, "doing add_file(ro)") - ut = upload.Data("I will disappear, unrecorded and unobserved. 
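
# ``flip_bit`` above slices with good[-1:] instead of indexing good[-1]
# because indexing bytes yields an int on Python 3 but a one-byte string on
# Python 2; a length-1 slice is bytes on both. Assuming ``byteschr`` is
# past.builtins.chr, as imported at the top of this module:
from past.builtins import chr as byteschr

def flip_bit(good):
    """Flip the low bit of the final byte of ``good``."""
    return good[:-1] + byteschr(ord(good[-1:]) ^ 0x01)

assert flip_bit(b"\x00\x02") == b"\x00\x03"
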
The tragedy of my demise is made more poignant by its silence, but this beauty is not for you to ever know.", convergence="99i-p1x4-xd4-18yc-ywt-87uu-msu-zo -- completely and totally unguessable string (unless you read this)") + ut = upload.Data(b"I will disappear, unrecorded and unobserved. The tragedy of my demise is made more poignant by its silence, but this beauty is not for you to ever know.", convergence=b"99i-p1x4-xd4-18yc-ywt-87uu-msu-zo -- completely and totally unguessable string (unless you read this)") d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "add_file(nope)", None, dirnode.add_file, u"hope", ut)) d1.addCallback(self.log, "doing get(ro)") @@ -1801,7 +1862,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): "largest-directory-children": 3, "largest-immutable-file": 112, } - for k,v in expected.iteritems(): + for k,v in list(expected.items()): self.failUnlessEqual(stats[k], v, "stats[%s] was %s, not %s" % (k, stats[k], v)) @@ -1850,33 +1911,33 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): return do_http("get", self.webish_url + urlpath) def POST(self, urlpath, use_helper=False, **fields): - sepbase = "boogabooga" - sep = "--" + sepbase + sepbase = b"boogabooga" + sep = b"--" + sepbase form = [] form.append(sep) - form.append('Content-Disposition: form-data; name="_charset"') - form.append('') - form.append('UTF-8') + form.append(b'Content-Disposition: form-data; name="_charset"') + form.append(b'') + form.append(b'UTF-8') form.append(sep) - for name, value in fields.iteritems(): + for name, value in fields.items(): if isinstance(value, tuple): filename, value = value - form.append('Content-Disposition: form-data; name="%s"; ' - 'filename="%s"' % (name, filename.encode("utf-8"))) + form.append(b'Content-Disposition: form-data; name="%s"; ' + b'filename="%s"' % (name, filename.encode("utf-8"))) else: - form.append('Content-Disposition: form-data; name="%s"' % name) - form.append('') - form.append(str(value)) + form.append(b'Content-Disposition: form-data; name="%s"' % name) + form.append(b'') + form.append(b"%s" % (value,)) form.append(sep) - form[-1] += "--" - body = "" + form[-1] += b"--" + body = b"" headers = {} if fields: - body = "\r\n".join(form) + "\r\n" - headers["content-type"] = "multipart/form-data; boundary=%s" % sepbase + body = b"\r\n".join(form) + b"\r\n" + headers["content-type"] = "multipart/form-data; boundary=%s" % str(sepbase, "ascii") return self.POST2(urlpath, body, headers, use_helper) - def POST2(self, urlpath, body="", headers={}, use_helper=False): + def POST2(self, urlpath, body=b"", headers={}, use_helper=False): if use_helper: url = self.helper_webish_url + urlpath else: @@ -1884,7 +1945,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): return do_http("post", url, data=body, headers=headers) def _test_web(self, res): - public = "uri/" + self._root_directory_uri + public = "uri/" + str(self._root_directory_uri, "ascii") d = self.GET("") def _got_welcome(page): html = page.replace('\n', ' ') @@ -1893,7 +1954,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): "I didn't see the right '%s' message in:\n%s" % (connected_re, page)) # nodeids/tubids don't have any regexp-special characters nodeid_re = r'Node ID:\s*%s' % ( - self.clients[0].get_long_tubid(), self.clients[0].get_long_nodeid()) + self.clients[0].get_long_tubid(), str(self.clients[0].get_long_nodeid(), "ascii")) self.failUnless(re.search(nodeid_re, html), "I didn't see 
the right '%s' message in:\n%s" % (nodeid_re, page)) self.failUnless("Helper: 0 active uploads" in page) @@ -1954,7 +2015,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): # upload a file with PUT d.addCallback(self.log, "about to try PUT") d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt", - "new.txt contents")) + b"new.txt contents")) d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt")) d.addCallback(self.failUnlessEqual, "new.txt contents") # and again with something large enough to use multiple segments, @@ -1965,23 +2026,23 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): c.encoding_params['happy'] = 1 d.addCallback(_new_happy_semantics) d.addCallback(lambda res: self.PUT(public + "/subdir3/big.txt", - "big" * 500000)) # 1.5MB + b"big" * 500000)) # 1.5MB d.addCallback(lambda res: self.GET(public + "/subdir3/big.txt")) d.addCallback(lambda res: self.failUnlessEqual(len(res), 1500000)) # can we replace files in place? d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt", - "NEWER contents")) + b"NEWER contents")) d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt")) d.addCallback(self.failUnlessEqual, "NEWER contents") # test unlinked POST - d.addCallback(lambda res: self.POST("uri", t="upload", - file=("new.txt", "data" * 10000))) + d.addCallback(lambda res: self.POST("uri", t=b"upload", + file=("new.txt", b"data" * 10000))) # and again using the helper, which exercises different upload-status # display code - d.addCallback(lambda res: self.POST("uri", use_helper=True, t="upload", - file=("foo.txt", "data2" * 10000))) + d.addCallback(lambda res: self.POST("uri", use_helper=True, t=b"upload", + file=("foo.txt", b"data2" * 10000))) # check that the status page exists d.addCallback(lambda res: self.GET("status")) @@ -2105,7 +2166,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): # exercise some of the diagnostic tools in runner.py # find a share - for (dirpath, dirnames, filenames) in os.walk(unicode(self.basedir)): + for (dirpath, dirnames, filenames) in os.walk(ensure_text(self.basedir)): if "storage" not in dirpath: continue if not filenames: @@ -2119,7 +2180,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): filename = os.path.join(dirpath, filenames[0]) # peek at the magic to see if it is a chk share magic = open(filename, "rb").read(4) - if magic == '\x00\x00\x00\x01': + if magic == b'\x00\x00\x00\x01': break else: self.fail("unable to find any uri_extension files in %r" @@ -2152,7 +2213,6 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): # 'find-shares' tool sharedir, shnum = os.path.split(filename) storagedir, storage_index_s = os.path.split(sharedir) - storage_index_s = str(storage_index_s) nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)] rc,out,err = yield run_cli("debug", "find-shares", storage_index_s, *nodedirs) @@ -2176,7 +2236,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): # allmydata.control (mostly used for performance tests) c0 = self.clients[0] control_furl_file = c0.config.get_private_path("control.furl") - control_furl = open(control_furl_file, "r").read().strip() + control_furl = ensure_str(open(control_furl_file, "r").read().strip()) # it doesn't really matter which Tub we use to connect to the client, # so let's just use our IntroducerNode's d = self.introducer.tub.getReference(control_furl) @@ -2208,7 +2268,7 @@ class 
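
# A reduced sketch of the multipart/form-data body the ``POST`` helper
# above assembles, for a single field; the boundary is bytes inside the
# body but text in the content-type header:
sepbase = b"boogabooga"
sep = b"--" + sepbase
form = [
    sep,
    b'Content-Disposition: form-data; name="t"',
    b'',
    b'upload',
    sep + b'--',
]
body = b"\r\n".join(form) + b"\r\n"
headers = {"content-type": "multipart/form-data; boundary=%s" % str(sepbase, "ascii")}
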
SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): # sure that works, before we add other aliases. root_file = os.path.join(client0_basedir, "private", "root_dir.cap") - f = open(root_file, "w") + f = open(root_file, "wb") f.write(private_uri) f.close() @@ -2290,7 +2350,8 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): files.append(fn) data = "data to be uploaded: file%d\n" % i datas.append(data) - open(fn,"wb").write(data) + with open(fn, "wb") as f: + f.write(data) def _check_stdout_against(out_and_err, filenum=None, data=None): (out, err) = out_and_err @@ -2468,13 +2529,18 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): # recursive copy: setup dn = os.path.join(self.basedir, "dir1") os.makedirs(dn) - open(os.path.join(dn, "rfile1"), "wb").write("rfile1") - open(os.path.join(dn, "rfile2"), "wb").write("rfile2") - open(os.path.join(dn, "rfile3"), "wb").write("rfile3") + with open(os.path.join(dn, "rfile1"), "wb") as f: + f.write("rfile1") + with open(os.path.join(dn, "rfile2"), "wb") as f: + f.write("rfile2") + with open(os.path.join(dn, "rfile3"), "wb") as f: + f.write("rfile3") sdn2 = os.path.join(dn, "subdir2") os.makedirs(sdn2) - open(os.path.join(sdn2, "rfile4"), "wb").write("rfile4") - open(os.path.join(sdn2, "rfile5"), "wb").write("rfile5") + with open(os.path.join(sdn2, "rfile4"), "wb") as f: + f.write("rfile4") + with open(os.path.join(sdn2, "rfile5"), "wb") as f: + f.write("rfile5") # from disk into tahoe d.addCallback(run, "cp", "-r", dn, "tahoe:") @@ -2551,6 +2617,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): return d + @skipIf(PY3, "Python 3 CLI support hasn't happened yet.") def test_filesystem_with_cli_in_subprocess(self): # We do this in a separate test so that test_filesystem doesn't skip if we can't run bin/tahoe. @@ -2574,12 +2641,12 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): out, err, rc_or_sig = res self.failUnlessEqual(rc_or_sig, 0, str(res)) if check_stderr: - self.failUnlessEqual(err, "") + self.failUnlessEqual(err, b"") d.addCallback(_run_in_subprocess, "create-alias", "newalias") d.addCallback(_check_succeeded) - STDIN_DATA = "This is the file to upload from stdin." + STDIN_DATA = b"This is the file to upload from stdin." d.addCallback(_run_in_subprocess, "put", "-", "newalias:tahoe-file", stdin=STDIN_DATA) d.addCallback(_check_succeeded, check_stderr=False) @@ -2601,7 +2668,7 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase): return d def _test_checker(self, res): - ut = upload.Data("too big to be literal" * 200, convergence=None) + ut = upload.Data(b"too big to be literal" * 200, convergence=None) d = self._personal_node.add_file(u"big file", ut) d.addCallback(lambda res: self._personal_node.check(Monitor())) diff --git a/src/allmydata/test/test_tor_provider.py b/src/allmydata/test/test_tor_provider.py index bfc962831..f5dd2e29c 100644 --- a/src/allmydata/test/test_tor_provider.py +++ b/src/allmydata/test/test_tor_provider.py @@ -349,6 +349,10 @@ class Provider(unittest.TestCase): cfs2.assert_called_with(reactor, ep_desc) def test_handler_socks_endpoint(self): + """ + If not configured otherwise, the Tor provider returns a Socks-based + handler. 
+ """ tor = mock.Mock() handler = object() tor.socks_endpoint = mock.Mock(return_value=handler) @@ -365,6 +369,46 @@ class Provider(unittest.TestCase): tor.socks_endpoint.assert_called_with(ep) self.assertIs(h, handler) + def test_handler_socks_unix_endpoint(self): + """ + ``socks.port`` can be configured as a UNIX client endpoint. + """ + tor = mock.Mock() + handler = object() + tor.socks_endpoint = mock.Mock(return_value=handler) + ep = object() + cfs = mock.Mock(return_value=ep) + reactor = object() + + with mock_tor(tor): + p = tor_provider.create(reactor, + FakeConfig(**{"socks.port": "unix:path"})) + with mock.patch("allmydata.util.tor_provider.clientFromString", cfs): + h = p.get_tor_handler() + cfs.assert_called_with(reactor, "unix:path") + tor.socks_endpoint.assert_called_with(ep) + self.assertIs(h, handler) + + def test_handler_socks_tcp_endpoint(self): + """ + ``socks.port`` can be configured as a UNIX client endpoint. + """ + tor = mock.Mock() + handler = object() + tor.socks_endpoint = mock.Mock(return_value=handler) + ep = object() + cfs = mock.Mock(return_value=ep) + reactor = object() + + with mock_tor(tor): + p = tor_provider.create(reactor, + FakeConfig(**{"socks.port": "tcp:127.0.0.1:1234"})) + with mock.patch("allmydata.util.tor_provider.clientFromString", cfs): + h = p.get_tor_handler() + cfs.assert_called_with(reactor, "tcp:127.0.0.1:1234") + tor.socks_endpoint.assert_called_with(ep) + self.assertIs(h, handler) + def test_handler_control_endpoint(self): tor = mock.Mock() handler = object() diff --git a/src/allmydata/test/test_upload.py b/src/allmydata/test/test_upload.py index 94d7575c3..fc9bfd697 100644 --- a/src/allmydata/test/test_upload.py +++ b/src/allmydata/test/test_upload.py @@ -14,6 +14,17 @@ if PY2: import os, shutil from io import BytesIO +from base64 import ( + b64encode, +) + +from hypothesis import ( + given, +) +from hypothesis.strategies import ( + just, + integers, +) from twisted.trial import unittest from twisted.python.failure import Failure @@ -877,6 +888,34 @@ def is_happy_enough(servertoshnums, h, k): return True +class FileHandleTests(unittest.TestCase): + """ + Tests for ``FileHandle``. + """ + def test_get_encryption_key_convergent(self): + """ + When ``FileHandle`` is initialized with a convergence secret, + ``FileHandle.get_encryption_key`` returns a deterministic result that + is a function of that secret. + """ + secret = b"\x42" * 16 + handle = upload.FileHandle(BytesIO(b"hello world"), secret) + handle.set_default_encoding_parameters({ + "k": 3, + "happy": 5, + "n": 10, + # Remember this is the *max* segment size. In reality, the data + # size is much smaller so the actual segment size incorporated + # into the encryption key is also smaller. + "max_segment_size": 128 * 1024, + }) + + self.assertEqual( + b64encode(self.successResultOf(handle.get_encryption_key())), + b"oBcuR/wKdCgCV2GKKXqiNg==", + ) + + class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin, ShouldFailMixin): @@ -2029,6 +2068,91 @@ class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin, f.close() return None + +class EncryptAnUploadableTests(unittest.TestCase): + """ + Tests for ``EncryptAnUploadable``. + """ + def test_same_length(self): + """ + ``EncryptAnUploadable.read_encrypted`` returns ciphertext of the same + length as the underlying plaintext. + """ + plaintext = b"hello world" + uploadable = upload.FileHandle(BytesIO(plaintext), None) + uploadable.set_default_encoding_parameters({ + # These values shouldn't matter. 
+ "k": 3, + "happy": 5, + "n": 10, + "max_segment_size": 128 * 1024, + }) + encrypter = upload.EncryptAnUploadable(uploadable) + ciphertext = b"".join(self.successResultOf(encrypter.read_encrypted(1024, False))) + self.assertEqual(len(ciphertext), len(plaintext)) + + @given(just(b"hello world"), integers(min_value=0, max_value=len(b"hello world"))) + def test_known_result(self, plaintext, split_at): + """ + ``EncryptAnUploadable.read_encrypted`` returns a known-correct ciphertext + string for certain inputs. The ciphertext is independent of the read + sizes. + """ + convergence = b"\x42" * 16 + uploadable = upload.FileHandle(BytesIO(plaintext), convergence) + uploadable.set_default_encoding_parameters({ + # The convergence key is a function of k, n, and max_segment_size + # (among other things). The value for happy doesn't matter + # though. + "k": 3, + "happy": 5, + "n": 10, + "max_segment_size": 128 * 1024, + }) + encrypter = upload.EncryptAnUploadable(uploadable) + def read(n): + return b"".join(self.successResultOf(encrypter.read_encrypted(n, False))) + + # Read the string in one or two pieces to make sure underlying state + # is maintained properly. + first = read(split_at) + second = read(len(plaintext) - split_at) + third = read(1) + ciphertext = first + second + third + + self.assertEqual( + b"Jd2LHCRXozwrEJc=", + b64encode(ciphertext), + ) + + def test_large_read(self): + """ + ``EncryptAnUploadable.read_encrypted`` succeeds even when the requested + data length is much larger than the chunk size. + """ + convergence = b"\x42" * 16 + # 4kB of plaintext + plaintext = b"\xde\xad\xbe\xef" * 1024 + uploadable = upload.FileHandle(BytesIO(plaintext), convergence) + uploadable.set_default_encoding_parameters({ + "k": 3, + "happy": 5, + "n": 10, + "max_segment_size": 128 * 1024, + }) + # Make the chunk size very small so we don't have to operate on a huge + # amount of data to exercise the relevant codepath. + encrypter = upload.EncryptAnUploadable(uploadable, chunk_size=1) + d = encrypter.read_encrypted(len(plaintext), False) + ciphertext = self.successResultOf(d) + self.assertEqual( + list(map(len, ciphertext)), + # Chunk size was specified as 1 above so we will get the whole + # plaintext in one byte chunks. + [1] * len(plaintext), + ) + + # TODO: # upload with exactly 75 servers (shares_of_happiness) # have a download fail diff --git a/src/allmydata/test/test_util.py b/src/allmydata/test/test_util.py index c671caa31..5f5db82bd 100644 --- a/src/allmydata/test/test_util.py +++ b/src/allmydata/test/test_util.py @@ -33,7 +33,9 @@ if six.PY3: class IDLib(unittest.TestCase): def test_nodeid_b2a(self): - self.failUnlessEqual(idlib.nodeid_b2a(b"\x00"*20), "a"*32) + result = idlib.nodeid_b2a(b"\x00"*20) + self.assertEqual(result, "a"*32) + self.assertIsInstance(result, str) class MyList(list): @@ -489,12 +491,16 @@ class JSONBytes(unittest.TestCase): """Tests for BytesJSONEncoder.""" def test_encode_bytes(self): - """BytesJSONEncoder can encode bytes.""" + """BytesJSONEncoder can encode bytes. + + Bytes are presumed to be UTF-8 encoded. 
+ """ + snowman = u"def\N{SNOWMAN}\uFF00" data = { - b"hello": [1, b"cd"], + b"hello": [1, b"cd", {b"abc": [123, snowman.encode("utf-8")]}], } expected = { - u"hello": [1, u"cd"], + u"hello": [1, u"cd", {u"abc": [123, snowman]}], } # Bytes get passed through as if they were UTF-8 Unicode: encoded = jsonbytes.dumps(data) diff --git a/src/allmydata/test/test_websocket_logs.py b/src/allmydata/test/test_websocket_logs.py deleted file mode 100644 index e666a4902..000000000 --- a/src/allmydata/test/test_websocket_logs.py +++ /dev/null @@ -1,54 +0,0 @@ -import json - -from twisted.trial import unittest -from twisted.internet.defer import inlineCallbacks - -from eliot import log_call - -from autobahn.twisted.testing import create_memory_agent, MemoryReactorClockResolver, create_pumper - -from allmydata.web.logs import TokenAuthenticatedWebSocketServerProtocol - - -class TestStreamingLogs(unittest.TestCase): - """ - Test websocket streaming of logs - """ - - def setUp(self): - self.reactor = MemoryReactorClockResolver() - self.pumper = create_pumper() - self.agent = create_memory_agent(self.reactor, self.pumper, TokenAuthenticatedWebSocketServerProtocol) - return self.pumper.start() - - def tearDown(self): - return self.pumper.stop() - - @inlineCallbacks - def test_one_log(self): - """ - write a single Eliot log and see it streamed via websocket - """ - - proto = yield self.agent.open( - transport_config=u"ws://localhost:1234/ws", - options={}, - ) - - messages = [] - def got_message(msg, is_binary=False): - messages.append(json.loads(msg)) - proto.on("message", got_message) - - @log_call(action_type=u"test:cli:some-exciting-action") - def do_a_thing(): - pass - - do_a_thing() - - proto.transport.loseConnection() - yield proto.is_closed - - self.assertEqual(len(messages), 2) - self.assertEqual("started", messages[0]["action_status"]) - self.assertEqual("succeeded", messages[1]["action_status"]) diff --git a/src/allmydata/test/test_windows.py b/src/allmydata/test/test_windows.py new file mode 100644 index 000000000..01e4a57c1 --- /dev/null +++ b/src/allmydata/test/test_windows.py @@ -0,0 +1,225 @@ +# -*- coding: utf-8 -*- +# Tahoe-LAFS -- secure, distributed storage grid +# +# Copyright © 2020 The Tahoe-LAFS Software Foundation +# +# This file is part of Tahoe-LAFS. +# +# See the docs/about.rst file for licensing information. + +""" +Tests for the ``allmydata.windows``. +""" + +from __future__ import division +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + +from sys import ( + executable, +) +from json import ( + load, +) +from textwrap import ( + dedent, +) +from twisted.python.filepath import ( + FilePath, +) +from twisted.python.runtime import ( + platform, +) + +from testtools import ( + skipUnless, +) + +from testtools.matchers import ( + MatchesAll, + AllMatch, + IsInstance, + Equals, +) + +from hypothesis import ( + HealthCheck, + settings, + given, + note, +) + +from hypothesis.strategies import ( + lists, + text, + characters, +) + +from .common import ( + PIPE, + Popen, + SyncTestCase, +) + +slow_settings = settings( + suppress_health_check=[HealthCheck.too_slow], + deadline=None, + + # Reduce the number of examples required to consider the test a success. + # The default is 100. 
diff --git a/src/allmydata/test/test_websocket_logs.py b/src/allmydata/test/test_websocket_logs.py
deleted file mode 100644
index e666a4902..000000000
--- a/src/allmydata/test/test_websocket_logs.py
+++ /dev/null
@@ -1,54 +0,0 @@
-import json
-
-from twisted.trial import unittest
-from twisted.internet.defer import inlineCallbacks
-
-from eliot import log_call
-
-from autobahn.twisted.testing import create_memory_agent, MemoryReactorClockResolver, create_pumper
-
-from allmydata.web.logs import TokenAuthenticatedWebSocketServerProtocol
-
-
-class TestStreamingLogs(unittest.TestCase):
-    """
-    Test websocket streaming of logs
-    """
-
-    def setUp(self):
-        self.reactor = MemoryReactorClockResolver()
-        self.pumper = create_pumper()
-        self.agent = create_memory_agent(self.reactor, self.pumper, TokenAuthenticatedWebSocketServerProtocol)
-        return self.pumper.start()
-
-    def tearDown(self):
-        return self.pumper.stop()
-
-    @inlineCallbacks
-    def test_one_log(self):
-        """
-        write a single Eliot log and see it streamed via websocket
-        """
-
-        proto = yield self.agent.open(
-            transport_config=u"ws://localhost:1234/ws",
-            options={},
-        )
-
-        messages = []
-        def got_message(msg, is_binary=False):
-            messages.append(json.loads(msg))
-        proto.on("message", got_message)
-
-        @log_call(action_type=u"test:cli:some-exciting-action")
-        def do_a_thing():
-            pass
-
-        do_a_thing()
-
-        proto.transport.loseConnection()
-        yield proto.is_closed
-
-        self.assertEqual(len(messages), 2)
-        self.assertEqual("started", messages[0]["action_status"])
-        self.assertEqual("succeeded", messages[1]["action_status"])
diff --git a/src/allmydata/test/test_windows.py b/src/allmydata/test/test_windows.py
new file mode 100644
index 000000000..01e4a57c1
--- /dev/null
+++ b/src/allmydata/test/test_windows.py
@@ -0,0 +1,225 @@
+# -*- coding: utf-8 -*-
+# Tahoe-LAFS -- secure, distributed storage grid
+#
+# Copyright © 2020 The Tahoe-LAFS Software Foundation
+#
+# This file is part of Tahoe-LAFS.
+#
+# See the docs/about.rst file for licensing information.
+
+"""
+Tests for the ``allmydata.windows`` package.
+"""
+
+from __future__ import division
+from __future__ import absolute_import
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
+from sys import (
+    executable,
+)
+from json import (
+    load,
+)
+from textwrap import (
+    dedent,
+)
+from twisted.python.filepath import (
+    FilePath,
+)
+from twisted.python.runtime import (
+    platform,
+)
+
+from testtools import (
+    skipUnless,
+)
+
+from testtools.matchers import (
+    MatchesAll,
+    AllMatch,
+    IsInstance,
+    Equals,
+)
+
+from hypothesis import (
+    HealthCheck,
+    settings,
+    given,
+    note,
+)
+
+from hypothesis.strategies import (
+    lists,
+    text,
+    characters,
+)
+
+from .common import (
+    PIPE,
+    Popen,
+    SyncTestCase,
+)
+
+slow_settings = settings(
+    suppress_health_check=[HealthCheck.too_slow],
+    deadline=None,
+
+    # Reduce the number of examples required to consider the test a success.
+    # The default is 100.  Launching a process is expensive so we'll try to do
+    # it as few times as we can get away with.  To maintain good coverage,
+    # we'll try to pass as much data to each process as we can so we're still
+    # covering a good portion of the space.
+    max_examples=10,
+)
+
+@skipUnless(platform.isWindows(), "get_argv is Windows-only")
+class GetArgvTests(SyncTestCase):
+    """
+    Tests for ``get_argv``.
+    """
+    def test_get_argv_return_type(self):
+        """
+        ``get_argv`` returns a list of unicode strings
+        """
+        # Hide the ``allmydata.windows.fixups.get_argv`` import here so it
+        # doesn't cause failures on non-Windows platforms.
+        from ..windows.fixups import (
+            get_argv,
+        )
+        argv = get_argv()
+
+        # We don't know what this process's command line was so we just make
+        # structural assertions here.
+        self.assertThat(
+            argv,
+            MatchesAll(
+                IsInstance(list),
+                AllMatch(IsInstance(str)),
+            ),
+        )
+
+    # This test runs a child process.  This is unavoidably slow and variable.
+    # Disable the two time-based Hypothesis health checks.
+    @slow_settings
+    @given(
+        lists(
+            text(
+                alphabet=characters(
+                    blacklist_categories=('Cs',),
+                    # Windows CommandLine is a null-terminated string,
+                    # analogous to POSIX exec* arguments.  So exclude nul from
+                    # our generated arguments.
+                    blacklist_characters=('\x00',),
+                ),
+                min_size=10,
+                max_size=20,
+            ),
+            min_size=10,
+            max_size=20,
+        ),
+    )
+    def test_argv_values(self, argv):
+        """
+        ``get_argv`` returns a list representing the result of tokenizing the
+        "command line" argument string provided to Windows processes.
+        """
+        working_path = FilePath(self.mktemp())
+        working_path.makedirs()
+        save_argv_path = working_path.child("script.py")
+        saved_argv_path = working_path.child("data.json")
+        with open(save_argv_path.path, "wt") as f:
+            # A simple program to save argv to a file.  Using the file saves
+            # us having to figure out how to reliably get non-ASCII back over
+            # stdio which may pose an independent set of challenges.  At least
+            # file I/O is relatively simple and well-understood.
+            f.write(dedent(
+                """
+                from allmydata.windows.fixups import (
+                    get_argv,
+                )
+                import json
+                with open({!r}, "wt") as f:
+                    f.write(json.dumps(get_argv()))
+                """.format(saved_argv_path.path)),
+            )
+        argv = [executable.decode("utf-8"), save_argv_path.path] + argv
+        p = Popen(argv, stdin=PIPE, stdout=PIPE, stderr=PIPE)
+        p.stdin.close()
+        stdout = p.stdout.read()
+        stderr = p.stderr.read()
+        returncode = p.wait()
+
+        note("stdout: {!r}".format(stdout))
+        note("stderr: {!r}".format(stderr))
+
+        self.assertThat(
+            returncode,
+            Equals(0),
+        )
+        with open(saved_argv_path.path, "rt") as f:
+            saved_argv = load(f)
+
+        self.assertThat(
+            saved_argv,
+            Equals(argv),
+        )
+
+
+@skipUnless(platform.isWindows(), "intended for Windows-only codepaths")
+class UnicodeOutputTests(SyncTestCase):
+    """
+    Tests for writing unicode to stdout and stderr.
+    """
+    @slow_settings
+    @given(characters(), characters())
+    def test_write_non_ascii(self, stdout_char, stderr_char):
+        """
+        Non-ASCII unicode characters can be written to stdout and stderr with
+        automatic UTF-8 encoding.
+        """
+        working_path = FilePath(self.mktemp())
+        working_path.makedirs()
+        script = working_path.child("script.py")
+        script.setContent(dedent(
+            """
+            from future.utils import PY2
+            if PY2:
+                from future.builtins import chr
+
+            from allmydata.windows.fixups import initialize
+            initialize()
+
+            # XXX A shortcoming of the monkey-patch approach is that you'd
+            # better not import stdout or stderr before you call initialize.
+            from sys import argv, stdout, stderr
+
+            stdout.write(chr(int(argv[1])))
+            stdout.close()
+            stderr.write(chr(int(argv[2])))
+            stderr.close()
+            """
+        ))
+        p = Popen([
+            executable,
+            script.path,
+            str(ord(stdout_char)),
+            str(ord(stderr_char)),
+        ], stdout=PIPE, stderr=PIPE)
+        stdout = p.stdout.read().decode("utf-8").replace("\r\n", "\n")
+        stderr = p.stderr.read().decode("utf-8").replace("\r\n", "\n")
+        returncode = p.wait()
+
+        self.assertThat(
+            (stdout, stderr, returncode),
+            Equals((
+                stdout_char,
+                stderr_char,
+                0,
+            )),
+        )
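
The ``slow_settings`` object above is plain Hypothesis tuning rather than anything Tahoe-specific: when every example launches a child process, you trade example count for example size. The same configuration, distilled into a standalone sketch (the test body and its names are placeholders, not from the patch; the ``settings`` arguments are real Hypothesis options):

    # Sketch: Hypothesis settings for properties where each example is
    # expensive, e.g. forks a process.
    from hypothesis import HealthCheck, given, settings
    from hypothesis.strategies import text

    expensive_settings = settings(
        suppress_health_check=[HealthCheck.too_slow],  # child processes are slow
        deadline=None,                                 # and their timing varies
        max_examples=10,                               # fewer examples...
    )

    @expensive_settings
    @given(text(min_size=10, max_size=20))             # ...but bigger inputs each
    def test_expensive_property(data):
        pass  # launch the child process and assert on its output here
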
diff --git a/src/allmydata/test/web/test_common.py b/src/allmydata/test/web/test_common.py
index 5261c412f..84ab5cab2 100644
--- a/src/allmydata/test/web/test_common.py
+++ b/src/allmydata/test/web/test_common.py
@@ -1,6 +1,16 @@
 """
 Tests for ``allmydata.web.common``.
+
+Ported to Python 3.
 """
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
 
 import gc
 
@@ -163,7 +173,7 @@ class RenderExceptionTests(SyncTestCase):
             BeautifulSoup(value, 'html5lib'),
             "meta",
             {"http-equiv": "refresh",
-             "content": "0;URL={}".format(loc.encode("ascii")),
+             "content": "0;URL={}".format(loc),
             },
         )
         # The assertion will raise if it has a problem, otherwise
diff --git a/src/allmydata/test/web/test_grid.py b/src/allmydata/test/web/test_grid.py
index 8f61781d4..ef2718df4 100644
--- a/src/allmydata/test/web/test_grid.py
+++ b/src/allmydata/test/web/test_grid.py
@@ -1,6 +1,17 @@
+"""
+Ported to Python 3.
+"""
 from __future__ import print_function
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import unicode_literals
 
-import os.path, re, urllib
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
+import os.path, re
+from urllib.parse import quote as url_quote
 import json
 from six.moves import StringIO
@@ -37,7 +48,7 @@ DIR_HTML_TAG = '<html lang="en">'
 
 class CompletelyUnhandledError(Exception):
     pass
-class ErrorBoom(object, resource.Resource):
+class ErrorBoom(resource.Resource, object):
     @render_exception
     def render(self, req):
        raise CompletelyUnhandledError("whoops")
@@ -47,32 +58,38 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
 
     def CHECK(self, ign, which, args, clientnum=0):
         fileurl = self.fileurls[which]
         url = fileurl + "?" + args
-        return self.GET(url, method="POST", clientnum=clientnum)
+        return self.GET_unicode(url, method="POST", clientnum=clientnum)
+
+    def GET_unicode(self, *args, **kwargs):
+        """Send an HTTP request, but convert result to Unicode string."""
+        d = GridTestMixin.GET(self, *args, **kwargs)
+        d.addCallback(str, "utf-8")
+        return d
 
     def test_filecheck(self):
         self.basedir = "web/Grid/filecheck"
         self.set_up_grid()
         c0 = self.g.clients[0]
         self.uris = {}
-        DATA = "data" * 100
-        d = c0.upload(upload.Data(DATA, convergence=""))
+        DATA = b"data" * 100
+        d = c0.upload(upload.Data(DATA, convergence=b""))
         def _stash_uri(ur, which):
             self.uris[which] = ur.get_uri()
         d.addCallback(_stash_uri, "good")
         d.addCallback(lambda ign:
-                      c0.upload(upload.Data(DATA+"1", convergence="")))
+                      c0.upload(upload.Data(DATA+b"1", convergence=b"")))
         d.addCallback(_stash_uri, "sick")
         d.addCallback(lambda ign:
-                      c0.upload(upload.Data(DATA+"2", convergence="")))
+                      c0.upload(upload.Data(DATA+b"2", convergence=b"")))
         d.addCallback(_stash_uri, "dead")
         def _stash_mutable_uri(n, which):
             self.uris[which] = n.get_uri()
-            assert isinstance(self.uris[which], str)
+            assert isinstance(self.uris[which], bytes)
         d.addCallback(lambda ign:
-                      c0.create_mutable_file(publish.MutableData(DATA+"3")))
+                      c0.create_mutable_file(publish.MutableData(DATA+b"3")))
         d.addCallback(_stash_mutable_uri, "corrupt")
         d.addCallback(lambda ign:
-                      c0.upload(upload.Data("literal", convergence="")))
+                      c0.upload(upload.Data(b"literal", convergence=b"")))
         d.addCallback(_stash_uri, "small")
         d.addCallback(lambda ign: c0.create_immutable_dirnode({}))
         d.addCallback(_stash_mutable_uri, "smalldir")
@@ -80,7 +97,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         def _compute_fileurls(ignored):
             self.fileurls = {}
             for which in self.uris:
-                self.fileurls[which] = "uri/" + urllib.quote(self.uris[which])
+                self.fileurls[which] = "uri/" + url_quote(self.uris[which])
         d.addCallback(_compute_fileurls)
 
         def _clobber_shares(ignored):
@@ -203,28 +220,28 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         self.set_up_grid()
         c0 = self.g.clients[0]
         self.uris = {}
-        DATA = "data" * 100
-        d = c0.upload(upload.Data(DATA, convergence=""))
+        DATA = b"data" * 100
+        d = c0.upload(upload.Data(DATA, convergence=b""))
         def _stash_uri(ur, which):
             self.uris[which] = ur.get_uri()
         d.addCallback(_stash_uri, "good")
         d.addCallback(lambda ign:
-                      c0.upload(upload.Data(DATA+"1", convergence="")))
+                      c0.upload(upload.Data(DATA+b"1", convergence=b"")))
         d.addCallback(_stash_uri, "sick")
         d.addCallback(lambda ign:
-                      c0.upload(upload.Data(DATA+"2", convergence="")))
+                      c0.upload(upload.Data(DATA+b"2", convergence=b"")))
         d.addCallback(_stash_uri, "dead")
         def _stash_mutable_uri(n, which):
             self.uris[which] = n.get_uri()
-            assert isinstance(self.uris[which], str)
+            assert isinstance(self.uris[which], bytes)
         d.addCallback(lambda ign:
-                      c0.create_mutable_file(publish.MutableData(DATA+"3")))
+                      c0.create_mutable_file(publish.MutableData(DATA+b"3")))
         d.addCallback(_stash_mutable_uri, "corrupt")
 
         def _compute_fileurls(ignored):
             self.fileurls = {}
             for which in self.uris:
-                self.fileurls[which] = "uri/" + urllib.quote(self.uris[which])
+                self.fileurls[which] = "uri/" + url_quote(self.uris[which])
         d.addCallback(_compute_fileurls)
 
         def _clobber_shares(ignored):
@@ -286,8 +303,8 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         self.set_up_grid()
         c0 = self.g.clients[0]
         self.uris = {}
-        DATA = "data" * 100
-        d = c0.upload(upload.Data(DATA+"1", convergence=""))
+        DATA = b"data" * 100
+        d = c0.upload(upload.Data(DATA+b"1", convergence=b""))
         def _stash_uri(ur, which):
             self.uris[which] = ur.get_uri()
         d.addCallback(_stash_uri, "sick")
@@ -295,7 +312,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         def _compute_fileurls(ignored):
             self.fileurls = {}
             for which in self.uris:
-                self.fileurls[which] = "uri/" + urllib.quote(self.uris[which])
+                self.fileurls[which] = "uri/" + url_quote(self.uris[which])
         d.addCallback(_compute_fileurls)
 
         def _clobber_shares(ignored):
@@ -329,7 +346,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         self.fileurls = {}
 
         # the future cap format may contain slashes, which must be tolerated
-        expected_info_url = "uri/%s?t=info" % urllib.quote(unknown_rwcap,
+        expected_info_url = "uri/%s?t=info" % url_quote(unknown_rwcap,
                                                            safe="")
 
         if immutable:
@@ -343,8 +360,8 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         def _stash_root_and_create_file(n):
             self.rootnode = n
-            self.rooturl = "uri/" + urllib.quote(n.get_uri())
-            self.rourl = "uri/" + urllib.quote(n.get_readonly_uri())
+            self.rooturl = "uri/" + url_quote(n.get_uri())
+            self.rourl = "uri/" + url_quote(n.get_readonly_uri())
             if not immutable:
                 return self.rootnode.set_node(name, future_node)
         d.addCallback(_stash_root_and_create_file)
@@ -352,18 +369,19 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
 
         # make sure directory listing tolerates unknown nodes
         d.addCallback(lambda ign: self.GET(self.rooturl))
         def _check_directory_html(res, expected_type_suffix):
-            pattern = re.compile(r'<td>\?%s</td>[ \t\n\r]*'
-                                  '<td>%s</td>' % (expected_type_suffix, str(name)),
+            pattern = re.compile(br'<td>\?%s</td>[ \t\n\r]*'
+                                 b'<td>%s</td>' % (
+                expected_type_suffix, name.encode("ascii")),
                                  re.DOTALL)
             self.failUnless(re.search(pattern, res), res)
             # find the More Info link for name, should be relative
-            mo = re.search(r'<a href="([^"]+)">More Info</a>', res)
+            mo = re.search(br'<a href="([^"]+)">More Info</a>', res)
             info_url = mo.group(1)
-            self.failUnlessReallyEqual(info_url, "%s?t=info" % (str(name),))
+            self.failUnlessReallyEqual(info_url, b"%s?t=info" % (name.encode("ascii"),))
         if immutable:
-            d.addCallback(_check_directory_html, "-IMM")
+            d.addCallback(_check_directory_html, b"-IMM")
         else:
-            d.addCallback(_check_directory_html, "")
+            d.addCallback(_check_directory_html, b"")
 
         d.addCallback(lambda ign: self.GET(self.rooturl+"?t=json"))
         def _check_directory_json(res, expect_rw_uri):
@@ -383,7 +401,6 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         d.addCallback(_check_directory_json, expect_rw_uri=not immutable)
 
         def _check_info(res, expect_rw_uri, expect_ro_uri):
-            self.failUnlessIn("Object Type: unknown", res)
             if expect_rw_uri:
                 self.failUnlessIn(unknown_rwcap, res)
             if expect_ro_uri:
@@ -393,6 +410,8 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                     self.failUnlessIn(unknown_rocap, res)
                 else:
                     self.failIfIn(unknown_rocap, res)
+            res = str(res, "utf-8")
+            self.failUnlessIn("Object Type: unknown", res)
             self.failIfIn("Raw data as", res)
             self.failIfIn("Directory writecap", res)
             self.failIfIn("Checker Operations", res)
@@ -404,7 +423,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
 
         d.addCallback(lambda ign: self.GET(expected_info_url))
         d.addCallback(_check_info, expect_rw_uri=False, expect_ro_uri=False)
-        d.addCallback(lambda ign: self.GET("%s/%s?t=info" % (self.rooturl, str(name))))
+        d.addCallback(lambda ign: self.GET("%s/%s?t=info" % (self.rooturl, name)))
         d.addCallback(_check_info, expect_rw_uri=False, expect_ro_uri=True)
 
         def _check_json(res, expect_rw_uri):
@@ -436,9 +455,9 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         # or not future_node was immutable.
         d.addCallback(lambda ign: self.GET(self.rourl))
         if immutable:
-            d.addCallback(_check_directory_html, "-IMM")
+            d.addCallback(_check_directory_html, b"-IMM")
         else:
-            d.addCallback(_check_directory_html, "-RO")
+            d.addCallback(_check_directory_html, b"-RO")
 
         d.addCallback(lambda ign: self.GET(self.rourl+"?t=json"))
         d.addCallback(_check_directory_json, expect_rw_uri=False)
@@ -462,9 +481,9 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         self.uris = {}
         self.fileurls = {}
 
-        lonely_uri = "URI:LIT:n5xgk" # LIT for "one"
-        mut_write_uri = "URI:SSK:vfvcbdfbszyrsaxchgevhmmlii:euw4iw7bbnkrrwpzuburbhppuxhc3gwxv26f6imekhz7zyw2ojnq"
-        mut_read_uri = "URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q"
+        lonely_uri = b"URI:LIT:n5xgk" # LIT for "one"
+        mut_write_uri = b"URI:SSK:vfvcbdfbszyrsaxchgevhmmlii:euw4iw7bbnkrrwpzuburbhppuxhc3gwxv26f6imekhz7zyw2ojnq"
+        mut_read_uri = b"URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q"
 
         # This method tests mainly dirnode, but we'd have to duplicate code in order to
         # test the dirnode and web layers separately.
@@ -507,10 +526,10 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
             rep = str(dn)
             self.failUnlessIn("RO-IMM", rep)
             cap = dn.get_cap()
-            self.failUnlessIn("CHK", cap.to_string())
+            self.failUnlessIn(b"CHK", cap.to_string())
             self.cap = cap
             self.rootnode = dn
-            self.rooturl = "uri/" + urllib.quote(dn.get_uri())
+            self.rooturl = "uri/" + url_quote(dn.get_uri())
             return download_to_data(dn._node)
         d.addCallback(_created)
@@ -526,7 +545,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
             entry = entries[0]
             (name_utf8, ro_uri, rwcapdata, metadata_s), subpos = split_netstring(entry, 4)
             name = name_utf8.decode("utf-8")
-            self.failUnlessEqual(rwcapdata, "")
+            self.failUnlessEqual(rwcapdata, b"")
             self.failUnlessIn(name, kids)
             (expected_child, ign) = kids[name]
             self.failUnlessReallyEqual(ro_uri, expected_child.get_readonly_uri())
@@ -553,13 +572,13 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         d.addCallback(lambda ign: self.GET(self.rooturl))
         def _check_html(res):
             soup = BeautifulSoup(res, 'html5lib')
-            self.failIfIn("URI:SSK", res)
+            self.failIfIn(b"URI:SSK", res)
             found = False
             for td in soup.find_all(u"td"):
                 if td.text != u"FILE":
                     continue
                 a = td.findNextSibling()(u"a")[0]
-                self.assertIn(urllib.quote(lonely_uri), a[u"href"])
+                self.assertIn(url_quote(lonely_uri), a[u"href"])
                 self.assertEqual(u"lonely", a.text)
                 self.assertEqual(a[u"rel"], [u"noreferrer"])
                 self.assertEqual(u"{}".format(len("one")), td.findNextSibling().findNextSibling().text)
@@ -573,7 +592,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                 if a.text == u"More Info"
             )
             self.assertEqual(1, len(infos))
-            self.assertTrue(infos[0].endswith(urllib.quote(lonely_uri) + "?t=info"))
+            self.assertTrue(infos[0].endswith(url_quote(lonely_uri) + "?t=info"))
         d.addCallback(_check_html)
 
         # ... and in JSON.
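
The recurring move in these hunks (web responses arrive as ``bytes`` on Python 3, so assertions either decode first or compare against ``b"..."`` literals) is what the new ``GET_unicode`` helper centralizes. The same idea detached from the test mixins, as a sketch with illustrative names; ``get`` stands in for ``GridTestMixin.GET``:

    # Sketch: decode a Deferred'd bytes body once, assert on text afterwards.
    from twisted.internet.defer import succeed

    def get_unicode(get, *args, **kwargs):
        d = get(*args, **kwargs)      # assumed to fire with a bytes body
        d.addCallback(str, "utf-8")   # str(b, "utf-8"); future's str on py2
        return d

    d = get_unicode(lambda: succeed(b"<html>ok</html>"))
    d.addCallback(lambda body: body.startswith(u"<html>"))
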
@@ -596,12 +615,12 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         c0 = self.g.clients[0]
         self.uris = {}
         self.fileurls = {}
-        DATA = "data" * 100
+        DATA = b"data" * 100
         d = c0.create_dirnode()
         def _stash_root_and_create_file(n):
             self.rootnode = n
-            self.fileurls["root"] = "uri/" + urllib.quote(n.get_uri())
-            return n.add_file(u"good", upload.Data(DATA, convergence=""))
+            self.fileurls["root"] = "uri/" + url_quote(n.get_uri())
+            return n.add_file(u"good", upload.Data(DATA, convergence=b""))
         d.addCallback(_stash_root_and_create_file)
         def _stash_uri(fn, which):
             self.uris[which] = fn.get_uri()
@@ -609,13 +628,13 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         d.addCallback(_stash_uri, "good")
         d.addCallback(lambda ign:
                       self.rootnode.add_file(u"small",
-                                             upload.Data("literal",
-                                                         convergence="")))
+                                             upload.Data(b"literal",
+                                                         convergence=b"")))
         d.addCallback(_stash_uri, "small")
         d.addCallback(lambda ign:
                       self.rootnode.add_file(u"sick",
-                                             upload.Data(DATA+"1",
-                                                         convergence="")))
+                                             upload.Data(DATA+b"1",
+                                                         convergence=b"")))
         d.addCallback(_stash_uri, "sick")
 
         # this tests that deep-check and stream-manifest will ignore
@@ -695,13 +714,13 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         d.addCallback(_stash_uri, "subdir")
         d.addCallback(lambda subdir_node:
                       subdir_node.add_file(u"grandchild",
-                                           upload.Data(DATA+"2",
-                                                       convergence="")))
+                                           upload.Data(DATA+b"2",
+                                                       convergence=b"")))
         d.addCallback(_stash_uri, "grandchild")
 
         d.addCallback(lambda ign:
                       self.delete_shares_numbered(self.uris["subdir"],
-                                                  range(1, 10)))
+                                                  list(range(1, 10))))
 
         # root
         # root/good
@@ -770,30 +789,30 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         c0 = self.g.clients[0]
         self.uris = {}
         self.fileurls = {}
-        DATA = "data" * 100
+        DATA = b"data" * 100
         d = c0.create_dirnode()
         def _stash_root_and_create_file(n):
             self.rootnode = n
-            self.fileurls["root"] = "uri/" + urllib.quote(n.get_uri())
-            return n.add_file(u"good", upload.Data(DATA, convergence=""))
+            self.fileurls["root"] = "uri/" + url_quote(n.get_uri())
+            return n.add_file(u"good", upload.Data(DATA, convergence=b""))
         d.addCallback(_stash_root_and_create_file)
         def _stash_uri(fn, which):
             self.uris[which] = fn.get_uri()
         d.addCallback(_stash_uri, "good")
         d.addCallback(lambda ign:
                       self.rootnode.add_file(u"small",
-                                             upload.Data("literal",
-                                                         convergence="")))
+                                             upload.Data(b"literal",
+                                                         convergence=b"")))
         d.addCallback(_stash_uri, "small")
         d.addCallback(lambda ign:
                       self.rootnode.add_file(u"sick",
-                                             upload.Data(DATA+"1",
-                                                         convergence="")))
+                                             upload.Data(DATA+b"1",
+                                                         convergence=b"")))
         d.addCallback(_stash_uri, "sick")
         #d.addCallback(lambda ign:
         #              self.rootnode.add_file(u"dead",
-        #                                     upload.Data(DATA+"2",
-        #                                                 convergence="")))
+        #                                     upload.Data(DATA+b"2",
+        #                                                 convergence=b"")))
         #d.addCallback(_stash_uri, "dead")
 
         #d.addCallback(lambda ign: c0.create_mutable_file("mutable"))
@@ -888,25 +907,25 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         self.set_up_grid(num_clients=2, oneshare=True)
         c0 = self.g.clients[0]
         self.uris = {}
-        DATA = "data" * 100
-        d = c0.upload(upload.Data(DATA, convergence=""))
+        DATA = b"data" * 100
+        d = c0.upload(upload.Data(DATA, convergence=b""))
         def _stash_uri(ur, which):
             self.uris[which] = ur.get_uri()
         d.addCallback(_stash_uri, "one")
         d.addCallback(lambda ign:
-                      c0.upload(upload.Data(DATA+"1", convergence="")))
+                      c0.upload(upload.Data(DATA+b"1", convergence=b"")))
         d.addCallback(_stash_uri, "two")
         def _stash_mutable_uri(n, which):
             self.uris[which] = n.get_uri()
-            assert isinstance(self.uris[which], str)
+            assert isinstance(self.uris[which], bytes)
         d.addCallback(lambda ign:
-                      c0.create_mutable_file(publish.MutableData(DATA+"2")))
+                      c0.create_mutable_file(publish.MutableData(DATA+b"2")))
         d.addCallback(_stash_mutable_uri, "mutable")
 
         def _compute_fileurls(ignored):
             self.fileurls = {}
             for which in self.uris:
-                self.fileurls[which] = "uri/" + urllib.quote(self.uris[which])
+                self.fileurls[which] = "uri/" + url_quote(self.uris[which])
         d.addCallback(_compute_fileurls)
 
         d.addCallback(self._count_leases, "one")
@@ -982,25 +1001,25 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         c0 = self.g.clients[0]
         self.uris = {}
         self.fileurls = {}
-        DATA = "data" * 100
+        DATA = b"data" * 100
         d = c0.create_dirnode()
         def _stash_root_and_create_file(n):
             self.rootnode = n
             self.uris["root"] = n.get_uri()
-            self.fileurls["root"] = "uri/" + urllib.quote(n.get_uri())
-            return n.add_file(u"one", upload.Data(DATA, convergence=""))
+            self.fileurls["root"] = "uri/" + url_quote(n.get_uri())
+            return n.add_file(u"one", upload.Data(DATA, convergence=b""))
         d.addCallback(_stash_root_and_create_file)
         def _stash_uri(fn, which):
             self.uris[which] = fn.get_uri()
         d.addCallback(_stash_uri, "one")
         d.addCallback(lambda ign:
                       self.rootnode.add_file(u"small",
-                                             upload.Data("literal",
-                                                         convergence="")))
+                                             upload.Data(b"literal",
+                                                         convergence=b"")))
         d.addCallback(_stash_uri, "small")
 
         d.addCallback(lambda ign:
-                      c0.create_mutable_file(publish.MutableData("mutable")))
+                      c0.create_mutable_file(publish.MutableData(b"mutable")))
         d.addCallback(lambda fn: self.rootnode.set_node(u"mutable", fn))
         d.addCallback(_stash_uri, "mutable")
@@ -1051,36 +1070,36 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         c0 = self.g.clients[0]
         c0.encoding_params['happy'] = 2
         self.fileurls = {}
-        DATA = "data" * 100
+        DATA = b"data" * 100
         d = c0.create_dirnode()
         def _stash_root(n):
-            self.fileurls["root"] = "uri/" + urllib.quote(n.get_uri())
+            self.fileurls["root"] = "uri/" + url_quote(n.get_uri())
             self.fileurls["imaginary"] = self.fileurls["root"] + "/imaginary"
             return n
         d.addCallback(_stash_root)
-        d.addCallback(lambda ign: c0.upload(upload.Data(DATA, convergence="")))
+        d.addCallback(lambda ign: c0.upload(upload.Data(DATA, convergence=b"")))
         def _stash_bad(ur):
-            self.fileurls["1share"] = "uri/" + urllib.quote(ur.get_uri())
-            self.delete_shares_numbered(ur.get_uri(), range(1,10))
+            self.fileurls["1share"] = "uri/" + url_quote(ur.get_uri())
+            self.delete_shares_numbered(ur.get_uri(), list(range(1,10)))
 
             u = uri.from_string(ur.get_uri())
             u.key = testutil.flip_bit(u.key, 0)
             baduri = u.to_string()
-            self.fileurls["0shares"] = "uri/" + urllib.quote(baduri)
+            self.fileurls["0shares"] = "uri/" + url_quote(baduri)
         d.addCallback(_stash_bad)
 
         d.addCallback(lambda ign: c0.create_dirnode())
         def _mangle_dirnode_1share(n):
             u = n.get_uri()
-            url = self.fileurls["dir-1share"] = "uri/" + urllib.quote(u)
+            url = self.fileurls["dir-1share"] = "uri/" + url_quote(u)
             self.fileurls["dir-1share-json"] = url + "?t=json"
-            self.delete_shares_numbered(u, range(1,10))
+            self.delete_shares_numbered(u, list(range(1,10)))
         d.addCallback(_mangle_dirnode_1share)
         d.addCallback(lambda ign: c0.create_dirnode())
         def _mangle_dirnode_0share(n):
             u = n.get_uri()
-            url = self.fileurls["dir-0share"] = "uri/" + urllib.quote(u)
+            url = self.fileurls["dir-0share"] = "uri/" + url_quote(u)
             self.fileurls["dir-0share-json"] = url + "?t=json"
-            self.delete_shares_numbered(u, range(0,10))
+            self.delete_shares_numbered(u, list(range(0,10)))
         d.addCallback(_mangle_dirnode_0share)
 
         # NotEnoughSharesError should be reported sensibly, with a
@@ -1092,6 +1111,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                                            410, "Gone", "NoSharesError",
                                            self.GET, self.fileurls["0shares"]))
         def _check_zero_shares(body):
+            body = str(body, "utf-8")
             self.failIfIn("<html>", body)
             body = " ".join(body.strip().split())
             exp = ("NoSharesError: no shares could be found. "
@@ -1100,7 +1120,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                    "severe corruption. You should perform a filecheck on "
                    "this object to learn more. The full error message is: "
                    "no shares (need 3). Last failure: None")
-            self.failUnlessReallyEqual(exp, body)
+            self.assertEqual(exp, body)
         d.addCallback(_check_zero_shares)
 
@@ -1109,6 +1129,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                                            410, "Gone", "NotEnoughSharesError",
                                            self.GET, self.fileurls["1share"]))
         def _check_one_share(body):
+            body = str(body, "utf-8")
             self.failIfIn("<html>", body)
             body = " ".join(body.strip().split())
             msgbase = ("NotEnoughSharesError: This indicates that some "
@@ -1133,10 +1154,11 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                                            404, "Not Found", None,
                                            self.GET, self.fileurls["imaginary"]))
         def _missing_child(body):
+            body = str(body, "utf-8")
             self.failUnlessIn("No such child: imaginary", body)
         d.addCallback(_missing_child)
 
-        d.addCallback(lambda ignored: self.GET(self.fileurls["dir-0share"]))
+        d.addCallback(lambda ignored: self.GET_unicode(self.fileurls["dir-0share"]))
         def _check_0shares_dir_html(body):
             self.failUnlessIn(DIR_HTML_TAG, body)
             # we should see the regular page, but without the child table or
@@ -1155,7 +1177,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
             self.failUnlessIn("No upload forms: directory is unreadable", body)
         d.addCallback(_check_0shares_dir_html)
 
-        d.addCallback(lambda ignored: self.GET(self.fileurls["dir-1share"]))
+        d.addCallback(lambda ignored: self.GET_unicode(self.fileurls["dir-1share"]))
         def _check_1shares_dir_html(body):
             # at some point, we'll split UnrecoverableFileError into 0-shares
             # and some-shares like we did for immutable files (since there
@@ -1182,6 +1204,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                                            self.GET,
                                            self.fileurls["dir-0share-json"]))
         def _check_unrecoverable_file(body):
+            body = str(body, "utf-8")
             self.failIfIn("<html>", body)
             body = " ".join(body.strip().split())
             exp = ("UnrecoverableFileError: the directory (or mutable file) "
@@ -1209,7 +1232,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
 
         # attach a webapi child that throws a random error, to test how it
         # gets rendered.
         w = c0.getServiceNamed("webish")
-        w.root.putChild("ERRORBOOM", ErrorBoom())
+        w.root.putChild(b"ERRORBOOM", ErrorBoom())
 
         # "Accept: */*" : should get a text/html stack trace
         # "Accept: text/plain" : should get a text/plain stack trace
@@ -1222,6 +1245,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                                            self.GET, "ERRORBOOM",
                                            headers={"accept": "*/*"}))
         def _internal_error_html1(body):
+            body = str(body, "utf-8")
             self.failUnlessIn("<html>", body, "expected HTML, not '%s'" % body)
         d.addCallback(_internal_error_html1)
 
@@ -1231,6 +1255,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                                            self.GET, "ERRORBOOM",
                                            headers={"accept": "text/plain"}))
         def _internal_error_text2(body):
+            body = str(body, "utf-8")
             self.failIfIn("<html>", body)
             self.failUnless(body.startswith("Traceback "), body)
         d.addCallback(_internal_error_text2)
@@ -1242,6 +1267,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                                            self.GET, "ERRORBOOM",
                                            headers={"accept": CLI_accepts}))
         def _internal_error_text3(body):
+            body = str(body, "utf-8")
             self.failIfIn("<html>", body)
             self.failUnless(body.startswith("Traceback "), body)
         d.addCallback(_internal_error_text3)
@@ -1251,7 +1277,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                                            500, "Internal Server Error", None,
                                            self.GET, "ERRORBOOM"))
         def _internal_error_html4(body):
-            self.failUnlessIn("<html>", body)
+            self.failUnlessIn(b"<html>", body)
         d.addCallback(_internal_error_html4)
 
         def _flush_errors(res):
@@ -1269,12 +1295,12 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         c0 = self.g.clients[0]
         fn = c0.config.get_config_path("access.blacklist")
         self.uris = {}
-        DATA = "off-limits " * 50
+        DATA = b"off-limits " * 50
 
-        d = c0.upload(upload.Data(DATA, convergence=""))
+        d = c0.upload(upload.Data(DATA, convergence=b""))
         def _stash_uri_and_create_dir(ur):
             self.uri = ur.get_uri()
-            self.url = "uri/"+self.uri
+            self.url = b"uri/"+self.uri
             u = uri.from_string_filenode(self.uri)
             self.si = u.get_storage_index()
             childnode = c0.create_node_from_uri(self.uri, None)
@@ -1283,9 +1309,9 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         def _stash_dir(node):
             self.dir_node = node
             self.dir_uri = node.get_uri()
-            self.dir_url = "uri/"+self.dir_uri
+            self.dir_url = b"uri/"+self.dir_uri
         d.addCallback(_stash_dir)
-        d.addCallback(lambda ign: self.GET(self.dir_url, followRedirect=True))
+        d.addCallback(lambda ign: self.GET_unicode(self.dir_url, followRedirect=True))
         def _check_dir_html(body):
             self.failUnlessIn(DIR_HTML_TAG, body)
             self.failUnlessIn("blacklisted.txt", body)
@@ -1298,7 +1324,7 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
             f.write(" # this is a comment\n")
             f.write(" \n")
             f.write("\n") # also exercise blank lines
-            f.write("%s %s\n" % (base32.b2a(self.si), "off-limits to you"))
+            f.write("%s off-limits to you\n" % (str(base32.b2a(self.si), "ascii"),))
             f.close()
         # clients should be checking the blacklist each time, so we don't
         # need to restart the client
@@ -1309,14 +1335,14 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
                                            self.GET, self.url))
 
         # We should still be able to list the parent directory, in HTML...
-        d.addCallback(lambda ign: self.GET(self.dir_url, followRedirect=True))
+        d.addCallback(lambda ign: self.GET_unicode(self.dir_url, followRedirect=True))
        def _check_dir_html2(body):
             self.failUnlessIn(DIR_HTML_TAG, body)
             self.failUnlessIn("blacklisted.txt", body)
         d.addCallback(_check_dir_html2)
 
         # ... and in JSON (used by CLI).
-        d.addCallback(lambda ign: self.GET(self.dir_url+"?t=json", followRedirect=True))
+        d.addCallback(lambda ign: self.GET(self.dir_url+b"?t=json", followRedirect=True))
         def _check_dir_json(res):
             data = json.loads(res)
             self.failUnless(isinstance(data, list), data)
@@ -1355,14 +1381,14 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         d.addCallback(_add_dir)
         def _get_dircap(dn):
             self.dir_si_b32 = base32.b2a(dn.get_storage_index())
-            self.dir_url_base = "uri/"+dn.get_write_uri()
-            self.dir_url_json1 = "uri/"+dn.get_write_uri()+"?t=json"
-            self.dir_url_json2 = "uri/"+dn.get_write_uri()+"?t=json"
-            self.dir_url_json_ro = "uri/"+dn.get_readonly_uri()+"?t=json"
-            self.child_url = "uri/"+dn.get_readonly_uri()+"/child"
+            self.dir_url_base = b"uri/"+dn.get_write_uri()
+            self.dir_url_json1 = b"uri/"+dn.get_write_uri()+b"?t=json"
+            self.dir_url_json2 = b"uri/"+dn.get_write_uri()+b"?t=json"
+            self.dir_url_json_ro = b"uri/"+dn.get_readonly_uri()+b"?t=json"
+            self.child_url = b"uri/"+dn.get_readonly_uri()+b"/child"
         d.addCallback(_get_dircap)
         d.addCallback(lambda ign: self.GET(self.dir_url_base, followRedirect=True))
-        d.addCallback(lambda body: self.failUnlessIn(DIR_HTML_TAG, body))
+        d.addCallback(lambda body: self.failUnlessIn(DIR_HTML_TAG, str(body, "utf-8")))
         d.addCallback(lambda ign: self.GET(self.dir_url_json1))
         d.addCallback(lambda res: json.loads(res))  # just check it decodes
         d.addCallback(lambda ign: self.GET(self.dir_url_json2))
@@ -1373,8 +1399,8 @@ class Grid(GridTestMixin, WebErrorMixin, ShouldFailMixin, testutil.ReallyEqualMi
         d.addCallback(lambda body: self.failUnlessEqual(DATA, body))
 
         def _block_dir(ign):
-            f = open(fn, "w")
-            f.write("%s %s\n" % (self.dir_si_b32, "dir-off-limits to you"))
+            f = open(fn, "wb")
+            f.write(b"%s %s\n" % (self.dir_si_b32, b"dir-off-limits to you"))
             f.close()
             self.g.clients[0].blacklist.last_mtime -= 2.0
         d.addCallback(_block_dir)
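
The blacklist hunks above use two equivalent escapes from the same trap, since ``base32.b2a`` returns ``bytes`` on Python 3: decode to ASCII before formatting into a text-mode file, or stay in bytes end to end. Distilled into a standalone sketch (function names are illustrative; ``si`` is a storage index as in the tests):

    # Sketch of both patterns from the hunks above.
    from allmydata.util import base32

    def write_text(path, si):
        with open(path, "w") as f:
            # decode the base32 bytes before mixing them into a text line
            f.write("%s off-limits to you\n" % (str(base32.b2a(si), "ascii"),))

    def write_binary(path, si):
        with open(path, "wb") as f:
            # or keep everything bytes; b"%s" formatting needs Python 3.5+
            f.write(b"%s dir-off-limits to you\n" % (base32.b2a(si),))
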
""" from __future__ import ( @@ -9,6 +11,19 @@ from __future__ import ( division, ) +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + +import json + +from twisted.trial import unittest +from twisted.internet.defer import inlineCallbacks + +from eliot import log_call + +from autobahn.twisted.testing import create_memory_agent, MemoryReactorClockResolver, create_pumper + from testtools.matchers import ( Equals, ) @@ -37,6 +52,7 @@ from ..common import ( from ...web.logs import ( create_log_resources, + TokenAuthenticatedWebSocketServerProtocol, ) class StreamingEliotLogsTests(SyncTestCase): @@ -57,3 +73,47 @@ class StreamingEliotLogsTests(SyncTestCase): self.client.get(b"http:///v1"), succeeded(has_response_code(Equals(OK))), ) + + +class TestStreamingLogs(unittest.TestCase): + """ + Test websocket streaming of logs + """ + + def setUp(self): + self.reactor = MemoryReactorClockResolver() + self.pumper = create_pumper() + self.agent = create_memory_agent(self.reactor, self.pumper, TokenAuthenticatedWebSocketServerProtocol) + return self.pumper.start() + + def tearDown(self): + return self.pumper.stop() + + @inlineCallbacks + def test_one_log(self): + """ + write a single Eliot log and see it streamed via websocket + """ + + proto = yield self.agent.open( + transport_config=u"ws://localhost:1234/ws", + options={}, + ) + + messages = [] + def got_message(msg, is_binary=False): + messages.append(json.loads(msg)) + proto.on("message", got_message) + + @log_call(action_type=u"test:cli:some-exciting-action") + def do_a_thing(): + pass + + do_a_thing() + + proto.transport.loseConnection() + yield proto.is_closed + + self.assertEqual(len(messages), 2) + self.assertEqual("started", messages[0]["action_status"]) + self.assertEqual("succeeded", messages[1]["action_status"]) diff --git a/src/allmydata/test/web/test_private.py b/src/allmydata/test/web/test_private.py index 27ddbcf78..b426b4d93 100644 --- a/src/allmydata/test/web/test_private.py +++ b/src/allmydata/test/web/test_private.py @@ -1,5 +1,7 @@ """ Tests for ``allmydata.web.private``. + +Ported to Python 3. """ from __future__ import ( @@ -9,6 +11,10 @@ from __future__ import ( division, ) +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + from testtools.matchers import ( Equals, ) @@ -56,6 +62,7 @@ class PrivacyTests(SyncTestCase): return super(PrivacyTests, self).setUp() def _authorization(self, scheme, value): + value = str(value, "utf-8") return Headers({ u"authorization": [u"{} {}".format(scheme, value)], }) @@ -90,7 +97,7 @@ class PrivacyTests(SyncTestCase): self.assertThat( self.client.head( b"http:///foo/bar", - headers=self._authorization(SCHEME, u"foo bar"), + headers=self._authorization(str(SCHEME, "utf-8"), b"foo bar"), ), succeeded(has_response_code(Equals(UNAUTHORIZED))), ) @@ -103,7 +110,7 @@ class PrivacyTests(SyncTestCase): self.assertThat( self.client.head( b"http:///foo/bar", - headers=self._authorization(SCHEME, self.token), + headers=self._authorization(str(SCHEME, "utf-8"), self.token), ), # It's a made up URL so we don't get a 200, either, but a 404. 
succeeded(has_response_code(Equals(NOT_FOUND))), diff --git a/src/allmydata/test/web/test_root.py b/src/allmydata/test/web/test_root.py index 0715c8102..ca3cc695d 100644 --- a/src/allmydata/test/web/test_root.py +++ b/src/allmydata/test/web/test_root.py @@ -1,6 +1,18 @@ +""" +Ported to Python 3. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + import time -from urllib import ( +from urllib.parse import ( quote, ) @@ -77,7 +89,7 @@ class RenderSlashUri(unittest.TestCase): ) self.assertEqual( response_body, - "Invalid capability", + b"Invalid capability", ) @@ -92,7 +104,7 @@ class RenderServiceRow(unittest.TestCase): ann = {"anonymous-storage-FURL": "pb://w2hqnbaa25yw4qgcvghl5psa3srpfgw3@tcp:127.0.0.1:51309/vucto2z4fxment3vfxbqecblbf6zyp6x", "permutation-seed-base32": "w2hqnbaa25yw4qgcvghl5psa3srpfgw3", } - srv = NativeStorageServer("server_id", ann, None, {}, EMPTY_CLIENT_CONFIG) + srv = NativeStorageServer(b"server_id", ann, None, {}, EMPTY_CLIENT_CONFIG) srv.get_connection_status = lambda: ConnectionStatus(False, "summary", {}, 0, 0) class FakeClient(_Client): @@ -103,7 +115,7 @@ class RenderServiceRow(unittest.TestCase): tub_maker=None, node_config=EMPTY_CLIENT_CONFIG, ) - self.storage_broker.test_add_server("test-srv", srv) + self.storage_broker.test_add_server(b"test-srv", srv) root = RootElement(FakeClient(), time.time) req = DummyRequest(b"") diff --git a/src/allmydata/test/web/test_status.py b/src/allmydata/test/web/test_status.py index 5685a3938..414925446 100644 --- a/src/allmydata/test/web/test_status.py +++ b/src/allmydata/test/web/test_status.py @@ -1,6 +1,16 @@ """ Tests for ```allmydata.web.status```. + +Ported to Python 3. """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 from bs4 import BeautifulSoup from twisted.web.template import flattenString @@ -143,12 +153,12 @@ class DownloadStatusElementTests(TrialTestCase): See if we can render the page almost fully. """ status = FakeDownloadStatus( - "si-1", 123, - ["s-1", "s-2", "s-3"], - {"s-1": "unknown problem"}, - {"s-1": [1], "s-2": [1,2], "s-3": [2,3]}, + b"si-1", 123, + [b"s-1", b"s-2", b"s-3"], + {b"s-1": "unknown problem"}, + {b"s-1": [1], b"s-2": [1,2], b"s-3": [2,3]}, {"fetch_per_server": - {"s-1": [1], "s-2": [2,3], "s-3": [3,2]}} + {b"s-1": [1], b"s-2": [2,3], b"s-3": [3,2]}} ) result = self._render_download_status_element(status) diff --git a/src/allmydata/test/web/test_util.py b/src/allmydata/test/web/test_util.py index 24f865ebc..5f4d6bb88 100644 --- a/src/allmydata/test/web/test_util.py +++ b/src/allmydata/test/web/test_util.py @@ -1,3 +1,15 @@ +""" +Ported to Python 3. 
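
The ``_authorization`` change in ``test_private.py`` above guards a classic porting trap: formatting a ``bytes`` token straight into a unicode template on Python 3 silently produces ``"Tahoe-LAFS b'...'"``. The decoded-first pattern outside the test harness (``authorization_headers`` is a hypothetical helper, but ``Headers`` is the real Twisted class used above):

    # Sketch: build a token Authorization header that is correct on Python 3.
    from twisted.web.http_headers import Headers

    def authorization_headers(scheme, token):
        # scheme and token are assumed to be bytes, as in the tests above;
        # decode them before they touch the u"{} {}" template.
        return Headers({
            u"authorization": [
                u"{} {}".format(str(scheme, "utf-8"), str(token, "utf-8")),
            ],
        })
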
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 + from twisted.trial import unittest from allmydata.web import status, common from ..common import ShouldFailMixin diff --git a/src/allmydata/test/web/test_web.py b/src/allmydata/test/web/test_web.py index 326569a26..cebe709c1 100644 --- a/src/allmydata/test/web/test_web.py +++ b/src/allmydata/test/web/test_web.py @@ -1,8 +1,19 @@ +""" +Ported to Python 3. +""" from __future__ import print_function +from __future__ import absolute_import +from __future__ import division +from __future__ import unicode_literals -import os.path, re, urllib, time -import json +from future.utils import PY2 +if PY2: + from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401 +from six import ensure_binary + +import os.path, re, time import treq +from urllib.parse import quote as urlquote, unquote as urlunquote from bs4 import BeautifulSoup @@ -23,7 +34,7 @@ from allmydata.immutable.downloader.status import DownloadStatus from allmydata.dirnode import DirectoryNode from allmydata.nodemaker import NodeMaker from allmydata.web.common import MultiFormatResource -from allmydata.util import fileutil, base32, hashutil +from allmydata.util import fileutil, base32, hashutil, jsonbytes as json from allmydata.util.consumer import download_to_data from allmydata.util.encodingutil import to_bytes from ...util.connection_status import ConnectionStatus @@ -90,7 +101,7 @@ class FakeNodeMaker(NodeMaker): return FakeMutableFileNode(None, None, self.encoding_params, None, self.all_contents).init_from_cap(cap) - def create_mutable_file(self, contents="", keysize=None, + def create_mutable_file(self, contents=b"", keysize=None, version=SDMF_VERSION): n = FakeMutableFileNode(None, None, self.encoding_params, None, self.all_contents) @@ -105,7 +116,7 @@ class FakeUploader(service.Service): d = uploadable.get_size() d.addCallback(lambda size: uploadable.read(size)) def _got_data(datav): - data = "".join(datav) + data = b"".join(datav) n = create_chk_filenode(data, self.all_contents) ur = upload.UploadResults(file_size=len(data), ciphertext_fetched=0, @@ -115,8 +126,8 @@ class FakeUploader(service.Service): servermap={}, timings={}, uri_extension_data={}, - uri_extension_hash="fake", - verifycapstr="fakevcap") + uri_extension_hash=b"fake", + verifycapstr=b"fakevcap") ur.set_uri(n.get_uri()) return ur d.addCallback(_got_data) @@ -127,12 +138,12 @@ class FakeUploader(service.Service): def build_one_ds(): - ds = DownloadStatus("storage_index", 1234) + ds = DownloadStatus(b"storage_index", 1234) now = time.time() - serverA = StubServer(hashutil.tagged_hash("foo", "serverid_a")[:20]) - serverB = StubServer(hashutil.tagged_hash("foo", "serverid_b")[:20]) - storage_index = hashutil.storage_index_hash("SI") + serverA = StubServer(hashutil.tagged_hash(b"foo", b"serverid_a")[:20]) + serverB = StubServer(hashutil.tagged_hash(b"foo", b"serverid_b")[:20]) + storage_index = hashutil.storage_index_hash(b"SI") e0 = ds.add_segment_request(0, now) e0.activate(now+0.5) e0.deliver(now+1, 0, 100, 0.5) # when, start,len, decodetime @@ -189,7 +200,7 @@ class FakeHistory(object): def 
@@ -189,7 +200,7 @@ class FakeHistory(object):
     def list_all_helper_statuses(self):
         return []
 
-class FakeDisplayableServer(StubServer):
+class FakeDisplayableServer(StubServer):  # type: ignore  # tahoe-lafs/ticket/3573
     def __init__(self, serverid, nickname, connected,
                  last_connect_time, last_loss_time, last_rx_time):
         StubServer.__init__(self, serverid)
@@ -210,7 +221,7 @@ class FakeDisplayableServer(StubServer):
            "application-version": "1.0"
         }
     def get_permutation_seed(self):
-        return ""
+        return b""
     def get_announcement(self):
         return self.announcement
     def get_nickname(self):
@@ -255,33 +266,33 @@ class FakeStorageServer(service.MultiService):
     def on_status_changed(self, cb):
         cb(self)
 
-class FakeClient(_Client):
+class FakeClient(_Client):  # type: ignore  # tahoe-lafs/ticket/3573
     def __init__(self):
         # don't upcall to Client.__init__, since we only want to initialize a
         # minimal subset
         service.MultiService.__init__(self)
         self.all_contents = {}
-        self.nodeid = "fake_nodeid"
+        self.nodeid = b"fake_nodeid"
         self.nickname = u"fake_nickname \u263A"
         self.introducer_furls = []
         self.introducer_clients = []
         self.stats_provider = FakeStatsProvider()
-        self._secret_holder = SecretHolder("lease secret", "convergence secret")
+        self._secret_holder = SecretHolder(b"lease secret", b"convergence secret")
         self.helper = None
-        self.convergence = "some random string"
+        self.convergence = b"some random string"
         self.storage_broker = StorageFarmBroker(
             permute_peers=True,
             tub_maker=None,
             node_config=EMPTY_CLIENT_CONFIG,
         )
         # fake knowledge of another server
-        self.storage_broker.test_add_server("other_nodeid",
+        self.storage_broker.test_add_server(b"other_nodeid",
             FakeDisplayableServer(
-                serverid="other_nodeid", nickname=u"other_nickname \u263B", connected = True,
+                serverid=b"other_nodeid", nickname=u"other_nickname \u263B", connected = True,
                 last_connect_time = 10, last_loss_time = 20, last_rx_time = 30))
-        self.storage_broker.test_add_server("disconnected_nodeid",
+        self.storage_broker.test_add_server(b"disconnected_nodeid",
             FakeDisplayableServer(
-                serverid="disconnected_nodeid", nickname=u"disconnected_nickname \u263B", connected = False,
+                serverid=b"disconnected_nodeid", nickname=u"disconnected_nickname \u263B", connected = False,
                 last_connect_time = None, last_loss_time = 25, last_rx_time = 35))
         self.introducer_client = None
         self.history = FakeHistory()
@@ -297,12 +308,12 @@ class FakeClient(_Client):
         self.addService(FakeStorageServer(self.nodeid, self.nickname))
 
     def get_long_nodeid(self):
-        return "v0-nodeid"
+        return b"v0-nodeid"
     def get_long_tubid(self):
-        return "tubid"
+        return u"tubid"
 
     def get_auth_token(self):
-        return 'a fake debug auth token'
+        return b'a fake debug auth token'
 
     def startService(self):
         return service.MultiService.startService(self)
@@ -340,7 +351,7 @@ class WebMixin(TimezoneMixin):
         def _then(res):
             self.public_root = res[0][1]
             assert interfaces.IDirectoryNode.providedBy(self.public_root), res
-            self.public_url = "/uri/" + self.public_root.get_uri()
+            self.public_url = "/uri/" + str(self.public_root.get_uri(), "ascii")
             self.private_root = res[1][1]
 
             foo = res[2][1]
@@ -365,7 +376,7 @@ class WebMixin(TimezoneMixin):
             # mdmf
             self.QUUX_CONTENTS, n, self._quux_txt_uri, self._quux_txt_readonly_uri = self.makefile_mutable(0, mdmf=True)
-            assert self._quux_txt_uri.startswith("URI:MDMF")
+            assert self._quux_txt_uri.startswith(b"URI:MDMF")
             foo.set_uri(u"quux.txt", self._quux_txt_uri, self._quux_txt_readonly_uri)
 
             foo.set_uri(u"empty", res[3][1].get_uri(),
@@ -382,7 +393,7 @@ class WebMixin(TimezoneMixin):
 
             # filenode to test for html encoding issues
             self._htmlname_unicode = u"<&weirdly'named\"file>>>_