Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2024-12-20 21:43:09 +00:00)
Commit 7ad55e0fba: Merge remote-tracking branch 'origin/master' into 2928.remote-allocate_tcp_port-test_node.py
.circleci/config.yml
@@ -14,44 +14,73 @@ version: 2.1
 workflows:
   ci:
     jobs:
-      # Platforms
-      - "debian-9"
+      # Start with jobs testing various platforms.
+
+      # Every job that pulls a Docker image from Docker Hub needs to provide
+      # credentials for that pull operation to avoid being subjected to
+      # unauthenticated pull limits shared across all of CircleCI.  Use this
+      # first job to define a yaml anchor that can be used to supply a
+      # CircleCI job context which makes Docker Hub credentials available in
+      # the environment.
+      #
+      # Contexts are managed in the CircleCI web interface:
+      #
+      # https://app.circleci.com/settings/organization/github/tahoe-lafs/contexts
+      - "debian-9": &DOCKERHUB_CONTEXT
+          context: "dockerhub-auth"

       - "debian-8":
+          <<: *DOCKERHUB_CONTEXT
           requires:
             - "debian-9"

-      - "ubuntu-20-04"
+      - "ubuntu-20-04":
+          <<: *DOCKERHUB_CONTEXT
       - "ubuntu-18-04":
+          <<: *DOCKERHUB_CONTEXT
           requires:
             - "ubuntu-20-04"
       - "ubuntu-16-04":
+          <<: *DOCKERHUB_CONTEXT
           requires:
             - "ubuntu-20-04"

-      - "fedora-29"
+      - "fedora-29":
+          <<: *DOCKERHUB_CONTEXT
       - "fedora-28":
+          <<: *DOCKERHUB_CONTEXT
           requires:
             - "fedora-29"

-      - "centos-8"
+      - "centos-8":
+          <<: *DOCKERHUB_CONTEXT

-      - "nixos-19-09"
+      - "nixos-19-09":
+          <<: *DOCKERHUB_CONTEXT

       # Test against PyPy 2.7
-      - "pypy27-buster"
+      - "pypy27-buster":
+          <<: *DOCKERHUB_CONTEXT

       # Just one Python 3.6 configuration while the port is in-progress.
-      - "python36"
+      - "python36":
+          <<: *DOCKERHUB_CONTEXT

       # Other assorted tasks and configurations
-      - "lint"
-      - "pyinstaller"
-      - "deprecations"
-      - "c-locale"
+      - "lint":
+          <<: *DOCKERHUB_CONTEXT
+      - "pyinstaller":
+          <<: *DOCKERHUB_CONTEXT
+      - "deprecations":
+          <<: *DOCKERHUB_CONTEXT
+      - "c-locale":
+          <<: *DOCKERHUB_CONTEXT
       # Any locale other than C or UTF-8.
-      - "another-locale"
+      - "another-locale":
+          <<: *DOCKERHUB_CONTEXT

       - "integration":
+          <<: *DOCKERHUB_CONTEXT
           requires:
             # If the unit test suite doesn't pass, don't bother running the
             # integration tests.
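The `&DOCKERHUB_CONTEXT` anchor defined on the `debian-9` entry above is what every other job merges in via `<<: *DOCKERHUB_CONTEXT`. A minimal sketch of how that anchor/merge-key mechanism resolves (PyYAML is used here purely for illustration; it is not part of this change):

```python
# Demonstrates YAML anchor + merge-key resolution as used in the hunk above.
import yaml

doc = """
jobs:
  - "debian-9": &DOCKERHUB_CONTEXT
      context: "dockerhub-auth"
  - "debian-8":
      <<: *DOCKERHUB_CONTEXT
      requires:
        - "debian-9"
"""

data = yaml.safe_load(doc)
debian_8 = data["jobs"][1]["debian-8"]
# The merge key copied the anchored mapping's entries into this job.
assert debian_8["context"] == "dockerhub-auth"
assert debian_8["requires"] == ["debian-9"]
```

Each job thus inherits `context: "dockerhub-auth"` without repeating it.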
@@ -59,7 +88,8 @@ workflows:

       # Generate the underlying data for a visualization to aid with Python 3
       # porting.
-      - "build-porting-depgraph"
+      - "build-porting-depgraph":
+          <<: *DOCKERHUB_CONTEXT

   images:
     # Build the Docker images used by the ci jobs.  This makes the ci jobs
@@ -74,22 +104,55 @@ workflows:
         - "master"

     jobs:
-      - "build-image-debian-8"
-      - "build-image-debian-9"
-      - "build-image-ubuntu-16-04"
-      - "build-image-ubuntu-18-04"
-      - "build-image-ubuntu-20-04"
-      - "build-image-fedora-28"
-      - "build-image-fedora-29"
-      - "build-image-centos-8"
-      - "build-image-pypy27-buster"
-      - "build-image-python36-ubuntu"
+      - "build-image-debian-8":
+          <<: *DOCKERHUB_CONTEXT
+      - "build-image-debian-9":
+          <<: *DOCKERHUB_CONTEXT
+      - "build-image-ubuntu-16-04":
+          <<: *DOCKERHUB_CONTEXT
+      - "build-image-ubuntu-18-04":
+          <<: *DOCKERHUB_CONTEXT
+      - "build-image-ubuntu-20-04":
+          <<: *DOCKERHUB_CONTEXT
+      - "build-image-fedora-28":
+          <<: *DOCKERHUB_CONTEXT
+      - "build-image-fedora-29":
+          <<: *DOCKERHUB_CONTEXT
+      - "build-image-centos-8":
+          <<: *DOCKERHUB_CONTEXT
+      - "build-image-pypy27-buster":
+          <<: *DOCKERHUB_CONTEXT
+      - "build-image-python36-ubuntu":
+          <<: *DOCKERHUB_CONTEXT


 jobs:
+  dockerhub-auth-template:
+    # This isn't a real job.  It doesn't get scheduled as part of any
+    # workflow.  Instead, it's just a place we can hang a yaml anchor to
+    # finish the Docker Hub authentication configuration.  Workflow jobs using
+    # the DOCKERHUB_CONTEXT anchor will have access to the environment
+    # variables used here.  These variables will allow the Docker Hub image
+    # pull to be authenticated and hopefully avoid hitting any rate limits.
+    docker: &DOCKERHUB_AUTH
+      - image: "null"
+        auth:
+          username: $DOCKERHUB_USERNAME
+          password: $DOCKERHUB_PASSWORD
+
+    steps:
+      - run:
+          name: "CircleCI YAML schema conformity"
+          command: |
+            # This isn't a real command.  We have to have something in this
+            # space, though, or the CircleCI yaml schema validator gets angry.
+            # Since this job is never scheduled this step is never run so the
+            # actual value here is irrelevant.
+
   lint:
     docker:
-      - image: "circleci/python:2"
+      - <<: *DOCKERHUB_AUTH
+        image: "circleci/python:2"

     steps:
       - "checkout"
@@ -106,7 +169,8 @@ jobs:

   pyinstaller:
     docker:
-      - image: "circleci/python:2"
+      - <<: *DOCKERHUB_AUTH
+        image: "circleci/python:2"

     steps:
       - "checkout"
@@ -131,7 +195,8 @@ jobs:

   debian-9: &DEBIAN
     docker:
-      - image: "tahoelafsci/debian:9-py2.7"
+      - <<: *DOCKERHUB_AUTH
+        image: "tahoelafsci/debian:9-py2.7"
        user: "nobody"

     environment: &UTF_8_ENVIRONMENT
@@ -212,14 +277,16 @@ jobs:
   debian-8:
     <<: *DEBIAN
     docker:
-      - image: "tahoelafsci/debian:8-py2.7"
+      - <<: *DOCKERHUB_AUTH
+        image: "tahoelafsci/debian:8-py2.7"
        user: "nobody"


   pypy27-buster:
     <<: *DEBIAN
     docker:
-      - image: "tahoelafsci/pypy:buster-py2"
+      - <<: *DOCKERHUB_AUTH
+        image: "tahoelafsci/pypy:buster-py2"
        user: "nobody"

     environment:
@@ -280,21 +347,24 @@ jobs:
   ubuntu-16-04:
     <<: *DEBIAN
     docker:
-      - image: "tahoelafsci/ubuntu:16.04-py2.7"
+      - <<: *DOCKERHUB_AUTH
+        image: "tahoelafsci/ubuntu:16.04-py2.7"
        user: "nobody"


   ubuntu-18-04: &UBUNTU_18_04
     <<: *DEBIAN
     docker:
-      - image: "tahoelafsci/ubuntu:18.04-py2.7"
+      - <<: *DOCKERHUB_AUTH
+        image: "tahoelafsci/ubuntu:18.04-py2.7"
        user: "nobody"


   python36:
     <<: *UBUNTU_18_04
     docker:
-      - image: "tahoelafsci/ubuntu:18.04-py3"
+      - <<: *DOCKERHUB_AUTH
+        image: "tahoelafsci/ubuntu:18.04-py3"
        user: "nobody"

     environment:
@@ -309,13 +379,15 @@ jobs:
   ubuntu-20-04:
     <<: *DEBIAN
     docker:
-      - image: "tahoelafsci/ubuntu:20.04"
+      - <<: *DOCKERHUB_AUTH
+        image: "tahoelafsci/ubuntu:20.04"
        user: "nobody"


   centos-8: &RHEL_DERIV
     docker:
-      - image: "tahoelafsci/centos:8-py2"
+      - <<: *DOCKERHUB_AUTH
+        image: "tahoelafsci/centos:8-py2"
        user: "nobody"

     environment: *UTF_8_ENVIRONMENT
@@ -337,21 +409,24 @@ jobs:
   fedora-28:
     <<: *RHEL_DERIV
     docker:
-      - image: "tahoelafsci/fedora:28-py"
+      - <<: *DOCKERHUB_AUTH
+        image: "tahoelafsci/fedora:28-py"
        user: "nobody"


   fedora-29:
     <<: *RHEL_DERIV
     docker:
-      - image: "tahoelafsci/fedora:29-py"
+      - <<: *DOCKERHUB_AUTH
+        image: "tahoelafsci/fedora:29-py"
        user: "nobody"


   nixos-19-09:
     docker:
       # Run in a highly Nix-capable environment.
-      - image: "nixorg/nix:circleci"
+      - <<: *DOCKERHUB_AUTH
+        image: "nixorg/nix:circleci"

     environment:
       NIX_PATH: "nixpkgs=https://github.com/NixOS/nixpkgs-channels/archive/nixos-19.09-small.tar.gz"
@@ -408,7 +483,8 @@ jobs:
     #
     # https://circleci.com/blog/how-to-build-a-docker-image-on-circleci-2-0/
     docker:
-      - image: "docker:17.05.0-ce-git"
+      - <<: *DOCKERHUB_AUTH
+        image: "docker:17.05.0-ce-git"

     environment:
       DISTRO: "tahoelafsci/<DISTRO>:foo-py2"
@@ -418,47 +494,10 @@ jobs:
     steps:
       - "checkout"
       - "setup_remote_docker"
-      - run:
-          name: "Get openssl"
-          command: |
-            apk add --no-cache openssl
-      - run:
-          name: "Get Dockerhub secrets"
-          command: |
-            # If you create an encryption key like this:
-            #
-            #  openssl enc -aes-256-cbc -k secret -P -md sha256
-
-            # From the output that looks like:
-            #
-            #  salt=...
-            #  key=...
-            #  iv =...
-            #
-            # extract just the value for ``key``.
-
-            # then you can re-generate ``secret-env-cipher`` locally using the
-            # command:
-            #
-            #   openssl aes-256-cbc -e -md sha256 -in secret-env-plain -out .circleci/secret-env-cipher -pass env:KEY
-            #
-            # Make sure the key is set as the KEY environment variable in the
-            # CircleCI web interface.  You can do this by visiting
-            # <https://circleci.com/gh/tahoe-lafs/tahoe-lafs/edit#env-vars>
-            # after logging in to CircleCI with an account in the tahoe-lafs
-            # CircleCI team.
-            #
-            # Then you can recover the environment plaintext (for example, to
-            # change and re-encrypt it) just like CircleCI recovers it here:
-            #
-            openssl aes-256-cbc -d -md sha256 -in .circleci/secret-env-cipher -pass env:KEY >> ~/.env
       - run:
           name: "Log in to Dockerhub"
           command: |
-            . ~/.env
-            # TAHOELAFSCI_PASSWORD comes from the secret env.
-            docker login -u tahoelafsci -p ${TAHOELAFSCI_PASSWORD}
+            docker login -u ${DOCKERHUB_USERNAME} -p ${DOCKERHUB_PASSWORD}
       - run:
           name: "Build image"
           command: |
.circleci/secret-env-cipher (deleted)
@@ -1 +0,0 @@
-<encrypted binary data not reproduced>
.pre-commit-config.yaml
@@ -3,13 +3,7 @@ repos:
     hooks:
     - id: codechecks
       name: codechecks
-      stages: ["commit"]
+      stages: ["push"]
       entry: "tox -e codechecks"
       language: system
       pass_filenames: false
-    - id: test
-      name: test
-      stages: ["push"]
-      entry: "make test"
-      language: system
-      pass_filenames: false
3479.minor (new empty file)
CREDITS
@@ -64,3 +64,9 @@ Peter Secor
 Shawn Willden

 Terrell Russell
+
+Jean-Paul Calderone
+
+meejah
+
+Sajith Sasidharan
@@ -15,7 +15,6 @@ from foolscap.furl import (
 from eliot import (
     to_file,
     log_call,
-    start_action,
 )

 from twisted.python.procutils import which
@@ -34,7 +33,6 @@ from util import (
     _DumpOutputProtocol,
     _ProcessExitedProtocol,
     _create_node,
-    _run_node,
     _cleanup_tahoe_process,
     _tahoe_runner_optional_coverage,
     await_client_ready,
@@ -1,7 +1,6 @@
 import sys
 from os.path import join

-from twisted.internet import task
 from twisted.internet.error import ProcessTerminated

 import util
@@ -1,15 +1,8 @@
 from __future__ import print_function

 import sys
-import time
-import shutil
-from os import mkdir, unlink, listdir
-from os.path import join, exists
-from six.moves import StringIO
-
-from twisted.internet.protocol import ProcessProtocol
-from twisted.internet.error import ProcessExitedAlready, ProcessDone
-from twisted.internet.defer import inlineCallbacks, Deferred
+from os import mkdir
+from os.path import join

 import pytest
 import pytest_twisted
integration/test_web.py
@@ -9,20 +9,15 @@ WebAPI *should* do in every situation. It's not clear the latter
 exists anywhere, however.
 """

-import sys
 import time
-import shutil
 import json
 import urllib2
-from os import mkdir, unlink, utime
-from os.path import join, exists, getmtime

 import allmydata.uri

 import util

 import requests
-import pytest_twisted
 import html5lib
 from bs4 import BeautifulSoup

@@ -265,7 +260,8 @@ def test_directory_deep_check(alice):
         dircap_url,
         params={u"t": u"json"},
     )
-    dir_meta = json.loads(resp.content)
+    # Just verify it is valid JSON.
+    json.loads(resp.content)

     # upload a file of pangrams into the directory
     FILE_CONTENTS = u"Sphinx of black quartz, judge my vow.\n" * (2048*10)
newsfragments/3283.minor (new empty file)
newsfragments/3466.minor (new empty file)
newsfragments/3468.minor (new empty file)
newsfragments/3483.minor (new empty file)
newsfragments/3485.minor (new empty file)

newsfragments/3486.installation (new file)
@@ -0,0 +1 @@
+Tahoe-LAFS now requires the `netifaces` Python package and no longer requires the external `ip`, `ifconfig`, or `route.exe` executables.

newsfragments/3488.minor (new empty file)
newsfragments/3490.minor (new empty file)
nix/tahoe-lafs.nix
@@ -1,10 +1,10 @@
 { fetchFromGitHub, lib
-, nettools, python
+, python
 , twisted, foolscap, zfec
 , setuptools, setuptoolsTrial, pyasn1, zope_interface
 , service-identity, pyyaml, magic-wormhole, treq, appdirs
-, beautifulsoup4, eliot, autobahn, cryptography
-, html5lib, pyutil, distro
+, beautifulsoup4, eliot, autobahn, cryptography, netifaces
+, html5lib, pyutil, distro, configparser
 }:
 python.pkgs.buildPythonPackage rec {
   version = "1.14.0.dev";
@@ -41,16 +41,12 @@ python.pkgs.buildPythonPackage rec {
   '';


-  propagatedNativeBuildInputs = [
-    nettools
-  ];
-
   propagatedBuildInputs = with python.pkgs; [
     twisted foolscap zfec appdirs
     setuptoolsTrial pyasn1 zope_interface
     service-identity pyyaml magic-wormhole treq
-    eliot autobahn cryptography setuptools
-    future pyutil distro
+    eliot autobahn cryptography netifaces setuptools
+    future pyutil distro configparser
   ];

   checkInputs = with python.pkgs; [
setup.py
@@ -126,11 +126,17 @@ install_requires = [
     # Support for Python 3 transition
     "future >= 0.18.2",

+    # Discover local network configuration
+    "netifaces",
+
     # Utility code:
     "pyutil >= 3.3.0",

     # Linux distribution detection:
     "distro >= 1.4.0",

+    # Backported configparser for Python 2:
+    "configparser ; python_version < '3.0'",
 ]

 setup_requires = [
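For context on the `netifaces` requirement added above: the package exposes local interface configuration directly to Python, which is what lets Tahoe-LAFS drop its dependence on parsing `ip`/`ifconfig`/`route.exe` output (as the newsfragment earlier describes). A hedged sketch of the kind of lookup involved (not code from this patch):

```python
# A minimal sketch of local-address discovery with netifaces, the kind of
# query that previously required shelling out to platform tools.
import netifaces

def local_ipv4_addresses():
    """Return every IPv4 address bound to a local interface."""
    addresses = []
    for iface in netifaces.interfaces():
        # ifaddresses() maps address families to lists of address dicts.
        for entry in netifaces.ifaddresses(iface).get(netifaces.AF_INET, []):
            addresses.append(entry["addr"])
    return addresses

if __name__ == "__main__":
    print(local_ipv4_addresses())
```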
src/allmydata/client.py
@@ -2,10 +2,9 @@ import os, stat, time, weakref
 from base64 import urlsafe_b64encode
 from functools import partial
 from errno import ENOENT, EPERM
-try:
-    from ConfigParser import NoSectionError
-except ImportError:
-    from configparser import NoSectionError
+# On Python 2 this will be the backported package:
+from configparser import NoSectionError

 from foolscap.furl import (
     decode_furl,
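This hunk works because the `configparser` backport pinned in `setup.py` above makes the Python 3 module name importable on Python 2, so the old two-branch import collapses to one line. Illustrated as a sketch:

```python
# Before: pick the module name by trial and error.
try:
    from ConfigParser import NoSectionError   # Python 2 stdlib name
except ImportError:
    from configparser import NoSectionError   # Python 3 stdlib name

# After: one import, satisfied by the stdlib on Python 3 and by the
# installed backport on Python 2.
from configparser import NoSectionError
```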
@ -35,8 +34,7 @@ from allmydata.util import (
|
|||||||
hashutil, base32, pollmixin, log, idlib,
|
hashutil, base32, pollmixin, log, idlib,
|
||||||
yamlutil, configutil,
|
yamlutil, configutil,
|
||||||
)
|
)
|
||||||
from allmydata.util.encodingutil import (get_filesystem_encoding,
|
from allmydata.util.encodingutil import get_filesystem_encoding
|
||||||
from_utf8_or_none)
|
|
||||||
from allmydata.util.abbreviate import parse_abbreviated_size
|
from allmydata.util.abbreviate import parse_abbreviated_size
|
||||||
from allmydata.util.time_format import parse_duration, parse_date
|
from allmydata.util.time_format import parse_duration, parse_date
|
||||||
from allmydata.util.i2p_provider import create as create_i2p_provider
|
from allmydata.util.i2p_provider import create as create_i2p_provider
|
||||||
@@ -628,7 +626,7 @@ def storage_enabled(config):

     :return bool: ``True`` if storage is enabled, ``False`` otherwise.
     """
-    return config.get_config(b"storage", b"enabled", True, boolean=True)
+    return config.get_config("storage", "enabled", True, boolean=True)


 def anonymous_storage_enabled(config):
@@ -642,7 +640,7 @@ def anonymous_storage_enabled(config):
     """
     return (
         storage_enabled(config) and
-        config.get_config(b"storage", b"anonymous", True, boolean=True)
+        config.get_config("storage", "anonymous", True, boolean=True)
     )

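The `b"storage"` to `"storage"` changes above are not cosmetic: Python 3's `configparser` keys sections and options by `str`, so bytes never match. A small demonstration (assuming, as the surrounding code suggests, that `get_config` is ultimately backed by `configparser`):

```python
# Why bytes section names silently fail on Python 3.
import configparser

parser = configparser.ConfigParser()
parser.read_string(u"[storage]\nenabled = true\n")

print(parser.getboolean("storage", "enabled"))   # True
print(parser.has_section(b"storage"))            # False: bytes don't match str keys
```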
@@ -719,6 +717,9 @@ class _Client(node.Node, pollmixin.PollMixin):

     def init_stats_provider(self):
         gatherer_furl = self.config.get_config("client", "stats_gatherer.furl", None)
+        if gatherer_furl:
+            # FURLs should be bytes:
+            gatherer_furl = gatherer_furl.encode("utf-8")
         self.stats_provider = StatsProvider(self, gatherer_furl)
         self.stats_provider.setServiceParent(self)
         self.stats_provider.register_producer(self)
@@ -781,7 +782,7 @@ class _Client(node.Node, pollmixin.PollMixin):
         vk_string = ed25519.string_from_verifying_key(self._node_public_key)
         vk_bytes = remove_prefix(vk_string, ed25519.PUBLIC_KEY_PREFIX)
         seed = base32.b2a(vk_bytes)
-        self.config.write_config_file("permutation-seed", seed+"\n")
+        self.config.write_config_file("permutation-seed", seed+b"\n", mode="wb")
         return seed.strip()

     def get_anonymous_storage_server(self):
@@ -806,7 +807,7 @@ class _Client(node.Node, pollmixin.PollMixin):

         config_storedir = self.get_config(
             "storage", "storage_dir", self.STOREDIR,
-        ).decode('utf-8')
+        )
         storedir = self.config.get_config_path(config_storedir)

         data = self.config.get_config("storage", "reserved_space", None)
@@ -935,6 +936,10 @@ class _Client(node.Node, pollmixin.PollMixin):
         if helper_furl in ("None", ""):
             helper_furl = None

+        # FURLs need to be bytes:
+        if helper_furl is not None:
+            helper_furl = helper_furl.encode("utf-8")
+
         DEP = self.encoding_params
         DEP["k"] = int(self.config.get_config("client", "shares.needed", DEP["k"]))
         DEP["n"] = int(self.config.get_config("client", "shares.total", DEP["n"]))
@@ -1021,7 +1026,7 @@ class _Client(node.Node, pollmixin.PollMixin):
         c = ControlServer()
         c.setServiceParent(self)
         control_url = self.control_tub.registerReference(c)
-        self.config.write_private_config("control.furl", control_url + b"\n")
+        self.config.write_private_config("control.furl", control_url + "\n")

     def init_helper(self):
         self.helper = Helper(self.config.get_config_path("helper"),
@@ -1043,15 +1048,14 @@ class _Client(node.Node, pollmixin.PollMixin):

         from allmydata.webish import WebishServer
         nodeurl_path = self.config.get_config_path("node.url")
-        staticdir_config = self.config.get_config("node", "web.static", "public_html").decode("utf-8")
+        staticdir_config = self.config.get_config("node", "web.static", "public_html")
         staticdir = self.config.get_config_path(staticdir_config)
         ws = WebishServer(self, webport, nodeurl_path, staticdir)
         ws.setServiceParent(self)

     def init_ftp_server(self):
         if self.config.get_config("ftpd", "enabled", False, boolean=True):
-            accountfile = from_utf8_or_none(
-                self.config.get_config("ftpd", "accounts.file", None))
+            accountfile = self.config.get_config("ftpd", "accounts.file", None)
             if accountfile:
                 accountfile = self.config.get_config_path(accountfile)
             accounturl = self.config.get_config("ftpd", "accounts.url", None)
@@ -1063,14 +1067,13 @@ class _Client(node.Node, pollmixin.PollMixin):

     def init_sftp_server(self):
         if self.config.get_config("sftpd", "enabled", False, boolean=True):
-            accountfile = from_utf8_or_none(
-                self.config.get_config("sftpd", "accounts.file", None))
+            accountfile = self.config.get_config("sftpd", "accounts.file", None)
             if accountfile:
                 accountfile = self.config.get_config_path(accountfile)
             accounturl = self.config.get_config("sftpd", "accounts.url", None)
             sftp_portstr = self.config.get_config("sftpd", "port", "8022")
-            pubkey_file = from_utf8_or_none(self.config.get_config("sftpd", "host_pubkey_file"))
-            privkey_file = from_utf8_or_none(self.config.get_config("sftpd", "host_privkey_file"))
+            pubkey_file = self.config.get_config("sftpd", "host_pubkey_file")
+            privkey_file = self.config.get_config("sftpd", "host_privkey_file")

             from allmydata.frontends import sftpd
             s = sftpd.SFTPServer(self, accountfile, accounturl,
src/allmydata/frontends/ftpd.py
@@ -1,3 +1,4 @@
+from six import ensure_str

 from types import NoneType

@@ -333,5 +334,7 @@ class FTPServer(service.MultiService):
             raise NeedRootcapLookupScheme("must provide some translation")

         f = ftp.FTPFactory(p)
+        # strports requires a native string.
+        ftp_portstr = ensure_str(ftp_portstr)
         s = strports.service(ftp_portstr, f)
         s.setServiceParent(self)
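`six.ensure_str`, as used in the hunk above, always returns the platform's native `str`, decoding on Python 3 and encoding on Python 2, which is what `strports.service` wants. In brief:

```python
# ensure_str normalizes to the native str type on both Pythons.
from six import ensure_str

assert ensure_str(b"tcp:8021") == "tcp:8021"   # bytes decoded on Python 3
assert ensure_str(u"tcp:8021") == "tcp:8021"   # text passes through
```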
src/allmydata/immutable/offloaded.py
@@ -1,3 +1,14 @@
+"""
+Ported to Python 3.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
 import os, stat, time, weakref
 from zope.interface import implementer
@@ -25,10 +36,11 @@ class CHKCheckerAndUEBFetcher(object):
     less than 'N' shares present.

     If the file is completely healthy, I return a tuple of (sharemap,
-    UEB_data, UEB_hash).
+    UEB_data, UEB_hash).  A sharemap is a dict with share numbers as keys and
+    sets of server ids (which hold that share) as values.
     """

-    def __init__(self, peer_getter, storage_index, logparent=None):
+    def __init__(self, peer_getter, storage_index, logparent):
         self._peer_getter = peer_getter
         self._found_shares = set()
         self._storage_index = storage_index
@@ -46,6 +58,12 @@ class CHKCheckerAndUEBFetcher(object):
         return log.msg(*args, **kwargs)

     def check(self):
+        """
+        :return Deferred[bool|(DictOfSets, dict, bytes)]: If no share can be found
+            with a usable UEB block or fewer than N shares can be found then the
+            Deferred fires with ``False``.  Otherwise, it fires with a tuple of
+            the sharemap, the UEB data, and the UEB hash.
+        """
         d = self._get_all_shareholders(self._storage_index)
         d.addCallback(self._get_uri_extension)
         d.addCallback(self._done)
@@ -128,9 +146,9 @@ class CHKUploadHelper(Referenceable, upload.CHKUploader):
     peer selection, encoding, and share pushing. I read ciphertext from the
     remote AssistedUploader.
     """
-    VERSION = { "http://allmydata.org/tahoe/protocols/helper/chk-upload/v1" :
+    VERSION = { b"http://allmydata.org/tahoe/protocols/helper/chk-upload/v1" :
                 { },
-                "application-version": str(allmydata.__full_version__),
+                b"application-version": allmydata.__full_version__.encode("utf-8"),
                }

     def __init__(self, storage_index,
@@ -485,6 +503,19 @@ class LocalCiphertextReader(AskUntilSuccessMixin):

 @implementer(interfaces.RIHelper, interfaces.IStatsProducer)
 class Helper(Referenceable):
+    """
+    :ivar dict[bytes, CHKUploadHelper] _active_uploads: For any uploads which
+        have been started but not finished, a mapping from storage index to the
+        upload helper.
+
+    :ivar chk_checker: A callable which returns an object like a
+        CHKCheckerAndUEBFetcher instance which can check CHK shares.
+        Primarily for the convenience of tests to override.
+
+    :ivar chk_upload: A callable which returns an object like a
+        CHKUploadHelper instance which can upload CHK shares.  Primarily for
+        the convenience of tests to override.
+    """
     # this is the non-distributed version. When we need to have multiple
     # helpers, this object will become the HelperCoordinator, and will query
     # the farm of Helpers to see if anyone has the storage_index of interest,
@@ -498,6 +529,9 @@ class Helper(Referenceable):
     }
     MAX_UPLOAD_STATUSES = 10

+    chk_checker = CHKCheckerAndUEBFetcher
+    chk_upload = CHKUploadHelper
+
     def __init__(self, basedir, storage_broker, secret_holder,
                  stats_provider, history):
         self._basedir = basedir
@@ -569,6 +603,9 @@ class Helper(Referenceable):
         return self.VERSION

     def remote_upload_chk(self, storage_index):
+        """
+        See ``RIHelper.upload_chk``
+        """
         self.count("chk_upload_helper.upload_requests")
         lp = self.log(format="helper: upload_chk query for SI %(si)s",
                       si=si_b2a(storage_index))
@@ -591,7 +628,7 @@ class Helper(Referenceable):
         lp2 = self.log("doing a quick check+UEBfetch",
                        parent=lp, level=log.NOISY)
         sb = self._storage_broker
-        c = CHKCheckerAndUEBFetcher(sb.get_servers_for_psi, storage_index, lp2)
+        c = self.chk_checker(sb.get_servers_for_psi, storage_index, lp2)
         d = c.check()
         def _checked(res):
             if res:
@@ -633,14 +670,18 @@ class Helper(Referenceable):
         return (None, uh)

     def _make_chk_upload_helper(self, storage_index, lp):
-        si_s = si_b2a(storage_index)
+        si_s = si_b2a(storage_index).decode('ascii')
         incoming_file = os.path.join(self._chk_incoming, si_s)
         encoding_file = os.path.join(self._chk_encoding, si_s)
-        uh = CHKUploadHelper(storage_index, self,
-                             self._storage_broker,
-                             self._secret_holder,
-                             incoming_file, encoding_file,
-                             lp)
+        uh = self.chk_upload(
+            storage_index,
+            self,
+            self._storage_broker,
+            self._secret_holder,
+            incoming_file,
+            encoding_file,
+            lp,
+        )
         return uh

     def _add_upload(self, uh):
@@ -1,3 +1,5 @@
+from past.builtins import unicode
+
 import time
 from zope.interface import implementer
 from twisted.application import service
src/allmydata/node.py
@@ -3,21 +3,18 @@ This module contains classes and functions to implement and manage
 a node for Tahoe-LAFS.
 """
 from past.builtins import unicode
+from six import ensure_str

 import datetime
 import os.path
 import re
 import types
 import errno
-from io import StringIO
 import tempfile
 from base64 import b32decode, b32encode

-# Python 2 compatibility
-from six.moves import configparser
-from future.utils import PY2
-if PY2:
-    from io import BytesIO as StringIO  # noqa: F811
+# On Python 2 this will be the backported package.
+import configparser

 from twisted.python import log as twlog
 from twisted.application import service
@@ -187,12 +184,13 @@ def read_config(basedir, portnumfile, generated_files=[], _valid_config=None):

     # (try to) read the main config file
     config_fname = os.path.join(basedir, "tahoe.cfg")
-    parser = configparser.SafeConfigParser()
     try:
         parser = configutil.get_config(config_fname)
     except EnvironmentError as e:
         if e.errno != errno.ENOENT:
             raise
+        # The file is missing, just create empty ConfigParser.
+        parser = configutil.get_config_from_string(u"")

     configutil.validate_config(config_fname, parser, _valid_config)

@@ -209,9 +207,11 @@ def config_from_string(basedir, portnumfile, config_str, _valid_config=None):
     if _valid_config is None:
         _valid_config = _common_valid_config()

+    if isinstance(config_str, bytes):
+        config_str = config_str.decode("utf-8")
+
     # load configuration from in-memory string
-    parser = configparser.SafeConfigParser()
-    parser.readfp(StringIO(config_str))
+    parser = configutil.get_config_from_string(config_str)

     fname = "<in-memory>"
     configutil.validate_config(fname, parser, _valid_config)
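Both hunks above funnel parsing through `configutil`. The real helper lives in `allmydata.util.configutil` and is not shown in this diff; a hypothetical sketch of what `get_config_from_string` plausibly does (note `strict=False`, which matches the duplicate-section merging exercised by `test_duplicate_sections` further down):

```python
# Hypothetical sketch only; the actual implementation is in
# allmydata.util.configutil and is not part of this diff.  The key point:
# parsing is centralized and accepts text, so callers decode bytes first
# (as config_from_string now does).
import configparser

def get_config_from_string(tahoe_cfg_string):
    """Load a ConfigParser from an in-memory unicode string."""
    parser = configparser.ConfigParser(strict=False)
    parser.read_string(tahoe_cfg_string)
    return parser
```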
@@ -360,14 +360,16 @@ class _Config(object):
         """
         privname = os.path.join(self._basedir, "private", name)
         try:
-            value = fileutil.read(privname)
+            value = fileutil.read(privname, mode="r")
         except EnvironmentError as e:
             if e.errno != errno.ENOENT:
                 raise  # we only care about "file doesn't exist"
             if default is _None:
                 raise MissingConfigEntry("The required configuration file %s is missing."
                                          % (quote_output(privname),))
-            if isinstance(default, (bytes, unicode)):
+            if isinstance(default, bytes):
+                default = unicode(default, "utf-8")
+            if isinstance(default, unicode):
                 value = default
             else:
                 value = default()
@@ -379,19 +381,21 @@ class _Config(object):
         config file that resides within the subdirectory named 'private'), and
         return it.
         """
+        if isinstance(value, unicode):
+            value = value.encode("utf-8")
         privname = os.path.join(self._basedir, "private", name)
         with open(privname, "wb") as f:
             f.write(value)

     def get_private_config(self, name, default=_None):
-        """Read the (string) contents of a private config file (which is a
+        """Read the (native string) contents of a private config file (a
         config file that resides within the subdirectory named 'private'),
         and return it. Return a default, or raise an error if one was not
         given.
         """
         privname = os.path.join(self._basedir, "private", name)
         try:
-            return fileutil.read(privname).strip()
+            return fileutil.read(privname, mode="r").strip()
         except EnvironmentError as e:
             if e.errno != errno.ENOENT:
                 raise  # we only care about "file doesn't exist"
@@ -549,9 +553,12 @@ def _convert_tub_port(s):
     :returns: a proper Twisted endpoint string like (`tcp:X`) if `s`
         is a bare number, or returns `s` as-is
     """
-    if re.search(r'^\d+$', s):
-        return "tcp:{}".format(int(s))
-    return s
+    us = s
+    if isinstance(s, bytes):
+        us = s.decode("utf-8")
+    if re.search(r'^\d+$', us):
+        return "tcp:{}".format(int(us))
+    return us


 def _tub_portlocation(config):
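Given the rewritten `_convert_tub_port` above, the expected behavior is:

```python
# Behavior per the hunk above: bytes are decoded before the digit check,
# and the result is always text.
assert _convert_tub_port("8098") == "tcp:8098"
assert _convert_tub_port(b"8098") == "tcp:8098"   # bytes now handled too
assert _convert_tub_port("tcp:8098:interface=127.0.0.1") == "tcp:8098:interface=127.0.0.1"
```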
@@ -639,6 +646,10 @@ def _tub_portlocation(config):
             new_locations.append(loc)
     location = ",".join(new_locations)

+    # Lacking this, Python 2 blows up in Foolscap when it is confused by a
+    # Unicode FURL.
+    location = location.encode("utf-8")
+
     return tubport, location

@@ -686,6 +697,9 @@ def create_main_tub(config, tub_options,
         port_or_endpoint = tor_provider.get_listener()
     else:
         port_or_endpoint = port
+    # Foolscap requires native strings:
+    if isinstance(port_or_endpoint, (bytes, unicode)):
+        port_or_endpoint = ensure_str(port_or_endpoint)
     tub.listenOn(port_or_endpoint)
     tub.setLocation(location)
     log.msg("Tub location set to %s" % (location,))
@@ -742,7 +756,7 @@ class Node(service.MultiService):
         if self.tub is not None:
             self.nodeid = b32decode(self.tub.tubID.upper())  # binary format
             self.short_nodeid = b32encode(self.nodeid).lower()[:8]  # for printing
-            self.config.write_config_file("my_nodeid", b32encode(self.nodeid).lower() + "\n")
+            self.config.write_config_file("my_nodeid", b32encode(self.nodeid).lower() + b"\n", mode="wb")
             self.tub.setServiceParent(self)
         else:
             self.nodeid = self.short_nodeid = None
@@ -839,12 +853,13 @@ class Node(service.MultiService):
         lgfurl = self.config.get_config("node", "log_gatherer.furl", "")
         if lgfurl:
             # this is in addition to the contents of log-gatherer-furlfile
+            lgfurl = lgfurl.encode("utf-8")
             self.log_tub.setOption("log-gatherer-furl", lgfurl)
         self.log_tub.setOption("log-gatherer-furlfile",
                                self.config.get_config_path("log_gatherer.furl"))

         incident_dir = self.config.get_config_path("logs", "incidents")
-        foolscap.logging.log.setLogDir(incident_dir.encode(get_filesystem_encoding()))
+        foolscap.logging.log.setLogDir(incident_dir)
         twlog.msg("Foolscap logging initialized")
         twlog.msg("Note to developers: twistd.log does not receive very much.")
         twlog.msg("Use 'flogtool tail -c NODEDIR/private/logport.furl' instead")
@@ -8,7 +8,9 @@ from os.path import join
 from future.utils import PY2
 if PY2:
     from future.builtins import str  # noqa: F401
-from six.moves.configparser import NoSectionError
+
+# On Python 2 this will be the backported package:
+from configparser import NoSectionError

 from twisted.python import usage

src/allmydata/storage_client.py
@@ -31,12 +31,10 @@ the foolscap-based server implemented in src/allmydata/storage/*.py .
 from past.builtins import unicode

 import re, time, hashlib
-try:
-    from ConfigParser import (
-        NoSectionError,
-    )
-except ImportError:
-    from configparser import NoSectionError
+
+# On Python 2 this will be the backport.
+from configparser import NoSectionError
+
 import attr
 from zope.interface import (
     Attribute,
src/allmydata/test/test_configutil.py
@@ -154,3 +154,12 @@ enabled = false
             self.dynamic_valid_config,
         )
         self.assertIn("section [node] contains unknown option 'invalid'", str(e))
+
+    def test_duplicate_sections(self):
+        """
+        Duplicate section names are merged.
+        """
+        fname = self.create_tahoe_cfg('[node]\na = foo\n[node]\n b = bar\n')
+        config = configutil.get_config(fname)
+        self.assertEqual(config.get("node", "a"), "foo")
+        self.assertEqual(config.get("node", "b"), "bar")
@@ -168,7 +168,11 @@ class Tor(unittest.TestCase):
         tor_provider = create_tor_provider(reactor, config)
         tor_provider.get_tor_handler()
         self.assertIn(
-            "invalid literal for int() with base 10: 'kumquat'",
+            "invalid literal for int()",
+            str(ctx.exception)
+        )
+        self.assertIn(
+            "kumquat",
             str(ctx.exception)
         )

src/allmydata/test/test_offloaded.py
@@ -1,3 +1,6 @@
+"""
+Ported to Python 3.
+"""
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -8,21 +11,54 @@ if PY2:
     from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401

 import os
+from struct import (
+    pack,
+)
+from functools import (
+    partial,
+)
+import attr
+
 from twisted.internet import defer
 from twisted.trial import unittest
 from twisted.application import service

 from foolscap.api import Tub, fireEventually, flushEventualQueue

+from eliot.twisted import (
+    inline_callbacks,
+)
+
 from allmydata.crypto import aes
-from allmydata.storage.server import si_b2a
+from allmydata.storage.server import (
+    si_b2a,
+    StorageServer,
+)
 from allmydata.storage_client import StorageFarmBroker
+from allmydata.immutable.layout import (
+    make_write_bucket_proxy,
+)
 from allmydata.immutable import offloaded, upload
 from allmydata import uri, client
-from allmydata.util import hashutil, fileutil, mathutil
+from allmydata.util import hashutil, fileutil, mathutil, dictutil
+
+from .no_network import (
+    NoNetworkServer,
+    LocalWrapper,
+    fireNow,
+)
 from .common import (
     EMPTY_CLIENT_CONFIG,
+    SyncTestCase,
+)
+
+from testtools.matchers import (
+    Equals,
+    MatchesListwise,
+    IsInstance,
+)
+from testtools.twistedsupport import (
+    succeeded,
 )

 MiB = 1024*1024
@@ -63,35 +99,30 @@ class CHKUploadHelper_fake(offloaded.CHKUploadHelper):
         d.addCallback(_got_size)
         return d

-class Helper_fake_upload(offloaded.Helper):
-    def _make_chk_upload_helper(self, storage_index, lp):
-        si_s = str(si_b2a(storage_index), "utf-8")
-        incoming_file = os.path.join(self._chk_incoming, si_s)
-        encoding_file = os.path.join(self._chk_encoding, si_s)
-        uh = CHKUploadHelper_fake(storage_index, self,
-                                  self._storage_broker,
-                                  self._secret_holder,
-                                  incoming_file, encoding_file,
-                                  lp)
-        return uh
-
-class Helper_already_uploaded(Helper_fake_upload):
-    def _check_chk(self, storage_index, lp):
-        res = upload.HelperUploadResults()
-        res.uri_extension_hash = hashutil.uri_extension_hash(b"")
-
-        # we're pretending that the file they're trying to upload was already
-        # present in the grid. We return some information about the file, so
-        # the client can decide if they like the way it looks. The parameters
-        # used here are chosen to match the defaults.
-        PARAMS = FakeClient.DEFAULT_ENCODING_PARAMETERS
-        ueb_data = {"needed_shares": PARAMS["k"],
-                    "total_shares": PARAMS["n"],
-                    "segment_size": min(PARAMS["max_segment_size"], len(DATA)),
-                    "size": len(DATA),
-                    }
-        res.uri_extension_data = ueb_data
-        return defer.succeed(res)
+@attr.s
+class FakeCHKCheckerAndUEBFetcher(object):
+    """
+    A fake of ``CHKCheckerAndUEBFetcher`` which hard-codes some check result.
+    """
+    peer_getter = attr.ib()
+    storage_index = attr.ib()
+    logparent = attr.ib()
+
+    _sharemap = attr.ib()
+    _ueb_data = attr.ib()
+
+    @property
+    def _ueb_hash(self):
+        return hashutil.uri_extension_hash(
+            uri.pack_extension(self._ueb_data),
+        )
+
+    def check(self):
+        return defer.succeed((
+            self._sharemap,
+            self._ueb_data,
+            self._ueb_hash,
+        ))

 class FakeClient(service.MultiService):
     introducer_clients = []
@@ -126,6 +157,26 @@ def upload_data(uploader, data, convergence):
     u = upload.Data(data, convergence=convergence)
     return uploader.upload(u)


+def make_uploader(helper_furl, parent, override_name=None):
+    """
+    Make an ``upload.Uploader`` service pointed at the given helper and with
+    the given service parent.
+
+    :param bytes helper_furl: The Foolscap URL of the upload helper.
+
+    :param IServiceCollection parent: A parent to assign to the new uploader.
+
+    :param str override_name: If not ``None``, a new name for the uploader
+        service.  Multiple services cannot coexist with the same name.
+    """
+    u = upload.Uploader(helper_furl)
+    if override_name is not None:
+        u.name = override_name
+    u.setServiceParent(parent)
+    return u
+
+
 class AssistedUpload(unittest.TestCase):
     def setUp(self):
         self.tub = t = Tub()
class AssistedUpload(unittest.TestCase):
|
class AssistedUpload(unittest.TestCase):
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
self.tub = t = Tub()
|
self.tub = t = Tub()
|
||||||
@ -145,13 +196,20 @@ class AssistedUpload(unittest.TestCase):
|
|||||||
# bogus host/port
|
# bogus host/port
|
||||||
t.setLocation(b"bogus:1234")
|
t.setLocation(b"bogus:1234")
|
||||||
|
|
||||||
def setUpHelper(self, basedir, helper_class=Helper_fake_upload):
|
def setUpHelper(self, basedir, chk_upload=CHKUploadHelper_fake, chk_checker=None):
|
||||||
fileutil.make_dirs(basedir)
|
fileutil.make_dirs(basedir)
|
||||||
self.helper = h = helper_class(basedir,
|
self.helper = offloaded.Helper(
|
||||||
|
basedir,
|
||||||
self.s.storage_broker,
|
self.s.storage_broker,
|
||||||
self.s.secret_holder,
|
self.s.secret_holder,
|
||||||
None, None)
|
None,
|
||||||
self.helper_furl = self.tub.registerReference(h)
|
None,
|
||||||
|
)
|
||||||
|
if chk_upload is not None:
|
||||||
|
self.helper.chk_upload = chk_upload
|
||||||
|
if chk_checker is not None:
|
||||||
|
self.helper.chk_checker = chk_checker
|
||||||
|
self.helper_furl = self.tub.registerReference(self.helper)
|
||||||
|
|
||||||
def tearDown(self):
|
def tearDown(self):
|
||||||
d = self.s.stopService()
|
d = self.s.stopService()
|
||||||
@@ -159,34 +217,84 @@ class AssistedUpload(unittest.TestCase):
         d.addBoth(flush_but_dont_ignore)
         return d
 
     def test_one(self):
+        """
+        Some data that has never been uploaded before can be uploaded in CHK
+        format using the ``RIHelper`` provider and ``Uploader.upload``.
+        """
         self.basedir = "helper/AssistedUpload/test_one"
         self.setUpHelper(self.basedir)
-        u = upload.Uploader(self.helper_furl)
-        u.setServiceParent(self.s)
+        u = make_uploader(self.helper_furl, self.s)
 
         d = wait_a_few_turns()
 
         def _ready(res):
-            assert u._helper
+            self.assertTrue(
+                u._helper,
+                "Expected uploader to have a helper reference, had {} instead.".format(
+                    u._helper,
+                ),
+            )
             return upload_data(u, DATA, convergence=b"some convergence string")
         d.addCallback(_ready)
+
         def _uploaded(results):
             the_uri = results.get_uri()
-            assert b"CHK" in the_uri
+            self.assertIn(b"CHK", the_uri)
+            self.assertNotEqual(
+                results.get_pushed_shares(),
+                0,
+            )
         d.addCallback(_uploaded)
+
         def _check_empty(res):
+            # Make sure the intermediate artifacts aren't left lying around.
             files = os.listdir(os.path.join(self.basedir, "CHK_encoding"))
-            self.failUnlessEqual(files, [])
+            self.assertEqual(files, [])
            files = os.listdir(os.path.join(self.basedir, "CHK_incoming"))
-            self.failUnlessEqual(files, [])
+            self.assertEqual(files, [])
         d.addCallback(_check_empty)
+
         return d
 
+    @inline_callbacks
+    def test_concurrent(self):
+        """
+        The same data can be uploaded by more than one ``Uploader`` at a time.
+        """
+        self.basedir = "helper/AssistedUpload/test_concurrent"
+        self.setUpHelper(self.basedir)
+        u1 = make_uploader(self.helper_furl, self.s, "u1")
+        u2 = make_uploader(self.helper_furl, self.s, "u2")
+
+        yield wait_a_few_turns()
+
+        for u in [u1, u2]:
+            self.assertTrue(
+                u._helper,
+                "Expected uploader to have a helper reference, had {} instead.".format(
+                    u._helper,
+                ),
+            )
+
+        uploads = list(
+            upload_data(u, DATA, convergence=b"some convergence string")
+            for u
+            in [u1, u2]
+        )
+
+        result1, result2 = yield defer.gatherResults(uploads)
+
+        self.assertEqual(
+            result1.get_uri(),
+            result2.get_uri(),
+        )
+        # It would be really cool to assert that result1.get_pushed_shares() +
+        # result2.get_pushed_shares() == total_shares here.  However, we're
+        # faking too much for that to be meaningful here.  Also it doesn't
+        # hold because we don't actually push _anything_, we just lie about
+        # having pushed stuff.
+
     def test_previous_upload_failed(self):
         self.basedir = "helper/AssistedUpload/test_previous_upload_failed"
         self.setUpHelper(self.basedir)
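``test_concurrent`` above leans on ``defer.gatherResults`` to wait for both uploads, and on convergent encryption to make them yield the same capability. A tiny self-contained illustration of the combinator (already-fired deferreds, so no reactor is needed):

    from twisted.internet import defer

    # Two already-fired deferreds stand in for the two uploads.
    uploads = [defer.succeed("URI:CHK:example"), defer.succeed("URI:CHK:example")]

    def _check(results):
        result1, result2 = results
        assert result1 == result2  # same convergence secret, same cap
        print(results)

    defer.gatherResults(uploads).addCallback(_check)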
@@ -214,8 +322,7 @@ class AssistedUpload(unittest.TestCase):
         f.write(aes.encrypt_data(encryptor, DATA))
         f.close()
 
-        u = upload.Uploader(self.helper_furl)
-        u.setServiceParent(self.s)
+        u = make_uploader(self.helper_furl, self.s)
 
         d = wait_a_few_turns()
 
@@ -237,29 +344,247 @@ class AssistedUpload(unittest.TestCase):
 
         return d
 
+    @inline_callbacks
     def test_already_uploaded(self):
+        """
+        If enough shares to satisfy the needed parameter already exist, the upload
+        succeeds without pushing any shares.
+        """
+        params = FakeClient.DEFAULT_ENCODING_PARAMETERS
+        chk_checker = partial(
+            FakeCHKCheckerAndUEBFetcher,
+            sharemap=dictutil.DictOfSets({
+                0: {b"server0"},
+                1: {b"server1"},
+            }),
+            ueb_data={
+                "size": len(DATA),
+                "segment_size": min(params["max_segment_size"], len(DATA)),
+                "needed_shares": params["k"],
+                "total_shares": params["n"],
+            },
+        )
         self.basedir = "helper/AssistedUpload/test_already_uploaded"
-        self.setUpHelper(self.basedir, helper_class=Helper_already_uploaded)
-        u = upload.Uploader(self.helper_furl)
-        u.setServiceParent(self.s)
+        self.setUpHelper(
+            self.basedir,
+            chk_checker=chk_checker,
+        )
+        u = make_uploader(self.helper_furl, self.s)
 
-        d = wait_a_few_turns()
+        yield wait_a_few_turns()
 
-        def _ready(res):
-            assert u._helper
+        assert u._helper
 
-            return upload_data(u, DATA, convergence=b"some convergence string")
-        d.addCallback(_ready)
-        def _uploaded(results):
-            the_uri = results.get_uri()
-            assert b"CHK" in the_uri
-        d.addCallback(_uploaded)
+        results = yield upload_data(u, DATA, convergence=b"some convergence string")
+        the_uri = results.get_uri()
+        assert b"CHK" in the_uri
 
-        def _check_empty(res):
-            files = os.listdir(os.path.join(self.basedir, "CHK_encoding"))
-            self.failUnlessEqual(files, [])
-            files = os.listdir(os.path.join(self.basedir, "CHK_incoming"))
-            self.failUnlessEqual(files, [])
-        d.addCallback(_check_empty)
+        files = os.listdir(os.path.join(self.basedir, "CHK_encoding"))
+        self.failUnlessEqual(files, [])
+        files = os.listdir(os.path.join(self.basedir, "CHK_incoming"))
+        self.failUnlessEqual(files, [])
 
-        return d
+        self.assertEqual(
+            results.get_pushed_shares(),
+            0,
+        )
+
+
+class CHKCheckerAndUEBFetcherTests(SyncTestCase):
+    """
+    Tests for ``CHKCheckerAndUEBFetcher``.
+    """
+    def test_check_no_peers(self):
+        """
+        If the supplied "peer getter" returns no peers then
+        ``CHKCheckerAndUEBFetcher.check`` returns a ``Deferred`` that fires
+        with ``False``.
+        """
+        storage_index = b"a" * 16
+        peers = {storage_index: []}
+        caf = offloaded.CHKCheckerAndUEBFetcher(
+            peers.get,
+            storage_index,
+            None,
+        )
+        self.assertThat(
+            caf.check(),
+            succeeded(Equals(False)),
+        )
+
+    @inline_callbacks
+    def test_check_ueb_unavailable(self):
+        """
+        If the UEB cannot be read from any of the peers supplied by the "peer
+        getter" then ``CHKCheckerAndUEBFetcher.check`` returns a ``Deferred``
+        that fires with ``False``.
+        """
+        storage_index = b"a" * 16
+        serverid = b"b" * 20
+        storage = StorageServer(self.mktemp(), serverid)
+        rref_without_ueb = LocalWrapper(storage, fireNow)
+        yield write_bad_share(rref_without_ueb, storage_index)
+        server_without_ueb = NoNetworkServer(serverid, rref_without_ueb)
+        peers = {storage_index: [server_without_ueb]}
+        caf = offloaded.CHKCheckerAndUEBFetcher(
+            peers.get,
+            storage_index,
+            None,
+        )
+        self.assertThat(
+            caf.check(),
+            succeeded(Equals(False)),
+        )
+
+    @inline_callbacks
+    def test_not_enough_shares(self):
+        """
+        If fewer shares are found than are required to reassemble the data then
+        ``CHKCheckerAndUEBFetcher.check`` returns a ``Deferred`` that fires
+        with ``False``.
+        """
+        storage_index = b"a" * 16
+        serverid = b"b" * 20
+        storage = StorageServer(self.mktemp(), serverid)
+        rref_with_ueb = LocalWrapper(storage, fireNow)
+        ueb = {
+            "needed_shares": 2,
+            "total_shares": 2,
+            "segment_size": 128 * 1024,
+            "size": 1024,
+        }
+        yield write_good_share(rref_with_ueb, storage_index, ueb, [0])
+
+        server_with_ueb = NoNetworkServer(serverid, rref_with_ueb)
+        peers = {storage_index: [server_with_ueb]}
+        caf = offloaded.CHKCheckerAndUEBFetcher(
+            peers.get,
+            storage_index,
+            None,
+        )
+        self.assertThat(
+            caf.check(),
+            succeeded(Equals(False)),
+        )
+
+    @inline_callbacks
+    def test_enough_shares(self):
+        """
+        If enough shares are found to reassemble the data then
+        ``CHKCheckerAndUEBFetcher.check`` returns a ``Deferred`` that fires
+        with share and share placement information.
+        """
+        storage_index = b"a" * 16
+        serverids = list(
+            ch * 20
+            for ch
+            in [b"b", b"c"]
+        )
+        storages = list(
+            StorageServer(self.mktemp(), serverid)
+            for serverid
+            in serverids
+        )
+        rrefs_with_ueb = list(
+            LocalWrapper(storage, fireNow)
+            for storage
+            in storages
+        )
+        ueb = {
+            "needed_shares": len(serverids),
+            "total_shares": len(serverids),
+            "segment_size": 128 * 1024,
+            "size": 1024,
+        }
+        for n, rref_with_ueb in enumerate(rrefs_with_ueb):
+            yield write_good_share(rref_with_ueb, storage_index, ueb, [n])
+
+        servers_with_ueb = list(
+            NoNetworkServer(serverid, rref_with_ueb)
+            for (serverid, rref_with_ueb)
+            in zip(serverids, rrefs_with_ueb)
+        )
+        peers = {storage_index: servers_with_ueb}
+        caf = offloaded.CHKCheckerAndUEBFetcher(
+            peers.get,
+            storage_index,
+            None,
+        )
+        self.assertThat(
+            caf.check(),
+            succeeded(MatchesListwise([
+                Equals({
+                    n: {serverid}
+                    for (n, serverid)
+                    in enumerate(serverids)
+                }),
+                Equals(ueb),
+                IsInstance(bytes),
+            ])),
+        )
+
+
+def write_bad_share(storage_rref, storage_index):
+    """
+    Write a share with a corrupt URI extension block.
+    """
+    # Write some trash to the right bucket on this storage server.  It won't
+    # have a recoverable UEB block.
+    return write_share(storage_rref, storage_index, [0], b"\0" * 1024)
+
+
+def write_good_share(storage_rref, storage_index, ueb, sharenums):
+    """
+    Write a valid share with the given URI extension block.
+    """
+    write_proxy = make_write_bucket_proxy(
+        storage_rref,
+        None,
+        1024,
+        ueb["segment_size"],
+        1,
+        1,
+        ueb["size"],
+    )
+    # See allmydata/immutable/layout.py
+    offset = write_proxy._offsets["uri_extension"]
+    filler = b"\0" * (offset - len(write_proxy._offset_data))
+    ueb_data = uri.pack_extension(ueb)
+    data = (
+        write_proxy._offset_data +
+        filler +
+        pack(write_proxy.fieldstruct, len(ueb_data)) +
+        ueb_data
+    )
+    return write_share(storage_rref, storage_index, sharenums, data)
+
+
+@inline_callbacks
+def write_share(storage_rref, storage_index, sharenums, sharedata):
+    """
+    Write the given share data to the given storage index using the given
+    IStorageServer remote reference.
+
+    :param foolscap.ipb.IRemoteReference storage_rref: A remote reference to
+        an IStorageServer.
+
+    :param bytes storage_index: The storage index to which to write the share
+        data.
+
+    :param [int] sharenums: The share numbers to which to write this sharedata.
+
+    :param bytes sharedata: The ciphertext to write as the share.
+    """
+    ignored, writers = yield storage_rref.callRemote(
+        "allocate_buckets",
+        storage_index,
+        b"x" * 16,
+        b"x" * 16,
+        sharenums,
+        len(sharedata),
+        LocalWrapper(None),
+    )
+    [writer] = writers.values()
+    yield writer.callRemote("write", 0, sharedata)
+    yield writer.callRemote("close")
src/allmydata/test/test_iputil.py
@@ -13,9 +13,16 @@ from future.utils import PY2, native_str
 if PY2:
     from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
 
-import re, errno, subprocess, os, socket
+import os, socket
 import gc
 
+from testtools.matchers import (
+    MatchesAll,
+    IsInstance,
+    AllMatch,
+    MatchesPredicate,
+)
+
 from twisted.trial import unittest
 
 from tenacity import retry, stop_after_attempt
@@ -23,172 +30,14 @@ from tenacity import retry, stop_after_attempt
 from foolscap.api import Tub
 
 from allmydata.util import iputil, gcutil
-import allmydata.test.common_util as testutil
-from allmydata.util.namespace import Namespace
 
-DOTTED_QUAD_RE=re.compile(r"^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$")
+from ..util.iputil import (
+    get_local_addresses_sync,
+)
 
-# Mock output from subprocesses should be bytes, that's what happens on both
-# Python 2 and Python 3:
-MOCK_IPADDR_OUTPUT = b"""\
-1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \n\
-    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
-    inet 127.0.0.1/8 scope host lo
-    inet6 ::1/128 scope host \n\
-       valid_lft forever preferred_lft forever
-2: eth1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
-    link/ether d4:3d:7e:01:b4:3e brd ff:ff:ff:ff:ff:ff
-    inet 192.168.0.6/24 brd 192.168.0.255 scope global eth1
-    inet6 fe80::d63d:7eff:fe01:b43e/64 scope link \n\
-       valid_lft forever preferred_lft forever
-3: wlan0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
-    link/ether 90:f6:52:27:15:0a brd ff:ff:ff:ff:ff:ff
-    inet 192.168.0.2/24 brd 192.168.0.255 scope global wlan0
-    inet6 fe80::92f6:52ff:fe27:150a/64 scope link \n\
-       valid_lft forever preferred_lft forever
-"""
+from .common import (
+    SyncTestCase,
+)
 
-MOCK_IFCONFIG_OUTPUT = b"""\
-eth1      Link encap:Ethernet  HWaddr d4:3d:7e:01:b4:3e \n\
-          inet addr:192.168.0.6  Bcast:192.168.0.255  Mask:255.255.255.0
-          inet6 addr: fe80::d63d:7eff:fe01:b43e/64 Scope:Link
-          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
-          RX packets:154242234 errors:0 dropped:0 overruns:0 frame:0
-          TX packets:155461891 errors:0 dropped:0 overruns:0 carrier:0
-          collisions:0 txqueuelen:1000 \n\
-          RX bytes:84367213640 (78.5 GiB)  TX bytes:73401695329 (68.3 GiB)
-          Interrupt:20 Memory:f4f00000-f4f20000 \n\
-
-lo        Link encap:Local Loopback \n\
-          inet addr:127.0.0.1  Mask:255.0.0.0
-          inet6 addr: ::1/128 Scope:Host
-          UP LOOPBACK RUNNING  MTU:16436  Metric:1
-          RX packets:27449267 errors:0 dropped:0 overruns:0 frame:0
-          TX packets:27449267 errors:0 dropped:0 overruns:0 carrier:0
-          collisions:0 txqueuelen:0 \n\
-          RX bytes:192643017823 (179.4 GiB)  TX bytes:192643017823 (179.4 GiB)
-
-wlan0     Link encap:Ethernet  HWaddr 90:f6:52:27:15:0a \n\
-          inet addr:192.168.0.2  Bcast:192.168.0.255  Mask:255.255.255.0
-          inet6 addr: fe80::92f6:52ff:fe27:150a/64 Scope:Link
-          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
-          RX packets:12352750 errors:0 dropped:0 overruns:0 frame:0
-          TX packets:4501451 errors:0 dropped:0 overruns:0 carrier:0
-          collisions:0 txqueuelen:1000 \n\
-          RX bytes:3916475942 (3.6 GiB)  TX bytes:458353654 (437.1 MiB)
-"""
-
-# This is actually from a VirtualBox VM running XP.
-MOCK_ROUTE_OUTPUT = b"""\
-===========================================================================
-Interface List
-0x1 ........................... MS TCP Loopback interface
-0x2 ...08 00 27 c3 80 ad ...... AMD PCNET Family PCI Ethernet Adapter - Packet Scheduler Miniport
-===========================================================================
-===========================================================================
-Active Routes:
-Network Destination        Netmask          Gateway       Interface  Metric
-          0.0.0.0          0.0.0.0         10.0.2.2       10.0.2.15       20
-         10.0.2.0    255.255.255.0        10.0.2.15       10.0.2.15       20
-        10.0.2.15  255.255.255.255        127.0.0.1       127.0.0.1       20
-   10.255.255.255  255.255.255.255        10.0.2.15       10.0.2.15       20
-        127.0.0.0        255.0.0.0        127.0.0.1       127.0.0.1        1
-        224.0.0.0        240.0.0.0        10.0.2.15       10.0.2.15       20
-  255.255.255.255  255.255.255.255        10.0.2.15       10.0.2.15        1
-Default Gateway:          10.0.2.2
-===========================================================================
-Persistent Routes:
-  None
-"""
-
-UNIX_TEST_ADDRESSES = set(["127.0.0.1", "192.168.0.6", "192.168.0.2", "192.168.0.10"])
-WINDOWS_TEST_ADDRESSES = set(["127.0.0.1", "10.0.2.15", "192.168.0.10"])
-CYGWIN_TEST_ADDRESSES = set(["127.0.0.1", "192.168.0.10"])
-
-
-class FakeProcess(object):
-    def __init__(self, output, err):
-        self.output = output
-        self.err = err
-    def communicate(self):
-        return (self.output, self.err)
-
-
-class ListAddresses(testutil.SignalMixin, unittest.TestCase):
-    def test_get_local_ip_for(self):
-        addr = iputil.get_local_ip_for('127.0.0.1')
-        self.failUnless(DOTTED_QUAD_RE.match(addr))
-        # Bytes can be taken as input:
-        bytes_addr = iputil.get_local_ip_for(b'127.0.0.1')
-        self.assertEqual(addr, bytes_addr)
-        # The output is a native string:
-        self.assertIsInstance(addr, native_str)
-
-    def test_list_async(self):
-        d = iputil.get_local_addresses_async()
-        def _check(addresses):
-            self.failUnlessIn("127.0.0.1", addresses)
-            self.failIfIn("0.0.0.0", addresses)
-        d.addCallbacks(_check)
-        return d
-    # David A.'s OpenSolaris box timed out on this test one time when it was at 2s.
-    test_list_async.timeout=4
-
-    def _test_list_async_mock(self, command, output, expected):
-        ns = Namespace()
-        ns.first = True
-
-        def call_Popen(args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None,
-                       preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None,
-                       universal_newlines=False, startupinfo=None, creationflags=0):
-            if ns.first:
-                ns.first = False
-                e = OSError("EINTR")
-                e.errno = errno.EINTR
-                raise e
-            elif os.path.basename(args[0]) == command:
-                return FakeProcess(output, "")
-            else:
-                e = OSError("[Errno 2] No such file or directory")
-                e.errno = errno.ENOENT
-                raise e
-        self.patch(subprocess, 'Popen', call_Popen)
-        self.patch(os.path, 'isfile', lambda x: True)
-
-        def call_get_local_ip_for(target):
-            if target in ("localhost", "127.0.0.1"):
-                return "127.0.0.1"
-            else:
-                return "192.168.0.10"
-        self.patch(iputil, 'get_local_ip_for', call_get_local_ip_for)
-
-        def call_which(name):
-            return [name]
-        self.patch(iputil, 'which', call_which)
-
-        d = iputil.get_local_addresses_async()
-        def _check(addresses):
-            self.failUnlessEquals(set(addresses), set(expected))
-        d.addCallbacks(_check)
-        return d
-
-    def test_list_async_mock_ip_addr(self):
-        self.patch(iputil, 'platform', "linux2")
-        return self._test_list_async_mock("ip", MOCK_IPADDR_OUTPUT, UNIX_TEST_ADDRESSES)
-
-    def test_list_async_mock_ifconfig(self):
-        self.patch(iputil, 'platform', "linux2")
-        return self._test_list_async_mock("ifconfig", MOCK_IFCONFIG_OUTPUT, UNIX_TEST_ADDRESSES)
-
-    def test_list_async_mock_route(self):
-        self.patch(iputil, 'platform', "win32")
-        return self._test_list_async_mock("route.exe", MOCK_ROUTE_OUTPUT, WINDOWS_TEST_ADDRESSES)
-
-    def test_list_async_mock_cygwin(self):
-        self.patch(iputil, 'platform', "cygwin")
-        return self._test_list_async_mock(None, None, CYGWIN_TEST_ADDRESSES)
-
 
 class ListenOnUsed(unittest.TestCase):
     """Tests for listenOnUnused."""
@@ -261,3 +110,29 @@ class GcUtil(unittest.TestCase):
         self.assertEqual(len(collections), 0)
         tracker.allocate()
         self.assertEqual(len(collections), 1)
+
+
+class GetLocalAddressesSyncTests(SyncTestCase):
+    """
+    Tests for ``get_local_addresses_sync``.
+    """
+    def test_some_ipv4_addresses(self):
+        """
+        ``get_local_addresses_sync`` returns a list of IPv4 addresses as native
+        strings.
+        """
+        self.assertThat(
+            get_local_addresses_sync(),
+            MatchesAll(
+                IsInstance(list),
+                AllMatch(
+                    MatchesAll(
+                        IsInstance(native_str),
+                        MatchesPredicate(
+                            lambda addr: socket.inet_pton(socket.AF_INET, addr),
+                            "%r is not an IPv4 address.",
+                        ),
+                    ),
+                ),
+            ),
+        )
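The matcher pile-up in ``test_some_ipv4_addresses`` composes cleanly: ``MatchesAll`` requires every sub-matcher to pass, ``AllMatch`` applies one matcher to each element, and ``MatchesPredicate`` adapts a plain callable. A standalone sketch (requires ``testtools``; ``match()`` returns ``None`` on success and a mismatch object on failure):

    import socket
    from testtools.matchers import (
        AllMatch,
        IsInstance,
        MatchesAll,
        MatchesPredicate,
    )

    is_ipv4 = MatchesPredicate(
        # inet_pton returns packed bytes for a valid dotted quad.
        lambda addr: socket.inet_pton(socket.AF_INET, addr),
        "%r is not an IPv4 address.",
    )
    matcher = MatchesAll(IsInstance(list), AllMatch(is_ipv4))

    print(matcher.match(["127.0.0.1", "192.168.0.6"]))  # -> None (success)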
src/allmydata/test/test_node.py
@@ -1,3 +1,15 @@
+"""
+Ported to Python 3.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
 import base64
 import os
 import stat
@@ -166,13 +178,13 @@ class TestCase(testutil.SignalMixin, unittest.TestCase):
     def test_tahoe_cfg_utf8(self):
         basedir = "test_node/test_tahoe_cfg_utf8"
         fileutil.make_dirs(basedir)
-        f = open(os.path.join(basedir, 'tahoe.cfg'), 'wt')
+        f = open(os.path.join(basedir, 'tahoe.cfg'), 'wb')
         f.write(u"\uFEFF[node]\n".encode('utf-8'))
         f.write(u"nickname = \u2621\n".encode('utf-8'))
         f.close()
 
         config = read_config(basedir, "")
-        self.failUnlessEqual(config.get_config("node", "nickname").decode('utf-8'),
+        self.failUnlessEqual(config.get_config("node", "nickname"),
                              u"\u2621")
 
     def test_tahoe_cfg_hash_in_name(self):
@@ -215,8 +227,8 @@ class TestCase(testutil.SignalMixin, unittest.TestCase):
         config = read_config(basedir, "portnum")
         self.assertEqual(
             config.items("node"),
-            [(b"nickname", b"foo"),
-             (b"timeout.disconnect", b"12"),
+            [("nickname", "foo"),
+             ("timeout.disconnect", "12"),
             ],
         )
 
@@ -393,7 +405,7 @@ class TestMissingPorts(unittest.TestCase):
         with get_addr, alloc_port:
             tubport, tublocation = _tub_portlocation(config)
         self.assertEqual(tubport, "tcp:777")
-        self.assertEqual(tublocation, "tcp:LOCAL:777")
+        self.assertEqual(tublocation, b"tcp:LOCAL:777")
 
     def test_parsing_defaults(self):
         """
@@ -415,7 +427,7 @@ class TestMissingPorts(unittest.TestCase):
         with get_addr, alloc_port:
             tubport, tublocation = _tub_portlocation(config)
         self.assertEqual(tubport, "tcp:999")
-        self.assertEqual(tublocation, "tcp:LOCAL:999")
+        self.assertEqual(tublocation, b"tcp:LOCAL:999")
 
     def test_parsing_location_complex(self):
         """
@@ -438,7 +450,7 @@ class TestMissingPorts(unittest.TestCase):
         with get_addr, alloc_port:
             tubport, tublocation = _tub_portlocation(config)
         self.assertEqual(tubport, "tcp:999")
-        self.assertEqual(tublocation, "tcp:HOST:888,tcp:LOCAL:999")
+        self.assertEqual(tublocation, b"tcp:HOST:888,tcp:LOCAL:999")
 
     def test_parsing_all_disabled(self):
         """
@@ -566,7 +578,7 @@ enabled = true
 
 class FakeTub(object):
     def __init__(self):
-        self.tubID = base64.b32encode("foo")
+        self.tubID = base64.b32encode(b"foo")
         self.listening_ports = []
     def setOption(self, name, value): pass
     def removeAllConnectionHintHandlers(self): pass
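The ``FakeTub`` fix above is a pure Python 3 porting change: ``base64.b32encode`` accepts only bytes, so the literal gains a ``b`` prefix. For example:

    import base64

    print(base64.b32encode(b"foo"))  # b'MZXW6==='

    try:
        base64.b32encode(u"foo")     # works on Python 2, TypeError on Python 3
    except TypeError as e:
        print(e)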
src/allmydata/test/test_util.py
@@ -413,6 +413,16 @@ class FileUtil(ReallyEqualMixin, unittest.TestCase):
         f.write(b"foobar")
         f.close()
 
+    def test_write(self):
+        """fileutil.write() can write both unicode and bytes."""
+        path = self.mktemp()
+        fileutil.write(path, b"abc")
+        with open(path, "rb") as f:
+            self.assertEqual(f.read(), b"abc")
+        fileutil.write(path, u"def \u1234")
+        with open(path, "rb") as f:
+            self.assertEqual(f.read(), u"def \u1234".encode("utf-8"))
+
 
 class PollMixinTests(unittest.TestCase):
     def setUp(self):
src/allmydata/util/_python3.py
@@ -46,6 +46,7 @@ PORTED_MODULES = [
     "allmydata.immutable.happiness_upload",
     "allmydata.immutable.layout",
     "allmydata.immutable.literal",
+    "allmydata.immutable.offloaded",
     "allmydata.immutable.upload",
     "allmydata.interfaces",
     "allmydata.introducer.interfaces",
@@ -120,6 +121,7 @@ PORTED_TEST_MODULES = [
     "allmydata.test.test_monitor",
     "allmydata.test.test_netstring",
     "allmydata.test.test_no_network",
+    "allmydata.test.test_node",
     "allmydata.test.test_observer",
     "allmydata.test.test_pipeline",
     "allmydata.test.test_python3",
src/allmydata/util/configutil.py
@@ -1,7 +1,7 @@
 """
 Read/write config files.
 
-Configuration is returned as native strings.
+Configuration is returned as Unicode strings.
 
 Ported to Python 3.
 """
@@ -12,17 +12,11 @@ from __future__ import unicode_literals
 
 from future.utils import PY2
 if PY2:
-    # We don't do open(), because we want files to read/write native strs when
-    # we do "r" or "w".
-    from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
-
-if PY2:
-    # In theory on Python 2 configparser also works, but then code gets the
-    # wrong exceptions and they don't get handled. So just use native parser
-    # for now.
-    from ConfigParser import SafeConfigParser
-else:
-    from configparser import SafeConfigParser
+    from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
+# On Python 2 we use the backport package; that means we always get unicode
+# out.
+from configparser import ConfigParser
 
 import attr
@@ -36,19 +30,27 @@ class UnknownConfigError(Exception):
 
 
 def get_config(tahoe_cfg):
-    """Load the config, returning a SafeConfigParser.
+    """Load the config, returning a ConfigParser.
 
-    Configuration is returned as native strings.
+    Configuration is returned as Unicode strings.
     """
-    config = SafeConfigParser()
-    with open(tahoe_cfg, "r") as f:
-        # On Python 2, where we read in bytes, skip any initial Byte Order
-        # Mark. Since this is an ordinary file, we don't need to handle
-        # incomplete reads, and can assume seekability.
-        if PY2 and f.read(3) != b'\xEF\xBB\xBF':
-            f.seek(0)
-        config.readfp(f)
-    return config
+    # Byte Order Mark is an optional garbage code point you sometimes get at
+    # the start of UTF-8 encoded files. Especially on Windows. Skip it by using
+    # utf-8-sig. https://en.wikipedia.org/wiki/Byte_order_mark
+    with open(tahoe_cfg, "r", encoding="utf-8-sig") as f:
+        cfg_string = f.read()
+    return get_config_from_string(cfg_string)
+
+
+def get_config_from_string(tahoe_cfg_string):
+    """Load the config from a string, return the ConfigParser.
+
+    Configuration is returned as Unicode strings.
+    """
+    parser = ConfigParser(strict=False)
+    parser.read_string(tahoe_cfg_string)
+    return parser
 
 
 def set_config(config, section, option, value):
     if not config.has_section(section):
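To see why ``utf-8-sig`` is the right codec here: a BOM read with plain UTF-8 survives as ``\ufeff`` glued to the first section header and confuses the parser, while ``utf-8-sig`` silently strips it. A small standard-library-only check:

    import codecs
    from configparser import ConfigParser

    raw = codecs.BOM_UTF8 + b"[node]\nnickname = example\n"

    print(repr(raw.decode("utf-8")[:7]))      # '\ufeff[node]' -- BOM kept
    print(repr(raw.decode("utf-8-sig")[:6]))  # '[node]'       -- BOM stripped

    parser = ConfigParser(strict=False)
    parser.read_string(raw.decode("utf-8-sig"))
    print(parser.get("node", "nickname"))     # example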
src/allmydata/util/fileutil.py
@@ -271,11 +271,13 @@ def write_atomically(target, contents, mode="b"):
     move_into_place(target+".tmp", target)
 
 def write(path, data, mode="wb"):
+    if "b" in mode and isinstance(data, str):
+        data = data.encode("utf-8")
     with open(path, mode) as f:
         f.write(data)
 
-def read(path):
-    with open(path, "rb") as rf:
+def read(path, mode="rb"):
+    with open(path, mode) as rf:
         return rf.read()
 
 def put_file(path, inf):
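The ``write`` change above makes binary-mode writes accept text by encoding it as UTF-8 first. A standalone sketch of the same coercion, mirroring the patched helper:

    import os
    import tempfile

    def write(path, data, mode="wb"):
        # Text handed to a binary-mode write is encoded as UTF-8; bytes
        # pass straight through.
        if "b" in mode and isinstance(data, str):
            data = data.encode("utf-8")
        with open(path, mode) as f:
            f.write(data)

    path = os.path.join(tempfile.mkdtemp(), "example")
    write(path, u"caf\u00e9")
    with open(path, "rb") as f:
        print(f.read())  # b'caf\xc3\xa9'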
src/allmydata/util/iputil.py
@@ -13,19 +13,19 @@ from future.utils import PY2, native_str
 if PY2:
     from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
 
-import os, re, socket, subprocess, errno
-from sys import platform
+import os, socket
 
 from zope.interface import implementer
 
 import attr
 
+from netifaces import (
+    interfaces,
+    ifaddresses,
+)
+
 # from Twisted
 from twisted.python.reflect import requireModule
-from twisted.internet import defer, threads, reactor
-from twisted.internet.protocol import DatagramProtocol
-from twisted.internet.error import CannotListenError
-from twisted.python.procutils import which
 from twisted.python import log
 from twisted.internet.endpoints import AdoptedStreamServerEndpoint
 from twisted.internet.interfaces import (
@@ -101,182 +101,23 @@ except ImportError:
 # since one might be shadowing the other. This hack appeases pyflakes.
 increase_rlimits = _increase_rlimits
 
 
 def get_local_addresses_sync():
     """
-    Return a list of IPv4 addresses (as dotted-quad native strings) that are
-    currently configured on this host, sorted in descending order of how likely
-    we think they are to work.
-    """
-    return [native_str(a) for a in _synchronously_find_addresses_via_config()]
-
-def get_local_addresses_async(target="198.41.0.4"): # A.ROOT-SERVERS.NET
-    """
-    Return a Deferred that fires with a list of IPv4 addresses (as dotted-quad
-    native strings) that are currently configured on this host, sorted in
-    descending order of how likely we think they are to work.
-
-    @param target: we want to learn an IP address they could try using to
-        connect to us; The default value is fine, but it might help if you
-        pass the address of a host that you are actually trying to be
-        reachable to.
-    """
-    addresses = []
-    local_ip = get_local_ip_for(target)
-    if local_ip is not None:
-        addresses.append(local_ip)
-
-    if platform == "cygwin":
-        d = _cygwin_hack_find_addresses()
-    else:
-        d = _find_addresses_via_config()
-
-    def _collect(res):
-        for addr in res:
-            if addr != "0.0.0.0" and not addr in addresses:
-                addresses.append(addr)
-        return addresses
-    d.addCallback(_collect)
-    d.addCallback(lambda addresses: [native_str(s) for s in addresses])
-    return d
-
-def get_local_ip_for(target):
-    """Find out what our IP address is for use by a given target.
-
-    @return: the IP address as a dotted-quad native string which could be used
-        to connect to us. It might work for them, it might not. If
-        there is no suitable address (perhaps we don't currently have an
-        externally-visible interface), this will return None.
-    """
-
-    try:
-        target_ipaddr = socket.gethostbyname(target)
-    except socket.gaierror:
-        # DNS isn't running, or somehow we encountered an error
-
-        # note: if an interface is configured and up, but nothing is
-        # connected to it, gethostbyname("A.ROOT-SERVERS.NET") will take 20
-        # seconds to raise socket.gaierror . This is synchronous and occurs
-        # for each node being started, so users of
-        # test.common.SystemTestMixin (like test_system) will see something
-        # like 120s of delay, which may be enough to hit the default trial
-        # timeouts. For that reason, get_local_addresses_async() was changed
-        # to default to the numerical ip address for A.ROOT-SERVERS.NET, to
-        # avoid this DNS lookup. This also makes node startup fractionally
-        # faster.
-        return None
-
-    try:
-        udpprot = DatagramProtocol()
-        port = reactor.listenUDP(0, udpprot)
-        try:
-            # connect() will fail if we're offline (e.g. running tests on a
-            # disconnected laptop), which is fine (localip=None), but we must
-            # still do port.stopListening() or we'll get a DirtyReactorError
-            udpprot.transport.connect(target_ipaddr, 7)
-            localip = udpprot.transport.getHost().host
-            return localip
-        finally:
-            d = port.stopListening()
-            d.addErrback(log.err)
-    except (socket.error, CannotListenError):
-        # no route to that host
-        localip = None
-    return native_str(localip)
-
-
-# Wow, I'm really amazed at home much mileage we've gotten out of calling
-# the external route.exe program on windows...  It appears to work on all
-# versions so far.
-# ... thus wrote Greg Smith in time immemorial...
-# Also, the Win32 APIs for this are really klunky and error-prone. --Daira
-
-_win32_re = re.compile(br'^\s*\d+\.\d+\.\d+\.\d+\s.+\s(?P<address>\d+\.\d+\.\d+\.\d+)\s+(?P<metric>\d+)\s*$', flags=re.M|re.I|re.S)
-_win32_commands = (('route.exe', ('print',), _win32_re),)
-
-# These work in most Unices.
-_addr_re = re.compile(br'^\s*inet [a-zA-Z]*:?(?P<address>\d+\.\d+\.\d+\.\d+)[\s/].+$', flags=re.M|re.I|re.S)
-_unix_commands = (('/bin/ip', ('addr',), _addr_re),
-                  ('/sbin/ip', ('addr',), _addr_re),
-                  ('/sbin/ifconfig', ('-a',), _addr_re),
-                  ('/usr/sbin/ifconfig', ('-a',), _addr_re),
-                  ('/usr/etc/ifconfig', ('-a',), _addr_re),
-                  ('ifconfig', ('-a',), _addr_re),
-                  ('/sbin/ifconfig', (), _addr_re),
-                 )
-
-
-def _find_addresses_via_config():
-    return threads.deferToThread(_synchronously_find_addresses_via_config)
-
-def _synchronously_find_addresses_via_config():
-    # originally by Greg Smith, hacked by Zooko and then Daira
-
-    # We don't reach here for cygwin.
-    if platform == 'win32':
-        commands = _win32_commands
-    else:
-        commands = _unix_commands
-
-    for (pathtotool, args, regex) in commands:
-        # If pathtotool is a fully qualified path then we just try that.
-        # If it is merely an executable name then we use Twisted's
-        # "which()" utility and try each executable in turn until one
-        # gives us something that resembles a dotted-quad IPv4 address.
-
-        if os.path.isabs(pathtotool):
-            exes_to_try = [pathtotool]
-        else:
-            exes_to_try = which(pathtotool)
-
-        subprocess_error = getattr(
-            subprocess, "SubprocessError", subprocess.CalledProcessError
-        )
-        for exe in exes_to_try:
-            try:
-                addresses = _query(exe, args, regex)
-            except (IOError, OSError, ValueError, subprocess_error):
-                addresses = []
-            if addresses:
-                return addresses
-
-    return []
-
-def _query(path, args, regex):
-    if not os.path.isfile(path):
-        return []
-    env = {native_str('LANG'): native_str('en_US.UTF-8')}
-    TRIES = 5
-    for trial in range(TRIES):
-        try:
-            p = subprocess.Popen([path] + list(args), stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
-            (output, err) = p.communicate()
-            break
-        except OSError as e:
-            if e.errno == errno.EINTR and trial < TRIES-1:
-                continue
-            raise
-
-    addresses = []
-    outputsplit = output.split(b'\n')
-    for outline in outputsplit:
-        m = regex.match(outline)
-        if m:
-            addr = m.group('address')
-            if addr not in addresses:
-                addresses.append(addr.decode("utf-8"))
-
-    return addresses
-
-def _cygwin_hack_find_addresses():
-    addresses = []
-    for h in ["localhost", "127.0.0.1",]:
-        addr = get_local_ip_for(h)
-        if addr is not None and addr not in addresses:
-            addresses.append(addr)
-
-    return defer.succeed(addresses)
+    Get locally assigned addresses as dotted-quad native strings.
+
+    :return [str]: A list of IPv4 addresses which are assigned to interfaces
+        on the local system.
+    """
+    return list(
+        native_str(address[native_str("addr")])
+        for iface_name
+        in interfaces()
+        for address
+        in ifaddresses(iface_name).get(socket.AF_INET, [])
+    )
 
 
 def _foolscapEndpointForPortNumber(portnum):
     """
     Create an endpoint that can be passed to ``Tub.listen``.
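For reference, the replacement implementation above delegates all the platform quirks to ``netifaces``: ``interfaces()`` lists interface names and ``ifaddresses(name)`` maps address families to lists of address dicts keyed by ``'addr'``. The same enumeration as a standalone script (requires the ``netifaces`` package):

    import socket
    from netifaces import interfaces, ifaddresses

    def ipv4_addresses():
        # Flatten every AF_INET record across all local interfaces.
        return [
            address["addr"]
            for iface_name in interfaces()
            for address in ifaddresses(iface_name).get(socket.AF_INET, [])
        ]

    print(ipv4_addresses())  # e.g. ['127.0.0.1', '192.168.0.6']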
@@ -382,7 +223,5 @@ def listenOnUnused(tub, portnum=None):
 __all__ = ["allocate_tcp_port",
            "increase_rlimits",
            "get_local_addresses_sync",
-           "get_local_addresses_async",
-           "get_local_ip_for",
            "listenOnUnused",
            ]
src/allmydata/webish.py
@@ -1,3 +1,5 @@
+from six import ensure_str
+
 import re, time
 
 from functools import (
@@ -186,6 +188,8 @@ class WebishServer(service.MultiService):
         self.root.putChild("static", static.File(staticdir))
         if re.search(r'^\d', webport):
             webport = "tcp:"+webport # twisted warns about bare "0" or "3456"
+        # strports must be native strings.
+        webport = ensure_str(webport)
         s = strports.service(webport, self.site)
         s.setServiceParent(self)
 
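``six.ensure_str`` gives a "native string" on both interpreters, which is what Twisted's ``strports.service`` insists on: on Python 2 it encodes ``unicode`` to ``str`` (bytes), on Python 3 it decodes ``bytes`` to ``str`` (text). For example:

    from six import ensure_str

    # Both calls yield the native str type for the running interpreter.
    print(repr(ensure_str(u"tcp:8080")))
    print(repr(ensure_str(b"tcp:8080")))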
tox.ini
@@ -96,7 +96,7 @@ setenv =
          # `TypeError: decode() argument 1 must be string, not None`
          PYTHONIOENCODING=utf_8
 commands =
-         flake8 src static misc setup.py
+         flake8 src integration static misc setup.py
          python misc/coding_tools/check-umids.py src
          python misc/coding_tools/check-debugging.py
          python misc/coding_tools/find-trailing-spaces.py -r src static misc setup.py