mirror of https://github.com/tahoe-lafs/tahoe-lafs.git

Commit 49838afa4d: Merge branch 'master' into 2916.grid-manager-proposal.5

.github/CONTRIBUTING.rst (vendored): 2 lines changed

@@ -17,4 +17,4 @@ Examples of contributions include:
 * `Patch reviews <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/PatchReviewProcess>`_

 Before authoring or reviewing a patch,
-please familiarize yourself with the `coding standard <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CodingStandards>`_.
+please familiarize yourself with the `Coding Standards <https://tahoe-lafs.org/trac/tahoe-lafs/wiki/CodingStandards>`_ and the `Contributor Code of Conduct <../docs/CODE_OF_CONDUCT.md>`_.

docs/conf.py

@@ -28,7 +28,7 @@ import os
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = []
+extensions = ['recommonmark']

 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']

@@ -36,7 +36,7 @@ templates_path = ['_templates']
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ['.rst', '.md']

 # The encoding of source files.
 #source_encoding = 'utf-8-sig'

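The two conf.py changes work together: the recommonmark extension registers a Markdown parser with Sphinx, and the widened source_suffix list makes Sphinx pick up .md sources at all. A minimal sketch of the resulting configuration (just the lines touched here, assuming a recommonmark-era Sphinx):

    # docs/conf.py after this change: Sphinx now builds both reStructuredText
    # and Markdown sources, with recommonmark parsing the .md files.
    extensions = ['recommonmark']
    source_suffix = ['.rst', '.md']

This is what allows the Contributor Code of Conduct, a Markdown file linked from CONTRIBUTING.rst above, to be rendered as part of the documentation tree.
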
docs/contributing.rst (new file, 1 line)

@@ -0,0 +1 @@
+.. include:: ../.github/CONTRIBUTING.rst

docs/frontends/webapi.rst

@@ -2032,10 +2032,11 @@ potential for surprises when the file store structure is changed.

 Tahoe-LAFS provides a mutable file store, but the ways that the store can
 change are limited. The only things that can change are:
+
 * the mapping from child names to child objects inside mutable directories
   (by adding a new child, removing an existing child, or changing an
   existing child to point to a different object)
 * the contents of mutable files

 Obviously if you query for information about the file store and then act
 to change it (such as by getting a listing of the contents of a mutable

docs/index.rst

@@ -23,8 +23,9 @@ Contents:
    frontends/download-status

    known_issues
-   ../.github/CONTRIBUTING
+   contributing
    CODE_OF_CONDUCT
+   release-checklist

    servers
    managed-grid

docs/release-checklist.rst

@@ -40,23 +40,31 @@ Create Branch and Apply Updates
 - Create a branch for release-candidates (e.g. `XXXX.release-1.15.0.rc0`)
 - run `tox -e news` to produce a new NEWS.txt file (this does a commit)
 - create the news for the release
+
   - newsfragments/<ticket number>.minor
   - commit it
+
 - manually fix NEWS.txt
+
   - proper title for latest release ("Release 1.15.0" instead of "Release ...post1432")
   - double-check date (maybe release will be in the future)
   - spot-check the release notes (these come from the newsfragments
     files though so don't do heavy editing)
   - commit these changes
+
 - update "relnotes.txt"
+
   - update all mentions of 1.14.0 -> 1.15.0
   - update "previous release" statement and date
   - summarize major changes
   - commit it
+
 - update "CREDITS"
+
   - are there any new contributors in this release?
   - one way: git log release-1.14.0.. | grep Author | sort | uniq
   - commit it
+
 - update "docs/known_issues.rst" if appropriate
 - update "docs/INSTALL.rst" references to the new release
 - Push the branch to github

@@ -82,21 +90,32 @@ they will need to evaluate which contributors' signatures they trust.

 - (all steps above are completed)
 - sign the release
+
   - git tag -s -u 0xE34E62D06D0E69CFCA4179FFBDE0D31D68666A7A -m "release Tahoe-LAFS-1.15.0rc0" tahoe-lafs-1.15.0rc0
   - (replace the key-id above with your own)
+
 - build all code locally
 - these should all pass:
+
   - tox -e py27,codechecks,docs,integration
+
 - these can fail (ideally they should not of course):
+
   - tox -e deprecations,upcoming-deprecations
+
 - build tarballs
+
   - tox -e tarballs
   - confirm it at least exists:
   - ls dist/ | grep 1.15.0rc0
+
 - inspect and test the tarballs
+
   - install each in a fresh virtualenv
   - run `tahoe` command
+
 - when satisfied, sign the tarballs:
+
   - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0-py2-none-any.whl
   - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.tar.bz2
   - gpg --pinentry=loopback --armor --detach-sign dist/tahoe_lafs-1.15.0rc0.tar.gz

@@ -129,6 +148,7 @@ need to be uploaded to https://tahoe-lafs.org in `~source/downloads`
 https://tahoe-lafs.org/downloads/ on the Web.
 - scp dist/*1.15.0* username@tahoe-lafs.org:/home/source/downloads
 - the following developers have access to do this:
+
   - exarkun
   - meejah
   - warner

@@ -137,8 +157,9 @@ For the actual release, the tarball and signature files need to be
 uploaded to PyPI as well.

 - how to do this?
-- (original guide says only "twine upload dist/*")
+- (original guide says only `twine upload dist/*`)
 - the following developers have access to do this:
+
   - warner
   - exarkun (partial?)
   - meejah (partial?)

integration/conftest.py

@@ -7,6 +7,7 @@ from os import mkdir, listdir, environ
 from os.path import join, exists
 from tempfile import mkdtemp, mktemp
 from functools import partial
+from json import loads

 from foolscap.furl import (
     decode_furl,

@@ -37,6 +38,10 @@ from util import (
     _tahoe_runner_optional_coverage,
     await_client_ready,
     TahoeProcess,
+    cli,
+    _run_node,
+    generate_ssh_key,
+    block_with_timeout,
 )


@@ -152,7 +157,7 @@ def flog_gatherer(reactor, temp_dir, flog_binary, request):
     )
     print("Waiting for flogtool to complete")
     try:
-        pytest_twisted.blockon(flog_protocol.done)
+        block_with_timeout(flog_protocol.done, reactor)
     except ProcessTerminated as e:
         print("flogtool exited unexpectedly: {}".format(str(e)))
     print("Flogtool completed")

@@ -293,7 +298,7 @@ log_gatherer.furl = {log_furl}
     def cleanup():
         try:
             transport.signalProcess('TERM')
-            pytest_twisted.blockon(protocol.exited)
+            block_with_timeout(protocol.exited, reactor)
         except ProcessExitedAlready:
             pass
     request.addfinalizer(cleanup)

@@ -347,8 +352,50 @@ def alice(reactor, temp_dir, introducer_furl, flog_gatherer, storage_nodes, requ
             reactor, request, temp_dir, introducer_furl, flog_gatherer, "alice",
             web_port="tcp:9980:interface=localhost",
             storage=False,
+            # We're going to kill this ourselves, so no need for finalizer to
+            # do it:
+            finalize=False,
         )
     )
+    await_client_ready(process)
+
+    # 1. Create a new RW directory cap:
+    cli(process, "create-alias", "test")
+    rwcap = loads(cli(process, "list-aliases", "--json"))["test"]["readwrite"]
+
+    # 2. Enable SFTP on the node:
+    host_ssh_key_path = join(process.node_dir, "private", "ssh_host_rsa_key")
+    accounts_path = join(process.node_dir, "private", "accounts")
+    with open(join(process.node_dir, "tahoe.cfg"), "a") as f:
+        f.write("""\
+[sftpd]
+enabled = true
+port = tcp:8022:interface=127.0.0.1
+host_pubkey_file = {ssh_key_path}.pub
+host_privkey_file = {ssh_key_path}
+accounts.file = {accounts_path}
+""".format(ssh_key_path=host_ssh_key_path, accounts_path=accounts_path))
+    generate_ssh_key(host_ssh_key_path)
+
+    # 3. Add a SFTP access file with username/password and SSH key auth.
+
+    # The client SSH key path is typically going to be somewhere else (~/.ssh,
+    # typically), but for convenience sake for testing we'll put it inside node.
+    client_ssh_key_path = join(process.node_dir, "private", "ssh_client_rsa_key")
+    generate_ssh_key(client_ssh_key_path)
+    # Pub key format is "ssh-rsa <thekey> <username>". We want the key.
+    ssh_public_key = open(client_ssh_key_path + ".pub").read().strip().split()[1]
+    with open(accounts_path, "w") as f:
+        f.write("""\
+alice password {rwcap}
+
+alice2 ssh-rsa {ssh_public_key} {rwcap}
+""".format(rwcap=rwcap, ssh_public_key=ssh_public_key))
+
+    # 4. Restart the node with new SFTP config.
+    process.kill()
+    pytest_twisted.blockon(_run_node(reactor, process.node_dir, request, None))
+
     await_client_ready(process)
     return process

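For concreteness, after step 3 the generated accounts file has one line per test user; with the format strings above filled in it looks like this (key body and rootcap elided, since both are generated fresh per run):

    alice password URI:DIR2:...

    alice2 ssh-rsa AAAA... URI:DIR2:...

The first field is the username, the second is either a literal password or an SSH public key, and the last is the Tahoe rootcap that user is rooted at; this is exactly the format parsed by AccountFileChecker in src/allmydata/frontends/auth.py below.
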
@@ -490,7 +537,13 @@ def tor_network(reactor, temp_dir, chutney, request):
             path=join(chutney_dir),
             env=env,
         )
-        pytest_twisted.blockon(proto.done)
+        try:
+            block_with_timeout(proto.done, reactor)
+        except ProcessTerminated:
+            # If this doesn't exit cleanly, that's fine, that shouldn't fail
+            # the test suite.
+            pass
+
     request.addfinalizer(cleanup)

     return chut

integration/test_sftp.py (new file, 162 lines)

@@ -0,0 +1,162 @@
"""
It's possible to create/rename/delete files and directories in Tahoe-LAFS using
SFTP.

These tests use Paramiko, rather than Twisted's Conch, because:

    1. It's a different implementation, so we're not testing Conch against
       itself.

    2. Its API is much simpler to use.
"""

from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from future.utils import PY2
if PY2:
    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401

from posixpath import join
from stat import S_ISDIR

from paramiko import SSHClient
from paramiko.client import AutoAddPolicy
from paramiko.sftp_client import SFTPClient
from paramiko.ssh_exception import AuthenticationException
from paramiko.rsakey import RSAKey

import pytest

from .util import generate_ssh_key, run_in_thread


def connect_sftp(connect_args={"username": "alice", "password": "password"}):
    """Create an SFTP client."""
    client = SSHClient()
    client.set_missing_host_key_policy(AutoAddPolicy)
    client.connect("localhost", port=8022, look_for_keys=False,
                   allow_agent=False, **connect_args)
    sftp = SFTPClient.from_transport(client.get_transport())

    def rmdir(path, delete_root=True):
        for f in sftp.listdir_attr(path=path):
            childpath = join(path, f.filename)
            if S_ISDIR(f.st_mode):
                rmdir(childpath)
            else:
                sftp.remove(childpath)
        if delete_root:
            sftp.rmdir(path)

    # Delete any files left over from previous tests :(
    rmdir("/", delete_root=False)

    return sftp


@run_in_thread
def test_bad_account_password_ssh_key(alice, tmpdir):
    """
    Can't login with unknown username, wrong password, or wrong SSH pub key.
    """
    # Wrong password, wrong username:
    for u, p in [("alice", "wrong"), ("someuser", "password")]:
        with pytest.raises(AuthenticationException):
            connect_sftp(connect_args={
                "username": u, "password": p,
            })

    another_key = join(str(tmpdir), "ssh_key")
    generate_ssh_key(another_key)
    good_key = RSAKey(filename=join(alice.node_dir, "private", "ssh_client_rsa_key"))
    bad_key = RSAKey(filename=another_key)

    # Wrong key:
    with pytest.raises(AuthenticationException):
        connect_sftp(connect_args={
            "username": "alice2", "pkey": bad_key,
        })

    # Wrong username:
    with pytest.raises(AuthenticationException):
        connect_sftp(connect_args={
            "username": "someoneelse", "pkey": good_key,
        })


@run_in_thread
def test_ssh_key_auth(alice):
    """It's possible to login authenticating with SSH public key."""
    key = RSAKey(filename=join(alice.node_dir, "private", "ssh_client_rsa_key"))
    sftp = connect_sftp(connect_args={
        "username": "alice2", "pkey": key
    })
    assert sftp.listdir() == []


@run_in_thread
def test_read_write_files(alice):
    """It's possible to upload and download files."""
    sftp = connect_sftp()
    with sftp.file("myfile", "wb") as f:
        f.write(b"abc")
        f.write(b"def")

    with sftp.file("myfile", "rb") as f:
        assert f.read(4) == b"abcd"
        assert f.read(2) == b"ef"
        assert f.read(1) == b""


@run_in_thread
def test_directories(alice):
    """
    It's possible to create, list directories, and create and remove files in
    them.
    """
    sftp = connect_sftp()
    assert sftp.listdir() == []

    sftp.mkdir("childdir")
    assert sftp.listdir() == ["childdir"]

    with sftp.file("myfile", "wb") as f:
        f.write(b"abc")
    assert sorted(sftp.listdir()) == ["childdir", "myfile"]

    sftp.chdir("childdir")
    assert sftp.listdir() == []

    with sftp.file("myfile2", "wb") as f:
        f.write(b"def")
    assert sftp.listdir() == ["myfile2"]

    sftp.chdir(None)  # root
    with sftp.file("childdir/myfile2", "rb") as f:
        assert f.read() == b"def"

    sftp.remove("myfile")
    assert sftp.listdir() == ["childdir"]

    sftp.rmdir("childdir")
    assert sftp.listdir() == []


@run_in_thread
def test_rename(alice):
    """Directories and files can be renamed."""
    sftp = connect_sftp()
    sftp.mkdir("dir")

    filepath = join("dir", "file")
    with sftp.file(filepath, "wb") as f:
        f.write(b"abc")

    sftp.rename(filepath, join("dir", "file2"))
    sftp.rename("dir", "dir2")

    with sftp.file(join("dir2", "file2"), "rb") as f:
        assert f.read() == b"abc"

integration/test_web.py

@@ -127,12 +127,12 @@ def test_deep_stats(alice):
         dircap_uri,
         data={
             u"t": u"upload",
-            u"when_done": u".",
         },
         files={
             u"file": FILE_CONTENTS,
         },
     )
+    resp.raise_for_status()

     # confirm the file is in the directory
     resp = requests.get(

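The added resp.raise_for_status() is the standard requests idiom for making a 4xx/5xx reply fail fast; without it a broken upload would only surface later as a confusing assertion failure. A hedged sketch of the behaviour (URL illustrative):

    import requests

    resp = requests.get("http://127.0.0.1:9980/statistics")  # illustrative URL
    resp.raise_for_status()  # raises requests.exceptions.HTTPError on 4xx/5xx,
                             # returns None (execution continues) on success
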
@@ -175,6 +175,7 @@ def test_deep_stats(alice):
         time.sleep(.5)


+@util.run_in_thread
 def test_status(alice):
     """
     confirm we get something sensible from /status and the various sub-types

integration/util.py

@@ -5,6 +5,7 @@ from os import mkdir, environ
 from os.path import exists, join
 from six.moves import StringIO
 from functools import partial
+from subprocess import check_output

 from twisted.python.filepath import (
     FilePath,

@@ -12,9 +13,13 @@ from twisted.python.filepath import (
 from twisted.internet.defer import Deferred, succeed
 from twisted.internet.protocol import ProcessProtocol
 from twisted.internet.error import ProcessExitedAlready, ProcessDone
+from twisted.internet.threads import deferToThread

 import requests

+from paramiko.rsakey import RSAKey
+from boltons.funcutils import wraps
+
 from allmydata.util.configutil import (
     get_config,
     set_config,

@@ -25,6 +30,12 @@ from allmydata import client
 import pytest_twisted


+def block_with_timeout(deferred, reactor, timeout=120):
+    """Block until Deferred has result, but timeout instead of waiting forever."""
+    deferred.addTimeout(timeout, reactor)
+    return pytest_twisted.blockon(deferred)
+
+
 class _ProcessExitedProtocol(ProcessProtocol):
     """
     Internal helper that .callback()s on self.done when the process

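block_with_timeout is the load-bearing helper of this patch. Deferred.addTimeout (Twisted 16.5+) arranges for the Deferred to errback with twisted.internet.defer.TimeoutError if it has not fired within timeout seconds, so pytest_twisted.blockon raises instead of blocking forever on a wedged node. A minimal sketch of the failure mode it fixes (illustrative, not from the patch):

    from twisted.internet import defer, reactor

    d = defer.Deferred()        # nothing will ever fire this Deferred
    d.addTimeout(120, reactor)  # after 120s the reactor errbacks it with
                                # twisted.internet.defer.TimeoutError
    # pytest_twisted.blockon(d) now fails the test instead of hanging the run
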
@@ -123,11 +134,12 @@ def _cleanup_tahoe_process(tahoe_transport, exited):

     :return: After the process has exited.
     """
+    from twisted.internet import reactor
     try:
         print("signaling {} with TERM".format(tahoe_transport.pid))
         tahoe_transport.signalProcess('TERM')
         print("signaled, blocking on exit")
-        pytest_twisted.blockon(exited)
+        block_with_timeout(exited, reactor)
         print("exited, goodbye")
     except ProcessExitedAlready:
         pass

@@ -175,11 +187,15 @@ class TahoeProcess(object):
             u"portnum",
         )

+    def kill(self):
+        """Kill the process, block until it's done."""
+        _cleanup_tahoe_process(self.transport, self.transport.exited)
+
     def __str__(self):
         return "<TahoeProcess in '{}'>".format(self._node_dir)


-def _run_node(reactor, node_dir, request, magic_text):
+def _run_node(reactor, node_dir, request, magic_text, finalize=True):
     """
     Run a tahoe process from its node_dir.

@@ -203,7 +219,8 @@ def _run_node(reactor, node_dir, request, magic_text):
     )
     transport.exited = protocol.exited

-    request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))
+    if finalize:
+        request.addfinalizer(partial(_cleanup_tahoe_process, transport, protocol.exited))

     # XXX abusing the Deferred; should use .when_magic_seen() pattern

@@ -222,7 +239,8 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
                  magic_text=None,
                  needed=2,
                  happy=3,
-                 total=4):
+                 total=4,
+                 finalize=True):
     """
     Helper to create a single node, run it and return the instance
     spawnProcess returned (ITransport)

@@ -270,7 +288,7 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
     d = Deferred()
     d.callback(None)
     d.addCallback(lambda _: created_d)
-    d.addCallback(lambda _: _run_node(reactor, node_dir, request, magic_text))
+    d.addCallback(lambda _: _run_node(reactor, node_dir, request, magic_text, finalize=finalize))
     return d

@@ -390,17 +408,13 @@ def await_file_vanishes(path, timeout=10):
     raise FileShouldVanishException(path, timeout)


-def cli(request, reactor, node_dir, *argv):
+def cli(node, *argv):
     """
-    Run a tahoe CLI subcommand for a given node, optionally running
-    under coverage if '--coverage' was supplied.
+    Run a tahoe CLI subcommand for a given node in a blocking manner, returning
+    the output.
     """
-    proto = _CollectOutputProtocol()
-    _tahoe_runner_optional_coverage(
-        proto, reactor, request,
-        ['--node-directory', node_dir] + list(argv),
-    )
-    return proto.done
+    arguments = ["tahoe", '--node-directory', node.node_dir]
+    return check_output(arguments + list(argv))


 def node_url(node_dir, uri_fragment):

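Note the changed calling convention: the old cli() took (request, reactor, node_dir, *argv) and returned a Deferred of the collected output, while the new one takes the TahoeProcess itself, blocks, and returns the subcommand's stdout. That is what lets the alice fixture above write:

    # from the alice fixture in integration/conftest.py above
    cli(process, "create-alias", "test")
    rwcap = loads(cli(process, "list-aliases", "--json"))["test"]["readwrite"]

The trade-off, noted in the removed docstring, is that these invocations no longer run under coverage when '--coverage' is supplied.
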
@@ -505,3 +519,36 @@ def await_client_ready(tahoe, timeout=10, liveness=60*2):
             tahoe,
         )
     )
+
+
+def generate_ssh_key(path):
+    """Create a new SSH private/public key pair."""
+    key = RSAKey.generate(2048)
+    key.write_private_key_file(path)
+    with open(path + ".pub", "wb") as f:
+        f.write(b"%s %s" % (key.get_name(), key.get_base64()))
+
+
+def run_in_thread(f):
+    """Decorator for integration tests that runs code in a thread.
+
+    Because we're using pytest_twisted, tests that rely on the reactor are
+    expected to return a Deferred and use async APIs so the reactor can run.
+
+    In the case of the integration test suite, it launches nodes in the
+    background using Twisted APIs. The nodes stdout and stderr is read via
+    Twisted code. If the reactor doesn't run, reads don't happen, and
+    eventually the buffers fill up, and the nodes block when they try to flush
+    logs.
+
+    We can switch to Twisted APIs (treq instead of requests etc.), but
+    sometimes it's easier or expedient to just have a blocking test. So this
+    decorator allows you to run the test in a thread, and the reactor can keep
+    running in the main thread.
+
+    See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3597 for tracking bug.
+    """
+    @wraps(f)
+    def test(*args, **kwargs):
+        return deferToThread(lambda: f(*args, **kwargs))
+    return test

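A hedged usage sketch of the two new helpers together (the test name and key path are illustrative; integration/test_sftp.py above contains the real uses):

    from paramiko.rsakey import RSAKey

    from .util import generate_ssh_key, run_in_thread

    @run_in_thread
    def test_example(alice, tmpdir):
        # This body runs via deferToThread, so blocking libraries such as
        # paramiko are safe here; the reactor keeps servicing the nodes'
        # stdout/stderr pipes in the main thread.
        key_path = str(tmpdir.join("example_key"))
        generate_ssh_key(key_path)       # writes key_path and key_path.pub
        key = RSAKey(filename=key_path)
        assert key.get_name() == "ssh-rsa"
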
newsfragments/3536.minor (new empty file)

newsfragments/3579.minor (new empty file)

newsfragments/3584.bugfix (new file, 1 line)

@@ -0,0 +1 @@
+SFTP public key auth likely works more consistently, and SFTP in general was previously broken.

newsfragments/3590.bugfix (new file, 1 line)

@@ -0,0 +1 @@
+Fixed issue where redirecting old-style URIs (/uri/?uri=...) didn't work.

newsfragments/3599.minor (new empty file)

setup.py: 2 lines changed

@@ -395,6 +395,8 @@ setup(name="tahoe-lafs", # also set in __init__.py
             "html5lib",
             "junitxml",
             "tenacity",
+            "paramiko",
+            "pytest-timeout",
         ] + tor_requires + i2p_requires,
         "tor": tor_requires,
         "i2p": i2p_requires,

src/allmydata/frontends/auth.py

@@ -4,8 +4,8 @@ from zope.interface import implementer
 from twisted.web.client import getPage
 from twisted.internet import defer
 from twisted.cred import error, checkers, credentials
-from twisted.conch import error as conch_error
 from twisted.conch.ssh import keys
+from twisted.conch.checkers import SSHPublicKeyChecker, InMemorySSHKeyDB

 from allmydata.util import base32
 from allmydata.util.fileutil import abspath_expanduser_unicode

@@ -29,7 +29,7 @@ class AccountFileChecker(object):
     def __init__(self, client, accountfile):
         self.client = client
         self.passwords = {}
-        self.pubkeys = {}
+        pubkeys = {}
         self.rootcaps = {}
         with open(abspath_expanduser_unicode(accountfile), "r") as f:
             for line in f:

@@ -40,12 +40,14 @@ class AccountFileChecker(object):
                 if passwd.startswith("ssh-"):
                     bits = rest.split()
                     keystring = " ".join([passwd] + bits[:-1])
+                    key = keys.Key.fromString(keystring)
                     rootcap = bits[-1]
-                    self.pubkeys[name] = keystring
+                    pubkeys[name] = [key]
                 else:
                     self.passwords[name] = passwd
                     rootcap = rest
                 self.rootcaps[name] = rootcap
+        self._pubkeychecker = SSHPublicKeyChecker(InMemorySSHKeyDB(pubkeys))

     def _avatarId(self, username):
         return FTPAvatarID(username, self.rootcaps[username])

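To make the parsing contract concrete: for a key line the password column holds the key type, the middle columns hold the base64 key body, and the final column the rootcap. A sketch of what the loop above computes for one such line (values elided/illustrative, and assuming the name/passwd/rest split happens just above this hunk, which is not shown here):

    line = "alice2 ssh-rsa AAAA... URI:DIR2:..."
    name, passwd, rest = line.split(None, 2)     # assumed split, per the hunk's names
    bits = rest.split()                          # passwd.startswith("ssh-") is true
    keystring = " ".join([passwd] + bits[:-1])   # "ssh-rsa AAAA..."
    key = keys.Key.fromString(keystring)         # a twisted.conch.ssh.keys.Key
    rootcap = bits[-1]                           # "URI:DIR2:..."
    # pubkeys["alice2"] == [key]; InMemorySSHKeyDB consumes exactly such a
    # {username: [keys]} mapping.
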
@@ -57,11 +59,9 @@ class AccountFileChecker(object):

     def requestAvatarId(self, creds):
         if credentials.ISSHPrivateKey.providedBy(creds):
-            # Re-using twisted.conch.checkers.SSHPublicKeyChecker here, rather
-            # than re-implementing all of the ISSHPrivateKey checking logic,
-            # would be better. That would require Twisted 14.1.0 or newer,
-            # though.
-            return self._checkKey(creds)
+            d = defer.maybeDeferred(self._pubkeychecker.requestAvatarId, creds)
+            d.addCallback(self._avatarId)
+            return d
         elif credentials.IUsernameHashedPassword.providedBy(creds):
             return self._checkPassword(creds)
         elif credentials.IUsernamePassword.providedBy(creds):

@@ -86,28 +86,6 @@ class AccountFileChecker(object):
         d.addCallback(self._cbPasswordMatch, str(creds.username))
         return d

-    def _checkKey(self, creds):
-        """
-        Determine whether some key-based credentials correctly authenticates a
-        user.
-
-        Returns a Deferred that fires with the username if so or with an
-        UnauthorizedLogin failure otherwise.
-        """
-
-        # Is the public key indicated by the given credentials allowed to
-        # authenticate the username in those credentials?
-        if creds.blob == self.pubkeys.get(creds.username):
-            if creds.signature is None:
-                return defer.fail(conch_error.ValidPublicKey())
-
-            # Is the signature in the given credentials the correct
-            # signature for the data in those credentials?
-            key = keys.Key.fromString(creds.blob)
-            if key.verify(creds.signature, creds.sigData):
-                return defer.succeed(self._avatarId(creds.username))
-
-        return defer.fail(error.UnauthorizedLogin())

 @implementer(checkers.ICredentialsChecker)
 class AccountURLChecker(object):

src/allmydata/frontends/sftpd.py

@@ -1,5 +1,17 @@
+"""
+Ported to Python 3.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from future.utils import PY2
+if PY2:
+    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+
 import six
-import heapq, traceback, array, stat, struct
+import heapq, traceback, stat, struct
 from stat import S_IFREG, S_IFDIR
 from time import time, strftime, localtime

@@ -44,6 +56,17 @@ from allmydata.util.log import NOISY, OPERATIONAL, WEIRD, \
 if six.PY3:
     long = int


+def createSFTPError(errorCode, errorMessage):
+    """
+    SFTPError that can accept both Unicode and bytes.
+
+    Twisted expects _native_ strings for the SFTPError message, but we often do
+    Unicode by default even on Python 2.
+    """
+    return SFTPError(errorCode, six.ensure_str(errorMessage))
+
+
 def eventually_callback(d):
     return lambda res: eventually(d.callback, res)

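createSFTPError exists because Twisted's SFTPError wants the platform-native str: bytes on Python 2, text on Python 3. six.ensure_str performs exactly that normalization:

    import six

    # six.ensure_str() always returns the native str type:
    #   Python 2: unicode is encoded (UTF-8 by default), str passes through
    #   Python 3: bytes are decoded (UTF-8 by default), str passes through
    assert isinstance(six.ensure_str(u"no such file"), str)
    assert isinstance(six.ensure_str(b"no such file"), str)

Without this, a Unicode message (the default here once unicode_literals is in effect, as imported above) could end up in a malformed SFTP error packet on Python 2.
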
@@ -52,9 +75,9 @@ def eventually_errback(d):


 def _utf8(x):
-    if isinstance(x, unicode):
-        return x.encode('utf-8')
     if isinstance(x, str):
+        return x.encode('utf-8')
+    if isinstance(x, bytes):
         return x
     return repr(x)

@@ -63,7 +86,7 @@ def _to_sftp_time(t):
     """SFTP times are unsigned 32-bit integers representing UTC seconds
     (ignoring leap seconds) since the Unix epoch, January 1 1970 00:00 UTC.
     A Tahoe time is the corresponding float."""
-    return long(t) & long(0xFFFFFFFF)
+    return int(t) & int(0xFFFFFFFF)


 def _convert_error(res, request):

@@ -72,7 +95,7 @@ def _convert_error(res, request):

     if not isinstance(res, Failure):
         logged_res = res
-        if isinstance(res, str): logged_res = "<data of length %r>" % (len(res),)
+        if isinstance(res, (bytes, str)): logged_res = "<data of length %r>" % (len(res),)
         logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL)
         return res

@@ -91,10 +114,10 @@ def _convert_error(res, request):
         raise err
     if err.check(NoSuchChildError):
         childname = _utf8(err.value.args[0])
-        raise SFTPError(FX_NO_SUCH_FILE, childname)
+        raise createSFTPError(FX_NO_SUCH_FILE, childname)
     if err.check(NotWriteableError) or err.check(ChildOfWrongTypeError):
         msg = _utf8(err.value.args[0])
-        raise SFTPError(FX_PERMISSION_DENIED, msg)
+        raise createSFTPError(FX_PERMISSION_DENIED, msg)
     if err.check(ExistingChildError):
         # Versions of SFTP after v3 (which is what twisted.conch implements)
         # define a specific error code for this case: FX_FILE_ALREADY_EXISTS.

@@ -103,16 +126,16 @@ def _convert_error(res, request):
         # to translate the error to the equivalent of POSIX EEXIST, which is
         # necessary for some picky programs (such as gedit).
         msg = _utf8(err.value.args[0])
-        raise SFTPError(FX_FAILURE, msg)
+        raise createSFTPError(FX_FAILURE, msg)
     if err.check(NotImplementedError):
-        raise SFTPError(FX_OP_UNSUPPORTED, _utf8(err.value))
+        raise createSFTPError(FX_OP_UNSUPPORTED, _utf8(err.value))
     if err.check(EOFError):
-        raise SFTPError(FX_EOF, "end of file reached")
+        raise createSFTPError(FX_EOF, "end of file reached")
     if err.check(defer.FirstError):
         _convert_error(err.value.subFailure, request)

     # We assume that the error message is not anonymity-sensitive.
-    raise SFTPError(FX_FAILURE, _utf8(err.value))
+    raise createSFTPError(FX_FAILURE, _utf8(err.value))


 def _repr_flags(flags):

@@ -145,7 +168,7 @@ def _lsLine(name, attrs):
     # Since we now depend on Twisted v10.1, consider calling Twisted's version.

     mode = st_mode
-    perms = array.array('c', '-'*10)
+    perms = ["-"] * 10
     ft = stat.S_IFMT(mode)
     if stat.S_ISDIR(ft): perms[0] = 'd'
     elif stat.S_ISREG(ft): perms[0] = '-'

@@ -164,7 +187,7 @@ def _lsLine(name, attrs):
     if mode&stat.S_IXOTH: perms[9] = 'x'
     # suid/sgid never set

-    l = perms.tostring()
+    l = "".join(perms)
     l += str(st_nlink).rjust(5) + ' '
     un = str(st_uid)
     l += un.ljust(9)

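The perms change is a small Python 3 port in its own right: array.array('c', ...) relied on the 'c' (char) typecode that Python 3 removed, and .tostring() has since been removed as well. A list of one-character strings plus str.join is the portable equivalent:

    # portable replacement for array.array('c', '-'*10) ... .tostring()
    perms = ["-"] * 10
    perms[0] = "d"          # e.g. mark a directory, as _lsLine does
    assert "".join(perms) == "d---------"
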
@@ -181,6 +204,7 @@ def _lsLine(name, attrs):
         l += strftime("%b %d %Y ", localtime(st_mtime))
     else:
         l += strftime("%b %d %H:%M ", localtime(st_mtime))
+    l = l.encode("utf-8")
     l += name
     return l

@@ -222,7 +246,7 @@ def _populate_attrs(childnode, metadata, size=None):
     if childnode and size is None:
         size = childnode.get_size()
     if size is not None:
-        _assert(isinstance(size, (int, long)) and not isinstance(size, bool), size=size)
+        _assert(isinstance(size, int) and not isinstance(size, bool), size=size)
         attrs['size'] = size
     perms = S_IFREG | 0o666

@@ -254,7 +278,7 @@ def _attrs_to_metadata(attrs):

     for key in attrs:
         if key == "mtime" or key == "ctime" or key == "createtime":
-            metadata[key] = long(attrs[key])
+            metadata[key] = int(attrs[key])
         elif key.startswith("ext_"):
             metadata[key] = str(attrs[key])

@@ -266,7 +290,7 @@ def _attrs_to_metadata(attrs):


 def _direntry_for(filenode_or_parent, childname, filenode=None):
-    precondition(isinstance(childname, (unicode, type(None))), childname=childname)
+    precondition(isinstance(childname, (str, type(None))), childname=childname)

     if childname is None:
         filenode_or_parent = filenode

@@ -274,7 +298,7 @@ def _direntry_for(filenode_or_parent, childname, filenode=None):
     if filenode_or_parent:
         rw_uri = filenode_or_parent.get_write_uri()
         if rw_uri and childname:
-            return rw_uri + "/" + childname.encode('utf-8')
+            return rw_uri + b"/" + childname.encode('utf-8')
         else:
             return rw_uri

@@ -326,7 +350,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
         if size < self.current_size or size < self.downloaded:
             self.f.truncate(size)
         if size > self.current_size:
-            self.overwrite(self.current_size, "\x00" * (size - self.current_size))
+            self.overwrite(self.current_size, b"\x00" * (size - self.current_size))
         self.current_size = size

         # make the invariant self.download_size <= self.current_size be true again

@@ -334,7 +358,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
             self.download_size = size

         if self.downloaded >= self.download_size:
-            self.download_done("size changed")
+            self.download_done(b"size changed")

     def registerProducer(self, p, streaming):
         if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY)

@@ -409,21 +433,21 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
             milestone = end

         while len(self.milestones) > 0:
-            (next, d) = self.milestones[0]
-            if next > milestone:
+            (next_, d) = self.milestones[0]
+            if next_ > milestone:
                 return
-            if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY)
+            if noisy: self.log("MILESTONE %r %r" % (next_, d), level=NOISY)
             heapq.heappop(self.milestones)
-            eventually_callback(d)("reached")
+            eventually_callback(d)(b"reached")

         if milestone >= self.download_size:
-            self.download_done("reached download size")
+            self.download_done(b"reached download size")

     def overwrite(self, offset, data):
         if noisy: self.log(".overwrite(%r, <data of length %r>)" % (offset, len(data)), level=NOISY)
         if self.is_closed:
             self.log("overwrite called on a closed OverwriteableFileConsumer", level=WEIRD)
-            raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
+            raise createSFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")

         if offset > self.current_size:
             # Normally writing at an offset beyond the current end-of-file

@@ -434,7 +458,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
             # the gap between the current EOF and the offset.

             self.f.seek(self.current_size)
-            self.f.write("\x00" * (offset - self.current_size))
+            self.f.write(b"\x00" * (offset - self.current_size))
             start = self.current_size
         else:
             self.f.seek(offset)

@@ -454,7 +478,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
         if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY)
         if self.is_closed:
             self.log("read called on a closed OverwriteableFileConsumer", level=WEIRD)
-            raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
+            raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")

         # Note that the overwrite method is synchronous. When a write request is processed
         # (e.g. a writeChunk request on the async queue of GeneralSFTPFile), overwrite will

@@ -508,7 +532,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
         return d

     def download_done(self, res):
-        _assert(isinstance(res, (str, Failure)), res=res)
+        _assert(isinstance(res, (bytes, Failure)), res=res)
         # Only the first call to download_done counts, but we log subsequent calls
         # (multiple calls are normal).
         if self.done_status is not None:

@@ -525,8 +549,8 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
         eventually_callback(self.done)(None)

         while len(self.milestones) > 0:
-            (next, d) = self.milestones[0]
-            if noisy: self.log("MILESTONE FINISH %r %r %r" % (next, d, res), level=NOISY)
+            (next_, d) = self.milestones[0]
+            if noisy: self.log("MILESTONE FINISH %r %r %r" % (next_, d, res), level=NOISY)
             heapq.heappop(self.milestones)
             # The callback means that the milestone has been reached if
             # it is ever going to be. Note that the file may have been

@@ -540,7 +564,7 @@ class OverwriteableFileConsumer(PrefixingLogMixin):
             self.f.close()
         except Exception as e:
             self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD)
-        self.download_done("closed")
+        self.download_done(b"closed")
         return self.done_status

     def unregisterProducer(self):

@ -564,7 +588,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
|||||||
PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
|
PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
|
||||||
if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY)
|
if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY)
|
||||||
|
|
||||||
precondition(isinstance(userpath, str) and IFileNode.providedBy(filenode),
|
precondition(isinstance(userpath, bytes) and IFileNode.providedBy(filenode),
|
||||||
userpath=userpath, filenode=filenode)
|
userpath=userpath, filenode=filenode)
|
||||||
self.filenode = filenode
|
self.filenode = filenode
|
||||||
self.metadata = metadata
|
self.metadata = metadata
|
||||||
@ -576,7 +600,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
|||||||
self.log(request, level=OPERATIONAL)
|
self.log(request, level=OPERATIONAL)
|
||||||
|
|
||||||
if self.closed:
|
if self.closed:
|
||||||
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
|
def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
|
||||||
return defer.execute(_closed)
|
return defer.execute(_closed)
|
||||||
|
|
||||||
d = defer.Deferred()
|
d = defer.Deferred()
|
||||||
@ -593,7 +617,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
|||||||
# i.e. we respond with an EOF error iff offset is already at EOF.
|
# i.e. we respond with an EOF error iff offset is already at EOF.
|
||||||
|
|
||||||
if offset >= len(data):
|
if offset >= len(data):
|
||||||
eventually_errback(d)(Failure(SFTPError(FX_EOF, "read at or past end of file")))
|
eventually_errback(d)(Failure(createSFTPError(FX_EOF, "read at or past end of file")))
|
||||||
else:
|
else:
|
||||||
eventually_callback(d)(data[offset:offset+length]) # truncated if offset+length > len(data)
|
eventually_callback(d)(data[offset:offset+length]) # truncated if offset+length > len(data)
|
||||||
return data
|
return data
|
||||||
@ -604,7 +628,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
|||||||
def writeChunk(self, offset, data):
|
def writeChunk(self, offset, data):
|
||||||
self.log(".writeChunk(%r, <data of length %r>) denied" % (offset, len(data)), level=OPERATIONAL)
|
self.log(".writeChunk(%r, <data of length %r>) denied" % (offset, len(data)), level=OPERATIONAL)
|
||||||
|
|
||||||
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
||||||
return defer.execute(_denied)
|
return defer.execute(_denied)
|
||||||
|
|
||||||
def close(self):
|
def close(self):
|
||||||
@ -618,7 +642,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
|||||||
self.log(request, level=OPERATIONAL)
|
self.log(request, level=OPERATIONAL)
|
||||||
|
|
||||||
if self.closed:
|
if self.closed:
|
||||||
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
|
def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
|
||||||
return defer.execute(_closed)
|
return defer.execute(_closed)
|
||||||
|
|
||||||
d = defer.execute(_populate_attrs, self.filenode, self.metadata)
|
d = defer.execute(_populate_attrs, self.filenode, self.metadata)
|
||||||
@ -627,7 +651,7 @@ class ShortReadOnlySFTPFile(PrefixingLogMixin):
|
|||||||
|
|
||||||
def setAttrs(self, attrs):
|
def setAttrs(self, attrs):
|
||||||
self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL)
|
self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL)
|
||||||
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
|
||||||
return defer.execute(_denied)
|
return defer.execute(_denied)
|
||||||
|
|
||||||
|
|
||||||
@ -648,7 +672,7 @@ class GeneralSFTPFile(PrefixingLogMixin):
|
|||||||
if noisy: self.log(".__init__(%r, %r = %r, %r, <convergence censored>)" %
|
if noisy: self.log(".__init__(%r, %r = %r, %r, <convergence censored>)" %
|
||||||
(userpath, flags, _repr_flags(flags), close_notify), level=NOISY)
|
(userpath, flags, _repr_flags(flags), close_notify), level=NOISY)
|
||||||
|
|
||||||
precondition(isinstance(userpath, str), userpath=userpath)
|
precondition(isinstance(userpath, bytes), userpath=userpath)
|
||||||
self.userpath = userpath
|
self.userpath = userpath
|
||||||
self.flags = flags
|
self.flags = flags
|
||||||
self.close_notify = close_notify
|
self.close_notify = close_notify
|
||||||
@ -667,11 +691,11 @@ class GeneralSFTPFile(PrefixingLogMixin):
|
|||||||
# not be set before then.
|
# not be set before then.
|
||||||
self.consumer = None
|
self.consumer = None
|
||||||
|
|
||||||
def open(self, parent=None, childname=None, filenode=None, metadata=None):
|
def open(self, parent=None, childname=None, filenode=None, metadata=None): # noqa: F811
|
||||||
self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" %
|
self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" %
|
||||||
(parent, childname, filenode, metadata), level=OPERATIONAL)
|
(parent, childname, filenode, metadata), level=OPERATIONAL)
|
||||||
|
|
||||||
precondition(isinstance(childname, (unicode, type(None))), childname=childname)
|
precondition(isinstance(childname, (str, type(None))), childname=childname)
|
||||||
precondition(filenode is None or IFileNode.providedBy(filenode), filenode=filenode)
|
precondition(filenode is None or IFileNode.providedBy(filenode), filenode=filenode)
|
||||||
precondition(not self.closed, sftpfile=self)
|
precondition(not self.closed, sftpfile=self)
|
||||||
|
|
||||||
@@ -688,7 +712,7 @@ class GeneralSFTPFile(PrefixingLogMixin):
         if (self.flags & FXF_TRUNC) or not filenode:
             # We're either truncating or creating the file, so we don't need the old contents.
             self.consumer = OverwriteableFileConsumer(0, tempfile_maker)
-            self.consumer.download_done("download not needed")
+            self.consumer.download_done(b"download not needed")
         else:
             self.async_.addCallback(lambda ignored: filenode.get_best_readable_version())
 
@@ -702,7 +726,7 @@ class GeneralSFTPFile(PrefixingLogMixin):
                 d = version.read(self.consumer, 0, None)
                 def _finished(res):
                     if not isinstance(res, Failure):
-                        res = "download finished"
+                        res = b"download finished"
                     self.consumer.download_done(res)
                 d.addBoth(_finished)
                 # It is correct to drop d here.
@@ -722,7 +746,7 @@ class GeneralSFTPFile(PrefixingLogMixin):
     def rename(self, new_userpath, new_parent, new_childname):
         self.log(".rename(%r, %r, %r)" % (new_userpath, new_parent, new_childname), level=OPERATIONAL)
 
-        precondition(isinstance(new_userpath, str) and isinstance(new_childname, unicode),
+        precondition(isinstance(new_userpath, bytes) and isinstance(new_childname, str),
                      new_userpath=new_userpath, new_childname=new_childname)
         self.userpath = new_userpath
         self.parent = new_parent
@@ -750,11 +774,11 @@ class GeneralSFTPFile(PrefixingLogMixin):
         self.log(request, level=OPERATIONAL)
 
         if not (self.flags & FXF_READ):
-            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
+            def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
             return defer.execute(_denied)
 
         if self.closed:
-            def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
+            def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
             return defer.execute(_closed)
 
         d = defer.Deferred()
@@ -772,11 +796,11 @@ class GeneralSFTPFile(PrefixingLogMixin):
         self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL)
 
         if not (self.flags & FXF_WRITE):
-            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
+            def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
             return defer.execute(_denied)
 
         if self.closed:
-            def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
+            def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
             return defer.execute(_closed)
 
         self.has_changed = True
@@ -892,7 +916,7 @@ class GeneralSFTPFile(PrefixingLogMixin):
         self.log(request, level=OPERATIONAL)
 
         if self.closed:
-            def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
+            def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
             return defer.execute(_closed)
 
         # Optimization for read-only handles, when we already know the metadata.
@@ -916,16 +940,16 @@ class GeneralSFTPFile(PrefixingLogMixin):
         self.log(request, level=OPERATIONAL)
 
         if not (self.flags & FXF_WRITE):
-            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
+            def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
             return defer.execute(_denied)
 
         if self.closed:
-            def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle")
+            def _closed(): raise createSFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle")
             return defer.execute(_closed)
 
         size = attrs.get("size", None)
-        if size is not None and (not isinstance(size, (int, long)) or size < 0):
-            def _bad(): raise SFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer")
+        if size is not None and (not isinstance(size, int) or size < 0):
+            def _bad(): raise createSFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer")
             return defer.execute(_bad)
 
         d = defer.Deferred()
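Note: Python 3 folds the Python 2 long type into int, so the old
(int, long) check narrows to int alone; arbitrarily large sizes still pass:

    size = 2 ** 40                    # terabyte-scale size is a plain int
    assert isinstance(size, int) and size >= 0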
@@ -1011,7 +1035,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
     def logout(self):
         self.log(".logout()", level=OPERATIONAL)
 
-        for files in self._heisenfiles.itervalues():
+        for files in self._heisenfiles.values():
             for f in files:
                 f.abandon()
 
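Note: dict.itervalues() is gone in Python 3; .values() returns a lazy view,
which is all this loop needs. A sketch of the equivalence:

    heisenfiles = {b"direntry-1": ["handle-a", "handle-b"]}
    # Python 2 spelled this heisenfiles.itervalues(); on Python 3,
    # .values() is already a view, so no list copy is made.
    for files in heisenfiles.values():
        for f in files:
            print(f)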
@@ -1038,7 +1062,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         request = "._abandon_any_heisenfiles(%r, %r)" % (userpath, direntry)
         self.log(request, level=OPERATIONAL)
 
-        precondition(isinstance(userpath, str), userpath=userpath)
+        precondition(isinstance(userpath, bytes), userpath=userpath)
 
         # First we synchronously mark all heisenfiles matching the userpath or direntry
         # as abandoned, and remove them from the two heisenfile dicts. Then we .sync()
@@ -1087,8 +1111,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
                    (from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite))
         self.log(request, level=OPERATIONAL)
 
-        precondition((isinstance(from_userpath, str) and isinstance(from_childname, unicode) and
-                      isinstance(to_userpath, str) and isinstance(to_childname, unicode)),
+        precondition((isinstance(from_userpath, bytes) and isinstance(from_childname, str) and
+                      isinstance(to_userpath, bytes) and isinstance(to_childname, str)),
                      from_userpath=from_userpath, from_childname=from_childname, to_userpath=to_userpath, to_childname=to_childname)
 
         if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY)
@@ -1117,7 +1141,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         # does not mean that they were not committed; it is used to determine whether
         # a NoSuchChildError from the rename attempt should be suppressed). If overwrite
         # is False and there were already heisenfiles at the destination userpath or
-        # direntry, we return a Deferred that fails with SFTPError(FX_PERMISSION_DENIED).
+        # direntry, we return a Deferred that fails with createSFTPError(FX_PERMISSION_DENIED).
 
         from_direntry = _direntry_for(from_parent, from_childname)
         to_direntry = _direntry_for(to_parent, to_childname)
@@ -1126,7 +1150,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
                           (from_direntry, to_direntry, len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY)
 
         if not overwrite and (to_userpath in self._heisenfiles or to_direntry in all_heisenfiles):
-            def _existing(): raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
+            def _existing(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8"))
             if noisy: self.log("existing", level=NOISY)
             return defer.execute(_existing)
 
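Note: with to_userpath now bytes, concatenating it onto a text message would
raise TypeError on Python 3, hence the str(to_userpath, "utf-8") decode.
For example:

    to_userpath = b"uri/URI:DIR2:example/child"
    message = "cannot rename to existing path " + str(to_userpath, "utf-8")
    assert message.endswith("child")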
@@ -1160,7 +1184,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         request = "._update_attrs_for_heisenfiles(%r, %r, %r)" % (userpath, direntry, attrs)
         self.log(request, level=OPERATIONAL)
 
-        _assert(isinstance(userpath, str) and isinstance(direntry, str),
+        _assert(isinstance(userpath, bytes) and isinstance(direntry, bytes),
                 userpath=userpath, direntry=direntry)
 
         files = []
@@ -1193,7 +1217,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         request = "._sync_heisenfiles(%r, %r, ignore=%r)" % (userpath, direntry, ignore)
         self.log(request, level=OPERATIONAL)
 
-        _assert(isinstance(userpath, str) and isinstance(direntry, (str, type(None))),
+        _assert(isinstance(userpath, bytes) and isinstance(direntry, (bytes, type(None))),
                 userpath=userpath, direntry=direntry)
 
         files = []
@@ -1218,7 +1242,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
     def _remove_heisenfile(self, userpath, parent, childname, file_to_remove):
         if noisy: self.log("._remove_heisenfile(%r, %r, %r, %r)" % (userpath, parent, childname, file_to_remove), level=NOISY)
 
-        _assert(isinstance(userpath, str) and isinstance(childname, (unicode, type(None))),
+        _assert(isinstance(userpath, bytes) and isinstance(childname, (str, type(None))),
                 userpath=userpath, childname=childname)
 
         direntry = _direntry_for(parent, childname)
@@ -1245,8 +1269,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
                     (existing_file, userpath, flags, _repr_flags(flags), parent, childname, filenode, metadata),
                     level=NOISY)
 
-        _assert((isinstance(userpath, str) and isinstance(childname, (unicode, type(None))) and
+        _assert((isinstance(userpath, bytes) and isinstance(childname, (str, type(None))) and
                  (metadata is None or 'no-write' in metadata)),
                 userpath=userpath, childname=childname, metadata=metadata)
 
         writing = (flags & (FXF_WRITE | FXF_CREAT)) != 0
@@ -1279,17 +1303,17 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
 
         if not (flags & (FXF_READ | FXF_WRITE)):
             def _bad_readwrite():
-                raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set")
+                raise createSFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set")
             return defer.execute(_bad_readwrite)
 
         if (flags & FXF_EXCL) and not (flags & FXF_CREAT):
             def _bad_exclcreat():
-                raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT")
+                raise createSFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT")
             return defer.execute(_bad_exclcreat)
 
         path = self._path_from_string(pathstring)
         if not path:
-            def _emptypath(): raise SFTPError(FX_NO_SUCH_FILE, "path cannot be empty")
+            def _emptypath(): raise createSFTPError(FX_NO_SUCH_FILE, "path cannot be empty")
             return defer.execute(_emptypath)
 
         # The combination of flags is potentially valid.
@@ -1348,20 +1372,20 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         def _got_root(root_and_path):
             (root, path) = root_and_path
             if root.is_unknown():
-                raise SFTPError(FX_PERMISSION_DENIED,
+                raise createSFTPError(FX_PERMISSION_DENIED,
                                 "cannot open an unknown cap (or child of an unknown object). "
                                 "Upgrading the gateway to a later Tahoe-LAFS version may help")
             if not path:
                 # case 1
                 if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY)
                 if not IFileNode.providedBy(root):
-                    raise SFTPError(FX_PERMISSION_DENIED,
+                    raise createSFTPError(FX_PERMISSION_DENIED,
                                     "cannot open a directory cap")
                 if (flags & FXF_WRITE) and root.is_readonly():
-                    raise SFTPError(FX_PERMISSION_DENIED,
+                    raise createSFTPError(FX_PERMISSION_DENIED,
                                     "cannot write to a non-writeable filecap without a parent directory")
                 if flags & FXF_EXCL:
-                    raise SFTPError(FX_FAILURE,
+                    raise createSFTPError(FX_FAILURE,
                                     "cannot create a file exclusively when it already exists")
 
                 # The file does not need to be added to all_heisenfiles, because it is not
@@ -1388,7 +1412,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         def _got_parent(parent):
             if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY)
             if parent.is_unknown():
-                raise SFTPError(FX_PERMISSION_DENIED,
+                raise createSFTPError(FX_PERMISSION_DENIED,
                                 "cannot open a child of an unknown object. "
                                 "Upgrading the gateway to a later Tahoe-LAFS version may help")
 
@@ -1403,13 +1427,13 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
                 # which is consistent with what might happen on a POSIX filesystem.
 
                 if parent_readonly:
-                    raise SFTPError(FX_FAILURE,
+                    raise createSFTPError(FX_FAILURE,
                                     "cannot create a file exclusively when the parent directory is read-only")
 
                 # 'overwrite=False' ensures failure if the link already exists.
                 # FIXME: should use a single call to set_uri and return (child, metadata) (#1035)
 
-                zero_length_lit = "URI:LIT:"
+                zero_length_lit = b"URI:LIT:"
                 if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" %
                                    (parent, zero_length_lit, childname), level=NOISY)
                 d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit,
@@ -1435,14 +1459,14 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
                 metadata['no-write'] = _no_write(parent_readonly, filenode, current_metadata)
 
                 if filenode.is_unknown():
-                    raise SFTPError(FX_PERMISSION_DENIED,
+                    raise createSFTPError(FX_PERMISSION_DENIED,
                                     "cannot open an unknown cap. Upgrading the gateway "
                                     "to a later Tahoe-LAFS version may help")
                 if not IFileNode.providedBy(filenode):
-                    raise SFTPError(FX_PERMISSION_DENIED,
+                    raise createSFTPError(FX_PERMISSION_DENIED,
                                     "cannot open a directory as if it were a file")
                 if (flags & FXF_WRITE) and metadata['no-write']:
-                    raise SFTPError(FX_PERMISSION_DENIED,
+                    raise createSFTPError(FX_PERMISSION_DENIED,
                                     "cannot open a non-writeable file for writing")
 
                 return self._make_file(file, userpath, flags, parent=parent, childname=childname,
@@ -1452,10 +1476,10 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
             f.trap(NoSuchChildError)
 
             if not (flags & FXF_CREAT):
-                raise SFTPError(FX_NO_SUCH_FILE,
+                raise createSFTPError(FX_NO_SUCH_FILE,
                                 "the file does not exist, and was not opened with the creation (CREAT) flag")
             if parent_readonly:
-                raise SFTPError(FX_PERMISSION_DENIED,
+                raise createSFTPError(FX_PERMISSION_DENIED,
                                 "cannot create a file when the parent directory is read-only")
 
             return self._make_file(file, userpath, flags, parent=parent, childname=childname)
@@ -1494,9 +1518,9 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         (to_parent, to_childname) = to_pair
 
         if from_childname is None:
-            raise SFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI")
+            raise createSFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI")
         if to_childname is None:
-            raise SFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI")
+            raise createSFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI")
 
         # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.5>
         # "It is an error if there already exists a file with the name specified
@@ -1511,7 +1535,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         d2.addCallback(lambda ign: to_parent.get(to_childname))
         def _expect_fail(res):
             if not isinstance(res, Failure):
-                raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
+                raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8"))
 
             # It is OK if we fail for errors other than NoSuchChildError, since that probably
             # indicates some problem accessing the destination directory.
@@ -1536,7 +1560,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
             if not isinstance(err, Failure) or (renamed and err.check(NoSuchChildError)):
                 return None
             if not overwrite and err.check(ExistingChildError):
-                raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
+                raise createSFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + str(to_userpath, "utf-8"))
 
             return err
         d3.addBoth(_check)
@@ -1554,7 +1578,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         path = self._path_from_string(pathstring)
         metadata = _attrs_to_metadata(attrs)
         if 'no-write' in metadata:
-            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only")
+            def _denied(): raise createSFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only")
             return defer.execute(_denied)
 
         d = self._get_root(path)
@@ -1566,7 +1590,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
     def _get_or_create_directories(self, node, path, metadata):
         if not IDirectoryNode.providedBy(node):
             # TODO: provide the name of the blocking file in the error message.
-            def _blocked(): raise SFTPError(FX_FAILURE, "cannot create directory because there "
+            def _blocked(): raise createSFTPError(FX_FAILURE, "cannot create directory because there "
                                             "is a file in the way") # close enough
             return defer.execute(_blocked)
 
@@ -1604,7 +1628,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         def _got_parent(parent_and_childname):
             (parent, childname) = parent_and_childname
             if childname is None:
-                raise SFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI")
+                raise createSFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI")
 
             direntry = _direntry_for(parent, childname)
             d2 = defer.succeed(False)
@@ -1635,18 +1659,18 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         d.addCallback(_got_parent_or_node)
         def _list(dirnode):
             if dirnode.is_unknown():
-                raise SFTPError(FX_PERMISSION_DENIED,
+                raise createSFTPError(FX_PERMISSION_DENIED,
                                 "cannot list an unknown cap as a directory. Upgrading the gateway "
                                 "to a later Tahoe-LAFS version may help")
             if not IDirectoryNode.providedBy(dirnode):
-                raise SFTPError(FX_PERMISSION_DENIED,
+                raise createSFTPError(FX_PERMISSION_DENIED,
                                 "cannot list a file as if it were a directory")
 
             d2 = dirnode.list()
             def _render(children):
                 parent_readonly = dirnode.is_readonly()
                 results = []
-                for filename, (child, metadata) in children.iteritems():
+                for filename, (child, metadata) in list(children.items()):
                     # The file size may be cached or absent.
                     metadata['no-write'] = _no_write(parent_readonly, child, metadata)
                     attrs = _populate_attrs(child, metadata)
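Note: list(children.items()) snapshots the Python 3 view object up front,
matching the copy semantics of Python 2's .items(); a bare view would track
any mutation of children made during the loop. Roughly:

    children = {u"a.txt": (object(), {"size": 1}),
                u"b.txt": (object(), {"size": 2})}
    for filename, (child, metadata) in list(children.items()):
        metadata['no-write'] = True   # mutating the nested dicts is safe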
@@ -1726,7 +1750,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
         if "size" in attrs:
             # this would require us to download and re-upload the truncated/extended
             # file contents
-            def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported")
+            def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported")
             return defer.execute(_unsupported)
 
         path = self._path_from_string(pathstring)
@@ -1743,7 +1767,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
             if childname is None:
                 if updated_heisenfiles:
                     return None
-                raise SFTPError(FX_NO_SUCH_FILE, userpath)
+                raise createSFTPError(FX_NO_SUCH_FILE, userpath)
             else:
                 desired_metadata = _attrs_to_metadata(attrs)
                 if noisy: self.log("desired_metadata = %r" % (desired_metadata,), level=NOISY)
|
|||||||
def readLink(self, pathstring):
|
def readLink(self, pathstring):
|
||||||
self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL)
|
self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL)
|
||||||
|
|
||||||
def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "readLink")
|
def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "readLink")
|
||||||
return defer.execute(_unsupported)
|
return defer.execute(_unsupported)
|
||||||
|
|
||||||
def makeLink(self, linkPathstring, targetPathstring):
|
def makeLink(self, linkPathstring, targetPathstring):
|
||||||
@ -1775,7 +1799,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
|||||||
# If this is implemented, note the reversal of arguments described in point 7 of
|
# If this is implemented, note the reversal of arguments described in point 7 of
|
||||||
# <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>.
|
# <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>.
|
||||||
|
|
||||||
def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "makeLink")
|
def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "makeLink")
|
||||||
return defer.execute(_unsupported)
|
return defer.execute(_unsupported)
|
||||||
|
|
||||||
def extendedRequest(self, extensionName, extensionData):
|
def extendedRequest(self, extensionName, extensionData):
|
||||||
@ -1784,8 +1808,8 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
|||||||
# We implement the three main OpenSSH SFTP extensions; see
|
# We implement the three main OpenSSH SFTP extensions; see
|
||||||
# <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>
|
# <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>
|
||||||
|
|
||||||
if extensionName == 'posix-rename@openssh.com':
|
if extensionName == b'posix-rename@openssh.com':
|
||||||
def _bad(): raise SFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request")
|
def _bad(): raise createSFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request")
|
||||||
|
|
||||||
if 4 > len(extensionData): return defer.execute(_bad)
|
if 4 > len(extensionData): return defer.execute(_bad)
|
||||||
(fromPathLen,) = struct.unpack('>L', extensionData[0:4])
|
(fromPathLen,) = struct.unpack('>L', extensionData[0:4])
|
||||||
@ -1802,11 +1826,11 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
|
|||||||
# an error, or an FXP_EXTENDED_REPLY. But it happens to do the right thing
|
# an error, or an FXP_EXTENDED_REPLY. But it happens to do the right thing
|
||||||
# (respond with an FXP_STATUS message) if we return a Failure with code FX_OK.
|
# (respond with an FXP_STATUS message) if we return a Failure with code FX_OK.
|
||||||
def _succeeded(ign):
|
def _succeeded(ign):
|
||||||
raise SFTPError(FX_OK, "request succeeded")
|
raise createSFTPError(FX_OK, "request succeeded")
|
||||||
d.addCallback(_succeeded)
|
d.addCallback(_succeeded)
|
||||||
return d
|
return d
|
||||||
|
|
||||||
if extensionName == 'statvfs@openssh.com' or extensionName == 'fstatvfs@openssh.com':
|
if extensionName == b'statvfs@openssh.com' or extensionName == b'fstatvfs@openssh.com':
|
||||||
# f_bsize and f_frsize should be the same to avoid a bug in 'df'
|
# f_bsize and f_frsize should be the same to avoid a bug in 'df'
|
||||||
return defer.succeed(struct.pack('>11Q',
|
return defer.succeed(struct.pack('>11Q',
|
||||||
1024, # uint64 f_bsize /* file system block size */
|
1024, # uint64 f_bsize /* file system block size */
|
||||||
@@ -1822,7 +1846,7 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
                 65535, # uint64 f_namemax   /* maximum filename length */
                 ))
 
-        def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "unsupported %r request <data of length %r>" %
+        def _unsupported(): raise createSFTPError(FX_OP_UNSUPPORTED, "unsupported %r request <data of length %r>" %
                                             (extensionName, len(extensionData)))
         return defer.execute(_unsupported)
 
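Note: the statvfs@openssh.com reply above is eleven big-endian uint64 fields.
A sketch of how a client would decode it (the middle values here are
placeholders; only f_bsize=1024 and f_namemax=65535 are visible in this hunk):

    import struct

    fields = ("f_bsize", "f_frsize", "f_blocks", "f_bfree", "f_bavail",
              "f_files", "f_ffree", "f_favail", "f_fsid", "f_flag",
              "f_namemax")
    reply = struct.pack('>11Q', 1024, 1024, 0, 0, 0, 0, 0, 0, 0, 0, 65535)
    print(dict(zip(fields, struct.unpack('>11Q', reply))))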
@@ -1837,29 +1861,29 @@ class SFTPUserHandler(ConchUser, PrefixingLogMixin):
     def _path_from_string(self, pathstring):
         if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY)
 
-        _assert(isinstance(pathstring, str), pathstring=pathstring)
+        _assert(isinstance(pathstring, bytes), pathstring=pathstring)
 
         # The home directory is the root directory.
-        pathstring = pathstring.strip("/")
-        if pathstring == "" or pathstring == ".":
+        pathstring = pathstring.strip(b"/")
+        if pathstring == b"" or pathstring == b".":
             path_utf8 = []
         else:
-            path_utf8 = pathstring.split("/")
+            path_utf8 = pathstring.split(b"/")
 
         # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.2>
         # "Servers SHOULD interpret a path name component ".." as referring to
         # the parent directory, and "." as referring to the current directory."
         path = []
         for p_utf8 in path_utf8:
-            if p_utf8 == "..":
+            if p_utf8 == b"..":
                 # ignore excess .. components at the root
                 if len(path) > 0:
                     path = path[:-1]
-            elif p_utf8 != ".":
+            elif p_utf8 != b".":
                 try:
                     p = p_utf8.decode('utf-8', 'strict')
                 except UnicodeError:
-                    raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
+                    raise createSFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
                 path.append(p)
 
         if noisy: self.log(" PATH %r" % (path,), level=NOISY)
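Note: _path_from_string now works on bytes end to end, decoding each component
to text only after the ".." and "." handling. A worked example of the
normalization (input hypothetical):

    pathstring = b"/foo/./bar/../baz"
    parts = pathstring.strip(b"/").split(b"/")
    path = []
    for p in parts:
        if p == b"..":
            if len(path) > 0:
                path = path[:-1]      # ignore excess .. at the root
        elif p != b".":
            path.append(p.decode("utf-8"))
    assert path == ["foo", "baz"]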
@@ -1978,9 +2002,9 @@ class SFTPServer(service.MultiService):
 
     def __init__(self, client, accountfile, accounturl,
                  sftp_portstr, pubkey_file, privkey_file):
-        precondition(isinstance(accountfile, (unicode, type(None))), accountfile)
-        precondition(isinstance(pubkey_file, unicode), pubkey_file)
-        precondition(isinstance(privkey_file, unicode), privkey_file)
+        precondition(isinstance(accountfile, (str, type(None))), accountfile)
+        precondition(isinstance(pubkey_file, str), pubkey_file)
+        precondition(isinstance(privkey_file, str), privkey_file)
         service.MultiService.__init__(self)
 
         r = Dispatcher(client)
@@ -2011,5 +2035,5 @@ class SFTPServer(service.MultiService):
         f = SSHFactory()
         f.portal = p
 
-        s = strports.service(sftp_portstr, f)
+        s = strports.service(six.ensure_str(sftp_portstr), f)
         s.setServiceParent(self)
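Note: strports.service expects a native-str endpoint description;
six.ensure_str yields one on both Python lines, whether the configured port
arrives as bytes or text:

    import six
    assert six.ensure_str(b"tcp:8022") == "tcp:8022"
    assert six.ensure_str(u"tcp:8022") == "tcp:8022"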
(File diff suppressed because it is too large)
@@ -4757,6 +4757,31 @@ class Web(WebMixin, WebErrorMixin, testutil.StallMixin, testutil.ReallyEqualMixi
         op_url = self.webish_url + "/operations/134?t=status&output=JSON"
         yield self.assertHTTPError(op_url, 404, "unknown/expired handle '134'")
 
+    @inlineCallbacks
+    def test_uri_redirect(self):
+        """URI redirects don't cause failure.
+
+        Unit test reproducer for https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3590
+        """
+        def req(method, path, **kwargs):
+            return treq.request(method, self.webish_url + path, persistent=False,
+                                **kwargs)
+
+        response = yield req("POST", "/uri?format=sdmf&t=mkdir")
+        dircap = yield response.content()
+        assert dircap.startswith('URI:DIR2:')
+        dircap_uri = "/uri/?uri={}&t=json".format(urllib.quote(dircap))
+
+        response = yield req(
+            "GET",
+            dircap_uri,
+        )
+        self.assertEqual(
+            response.request.absoluteURI,
+            self.webish_url + "/uri/{}?t=json".format(urllib.quote(dircap)))
+        if response.code >= 400:
+            raise Error(response.code, response=response.content())
+
     def test_incident(self):
         d = self.POST("/report_incident", details="eek")
         def _done(res):
@@ -37,6 +37,7 @@ PORTED_MODULES = [
     "allmydata.crypto.util",
     "allmydata.deep_stats",
     "allmydata.dirnode",
+    "allmydata.frontends.sftpd",
     "allmydata.hashtree",
     "allmydata.immutable.checker",
     "allmydata.immutable.downloader",
@@ -170,6 +171,7 @@ PORTED_TEST_MODULES = [
     "allmydata.test.test_pipeline",
     "allmydata.test.test_python3",
     "allmydata.test.test_repairer",
+    "allmydata.test.test_sftp",
     "allmydata.test.test_spans",
     "allmydata.test.test_statistics",
     "allmydata.test.test_stats",
@@ -11,7 +11,7 @@ from twisted.web import (
     resource,
     static,
 )
-from twisted.web.util import redirectTo
+from twisted.web.util import redirectTo, Redirect
 from twisted.python.filepath import FilePath
 from twisted.web.template import (
     Element,
@@ -147,7 +147,7 @@ class URIHandler(resource.Resource, object):
         and creates and appropriate handler (depending on the kind of
         capability it was passed).
         """
-        # this is in case a URI like "/uri/?cap=<valid capability>" is
+        # this is in case a URI like "/uri/?uri=<valid capability>" is
         # passed -- we re-direct to the non-trailing-slash version so
         # that there is just one valid URI for "uri" resource.
         if not name:
@@ -155,7 +155,7 @@ class URIHandler(resource.Resource, object):
             u = u.replace(
                 path=(s for s in u.path if s),  # remove empty segments
             )
-            return redirectTo(u.to_uri().to_text().encode('utf8'), req)
+            return Redirect(u.to_uri().to_text().encode('utf8'))
         try:
             node = self.client.create_node_from_uri(name)
             return directory.make_handler_for(node, self.client)
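Note: this is the substance of the #3590 fix referenced by the test above.
twisted.web.util.redirectTo(url, request) writes the redirect headers on the
current request and returns body bytes, which is only valid inside a render
method; getChild must return an IResource, and the Redirect resource
satisfies that. A minimal illustration:

    from twisted.web.resource import Resource
    from twisted.web.util import redirectTo, Redirect

    class FromRender(Resource):
        def render_GET(self, request):
            # Fine here: redirectTo sets the Location header on *this*
            # request and returns the body bytes to serve.
            return redirectTo(b"/uri/CAP?t=json", request)

    class FromGetChild(Resource):
        def getChild(self, name, request):
            # getChild must return an IResource; Redirect is a resource
            # that performs the redirect when it is later rendered.
            return Redirect(b"/uri/CAP?t=json")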
tox.ini
@@ -77,7 +77,7 @@ setenv =
     COVERAGE_PROCESS_START=.coveragerc
 commands =
     # NOTE: 'run with "py.test --keep-tempdir -s -v integration/" to debug failures'
-    py.test --coverage -v {posargs:integration}
+    py.test --timeout=1800 --coverage -v {posargs:integration}
     coverage combine
     coverage report
 
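Note: the --timeout=1800 flag is provided by the pytest-timeout plugin
(assumed to be among the integration-test dependencies); it aborts any single
integration test that runs longer than 30 minutes instead of letting CI hang.
The same cap could live in pytest configuration instead:

    [pytest]
    timeout = 1800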
@@ -221,6 +221,7 @@ commands =
 deps =
     sphinx
     docutils==0.12
+    recommonmark
 # normal install is not needed for docs, and slows things down
 skip_install = True
 commands =