mirror of https://github.com/tahoe-lafs/tahoe-lafs.git
Merge remote-tracking branch 'origin/master' into 3914.faster-rsa-tests
commit 3f8bec0ecc
@@ -380,7 +380,7 @@ jobs:
docker:
# Run in a highly Nix-capable environment.
- <<: *DOCKERHUB_AUTH
image: "nixos/nix:2.3.16"
image: "nixos/nix:2.10.3"

environment:
# CACHIX_AUTH_TOKEN is manually set in the CircleCI web UI and
@@ -390,27 +390,21 @@ jobs:

steps:
- "run":
# The nixos/nix image does not include ssh. Install it so the
# `checkout` step will succeed. We also want cachix for
# Nix-friendly caching.
# Get cachix for Nix-friendly caching.
name: "Install Basic Dependencies"
command: |
NIXPKGS="https://github.com/nixos/nixpkgs/archive/nixos-<<parameters.nixpkgs>>.tar.gz"
nix-env \
--file https://github.com/nixos/nixpkgs/archive/nixos-<<parameters.nixpkgs>>.tar.gz \
--file $NIXPKGS \
--install \
-A openssh cachix bash
-A cachix bash
# Activate it for "binary substitution". This sets up
# configuration tht lets Nix download something from the cache
# instead of building it locally, if possible.
cachix use "${CACHIX_NAME}"

- "checkout"

- run:
name: "Cachix setup"
# Record the store paths that exist before we did much. There's no
# reason to cache these, they're either in the image or have to be
# retrieved before we can use cachix to restore from cache.
command: |
cachix use "${CACHIX_NAME}"
nix path-info --all > /tmp/store-path-pre-build

- "run":
# The Nix package doesn't know how to do this part, unfortunately.
name: "Generate version"
@@ -432,50 +426,21 @@ jobs:
# build a couple simple little dependencies that don't take
# advantage of multiple cores and we get a little speedup by doing
# them in parallel.
nix-build --cores 3 --max-jobs 2 --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>"
source .circleci/lib.sh
cache_if_able nix-build \
--cores 3 \
--max-jobs 2 \
--argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>"

- "run":
name: "Test"
command: |
# Let it go somewhat wild for the test suite itself
nix-build --cores 8 --argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" tests.nix

- run:
# Send any new store objects to cachix.
name: "Push to Cachix"
when: "always"
command: |
# Cribbed from
# https://circleci.com/blog/managing-secrets-when-you-have-pull-requests-from-outside-contributors/
if [ -n "$CIRCLE_PR_NUMBER" ]; then
# I'm sure you're thinking "CIRCLE_PR_NUMBER must just be the
# number of the PR being built". Sorry, dear reader, you have
# guessed poorly. It is also conditionally set based on whether
# this is a PR from a fork or not.
#
# https://circleci.com/docs/2.0/env-vars/#built-in-environment-variables
echo "Skipping Cachix push for forked PR."
else
# If this *isn't* a build from a fork then we have the Cachix
# write key in our environment and we can push any new objects
# to Cachix.
#
# To decide what to push, we inspect the list of store objects
# that existed before and after we did most of our work. Any
# that are new after the work is probably a useful thing to have
# around so push it to the cache. We exclude all derivation
# objects (.drv files) because they're cheap to reconstruct and
# by the time you know their cache key you've already done all
# the work anyway.
#
# This shell expression for finding the objects and pushing them
# was from the Cachix docs:
#
# https://docs.cachix.org/continuous-integration-setup/circleci.html
#
# but they seem to have removed it now.
bash -c "comm -13 <(sort /tmp/store-path-pre-build | grep -v '\.drv$') <(nix path-info --all | grep -v '\.drv$' | sort) | cachix push $CACHIX_NAME"
fi
source .circleci/lib.sh
cache_if_able nix-build \
--cores 8 \
--argstr pkgsVersion "nixpkgs-<<parameters.nixpkgs>>" \
tests.nix

typechecks:
docker:
.circleci/lib.sh (new file, 26 lines)
@@ -0,0 +1,26 @@
# Run a command, enabling cache writes to cachix if possible. The command is
# accepted as a variable number of positional arguments (like argv).
function cache_if_able() {
    # The `cachix watch-exec ...` does our cache population. When it sees
    # something added to the store (I guess) it pushes it to the named cache.
    #
    # We can only *push* to it if we have a CACHIX_AUTH_TOKEN, though.
    # in-repo jobs will get this from CircleCI configuration but jobs from
    # forks may not.
    echo "Building PR from user/org: ${CIRCLE_PROJECT_USERNAME}"
    if [ -v CACHIX_AUTH_TOKEN ]; then
        echo "Cachix credentials present; will attempt to write to cache."
        cachix watch-exec "${CACHIX_NAME}" -- "$@"
    else
        # If we're building a from a forked repository then we're allowed to
        # not have the credentials (but it's also fine if the owner of the
        # fork supplied their own).
        if [ "${CIRCLE_PROJECT_USERNAME}" == "tahoe-lafs" ]; then
            echo "Required credentials (CACHIX_AUTH_TOKEN) are missing."
            return 1
        else
            echo "Cachix credentials missing; will not attempt cache writes."
            "$@"
        fi
    fi
}
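Taken together with the config.yml hunks above, the intent is that each nix-build step now sources .circleci/lib.sh and runs its build under cache_if_able, so new store objects are streamed to Cachix by `cachix watch-exec` while the build runs, and only when CACHIX_AUTH_TOKEN is available; this replaces the old "Push to Cachix" step that compared store paths before and after the build.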
.github/workflows/ci.yml (vendored, 38 lines changed)
@@ -153,19 +153,21 @@ jobs:
strategy:
fail-fast: false
matrix:
os:
- windows-latest
include:
- os: macos-latest
python-version: "3.9"
force-foolscap: false
- os: windows-latest
python-version: "3.9"
force-foolscap: false
# 22.04 has some issue with Tor at the moment:
# https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3943
- ubuntu-20.04
python-version:
- 3.7
- 3.9
include:
# On macOS don't bother with 3.7, just to get faster builds.
- os: macos-latest
python-version: 3.9

- os: ubuntu-20.04
python-version: "3.7"
force-foolscap: true
- os: ubuntu-20.04
python-version: "3.9"
force-foolscap: false
steps:

- name: Install Tor [Ubuntu]
@@ -206,12 +208,24 @@ jobs:
run: python misc/build_helpers/show-tool-versions.py

- name: Run "Python 3 integration tests"
if: "${{ !matrix.force-foolscap }}"
env:
# On macOS this is necessary to ensure unix socket paths for tor
# aren't too long. On Windows tox won't pass it through so it has no
# effect. On Linux it doesn't make a difference one way or another.
TMPDIR: "/tmp"
run: tox -e integration
run: |
tox -e integration

- name: Run "Python 3 integration tests (force Foolscap)"
if: "${{ matrix.force-foolscap }}"
env:
# On macOS this is necessary to ensure unix socket paths for tor
# aren't too long. On Windows tox won't pass it through so it has no
# effect. On Linux it doesn't make a difference one way or another.
TMPDIR: "/tmp"
run: |
tox -e integration -- --force-foolscap integration/

- name: Upload eliot.log in case of failure
uses: actions/upload-artifact@v3
@@ -1,15 +1,6 @@
"""
Ported to Python 3.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401

import sys
import shutil
from time import sleep
@@ -66,6 +57,13 @@ def pytest_addoption(parser):
"--coverage", action="store_true", dest="coverage",
help="Collect coverage statistics",
)
parser.addoption(
"--force-foolscap", action="store_true", default=False,
dest="force_foolscap",
help=("If set, force Foolscap only for the storage protocol. " +
"Otherwise HTTP will be used.")
)


@pytest.fixture(autouse=True, scope='session')
def eliot_logging():
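For orientation, the flag registered above is read back later in this commit with request.config.getoption("force_foolscap") and written into each node's storage configuration. A minimal sketch of consuming the option directly (hypothetical fixture and test names, not part of this diff):

import pytest

@pytest.fixture(scope="session")
def force_foolscap(request):
    # False unless the test run was started with --force-foolscap.
    return request.config.getoption("force_foolscap")

def test_storage_protocol_choice(force_foolscap):
    # Hypothetical example: choose the storage protocol from the flag.
    protocol = "foolscap" if force_foolscap else "http"
    assert protocol in ("foolscap", "http")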
@@ -1,14 +1,6 @@
"""
Ported to Python 3.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401

import sys
import time
@@ -300,6 +292,14 @@ def _create_node(reactor, request, temp_dir, introducer_furl, flog_gatherer, nam
u'log_gatherer.furl',
flog_gatherer,
)
force_foolscap = request.config.getoption("force_foolscap")
assert force_foolscap in (True, False)
set_config(
config,
'storage',
'force_foolscap',
str(force_foolscap),
)
write_config(FilePath(config_path), config)
created_d.addCallback(created)
newsfragments/3870.minor (new empty file)
newsfragments/3937.minor (new empty file)
newsfragments/3942.minor (new file, 1 line)
newsfragments/3947.minor (new empty file)
newsfragments/3954.minor (new empty file)
setup.py (4 lines changed)
@@ -96,7 +96,9 @@ install_requires = [
# an sftp extra in Tahoe-LAFS, there is no point in having one.
# * Twisted 19.10 introduces Site.getContentFile which we use to get
# temporary upload files placed into a per-node temporary directory.
"Twisted[tls,conch] >= 19.10.0",
# * Twisted 22.8.0 added support for coroutine-returning functions in many
# places (mainly via `maybeDeferred`)
"Twisted[tls,conch] >= 22.8.0",

"PyYAML >= 3.11",
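The new comment points at Twisted's acceptance of coroutine-returning callables via maybeDeferred. A minimal sketch of that pattern (illustrative only; assumes a Twisted at least as new as the pin above):

from twisted.internet import defer

async def fetch_value():
    # A coroutine-returning function. maybeDeferred wraps the coroutine
    # it returns in a Deferred rather than treating it as a plain value.
    return 42

def get_value_deferred():
    d = defer.maybeDeferred(fetch_value)
    d.addCallback(lambda value: print("got", value))
    return d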
@@ -323,6 +323,7 @@ class StorageClient(object):
swissnum = nurl.path[0].encode("ascii")
certificate_hash = nurl.user.encode("ascii")
pool = HTTPConnectionPool(reactor)
pool.maxPersistentPerHost = 20

if cls.TEST_MODE_REGISTER_HTTP_POOL is not None:
cls.TEST_MODE_REGISTER_HTTP_POOL(pool)
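For context, HTTPConnectionPool is Twisted's persistent-connection pool and maxPersistentPerHost defaults to 2, so raising it to 20 lets many concurrent requests to the one storage server reuse kept-alive connections. A rough sketch of the general pattern (illustrative, not the client's actual wiring):

from twisted.internet import reactor
from twisted.web.client import Agent, HTTPConnectionPool

pool = HTTPConnectionPool(reactor, persistent=True)
# Keep up to 20 cached connections per host instead of the default 2.
pool.maxPersistentPerHost = 20
agent = Agent(reactor, pool=pool)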
@@ -100,7 +100,7 @@ def _authorization_decorator(required_secrets):
@wraps(f)
def route(self, request, *args, **kwargs):
if not timing_safe_compare(
request.requestHeaders.getRawHeaders("Authorization", [None])[0].encode(
request.requestHeaders.getRawHeaders("Authorization", [""])[0].encode(
"utf-8"
),
swissnum_auth_header(self._swissnum),
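The default changing from [None] to [""] matters because the header value is encoded immediately: with no Authorization header at all, the old code called .encode() on None and raised instead of failing the comparison. A small illustration of the difference (standalone snippet, not part of the module):

def encode_auth_header(raw_headers):
    # Mirrors getRawHeaders("Authorization", default)[0].encode("utf-8").
    return raw_headers[0].encode("utf-8")

try:
    encode_auth_header([None])   # old default: a missing header raised AttributeError
except AttributeError:
    print("missing header used to crash the route")

print(encode_auth_header([""]))  # new default: b"", timing_safe_compare simply fails -> 401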
@@ -1,19 +1,12 @@
"""
Ported to Python 3.
Tests related to the way ``allmydata.mutable`` handles different versions
of data for an object.
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from future.utils import PY2
if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401

from io import StringIO
import os
from six.moves import cStringIO as StringIO
from typing import Optional

from twisted.internet import defer
from ..common import AsyncTestCase
from testtools.matchers import (
Equals,
@@ -47,343 +40,268 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
self.small_data = b"test data" * 10 # 90 B; SDMF

def do_upload_mdmf(self, data=None):
async def do_upload_mdmf(self, data: Optional[bytes] = None) -> MutableFileNode:
if data is None:
data = self.data
d = self.nm.create_mutable_file(MutableData(data),
version=MDMF_VERSION)
def _then(n):
self.assertThat(n, IsInstance(MutableFileNode))
self.assertThat(n._protocol_version, Equals(MDMF_VERSION))
self.mdmf_node = n
return n
d.addCallback(_then)
return d
n = await self.nm.create_mutable_file(MutableData(data),
version=MDMF_VERSION)
self.assertThat(n, IsInstance(MutableFileNode))
self.assertThat(n._protocol_version, Equals(MDMF_VERSION))
self.mdmf_node = n
return n

def do_upload_sdmf(self, data=None):
async def do_upload_sdmf(self, data: Optional[bytes] = None) -> MutableFileNode:
if data is None:
data = self.small_data
d = self.nm.create_mutable_file(MutableData(data))
def _then(n):
self.assertThat(n, IsInstance(MutableFileNode))
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
self.sdmf_node = n
return n
d.addCallback(_then)
return d
n = await self.nm.create_mutable_file(MutableData(data))
self.assertThat(n, IsInstance(MutableFileNode))
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
self.sdmf_node = n
return n

def do_upload_empty_sdmf(self):
d = self.nm.create_mutable_file(MutableData(b""))
def _then(n):
self.assertThat(n, IsInstance(MutableFileNode))
self.sdmf_zero_length_node = n
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
return n
d.addCallback(_then)
return d
async def do_upload_empty_sdmf(self) -> MutableFileNode:
n = await self.nm.create_mutable_file(MutableData(b""))
self.assertThat(n, IsInstance(MutableFileNode))
self.sdmf_zero_length_node = n
self.assertThat(n._protocol_version, Equals(SDMF_VERSION))
return n

def do_upload(self):
d = self.do_upload_mdmf()
d.addCallback(lambda ign: self.do_upload_sdmf())
return d
async def do_upload(self) -> MutableFileNode:
await self.do_upload_mdmf()
return await self.do_upload_sdmf()

def test_debug(self):
d = self.do_upload_mdmf()
def _debug(n):
fso = debug.FindSharesOptions()
storage_index = base32.b2a(n.get_storage_index())
fso.si_s = str(storage_index, "utf-8") # command-line options are unicode on Python 3
fso.nodedirs = [os.path.dirname(abspath_expanduser_unicode(str(storedir)))
for (i,ss,storedir)
in self.iterate_servers()]
fso.stdout = StringIO()
fso.stderr = StringIO()
debug.find_shares(fso)
sharefiles = fso.stdout.getvalue().splitlines()
expected = self.nm.default_encoding_parameters["n"]
self.assertThat(sharefiles, HasLength(expected))
async def test_debug(self) -> None:
n = await self.do_upload_mdmf()
fso = debug.FindSharesOptions()
storage_index = base32.b2a(n.get_storage_index())
fso.si_s = str(storage_index, "utf-8") # command-line options are unicode on Python 3
fso.nodedirs = [os.path.dirname(abspath_expanduser_unicode(str(storedir)))
for (i,ss,storedir)
in self.iterate_servers()]
fso.stdout = StringIO()
fso.stderr = StringIO()
debug.find_shares(fso)
sharefiles = fso.stdout.getvalue().splitlines()
expected = self.nm.default_encoding_parameters["n"]
self.assertThat(sharefiles, HasLength(expected))

do = debug.DumpOptions()
do["filename"] = sharefiles[0]
do.stdout = StringIO()
debug.dump_share(do)
output = do.stdout.getvalue()
lines = set(output.splitlines())
self.assertTrue("Mutable slot found:" in lines, output)
self.assertTrue(" share_type: MDMF" in lines, output)
self.assertTrue(" num_extra_leases: 0" in lines, output)
self.assertTrue(" MDMF contents:" in lines, output)
self.assertTrue(" seqnum: 1" in lines, output)
self.assertTrue(" required_shares: 3" in lines, output)
self.assertTrue(" total_shares: 10" in lines, output)
self.assertTrue(" segsize: 131073" in lines, output)
self.assertTrue(" datalen: %d" % len(self.data) in lines, output)
vcap = str(n.get_verify_cap().to_string(), "utf-8")
self.assertTrue(" verify-cap: %s" % vcap in lines, output)
cso = debug.CatalogSharesOptions()
cso.nodedirs = fso.nodedirs
cso.stdout = StringIO()
cso.stderr = StringIO()
debug.catalog_shares(cso)
shares = cso.stdout.getvalue().splitlines()
oneshare = shares[0] # all shares should be MDMF
self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
self.assertTrue(oneshare.startswith("MDMF"), oneshare)
fields = oneshare.split()
self.assertThat(fields[0], Equals("MDMF"))
self.assertThat(fields[1].encode("ascii"), Equals(storage_index))
self.assertThat(fields[2], Equals("3/10"))
self.assertThat(fields[3], Equals("%d" % len(self.data)))
self.assertTrue(fields[4].startswith("#1:"), fields[3])
# the rest of fields[4] is the roothash, which depends upon
# encryption salts and is not constant. fields[5] is the
# remaining time on the longest lease, which is timing dependent.
# The rest of the line is the quoted pathname to the share.
d.addCallback(_debug)
return d
do = debug.DumpOptions()
do["filename"] = sharefiles[0]
do.stdout = StringIO()
debug.dump_share(do)
output = do.stdout.getvalue()
lines = set(output.splitlines())
self.assertTrue("Mutable slot found:" in lines, output)
self.assertTrue(" share_type: MDMF" in lines, output)
self.assertTrue(" num_extra_leases: 0" in lines, output)
self.assertTrue(" MDMF contents:" in lines, output)
self.assertTrue(" seqnum: 1" in lines, output)
self.assertTrue(" required_shares: 3" in lines, output)
self.assertTrue(" total_shares: 10" in lines, output)
self.assertTrue(" segsize: 131073" in lines, output)
self.assertTrue(" datalen: %d" % len(self.data) in lines, output)
vcap = str(n.get_verify_cap().to_string(), "utf-8")
self.assertTrue(" verify-cap: %s" % vcap in lines, output)
cso = debug.CatalogSharesOptions()
cso.nodedirs = fso.nodedirs
cso.stdout = StringIO()
cso.stderr = StringIO()
debug.catalog_shares(cso)
shares = cso.stdout.getvalue().splitlines()
oneshare = shares[0] # all shares should be MDMF
self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
self.assertTrue(oneshare.startswith("MDMF"), oneshare)
fields = oneshare.split()
self.assertThat(fields[0], Equals("MDMF"))
self.assertThat(fields[1].encode("ascii"), Equals(storage_index))
self.assertThat(fields[2], Equals("3/10"))
self.assertThat(fields[3], Equals("%d" % len(self.data)))
self.assertTrue(fields[4].startswith("#1:"), fields[3])
# the rest of fields[4] is the roothash, which depends upon
# encryption salts and is not constant. fields[5] is the
# remaining time on the longest lease, which is timing dependent.
# The rest of the line is the quoted pathname to the share.

async def test_get_sequence_number(self) -> None:
await self.do_upload()
bv = await self.mdmf_node.get_best_readable_version()
self.assertThat(bv.get_sequence_number(), Equals(1))
bv = await self.sdmf_node.get_best_readable_version()
self.assertThat(bv.get_sequence_number(), Equals(1))

def test_get_sequence_number(self):
d = self.do_upload()
d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
d.addCallback(lambda bv:
self.assertThat(bv.get_sequence_number(), Equals(1)))
d.addCallback(lambda ignored:
self.sdmf_node.get_best_readable_version())
d.addCallback(lambda bv:
self.assertThat(bv.get_sequence_number(), Equals(1)))
# Now update. The sequence number in both cases should be 1 in
# both cases.
def _do_update(ignored):
new_data = MutableData(b"foo bar baz" * 100000)
new_small_data = MutableData(b"foo bar baz" * 10)
d1 = self.mdmf_node.overwrite(new_data)
d2 = self.sdmf_node.overwrite(new_small_data)
dl = gatherResults([d1, d2])
return dl
d.addCallback(_do_update)
d.addCallback(lambda ignored:
self.mdmf_node.get_best_readable_version())
d.addCallback(lambda bv:
self.assertThat(bv.get_sequence_number(), Equals(2)))
d.addCallback(lambda ignored:
self.sdmf_node.get_best_readable_version())
d.addCallback(lambda bv:
self.assertThat(bv.get_sequence_number(), Equals(2)))
return d
new_data = MutableData(b"foo bar baz" * 100000)
new_small_data = MutableData(b"foo bar baz" * 10)
d1 = self.mdmf_node.overwrite(new_data)
d2 = self.sdmf_node.overwrite(new_small_data)
await gatherResults([d1, d2])
bv = await self.mdmf_node.get_best_readable_version()
self.assertThat(bv.get_sequence_number(), Equals(2))
bv = await self.sdmf_node.get_best_readable_version()
self.assertThat(bv.get_sequence_number(), Equals(2))

def test_cap_after_upload(self):
async def test_cap_after_upload(self) -> None:
# If we create a new mutable file and upload things to it, and
# it's an MDMF file, we should get an MDMF cap back from that
# file and should be able to use that.
# That's essentially what MDMF node is, so just check that.
d = self.do_upload_mdmf()
def _then(ign):
mdmf_uri = self.mdmf_node.get_uri()
cap = uri.from_string(mdmf_uri)
self.assertTrue(isinstance(cap, uri.WriteableMDMFFileURI))
readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
cap = uri.from_string(readonly_mdmf_uri)
self.assertTrue(isinstance(cap, uri.ReadonlyMDMFFileURI))
d.addCallback(_then)
return d
await self.do_upload_mdmf()
mdmf_uri = self.mdmf_node.get_uri()
cap = uri.from_string(mdmf_uri)
self.assertTrue(isinstance(cap, uri.WriteableMDMFFileURI))
readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
cap = uri.from_string(readonly_mdmf_uri)
self.assertTrue(isinstance(cap, uri.ReadonlyMDMFFileURI))

def test_mutable_version(self):
async def test_mutable_version(self) -> None:
# assert that getting parameters from the IMutableVersion object
# gives us the same data as getting them from the filenode itself
d = self.do_upload()
d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
def _check_mdmf(bv):
n = self.mdmf_node
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
self.assertFalse(bv.is_readonly())
d.addCallback(_check_mdmf)
d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
def _check_sdmf(bv):
n = self.sdmf_node
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
self.assertFalse(bv.is_readonly())
d.addCallback(_check_sdmf)
return d
await self.do_upload()
bv = await self.mdmf_node.get_best_mutable_version()
n = self.mdmf_node
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
self.assertFalse(bv.is_readonly())

bv = await self.sdmf_node.get_best_mutable_version()
n = self.sdmf_node
self.assertThat(bv.get_writekey(), Equals(n.get_writekey()))
self.assertThat(bv.get_storage_index(), Equals(n.get_storage_index()))
self.assertFalse(bv.is_readonly())

def test_get_readonly_version(self):
d = self.do_upload()
d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
d.addCallback(lambda bv: self.assertTrue(bv.is_readonly()))
async def test_get_readonly_version(self) -> None:
await self.do_upload()
bv = await self.mdmf_node.get_best_readable_version()
self.assertTrue(bv.is_readonly())

# Attempting to get a mutable version of a mutable file from a
# filenode initialized with a readcap should return a readonly
# version of that same node.
d.addCallback(lambda ign: self.mdmf_node.get_readonly())
d.addCallback(lambda ro: ro.get_best_mutable_version())
d.addCallback(lambda v: self.assertTrue(v.is_readonly()))
ro = self.mdmf_node.get_readonly()
v = await ro.get_best_mutable_version()
self.assertTrue(v.is_readonly())

d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
d.addCallback(lambda bv: self.assertTrue(bv.is_readonly()))
bv = await self.sdmf_node.get_best_readable_version()
self.assertTrue(bv.is_readonly())

d.addCallback(lambda ign: self.sdmf_node.get_readonly())
d.addCallback(lambda ro: ro.get_best_mutable_version())
d.addCallback(lambda v: self.assertTrue(v.is_readonly()))
return d
ro = self.sdmf_node.get_readonly()
v = await ro.get_best_mutable_version()
self.assertTrue(v.is_readonly())

def test_toplevel_overwrite(self):
async def test_toplevel_overwrite(self) -> None:
new_data = MutableData(b"foo bar baz" * 100000)
new_small_data = MutableData(b"foo bar baz" * 10)
d = self.do_upload()
d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
d.addCallback(lambda ignored:
self.mdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Equals(b"foo bar baz" * 100000)))
d.addCallback(lambda ignored:
self.sdmf_node.overwrite(new_small_data))
d.addCallback(lambda ignored:
self.sdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Equals(b"foo bar baz" * 10)))
return d
await self.do_upload()
await self.mdmf_node.overwrite(new_data)
data = await self.mdmf_node.download_best_version()
self.assertThat(data, Equals(b"foo bar baz" * 100000))
await self.sdmf_node.overwrite(new_small_data)
data = await self.sdmf_node.download_best_version()
self.assertThat(data, Equals(b"foo bar baz" * 10))

def test_toplevel_modify(self):
d = self.do_upload()
async def test_toplevel_modify(self) -> None:
await self.do_upload()
def modifier(old_contents, servermap, first_time):
return old_contents + b"modified"
d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
d.addCallback(lambda ignored:
self.mdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Contains(b"modified")))
d.addCallback(lambda ignored:
self.sdmf_node.modify(modifier))
d.addCallback(lambda ignored:
self.sdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Contains(b"modified")))
return d
await self.mdmf_node.modify(modifier)
data = await self.mdmf_node.download_best_version()
self.assertThat(data, Contains(b"modified"))
await self.sdmf_node.modify(modifier)
data = await self.sdmf_node.download_best_version()
self.assertThat(data, Contains(b"modified"))

def test_version_modify(self):
async def test_version_modify(self) -> None:
# TODO: When we can publish multiple versions, alter this test
# to modify a version other than the best usable version, then
# test to see that the best recoverable version is that.
d = self.do_upload()
await self.do_upload()
def modifier(old_contents, servermap, first_time):
return old_contents + b"modified"
d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
d.addCallback(lambda ignored:
self.mdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Contains(b"modified")))
d.addCallback(lambda ignored:
self.sdmf_node.modify(modifier))
d.addCallback(lambda ignored:
self.sdmf_node.download_best_version())
d.addCallback(lambda data:
self.assertThat(data, Contains(b"modified")))
return d
await self.mdmf_node.modify(modifier)
data = await self.mdmf_node.download_best_version()
self.assertThat(data, Contains(b"modified"))
await self.sdmf_node.modify(modifier)
data = await self.sdmf_node.download_best_version()
self.assertThat(data, Contains(b"modified"))

def test_download_version(self):
d = self.publish_multiple()
async def test_download_version(self) -> None:
await self.publish_multiple()
# We want to have two recoverable versions on the grid.
d.addCallback(lambda res:
self._set_versions({0:0,2:0,4:0,6:0,8:0,
1:1,3:1,5:1,7:1,9:1}))
self._set_versions({0:0,2:0,4:0,6:0,8:0,
1:1,3:1,5:1,7:1,9:1})
# Now try to download each version. We should get the plaintext
# associated with that version.
d.addCallback(lambda ignored:
self._fn.get_servermap(mode=MODE_READ))
def _got_servermap(smap):
versions = smap.recoverable_versions()
assert len(versions) == 2
smap = await self._fn.get_servermap(mode=MODE_READ)
versions = smap.recoverable_versions()
assert len(versions) == 2

self.servermap = smap
self.version1, self.version2 = versions
assert self.version1 != self.version2
self.servermap = smap
self.version1, self.version2 = versions
assert self.version1 != self.version2

self.version1_seqnum = self.version1[0]
self.version2_seqnum = self.version2[0]
self.version1_index = self.version1_seqnum - 1
self.version2_index = self.version2_seqnum - 1
self.version1_seqnum = self.version1[0]
self.version2_seqnum = self.version2[0]
self.version1_index = self.version1_seqnum - 1
self.version2_index = self.version2_seqnum - 1

d.addCallback(_got_servermap)
d.addCallback(lambda ignored:
self._fn.download_version(self.servermap, self.version1))
d.addCallback(lambda results:
self.assertThat(self.CONTENTS[self.version1_index],
Equals(results)))
d.addCallback(lambda ignored:
self._fn.download_version(self.servermap, self.version2))
d.addCallback(lambda results:
self.assertThat(self.CONTENTS[self.version2_index],
Equals(results)))
return d
results = await self._fn.download_version(self.servermap, self.version1)
self.assertThat(self.CONTENTS[self.version1_index],
Equals(results))
results = await self._fn.download_version(self.servermap, self.version2)
self.assertThat(self.CONTENTS[self.version2_index],
Equals(results))

def test_download_nonexistent_version(self):
d = self.do_upload_mdmf()
d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
def _set_servermap(servermap):
self.servermap = servermap
d.addCallback(_set_servermap)
d.addCallback(lambda ignored:
self.shouldFail(UnrecoverableFileError, "nonexistent version",
None,
self.mdmf_node.download_version, self.servermap,
"not a version"))
return d
async def test_download_nonexistent_version(self) -> None:
await self.do_upload_mdmf()
servermap = await self.mdmf_node.get_servermap(mode=MODE_WRITE)
await self.shouldFail(UnrecoverableFileError, "nonexistent version",
None,
self.mdmf_node.download_version, servermap,
"not a version")

def _test_partial_read(self, node, expected, modes, step):
d = node.get_best_readable_version()
async def _test_partial_read(self, node, expected, modes, step) -> None:
version = await node.get_best_readable_version()
for (name, offset, length) in modes:
d.addCallback(self._do_partial_read, name, expected, offset, length)
await self._do_partial_read(version, name, expected, offset, length)
# then read the whole thing, but only a few bytes at a time, and see
# that the results are what we expect.
def _read_data(version):
c = consumer.MemoryConsumer()
d2 = defer.succeed(None)
for i in range(0, len(expected), step):
d2.addCallback(lambda ignored, i=i: version.read(c, i, step))
d2.addCallback(lambda ignored:
self.assertThat(expected, Equals(b"".join(c.chunks))))
return d2
d.addCallback(_read_data)
return d

def _do_partial_read(self, version, name, expected, offset, length):
c = consumer.MemoryConsumer()
d = version.read(c, offset, length)
for i in range(0, len(expected), step):
await version.read(c, i, step)
self.assertThat(expected, Equals(b"".join(c.chunks)))

async def _do_partial_read(self, version, name, expected, offset, length) -> None:
c = consumer.MemoryConsumer()
await version.read(c, offset, length)
if length is None:
expected_range = expected[offset:]
else:
expected_range = expected[offset:offset+length]
d.addCallback(lambda ignored: b"".join(c.chunks))
def _check(results):
if results != expected_range:
print("read([%d]+%s) got %d bytes, not %d" % \
(offset, length, len(results), len(expected_range)))
print("got: %s ... %s" % (results[:20], results[-20:]))
print("exp: %s ... %s" % (expected_range[:20], expected_range[-20:]))
self.fail("results[%s] != expected_range" % name)
return version # daisy-chained to next call
d.addCallback(_check)
return d
results = b"".join(c.chunks)
if results != expected_range:
print("read([%d]+%s) got %d bytes, not %d" % \
(offset, length, len(results), len(expected_range)))
print("got: %r ... %r" % (results[:20], results[-20:]))
print("exp: %r ... %r" % (expected_range[:20], expected_range[-20:]))
self.fail("results[%s] != expected_range" % name)

def test_partial_read_mdmf_0(self):
async def test_partial_read_mdmf_0(self) -> None:
data = b""
d = self.do_upload_mdmf(data=data)
result = await self.do_upload_mdmf(data=data)
modes = [("all1", 0,0),
("all2", 0,None),
]
d.addCallback(self._test_partial_read, data, modes, 1)
return d
await self._test_partial_read(result, data, modes, 1)

def test_partial_read_mdmf_large(self):
async def test_partial_read_mdmf_large(self) -> None:
segment_boundary = mathutil.next_multiple(128 * 1024, 3)
modes = [("start_on_segment_boundary", segment_boundary, 50),
("ending_one_byte_after_segment_boundary", segment_boundary-50, 51),
@@ -393,20 +311,18 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
("complete_file1", 0, len(self.data)),
("complete_file2", 0, None),
]
d = self.do_upload_mdmf()
d.addCallback(self._test_partial_read, self.data, modes, 10000)
return d
result = await self.do_upload_mdmf()
await self._test_partial_read(result, self.data, modes, 10000)

def test_partial_read_sdmf_0(self):
async def test_partial_read_sdmf_0(self) -> None:
data = b""
modes = [("all1", 0,0),
("all2", 0,None),
]
d = self.do_upload_sdmf(data=data)
d.addCallback(self._test_partial_read, data, modes, 1)
return d
result = await self.do_upload_sdmf(data=data)
await self._test_partial_read(result, data, modes, 1)

def test_partial_read_sdmf_2(self):
async def test_partial_read_sdmf_2(self) -> None:
data = b"hi"
modes = [("one_byte", 0, 1),
("last_byte", 1, 1),
@@ -414,11 +330,10 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
("complete_file", 0, 2),
("complete_file2", 0, None),
]
d = self.do_upload_sdmf(data=data)
d.addCallback(self._test_partial_read, data, modes, 1)
return d
result = await self.do_upload_sdmf(data=data)
await self._test_partial_read(result, data, modes, 1)

def test_partial_read_sdmf_90(self):
async def test_partial_read_sdmf_90(self) -> None:
modes = [("start_at_middle", 50, 40),
("start_at_middle2", 50, None),
("zero_length_at_start", 0, 0),
@@ -427,11 +342,10 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
("complete_file1", 0, None),
("complete_file2", 0, 90),
]
d = self.do_upload_sdmf()
d.addCallback(self._test_partial_read, self.small_data, modes, 10)
return d
result = await self.do_upload_sdmf()
await self._test_partial_read(result, self.small_data, modes, 10)

def test_partial_read_sdmf_100(self):
async def test_partial_read_sdmf_100(self) -> None:
data = b"test data "*10
modes = [("start_at_middle", 50, 50),
("start_at_middle2", 50, None),
@@ -440,42 +354,30 @@ class Version(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin, \
("complete_file1", 0, 100),
("complete_file2", 0, None),
]
d = self.do_upload_sdmf(data=data)
d.addCallback(self._test_partial_read, data, modes, 10)
return d
result = await self.do_upload_sdmf(data=data)
await self._test_partial_read(result, data, modes, 10)

async def _test_read_and_download(self, node, expected) -> None:
version = await node.get_best_readable_version()
c = consumer.MemoryConsumer()
await version.read(c)
self.assertThat(expected, Equals(b"".join(c.chunks)))

def _test_read_and_download(self, node, expected):
d = node.get_best_readable_version()
def _read_data(version):
c = consumer.MemoryConsumer()
c2 = consumer.MemoryConsumer()
d2 = defer.succeed(None)
d2.addCallback(lambda ignored: version.read(c))
d2.addCallback(lambda ignored:
self.assertThat(expected, Equals(b"".join(c.chunks))))
c2 = consumer.MemoryConsumer()
await version.read(c2, offset=0, size=len(expected))
self.assertThat(expected, Equals(b"".join(c2.chunks)))

d2.addCallback(lambda ignored: version.read(c2, offset=0,
size=len(expected)))
d2.addCallback(lambda ignored:
self.assertThat(expected, Equals(b"".join(c2.chunks))))
return d2
d.addCallback(_read_data)
d.addCallback(lambda ignored: node.download_best_version())
d.addCallback(lambda data: self.assertThat(expected, Equals(data)))
return d
data = await node.download_best_version()
self.assertThat(expected, Equals(data))

def test_read_and_download_mdmf(self):
d = self.do_upload_mdmf()
d.addCallback(self._test_read_and_download, self.data)
return d
async def test_read_and_download_mdmf(self) -> None:
result = await self.do_upload_mdmf()
await self._test_read_and_download(result, self.data)

def test_read_and_download_sdmf(self):
d = self.do_upload_sdmf()
d.addCallback(self._test_read_and_download, self.small_data)
return d
async def test_read_and_download_sdmf(self) -> None:
result = await self.do_upload_sdmf()
await self._test_read_and_download(result, self.small_data)

def test_read_and_download_sdmf_zero_length(self):
d = self.do_upload_empty_sdmf()
d.addCallback(self._test_read_and_download, b"")
return d
async def test_read_and_download_sdmf_zero_length(self) -> None:
result = await self.do_upload_empty_sdmf()
await self._test_read_and_download(result, b"")

@@ -37,6 +37,7 @@ from twisted.web import http
from twisted.web.http_headers import Headers
from werkzeug import routing
from werkzeug.exceptions import NotFound as WNotFound
from testtools.matchers import Equals

from .common import SyncTestCase
from ..storage.http_common import get_content_type, CBOR_MIME_TYPE
@@ -555,6 +556,20 @@ class GenericHTTPAPITests(SyncTestCase):
super(GenericHTTPAPITests, self).setUp()
self.http = self.useFixture(HttpTestFixture())

def test_missing_authentication(self) -> None:
"""
If nothing is given in the ``Authorization`` header at all an
``Unauthorized`` response is returned.
"""
client = StubTreq(self.http.http_server.get_resource())
response = self.http.result_of_with_flush(
client.request(
"GET",
"http://127.0.0.1/storage/v1/version",
),
)
self.assertThat(response.code, Equals(http.UNAUTHORIZED))

def test_bad_authentication(self):
"""
If the wrong swissnum is used, an ``Unauthorized`` response code is