Merge remote-tracking branch 'origin/master' into 3935-happy-eyeballs

Itamar Turner-Trauring committed 2023-02-21 09:59:00 -05:00
commit aacf3223c2
29 changed files with 422 additions and 139 deletions

.circleci/circleci.txt (new file, 78 lines)

@ -0,0 +1,78 @@
# A master build looks like this:
# BASH_ENV=/tmp/.bash_env-63d018969ca480003a031e62-0-build
# CI=true
# CIRCLECI=true
# CIRCLE_BRANCH=master
# CIRCLE_BUILD_NUM=76545
# CIRCLE_BUILD_URL=https://circleci.com/gh/tahoe-lafs/tahoe-lafs/76545
# CIRCLE_JOB=NixOS 21.11
# CIRCLE_NODE_INDEX=0
# CIRCLE_NODE_TOTAL=1
# CIRCLE_PROJECT_REPONAME=tahoe-lafs
# CIRCLE_PROJECT_USERNAME=tahoe-lafs
# CIRCLE_REPOSITORY_URL=git@github.com:tahoe-lafs/tahoe-lafs.git
# CIRCLE_SHA1=ed0bda2d7456f4a2cd60870072e1fe79864a49a1
# CIRCLE_SHELL_ENV=/tmp/.bash_env-63d018969ca480003a031e62-0-build
# CIRCLE_USERNAME=alice
# CIRCLE_WORKFLOW_ID=6d9bb71c-be3a-4659-bf27-60954180619b
# CIRCLE_WORKFLOW_JOB_ID=0793c975-7b9f-489f-909b-8349b72d2785
# CIRCLE_WORKFLOW_WORKSPACE_ID=6d9bb71c-be3a-4659-bf27-60954180619b
# CIRCLE_WORKING_DIRECTORY=~/project
# A build of an in-repo PR looks like this:
# BASH_ENV=/tmp/.bash_env-63d1971a0298086d8841287e-0-build
# CI=true
# CIRCLECI=true
# CIRCLE_BRANCH=3946-less-chatty-downloads
# CIRCLE_BUILD_NUM=76612
# CIRCLE_BUILD_URL=https://circleci.com/gh/tahoe-lafs/tahoe-lafs/76612
# CIRCLE_JOB=NixOS 21.11
# CIRCLE_NODE_INDEX=0
# CIRCLE_NODE_TOTAL=1
# CIRCLE_PROJECT_REPONAME=tahoe-lafs
# CIRCLE_PROJECT_USERNAME=tahoe-lafs
# CIRCLE_PULL_REQUEST=https://github.com/tahoe-lafs/tahoe-lafs/pull/1251
# CIRCLE_PULL_REQUESTS=https://github.com/tahoe-lafs/tahoe-lafs/pull/1251
# CIRCLE_REPOSITORY_URL=git@github.com:tahoe-lafs/tahoe-lafs.git
# CIRCLE_SHA1=921a2083dcefdb5f431cdac195fc9ac510605349
# CIRCLE_SHELL_ENV=/tmp/.bash_env-63d1971a0298086d8841287e-0-build
# CIRCLE_USERNAME=bob
# CIRCLE_WORKFLOW_ID=5e32c12e-be37-4868-9fa8-6a6929fec2f1
# CIRCLE_WORKFLOW_JOB_ID=316ca408-81b4-4c96-bbdd-644e4c3e01e5
# CIRCLE_WORKFLOW_WORKSPACE_ID=5e32c12e-be37-4868-9fa8-6a6929fec2f1
# CIRCLE_WORKING_DIRECTORY=~/project
# CI_PULL_REQUEST=https://github.com/tahoe-lafs/tahoe-lafs/pull/1251
# A build of a PR from a fork looks like this:
# BASH_ENV=/tmp/.bash_env-63d40f7b2e89cd3de10e0db9-0-build
# CI=true
# CIRCLECI=true
# CIRCLE_BRANCH=pull/1252
# CIRCLE_BUILD_NUM=76678
# CIRCLE_BUILD_URL=https://circleci.com/gh/tahoe-lafs/tahoe-lafs/76678
# CIRCLE_JOB=NixOS 21.05
# CIRCLE_NODE_INDEX=0
# CIRCLE_NODE_TOTAL=1
# CIRCLE_PROJECT_REPONAME=tahoe-lafs
# CIRCLE_PROJECT_USERNAME=tahoe-lafs
# CIRCLE_PR_NUMBER=1252
# CIRCLE_PR_REPONAME=tahoe-lafs
# CIRCLE_PR_USERNAME=carol
# CIRCLE_PULL_REQUEST=https://github.com/tahoe-lafs/tahoe-lafs/pull/1252
# CIRCLE_PULL_REQUESTS=https://github.com/tahoe-lafs/tahoe-lafs/pull/1252
# CIRCLE_REPOSITORY_URL=git@github.com:tahoe-lafs/tahoe-lafs.git
# CIRCLE_SHA1=15c7916e0812e6baa2a931cd54b18f3382a8456e
# CIRCLE_SHELL_ENV=/tmp/.bash_env-63d40f7b2e89cd3de10e0db9-0-build
# CIRCLE_USERNAME=
# CIRCLE_WORKFLOW_ID=19c917c8-3a38-4b20-ac10-3265259fa03e
# CIRCLE_WORKFLOW_JOB_ID=58e95215-eccf-4664-a231-1dba7fd2d323
# CIRCLE_WORKFLOW_WORKSPACE_ID=19c917c8-3a38-4b20-ac10-3265259fa03e
# CIRCLE_WORKING_DIRECTORY=~/project
# CI_PULL_REQUEST=https://github.com/tahoe-lafs/tahoe-lafs/pull/1252
# A build of a PR from a fork where the owner has enabled CircleCI looks
# the same as a build of an in-repo PR, except it runs on the owner's
# CircleCI namespace.


@ -1,21 +1,20 @@
# Run a command, enabling cache writes to cachix if possible. The command is
# accepted as a variable number of positional arguments (like argv).
function cache_if_able() {
# The `cachix watch-exec ...` does our cache population. When it sees
# something added to the store (I guess) it pushes it to the named cache.
#
# We can only *push* to it if we have a CACHIX_AUTH_TOKEN, though.
# in-repo jobs will get this from CircleCI configuration but jobs from
# forks may not.
echo "Building PR from user/org: ${CIRCLE_PROJECT_USERNAME}"
if [ -v CACHIX_AUTH_TOKEN ]; then
# Dump some info about our build environment.
describe_build
if is_cache_writeable; then
# If the cache is available we'll use it. This lets fork owners set
# up their own caching if they want.
echo "Cachix credentials present; will attempt to write to cache."
# The `cachix watch-exec ...` does our cache population. When it sees
# something added to the store (I guess) it pushes it to the named
# cache.
cachix watch-exec "${CACHIX_NAME}" -- "$@"
else
# If we're building from a forked repository then we're allowed to
# not have the credentials (but it's also fine if the owner of the
# fork supplied their own).
if [ "${CIRCLE_PROJECT_USERNAME}" == "tahoe-lafs" ]; then
if is_cache_required; then
echo "Required credentials (CACHIX_AUTH_TOKEN) are missing."
return 1
else
@ -24,3 +23,97 @@ function cache_if_able() {
fi
fi
}
function is_cache_writeable() {
# We can only *push* to the cache if we have a CACHIX_AUTH_TOKEN. in-repo
# jobs will get this from CircleCI configuration but jobs from forks may
# not.
[ -v CACHIX_AUTH_TOKEN ]
}
function is_cache_required() {
# If we're building in tahoe-lafs/tahoe-lafs then we must use the cache.
# If we're building anything from a fork then we're allowed to not have
# the credentials.
is_upstream
}
# Return success if the origin of this build is the tahoe-lafs/tahoe-lafs
# repository itself (and so we expect to have cache credentials available),
# failure otherwise.
#
# See circleci.txt for notes about how this determination is made.
function is_upstream() {
# CIRCLE_PROJECT_USERNAME is set to the org the build is happening for.
# If a PR targets a fork of the repo then this is set to something other
# than "tahoe-lafs".
[ "$CIRCLE_PROJECT_USERNAME" == "tahoe-lafs" ] &&
# CIRCLE_BRANCH is set to the real branch name for in-repo PRs and
# "pull/NNNN" for pull requests from forks.
#
# CIRCLE_PULL_REQUESTS is set to a comma-separated list of the full
# URLs of the PR pages which share an underlying branch, with one of
# them ended with that same "pull/NNNN" for PRs from forks.
! any_element_endswith "/$CIRCLE_BRANCH" "," "$CIRCLE_PULL_REQUESTS"
}
# Return success if splitting $3 on $2 results in an array with any element
# that ends with $1, failure otherwise.
function any_element_endswith() {
suffix=$1
shift
sep=$1
shift
haystack=$1
shift
IFS="${sep}" read -r -a elements <<< "$haystack"
for elem in "${elements[@]}"; do
if endswith "$suffix" "$elem"; then
return 0
fi
done
return 1
}
# Return success if $2 ends with $1, failure otherwise.
function endswith() {
suffix=$1
shift
haystack=$1
shift
case "$haystack" in
*${suffix})
return 0
;;
*)
return 1
;;
esac
}
function describe_build() {
echo "Building PR for user/org: ${CIRCLE_PROJECT_USERNAME}"
echo "Building branch: ${CIRCLE_BRANCH}"
if is_upstream; then
echo "Upstream build."
else
echo "Non-upstream build."
fi
if is_cache_required; then
echo "Cache is required."
else
echo "Cache not required."
fi
if is_cache_writeable; then
echo "Cache is writeable."
else
echo "Cache not writeable."
fi
}
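The upstream check above boils down to: a build is upstream only if CIRCLE_PROJECT_USERNAME is "tahoe-lafs" and none of the URLs in CIRCLE_PULL_REQUESTS end with "/$CIRCLE_BRANCH", which only happens when CIRCLE_BRANCH is the synthetic "pull/NNNN" branch of a PR from a fork. A minimal Python sketch of the same determination, for illustration only (not part of the repository), using the environment variables documented in circleci.txt:

import os

def is_upstream_build() -> bool:
    """Hypothetical re-statement of is_upstream() above, for illustration only."""
    # Builds running in a fork owner's own CircleCI project report a
    # project username other than "tahoe-lafs".
    if os.environ.get("CIRCLE_PROJECT_USERNAME") != "tahoe-lafs":
        return False
    # For PRs from forks, CIRCLE_BRANCH is "pull/NNNN" and one of the URLs in
    # CIRCLE_PULL_REQUESTS ends with that same "pull/NNNN".
    branch = os.environ.get("CIRCLE_BRANCH", "")
    pull_requests = os.environ.get("CIRCLE_PULL_REQUESTS", "")
    return not any(
        url.endswith("/" + branch)
        for url in pull_requests.split(",")
        if url
    )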


@ -1,10 +0,0 @@
FROM python:2.7
ADD . /tahoe-lafs
RUN \
cd /tahoe-lafs && \
git pull --depth=100 && \
pip install . && \
rm -rf ~/.cache/
WORKDIR /root


@ -1,25 +0,0 @@
FROM debian:9
LABEL maintainer "gordon@leastauthority.com"
RUN apt-get update
RUN DEBIAN_FRONTEND=noninteractive apt-get -yq upgrade
RUN DEBIAN_FRONTEND=noninteractive apt-get -yq install build-essential python-dev libffi-dev libssl-dev python-virtualenv git
RUN \
git clone https://github.com/tahoe-lafs/tahoe-lafs.git /root/tahoe-lafs; \
cd /root/tahoe-lafs; \
virtualenv --python=python2.7 venv; \
./venv/bin/pip install --upgrade setuptools; \
./venv/bin/pip install --editable .; \
./venv/bin/tahoe --version;
RUN \
cd /root; \
mkdir /root/.tahoe-client; \
mkdir /root/.tahoe-introducer; \
mkdir /root/.tahoe-server;
RUN /root/tahoe-lafs/venv/bin/tahoe create-introducer --location=tcp:introducer:3458 --port=tcp:3458 /root/.tahoe-introducer
RUN /root/tahoe-lafs/venv/bin/tahoe start /root/.tahoe-introducer
RUN /root/tahoe-lafs/venv/bin/tahoe create-node --location=tcp:server:3457 --port=tcp:3457 --introducer=$(cat /root/.tahoe-introducer/private/introducer.furl) /root/.tahoe-server
RUN /root/tahoe-lafs/venv/bin/tahoe create-client --webport=3456 --introducer=$(cat /root/.tahoe-introducer/private/introducer.furl) --basedir=/root/.tahoe-client --shares-needed=1 --shares-happy=1 --shares-total=1
VOLUME ["/root/.tahoe-client", "/root/.tahoe-server", "/root/.tahoe-introducer"]
EXPOSE 3456 3457 3458
ENTRYPOINT ["/root/tahoe-lafs/venv/bin/tahoe"]
CMD []


@ -0,0 +1,106 @@
"""
First attempt at benchmarking uploads and downloads.
To run:
$ pytest benchmarks/upload_download.py -s -v -Wignore
TODO Parameterization (pytest?)
- Foolscap vs not foolscap
- Number of nodes
- Data size
- Number of needed/happy/total shares.
CAVEATS: The goal here isn't a realistic benchmark, or a benchmark that will be
measured over time, or is expected to be maintainable over time. This is just
a quick and easy way to measure the speed of certain operations, compare HTTP
and Foolscap, and see the short-term impact of changes.
Eventually this will be replaced by a real benchmark suite that can be run over
time to measure something more meaningful.
"""
from time import time, process_time
from contextlib import contextmanager
from tempfile import mkdtemp
import os
from twisted.trial.unittest import TestCase
from allmydata.util.deferredutil import async_to_deferred
from allmydata.util.consumer import MemoryConsumer
from allmydata.test.common_system import SystemTestMixin
from allmydata.immutable.upload import Data as UData
from allmydata.mutable.publish import MutableData
@contextmanager
def timeit(name):
start = time()
start_cpu = process_time()
try:
yield
finally:
print(
f"{name}: {time() - start:.3f} elapsed, {process_time() - start_cpu:.3f} CPU"
)
class ImmutableBenchmarks(SystemTestMixin, TestCase):
"""Benchmarks for immutables."""
# To use Foolscap, change to True:
FORCE_FOOLSCAP_FOR_STORAGE = False
@async_to_deferred
async def setUp(self):
SystemTestMixin.setUp(self)
self.basedir = os.path.join(mkdtemp(), "nodes")
# 2 nodes
await self.set_up_nodes(2)
# 1 share
for c in self.clients:
c.encoding_params["k"] = 1
c.encoding_params["happy"] = 1
c.encoding_params["n"] = 1
print()
@async_to_deferred
async def test_upload_and_download_immutable(self):
# To test larger files, change this:
DATA = b"Some data to upload\n" * 10
for i in range(5):
# 1. Upload:
with timeit(" upload"):
uploader = self.clients[0].getServiceNamed("uploader")
results = await uploader.upload(UData(DATA, convergence=None))
# 2. Download:
with timeit("download"):
uri = results.get_uri()
node = self.clients[1].create_node_from_uri(uri)
mc = await node.read(MemoryConsumer(), 0, None)
self.assertEqual(b"".join(mc.chunks), DATA)
@async_to_deferred
async def test_upload_and_download_mutable(self):
# To test larger files, change this:
DATA = b"Some data to upload\n" * 10
for i in range(5):
# 1. Upload:
with timeit(" upload"):
result = await self.clients[0].create_mutable_file(MutableData(DATA))
# 2. Download:
with timeit("download"):
data = await result.download_best_version()
self.assertEqual(data, DATA)


@ -1,49 +0,0 @@
version: '2'
services:
client:
build:
context: .
dockerfile: ./Dockerfile.dev
volumes:
- ./misc:/root/tahoe-lafs/misc
- ./integration:/root/tahoe-lafs/integration
- ./src:/root/tahoe-lafs/static
- ./setup.cfg:/root/tahoe-lafs/setup.cfg
- ./setup.py:/root/tahoe-lafs/setup.py
ports:
- "127.0.0.1:3456:3456"
depends_on:
- "introducer"
- "server"
entrypoint: /root/tahoe-lafs/venv/bin/tahoe
command: ["run", "/root/.tahoe-client"]
server:
build:
context: .
dockerfile: ./Dockerfile.dev
volumes:
- ./misc:/root/tahoe-lafs/misc
- ./integration:/root/tahoe-lafs/integration
- ./src:/root/tahoe-lafs/static
- ./setup.cfg:/root/tahoe-lafs/setup.cfg
- ./setup.py:/root/tahoe-lafs/setup.py
ports:
- "127.0.0.1:3457:3457"
depends_on:
- "introducer"
entrypoint: /root/tahoe-lafs/venv/bin/tahoe
command: ["run", "/root/.tahoe-server"]
introducer:
build:
context: .
dockerfile: ./Dockerfile.dev
volumes:
- ./misc:/root/tahoe-lafs/misc
- ./integration:/root/tahoe-lafs/integration
- ./src:/root/tahoe-lafs/static
- ./setup.cfg:/root/tahoe-lafs/setup.cfg
- ./setup.py:/root/tahoe-lafs/setup.py
ports:
- "127.0.0.1:3458:3458"
entrypoint: /root/tahoe-lafs/venv/bin/tahoe
command: ["run", "/root/.tahoe-introducer"]


@ -3,11 +3,14 @@ Integration tests for getting and putting files, including reading from stdin
and stdout.
"""
from subprocess import Popen, PIPE
from subprocess import Popen, PIPE, check_output
import sys
import pytest
from pytest_twisted import ensureDeferred
from twisted.internet import reactor
from .util import run_in_thread, cli
from .util import run_in_thread, cli, reconfigure
DATA = b"abc123 this is not utf-8 decodable \xff\x00\x33 \x11"
try:
@ -62,3 +65,51 @@ def test_get_to_stdout(alice, get_put_alias, tmpdir):
)
assert p.stdout.read() == DATA
assert p.wait() == 0
@pytest.mark.skipif(
sys.platform.startswith("win"),
reason="reconfigure() has issues on Windows"
)
@ensureDeferred
async def test_upload_download_immutable_different_default_max_segment_size(alice, get_put_alias, tmpdir, request):
"""
Tahoe-LAFS used to have a default max segment size of 128KB; it is now
1MB. Test that an upload created when 128KB was the default can be
downloaded with 1MB as the default (i.e. old uploader, new downloader), and
vice versa (new uploader, old downloader).
"""
tempfile = tmpdir.join("file")
large_data = DATA * 100_000
assert len(large_data) > 2 * 1024 * 1024
with tempfile.open("wb") as f:
f.write(large_data)
async def set_segment_size(segment_size):
await reconfigure(
reactor,
request,
alice,
(1, 1, 1),
None,
max_segment_size=segment_size
)
# 1. Upload file 1 with default segment size set to 1MB
await set_segment_size(1024 * 1024)
cli(alice, "put", str(tempfile), "getput:seg1024kb")
# 2. Download file 1 with default segment size set to 128KB
await set_segment_size(128 * 1024)
assert large_data == check_output(
["tahoe", "--node-directory", alice.node_dir, "get", "getput:seg1024kb", "-"]
)
# 3. Upload file 2 with default segment size set to 128KB
cli(alice, "put", str(tempfile), "getput:seg128kb")
# 4. Download file 2 with default segment size set to 1MB
await set_segment_size(1024 * 1024)
assert large_data == check_output(
["tahoe", "--node-directory", alice.node_dir, "get", "getput:seg128kb", "-"]
)


@ -36,7 +36,8 @@ async def test_capability(reactor, request, alice, case, expected):
computed value.
"""
# rewrite alice's config to match params and convergence
await reconfigure(reactor, request, alice, (1, case.params.required, case.params.total), case.convergence)
await reconfigure(
reactor, request, alice, (1, case.params.required, case.params.total), case.convergence, case.segment_size)
# upload data in the correct format
actual = upload(alice, case.fmt, case.data)
@ -110,7 +111,8 @@ async def generate(
request,
alice,
(happy, case.params.required, case.params.total),
case.convergence
case.convergence,
case.segment_size
)
# Give the format a chance to make an RSA key if it needs it.


@ -46,6 +46,7 @@ from allmydata.util.configutil import (
write_config,
)
from allmydata import client
from allmydata.interfaces import DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE
import pytest_twisted
@ -729,11 +730,16 @@ def upload(alice: TahoeProcess, fmt: CHK | SSK, data: bytes) -> str:
return cli(*argv).decode("utf-8").strip()
async def reconfigure(reactor, request, node: TahoeProcess, params: tuple[int, int, int], convergence: None | bytes) -> None:
async def reconfigure(reactor, request, node: TahoeProcess,
params: tuple[int, int, int],
convergence: None | bytes,
max_segment_size: None | int = None) -> None:
"""
Reconfigure a Tahoe-LAFS node with different ZFEC parameters and
convergence secret.
TODO This appears to have issues on Windows.
If the current configuration is different from the specified
configuration, the node will be restarted so it takes effect.
@ -769,7 +775,22 @@ async def reconfigure(reactor, request, node: TahoeProcess, params: tuple[int, i
changed = True
config.write_private_config("convergence", base32.b2a(convergence))
if max_segment_size is not None:
cur_segment_size = int(config.get_config("client", "shares._max_immutable_segment_size_for_testing", DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE))
if cur_segment_size != max_segment_size:
changed = True
config.set_config(
"client",
"shares._max_immutable_segment_size_for_testing",
str(max_segment_size)
)
if changed:
# TODO reconfigure() seems to have issues on Windows. If you need to
# use it there, delete this assert and try to figure out what's going
# on...
assert not sys.platform.startswith("win")
# restart the node
print(f"Restarting {node.node_dir} for ZFEC reconfiguration")
await node.restart_async(reactor, request)


@ -5,7 +5,7 @@ from __future__ import print_function
import sys, math
from allmydata import uri, storage
from allmydata.immutable import upload
from allmydata.interfaces import DEFAULT_MAX_SEGMENT_SIZE
from allmydata.interfaces import DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE
from allmydata.util import mathutil
def roundup(size, blocksize=4096):
@ -26,7 +26,7 @@ class BigFakeString(object):
def tell(self):
return self.fp
def calc(filesize, params=(3,7,10), segsize=DEFAULT_MAX_SEGMENT_SIZE):
def calc(filesize, params=(3,7,10), segsize=DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE):
num_shares = params[2]
if filesize <= upload.Uploader.URI_LIT_SIZE_THRESHOLD:
urisize = len(uri.LiteralFileURI("A"*filesize).to_string())


@ -1,3 +1,10 @@
[mypy]
ignore_missing_imports = True
plugins=mypy_zope:plugin
show_column_numbers = True
pretty = True
show_error_codes = True
warn_unused_configs =True
no_implicit_optional = True
warn_redundant_casts = True
strict_equality = True


@ -0,0 +1 @@
Downloads of large immutables should now finish much faster.

newsfragments/3952.minor (new empty file)


@ -1 +1 @@
Fix incompatibility with newer versions of the transitive charset_normalizer dependency when using PyInstaller.
Fix incompatibility with transitive dependency charset_normalizer >= 3 when using PyInstaller.

newsfragments/3969.minor (new empty file)

newsfragments/3971.minor (new file, 1 line)

@ -0,0 +1 @@
Changes made to mypy.ini to make mypy more 'strict' and prevent future regressions.

newsfragments/3974.minor (new empty file)

newsfragments/3975.minor (new file, 1 line)

@ -0,0 +1 @@
Fixes an always-truthy conditional in status.py.

newsfragments/3976.minor (new file, 1 line)

@ -0,0 +1 @@
Fixes a variable name that shadowed a built-in type.


@ -36,6 +36,7 @@ hidden_imports = [
'allmydata.stats',
'base64',
'cffi',
'charset_normalizer.md__mypyc',
'collections',
'commands',
'Crypto',


@ -147,13 +147,6 @@ install_requires = [
# for pid-file support
"psutil",
"filelock",
# treq needs requests, requests needs charset_normalizer,
# charset_normalizer breaks PyInstaller
# (https://github.com/Ousret/charset_normalizer/issues/253). So work around
# this by using a lower version number. Once upstream issue is fixed, or
# requests drops charset_normalizer, this can go away.
"charset_normalizer < 3",
]
setup_requires = [


@ -50,7 +50,7 @@ from allmydata.interfaces import (
IStatsProducer,
SDMF_VERSION,
MDMF_VERSION,
DEFAULT_MAX_SEGMENT_SIZE,
DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE,
IFoolscapStoragePlugin,
IAnnounceableStorageServer,
)
@ -88,6 +88,7 @@ _client_config = configutil.ValidConfiguration(
"shares.happy",
"shares.needed",
"shares.total",
"shares._max_immutable_segment_size_for_testing",
"storage.plugins",
),
"storage": (
@ -606,7 +607,7 @@ class _Client(node.Node, pollmixin.PollMixin):
DEFAULT_ENCODING_PARAMETERS = {"k": 3,
"happy": 7,
"n": 10,
"max_segment_size": DEFAULT_MAX_SEGMENT_SIZE,
"max_segment_size": DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE,
}
def __init__(self, config, main_tub, i2p_provider, tor_provider, introducer_clients,
@ -896,6 +897,13 @@ class _Client(node.Node, pollmixin.PollMixin):
DEP["k"] = int(self.config.get_config("client", "shares.needed", DEP["k"]))
DEP["n"] = int(self.config.get_config("client", "shares.total", DEP["n"]))
DEP["happy"] = int(self.config.get_config("client", "shares.happy", DEP["happy"]))
# At the moment this is only used for testing, thus the janky config
# attribute name.
DEP["max_segment_size"] = int(self.config.get_config(
"client",
"shares._max_immutable_segment_size_for_testing",
DEP["max_segment_size"])
)
# for the CLI to authenticate to local JSON endpoints
self._create_auth_token()


@ -19,7 +19,7 @@ from foolscap.api import eventually
from allmydata import uri
from allmydata.codec import CRSDecoder
from allmydata.util import base32, log, hashutil, mathutil, observer
from allmydata.interfaces import DEFAULT_MAX_SEGMENT_SIZE
from allmydata.interfaces import DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE
from allmydata.hashtree import IncompleteHashTree, BadHashError, \
NotEnoughHashesError
@ -49,6 +49,8 @@ class DownloadNode(object):
"""Internal class which manages downloads and holds state. External
callers use CiphertextFileNode instead."""
default_max_segment_size = DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE
# Share._node points to me
def __init__(self, verifycap, storage_broker, secret_holder,
terminator, history, download_status):
@ -76,7 +78,7 @@ class DownloadNode(object):
# .guessed_segment_size, .guessed_num_segments, and
# .ciphertext_hash_tree (with a dummy, to let us guess which hashes
# we'll need)
self._build_guessed_tables(DEFAULT_MAX_SEGMENT_SIZE)
self._build_guessed_tables(self.default_max_segment_size)
# filled in when we parse a valid UEB
self.have_UEB = False


@ -48,7 +48,7 @@ from allmydata.util.rrefutil import add_version_to_remote_reference
from allmydata.interfaces import IUploadable, IUploader, IUploadResults, \
IEncryptedUploadable, RIEncryptedUploadable, IUploadStatus, \
NoServersError, InsufficientVersionError, UploadUnhappinessError, \
DEFAULT_MAX_SEGMENT_SIZE, IPeerSelector
DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE, IPeerSelector
from allmydata.immutable import layout
from io import BytesIO
@ -1692,7 +1692,7 @@ class AssistedUploader(object):
class BaseUploadable(object):
# this is overridden by max_segment_size
default_max_segment_size = DEFAULT_MAX_SEGMENT_SIZE
default_max_segment_size = DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE
default_params_set = False
max_segment_size = None


@ -41,7 +41,8 @@ URI = StringConstraint(300) # kind of arbitrary
MAX_BUCKETS = 256 # per peer -- zfec offers at most 256 shares per file
DEFAULT_MAX_SEGMENT_SIZE = 128*1024
# The default size for segments of new CHK ("immutable") uploads.
DEFAULT_IMMUTABLE_MAX_SEGMENT_SIZE = 1024*1024
ShareData = StringConstraint(None)
URIExtensionData = StringConstraint(1000)
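To get a feel for why the larger immutable default matters (and for the newsfragment above about large immutable downloads finishing faster), here is a rough back-of-the-envelope sketch, not a benchmark; the 100 MiB file size is an arbitrary example:

# Old default vs. the new immutable-only default, as defined above.
OLD_DEFAULT_MAX_SEGMENT_SIZE = 128 * 1024       # 128 KiB
NEW_IMMUTABLE_MAX_SEGMENT_SIZE = 1024 * 1024    # 1 MiB

file_size = 100 * 1024 * 1024                   # hypothetical 100 MiB immutable file

def num_segments(size: int, segment_size: int) -> int:
    # Ceiling division: a final partial segment still counts as a segment.
    return -(-size // segment_size)

print(num_segments(file_size, OLD_DEFAULT_MAX_SEGMENT_SIZE))    # 800 segments
print(num_segments(file_size, NEW_IMMUTABLE_MAX_SEGMENT_SIZE))  # 100 segments

Fewer segments means fewer per-segment requests and hash-tree lookups for the same amount of data, which is where the expected speedup comes from.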


@ -36,7 +36,7 @@ from allmydata.mutable.layout import get_version_from_checkstring,\
SDMFSlotWriteProxy
KiB = 1024
DEFAULT_MAX_SEGMENT_SIZE = 128 * KiB
DEFAULT_MUTABLE_MAX_SEGMENT_SIZE = 128 * KiB
PUSHING_BLOCKS_STATE = 0
PUSHING_EVERYTHING_ELSE_STATE = 1
DONE_STATE = 2
@ -367,7 +367,7 @@ class Publish(object):
self.data = newdata
self.datalength = newdata.get_size()
#if self.datalength >= DEFAULT_MAX_SEGMENT_SIZE:
#if self.datalength >= DEFAULT_MUTABLE_MAX_SEGMENT_SIZE:
# self._version = MDMF_VERSION
#else:
# self._version = SDMF_VERSION
@ -551,7 +551,7 @@ class Publish(object):
def setup_encoding_parameters(self, offset=0):
if self._version == MDMF_VERSION:
segment_size = DEFAULT_MAX_SEGMENT_SIZE # 128 KiB by default
segment_size = DEFAULT_MUTABLE_MAX_SEGMENT_SIZE # 128 KiB by default
else:
segment_size = self.datalength # SDMF is only one segment
# this must be a multiple of self.required_shares


@ -20,7 +20,7 @@ from testtools.matchers import (
from twisted.internet import defer
from allmydata.interfaces import MDMF_VERSION
from allmydata.mutable.filenode import MutableFileNode
from allmydata.mutable.publish import MutableData, DEFAULT_MAX_SEGMENT_SIZE
from allmydata.mutable.publish import MutableData, DEFAULT_MUTABLE_MAX_SEGMENT_SIZE
from ..no_network import GridTestMixin
from .. import common_util as testutil
@ -180,7 +180,7 @@ class Update(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin):
# long -- this is 7 segments in the default segment size. So we
# need to add 2 segments worth of data to push it over a
# power-of-two boundary.
segment = b"a" * DEFAULT_MAX_SEGMENT_SIZE
segment = b"a" * DEFAULT_MUTABLE_MAX_SEGMENT_SIZE
new_data = self.data + (segment * 2)
d0 = self.do_upload_mdmf()
def _run(ign):
@ -232,9 +232,9 @@ class Update(GridTestMixin, AsyncTestCase, testutil.ShouldFailMixin):
return d0
def test_multiple_segment_replace(self):
replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
replace_offset = 2 * DEFAULT_MUTABLE_MAX_SEGMENT_SIZE
new_data = self.data[:replace_offset]
new_segment = b"a" * DEFAULT_MAX_SEGMENT_SIZE
new_segment = b"a" * DEFAULT_MUTABLE_MAX_SEGMENT_SIZE
new_data += 2 * new_segment
new_data += b"replaced"
rest_offset = len(new_data)


@ -707,12 +707,12 @@ def url_for_string(req, url_string):
T = TypeVar("T")
@overload
def get_arg(req: IRequest, argname: str | bytes, default: T = None, *, multiple: Literal[False] = False) -> T | bytes: ...
def get_arg(req: IRequest, argname: str | bytes, default: Optional[T] = None, *, multiple: Literal[False] = False) -> T | bytes: ...
@overload
def get_arg(req: IRequest, argname: str | bytes, default: T = None, *, multiple: Literal[True]) -> T | tuple[bytes, ...]: ...
def get_arg(req: IRequest, argname: str | bytes, default: Optional[T] = None, *, multiple: Literal[True]) -> T | tuple[bytes, ...]: ...
def get_arg(req: IRequest, argname: str | bytes, default: T = None, *, multiple: bool = False) -> None | T | bytes | tuple[bytes, ...]:
def get_arg(req: IRequest, argname: str | bytes, default: Optional[T] = None, *, multiple: bool = False) -> None | T | bytes | tuple[bytes, ...]:
"""Extract an argument from either the query args (req.args) or the form
body fields (req.fields). If multiple=False, this returns a single value
(or the default, which defaults to None), and the query args take
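The overload change above (default: T = None becoming default: Optional[T] = None) is most likely fallout from the stricter mypy settings added to mypy.ini earlier in this diff: with no_implicit_optional = True, a parameter whose default is None must be annotated as Optional explicitly. A minimal, hypothetical illustration (not tahoe-lafs code):

from typing import Optional, TypeVar

T = TypeVar("T")

def before(value: T = None) -> T:
    # Flagged by mypy once no_implicit_optional = True:
    # the default None is not compatible with type T.
    return value

def after(value: Optional[T] = None) -> Optional[T]:
    # Accepted: the None default is spelled out in the annotation.
    return value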


@ -550,7 +550,7 @@ class DownloadStatusElement(Element):
length = r_ev["length"]
bytes_returned = r_ev["bytes_returned"]
decrypt_time = ""
if bytes:
if bytes_returned:
decrypt_time = self._rate_and_time(bytes_returned, r_ev["decrypt_time"])
speed, rtt = "",""
if r_ev["finish_time"] is not None:
@ -1616,30 +1616,30 @@ class StatisticsElement(Element):
@renderer
def uploads(self, req, tag):
files = self._stats["counters"].get("uploader.files_uploaded", 0)
bytes = self._stats["counters"].get("uploader.bytes_uploaded", 0)
bytes_uploaded = self._stats["counters"].get("uploader.bytes_uploaded", 0)
return tag(("%s files / %s bytes (%s)" %
(files, bytes, abbreviate_size(bytes))))
(files, bytes_uploaded, abbreviate_size(bytes_uploaded))))
@renderer
def downloads(self, req, tag):
files = self._stats["counters"].get("downloader.files_downloaded", 0)
bytes = self._stats["counters"].get("downloader.bytes_downloaded", 0)
bytes_uploaded = self._stats["counters"].get("downloader.bytes_downloaded", 0)
return tag("%s files / %s bytes (%s)" %
(files, bytes, abbreviate_size(bytes)))
(files, bytes_uploaded, abbreviate_size(bytes_uploaded)))
@renderer
def publishes(self, req, tag):
files = self._stats["counters"].get("mutable.files_published", 0)
bytes = self._stats["counters"].get("mutable.bytes_published", 0)
return tag("%s files / %s bytes (%s)" % (files, bytes,
abbreviate_size(bytes)))
bytes_uploaded = self._stats["counters"].get("mutable.bytes_published", 0)
return tag("%s files / %s bytes (%s)" % (files, bytes_uploaded,
abbreviate_size(bytes_uploaded)))
@renderer
def retrieves(self, req, tag):
files = self._stats["counters"].get("mutable.files_retrieved", 0)
bytes = self._stats["counters"].get("mutable.bytes_retrieved", 0)
return tag("%s files / %s bytes (%s)" % (files, bytes,
abbreviate_size(bytes)))
bytes_uploaded = self._stats["counters"].get("mutable.bytes_retrieved", 0)
return tag("%s files / %s bytes (%s)" % (files, bytes_uploaded,
abbreviate_size(bytes_uploaded)))
@renderer
def raw(self, req, tag):