2021-11-12 12:51:52 -05:00
|
|
|
"""
|
|
|
|
HTTP client that talks to the HTTP storage server.
|
|
|
|
"""
|
|
|
|
|
2022-04-14 11:45:47 -04:00
|
|
|
from __future__ import annotations
|
|
|
|
|
2022-07-06 09:47:08 -04:00
|
|
|
from typing import Union, Optional, Sequence, Mapping, BinaryIO
|
2021-12-16 11:17:11 -05:00
|
|
|
from base64 import b64encode
|
2022-06-30 13:48:33 -04:00
|
|
|
from io import BytesIO
|
2022-07-19 16:10:22 -04:00
|
|
|
from os import SEEK_END
|
2021-11-16 10:56:21 -05:00
|
|
|
|
2022-06-03 13:46:23 -04:00
|
|
|
from attrs import define, asdict, frozen, field
|
2022-01-05 16:06:29 -05:00
|
|
|
|
2021-11-12 13:13:19 -05:00
|
|
|
# TODO Make sure to import Python version?
|
2022-01-06 12:36:46 -05:00
|
|
|
from cbor2 import loads, dumps
|
2022-04-11 14:03:48 -04:00
|
|
|
from pycddl import Schema
|
2022-01-21 12:36:58 -05:00
|
|
|
from collections_extended import RangeMap
|
2022-01-20 13:10:42 -05:00
|
|
|
from werkzeug.datastructures import Range, ContentRange
|
2021-11-16 10:56:21 -05:00
|
|
|
from twisted.web.http_headers import Headers
|
2022-01-07 14:15:16 -05:00
|
|
|
from twisted.web import http
|
2022-03-23 17:18:06 -04:00
|
|
|
from twisted.web.iweb import IPolicyForHTTPS
|
2022-07-06 09:38:31 -04:00
|
|
|
from twisted.internet.defer import inlineCallbacks, returnValue, fail, Deferred, succeed
|
2022-03-23 17:18:06 -04:00
|
|
|
from twisted.internet.interfaces import IOpenSSLClientConnectionCreator
|
|
|
|
from twisted.internet.ssl import CertificateOptions
|
|
|
|
from twisted.web.client import Agent, HTTPConnectionPool
|
|
|
|
from zope.interface import implementer
|
2021-11-12 12:51:52 -05:00
|
|
|
from hyperlink import DecodedURL
|
|
|
|
import treq
|
2022-03-07 08:21:58 -05:00
|
|
|
from treq.client import HTTPClient
|
|
|
|
from treq.testing import StubTreq
|
2022-03-23 17:18:06 -04:00
|
|
|
from OpenSSL import SSL
|
2022-03-28 11:27:32 -04:00
|
|
|
from cryptography.hazmat.bindings.openssl.binding import Binding
|
2022-07-19 16:10:22 -04:00
|
|
|
from werkzeug.http import parse_content_range_header
|
2022-03-23 17:18:06 -04:00
|
|
|
|
|
|
|
from .http_common import (
|
|
|
|
swissnum_auth_header,
|
|
|
|
Secrets,
|
|
|
|
get_content_type,
|
|
|
|
CBOR_MIME_TYPE,
|
|
|
|
get_spki_hash,
|
|
|
|
)
|
2022-01-12 11:51:56 -05:00
|
|
|
from .common import si_b2a
|
2022-03-25 10:45:54 -04:00
|
|
|
from ..util.hashutil import timing_safe_compare
|
2022-04-15 09:56:06 -04:00
|
|
|
from ..util.deferredutil import async_to_deferred
|
2022-01-12 11:51:56 -05:00
|
|
|
|
2022-03-28 11:27:32 -04:00
|
|
|
_OPENSSL = Binding().lib
|
|
|
|
|
2022-01-12 11:51:56 -05:00
|
|
|
|
|
|
|
def _encode_si(si):  # type: (bytes) -> str
    """Return the base32 text form of a binary storage index."""
    return si_b2a(si).decode("ascii")
|
2022-01-06 12:36:46 -05:00
|
|
|
|
2021-11-12 12:51:52 -05:00
|
|
|
|
2021-11-12 13:13:19 -05:00
|
|
|
class ClientException(Exception):
    """An unexpected response code from the server."""

    def __init__(self, code, *additional_args):
        # Keep all arguments on the exception for display, and expose the
        # HTTP status code as an attribute for programmatic checks.
        super().__init__(code, *additional_args)
        self.code = code
|
2021-11-12 13:13:19 -05:00
|
|
|
|
|
|
|
|
2022-04-11 14:03:48 -04:00
|
|
|
# Schemas for server responses.
#
# Tags are of the form #6.nnn, where the number is documented at
# https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml. Notably, #6.258
# indicates a set.
#
# Each entry maps an operation name to the CDDL schema its CBOR response is
# validated against before being decoded (see ``_decode_cbor``).
_SCHEMAS = {
    "get_version": Schema(
        """
        response = {'http://allmydata.org/tahoe/protocols/storage/v1' => {
            'maximum-immutable-share-size' => uint
            'maximum-mutable-share-size' => uint
            'available-space' => uint
            'tolerates-immutable-read-overrun' => bool
            'delete-mutable-shares-with-zero-length-writev' => bool
            'fills-holes-with-zero-bytes' => bool
            'prevents-read-past-end-of-share-data' => bool
            }
            'application-version' => bstr
        }
        """
    ),
    "allocate_buckets": Schema(
        """
        response = {
            already-have: #6.258([* uint])
            allocated: #6.258([* uint])
        }
        """
    ),
    "immutable_write_share_chunk": Schema(
        """
        response = {
            required: [* {begin: uint, end: uint}]
        }
        """
    ),
    "list_shares": Schema(
        """
        response = #6.258([* uint])
        """
    ),
    "mutable_read_test_write": Schema(
        """
        response = {
            "success": bool,
            "data": {* share_number: [* bstr]}
        }
        share_number = uint
        """
    ),
    "mutable_list_shares": Schema(
        """
        response = #6.258([* uint])
        """
    ),
}
|
|
|
|
|
|
|
|
|
2022-06-30 13:48:33 -04:00
|
|
|
@define
class _LengthLimitedCollector:
    """
    Collect data using ``treq.collect()``, with limited length.

    Instances are used as the per-chunk callback passed to
    ``treq.collect()``; the collected body accumulates in ``f``.
    """

    # Bytes we are still willing to accept; decremented per chunk and goes
    # negative once the response exceeds the allowed maximum.
    remaining_length: int
    # Buffer that accumulates the response body.
    f: BytesIO = field(factory=BytesIO)

    def __call__(self, data: bytes):
        # Called by treq once per received chunk.  Decrement first so the
        # check below covers the cumulative size, then raise *before* writing
        # the over-limit chunk.
        self.remaining_length -= len(data)
        if self.remaining_length < 0:
            raise ValueError("Response length was too long")
        self.f.write(data)
|
|
|
|
|
|
|
|
|
2022-07-06 09:47:08 -04:00
|
|
|
def limited_content(response, max_length: int = 30 * 1024 * 1024) -> Deferred[BinaryIO]:
    """
    Like ``treq.content()``, but limit data read from the response to a set
    length.  If the response is longer than the max allowed length, the result
    fails with a ``ValueError``.

    A potentially useful future improvement would be using a temporary file to
    store the content; since filesystem buffering means that would use memory
    for small responses and disk for large responses.

    :param response: the treq/twisted HTTP response to drain.
    :param max_length: maximum number of body bytes to accept.
    :return: a ``Deferred`` firing with a rewound binary file-like object
        containing the full body.
    """
    collector = _LengthLimitedCollector(max_length)
    # Make really sure everything gets called in Deferred context, treq might
    # call collector directly...
    d = succeed(None)
    d.addCallback(lambda _: treq.collect(response, collector))

    def done(_):
        # Rewind so callers can read the collected body from the start.
        collector.f.seek(0)
        return collector.f

    d.addCallback(done)
    return d
|
|
|
|
|
|
|
|
|
2022-04-11 14:03:48 -04:00
|
|
|
def _decode_cbor(response, schema: Schema):
    """Given HTTP response, return decoded CBOR body.

    On a 2xx CBOR response, the body is length-limited via
    ``limited_content``, validated against *schema*, and decoded.  A 2xx
    response with a non-CBOR content type raises ``ClientException``
    synchronously; any non-2xx response produces a failing ``Deferred``
    carrying a ``ClientException`` with the code, phrase, and raw body.
    """

    def got_content(f: BinaryIO):
        data = f.read()
        # Validate before decoding, so malformed/hostile CBOR is rejected
        # by the schema checker rather than handed to ``loads``.
        schema.validate_cbor(data)
        return loads(data)

    if response.code > 199 and response.code < 300:
        content_type = get_content_type(response.headers)
        if content_type == CBOR_MIME_TYPE:
            return limited_content(response).addCallback(got_content)
        else:
            raise ClientException(-1, "Server didn't send CBOR")
    else:
        # Read the error body so it can be included in the exception.
        return treq.content(response).addCallback(
            lambda data: fail(ClientException(response.code, response.phrase, data))
        )
|
2021-11-12 12:51:52 -05:00
|
|
|
|
|
|
|
|
2022-04-15 09:19:30 -04:00
|
|
|
@define
class ImmutableCreateResult(object):
    """Result of creating a storage index for an immutable."""

    # Share numbers the server already has for this storage index.
    already_have: set[int]
    # Share numbers the server allocated for us to upload.
    allocated: set[int]
|
2022-01-05 16:06:29 -05:00
|
|
|
|
|
|
|
|
2022-03-23 17:18:06 -04:00
|
|
|
class _TLSContextFactory(CertificateOptions):
    """
    Create a context that validates the way Tahoe-LAFS wants to: based on a
    pinned certificate hash, rather than a certificate authority.

    Originally implemented as part of Foolscap. To comply with the license,
    here's the original licensing terms:

    Copyright (c) 2006-2008 Brian Warner

    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.
    """

    def __init__(self, expected_spki_hash: bytes):
        # Hash of the server's SubjectPublicKeyInfo; the only thing we
        # validate against (certificate pinning).
        self.expected_spki_hash = expected_spki_hash
        CertificateOptions.__init__(self)

    def getContext(self) -> SSL.Context:
        def always_validate(conn, cert, errno, depth, preverify_ok):
            # This function is called to validate the certificate received by
            # the other end. OpenSSL calls it multiple times, for each errno
            # for each certificate.

            # We do not care about certificate authorities or revocation
            # lists, we just want to know that the certificate has a valid
            # signature and follow the chain back to one which is
            # self-signed. We need to protect against forged signatures, but
            # not the usual TLS concerns about invalid CAs or revoked
            # certificates.
            things_are_ok = (
                _OPENSSL.X509_V_OK,
                _OPENSSL.X509_V_ERR_CERT_NOT_YET_VALID,
                _OPENSSL.X509_V_ERR_CERT_HAS_EXPIRED,
                _OPENSSL.X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT,
                _OPENSSL.X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN,
            )
            # TODO can we do this once instead of multiple times?
            if errno in things_are_ok and timing_safe_compare(
                get_spki_hash(cert.to_cryptography()), self.expected_spki_hash
            ):
                return 1
            # TODO: log the details of the error, because otherwise they get
            # lost in the PyOpenSSL exception that will eventually be raised
            # (possibly OpenSSL.SSL.Error: certificate verify failed)
            return 0

        ctx = CertificateOptions.getContext(self)

        # VERIFY_PEER means we ask the the other end for their certificate.
        ctx.set_verify(SSL.VERIFY_PEER, always_validate)
        return ctx
|
|
|
|
|
|
|
|
|
|
|
|
@implementer(IPolicyForHTTPS)
@implementer(IOpenSSLClientConnectionCreator)
@define
class _StorageClientHTTPSPolicy:
    """
    A HTTPS policy that ensures the SPKI hash of the public key matches a known
    hash, i.e. pinning-based validation.
    """

    # The pinned SPKI hash the server's certificate must match.
    expected_spki_hash: bytes

    # IPolicyForHTTPS
    def creatorForNetloc(self, hostname, port):
        # The same pinned-hash validation applies to every host, so this
        # object doubles as the connection creator.
        return self

    # IOpenSSLClientConnectionCreator
    def clientConnectionForTLS(self, tlsProtocol):
        return SSL.Connection(
            _TLSContextFactory(self.expected_spki_hash).getContext(), None
        )
|
|
|
|
|
|
|
|
|
2022-04-15 09:19:30 -04:00
|
|
|
@define
class StorageClient(object):
    """
    Low-level HTTP client that talks to the HTTP storage server.
    """

    # The URL is a HTTPS URL ("https://..."). To construct from a NURL, use
    # ``StorageClient.from_nurl()``.
    _base_url: DecodedURL
    # Shared secret used to authenticate to the server (Authorization header).
    _swissnum: bytes
    # The HTTP client implementation; a ``StubTreq`` is used in tests.
    _treq: Union[treq, StubTreq, HTTPClient]

    @classmethod
    def from_nurl(
        cls, nurl: DecodedURL, reactor, persistent: bool = True
    ) -> StorageClient:
        """
        Create a ``StorageClient`` for the given NURL.

        ``persistent`` indicates whether to use persistent HTTP connections.
        """
        # NOTE(review): ``assert`` is stripped under ``python -O``; consider
        # raising ``ValueError`` for NURL validation instead.
        assert nurl.fragment == "v=1"
        assert nurl.scheme == "pb"
        # The NURL carries the swissnum in the path and the pinned
        # certificate (SPKI) hash in the userinfo portion.
        swissnum = nurl.path[0].encode("ascii")
        certificate_hash = nurl.user.encode("ascii")

        treq_client = HTTPClient(
            Agent(
                reactor,
                _StorageClientHTTPSPolicy(expected_spki_hash=certificate_hash),
                pool=HTTPConnectionPool(reactor, persistent=persistent),
            )
        )

        https_url = DecodedURL().replace(scheme="https", host=nurl.host, port=nurl.port)
        return cls(https_url, swissnum, treq_client)

    def relative_url(self, path):
        """Get a URL relative to the base URL."""
        return self._base_url.click(path)

    def _get_headers(self, headers):  # type: (Optional[Headers]) -> Headers
        """Return the basic headers to be used by default."""
        if headers is None:
            headers = Headers()
        headers.addRawHeader(
            "Authorization",
            swissnum_auth_header(self._swissnum),
        )
        return headers

    def request(
        self,
        method,
        url,
        lease_renew_secret=None,
        lease_cancel_secret=None,
        upload_secret=None,
        write_enabler_secret=None,
        headers=None,
        message_to_serialize=None,
        **kwargs,
    ):
        """
        Like ``treq.request()``, but with optional secrets that get translated
        into corresponding HTTP headers.

        If ``message_to_serialize`` is set, it will be serialized (by default
        with CBOR) and set as the request body.
        """
        headers = self._get_headers(headers)

        # Add secrets:
        for secret, value in [
            (Secrets.LEASE_RENEW, lease_renew_secret),
            (Secrets.LEASE_CANCEL, lease_cancel_secret),
            (Secrets.UPLOAD, upload_secret),
            (Secrets.WRITE_ENABLER, write_enabler_secret),
        ]:
            if value is None:
                continue
            headers.addRawHeader(
                "X-Tahoe-Authorization",
                b"%s %s" % (secret.value.encode("ascii"), b64encode(value).strip()),
            )

        # Note we can accept CBOR:
        headers.addRawHeader("Accept", CBOR_MIME_TYPE)

        # If there's a request message, serialize it and set the Content-Type
        # header:
        if message_to_serialize is not None:
            if "data" in kwargs:
                raise TypeError(
                    "Can't use both `message_to_serialize` and `data` "
                    "as keyword arguments at the same time"
                )
            kwargs["data"] = dumps(message_to_serialize)
            headers.addRawHeader("Content-Type", CBOR_MIME_TYPE)

        return self._treq.request(method, url, headers=headers, **kwargs)
|
|
|
|
|
2022-02-08 10:19:37 -05:00
|
|
|
|
|
|
|
class StorageClientGeneral(object):
    """
    High-level HTTP APIs that aren't immutable- or mutable-specific.
    """

    def __init__(self, client):  # type: (StorageClient) -> None
        self._client = client

    @inlineCallbacks
    def get_version(self):
        """
        Return the version metadata for the server.
        """
        url = self._client.relative_url("/v1/version")
        response = yield self._client.request("GET", url)
        decoded_response = yield _decode_cbor(response, _SCHEMAS["get_version"])
        returnValue(decoded_response)

    @inlineCallbacks
    def add_or_renew_lease(
        self, storage_index: bytes, renew_secret: bytes, cancel_secret: bytes
    ) -> Deferred[None]:
        """
        Add or renew a lease.

        If the renewal secret matches an existing lease, it is renewed.
        Otherwise a new lease is added.

        :raises ClientException: if the server responds with anything other
            than 204 No Content.
        """
        url = self._client.relative_url(
            "/v1/lease/{}".format(_encode_si(storage_index))
        )
        response = yield self._client.request(
            "PUT",
            url,
            lease_renew_secret=renew_secret,
            lease_cancel_secret=cancel_secret,
        )

        if response.code == http.NO_CONTENT:
            return
        else:
            raise ClientException(response.code)
|
2022-01-06 12:36:46 -05:00
|
|
|
|
2022-04-15 09:19:30 -04:00
|
|
|
@define
class UploadProgress(object):
    """
    Progress of immutable upload, per the server.
    """

    # True when upload has finished.
    finished: bool
    # Remaining ranges to upload.
    required: RangeMap
|
2022-01-21 12:36:58 -05:00
|
|
|
|
|
|
|
|
2022-04-19 13:18:31 -04:00
|
|
|
@inlineCallbacks
def read_share_chunk(
    client: StorageClient,
    share_type: str,
    storage_index: bytes,
    share_number: int,
    offset: int,
    length: int,
) -> Deferred[bytes]:
    """
    Download a chunk of data from a share.

    TODO https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3857 Failed downloads
    should be transparently retried and redownloaded by the implementation a
    few times so that if a failure percolates up, the caller can assume the
    failure isn't a short-term blip.

    NOTE: the underlying HTTP protocol is somewhat more flexible than this API,
    insofar as it doesn't always require a range.  In practice a range is
    always provided by the current callers.

    :param share_type: either "immutable" or "mutable", selecting the URL
        namespace on the server.
    """
    url = client.relative_url(
        "/v1/{}/{}/{}".format(share_type, _encode_si(storage_index), share_number)
    )
    response = yield client.request(
        "GET",
        url,
        headers=Headers(
            # Ranges in HTTP are _inclusive_, Python's convention is exclusive,
            # but Range constructor does the conversion for us.
            {"range": [Range("bytes", [(offset, offset + length)]).to_header()]}
        ),
    )

    if response.code == http.NO_CONTENT:
        return b""

    if response.code == http.PARTIAL_CONTENT:
        content_range = parse_content_range_header(
            response.headers.getRawHeaders("content-range")[0]
        )
        supposed_length = content_range.stop - content_range.start
        if supposed_length > length:
            raise ValueError("Server sent more than we asked for?!")
        # It might also send less than we asked for. That's (probably) OK, e.g.
        # if we went past the end of the file.
        body = yield limited_content(response, supposed_length)
        # Measure the actual body length by seeking to the end of the buffer.
        body.seek(0, SEEK_END)
        actual_length = body.tell()
        if actual_length != supposed_length:
            # Most likely a mutable that got changed out from under us, but
            # conceivably could be a bug...
            raise ValueError(
                f"Length of response sent from server ({actual_length}) "
                + f"didn't match Content-Range header ({supposed_length})"
            )
        body.seek(0)
        return body.read()
    else:
        # Technically HTTP allows sending an OK with full body under these
        # circumstances, but the server is not designed to do that so we ignore
        # that possibility for now...
        raise ClientException(response.code)
|
|
|
|
|
|
|
|
|
2022-05-11 10:41:36 -04:00
|
|
|
@async_to_deferred
async def advise_corrupt_share(
    client: StorageClient,
    share_type: str,
    storage_index: bytes,
    share_number: int,
    reason: str,
):
    """
    Tell the server that a share has been corrupted, with a human-readable
    ``reason``.

    :param share_type: either "immutable" or "mutable", selecting the URL
        namespace on the server.
    :raises ClientException: if the server responds with anything other
        than 200 OK.
    """
    assert isinstance(reason, str)
    url = client.relative_url(
        "/v1/{}/{}/{}/corrupt".format(
            share_type, _encode_si(storage_index), share_number
        )
    )
    message = {"reason": reason}
    response = await client.request("POST", url, message_to_serialize=message)
    if response.code == http.OK:
        return
    else:
        raise ClientException(
            response.code,
        )
|
|
|
|
|
|
|
|
|
2022-04-15 09:19:30 -04:00
|
|
|
@define
class StorageClientImmutables(object):
    """
    APIs for interacting with immutables.
    """

    _client: StorageClient

    @inlineCallbacks
    def create(
        self,
        storage_index,
        share_numbers,
        allocated_size,
        upload_secret,
        lease_renew_secret,
        lease_cancel_secret,
    ):  # type: (bytes, set[int], int, bytes, bytes, bytes) -> Deferred[ImmutableCreateResult]
        """
        Create a new storage index for an immutable.

        TODO https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3857 retry
        internally on failure, to ensure the operation fully succeeded.  If
        sufficient number of failures occurred, the result may fire with an
        error, but there's no expectation that user code needs to have a
        recovery codepath; it will most likely just report an error to the
        user.

        Result fires when creating the storage index succeeded, if creating the
        storage index failed the result will fire with an exception.
        """
        url = self._client.relative_url("/v1/immutable/" + _encode_si(storage_index))
        message = {"share-numbers": share_numbers, "allocated-size": allocated_size}

        response = yield self._client.request(
            "POST",
            url,
            lease_renew_secret=lease_renew_secret,
            lease_cancel_secret=lease_cancel_secret,
            upload_secret=upload_secret,
            message_to_serialize=message,
        )
        decoded_response = yield _decode_cbor(response, _SCHEMAS["allocate_buckets"])
        returnValue(
            ImmutableCreateResult(
                already_have=decoded_response["already-have"],
                allocated=decoded_response["allocated"],
            )
        )

    @inlineCallbacks
    def abort_upload(
        self, storage_index: bytes, share_number: int, upload_secret: bytes
    ) -> Deferred[None]:
        """Abort the upload."""
        url = self._client.relative_url(
            "/v1/immutable/{}/{}/abort".format(_encode_si(storage_index), share_number)
        )
        response = yield self._client.request(
            "PUT",
            url,
            upload_secret=upload_secret,
        )

        if response.code == http.OK:
            return
        else:
            raise ClientException(
                response.code,
            )

    @inlineCallbacks
    def write_share_chunk(
        self, storage_index, share_number, upload_secret, offset, data
    ):  # type: (bytes, int, bytes, int, bytes) -> Deferred[UploadProgress]
        """
        Upload a chunk of data for a specific share.

        TODO https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3857 The
        implementation should retry failed uploads transparently a number of
        times, so that if a failure percolates up, the caller can assume the
        failure isn't a short-term blip.

        Result fires when the upload succeeded, with a boolean indicating
        whether the _complete_ share (i.e. all chunks, not just this one) has
        been uploaded.
        """
        url = self._client.relative_url(
            "/v1/immutable/{}/{}".format(_encode_si(storage_index), share_number)
        )
        response = yield self._client.request(
            "PATCH",
            url,
            upload_secret=upload_secret,
            data=data,
            headers=Headers(
                {
                    "content-range": [
                        ContentRange("bytes", offset, offset + len(data)).to_header()
                    ]
                }
            ),
        )

        if response.code == http.OK:
            # Upload is still unfinished.
            finished = False
        elif response.code == http.CREATED:
            # Upload is done!
            finished = True
        else:
            raise ClientException(
                response.code,
            )
        # The server reports which byte ranges it still needs.
        body = yield _decode_cbor(response, _SCHEMAS["immutable_write_share_chunk"])
        remaining = RangeMap()
        for chunk in body["required"]:
            remaining.set(True, chunk["begin"], chunk["end"])
        returnValue(UploadProgress(finished=finished, required=remaining))

    def read_share_chunk(
        self, storage_index, share_number, offset, length
    ):  # type: (bytes, int, int, int) -> Deferred[bytes]
        """
        Download a chunk of data from a share.
        """
        # Delegates to the module-level helper with the "immutable" namespace.
        return read_share_chunk(
            self._client, "immutable", storage_index, share_number, offset, length
        )

    @inlineCallbacks
    def list_shares(self, storage_index: bytes) -> Deferred[set[int]]:
        """
        Return the set of shares for a given storage index.
        """
        url = self._client.relative_url(
            "/v1/immutable/{}/shares".format(_encode_si(storage_index))
        )
        response = yield self._client.request(
            "GET",
            url,
        )
        if response.code == http.OK:
            body = yield _decode_cbor(response, _SCHEMAS["list_shares"])
            returnValue(set(body))
        else:
            raise ClientException(response.code)

    def advise_corrupt_share(
        self,
        storage_index: bytes,
        share_number: int,
        reason: str,
    ):
        """Indicate a share has been corrupted, with a human-readable message."""
        # Delegates to the module-level helper with the "immutable" namespace.
        return advise_corrupt_share(
            self._client, "immutable", storage_index, share_number, reason
        )
|
2022-04-15 09:56:06 -04:00
|
|
|
|
|
|
|
|
2022-04-28 11:46:37 -04:00
|
|
|
@frozen
class WriteVector:
    """Data to write to a chunk."""

    # Byte offset within the share where the write starts.
    offset: int
    # Bytes to write at that offset.
    data: bytes
|
|
|
|
|
|
|
|
|
2022-04-28 11:46:37 -04:00
|
|
|
@frozen
class TestVector:
    """Checks to make on a chunk before writing to it."""

    # Byte offset within the share where the comparison starts.
    offset: int
    # Number of bytes to compare.
    size: int
    # Expected bytes at that location; the write proceeds only on a match.
    specimen: bytes
|
|
|
|
|
|
|
|
|
2022-04-28 11:46:37 -04:00
|
|
|
@frozen
class ReadVector:
    """
    Reads to do on chunks, as part of a read/test/write operation.
    """

    # Byte offset within the share where the read starts.
    offset: int
    # Number of bytes to read.
    size: int
|
|
|
|
|
|
|
|
|
2022-04-28 11:46:37 -04:00
|
|
|
@frozen
class TestWriteVectors:
    """Test and write vectors for a specific share."""

    # Preconditions checked before any write is applied.
    test_vectors: Sequence[TestVector] = field(factory=list)
    # Writes performed if all tests pass.
    write_vectors: Sequence[WriteVector] = field(factory=list)
    # If set, truncate/extend the share to this length after writing.
    new_length: Optional[int] = None

    def asdict(self) -> dict:
        """Return dictionary suitable for sending over CBOR."""
        # Rename attrs field names to the keys the wire protocol expects.
        d = asdict(self)
        d["test"] = d.pop("test_vectors")
        d["write"] = d.pop("write_vectors")
        d["new-length"] = d.pop("new_length")
        return d
|
|
|
|
|
2022-04-15 09:56:06 -04:00
|
|
|
|
2022-04-28 11:46:37 -04:00
|
|
|
@frozen
class ReadTestWriteResult:
    """Result of sending read-test-write vectors."""

    # Whether the test vectors all matched and the writes were applied.
    success: bool
    # Map share numbers to reads corresponding to the request's list of
    # ReadVectors:
    reads: Mapping[int, Sequence[bytes]]
|
2022-04-15 09:56:06 -04:00
|
|
|
|
|
|
|
|
2022-04-28 11:46:37 -04:00
|
|
|
@frozen
class StorageClientMutables:
    """
    APIs for interacting with mutables.
    """

    _client: StorageClient

    @async_to_deferred
    async def read_test_write_chunks(
        self,
        storage_index: bytes,
        write_enabler_secret: bytes,
        lease_renew_secret: bytes,
        lease_cancel_secret: bytes,
        testwrite_vectors: dict[int, TestWriteVectors],
        read_vector: list[ReadVector],
    ) -> ReadTestWriteResult:
        """
        Read, test, and possibly write chunks to a particular mutable storage
        index.

        Reads are done before writes.

        Given a mapping between share numbers and test/write vectors, the tests
        are done and if they are valid the writes are done.
        """
        url = self._client.relative_url(
            "/v1/mutable/{}/read-test-write".format(_encode_si(storage_index))
        )
        # Convert the vector objects to the wire-protocol dictionary shape.
        message = {
            "test-write-vectors": {
                share_number: twv.asdict()
                for (share_number, twv) in testwrite_vectors.items()
            },
            "read-vector": [asdict(r) for r in read_vector],
        }
        response = await self._client.request(
            "POST",
            url,
            write_enabler_secret=write_enabler_secret,
            lease_renew_secret=lease_renew_secret,
            lease_cancel_secret=lease_cancel_secret,
            message_to_serialize=message,
        )
        if response.code == http.OK:
            result = await _decode_cbor(response, _SCHEMAS["mutable_read_test_write"])
            return ReadTestWriteResult(success=result["success"], reads=result["data"])
        else:
            raise ClientException(response.code, (await response.content()))

    def read_share_chunk(
        self,
        storage_index: bytes,
        share_number: int,
        offset: int,
        length: int,
    ) -> Deferred[bytes]:
        """
        Download a chunk of data from a share.
        """
        # Delegates to the module-level helper with the "mutable" namespace.
        return read_share_chunk(
            self._client, "mutable", storage_index, share_number, offset, length
        )

    @async_to_deferred
    async def list_shares(self, storage_index: bytes) -> set[int]:
        """
        List the share numbers for a given storage index.
        """
        url = self._client.relative_url(
            "/v1/mutable/{}/shares".format(_encode_si(storage_index))
        )
        response = await self._client.request("GET", url)
        if response.code == http.OK:
            return await _decode_cbor(response, _SCHEMAS["mutable_list_shares"])
        else:
            raise ClientException(response.code)

    def advise_corrupt_share(
        self,
        storage_index: bytes,
        share_number: int,
        reason: str,
    ):
        """Indicate a share has been corrupted, with a human-readable message."""
        # Delegates to the module-level helper with the "mutable" namespace.
        return advise_corrupt_share(
            self._client, "mutable", storage_index, share_number, reason
        )
|