Merge pull request #1182 from tahoe-lafs/3876-per-bucket-upload-secret

Allow per-bucket upload secret

Fixes ticket:3876
Itamar Turner-Trauring 2022-03-08 10:10:48 -05:00 committed by GitHub
commit 5be7cbc171
5 changed files with 69 additions and 40 deletions

@@ -493,8 +493,8 @@ Handling repeat calls:
 * If the same API call is repeated with the same upload secret, the response is the same and no change is made to server state.
   This is necessary to ensure retries work in the face of lost responses from the server.
 * If the API call is made with a different upload secret, this implies a new client, perhaps because the old client died.
-  In order to prevent storage servers from being able to mess with each other, this API call will fail, because the secret doesn't match.
-  The use case of restarting upload from scratch if the client dies can be implemented by having the client persist the upload secret.
+  Or it may happen because the client wants to upload a different share number than a previous client.
+  New shares will be created; existing shares will be unchanged, regardless of whether the upload secret matches.
 
 Discussion
 ``````````
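
A minimal client-side sketch of the retry semantics described above (the
``allocate_buckets`` method name and response handling are assumptions for
illustration, not the project's actual client API):

    import os

    def allocate_with_retry(client, storage_index, share_numbers, size):
        # Generate the upload secret once and persist it across retries.
        # Repeating the call with the *same* secret is idempotent: the
        # server returns the same response and changes no state.
        upload_secret = os.urandom(32)
        for _attempt in range(3):
            try:
                return client.allocate_buckets(
                    storage_index, share_numbers, size, upload_secret
                )
            except ConnectionError:
                continue  # lost response; retry with the same secret
        raise RuntimeError("allocation failed after repeated retries")

A different secret, by contrast, simply allocates whichever requested shares
do not already exist and leaves in-progress shares untouched.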

newsfragments/3876.minor (new empty file)

@@ -203,18 +203,13 @@ class HTTPServer(object):
         upload_secret = authorization[Secrets.UPLOAD]
         info = loads(request.content.read())
 
-        if storage_index in self._uploads:
-            for share_number in info["share-numbers"]:
-                in_progress = self._uploads[storage_index]
-                # For pre-existing upload, make sure password matches.
-                if (
-                    share_number in in_progress.upload_secrets
-                    and not timing_safe_compare(
-                        in_progress.upload_secrets[share_number], upload_secret
-                    )
-                ):
-                    request.setResponseCode(http.UNAUTHORIZED)
-                    return b""
+        # We do NOT validate the upload secret for existing bucket uploads.
+        # Another upload may be happening in parallel, with a different upload
+        # key. That's fine! If a client tries to _write_ to that upload, they
+        # need to have an upload key. That does mean we leak the existence of
+        # these parallel uploads, but if you know the storage index you can
+        # download them once the upload finishes, so it's not a big deal to
+        # leak that information.
 
         already_got, sharenum_to_bucket = self._storage_server.allocate_buckets(
             storage_index,
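
A sketch of the resulting server-side model (the ``upload_secrets`` mapping
and function names below are illustrative stand-ins, not the server's actual
internals): allocation never rejects a mismatched secret; only a write to an
individual share compares secrets, and it does so in a timing-safe way.

    import hmac

    # Hypothetical per-share secret store: {storage_index: {share_number: secret}}
    upload_secrets = {}

    def allocate(storage_index, share_numbers, secret):
        # Never fail on a secret mismatch; just record the secret for any
        # newly created shares and report which ones those were.
        shares = upload_secrets.setdefault(storage_index, {})
        created = {n for n in share_numbers if n not in shares}
        for n in created:
            shares[n] = secret
        return created

    def may_write(storage_index, share_number, secret):
        # Writing to a share *does* require the matching secret, compared
        # with hmac.compare_digest to avoid timing side channels.
        return hmac.compare_digest(upload_secrets[storage_index][share_number], secret)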

@@ -1102,18 +1102,15 @@ class _HTTPBucketReader(object):
 class _HTTPStorageServer(object):
     """
     Talk to remote storage server over HTTP.
-
-    The same upload key is used for all communication.
     """
     _http_client = attr.ib(type=StorageClient)
-    _upload_secret = attr.ib(type=bytes)
 
     @staticmethod
     def from_http_client(http_client):  # type: (StorageClient) -> _HTTPStorageServer
         """
         Create an ``IStorageServer`` from a HTTP ``StorageClient``.
         """
-        return _HTTPStorageServer(http_client=http_client, upload_secret=urandom(20))
+        return _HTTPStorageServer(http_client=http_client)
 
     def get_version(self):
         return StorageClientGeneral(self._http_client).get_version()

@@ -1128,9 +1125,10 @@ class _HTTPStorageServer(object):
             allocated_size,
             canary,
     ):
+        upload_secret = urandom(20)
         immutable_client = StorageClientImmutables(self._http_client)
         result = immutable_client.create(
-            storage_index, sharenums, allocated_size, self._upload_secret, renew_secret,
+            storage_index, sharenums, allocated_size, upload_secret, renew_secret,
             cancel_secret
         )
         result = yield result

@@ -1140,7 +1138,7 @@ class _HTTPStorageServer(object):
                 client=immutable_client,
                 storage_index=storage_index,
                 share_number=share_num,
-                upload_secret=self._upload_secret
+                upload_secret=upload_secret
             ))
             for share_num in result.allocated
         })
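
The client-side effect, sketched with hypothetical names (``Client`` and
``start_upload`` are not the project's API): each allocation now mints its
own 20-byte secret, so the secret must travel with the writers it produces
rather than living on the storage-server wrapper object.

    from os import urandom

    class Client:
        def __init__(self, http_client):
            # No per-server upload secret is stored anymore.
            self._http_client = http_client

        def start_upload(self, storage_index, sharenums, size):
            upload_secret = urandom(20)  # scoped to this one bucket allocation
            result = self._http_client.create(
                storage_index, sharenums, size, upload_secret
            )
            # Pair each allocated share with the secret that authorizes
            # writes to it; concurrent uploads each use their own secret.
            return [(share_num, upload_secret) for share_num in result.allocated]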

@@ -474,41 +474,77 @@ class ImmutableHTTPAPITests(SyncTestCase):
         )
         self.assertEqual(downloaded, expected_data[offset : offset + length])
 
-    def test_allocate_buckets_second_time_wrong_upload_key(self):
-        """
-        If allocate buckets endpoint is called second time with wrong upload
-        key on the same shares, the result is an error.
-        """
-        # Create an upload:
-        (upload_secret, lease_secret, storage_index, _) = self.create_upload(
-            {1, 2, 3}, 100
-        )
-        with self.assertRaises(ClientException) as e:
-            result_of(
-                self.imm_client.create(
-                    storage_index, {2, 3}, 100, b"x" * 32, lease_secret, lease_secret
-                )
-            )
-        self.assertEqual(e.exception.args[0], http.UNAUTHORIZED)
-
     def test_allocate_buckets_second_time_different_shares(self):
         """
         If allocate buckets endpoint is called a second time with a different
-        upload key on different shares, that creates the buckets.
+        upload key on potentially different shares, buckets are created for
+        the shares that are new.
         """
         # Create an upload:
         (upload_secret, lease_secret, storage_index, created) = self.create_upload(
             {1, 2, 3}, 100
         )
-        # Add same shares:
+
+        # Write half of share 1:
+        result_of(
+            self.imm_client.write_share_chunk(
+                storage_index,
+                1,
+                upload_secret,
+                0,
+                b"a" * 50,
+            )
+        )
+
+        # Add the same shares with a different upload key; share 1 overlaps
+        # with existing shares, so this call shouldn't overwrite the existing
+        # work-in-progress upload.
+        upload_secret2 = b"x" * 2
         created2 = result_of(
             self.imm_client.create(
-                storage_index, {4, 6}, 100, b"x" * 2, lease_secret, lease_secret
+                storage_index,
+                {1, 4, 6},
+                100,
+                upload_secret2,
+                lease_secret,
+                lease_secret,
             )
         )
         self.assertEqual(created2.allocated, {4, 6})
+
+        # Write the second half of share 1:
+        self.assertTrue(
+            result_of(
+                self.imm_client.write_share_chunk(
+                    storage_index,
+                    1,
+                    upload_secret,
+                    50,
+                    b"b" * 50,
+                )
+            ).finished
+        )
+
+        # The upload of share 1 succeeded, demonstrating that the second
+        # create() call didn't overwrite the work-in-progress upload.
+        downloaded = result_of(
+            self.imm_client.read_share_chunk(storage_index, 1, 0, 100)
+        )
+        self.assertEqual(downloaded, b"a" * 50 + b"b" * 50)
+
+        # We can successfully upload the shares created with the second
+        # upload secret.
+        self.assertTrue(
+            result_of(
+                self.imm_client.write_share_chunk(
+                    storage_index,
+                    4,
+                    upload_secret2,
+                    0,
+                    b"x" * 100,
+                )
+            ).finished
+        )
 
     def test_list_shares(self):
         """
         Once a share is finished uploading, it's possible to list it.