Merge remote-tracking branch 'origin/master' into 3797-istorageserver-tests-part-3

Itamar Turner-Trauring 2021-09-27 17:03:05 -04:00
commit da363d54de
17 changed files with 266 additions and 289 deletions


@@ -271,6 +271,11 @@ jobs:
# in the project source checkout.
path: "/tmp/project/_trial_temp/test.log"
- store_artifacts: &STORE_ELIOT_LOG
# Despite passing --workdir /tmp to tox above, it still runs trial
# in the project source checkout.
path: "/tmp/project/eliot.log"
- store_artifacts: &STORE_OTHER_ARTIFACTS
# Store any other artifacts, too. This is handy to allow other jobs
# sharing most of the definition of this one to be able to
@@ -413,6 +418,7 @@ jobs:
- run: *RUN_TESTS
- store_test_results: *STORE_TEST_RESULTS
- store_artifacts: *STORE_TEST_LOG
- store_artifacts: *STORE_ELIOT_LOG
- store_artifacts: *STORE_OTHER_ARTIFACTS
- run: *SUBMIT_COVERAGE


@@ -76,13 +76,18 @@ jobs:
- name: Run tox for corresponding Python version
run: python -m tox
- name: Upload eliot.log in case of failure
- name: Upload eliot.log
uses: actions/upload-artifact@v1
if: failure()
with:
name: eliot.log
path: eliot.log
- name: Upload trial log
uses: actions/upload-artifact@v1
with:
name: test.log
path: _trial_temp/test.log
# Upload this job's coverage data to Coveralls. While there is a GitHub
# Action for this, as of Jan 2021 it does not support Python coverage
# files - only lcov files. Therefore, we use coveralls-python, the
@@ -136,7 +141,7 @@ jobs:
# See notes about parallel builds on GitHub Actions at
# https://coveralls-python.readthedocs.io/en/latest/usage/configuration.html
finish-coverage-report:
needs:
needs:
- "coverage"
runs-on: "ubuntu-latest"
container: "python:3-slim"
@@ -173,7 +178,7 @@ jobs:
- name: Install Tor [Ubuntu]
if: matrix.os == 'ubuntu-latest'
run: sudo apt install tor
# TODO: See https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3744.
# We have to use an older version of Tor for running integration
# tests on macOS.


@@ -600,7 +600,6 @@ For example::
"test": [{
"offset": 3,
"size": 5,
"operator": "eq",
"specimen": "hello"
}, ...],
"write": [{
@@ -626,6 +625,9 @@ For example::
}
}
A test vector or read vector that reads beyond the boundaries of existing data will return nothing for any bytes past the end.
As a result, if there is no data at all, an empty bytestring is returned no matter what the offset or length.
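A rough, non-normative sketch of these read semantics; Python's slice
clamping happens to model them exactly (the helper name is invented)::

    def read_share_data(data, offset, size):
        # Slicing clamps out-of-range bounds, so bytes past the end of
        # the existing data are simply never returned.
        return data[offset:offset + size]

    assert read_share_data(b"hello", 3, 5) == b"lo"  # runs past the end
    assert read_share_data(b"", 7, 100) == b""       # no data at all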
Reading
~~~~~~~
@@ -701,7 +703,10 @@ Immutable Data
Mutable Data
~~~~~~~~~~~~
1. Create mutable share number ``3`` with ``10`` bytes of data in slot ``BBBBBBBBBBBBBBBB``::
1. Create mutable share number ``3`` with ``10`` bytes of data in slot ``BBBBBBBBBBBBBBBB``.
The special test vector of size 1 with an empty specimen will only pass
if there is no existing share;
otherwise it will read a byte, which won't match ``b""``::
POST /v1/mutable/BBBBBBBBBBBBBBBB/read-test-write
{
@@ -715,7 +720,6 @@ Mutable Data
"test": [{
"offset": 0,
"size": 1,
"operator": "eq",
"specimen": ""
}],
"write": [{
@@ -747,8 +751,7 @@ Mutable Data
3: {
"test": [{
"offset": 0,
"size": <checkstring size>,
"operator": "eq",
"size": <length of checkstring>,
"specimen": "<checkstring>"
}],
"write": [{

newsfragments/3525.minor (new, empty)
newsfragments/3792.minor (new, empty)
newsfragments/3795.minor (new, empty)
newsfragments/3799.minor (new, empty)


@@ -91,9 +91,9 @@ class RIBucketReader(RemoteInterface):
TestVector = ListOf(TupleOf(Offset, ReadSize, bytes, bytes))
# elements are (offset, length, operator, specimen)
# operator is one of "lt, le, eq, ne, ge, gt"
# nop always passes and is used to fetch data while writing.
# you should use length==len(specimen) for everything except nop
# operator must be b"eq", typically length==len(specimen), but one can ensure
# writes don't happen to empty shares by setting length to 1 and specimen to
# b"". The operator is still used for wire compatibility with old versions.
DataVector = ListOf(TupleOf(Offset, ShareData))
# (offset, data). This limits us to 30 writes of 1MiB each per call
TestAndWriteVectorsForShares = DictOf(int,
@@ -351,6 +351,12 @@ class IStorageServer(Interface):
):
"""
:see: ``RIStorageServer.slot_testv_readv_and_writev``
While the interface mostly matches, test vectors are simplified.
Instead of a tuple ``(offset, read_size, operator, expected_data)`` in
the original, for this method you need only pass in
``(offset, read_size, expected_data)``, with the operator implicitly
being ``b"eq"``.
"""
def advise_corrupt_share(
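A hypothetical usage sketch of the simplified test vectors; the helper
name, secrets, and data values are invented for illustration:

    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def create_fresh_share(storage_server, storage_index, secrets):
        # Write share 0 only if it does not already exist. Note the
        # 3-tuple test vector; b"eq" is implied.
        (written, _reads) = yield storage_server.slot_testv_and_readv_and_writev(
            storage_index,
            secrets,          # (write_enabler, renew_secret, cancel_secret)
            {
                0: (
                    [(0, 1, b"")],       # passes only if no share exists yet
                    [(0, b"new data")],  # data vector: write at offset 0
                    None,                # new_length: leave size unchanged
                ),
            },
            [],                          # no reads requested
        )
        assert written, "share 0 already existed"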


@@ -309,7 +309,7 @@ class SDMFSlotWriteProxy(object):
salt)
else:
checkstring = checkstring_or_seqnum
self._testvs = [(0, len(checkstring), b"eq", checkstring)]
self._testvs = [(0, len(checkstring), checkstring)]
def get_checkstring(self):
@@ -318,7 +318,7 @@
server.
"""
if self._testvs:
return self._testvs[0][3]
return self._testvs[0][2]
return b""
@@ -548,9 +548,9 @@ class SDMFSlotWriteProxy(object):
if not self._testvs:
# Our caller has not provided us with another checkstring
# yet, so we assume that we are writing a new share, and set
# a test vector that will allow a new share to be written.
# a test vector that will only allow a new share to be written.
self._testvs = []
self._testvs.append(tuple([0, 1, b"eq", b""]))
self._testvs.append(tuple([0, 1, b""]))
tw_vectors = {}
tw_vectors[self.shnum] = (self._testvs, datavs, None)
@@ -889,7 +889,7 @@ class MDMFSlotWriteProxy(object):
self._testvs = []
else:
self._testvs = []
self._testvs.append((0, len(checkstring), b"eq", checkstring))
self._testvs.append((0, len(checkstring), checkstring))
def __repr__(self):
@@ -1161,8 +1161,10 @@ class MDMFSlotWriteProxy(object):
"""I write the data vectors in datavs to the remote slot."""
tw_vectors = {}
if not self._testvs:
# Make sure we will only successfully write if the share didn't
# previously exist.
self._testvs = []
self._testvs.append(tuple([0, 1, b"eq", b""]))
self._testvs.append(tuple([0, 1, b""]))
if not self._written:
# Write a new checkstring to the share when we write it, so
# that we have something to check later.
@@ -1170,7 +1172,7 @@
datavs.append((0, new_checkstring))
def _first_write():
self._written = True
self._testvs = [(0, len(new_checkstring), b"eq", new_checkstring)]
self._testvs = [(0, len(new_checkstring), new_checkstring)]
on_success = _first_write
tw_vectors[self.shnum] = (self._testvs, datavs, None)
d = self._storage_server.slot_testv_and_readv_and_writev(
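A condensed sketch of the check-and-set flow these proxies implement;
the function names are stand-ins, not the real classes:

    def initial_test_vector(testvs):
        # First write: with no caller-supplied checkstring, guard the
        # write so it only succeeds when the share does not yet exist.
        if not testvs:
            return [(0, 1, b"")]
        return testvs

    def after_first_write(new_checkstring):
        # Later writes: test against the checkstring just written, so a
        # concurrent writer's changes are detected rather than clobbered.
        return [(0, len(new_checkstring), new_checkstring)]

    assert initial_test_vector([]) == [(0, 1, b"")]
    assert after_first_write(b"abc") == [(0, 3, b"abc")]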


@@ -11,21 +11,48 @@ if PY2:
from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min # noqa: F401
import os
from urllib.parse import urlencode, quote as url_quote
from sys import stdout as _sys_stdout
from urllib.parse import urlencode
import json
from .common import BaseOptions
from allmydata.scripts.common import get_default_nodedir
from allmydata.scripts.common_http import do_http, BadResponse
from allmydata.scripts.common_http import BadResponse
from allmydata.util.abbreviate import abbreviate_space, abbreviate_time
from allmydata.util.encodingutil import argv_to_abspath
def _get_json_for_fragment(options, fragment, method='GET', post_args=None):
_print = print
def print(*args, **kwargs):
"""
returns the JSON for a particular URI-fragment (to which is
pre-pended the node's URL)
Builtin ``print``-alike that will even write unicode which cannot be
encoded using the specified output file's encoding.
This differs from the builtin print in that it will use the "replace"
encoding error handler and then write the result, whereas the builtin
print uses the "strict" encoding error handler.
"""
from past.builtins import unicode
out = kwargs.pop("file", None)
if out is None:
out = _sys_stdout
encoding = out.encoding or "ascii"
def ensafe(o):
if isinstance(o, unicode):
return o.encode(encoding, errors="replace").decode(encoding)
return o
return _print(
*(ensafe(a) for a in args),
file=out,
**kwargs
)
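# Illustrative doctest-style check (not from the patch) of the wrapper
# above: with an ASCII-only output file, an unencodable character is
# replaced instead of raising UnicodeEncodeError as builtin print would.
#
#     >>> import io
#     >>> out = io.TextIOWrapper(io.BytesIO(), encoding="ascii")
#     >>> print(u"snowman: \u2603", file=out)
#     >>> _ = out.seek(0)
#     >>> out.read()
#     'snowman: ?\n'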
def _get_request_parameters_for_fragment(options, fragment, method, post_args):
"""
Get parameters for ``do_http`` for requesting the given fragment.
:return dict: A dictionary suitable for use as keyword arguments to
``do_http``.
"""
nodeurl = options['node-url']
if nodeurl.endswith('/'):
@@ -40,7 +67,17 @@ def _get_json_for_fragment(options, fragment, method='GET', post_args=None):
body = ''
if post_args is not None:
raise ValueError("post_args= only valid for POST method")
resp = do_http(method, url, body=body.encode("utf-8"))
return dict(
method=method,
url=url,
body=body.encode("utf-8"),
)
def _handle_response_for_fragment(resp, nodeurl):
"""
Inspect an HTTP response and return the parsed payload, if possible.
"""
if isinstance(resp, BadResponse):
# specifically NOT using format_http_error() here because the
# URL is pretty sensitive (we're doing /uri/<key>).
@@ -55,12 +92,6 @@ def _get_json_for_fragment(options, fragment, method='GET', post_args=None):
return parsed
def _get_json_for_cap(options, cap):
return _get_json_for_fragment(
options,
'uri/%s?t=json' % url_quote(cap),
)
def pretty_progress(percent, size=10, output_ascii=False):
"""
Displays a unicode or ascii based progress bar of a certain
@@ -251,7 +282,10 @@ def render_recent(verbose, stdout, status_data):
print(u" Skipped {} non-upload/download operations; use --verbose to see".format(skipped), file=stdout)
def do_status(options):
def do_status(options, do_http=None):
if do_http is None:
from allmydata.scripts.common_http import do_http
nodedir = options["node-directory"]
with open(os.path.join(nodedir, u'private', u'api_auth_token'), 'r') as f:
token = f.read().strip()
@@ -260,25 +294,30 @@ def do_status(options):
# do *all* our data-retrievals first in case there's an error
try:
status_data = _get_json_for_fragment(
options,
'status?t=json',
method='POST',
post_args=dict(
t='json',
token=token,
)
status_data = _handle_response_for_fragment(
do_http(**_get_request_parameters_for_fragment(
options,
'status?t=json',
method='POST',
post_args=dict(
t='json',
token=token,
),
)),
options['node-url'],
)
statistics_data = _get_json_for_fragment(
options,
'statistics?t=json',
method='POST',
post_args=dict(
t='json',
token=token,
)
statistics_data = _handle_response_for_fragment(
do_http(**_get_request_parameters_for_fragment(
options,
'statistics?t=json',
method='POST',
post_args=dict(
t='json',
token=token,
),
)),
options['node-url'],
)
except Exception as e:
print(u"failed to retrieve data: %s" % str(e), file=options.stderr)
return 2


@@ -434,20 +434,9 @@ class MutableShareFile(object):
# self._change_container_size() here.
def testv_compare(a, op, b):
assert op in (b"lt", b"le", b"eq", b"ne", b"ge", b"gt")
if op == b"lt":
return a < b
if op == b"le":
return a <= b
if op == b"eq":
return a == b
if op == b"ne":
return a != b
if op == b"ge":
return a >= b
if op == b"gt":
return a > b
# never reached
assert op == b"eq"
return a == b
class EmptyShare(object):


@@ -994,11 +994,19 @@ class _StorageServer(object):
tw_vectors,
r_vector,
):
# Match the wire protocol, which requires 4-tuples for test vectors.
wire_format_tw_vectors = {
key: (
[(start, length, b"eq", data) for (start, length, data) in value[0]],
value[1],
value[2],
) for (key, value) in tw_vectors.items()
}
return self._rref.callRemote(
"slot_testv_and_readv_and_writev",
storage_index,
secrets,
tw_vectors,
wire_format_tw_vectors,
r_vector,
)
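A worked, illustrative example of that translation (all values are
invented):

    tw_vectors = {
        3: ([(0, 9, b"checkstr!")], [(0, b"new share contents")], None),
    }
    wire_format = {
        key: (
            [(start, length, b"eq", data) for (start, length, data) in testv],
            datav,
            new_length,
        )
        for (key, (testv, datav, new_length)) in tw_vectors.items()
    }
    assert wire_format[3][0] == [(0, 9, b"eq", b"checkstr!")]
    assert wire_format[3][1:] == ([(0, b"new share contents")], None)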


@@ -12,7 +12,6 @@ if PY2:
from six import ensure_text
import os
import mock
import tempfile
from io import BytesIO, StringIO
from os.path import join
@@ -22,8 +21,8 @@ from twisted.internet import defer
from allmydata.mutable.publish import MutableData
from allmydata.scripts.common_http import BadResponse
from allmydata.scripts.tahoe_status import _get_json_for_fragment
from allmydata.scripts.tahoe_status import _get_json_for_cap
from allmydata.scripts.tahoe_status import _handle_response_for_fragment
from allmydata.scripts.tahoe_status import _get_request_parameters_for_fragment
from allmydata.scripts.tahoe_status import pretty_progress
from allmydata.scripts.tahoe_status import do_status
from allmydata.web.status import marshal_json
@@ -140,17 +139,12 @@ class CommandStatus(unittest.TestCase):
"""
These tests just exercise the renderers and ensure they don't
catastrophically fail.
They could be enhanced to look for "some" magic strings in the
results and assert they're in the output.
"""
def setUp(self):
self.options = _FakeOptions()
@mock.patch('allmydata.scripts.tahoe_status.do_http')
@mock.patch('sys.stdout', StringIO())
def test_no_operations(self, http):
def test_no_operations(self):
values = [
StringIO(ensure_text(json.dumps({
"active": [],
@@ -165,12 +159,11 @@ }
}
}))),
]
http.side_effect = lambda *args, **kw: values.pop(0)
do_status(self.options)
def do_http(*args, **kw):
return values.pop(0)
do_status(self.options, do_http)
@mock.patch('allmydata.scripts.tahoe_status.do_http')
@mock.patch('sys.stdout', StringIO())
def test_simple(self, http):
def test_simple(self):
recent_items = active_items = [
UploadStatus(),
DownloadStatus(b"abcd", 12345),
@@ -201,80 +194,72 @@ }
}
}).encode("utf-8")),
]
http.side_effect = lambda *args, **kw: values.pop(0)
do_status(self.options)
def do_http(*args, **kw):
return values.pop(0)
do_status(self.options, do_http)
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_fetch_error(self, http):
def boom(*args, **kw):
def test_fetch_error(self):
def do_http(*args, **kw):
raise RuntimeError("boom")
http.side_effect = boom
do_status(self.options)
do_status(self.options, do_http)
class JsonHelpers(unittest.TestCase):
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_bad_response(self, http):
http.return_value = BadResponse('the url', 'some err')
def test_bad_response(self):
def do_http(*args, **kw):
return
with self.assertRaises(RuntimeError) as ctx:
_get_json_for_fragment({'node-url': 'http://localhost:1234'}, '/fragment')
self.assertTrue(
"Failed to get" in str(ctx.exception)
_handle_response_for_fragment(
BadResponse('the url', 'some err'),
'http://localhost:1234',
)
self.assertIn(
"Failed to get",
str(ctx.exception),
)
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_happy_path(self, http):
http.return_value = StringIO('{"some": "json"}')
resp = _get_json_for_fragment({'node-url': 'http://localhost:1234/'}, '/fragment/')
self.assertEqual(resp, dict(some='json'))
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_happy_path_post(self, http):
http.return_value = StringIO('{"some": "json"}')
resp = _get_json_for_fragment(
{'node-url': 'http://localhost:1234/'},
'/fragment/',
method='POST',
post_args={'foo': 'bar'}
def test_happy_path(self):
resp = _handle_response_for_fragment(
StringIO('{"some": "json"}'),
'http://localhost:1234/',
)
self.assertEqual(resp, dict(some='json'))
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_happy_path_for_cap(self, http):
http.return_value = StringIO('{"some": "json"}')
resp = _get_json_for_cap({'node-url': 'http://localhost:1234'}, 'fake cap')
def test_happy_path_post(self):
resp = _handle_response_for_fragment(
StringIO('{"some": "json"}'),
'http://localhost:1234/',
)
self.assertEqual(resp, dict(some='json'))
@mock.patch('allmydata.scripts.tahoe_status.do_http')
def test_no_data_returned(self, http):
http.return_value = StringIO('null')
def test_no_data_returned(self):
with self.assertRaises(RuntimeError) as ctx:
_get_json_for_cap({'node-url': 'http://localhost:1234'}, 'fake cap')
self.assertTrue('No data from' in str(ctx.exception))
_handle_response_for_fragment(StringIO('null'), 'http://localhost:1234')
self.assertIn('No data from', str(ctx.exception))
def test_no_post_args(self):
with self.assertRaises(ValueError) as ctx:
_get_json_for_fragment(
_get_request_parameters_for_fragment(
{'node-url': 'http://localhost:1234'},
'/fragment',
method='POST',
post_args=None,
)
self.assertTrue(
"Must pass post_args" in str(ctx.exception)
self.assertIn(
"Must pass post_args",
str(ctx.exception),
)
def test_post_args_for_get(self):
with self.assertRaises(ValueError) as ctx:
_get_json_for_fragment(
_get_request_parameters_for_fragment(
{'node-url': 'http://localhost:1234'},
'/fragment',
method='GET',
post_args={'foo': 'bar'}
)
self.assertTrue(
"only valid for POST" in str(ctx.exception)
self.assertIn(
"only valid for POST",
str(ctx.exception),
)


@@ -149,7 +149,7 @@ class FakeStorageServer(object):
readv = {}
for shnum, (testv, writev, new_length) in list(tw_vectors.items()):
for (offset, length, op, specimen) in testv:
assert op in (b"le", b"eq", b"ge")
assert op == b"eq"
# TODO: this isn't right, the read is controlled by read_vector,
# not by testv
readv[shnum] = [ specimen


@@ -211,11 +211,8 @@ class IStorageServerImmutableAPIsTestsMixin(object):
"""
Shares that are fully written to can be read.
1. The result is not affected by the order in which writes
happened, only by their offsets.
2. When overlapping writes happen, the resulting read returns the
latest written value.
The result is not affected by the order in which writes
happened, only by their offsets.
"""
storage_index, renew_secret, cancel_secret = (
new_storage_index(),
@@ -241,15 +238,8 @@ class IStorageServerImmutableAPIsTestsMixin(object):
yield allocated[2].callRemote("write", 0, b"3" * 512)
yield allocated[2].callRemote("close")
# Bucket 3 has an overlapping write.
yield allocated[3].callRemote("write", 0, b"5" * 20)
# The second write will overwrite the first.
yield allocated[3].callRemote("write", 0, b"6" * 24)
yield allocated[3].callRemote("write", 24, b"7" * 1000)
yield allocated[3].callRemote("close")
buckets = yield self.storage_server.get_buckets(storage_index)
self.assertEqual(set(buckets.keys()), {1, 2, 3})
self.assertEqual(set(buckets.keys()), {1, 2})
self.assertEqual(
(yield buckets[1].callRemote("read", 0, 1024)), b"1" * 512 + b"2" * 512
@@ -257,11 +247,102 @@
self.assertEqual(
(yield buckets[2].callRemote("read", 0, 1024)), b"3" * 512 + b"4" * 512
)
self.assertEqual(
(yield buckets[3].callRemote("read", 0, 1024)),
b"6" * 24 + b"7" * 1000,
@skipIf(True, "https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3801")
def test_overlapping_writes(self):
"""
The policy for overlapping writes is TBD:
https://tahoe-lafs.org/trac/tahoe-lafs/ticket/3801
"""
@inlineCallbacks
def test_get_buckets_skips_unfinished_buckets(self):
"""
Buckets that are not fully written are not returned by
``IStorageServer.get_buckets()`` implementations.
"""
storage_index = new_storage_index()
(_, allocated) = yield self.storage_server.allocate_buckets(
storage_index,
renew_secret=new_secret(),
cancel_secret=new_secret(),
sharenums=set(range(5)),
allocated_size=10,
canary=Referenceable(),
)
# Bucket 1 is fully written
yield allocated[1].callRemote("write", 0, b"1" * 10)
yield allocated[1].callRemote("close")
# Bucket 2 is partially written
yield allocated[2].callRemote("write", 0, b"1" * 5)
buckets = yield self.storage_server.get_buckets(storage_index)
self.assertEqual(set(buckets.keys()), {1})
@inlineCallbacks
def test_read_bucket_at_offset(self):
"""
Given a read bucket returned from ``IStorageServer.get_buckets()``, it
is possible to read at different offsets and lengths, with reads past
the end resulting in empty bytes.
"""
length = 256 * 17
storage_index = new_storage_index()
(_, allocated) = yield self.storage_server.allocate_buckets(
storage_index,
renew_secret=new_secret(),
cancel_secret=new_secret(),
sharenums=set(range(1)),
allocated_size=length,
canary=Referenceable(),
)
total_data = _randbytes(256 * 17)
yield allocated[0].callRemote("write", 0, total_data)
yield allocated[0].callRemote("close")
buckets = yield self.storage_server.get_buckets(storage_index)
bucket = buckets[0]
for start, to_read in [
(0, 250), # fraction
(0, length), # whole thing
(100, 1024), # offset fraction
(length + 1, 100), # completely out of bounds
(length - 100, 200), # partially out of bounds
]:
data = yield bucket.callRemote("read", start, to_read)
self.assertEqual(
data,
total_data[start : start + to_read],
"Didn't match for start {}, length {}".format(start, to_read),
)
@inlineCallbacks
def test_bucket_advise_corrupt_share(self):
"""
Calling ``advise_corrupt_share()`` on a bucket returned by
``IStorageServer.get_buckets()`` does not result in error (other
behavior is opaque at this level of abstraction).
"""
storage_index = new_storage_index()
(_, allocated) = yield self.storage_server.allocate_buckets(
storage_index,
renew_secret=new_secret(),
cancel_secret=new_secret(),
sharenums=set(range(1)),
allocated_size=10,
canary=Referenceable(),
)
yield allocated[0].callRemote("write", 0, b"0123456789")
yield allocated[0].callRemote("close")
buckets = yield self.storage_server.get_buckets(storage_index)
yield buckets[0].callRemote("advise_corrupt_share", b"OH NO")
class IStorageServerMutableAPIsTestsMixin(object):
"""


@@ -1074,23 +1074,6 @@ class MutableServer(unittest.TestCase):
}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
# as should this one
answer = write(b"si1", secrets,
{0: ([(10, 5, b"lt", b"11111"),
],
[(0, b"x"*100)],
None),
},
[(10,5)],
)
self.failUnlessEqual(answer, (False,
{0: [b"11111"],
1: [b""],
2: [b""]},
))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
def test_operators(self):
# test operators, the data we're comparing is '11111' in all cases.
# test both fail+pass, reset data after each one.
@@ -1110,63 +1093,6 @@
reset()
# lt
answer = write(b"si1", secrets, {0: ([(10, 5, b"lt", b"11110"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
self.failUnlessEqual(read(b"si1", [], [(0,100)]), {0: [data]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"lt", b"11111"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"lt", b"11112"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
# le
answer = write(b"si1", secrets, {0: ([(10, 5, b"le", b"11110"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"le", b"11111"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"le", b"11112"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
# eq
answer = write(b"si1", secrets, {0: ([(10, 5, b"eq", b"11112"),
],
@@ -1186,81 +1112,6 @@
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
# ne
answer = write(b"si1", secrets, {0: ([(10, 5, b"ne", b"11111"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"ne", b"11112"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
# ge
answer = write(b"si1", secrets, {0: ([(10, 5, b"ge", b"11110"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"ge", b"11111"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"ge", b"11112"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
# gt
answer = write(b"si1", secrets, {0: ([(10, 5, b"gt", b"11110"),
],
[(0, b"y"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (True, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"gt", b"11111"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
answer = write(b"si1", secrets, {0: ([(10, 5, b"gt", b"11112"),
],
[(0, b"x"*100)],
None,
)}, [(10,5)])
self.failUnlessEqual(answer, (False, {0: [b"11111"]}))
self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]})
reset()
# finally, test some operators against empty shares
answer = write(b"si1", secrets, {1: ([(10, 5, b"eq", b"11112"),
],


@@ -74,6 +74,8 @@ commands =
tahoe --version
python -c "import sys; print('sys.stdout.encoding:', sys.stdout.encoding)"
# Run tests with -b to catch bugs like `"%s" % (some_bytes,)`. -b makes
# Python emit BytesWarnings, and warnings configuration in
# src/allmydata/tests/__init__.py turns allmydata's BytesWarnings into