Fold py36 back into normal tox run

This removes the individual-test-based ratchet.
This commit is contained in:
Chad Whitacre 2020-08-12 07:26:38 -04:00
parent e0a52d8bf6
commit 96e3d94172
4 changed files with 2 additions and 643 deletions

View File

@ -1,183 +0,0 @@
allmydata.test.mutable.test_exceptions.Exceptions.test_repr
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_1s
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_25s
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_day
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_future_5_minutes
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_hours
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_month
allmydata.test.test_abbreviate.Abbreviate.test_abbrev_time_year
allmydata.test.test_abbreviate.Abbreviate.test_parse_space
allmydata.test.test_abbreviate.Abbreviate.test_space
allmydata.test.test_abbreviate.Abbreviate.test_time
allmydata.test.test_backupdb.BackupDB.test_basic
allmydata.test.test_backupdb.BackupDB.test_upgrade_v1_v2
allmydata.test.test_backupdb.BackupDB.test_wrong_version
allmydata.test.test_base32.Base32.test_a2b
allmydata.test.test_base32.Base32.test_a2b_b2a_match_Pythons
allmydata.test.test_base32.Base32.test_b2a
allmydata.test.test_base32.Base32.test_b2a_or_none
allmydata.test.test_base62.Base62.test_ende_0x00
allmydata.test.test_base62.Base62.test_ende_0x000000
allmydata.test.test_base62.Base62.test_ende_0x01
allmydata.test.test_base62.Base62.test_ende_0x0100
allmydata.test.test_base62.Base62.test_ende_0x010000
allmydata.test.test_base62.Base62.test_ende_longrandstr
allmydata.test.test_base62.Base62.test_ende_randstr
allmydata.test.test_base62.Base62.test_known_values
allmydata.test.test_base62.Base62.test_num_octets_that_encode_to_this_many_chars
allmydata.test.test_base62.Base62.test_odd_sizes
allmydata.test.test_base62.Base62.test_roundtrip
allmydata.test.test_crypto.TestEd25519.test_deserialize_private_not_bytes
allmydata.test.test_crypto.TestEd25519.test_deserialize_public_not_bytes
allmydata.test.test_crypto.TestEd25519.test_key_serialization
allmydata.test.test_crypto.TestEd25519.test_sign_invalid_pubkey
allmydata.test.test_crypto.TestEd25519.test_signature_data_not_bytes
allmydata.test.test_crypto.TestEd25519.test_signature_not_bytes
allmydata.test.test_crypto.TestEd25519.test_signed_data_not_bytes
allmydata.test.test_crypto.TestEd25519.test_verify_invalid_pubkey
allmydata.test.test_crypto.TestRegression.test_aes_no_iv_process_long_input
allmydata.test.test_crypto.TestRegression.test_aes_no_iv_process_short_input
allmydata.test.test_crypto.TestRegression.test_aes_with_iv_process_long_input
allmydata.test.test_crypto.TestRegression.test_aes_with_iv_process_short_input
allmydata.test.test_crypto.TestRegression.test_decode_ed15519_keypair
allmydata.test.test_crypto.TestRegression.test_decode_rsa_keypair
allmydata.test.test_crypto.TestRegression.test_encrypt_data_not_bytes
allmydata.test.test_crypto.TestRegression.test_incorrect_iv_size
allmydata.test.test_crypto.TestRegression.test_iv_not_bytes
allmydata.test.test_crypto.TestRegression.test_key_incorrect_size
allmydata.test.test_crypto.TestRegression.test_old_start_up_test
allmydata.test.test_crypto.TestRsa.test_keys
allmydata.test.test_crypto.TestRsa.test_sign_invalid_pubkey
allmydata.test.test_crypto.TestRsa.test_verify_invalid_pubkey
allmydata.test.test_crypto.TestUtil.test_remove_prefix_bad
allmydata.test.test_crypto.TestUtil.test_remove_prefix_entire_string
allmydata.test.test_crypto.TestUtil.test_remove_prefix_good
allmydata.test.test_crypto.TestUtil.test_remove_prefix_partial
allmydata.test.test_crypto.TestUtil.test_remove_prefix_zero
allmydata.test.test_deferredutil.DeferredUtilTests.test_failure
allmydata.test.test_deferredutil.DeferredUtilTests.test_gather_results
allmydata.test.test_deferredutil.DeferredUtilTests.test_success
allmydata.test.test_deferredutil.DeferredUtilTests.test_wait_for_delayed_calls
allmydata.test.test_dictutil.DictUtil.test_auxdict
allmydata.test.test_dictutil.DictUtil.test_dict_of_sets
allmydata.test.test_happiness.Happiness.test_100
allmydata.test.test_happiness.Happiness.test_calc_happy
allmydata.test.test_happiness.Happiness.test_everything_broken
allmydata.test.test_happiness.Happiness.test_hypothesis0
allmydata.test.test_happiness.Happiness.test_hypothesis_0
allmydata.test.test_happiness.Happiness.test_hypothesis_1
allmydata.test.test_happiness.Happiness.test_placement_1
allmydata.test.test_happiness.Happiness.test_placement_simple
allmydata.test.test_happiness.Happiness.test_redistribute
allmydata.test.test_happiness.Happiness.test_unhappy
allmydata.test.test_happiness.HappinessUtils.test_residual_0
allmydata.test.test_happiness.HappinessUtils.test_trivial_flow_graph
allmydata.test.test_happiness.HappinessUtils.test_trivial_maximum_graph
allmydata.test.test_happiness.PlacementTests.test_hypothesis_unhappy
allmydata.test.test_happiness.PlacementTests.test_more_hypothesis
allmydata.test.test_hashtree.Complete.test_create
allmydata.test.test_hashtree.Complete.test_dump
allmydata.test.test_hashtree.Complete.test_needed_hashes
allmydata.test.test_hashtree.Incomplete.test_check
allmydata.test.test_hashtree.Incomplete.test_create
allmydata.test.test_hashtree.Incomplete.test_depth_of
allmydata.test.test_hashtree.Incomplete.test_large
allmydata.test.test_hashtree.Incomplete.test_needed_hashes
allmydata.test.test_hashutil.HashUtilTests.test_chk
allmydata.test.test_hashutil.HashUtilTests.test_hashers
allmydata.test.test_hashutil.HashUtilTests.test_known_answers
allmydata.test.test_hashutil.HashUtilTests.test_random_key
allmydata.test.test_hashutil.HashUtilTests.test_sha256d
allmydata.test.test_hashutil.HashUtilTests.test_sha256d_truncated
allmydata.test.test_hashutil.HashUtilTests.test_timing_safe_compare
allmydata.test.test_humanreadable.HumanReadable.test_repr
allmydata.test.test_iputil.GcUtil.test_gc_after_allocations
allmydata.test.test_iputil.GcUtil.test_release_delays_gc
allmydata.test.test_iputil.ListAddresses.test_get_local_ip_for
allmydata.test.test_iputil.ListAddresses.test_list_async
allmydata.test.test_iputil.ListAddresses.test_list_async_mock_cygwin
allmydata.test.test_iputil.ListAddresses.test_list_async_mock_ifconfig
allmydata.test.test_iputil.ListAddresses.test_list_async_mock_ip_addr
allmydata.test.test_iputil.ListAddresses.test_list_async_mock_route
allmydata.test.test_iputil.ListenOnUsed.test_random_port
allmydata.test.test_iputil.ListenOnUsed.test_specific_port
allmydata.test.test_log.Log.test_default_facility
allmydata.test.test_log.Log.test_err
allmydata.test.test_log.Log.test_grandparent_id
allmydata.test.test_log.Log.test_no_prefix
allmydata.test.test_log.Log.test_numming
allmydata.test.test_log.Log.test_parent_id
allmydata.test.test_log.Log.test_with_bytes_prefix
allmydata.test.test_log.Log.test_with_prefix
allmydata.test.test_netstring.Netstring.test_encode
allmydata.test.test_netstring.Netstring.test_extra
allmydata.test.test_netstring.Netstring.test_nested
allmydata.test.test_netstring.Netstring.test_split
allmydata.test.test_observer.Observer.test_lazy_oneshot
allmydata.test.test_observer.Observer.test_observerlist
allmydata.test.test_observer.Observer.test_oneshot
allmydata.test.test_observer.Observer.test_oneshot_fireagain
allmydata.test.test_pipeline.Pipeline.test_basic
allmydata.test.test_pipeline.Pipeline.test_errors
allmydata.test.test_pipeline.Pipeline.test_errors2
allmydata.test.test_python3.Python3PortingEffortTests.test_finished_porting
allmydata.test.test_python3.Python3PortingEffortTests.test_ported_modules_distinct
allmydata.test.test_python3.Python3PortingEffortTests.test_ported_modules_exist
allmydata.test.test_spans.ByteSpans.test_basic
allmydata.test.test_spans.ByteSpans.test_large
allmydata.test.test_spans.ByteSpans.test_math
allmydata.test.test_spans.ByteSpans.test_overlap
allmydata.test.test_spans.ByteSpans.test_random
allmydata.test.test_spans.StringSpans.test_basic
allmydata.test.test_spans.StringSpans.test_random
allmydata.test.test_spans.StringSpans.test_test
allmydata.test.test_statistics.Statistics.test_binomial_coeff
allmydata.test.test_statistics.Statistics.test_binomial_distribution_pmf
allmydata.test.test_statistics.Statistics.test_convolve
allmydata.test.test_statistics.Statistics.test_find_k
allmydata.test.test_statistics.Statistics.test_pr_backup_file_loss
allmydata.test.test_statistics.Statistics.test_pr_file_loss
allmydata.test.test_statistics.Statistics.test_repair_cost
allmydata.test.test_statistics.Statistics.test_repair_count_pmf
allmydata.test.test_statistics.Statistics.test_survival_pmf
allmydata.test.test_time_format.TimeFormat.test_epoch
allmydata.test.test_time_format.TimeFormat.test_epoch_in_London
allmydata.test.test_time_format.TimeFormat.test_format_delta
allmydata.test.test_time_format.TimeFormat.test_format_time
allmydata.test.test_time_format.TimeFormat.test_format_time_y2038
allmydata.test.test_time_format.TimeFormat.test_iso_utc
allmydata.test.test_time_format.TimeFormat.test_parse_date
allmydata.test.test_time_format.TimeFormat.test_parse_duration
allmydata.test.test_util.FileUtil.test_abspath_expanduser_unicode
allmydata.test.test_util.FileUtil.test_create_long_path
allmydata.test.test_util.FileUtil.test_disk_stats
allmydata.test.test_util.FileUtil.test_disk_stats_avail_nonnegative
allmydata.test.test_util.FileUtil.test_du
allmydata.test.test_util.FileUtil.test_encrypted_tempfile
allmydata.test.test_util.FileUtil.test_get_pathinfo
allmydata.test.test_util.FileUtil.test_get_pathinfo_symlink
allmydata.test.test_util.FileUtil.test_make_dirs_with_absolute_mode
allmydata.test.test_util.FileUtil.test_remove_if_possible
allmydata.test.test_util.FileUtil.test_rename
allmydata.test.test_util.FileUtil.test_rename_no_overwrite
allmydata.test.test_util.FileUtil.test_replace_file
allmydata.test.test_util.FileUtil.test_rm_dir
allmydata.test.test_util.FileUtil.test_windows_expanduser_win7
allmydata.test.test_util.FileUtil.test_windows_expanduser_xp
allmydata.test.test_util.FileUtil.test_write_atomically
allmydata.test.test_util.IDLib.test_nodeid_b2a
allmydata.test.test_util.Math.test_round_sigfigs
allmydata.test.test_util.PollMixinTests.test_PollMixin_False_then_True
allmydata.test.test_util.PollMixinTests.test_PollMixin_True
allmydata.test.test_util.PollMixinTests.test_timeout
allmydata.test.test_util.YAML.test_convert
allmydata.test.test_version.CheckRequirement.test_cross_check
allmydata.test.test_version.CheckRequirement.test_cross_check_unparseable_versions
allmydata.test.test_version.CheckRequirement.test_extract_openssl_version
allmydata.test.test_version.CheckRequirement.test_packages_from_pkg_resources
allmydata.test.test_version.T.test_report_import_error
allmydata.test.test_version.VersionTestCase.test_basic_versions
allmydata.test.test_version.VersionTestCase.test_comparison
allmydata.test.test_version.VersionTestCase.test_from_parts
allmydata.test.test_version.VersionTestCase.test_irrational_versions
allmydata.test.test_version.VersionTestCase.test_suggest_normalized_version

View File

@ -1,409 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Ratchet up passing tests, or ratchet down failing tests.
Usage:
ratchet.py <"up" or "down"> <junitxml file path> <tracking file path>
This script helps when you expect a large test suite to fail spectactularly in
some environment, and you want to gradually improve the situation with minimal
impact to forward development of the same codebase for other environments. The
initial and primary usecase is porting from Python 2 to Python 3.
The idea is to emit JUnit XML from your test runner, and then invoke ratchet.py
to consume this XML output and operate on a so-called "tracking" file. When
ratcheting up passing tests, the tracking file will contain a list of tests,
one per line, that passed. When ratcheting down, the tracking file contains a
list of failing tests. On each subsequent run, ratchet.py will compare the
prior results in the tracking file with the new results in the XML, and will
report on both welcome and unwelcome changes. It will modify the tracking file
in the case of welcome changes, and therein lies the ratcheting.
The exit codes are:
0 - no changes observed
1 - changes observed, whether welcome or unwelcome
2 - invocation error
If <junitxml file path> does not exist, you'll get a FileNotFoundError:
>>> _test('up', None, None) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
FileNotFoundError: ...
If <tracking file path> does not exist, that's fine:
>>> _test('up', '1', None)
Some tests not required to pass did:
c0.t
Conveniently, they have been added to `<tracking_path>` for you. Perhaps commit that?
Eep! 0 test(s) were required to pass, but instead 1 did. 🐭
Same if you're ratcheting down:
>>> _test('down', '1', None)
All and only tests expected to fail did. 💃
If the test run has the same output as last time, it's all good:
>>> _test('up', '01001110', '01001110')
All and only tests required to pass did. 💃
>>> _test('down', '01001110', '10110001')
All and only tests expected to fail did. 💃
If there's a welcome change, that's noted:
>>> _test('up', '0101', '0100')
Some tests not required to pass did:
c3.t
Conveniently, they have been added to `<tracking_path>` for you. Perhaps commit that?
Eep! 1 test(s) were required to pass, but instead 2 did. 🐭
>>> _test('down', '0011', '1110')
Some tests expected to fail didn't:
c2.t
Conveniently, they have been removed from `<tracking_path>` for you. Perhaps commit that?
Eep! 3 test(s) were expected to fail, but instead 2 did. 🐭
And if there is an unwelcome change, that is noted as well:
>>> _test('up', '1101', '1111')
Some tests required to pass didn't:
c2.t
Eep! 4 test(s) were required to pass, but instead 3 did. 🐭
>>> _test('down', '0000', '1101')
Some tests not expected to fail did:
c2.t
Eep! 3 test(s) were expected to fail, but instead 4 did. 🐭
And if there are both welcome and unwelcome changes, they are both noted:
>>> _test('up', '1101', '1011')
Some tests not required to pass did:
c1.t
Conveniently, they have been added to `<tracking_path>` for you. Perhaps commit that?
Some tests required to pass didn't:
c2.t
Eep! 3 test(s) were required to pass, but instead 3 did. 🐭
>>> _test('down', '0100', '1100')
Some tests not expected to fail did:
c2.t
c3.t
Some tests expected to fail didn't:
c1.t
Conveniently, they have been removed from `<tracking_path>` for you. Perhaps commit that?
Eep! 2 test(s) were expected to fail, but instead 3 did. 🐭
To test ratchet.py itself:
python3 -m doctest ratchet.py
'''
from __future__ import absolute_import, division, print_function, unicode_literals
import io
import os
import re
import sys
import tempfile
import xml.etree.ElementTree as Etree
class JUnitXMLFile(object):
    '''Represent a file containing test results in JUnit XML format.

    >>> eg = _mktemp_junitxml('0100111')
    >>> results = JUnitXMLFile(eg.name).parse()
    >>> results.failed
    ['c0.t', 'c2.t', 'c3.t']
    >>> results.passed
    ['c1.t', 'c4.t', 'c5.t', 'c6.t']
    '''

    def __init__(self, filepath):
        self.filepath = filepath
        self.failed = []
        self.failed_aggregates = {}
        self.stderr_output = []
        self.passed = []
        self._tree = None

    def parse(self):
        '''Parse the XML file once, classifying every testcase.

        Returns self so callers can chain, e.g. ``JUnitXMLFile(p).parse()``.
        '''
        if self._tree:
            raise RuntimeError('already parsed')
        self._tree = Etree.parse(self.filepath)
        for testcase in self._tree.findall('testcase'):
            self.process_testcase(testcase)
        return self

    def process_testcase(self, case):
        '''Record one <testcase> element as passed or failed.'''
        key = self.case_key(case)
        # Children other than captured stderr carry the verdict; a testcase
        # with no such child passed.
        outcomes = [child for child in case if child.tag != 'system-err']
        if len(outcomes) > 1:
            raise RuntimeError(f'multiple results for {key}: {outcomes}')
        if not outcomes:
            self.passed.append(key)
            return
        verdict = outcomes[0]
        self.failed.append(key)
        # Group failures by message so report() can show the most common
        # failure modes first.
        message = verdict.get('message')
        self.failed_aggregates.setdefault(message, []).append(key)

    @staticmethod
    def case_key(case):
        '''Build the dotted test name from the element's attributes.'''
        return f'{case.get("classname")}.{case.get("name")}'

    def report(self, details=False):
        '''Print failure messages, most frequent first, with their tests.'''
        ordered = sorted(
            self.failed_aggregates.items(),
            key=lambda item: len(item[1]),
            reverse=True,
        )
        for message, keys in ordered:
            print(f'# {message}')
            for key in keys:
                print(f' - {key}')
def load_previous_results(txt):
    '''Return the set of test names listed in the tracking file at `txt`.

    A missing file is treated as empty; blank lines and `#` comments are
    ignored.
    '''
    try:
        with open(txt) as fp:
            raw = fp.read()
    except FileNotFoundError:
        raw = ''
    return {
        line for line in raw.splitlines()
        if line and not line.startswith('#')
    }
def print_tests(tests):
    '''Print each test name on its own line, sorted, indented two spaces.'''
    for name in sorted(tests):
        print(f'  {name}')
def ratchet_up_passing(tracking_path, tests):
    '''Merge `tests` into the tracking file at `tracking_path`.

    The file ends up containing the union of its previous entries and
    `tests`, one dotted test name per line, sorted. A missing tracking
    file is treated as empty.
    '''
    try:
        # Each line keeps its trailing newline, matching `new` below.
        # Fix: close the handle deterministically instead of leaking it.
        with open(tracking_path, 'r') as fp:
            old = set(fp)
    except FileNotFoundError:
        old = set()
    new = set(t + '\n' for t in tests)
    with open(tracking_path, 'w') as fp:
        fp.writelines(sorted(old | new))
def ratchet_down_failing(tracking_path, tests):
    '''Overwrite the tracking file with exactly `tests`, sorted.

    Unlike ratchet_up_passing this does not merge: the new failing set
    replaces the old one wholesale.
    '''
    new = set(t + '\n' for t in tests)
    # Fix: close the handle deterministically instead of leaking it.
    with open(tracking_path, 'w') as fp:
        fp.writelines(sorted(new))
def main(direction, junitxml_path, tracking_path):
    '''Takes a string indicating which direction to ratchet, "up" or "down,"
    and two paths, one to test-runner output in JUnit XML format, the other to
    a file tracking test results (one test case dotted name per line). Walk the
    former looking for the latter, and react appropriately.

    >>> inp = _mktemp_junitxml('0100111')
    >>> out = _mktemp_tracking('0000000')
    >>> _test_main('up', inp.name, out.name)
    Some tests not required to pass did:
      c1.t
      c4.t
      c5.t
      c6.t
    Conveniently, they have been added to `<tracking_path>` for you. Perhaps commit that?
    Eep! 0 test(s) were required to pass, but instead 4 did. 🐭
    '''
    results = JUnitXMLFile(junitxml_path).parse()
    if tracking_path == '...':
        # Shortcut to aid in debugging XML parsing issues.
        results.report()
        return
    previous = load_previous_results(tracking_path)
    current = set(results.passed if direction == 'up' else results.failed)
    subjunctive = {'up': 'required to pass', 'down': 'expected to fail'}[direction]
    # Fix: removed the unused local `ratchet = None` that the original
    # assigned and never read.
    too_many = current - previous
    if too_many:
        print(f'Some tests not {subjunctive} did:')
        print_tests(too_many)
        if direction == 'up':
            # Too many passing tests is good -- let's do more of those!
            ratchet_up_passing(tracking_path, current)
            print(f'Conveniently, they have been added to `{tracking_path}` for you. Perhaps commit that?')
    not_enough = previous - current
    if not_enough:
        print(f'Some tests {subjunctive} didn\'t:')
        print_tests(not_enough)
        if direction == 'down':
            # Not enough failing tests is good -- let's do more of those!
            ratchet_down_failing(tracking_path, current)
            print(f'Conveniently, they have been removed from `{tracking_path}` for you. Perhaps commit that?')
    if too_many or not_enough:
        print(f'Eep! {len(previous)} test(s) were {subjunctive}, but instead {len(current)} did. 🐭')
        return 1
    print(f'All and only tests {subjunctive} did. 💃')
    return 0
# When called as an executable ...
if __name__ == '__main__':
    try:
        direction, junitxml_path, tracking_path = sys.argv[1:4]
        if direction not in ('up', 'down'):
            raise ValueError
    except ValueError:
        # Wrong argument count or a bad direction: show the usage lines
        # from the module docstring with the actual script name swapped in.
        usage = '\n'.join(__doc__.splitlines()[:6])
        usage = re.sub(' ratchet.py', f' {sys.argv[0]}', usage)
        print(usage, file=sys.stderr)
        exit_code = 2
    else:
        exit_code = main(direction, junitxml_path, tracking_path)
    sys.exit(exit_code)
# Helpers for when called under doctest ...
def _test(*a):
    '''Doctest helper: build fixtures via _mk() and run _test_main().'''
    fixtures = _mk(*a)
    return _test_main(*fixtures)
def _test_main(direction, junitxml, tracking):
    '''Takes a string 'up' or 'down' and paths to (or open file objects for)
    the JUnit XML and tracking files to use for this test run. Captures and
    emits stdout (slightly modified) for inspection via doctest.'''
    # Accept either a path string or a file-ish object with a .name.
    junitxml_path = getattr(junitxml, 'name', junitxml)
    tracking_path = getattr(tracking, 'name', tracking)
    real_stdout = sys.stdout
    sys.stdout = io.StringIO()
    try:
        main(direction, junitxml_path, tracking_path)
    finally:
        sys.stdout.seek(0)
        captured = sys.stdout.read()
        # Normalize the random temp-file path so doctest output is stable.
        captured = re.sub('`.*?`', '`<tracking_path>`', captured).strip()
        sys.stdout = real_stdout
        print(captured)
class _PotentialFile(object):
'''Represent a file that we are able to create but which doesn't exist yet,
and which, if we create it, will be automatically torn down when the test
run is over.'''
def __init__(self, filename):
self.d = tempfile.TemporaryDirectory()
self.name = os.path.join(self.d.name, filename)
def _mk(direction, spec_junitxml, spec_tracking):
'''Takes a string 'up' or 'down' and two bit strings specifying the state
of the JUnit XML results file and the tracking file to set up for this test
case. Returns the direction (unharmed) and two file-ish objects.
If a spec string is None the corresponding return value will be a
_PotentialFile object, which has a .name attribute (like a true file
object) that points to a file that does not exist, but could.
The reason not to simply return the path in all cases is that the file
objects are actually temporary file objects that destroy the underlying
file when they go out of scope, and we want to keep the underlying file
around until the end of the test run.'''
if None not in(spec_junitxml, spec_tracking):
if len(spec_junitxml) != len(spec_tracking):
raise ValueError('if both given, must be the same length: `{spec_junitxml}` and `{spec_tracking}`')
if spec_junitxml is None:
junitxml_fp = _PotentialFile('results.xml')
else:
junitxml_fp = _mktemp_junitxml(spec_junitxml)
if spec_tracking is None:
tracking_fp = _PotentialFile('tracking')
else:
tracking_fp = _mktemp_tracking(spec_tracking)
return direction, junitxml_fp, tracking_fp
def _mktemp_junitxml(spec):
'''Test helper to generate a raw JUnit XML file.
>>> fp = _mktemp_junitxml('00101')
>>> open(fp.name).read()[:11]
'<testsuite>'
'''
fp = tempfile.NamedTemporaryFile()
fp.write(b'<testsuite>')
passed = '''\
<testcase classname="c{i}" name="t"></testcase>
'''
failed = '''\
<testcase classname="c{i}" name="t">
<failure>Traceback (most recent call last):
File "/foo/bar/baz/buz.py", line 1, in &lt;module>
NameError: name 'heck' is not defined
</failure>
</testcase>
'''
i = 0
for c in spec:
if c == '0':
out = failed
elif c == '1':
out = passed
else:
raise ValueError(f'bad c: `{c}`')
fp.write(out.format(i=i).encode('utf8'))
i += 1
fp.write(b'</testsuite>')
fp.flush()
return fp
def _mktemp_tracking(spec):
'''Test helper to prefabricate a tracking file.
>>> fp = _mktemp_tracking('01101')
>>> print(open(fp.name).read()[:-1])
c1.t
c2.t
c4.t
'''
fp = tempfile.NamedTemporaryFile()
i = 0
for c in spec:
if c == '0':
pass
elif c == '1':
fp.write(f'c{i}.t\n'.encode('utf8'))
else:
raise ValueError(f'bad c: `{c}`')
i += 1
fp.flush()
return fp

View File

@ -1,45 +0,0 @@
#!/usr/bin/env bash
# Run the full trial suite under py3, convert the subunit stream to JUnit
# XML, and ratchet the set of passing tests via ratchet.py.
set -euxo pipefail

tracking_filename="ratchet-passing"

# Start somewhere predictable.
cd "$(dirname "$0")"
base=$(pwd)

# Actually, though, trial outputs some things that are only gitignored in the project root.
cd "../.."

# Since both of the next calls are expected to exit non-0, relax our guard.
set +e
trial --reporter=subunitv2-file allmydata
subunit2junitxml < "${SUBUNITREPORTER_OUTPUT_PATH}" > "$base/results.xml"
set -e

# Okay, now we're clear.
cd "$base"

# Make sure ratchet.py itself is clean.
python3 -m doctest ratchet.py

# Now see about Tahoe-LAFS (also expected to fail) ...
set +e
python3 ratchet.py up results.xml "$tracking_filename"
code=$?
set -e

# Emit a diff of the tracking file, to aid in the situation where changes are
# not discovered until CI (where TERM might be `dumb`).
# Fix: under `set -u` an unset TERM aborted the script, and the bare $TERM
# was unquoted; default it to `dumb` and quote the expansion.
if [ "${TERM:-dumb}" = 'dumb' ]; then
  export TERM=ansi
fi

echo "The ${tracking_filename} diff is:"
echo "================================="
# "git diff" gets pretty confused in this execution context when trying to
# write to stdout. Somehow it fails with SIGTTOU.
git diff -- "${tracking_filename}" > tracking.diff
cat tracking.diff
echo "================================="

echo "Exiting with code ${code} from ratchet.py."
exit ${code}

View File

@ -45,13 +45,9 @@ usedevelop = False
# tests.
extras = test
commands =
# `tahoe --version` is not ready for prime time yet under py36. May it be some day!
!py36: tahoe --version
trial {env:TAHOE_LAFS_TRIAL_ARGS:--rterrors} {posargs:allmydata}
tahoe --version
[testenv:py36]
# On macOS, git inside of ratchet.sh needs $HOME.
passenv = {[testenv]passenv} HOME
commands = {toxinidir}/misc/python3/ratchet.sh
[testenv:integration]
setenv =