From 48283ea6f871925268638e392b0984c9163812a0 Mon Sep 17 00:00:00 2001 From: "Fon E. Noel NFEBE" Date: Wed, 7 Sep 2022 22:35:57 +0100 Subject: [PATCH 01/54] Refactor test_storage.py There are base test classes, namely `SyncTestCase` and `AsyncTestCase`, which we would like all test classes in this code base to extend. This commit updates the listed classes in test_storage.py so that they extend those base classes: * UtilTests * BucketProxy * Server Signed-off-by: Fon E. Noel NFEBE --- newsfragments/3917.minor | 0 src/allmydata/test/test_storage.py | 75 +++++++++++++++++------------- 2 files changed, 43 insertions(+), 32 deletions(-) create mode 100644 newsfragments/3917.minor diff --git a/newsfragments/3917.minor b/newsfragments/3917.minor new file mode 100644 index 000000000..e69de29bb diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index c3f2a35e1..3f33d82ab 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -25,8 +25,16 @@ import shutil from functools import partial from uuid import uuid4 +from .common import ( + SyncTestCase, + AsyncTestCase, +) + from testtools.matchers import ( + Equals, + Contains, HasLength, + IsInstance, ) from twisted.trial import unittest @@ -92,23 +100,23 @@ from .strategies import ( ) -class UtilTests(unittest.TestCase): +class UtilTests(SyncTestCase): """Tests for allmydata.storage.common and .shares.""" def test_encoding(self): """b2a/a2b are the same as base32.""" s = b"\xFF HELLO \xF3" result = si_b2a(s) - self.assertEqual(base32.b2a(s), result) - self.assertEqual(si_a2b(result), s) + self.assertThat(base32.b2a(s), Equals(result)) + self.assertThat(si_a2b(result), Equals(s)) def test_storage_index_to_dir(self): """storage_index_to_dir creates a native string path.""" s = b"\xFF HELLO \xF3" path = storage_index_to_dir(s) parts = os.path.split(path) - self.assertEqual(parts[0], parts[1][:2]) - self.assertIsInstance(path, native_str) + self.assertThat(parts[0], Equals(parts[1][:2])) + self.assertThat(path, IsInstance(native_str)) def test_get_share_file_mutable(self): """A mutable share is identified by get_share_file().""" @@ -116,16 +124,16 @@ class UtilTests(unittest.TestCase): msf = MutableShareFile(path) msf.create(b"12", b"abc") # arbitrary values loaded = get_share_file(path) - self.assertIsInstance(loaded, MutableShareFile) - self.assertEqual(loaded.home, path) + self.assertThat(loaded, IsInstance(MutableShareFile)) + self.assertThat(loaded.home, Equals(path)) def test_get_share_file_immutable(self): """An immutable share is identified by get_share_file().""" path = self.mktemp() _ = ShareFile(path, max_size=1000, create=True) loaded = get_share_file(path) - self.assertIsInstance(loaded, ShareFile) - self.assertEqual(loaded.home, path) + self.assertThat(loaded, IsInstance(ShareFile)) + self.assertThat(loaded.home, Equals(path)) class FakeStatsProvider(object): @@ -135,7 +143,7 @@ class FakeStatsProvider(object): pass -class Bucket(unittest.TestCase): +class Bucket(SyncTestCase): def make_workdir(self, name): basedir = os.path.join("storage", "Bucket", name) incoming = os.path.join(basedir, "tmp", "bucket") @@ -178,9 +186,9 @@ class Bucket(unittest.TestCase): # now read from it br = BucketReader(self, bw.finalhome) - self.failUnlessEqual(br.read(0, 25), b"a"*25) - self.failUnlessEqual(br.read(25, 25), b"b"*25) - self.failUnlessEqual(br.read(50, 7), b"c"*7) + self.assertThat(br.read(0, 25), Equals(b"a"*25)) + self.assertThat(br.read(25, 25), Equals(b"b"*25)) + 
self.assertThat(br.read(50, 7), Equals(b"c"*7)) def test_write_past_size_errors(self): """Writing beyond the size of the bucket throws an exception.""" @@ -430,7 +438,7 @@ class RemoteBucket(object): return defer.maybeDeferred(_call) -class BucketProxy(unittest.TestCase): +class BucketProxy(SyncTestCase): def make_bucket(self, name, size): basedir = os.path.join("storage", "BucketProxy", name) incoming = os.path.join(basedir, "tmp", "bucket") @@ -513,7 +521,7 @@ class BucketProxy(unittest.TestCase): rb = RemoteBucket(FoolscapBucketReader(br)) server = NoNetworkServer(b"abc", None) rbp = rbp_class(rb, server, storage_index=b"") - self.failUnlessIn("to peer", repr(rbp)) + self.assertThat(repr(rbp), Contains("to peer")) self.failUnless(interfaces.IStorageBucketReader.providedBy(rbp), rbp) d1 = rbp.get_block_data(0, 25, 25) @@ -550,13 +558,16 @@ class BucketProxy(unittest.TestCase): return self._do_test_readwrite("test_readwrite_v2", 0x44, WriteBucketProxy_v2, ReadBucketProxy) -class Server(unittest.TestCase): +class Server(AsyncTestCase): def setUp(self): + super(Server, self).setUp() self.sparent = LoggingServiceParent() self.sparent.startService() self._lease_secret = itertools.count() + def tearDown(self): + super(Server, self).tearDown() return self.sparent.stopService() def workdir(self, name): @@ -586,14 +597,14 @@ class Server(unittest.TestCase): ss = self.create("test_declares_maximum_share_sizes") ver = ss.get_version() sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1'] - self.failUnlessIn(b'maximum-immutable-share-size', sv1) - self.failUnlessIn(b'maximum-mutable-share-size', sv1) + self.assertThat(sv1, Contains(b'maximum-immutable-share-size')) + self.assertThat(sv1, Contains(b'maximum-mutable-share-size')) def test_declares_available_space(self): ss = self.create("test_declares_available_space") ver = ss.get_version() sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1'] - self.failUnlessIn(b'available-space', sv1) + self.assertThat(sv1, Contains(b'available-space')) def allocate(self, ss, storage_index, sharenums, size, renew_leases=True): """ @@ -725,8 +736,8 @@ class Server(unittest.TestCase): self.failUnlessEqual(set(b.keys()), set([0,1,2])) self.failUnlessEqual(b[0].read(0, 25), b"%25d" % 0) b_str = str(b[0]) - self.failUnlessIn("BucketReader", b_str) - self.failUnlessIn("mfwgy33dmf2g 0", b_str) + self.assertThat(b_str, Contains("BucketReader")) + self.assertThat(b_str, Contains("mfwgy33dmf2g 0")) # now if we ask about writing again, the server should offer those # three buckets as already present. 
It should offer them even if we @@ -1216,21 +1227,21 @@ class Server(unittest.TestCase): b"This share smells funny.\n") reportdir = os.path.join(workdir, "corruption-advisories") reports = os.listdir(reportdir) - self.failUnlessEqual(len(reports), 1) + self.assertThat(reports, HasLength(1)) report_si0 = reports[0] - self.failUnlessIn(ensure_str(si0_s), report_si0) + self.assertThat(report_si0, Contains(ensure_str(si0_s))) f = open(os.path.join(reportdir, report_si0), "rb") report = f.read() f.close() - self.failUnlessIn(b"type: immutable", report) - self.failUnlessIn(b"storage_index: %s" % si0_s, report) - self.failUnlessIn(b"share_number: 0", report) - self.failUnlessIn(b"This share smells funny.", report) + self.assertThat(report, Contains(b"type: immutable")) + self.assertThat(report, Contains(b"storage_index: %s" % si0_s)) + self.assertThat(report, Contains(b"share_number: 0")) + self.assertThat(report, Contains(b"This share smells funny.")) # test the RIBucketWriter version too si1_s = base32.b2a(b"si1") already,writers = self.allocate(ss, b"si1", [1], 75) - self.failUnlessEqual(already, set()) + self.assertThat(already, Equals(set())) self.failUnlessEqual(set(writers.keys()), set([1])) writers[1].write(0, b"data") writers[1].close() @@ -1245,10 +1256,10 @@ class Server(unittest.TestCase): f = open(os.path.join(reportdir, report_si1), "rb") report = f.read() f.close() - self.failUnlessIn(b"type: immutable", report) - self.failUnlessIn(b"storage_index: %s" % si1_s, report) - self.failUnlessIn(b"share_number: 1", report) - self.failUnlessIn(b"This share tastes like dust.", report) + self.assertThat(report, Contains(b"type: immutable")) + self.assertThat(report, Contains(b"storage_index: %s" % si1_s)) + self.assertThat(report, Contains(b"share_number: 1")) + self.assertThat(report, Contains(b"This share tastes like dust.")) def test_advise_corruption_missing(self): """ From fbc8baa238f72720cfa840a9c227c670a5e2fa6e Mon Sep 17 00:00:00 2001 From: "Fon E. Noel NFEBE" Date: Wed, 14 Sep 2022 22:55:31 +0100 Subject: [PATCH 02/54] Refactor Server class in test_storage.py As a follow-up to commit 48283ea6f871925268638e392b0984c9163812a0, this refactor adds better methods and cleans up the tests to be consistent with the methods used in classes that extend `AsyncTestCase`. Signed-off-by: Fon E. Noel NFEBE --- src/allmydata/test/test_storage.py | 166 ++++++++++++++--------------- 1 file changed, 82 insertions(+), 84 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index 3f33d82ab..d50ae1c18 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -25,13 +25,9 @@ import shutil from functools import partial from uuid import uuid4 -from .common import ( - SyncTestCase, - AsyncTestCase, -) - from testtools.matchers import ( Equals, + NotEquals, Contains, HasLength, IsInstance, ) from twisted.trial import unittest @@ -88,7 +84,9 @@ from .common import ( ShouldFailMixin, FakeDisk, SyncTestCase, + AsyncTestCase, ) + from .common_util import FakeCanary from .common_storage import ( upload_immutable, @@ -346,16 +344,16 @@ class Bucket(SyncTestCase): # Now read from it. br = BucketReader(mockstorageserver, final) - self.failUnlessEqual(br.read(0, len(share_data)), share_data) + self.assertThat(br.read(0, len(share_data)), Equals(share_data)) # Read past the end of share data to get the cancel secret.
read_length = len(share_data) + len(ownernumber) + len(renewsecret) + len(cancelsecret) result_of_read = br.read(0, read_length) - self.failUnlessEqual(result_of_read, share_data) + self.assertThat(result_of_read, Equals(share_data)) result_of_read = br.read(0, len(share_data)+1) - self.failUnlessEqual(result_of_read, share_data) + self.assertThat(result_of_read, Equals(share_data)) def _assert_timeout_only_after_30_minutes(self, clock, bw): """ @@ -591,7 +589,7 @@ class Server(AsyncTestCase): ss = self.create("test_declares_fixed_1528") ver = ss.get_version() sv1 = ver[b'http://allmydata.org/tahoe/protocols/storage/v1'] - self.failUnless(sv1.get(b'prevents-read-past-end-of-share-data'), sv1) + self.assertTrue(sv1.get(b'prevents-read-past-end-of-share-data'), sv1) def test_declares_maximum_share_sizes(self): ss = self.create("test_declares_maximum_share_sizes") @@ -634,8 +632,8 @@ class Server(AsyncTestCase): ss = self.create("test_large_share") already,writers = self.allocate(ss, b"allocate", [0], 2**32+2) - self.failUnlessEqual(already, set()) - self.failUnlessEqual(set(writers.keys()), set([0])) + self.assertThat(set(), Equals(already)) + self.assertThat(set([0]), Equals(set(writers.keys()))) shnum, bucket = list(writers.items())[0] # This test is going to hammer your filesystem if it doesn't make a sparse file for this. :-( @@ -644,7 +642,7 @@ class Server(AsyncTestCase): readers = ss.get_buckets(b"allocate") reader = readers[shnum] - self.failUnlessEqual(reader.read(2**32, 2), b"ab") + self.assertThat(b"ab", Equals(reader.read(2**32, 2))) def test_dont_overfill_dirs(self): """ @@ -670,7 +668,7 @@ class Server(AsyncTestCase): storedir = os.path.join(self.workdir("test_dont_overfill_dirs"), "shares") new_children_of_storedir = set(os.listdir(storedir)) - self.failUnlessEqual(children_of_storedir, new_children_of_storedir) + self.assertThat(new_children_of_storedir, Equals(children_of_storedir)) def test_remove_incoming(self): ss = self.create("test_remove_incoming") @@ -682,9 +680,9 @@ class Server(AsyncTestCase): incoming_bucket_dir = os.path.dirname(incoming_share_dir) incoming_prefix_dir = os.path.dirname(incoming_bucket_dir) incoming_dir = os.path.dirname(incoming_prefix_dir) - self.failIf(os.path.exists(incoming_bucket_dir), incoming_bucket_dir) - self.failIf(os.path.exists(incoming_prefix_dir), incoming_prefix_dir) - self.failUnless(os.path.exists(incoming_dir), incoming_dir) + self.assertFalse(os.path.exists(incoming_bucket_dir), incoming_bucket_dir) + self.assertFalse(os.path.exists(incoming_prefix_dir), incoming_prefix_dir) + self.assertTrue(os.path.exists(incoming_dir), incoming_dir) def test_abort(self): # remote_abort, when called on a writer, should make sure that @@ -692,12 +690,12 @@ class Server(AsyncTestCase): # server when accounting for space. ss = self.create("test_abort") already, writers = self.allocate(ss, b"allocate", [0, 1, 2], 150) - self.failIfEqual(ss.allocated_size(), 0) + self.assertThat(ss.allocated_size(), NotEquals(0)) # Now abort the writers. 
for writer in writers.values(): writer.abort() - self.failUnlessEqual(ss.allocated_size(), 0) + self.assertThat(ss.allocated_size(), Equals(0)) def test_immutable_length(self): """ @@ -709,20 +707,20 @@ class Server(AsyncTestCase): bucket = writers[22] bucket.write(0, b"X" * 75) bucket.close() - self.assertEqual(ss.get_immutable_share_length(b"allocate", 22), 75) - self.assertEqual(ss.get_buckets(b"allocate")[22].get_length(), 75) + self.assertThat(ss.get_immutable_share_length(b"allocate", 22), Equals(75)) + self.assertThat(ss.get_buckets(b"allocate")[22].get_length(), Equals(75)) def test_allocate(self): ss = self.create("test_allocate") - self.failUnlessEqual(ss.get_buckets(b"allocate"), {}) + self.assertThat(ss.get_buckets(b"allocate"), Equals({})) already,writers = self.allocate(ss, b"allocate", [0,1,2], 75) - self.failUnlessEqual(already, set()) - self.failUnlessEqual(set(writers.keys()), set([0,1,2])) + self.assertThat(already, Equals(set())) + self.assertThat(set(writers.keys()), Equals(set([0,1,2]))) # while the buckets are open, they should not count as readable - self.failUnlessEqual(ss.get_buckets(b"allocate"), {}) + self.assertThat(ss.get_buckets(b"allocate"), Equals({})) # close the buckets for i,wb in writers.items(): @@ -733,8 +731,8 @@ class Server(AsyncTestCase): # now they should be readable b = ss.get_buckets(b"allocate") - self.failUnlessEqual(set(b.keys()), set([0,1,2])) - self.failUnlessEqual(b[0].read(0, 25), b"%25d" % 0) + self.assertThat(set(b.keys()), Equals(set([0,1,2]))) + self.assertThat(b[0].read(0, 25), Equals(b"%25d" % 0)) b_str = str(b[0]) self.assertThat(b_str, Contains("BucketReader")) self.assertThat(b_str, Contains("mfwgy33dmf2g 0")) @@ -743,22 +741,22 @@ class Server(AsyncTestCase): # three buckets as already present. It should offer them even if we # don't ask about those specific ones. already,writers = self.allocate(ss, b"allocate", [2,3,4], 75) - self.failUnlessEqual(already, set([0,1,2])) - self.failUnlessEqual(set(writers.keys()), set([3,4])) + self.assertThat(already, Equals(set([0,1,2]))) + self.assertThat(set(writers.keys()), Equals(set([3,4]))) # while those two buckets are open for writing, the server should # refuse to offer them to uploaders already2,writers2 = self.allocate(ss, b"allocate", [2,3,4,5], 75) - self.failUnlessEqual(already2, set([0,1,2])) - self.failUnlessEqual(set(writers2.keys()), set([5])) + self.assertThat(already2, Equals(set([0,1,2]))) + self.assertThat(set(writers2.keys()), Equals(set([5]))) # aborting the writes should remove the tempfiles for i,wb in writers2.items(): wb.abort() already2,writers2 = self.allocate(ss, b"allocate", [2,3,4,5], 75) - self.failUnlessEqual(already2, set([0,1,2])) - self.failUnlessEqual(set(writers2.keys()), set([5])) + self.assertThat(already2, Equals(set([0,1,2]))) + self.assertThat(set(writers2.keys()), Equals(set([5]))) for i,wb in writers2.items(): wb.abort() @@ -814,13 +812,13 @@ class Server(AsyncTestCase): # The first share's lease expiration time is unchanged. 
shares = dict(ss.get_shares(storage_index)) - self.assertEqual( + self.assertThat( [first_lease], - list( + Equals(list( lease.get_grant_renew_time_time() for lease in ShareFile(shares[0]).get_leases() - ), + )), ) def test_bad_container_version(self): @@ -839,9 +837,9 @@ class Server(AsyncTestCase): e = self.failUnlessRaises(UnknownImmutableContainerVersionError, ss.get_buckets, b"si1") - self.assertEqual(e.filename, fn) - self.assertEqual(e.version, 0) - self.assertIn("had unexpected version 0", str(e)) + self.assertThat(e.filename, Equals(fn)) + self.assertThat(e.version, Equals(0)) + self.assertThat(str(e), Contains("had unexpected version 0")) def test_disconnect(self): # simulate a disconnection @@ -857,8 +855,8 @@ class Server(AsyncTestCase): allocated_size=75, canary=canary, ) - self.failUnlessEqual(already, set()) - self.failUnlessEqual(set(writers.keys()), set([0,1,2])) + self.assertThat(already, Equals(set())) + self.assertThat(set(writers.keys()), Equals(set([0,1,2]))) for (f,args,kwargs) in list(canary.disconnectors.values()): f(*args, **kwargs) del already @@ -866,8 +864,8 @@ class Server(AsyncTestCase): # that ought to delete the incoming shares already,writers = self.allocate(ss, b"disconnect", [0,1,2], 75) - self.failUnlessEqual(already, set()) - self.failUnlessEqual(set(writers.keys()), set([0,1,2])) + self.assertThat(already, Equals(set())) + self.assertThat(set(writers.keys()), Equals(set([0,1,2]))) def test_reserved_space_immutable_lease(self): """ @@ -965,22 +963,22 @@ class Server(AsyncTestCase): allocated_size=1000, canary=canary, ) - self.failUnlessEqual(len(writers), 3) + self.assertThat(writers, HasLength(3)) # now the StorageServer should have 3000 bytes provisionally # allocated, allowing only 2000 more to be claimed - self.failUnlessEqual(len(ss._server._bucket_writers), 3) + self.assertThat(ss._server._bucket_writers, HasLength(3)) # allocating 1001-byte shares only leaves room for one canary2 = FakeCanary() already2, writers2 = self.allocate(ss, b"vid2", [0,1,2], 1001, canary2) - self.failUnlessEqual(len(writers2), 1) - self.failUnlessEqual(len(ss._server._bucket_writers), 4) + self.assertThat(writers2, HasLength(1)) + self.assertThat(ss._server._bucket_writers, HasLength(4)) # we abandon the first set, so their provisional allocation should be # returned canary.disconnected() - self.failUnlessEqual(len(ss._server._bucket_writers), 1) + self.assertThat(ss._server._bucket_writers, HasLength(1)) # now we have a provisional allocation of 1001 bytes # and we close the second set, so their provisional allocation should @@ -989,7 +987,7 @@ class Server(AsyncTestCase): for bw in writers2.values(): bw.write(0, b"a"*25) bw.close() - self.failUnlessEqual(len(ss._server._bucket_writers), 0) + self.assertThat(ss._server._bucket_writers, HasLength(0)) # this also changes the amount reported as available by call_get_disk_stats allocated = 1001 + OVERHEAD + LEASE_SIZE @@ -1005,12 +1003,12 @@ class Server(AsyncTestCase): allocated_size=100, canary=canary3, ) - self.failUnlessEqual(len(writers3), 39) - self.failUnlessEqual(len(ss._server._bucket_writers), 39) + self.assertThat(writers3, HasLength(39)) + self.assertThat(ss._server._bucket_writers, HasLength(39)) canary3.disconnected() - self.failUnlessEqual(len(ss._server._bucket_writers), 0) + self.assertThat(ss._server._bucket_writers, HasLength(0)) ss._server.disownServiceParent() del ss @@ -1029,9 +1027,9 @@ class Server(AsyncTestCase): f.write(b"100") f.close() filelen = os.stat(filename)[stat.ST_SIZE] - 
self.failUnlessEqual(filelen, 100+3) + self.assertThat(filelen, Equals(100+3)) f2 = open(filename, "rb") - self.failUnlessEqual(f2.read(5), b"start") + self.assertThat(f2.read(5), Equals(b"start")) def create_bucket_5_shares( self, ss, storage_index, expected_already=0, expected_writers=5 @@ -1048,8 +1046,8 @@ class Server(AsyncTestCase): hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))) already, writers = ss.allocate_buckets(storage_index, rs, cs, sharenums, size) - self.failUnlessEqual(len(already), expected_already) - self.failUnlessEqual(len(writers), expected_writers) + self.assertThat(already, HasLength(expected_already)) + self.assertThat(writers, HasLength(expected_writers)) for wb in writers.values(): wb.close() return rs, cs @@ -1085,11 +1083,11 @@ class Server(AsyncTestCase): self.assertTrue(lease3.is_renew_secret(rs2a)) # add-lease on a missing storage index is silently ignored - self.assertIsNone(ss.add_lease(b"si18", b"", b"")) + self.assertThat(ss.add_lease(b"si18", b"", b""), Equals(None)) # check that si0 is readable readers = ss.get_buckets(b"si0") - self.failUnlessEqual(len(readers), 5) + self.assertThat(readers, HasLength(5)) # renew the first lease. Only the proper renew_secret should work ss.renew_lease(b"si0", rs0) @@ -1098,11 +1096,11 @@ class Server(AsyncTestCase): # check that si0 is still readable readers = ss.get_buckets(b"si0") - self.failUnlessEqual(len(readers), 5) + self.assertThat(readers, HasLength(5)) # There is no such method as remote_cancel_lease for now -- see # ticket #1528. - self.failIf(hasattr(FoolscapStorageServer(ss), 'remote_cancel_lease'), \ + self.assertFalse(hasattr(FoolscapStorageServer(ss), 'remote_cancel_lease'), \ "ss should not have a 'remote_cancel_lease' method/attribute") # test overlapping uploads @@ -1112,25 +1110,25 @@ class Server(AsyncTestCase): hashutil.my_cancel_secret_hash(b"%d" % next(self._lease_secret))) already,writers = ss.allocate_buckets(b"si3", rs3, cs3, sharenums, size) - self.failUnlessEqual(len(already), 0) - self.failUnlessEqual(len(writers), 5) + self.assertThat(already, HasLength(0)) + self.assertThat(writers, HasLength(5)) already2,writers2 = ss.allocate_buckets(b"si3", rs4, cs4, sharenums, size) - self.failUnlessEqual(len(already2), 0) - self.failUnlessEqual(len(writers2), 0) + self.assertThat(already2, HasLength(0)) + self.assertThat(writers2, HasLength(0)) for wb in writers.values(): wb.close() leases = list(ss.get_leases(b"si3")) - self.failUnlessEqual(len(leases), 1) + self.assertThat(leases, HasLength(1)) already3,writers3 = ss.allocate_buckets(b"si3", rs4, cs4, sharenums, size) - self.failUnlessEqual(len(already3), 5) - self.failUnlessEqual(len(writers3), 0) + self.assertThat(already3, HasLength(5)) + self.assertThat(writers3, HasLength(0)) leases = list(ss.get_leases(b"si3")) - self.failUnlessEqual(len(leases), 2) + self.assertThat(leases, HasLength(2)) def test_immutable_add_lease_renews(self): """ @@ -1144,7 +1142,7 @@ class Server(AsyncTestCase): # Start out with single lease created with bucket: renewal_secret, cancel_secret = self.create_bucket_5_shares(ss, b"si0") [lease] = ss.get_leases(b"si0") - self.assertEqual(lease.get_expiration_time(), 123 + DEFAULT_RENEWAL_TIME) + self.assertThat(lease.get_expiration_time(), Equals(123 + DEFAULT_RENEWAL_TIME)) # Time passes: clock.advance(123456) @@ -1152,7 +1150,7 @@ class Server(AsyncTestCase): # Adding a lease with matching renewal secret just renews it: ss.add_lease(b"si0", renewal_secret, cancel_secret) [lease] = ss.get_leases(b"si0") 
- self.assertEqual(lease.get_expiration_time(), 123 + 123456 + DEFAULT_RENEWAL_TIME) + self.assertThat(lease.get_expiration_time(), Equals(123 + 123456 + DEFAULT_RENEWAL_TIME)) def test_have_shares(self): """By default the StorageServer has no shares.""" @@ -1166,15 +1164,15 @@ class Server(AsyncTestCase): ss.setServiceParent(self.sparent) already,writers = self.allocate(ss, b"vid", [0,1,2], 75) - self.failUnlessEqual(already, set()) - self.failUnlessEqual(writers, {}) + self.assertThat(already, Equals(set())) + self.assertThat(writers, Equals({})) stats = ss.get_stats() - self.failUnlessEqual(stats["storage_server.accepting_immutable_shares"], 0) + self.assertThat(stats["storage_server.accepting_immutable_shares"], Equals(0)) if "storage_server.disk_avail" in stats: # Some platforms may not have an API to get disk stats. # But if there are stats, readonly_storage means disk_avail=0 - self.failUnlessEqual(stats["storage_server.disk_avail"], 0) + self.assertThat(stats["storage_server.disk_avail"], Equals(0)) def test_discard(self): # discard is really only used for other tests, but we test it anyways @@ -1183,8 +1181,8 @@ class Server(AsyncTestCase): ss.setServiceParent(self.sparent) already,writers = self.allocate(ss, b"vid", [0,1,2], 75) - self.failUnlessEqual(already, set()) - self.failUnlessEqual(set(writers.keys()), set([0,1,2])) + self.assertThat(already, Equals(set())) + self.assertThat(set(writers.keys()), Equals(set([0,1,2]))) for i,wb in writers.items(): wb.write(0, b"%25d" % i) wb.close() @@ -1192,8 +1190,8 @@ class Server(AsyncTestCase): # Since we write with some seeks, the data we read back will be all # zeros. b = ss.get_buckets(b"vid") - self.failUnlessEqual(set(b.keys()), set([0,1,2])) - self.failUnlessEqual(b[0].read(0, 25), b"\x00" * 25) + self.assertThat(set(b.keys()), Equals(set([0,1,2]))) + self.assertThat(b[0].read(0, 25), Equals(b"\x00" * 25)) def test_reserved_space_advise_corruption(self): """ @@ -1211,9 +1209,9 @@ class Server(AsyncTestCase): ss.advise_corrupt_share(b"immutable", b"si0", 0, b"This share smells funny.\n") - self.assertEqual( + self.assertThat( [], - os.listdir(ss.corruption_advisory_dir), + Equals(os.listdir(ss.corruption_advisory_dir)), ) def test_advise_corruption(self): @@ -1242,16 +1240,16 @@ class Server(AsyncTestCase): si1_s = base32.b2a(b"si1") already,writers = self.allocate(ss, b"si1", [1], 75) self.assertThat(already, Equals(set())) - self.failUnlessEqual(set(writers.keys()), set([1])) + self.assertThat(set(writers.keys()), Equals(set([1]))) writers[1].write(0, b"data") writers[1].close() b = ss.get_buckets(b"si1") - self.failUnlessEqual(set(b.keys()), set([1])) + self.assertThat(set(b.keys()), Equals(set([1]))) b[1].advise_corrupt_share(b"This share tastes like dust.\n") reports = os.listdir(reportdir) - self.failUnlessEqual(len(reports), 2) + self.assertThat(reports, HasLength(2)) report_si1 = [r for r in reports if bytes_to_native_str(si1_s) in r][0] f = open(os.path.join(reportdir, report_si1), "rb") report = f.read() @@ -1277,9 +1275,9 @@ class Server(AsyncTestCase): ss.advise_corrupt_share(b"immutable", b"si0", 1, b"This share smells funny.\n") - self.assertEqual( + self.assertThat( [], - os.listdir(ss.corruption_advisory_dir), + Equals(os.listdir(ss.corruption_advisory_dir)), ) From 1d85a2c5cf40a4e52bbb024e2edba4fcd883caa1 Mon Sep 17 00:00:00 2001 From: "Fon E. 
Noel NFEBE" Date: Sun, 20 Nov 2022 14:02:49 +0100 Subject: [PATCH 03/54] Refactor more test_storage.py classes As a follow up to commit fbc8baa238f72720cfa840a9c227c670a5e2fa6e this refactor continues to remove deprecated methods and ensures test classes either extend `SyncTestCase` or `AsyncTestCase` Classes refactored: - `MutableServer` - `MDMFProxies` - `Stats` - `MutableShareFileTests` Signed-off-by: Fon E. Noel NFEBE --- src/allmydata/test/test_storage.py | 654 ++++++++++++++--------------- 1 file changed, 327 insertions(+), 327 deletions(-) diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py index 920f2d935..3d35ec55f 100644 --- a/src/allmydata/test/test_storage.py +++ b/src/allmydata/test/test_storage.py @@ -470,7 +470,7 @@ class BucketProxy(SyncTestCase): num_segments=5, num_share_hashes=3, uri_extension_size=500) - self.failUnless(interfaces.IStorageBucketWriter.providedBy(bp), bp) + self.assertTrue(interfaces.IStorageBucketWriter.providedBy(bp), bp) def _do_test_readwrite(self, name, header_size, wbp_class, rbp_class): # Let's pretend each share has 100 bytes of data, and that there are @@ -520,7 +520,7 @@ class BucketProxy(SyncTestCase): server = NoNetworkServer(b"abc", None) rbp = rbp_class(rb, server, storage_index=b"") self.assertThat(repr(rbp), Contains("to peer")) - self.failUnless(interfaces.IStorageBucketReader.providedBy(rbp), rbp) + self.assertTrue(interfaces.IStorageBucketReader.providedBy(rbp), rbp) d1 = rbp.get_block_data(0, 25, 25) d1.addCallback(lambda res: self.failUnlessEqual(res, b"a"*25)) @@ -1281,7 +1281,7 @@ class Server(AsyncTestCase): ) -class MutableServer(unittest.TestCase): +class MutableServer(SyncTestCase): def setUp(self): self.sparent = LoggingServiceParent() @@ -1311,13 +1311,13 @@ class MutableServer(unittest.TestCase): def renew_secret(self, tag): if isinstance(tag, int): tag = b"%d" % (tag,) - assert isinstance(tag, bytes) + self.assertThat(tag, IsInstance(bytes)) return hashutil.tagged_hash(b"renew_blah", tag) def cancel_secret(self, tag): if isinstance(tag, int): tag = b"%d" % (tag,) - assert isinstance(tag, bytes) + self.assertThat(tag, IsInstance(bytes)) return hashutil.tagged_hash(b"cancel_blah", tag) def allocate(self, ss, storage_index, we_tag, lease_tag, sharenums, size): @@ -1333,9 +1333,9 @@ class MutableServer(unittest.TestCase): testandwritev, readv) (did_write, readv_data) = rc - self.failUnless(did_write) - self.failUnless(isinstance(readv_data, dict)) - self.failUnlessEqual(len(readv_data), 0) + self.assertTrue(did_write) + self.assertThat(readv_data, IsInstance(dict)) + self.assertThat(readv_data, HasLength(0)) def test_enumerate_mutable_shares(self): """ @@ -1357,9 +1357,9 @@ class MutableServer(unittest.TestCase): self.cancel_secret(b"le1")) ss.slot_testv_and_readv_and_writev(b"si1", secrets, {2: ([], [], 0)}, []) shares0_1_4 = ss.enumerate_mutable_shares(b"si1") - self.assertEqual( + self.assertThat( (empty, shares0_1_2_4, shares0_1_4), - (set(), {0, 1, 2, 4}, {0, 1, 4}) + Equals((set(), {0, 1, 2, 4}, {0, 1, 4})) ) def test_mutable_share_length(self): @@ -1373,7 +1373,7 @@ class MutableServer(unittest.TestCase): {16: ([], [(0, b"x" * 23)], None)}, [] ) - self.assertEqual(ss.get_mutable_share_length(b"si1", 16), 23) + self.assertThat(ss.get_mutable_share_length(b"si1", 16), Equals(23)) def test_mutable_share_length_unknown(self): """ @@ -1406,10 +1406,10 @@ class MutableServer(unittest.TestCase): read = ss.slot_readv e = self.failUnlessRaises(UnknownMutableContainerVersionError, read, b"si1", 
[0], [(0,10)]) - self.assertEqual(e.filename, fn) + self.assertThat(e.filename, Equals(fn)) self.assertTrue(e.version.startswith(b"BAD MAGIC")) - self.assertIn("had unexpected version", str(e)) - self.assertIn("BAD MAGIC", str(e)) + self.assertThat(str(e), Contains("had unexpected version")) + self.assertThat(str(e), Contains("BAD MAGIC")) def test_container_size(self): ss = self.create("test_container_size") @@ -1424,7 +1424,7 @@ class MutableServer(unittest.TestCase): answer = rstaraw(b"si1", secrets, {0: ([], [(0,data)], len(data)+12)}, []) - self.failUnlessEqual(answer, (True, {0:[],1:[],2:[]}) ) + self.assertThat(answer, Equals((True, {0:[],1:[],2:[]}))) # Trying to make the container too large (by sending a write vector # whose offset is too high) will raise an exception. @@ -1437,10 +1437,10 @@ class MutableServer(unittest.TestCase): answer = rstaraw(b"si1", secrets, {0: ([], [(0,data)], None)}, []) - self.failUnlessEqual(answer, (True, {0:[],1:[],2:[]}) ) + self.assertThat(answer, Equals((True, {0:[],1:[],2:[]}))) read_answer = read(b"si1", [0], [(0,10)]) - self.failUnlessEqual(read_answer, {0: [data[:10]]}) + self.assertThat(read_answer, Equals({0: [data[:10]]})) # Sending a new_length shorter than the current length truncates the # data. @@ -1448,7 +1448,7 @@ class MutableServer(unittest.TestCase): {0: ([], [], 9)}, []) read_answer = read(b"si1", [0], [(0,10)]) - self.failUnlessEqual(read_answer, {0: [data[:9]]}) + self.assertThat(read_answer, Equals({0: [data[:9]]})) # Sending a new_length longer than the current length doesn't change # the data. @@ -1457,7 +1457,7 @@ class MutableServer(unittest.TestCase): []) assert answer == (True, {0:[],1:[],2:[]}) read_answer = read(b"si1", [0], [(0, 20)]) - self.failUnlessEqual(read_answer, {0: [data[:9]]}) + self.assertThat(read_answer, Equals({0: [data[:9]]})) # Sending a write vector whose start is after the end of the current # data doesn't reveal "whatever was there last time" (palimpsest), @@ -1479,7 +1479,7 @@ class MutableServer(unittest.TestCase): answer = rstaraw(b"si1", secrets, {0: ([], [], None)}, [(20, 1980)]) - self.failUnlessEqual(answer, (True, {0:[b''],1:[b''],2:[b'']})) + self.assertThat(answer, Equals((True, {0:[b''],1:[b''],2:[b'']}))) # Then the extend the file by writing a vector which starts out past # the end... @@ -1492,22 +1492,22 @@ class MutableServer(unittest.TestCase): answer = rstaraw(b"si1", secrets, {0: ([], [], None)}, [(20, 30)]) - self.failUnlessEqual(answer, (True, {0:[b'\x00'*30],1:[b''],2:[b'']})) + self.assertThat(answer, Equals((True, {0:[b'\x00'*30],1:[b''],2:[b'']}))) # Also see if the server explicitly declares that it supports this # feature. ver = ss.get_version() storage_v1_ver = ver[b"http://allmydata.org/tahoe/protocols/storage/v1"] - self.failUnless(storage_v1_ver.get(b"fills-holes-with-zero-bytes")) + self.assertTrue(storage_v1_ver.get(b"fills-holes-with-zero-bytes")) # If the size is dropped to zero the share is deleted. 
answer = rstaraw(b"si1", secrets, {0: ([], [(0,data)], 0)}, []) - self.failUnlessEqual(answer, (True, {0:[],1:[],2:[]}) ) + self.assertThat(answer, Equals((True, {0:[],1:[],2:[]}))) read_answer = read(b"si1", [0], [(0,10)]) - self.failUnlessEqual(read_answer, {}) + self.assertThat(read_answer, Equals({})) def test_allocate(self): ss = self.create("test_allocate") @@ -1515,12 +1515,12 @@ class MutableServer(unittest.TestCase): set([0,1,2]), 100) read = ss.slot_readv - self.failUnlessEqual(read(b"si1", [0], [(0, 10)]), - {0: [b""]}) - self.failUnlessEqual(read(b"si1", [], [(0, 10)]), - {0: [b""], 1: [b""], 2: [b""]}) - self.failUnlessEqual(read(b"si1", [0], [(100, 10)]), - {0: [b""]}) + self.assertThat(read(b"si1", [0], [(0, 10)]), + Equals({0: [b""]})) + self.assertThat(read(b"si1", [], [(0, 10)]), + Equals({0: [b""], 1: [b""], 2: [b""]})) + self.assertThat(read(b"si1", [0], [(100, 10)]), + Equals({0: [b""]})) # try writing to one secrets = ( self.write_enabler(b"we1"), @@ -1531,19 +1531,19 @@ class MutableServer(unittest.TestCase): answer = write(b"si1", secrets, {0: ([], [(0,data)], None)}, []) - self.failUnlessEqual(answer, (True, {0:[],1:[],2:[]}) ) + self.assertThat(answer, Equals((True, {0:[],1:[],2:[]}))) - self.failUnlessEqual(read(b"si1", [0], [(0,20)]), - {0: [b"00000000001111111111"]}) - self.failUnlessEqual(read(b"si1", [0], [(95,10)]), - {0: [b"99999"]}) + self.assertThat(read(b"si1", [0], [(0,20)]), + Equals({0: [b"00000000001111111111"]})) + self.assertThat(read(b"si1", [0], [(95,10)]), + Equals({0: [b"99999"]})) #self.failUnlessEqual(s0.get_length(), 100) bad_secrets = (b"bad write enabler", secrets[1], secrets[2]) f = self.failUnlessRaises(BadWriteEnablerError, write, b"si1", bad_secrets, {}, []) - self.failUnlessIn("The write enabler was recorded by nodeid 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'.", str(f)) + self.assertThat(str(f), Contains("The write enabler was recorded by nodeid 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'.")) # this testv should fail answer = write(b"si1", secrets, @@ -1555,12 +1555,12 @@ class MutableServer(unittest.TestCase): }, [(0,12), (20,5)], ) - self.failUnlessEqual(answer, (False, - {0: [b"000000000011", b"22222"], + self.assertThat(answer, (False, + Equals({0: [b"000000000011", b"22222"], 1: [b"", b""], 2: [b"", b""], - })) - self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]}) + }))) + self.assertThat(read(b"si1", [0], [(0,100)]), Equals({0: [data]})) def test_operators(self): # test operators, the data we're comparing is '11111' in all cases. 
@@ -1587,8 +1587,8 @@ class MutableServer(unittest.TestCase): [(0, b"x"*100)], None, )}, [(10,5)]) - self.failUnlessEqual(answer, (False, {0: [b"11111"]})) - self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]}) + self.assertThat(answer, Equals((False, {0: [b"11111"]}))) + self.assertThat(read(b"si1", [0], [(0,100)]), Equals({0: [data]})) reset() answer = write(b"si1", secrets, {0: ([(10, 5, b"eq", b"11111"), @@ -1596,8 +1596,8 @@ class MutableServer(unittest.TestCase): [(0, b"y"*100)], None, )}, [(10,5)]) - self.failUnlessEqual(answer, (True, {0: [b"11111"]})) - self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [b"y"*100]}) + self.assertThat(answer, Equals((True, {0: [b"11111"]}))) + self.assertThat(read(b"si1", [0], [(0,100)]), Equals({0: [b"y"*100]})) reset() # finally, test some operators against empty shares @@ -1606,8 +1606,8 @@ class MutableServer(unittest.TestCase): [(0, b"x"*100)], None, )}, [(10,5)]) - self.failUnlessEqual(answer, (False, {0: [b"11111"]})) - self.failUnlessEqual(read(b"si1", [0], [(0,100)]), {0: [data]}) + self.assertThat(answer, Equals((False, {0: [b"11111"]}))) + self.assertThat(read(b"si1", [0], [(0,100)]), Equals({0: [data]})) reset() def test_readv(self): @@ -1624,12 +1624,12 @@ class MutableServer(unittest.TestCase): 1: ([], [(0,data[1])], None), 2: ([], [(0,data[2])], None), }, []) - self.failUnlessEqual(rc, (True, {})) + self.assertThat(rc, Equals((True, {}))) answer = read(b"si1", [], [(0, 10)]) - self.failUnlessEqual(answer, {0: [b"0"*10], + self.assertThat(answer, Equals({0: [b"0"*10], 1: [b"1"*10], - 2: [b"2"*10]}) + 2: [b"2"*10]})) def compare_leases_without_timestamps(self, leases_a, leases_b): """ @@ -1646,11 +1646,11 @@ class MutableServer(unittest.TestCase): # non-equal inputs (expiration timestamp aside). It seems # reasonably safe to use `renew` to make _one_ of the timestamps # equal to the other though. - self.assertEqual( + self.assertThat( a.renew(b.get_expiration_time()), - b, + Equals(b), ) - self.assertEqual(len(leases_a), len(leases_b)) + self.assertThat(len(leases_a), Equals(len(leases_b))) def test_leases(self): ss = self.create("test_leases") @@ -1662,7 +1662,7 @@ class MutableServer(unittest.TestCase): write = ss.slot_testv_and_readv_and_writev read = ss.slot_readv rc = write(b"si1", secrets(0), {0: ([], [(0,data)], None)}, []) - self.failUnlessEqual(rc, (True, {})) + self.assertThat(rc, Equals((True, {}))) # create a random non-numeric file in the bucket directory, to # exercise the code that's supposed to ignore those. @@ -1673,32 +1673,32 @@ class MutableServer(unittest.TestCase): f.close() s0 = MutableShareFile(os.path.join(bucket_dir, "0")) - self.failUnlessEqual(len(list(s0.get_leases())), 1) + self.assertThat(list(s0.get_leases()), HasLength(1)) # add-lease on a missing storage index is silently ignored - self.failUnlessEqual(ss.add_lease(b"si18", b"", b""), None) + self.assertThat(ss.add_lease(b"si18", b"", b""), Equals(None)) # re-allocate the slots and use the same secrets, that should update # the lease write(b"si1", secrets(0), {0: ([], [(0,data)], None)}, []) - self.failUnlessEqual(len(list(s0.get_leases())), 1) + self.assertThat(list(s0.get_leases()), HasLength(1)) # renew it directly ss.renew_lease(b"si1", secrets(0)[1]) - self.failUnlessEqual(len(list(s0.get_leases())), 1) + self.assertThat(list(s0.get_leases()), HasLength(1)) # now allocate them with a bunch of different secrets, to trigger the # extended lease code. Use add_lease for one of them. 
write(b"si1", secrets(1), {0: ([], [(0,data)], None)}, []) - self.failUnlessEqual(len(list(s0.get_leases())), 2) + self.assertThat(list(s0.get_leases()), HasLength(2)) secrets2 = secrets(2) ss.add_lease(b"si1", secrets2[1], secrets2[2]) - self.failUnlessEqual(len(list(s0.get_leases())), 3) + self.assertThat(list(s0.get_leases()), HasLength(3)) write(b"si1", secrets(3), {0: ([], [(0,data)], None)}, []) write(b"si1", secrets(4), {0: ([], [(0,data)], None)}, []) write(b"si1", secrets(5), {0: ([], [(0,data)], None)}, []) - self.failUnlessEqual(len(list(s0.get_leases())), 6) + self.assertThat(list(s0.get_leases()), HasLength(6)) all_leases = list(s0.get_leases()) # and write enough data to expand the container, forcing the server @@ -1728,15 +1728,15 @@ class MutableServer(unittest.TestCase): ss.renew_lease, b"si1", secrets(20)[1]) e_s = str(e) - self.failUnlessIn("Unable to renew non-existent lease", e_s) - self.failUnlessIn("I have leases accepted by nodeids:", e_s) - self.failUnlessIn("nodeids: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' .", e_s) + self.assertThat(e_s, Contains("Unable to renew non-existent lease")) + self.assertThat(e_s, Contains("I have leases accepted by nodeids:")) + self.assertThat(e_s, Contains("nodeids: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' .")) - self.assertEqual(all_leases, list(s0.get_leases())) + self.assertThat(all_leases, Equals(list(s0.get_leases()))) # reading shares should not modify the timestamp read(b"si1", [], [(0,200)]) - self.assertEqual(all_leases, list(s0.get_leases())) + self.assertThat(all_leases, Equals(list(s0.get_leases()))) write(b"si1", secrets(0), {0: ([], [(200, b"make me bigger")], None)}, []) @@ -1764,13 +1764,13 @@ class MutableServer(unittest.TestCase): write_enabler, renew_secret, cancel_secret = secrets(0) rc = write(b"si1", (write_enabler, renew_secret, cancel_secret), {0: ([], [(0,data)], None)}, []) - self.failUnlessEqual(rc, (True, {})) + self.assertThat(rc, Equals((True, {}))) bucket_dir = os.path.join(self.workdir("test_mutable_add_lease_renews"), "shares", storage_index_to_dir(b"si1")) s0 = MutableShareFile(os.path.join(bucket_dir, "0")) [lease] = s0.get_leases() - self.assertEqual(lease.get_expiration_time(), 235 + DEFAULT_RENEWAL_TIME) + self.assertThat(lease.get_expiration_time(), Equals(235 + DEFAULT_RENEWAL_TIME)) # Time passes... 
clock.advance(835) # Adding a lease renews it: ss.add_lease(b"si1", renew_secret, cancel_secret) [lease] = s0.get_leases() - self.assertEqual(lease.get_expiration_time(), - 235 + 835 + DEFAULT_RENEWAL_TIME) + self.assertThat(lease.get_expiration_time(), + Equals(235 + 835 + DEFAULT_RENEWAL_TIME)) def test_remove(self): ss = self.create("test_remove") @@ -1796,26 +1796,26 @@ class MutableServer(unittest.TestCase): []) # the answer should mention all the shares that existed before the # write - self.failUnlessEqual(answer, (True, {0:[],1:[],2:[]}) ) + self.assertThat(answer, Equals((True, {0:[],1:[],2:[]}))) # but a new read should show only sh1 and sh2 - self.failUnlessEqual(readv(b"si1", [], [(0,10)]), - {1: [b""], 2: [b""]}) + self.assertThat(readv(b"si1", [], [(0,10)]), + Equals({1: [b""], 2: [b""]})) # delete sh1 by setting its size to zero answer = writev(b"si1", secrets, {1: ([], [], 0)}, []) - self.failUnlessEqual(answer, (True, {1:[],2:[]}) ) - self.failUnlessEqual(readv(b"si1", [], [(0,10)]), - {2: [b""]}) + self.assertThat(answer, Equals((True, {1:[],2:[]}))) + self.assertThat(readv(b"si1", [], [(0,10)]), + Equals({2: [b""]})) # delete sh2 by setting its size to zero answer = writev(b"si1", secrets, {2: ([], [], 0)}, []) - self.failUnlessEqual(answer, (True, {2:[]}) ) - self.failUnlessEqual(readv(b"si1", [], [(0,10)]), - {}) + self.assertThat(answer, Equals((True, {2:[]}))) + self.assertThat(readv(b"si1", [], [(0,10)]), + Equals({})) # and the bucket directory should now be gone si = base32.b2a(b"si1") # note: this is a detail of the storage server implementation, and @@ -1824,8 +1824,8 @@ class MutableServer(unittest.TestCase): prefix = si[:2] prefixdir = os.path.join(self.workdir("test_remove"), "shares", prefix) bucketdir = os.path.join(prefixdir, si) - self.failUnless(os.path.exists(prefixdir), prefixdir) - self.failIf(os.path.exists(bucketdir), bucketdir) + self.assertTrue(os.path.exists(prefixdir), prefixdir) + self.assertFalse(os.path.exists(bucketdir), bucketdir) def test_writev_without_renew_lease(self): """ @@ -1854,7 +1854,7 @@ class MutableServer(unittest.TestCase): renew_leases=False, ) leases = list(ss.get_slot_leases(storage_index)) - self.assertEqual([], leases) + self.assertThat([], Equals(leases)) def test_get_slot_leases_empty_slot(self): """ When the slot is known to the server but contains no shares, it returns an empty iterable.
""" ss = self.create("test_get_slot_leases_empty_slot") - self.assertEqual( + self.assertThat( list(ss.get_slot_leases(b"si1")), - [], + Equals([]), ) def test_remove_non_present(self): @@ -1900,10 +1900,10 @@ class MutableServer(unittest.TestCase): ) self.assertTrue(testv_is_good) - self.assertEqual({}, read_data) + self.assertThat({}, Equals(read_data)) -class MDMFProxies(unittest.TestCase, ShouldFailMixin): +class MDMFProxies(SyncTestCase, ShouldFailMixin): def setUp(self): self.sparent = LoggingServiceParent() self._lease_secret = itertools.count() @@ -2084,7 +2084,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): tws[0] = (testvs, [(0, data)], None) readv = [(0, 1)] results = write(storage_index, self.secrets, tws, readv) - self.failUnless(results[0]) + self.assertTrue(results[0]) def build_test_sdmf_share(self, empty=False): @@ -2150,7 +2150,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): tws[0] = (testvs, [(0, share)], None) readv = [] results = write(storage_index, self.secrets, tws, readv) - self.failUnless(results[0]) + self.assertFalse(results[0]) def test_read(self): @@ -2160,8 +2160,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): d = defer.succeed(None) def _check_block_and_salt(block_and_salt): (block, salt) = block_and_salt - self.failUnlessEqual(block, self.block) - self.failUnlessEqual(salt, self.salt) + self.assertThat(block, Equals(self.block)) + self.assertThat(salt, Equals(self.salt)) for i in range(6): d.addCallback(lambda ignored, i=i: @@ -2171,57 +2171,57 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): d.addCallback(lambda ignored: mr.get_encprivkey()) d.addCallback(lambda encprivkey: - self.failUnlessEqual(self.encprivkey, encprivkey)) + self.assertThat(self.encprivkey, Equals(encprivkey))) d.addCallback(lambda ignored: mr.get_blockhashes()) d.addCallback(lambda blockhashes: - self.failUnlessEqual(self.block_hash_tree, blockhashes)) + self.assertThat(self.block_hash_tree, Equals(blockhashes))) d.addCallback(lambda ignored: mr.get_sharehashes()) d.addCallback(lambda sharehashes: - self.failUnlessEqual(self.share_hash_chain, sharehashes)) + self.assertThat(self.share_hash_chain, Equals(sharehashes))) d.addCallback(lambda ignored: mr.get_signature()) d.addCallback(lambda signature: - self.failUnlessEqual(signature, self.signature)) + self.assertThat(signature, Equals(self.signature))) d.addCallback(lambda ignored: mr.get_verification_key()) d.addCallback(lambda verification_key: - self.failUnlessEqual(verification_key, self.verification_key)) + self.assertThat(verification_key, Equals(self.verification_key))) d.addCallback(lambda ignored: mr.get_seqnum()) d.addCallback(lambda seqnum: - self.failUnlessEqual(seqnum, 0)) + self.assertThat(seqnum, Equals(0))) d.addCallback(lambda ignored: mr.get_root_hash()) d.addCallback(lambda root_hash: - self.failUnlessEqual(self.root_hash, root_hash)) + self.assertThat(self.root_hash, Equals(root_hash))) d.addCallback(lambda ignored: mr.get_seqnum()) d.addCallback(lambda seqnum: - self.failUnlessEqual(0, seqnum)) + self.assertThat(seqnum, Equals(0))) d.addCallback(lambda ignored: mr.get_encoding_parameters()) def _check_encoding_parameters(args): (k, n, segsize, datalen) = args - self.failUnlessEqual(k, 3) - self.failUnlessEqual(n, 10) - self.failUnlessEqual(segsize, 6) - self.failUnlessEqual(datalen, 36) + self.assertThat(k, Equals(3)) + self.assertThat(n, Equals(10)) + self.assertThat(segsize, Equals(6)) + self.assertThat(datalen, Equals(36)) d.addCallback(_check_encoding_parameters) 
d.addCallback(lambda ignored: mr.get_checkstring()) d.addCallback(lambda checkstring: - self.failUnlessEqual(checkstring, checkstring)) + self.assertThat(checkstring, Equals(self.checkstring))) return d @@ -2231,8 +2231,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): d = mr.get_block_and_salt(5) def _check_tail_segment(results): block, salt = results - self.failUnlessEqual(len(block), 1) - self.failUnlessEqual(block, b"a") + self.assertThat(block, HasLength(1)) + self.assertThat(block, Equals(b"a")) d.addCallback(_check_tail_segment) return d @@ -2254,10 +2254,10 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): d = mr.get_encoding_parameters() def _check_encoding_parameters(args): (k, n, segment_size, datalen) = args - self.failUnlessEqual(k, 3) - self.failUnlessEqual(n, 10) - self.failUnlessEqual(segment_size, 6) - self.failUnlessEqual(datalen, 36) + self.assertThat(k, Equals(3)) + self.assertThat(n, Equals(10)) + self.assertThat(segment_size, Equals(6)) + self.assertThat(datalen, Equals(36)) d.addCallback(_check_encoding_parameters) return d @@ -2267,7 +2267,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = mr.get_seqnum() d.addCallback(lambda seqnum: - self.failUnlessEqual(seqnum, 0)) + self.assertThat(seqnum, Equals(0))) return d @@ -2276,7 +2276,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = mr.get_root_hash() d.addCallback(lambda root_hash: - self.failUnlessEqual(root_hash, self.root_hash)) + self.assertThat(root_hash, Equals(self.root_hash))) return d @@ -2285,7 +2285,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0) d = mr.get_checkstring() d.addCallback(lambda checkstring: - self.failUnlessEqual(checkstring, self.checkstring)) + self.assertThat(checkstring, Equals(self.checkstring))) return d @@ -2307,22 +2307,22 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): mw.put_verification_key(self.verification_key) d = mw.finish_publishing() def _then(results): - self.failUnless(len(results), 2) + self.assertThat(results, HasLength(2)) result, readv = results - self.failUnless(result) - self.failIf(readv) + self.assertTrue(result) + self.assertFalse(readv) self.old_checkstring = mw.get_checkstring() mw.set_checkstring(b"") d.addCallback(_then) d.addCallback(lambda ignored: mw.finish_publishing()) def _then_again(results): - self.failUnlessEqual(len(results), 2) + self.assertThat(results, HasLength(2)) result, readvs = results - self.failIf(result) - self.failUnlessIn(0, readvs) + self.assertFalse(result) + self.assertThat(readvs, Contains(0)) readv = readvs[0][0] - self.failUnlessEqual(readv, self.old_checkstring) + self.assertThat(readv, Equals(self.old_checkstring)) d.addCallback(_then_again) # The checkstring remains the same for the rest of the process. return d @@ -2383,11 +2383,11 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): def _check_success(results): result, readvs = results - self.failUnless(result) + self.assertTrue(result) def _check_failure(results): result, readvs = results - self.failIf(result) + self.assertFalse(result) def _write_share(mw): for i in range(6): @@ -2431,14 +2431,14 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): # any point during the process, it should fail to write when we # tell it to write. 
def _check_failure(results): - self.failUnlessEqual(len(results), 2) + self.assertThat(results, HasLength(2)) res, d = results - self.failIf(res) + self.assertFalse(res) def _check_success(results): - self.failUnlessEqual(len(results), 2) + self.assertThat(results, HasLength(2)) res, d = results - self.failUnless(results) + self.assertTrue(results) mw = self._make_new_mw(b"si1", 0) mw.set_checkstring(b"this is a lie") @@ -2495,100 +2495,100 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin): mw.put_verification_key(self.verification_key) d = mw.finish_publishing() def _check_publish(results): - self.failUnlessEqual(len(results), 2) + self.assertThat(results, HasLength(2)) result, ign = results - self.failUnless(result, "publish failed") + self.assertTrue(result, "publish failed") for i in range(6): - self.failUnlessEqual(read(b"si1", [0], [(expected_sharedata_offset + (i * written_block_size), written_block_size)]), - {0: [written_block]}) + self.assertThat(read(b"si1", [0], [(expected_sharedata_offset + (i * written_block_size), written_block_size)]), + Equals({0: [written_block]})) - self.failUnlessEqual(len(self.encprivkey), 7) - self.failUnlessEqual(read(b"si1", [0], [(expected_private_key_offset, 7)]), - {0: [self.encprivkey]}) + self.assertThat(self.encprivkey, HasLength(7)) + self.assertThat(read(b"si1", [0], [(expected_private_key_offset, 7)]), + Equals({0: [self.encprivkey]})) expected_block_hash_offset = expected_sharedata_offset + \ (6 * written_block_size) - self.failUnlessEqual(len(self.block_hash_tree_s), 32 * 6) - self.failUnlessEqual(read(b"si1", [0], [(expected_block_hash_offset, 32 * 6)]), - {0: [self.block_hash_tree_s]}) + self.assertThat(self.block_hash_tree_s, HasLength(32 * 6)) + self.assertThat(read(b"si1", [0], [(expected_block_hash_offset, 32 * 6)]), + Equals({0: [self.block_hash_tree_s]})) expected_share_hash_offset = expected_private_key_offset + len(self.encprivkey) - self.failUnlessEqual(read(b"si1", [0],[(expected_share_hash_offset, (32 + 2) * 6)]), - {0: [self.share_hash_chain_s]}) + self.assertThat(read(b"si1", [0],[(expected_share_hash_offset, (32 + 2) * 6)]), + Equals({0: [self.share_hash_chain_s]})) - self.failUnlessEqual(read(b"si1", [0], [(9, 32)]), - {0: [self.root_hash]}) + self.assertThat(read(b"si1", [0], [(9, 32)]), + Equals({0: [self.root_hash]})) expected_signature_offset = expected_share_hash_offset + \ len(self.share_hash_chain_s) - self.failUnlessEqual(len(self.signature), 9) - self.failUnlessEqual(read(b"si1", [0], [(expected_signature_offset, 9)]), - {0: [self.signature]}) + self.assertThat(self.signature, HasLength(9)) + self.assertThat(read(b"si1", [0], [(expected_signature_offset, 9)]), + Equals({0: [self.signature]})) expected_verification_key_offset = expected_signature_offset + len(self.signature) - self.failUnlessEqual(len(self.verification_key), 6) - self.failUnlessEqual(read(b"si1", [0], [(expected_verification_key_offset, 6)]), - {0: [self.verification_key]}) + self.assertThat(self.verification_key, HasLength(6)) + self.assertThat(read(b"si1", [0], [(expected_verification_key_offset, 6)]), + Equals({0: [self.verification_key]})) signable = mw.get_signable() verno, seq, roothash, k, n, segsize, datalen = \ struct.unpack(">BQ32sBBQQ", signable) - self.failUnlessEqual(verno, 1) - self.failUnlessEqual(seq, 0) - self.failUnlessEqual(roothash, self.root_hash) - self.failUnlessEqual(k, 3) - self.failUnlessEqual(n, 10) - self.failUnlessEqual(segsize, 6) - self.failUnlessEqual(datalen, 36) + self.assertThat(verno, Equals(1)) + 
self.assertThat(seq, Equals(0)) + self.assertThat(roothash, Equals(self.root_hash)) + self.assertThat(k, Equals(3)) + self.assertThat(n, Equals(10)) + self.assertThat(segsize, Equals(6)) + self.assertThat(datalen, Equals(36)) expected_eof_offset = expected_block_hash_offset + \ len(self.block_hash_tree_s) # Check the version number to make sure that it is correct. expected_version_number = struct.pack(">B", 1) - self.failUnlessEqual(read(b"si1", [0], [(0, 1)]), - {0: [expected_version_number]}) + self.assertThat(read(b"si1", [0], [(0, 1)]), + Equals({0: [expected_version_number]})) # Check the sequence number to make sure that it is correct expected_sequence_number = struct.pack(">Q", 0) - self.failUnlessEqual(read(b"si1", [0], [(1, 8)]), - {0: [expected_sequence_number]}) + self.assertThat(read(b"si1", [0], [(1, 8)]), + Equals({0: [expected_sequence_number]})) # Check that the encoding parameters (k, N, segement size, data # length) are what they should be. These are 3, 10, 6, 36 expected_k = struct.pack(">B", 3) - self.failUnlessEqual(read(b"si1", [0], [(41, 1)]), - {0: [expected_k]}) + self.assertThat(read(b"si1", [0], [(41, 1)]), + Equals({0: [expected_k]})) expected_n = struct.pack(">B", 10) - self.failUnlessEqual(read(b"si1", [0], [(42, 1)]), - {0: [expected_n]}) + self.assertThat(read(b"si1", [0], [(42, 1)]), + Equals({0: [expected_n]})) expected_segment_size = struct.pack(">Q", 6) - self.failUnlessEqual(read(b"si1", [0], [(43, 8)]), - {0: [expected_segment_size]}) + self.assertThat(read(b"si1", [0], [(43, 8)]), + Equals({0: [expected_segment_size]})) expected_data_length = struct.pack(">Q", 36) - self.failUnlessEqual(read(b"si1", [0], [(51, 8)]), - {0: [expected_data_length]}) + self.assertThat(read(b"si1", [0], [(51, 8)]), + Equals({0: [expected_data_length]})) expected_offset = struct.pack(">Q", expected_private_key_offset) - self.failUnlessEqual(read(b"si1", [0], [(59, 8)]), - {0: [expected_offset]}) + self.assertThat(read(b"si1", [0], [(59, 8)]), + Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_share_hash_offset) - self.failUnlessEqual(read(b"si1", [0], [(67, 8)]), - {0: [expected_offset]}) + self.assertThat(read(b"si1", [0], [(67, 8)]), + Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_signature_offset) - self.failUnlessEqual(read(b"si1", [0], [(75, 8)]), - {0: [expected_offset]}) + self.assertThat(read(b"si1", [0], [(75, 8)]), + Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_verification_key_offset) - self.failUnlessEqual(read(b"si1", [0], [(83, 8)]), - {0: [expected_offset]}) + self.assertThat(read(b"si1", [0], [(83, 8)]), + Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_verification_key_offset + len(self.verification_key)) - self.failUnlessEqual(read(b"si1", [0], [(91, 8)]), - {0: [expected_offset]}) + self.assertThat(read(b"si1", [0], [(91, 8)]), + Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_sharedata_offset) - self.failUnlessEqual(read(b"si1", [0], [(99, 8)]), - {0: [expected_offset]}) + self.assertThat(read(b"si1", [0], [(99, 8)]), + Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_block_hash_offset) - self.failUnlessEqual(read(b"si1", [0], [(107, 8)]), - {0: [expected_offset]}) + self.assertThat(read(b"si1", [0], [(107, 8)]), + Equals({0: [expected_offset]})) expected_offset = struct.pack(">Q", expected_eof_offset) - self.failUnlessEqual(read(b"si1", [0], [(115, 8)]), - {0: [expected_offset]}) 
+            self.assertThat(read(b"si1", [0], [(115, 8)]),
+                            Equals({0: [expected_offset]}))
         d.addCallback(_check_publish)
         return d
@@ -2803,8 +2803,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0)
         def _check_block_and_salt(block_and_salt):
             (block, salt) = block_and_salt
-            self.failUnlessEqual(block, self.block)
-            self.failUnlessEqual(salt, self.salt)
+            self.assertThat(block, Equals(self.block))
+            self.assertThat(salt, Equals(self.salt))
 
         for i in range(6):
             d.addCallback(lambda ignored, i=i:
@@ -2814,52 +2814,52 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(lambda ignored:
             mr.get_encprivkey())
         d.addCallback(lambda encprivkey:
-            self.failUnlessEqual(self.encprivkey, encprivkey))
+            self.assertThat(self.encprivkey, Equals(encprivkey)))
         d.addCallback(lambda ignored:
             mr.get_blockhashes())
         d.addCallback(lambda blockhashes:
-            self.failUnlessEqual(self.block_hash_tree, blockhashes))
+            self.assertThat(self.block_hash_tree, Equals(blockhashes)))
         d.addCallback(lambda ignored:
             mr.get_sharehashes())
         d.addCallback(lambda sharehashes:
-            self.failUnlessEqual(self.share_hash_chain, sharehashes))
+            self.assertThat(self.share_hash_chain, Equals(sharehashes)))
         d.addCallback(lambda ignored:
             mr.get_signature())
         d.addCallback(lambda signature:
-            self.failUnlessEqual(signature, self.signature))
+            self.assertThat(signature, Equals(self.signature)))
         d.addCallback(lambda ignored:
             mr.get_verification_key())
         d.addCallback(lambda verification_key:
-            self.failUnlessEqual(verification_key, self.verification_key))
+            self.assertThat(verification_key, Equals(self.verification_key)))
         d.addCallback(lambda ignored:
             mr.get_seqnum())
         d.addCallback(lambda seqnum:
-            self.failUnlessEqual(seqnum, 0))
+            self.assertThat(seqnum, Equals(0)))
         d.addCallback(lambda ignored:
             mr.get_root_hash())
         d.addCallback(lambda root_hash:
-            self.failUnlessEqual(self.root_hash, root_hash))
+            self.assertThat(self.root_hash, Equals(root_hash)))
         d.addCallback(lambda ignored:
             mr.get_encoding_parameters())
         def _check_encoding_parameters(args):
             (k, n, segsize, datalen) = args
-            self.failUnlessEqual(k, 3)
-            self.failUnlessEqual(n, 10)
-            self.failUnlessEqual(segsize, 6)
-            self.failUnlessEqual(datalen, 36)
+            self.assertThat(k, Equals(3))
+            self.assertThat(n, Equals(10))
+            self.assertThat(segsize, Equals(6))
+            self.assertThat(datalen, Equals(36))
         d.addCallback(_check_encoding_parameters)
         d.addCallback(lambda ignored:
             mr.get_checkstring())
         d.addCallback(lambda checkstring:
-            self.failUnlessEqual(checkstring, mw.get_checkstring()))
+            self.assertThat(checkstring, Equals(mw.get_checkstring())))
         return d
@@ -2871,7 +2871,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0)
         d = mr.is_sdmf()
         d.addCallback(lambda issdmf:
-            self.failUnless(issdmf))
+            self.assertFalse(issdmf))
         return d
@@ -2884,7 +2884,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(lambda ignored:
             mr.is_sdmf())
         d.addCallback(lambda issdmf:
-            self.failUnless(issdmf))
+            self.assertTrue(issdmf))
 
         # What do we need to read?
         # - The sharedata
@@ -2897,51 +2897,51 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
             # bytes in size. The share is composed entirely of the
            # letter a. self.block contains 2 as, so 6 * self.block is
            # what we are looking for.
-            self.failUnlessEqual(block, self.block * 6)
-            self.failUnlessEqual(salt, self.salt)
+            self.assertThat(block, Equals(self.block * 6))
+            self.assertThat(salt, Equals(self.salt))
         d.addCallback(_check_block_and_salt)
         # - The blockhashes
         d.addCallback(lambda ignored:
             mr.get_blockhashes())
         d.addCallback(lambda blockhashes:
-            self.failUnlessEqual(self.block_hash_tree,
-                                 blockhashes,
+            self.assertThat(self.block_hash_tree,
+                            Equals(blockhashes),
                             blockhashes))
         # - The sharehashes
         d.addCallback(lambda ignored:
             mr.get_sharehashes())
         d.addCallback(lambda sharehashes:
-            self.failUnlessEqual(self.share_hash_chain,
-                                 sharehashes))
+            self.assertThat(self.share_hash_chain,
+                            Equals(sharehashes)))
         # - The keys
         d.addCallback(lambda ignored:
             mr.get_encprivkey())
         d.addCallback(lambda encprivkey:
-            self.failUnlessEqual(encprivkey, self.encprivkey, encprivkey))
+            self.assertThat(encprivkey, self.encprivkey, Equals(encprivkey)))
         d.addCallback(lambda ignored:
             mr.get_verification_key())
         d.addCallback(lambda verification_key:
-            self.failUnlessEqual(verification_key,
-                                 self.verification_key,
+            self.assertThat(verification_key,
+                            Equals(self.verification_key),
                             verification_key))
         # - The signature
         d.addCallback(lambda ignored:
             mr.get_signature())
         d.addCallback(lambda signature:
-            self.failUnlessEqual(signature, self.signature, signature))
+            self.assertThat(signature, Equals(self.signature), signature))
         # - The sequence number
         d.addCallback(lambda ignored:
             mr.get_seqnum())
         d.addCallback(lambda seqnum:
-            self.failUnlessEqual(seqnum, 0, seqnum))
+            self.assertThat(seqnum, Equals(0), seqnum))
         # - The root hash
         d.addCallback(lambda ignored:
             mr.get_root_hash())
         d.addCallback(lambda root_hash:
-            self.failUnlessEqual(root_hash, self.root_hash, root_hash))
+            self.assertThat(root_hash, Equals(self.root_hash), root_hash))
         return d
@@ -2955,7 +2955,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(lambda ignored:
             mr.is_sdmf())
         d.addCallback(lambda issdmf:
-            self.failUnless(issdmf))
+            self.assertTrue(issdmf))
         d.addCallback(lambda ignored:
             self.shouldFail(LayoutInvalid, "test bad segment",
                             None,
@@ -2983,8 +2983,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(lambda mr:
             mr.get_verinfo())
         def _check_verinfo(verinfo):
-            self.failUnless(verinfo)
-            self.failUnlessEqual(len(verinfo), 9)
+            self.assertTrue(verinfo)
+            self.assertThat(verinfo, HasLength(9))
             (seqnum,
              root_hash,
              salt_hash,
@@ -2994,12 +2994,12 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
              n,
              prefix,
              offsets) = verinfo
-            self.failUnlessEqual(seqnum, 0)
-            self.failUnlessEqual(root_hash, self.root_hash)
-            self.failUnlessEqual(segsize, 6)
-            self.failUnlessEqual(datalen, 36)
-            self.failUnlessEqual(k, 3)
-            self.failUnlessEqual(n, 10)
+            self.assertThat(seqnum, Equals(0))
+            self.assertThat(root_hash, Equals(self.root_hash))
+            self.assertThat(segsize, Equals(6))
+            self.assertThat(datalen, Equals(36))
+            self.assertThat(k, Equals(3))
+            self.assertThat(n, Equals(10))
             expected_prefix = struct.pack(MDMFSIGNABLEHEADER,
                                           1,
                                           seqnum,
@@ -3008,8 +3008,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                                           n,
                                           segsize,
                                           datalen)
-            self.failUnlessEqual(expected_prefix, prefix)
-            self.failUnlessEqual(self.rref.read_count, 0)
+            self.assertThat(expected_prefix, Equals(prefix))
+            self.assertThat(self.rref.read_count, Equals(0))
         d.addCallback(_check_verinfo)
         # This is not enough data to read a block and a share, so the
         # wrapper should attempt to read this from the remote server.
@@ -3018,9 +3018,9 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
             mr.get_block_and_salt(0))
         def _check_block_and_salt(block_and_salt):
             (block, salt) = block_and_salt
-            self.failUnlessEqual(block, self.block)
-            self.failUnlessEqual(salt, self.salt)
-            self.failUnlessEqual(self.rref.read_count, 1)
+            self.assertThat(block, Equals(self.block))
+            self.assertThat(salt, Equals(self.salt))
+            self.assertThat(self.rref.read_count, Equals(1))
         # This should be enough data to read one block.
         d.addCallback(_make_mr, 123 + PRIVATE_KEY_SIZE + SIGNATURE_SIZE + VERIFICATION_KEY_SIZE + SHARE_HASH_CHAIN_SIZE + 140)
         d.addCallback(lambda mr:
@@ -3044,8 +3044,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(lambda mr:
             mr.get_verinfo())
         def _check_verinfo(verinfo):
-            self.failUnless(verinfo)
-            self.failUnlessEqual(len(verinfo), 9)
+            self.assertTrue(verinfo)
+            self.assertThat(verinfo, HasLength(9))
             (seqnum,
              root_hash,
              salt,
@@ -3055,13 +3055,13 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
              n,
              prefix,
              offsets) = verinfo
-            self.failUnlessEqual(seqnum, 0)
-            self.failUnlessEqual(root_hash, self.root_hash)
-            self.failUnlessEqual(salt, self.salt)
-            self.failUnlessEqual(segsize, 36)
-            self.failUnlessEqual(datalen, 36)
-            self.failUnlessEqual(k, 3)
-            self.failUnlessEqual(n, 10)
+            self.assertThat(seqnum, Equals(0))
+            self.assertThat(root_hash, Equals(self.root_hash))
+            self.assertThat(salt, Equals(self.salt))
+            self.assertThat(segsize, Equals(36))
+            self.assertThat(datalen, Equals(36))
+            self.assertThat(k, Equals(3))
+            self.assertThat(n, Equals(10))
             expected_prefix = struct.pack(SIGNED_PREFIX,
                                           0,
                                           seqnum,
@@ -3071,8 +3071,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                                           n,
                                           segsize,
                                           datalen)
-            self.failUnlessEqual(expected_prefix, prefix)
-            self.failUnlessEqual(self.rref.read_count, 0)
+            self.assertThat(expected_prefix, Equals(prefix))
+            self.assertThat(self.rref.read_count, Equals(0))
         d.addCallback(_check_verinfo)
         # This shouldn't be enough to read any share data.
         d.addCallback(_make_mr, 123)
@@ -3080,11 +3080,11 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
             mr.get_block_and_salt(0))
         def _check_block_and_salt(block_and_salt):
             (block, salt) = block_and_salt
-            self.failUnlessEqual(block, self.block * 6)
-            self.failUnlessEqual(salt, self.salt)
+            self.assertThat(block, Equals(self.block * 6))
+            self.assertThat(salt, Equals(self.salt))
             # TODO: Fix the read routine so that it reads only the data
             #       that it has cached if it can't read all of it.
-            self.failUnlessEqual(self.rref.read_count, 2)
+            self.assertThat(self.rref.read_count, Equals(2))
         # This should be enough to read share data.
         d.addCallback(_make_mr, self.offsets['share_data'])
@@ -3106,12 +3106,12 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(lambda ignored:
             mr.get_encoding_parameters())
         def _check_encoding_parameters(params):
-            self.failUnlessEqual(len(params), 4)
+            self.assertThat(params, HasLength(4))
             k, n, segsize, datalen = params
-            self.failUnlessEqual(k, 3)
-            self.failUnlessEqual(n, 10)
-            self.failUnlessEqual(segsize, 0)
-            self.failUnlessEqual(datalen, 0)
+            self.assertThat(k, Equals(3))
+            self.assertThat(n, Equals(10))
+            self.assertThat(segsize, Equals(0))
+            self.assertThat(datalen, Equals(0))
         d.addCallback(_check_encoding_parameters)
 
         # We should not be able to fetch a block, since there are no
@@ -3132,12 +3132,12 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(lambda ignored:
             mr.get_encoding_parameters())
         def _check_encoding_parameters(params):
-            self.failUnlessEqual(len(params), 4)
+            self.assertThat(params, HasLength(4))
             k, n, segsize, datalen = params
-            self.failUnlessEqual(k, 3)
-            self.failUnlessEqual(n, 10)
-            self.failUnlessEqual(segsize, 0)
-            self.failUnlessEqual(datalen, 0)
+            self.assertThat(k, Equals(3))
+            self.assertThat(n, Equals(10))
+            self.assertThat(segsize, Equals(0))
+            self.assertThat(datalen, Equals(0))
         d.addCallback(_check_encoding_parameters)
 
         # It does not make sense to get a block in this format, so we
@@ -3157,8 +3157,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(lambda ignored:
             mr.get_verinfo())
         def _check_verinfo(verinfo):
-            self.failUnless(verinfo)
-            self.failUnlessEqual(len(verinfo), 9)
+            self.assertTrue(verinfo)
+            self.assertThat(verinfo, HasLength(9))
             (seqnum,
              root_hash,
              salt,
@@ -3168,13 +3168,13 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
              n,
              prefix,
              offsets) = verinfo
-            self.failUnlessEqual(seqnum, 0)
-            self.failUnlessEqual(root_hash, self.root_hash)
-            self.failUnlessEqual(salt, self.salt)
-            self.failUnlessEqual(segsize, 36)
-            self.failUnlessEqual(datalen, 36)
-            self.failUnlessEqual(k, 3)
-            self.failUnlessEqual(n, 10)
+            self.assertThat(seqnum, Equals(0))
+            self.assertThat(root_hash, Equals(self.root_hash))
+            self.assertThat(salt, Equals(self.salt))
+            self.assertThat(segsize, Equals(36))
+            self.assertThat(datalen, Equals(36))
+            self.assertThat(k, Equals(3))
+            self.assertThat(n, Equals(10))
             expected_prefix = struct.pack(">BQ32s16s BBQQ",
                                           0,
                                           seqnum,
@@ -3184,8 +3184,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                                           n,
                                           segsize,
                                           datalen)
-            self.failUnlessEqual(prefix, expected_prefix)
-            self.failUnlessEqual(offsets, self.offsets)
+            self.assertThat(prefix, Equals(expected_prefix))
+            self.assertThat(offsets, Equals(self.offsets))
         d.addCallback(_check_verinfo)
         return d
@@ -3197,8 +3197,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(lambda ignored:
             mr.get_verinfo())
         def _check_verinfo(verinfo):
-            self.failUnless(verinfo)
-            self.failUnlessEqual(len(verinfo), 9)
+            self.assertThat(verinfo)
+            self.assertThat(verinfo, HasLength(9))
             (seqnum,
              root_hash,
              IV,
@@ -3208,13 +3208,13 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
              n,
             prefix,
             offsets) = verinfo
-            self.failUnlessEqual(seqnum, 0)
-            self.failUnlessEqual(root_hash, self.root_hash)
-            self.failIf(IV)
-            self.failUnlessEqual(segsize, 6)
-            self.failUnlessEqual(datalen, 36)
-            self.failUnlessEqual(k, 3)
-            self.failUnlessEqual(n, 10)
+            self.assertThat(seqnum, Equals(0))
+            self.assertThat(root_hash, Equals(self.root_hash))
+            self.assertFalse(IV)
+            self.assertThat(segsize, Equals(6))
+            self.assertThat(datalen, Equals(36))
+            self.assertThat(k, Equals(3))
+            self.assertThat(n, Equals(10))
             expected_prefix = struct.pack(">BQ32s BBQQ",
                                           1,
                                           seqnum,
@@ -3223,8 +3223,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                                           n,
                                           segsize,
                                           datalen)
-            self.failUnlessEqual(prefix, expected_prefix)
-            self.failUnlessEqual(offsets, self.offsets)
+            self.assertThat(prefix, Equals(expected_prefix))
+            self.assertThat(offsets, Equals(self.offsets))
         d.addCallback(_check_verinfo)
         return d
@@ -3260,15 +3260,15 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         sdmfr.put_verification_key(self.verification_key)
 
         # Now check to make sure that nothing has been written yet.
-        self.failUnlessEqual(self.rref.write_count, 0)
+        self.assertThat(self.rref.write_count, Equals(0))
 
         # Now finish publishing
         d = sdmfr.finish_publishing()
         def _then(ignored):
-            self.failUnlessEqual(self.rref.write_count, 1)
+            self.assertThat(self.rref.write_count, Equals(1))
             read = self.ss.slot_readv
-            self.failUnlessEqual(read(b"si1", [0], [(0, len(data))]),
-                                 {0: [data]})
+            self.assertThat(read(b"si1", [0], [(0, len(data))]),
+                            Equals({0: [data]}))
         d.addCallback(_then)
         return d
@@ -3304,11 +3304,11 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         sdmfw.put_verification_key(self.verification_key)
 
         # We shouldn't have a checkstring yet
-        self.failUnlessEqual(sdmfw.get_checkstring(), b"")
+        self.assertThat(sdmfw.get_checkstring(), Equals(b""))
 
         d = sdmfw.finish_publishing()
         def _then(results):
-            self.failIf(results[0])
+            self.assertFalse(results[0])
             # this is the correct checkstring
             self._expected_checkstring = results[1][0][0]
             return self._expected_checkstring
@@ -3318,21 +3318,21 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(lambda ignored:
             sdmfw.get_checkstring())
         d.addCallback(lambda checkstring:
-            self.failUnlessEqual(checkstring, self._expected_checkstring))
+            self.assertThat(checkstring, Equals(self._expected_checkstring)))
         d.addCallback(lambda ignored:
             sdmfw.finish_publishing())
         def _then_again(results):
-            self.failUnless(results[0])
+            self.assertTrue(results[0])
             read = self.ss.slot_readv
-            self.failUnlessEqual(read(b"si1", [0], [(1, 8)]),
+            self.assertThat(read(b"si1", [0], [(1, 8)]),
                                  {0: [struct.pack(">Q", 1)]})
-            self.failUnlessEqual(read(b"si1", [0], [(9, len(data) - 9)]),
-                                 {0: [data[9:]]})
+            self.assertThat(read(b"si1", [0], [(9, len(data) - 9)]),
+                            Equals({0: [data[9:]]}))
         d.addCallback(_then_again)
         return d
 
 
-class Stats(unittest.TestCase):
+class Stats(SyncTestCase):
 
     def setUp(self):
         self.sparent = LoggingServiceParent()
@@ -3364,57 +3364,57 @@ class Stats(unittest.TestCase):
 
         output = ss.get_latencies()
 
-        self.failUnlessEqual(sorted(output.keys()),
-                             sorted(["allocate", "renew", "cancel", "write", "get"]))
-        self.failUnlessEqual(len(ss.latencies["allocate"]), 1000)
-        self.failUnless(abs(output["allocate"]["mean"] - 9500) < 1, output)
-        self.failUnless(abs(output["allocate"]["01_0_percentile"] - 9010) < 1, output)
-        self.failUnless(abs(output["allocate"]["10_0_percentile"] - 9100) < 1, output)
-        self.failUnless(abs(output["allocate"]["50_0_percentile"] - 9500) < 1, output)
-        self.failUnless(abs(output["allocate"]["90_0_percentile"] - 9900) < 1, output)
-        self.failUnless(abs(output["allocate"]["95_0_percentile"] - 9950) < 1, output)
-        self.failUnless(abs(output["allocate"]["99_0_percentile"] - 9990) < 1, output)
-        self.failUnless(abs(output["allocate"]["99_9_percentile"] - 9999) < 1, output)
+        self.assertThat(sorted(output.keys()),
+                        Equals(sorted(["allocate", "renew", "cancel",
+                                       "write", "get"])))
+        self.assertThat(ss.latencies["allocate"], HasLength(1000))
+        self.assertTrue(abs(output["allocate"]["mean"] - 9500) < 1, output)
+        self.assertTrue(abs(output["allocate"]["01_0_percentile"] - 9010) < 1, output)
+        self.assertTrue(abs(output["allocate"]["10_0_percentile"] - 9100) < 1, output)
+        self.assertTrue(abs(output["allocate"]["50_0_percentile"] - 9500) < 1, output)
+        self.assertTrue(abs(output["allocate"]["90_0_percentile"] - 9900) < 1, output)
+        self.assertTrue(abs(output["allocate"]["95_0_percentile"] - 9950) < 1, output)
+        self.assertTrue(abs(output["allocate"]["99_0_percentile"] - 9990) < 1, output)
+        self.assertTrue(abs(output["allocate"]["99_9_percentile"] - 9999) < 1, output)
 
-        self.failUnlessEqual(len(ss.latencies["renew"]), 1000)
-        self.failUnless(abs(output["renew"]["mean"] - 500) < 1, output)
-        self.failUnless(abs(output["renew"]["01_0_percentile"] - 10) < 1, output)
-        self.failUnless(abs(output["renew"]["10_0_percentile"] - 100) < 1, output)
-        self.failUnless(abs(output["renew"]["50_0_percentile"] - 500) < 1, output)
-        self.failUnless(abs(output["renew"]["90_0_percentile"] - 900) < 1, output)
-        self.failUnless(abs(output["renew"]["95_0_percentile"] - 950) < 1, output)
-        self.failUnless(abs(output["renew"]["99_0_percentile"] - 990) < 1, output)
-        self.failUnless(abs(output["renew"]["99_9_percentile"] - 999) < 1, output)
+        self.assertThat(ss.latencies["renew"], HasLength(1000))
+        self.assertTrue(abs(output["renew"]["mean"] - 500) < 1, output)
+        self.assertTrue(abs(output["renew"]["01_0_percentile"] - 10) < 1, output)
+        self.assertTrue(abs(output["renew"]["10_0_percentile"] - 100) < 1, output)
+        self.assertTrue(abs(output["renew"]["50_0_percentile"] - 500) < 1, output)
+        self.assertTrue(abs(output["renew"]["90_0_percentile"] - 900) < 1, output)
+        self.assertTrue(abs(output["renew"]["95_0_percentile"] - 950) < 1, output)
+        self.assertTrue(abs(output["renew"]["99_0_percentile"] - 990) < 1, output)
+        self.assertTrue(abs(output["renew"]["99_9_percentile"] - 999) < 1, output)
 
-        self.failUnlessEqual(len(ss.latencies["write"]), 20)
-        self.failUnless(abs(output["write"]["mean"] - 9) < 1, output)
-        self.failUnless(output["write"]["01_0_percentile"] is None, output)
-        self.failUnless(abs(output["write"]["10_0_percentile"] - 2) < 1, output)
-        self.failUnless(abs(output["write"]["50_0_percentile"] - 10) < 1, output)
-        self.failUnless(abs(output["write"]["90_0_percentile"] - 18) < 1, output)
-        self.failUnless(abs(output["write"]["95_0_percentile"] - 19) < 1, output)
-        self.failUnless(output["write"]["99_0_percentile"] is None, output)
-        self.failUnless(output["write"]["99_9_percentile"] is None, output)
+        self.assertThat(ss.latencies["write"], HasLength(20))
+        self.assertTrue(abs(output["write"]["mean"] - 9) < 1, output)
+        self.assertTrue(output["write"]["01_0_percentile"] is None, output)
+        self.assertTrue(abs(output["write"]["10_0_percentile"] - 2) < 1, output)
+        self.assertTrue(abs(output["write"]["50_0_percentile"] - 10) < 1, output)
+        self.assertTrue(abs(output["write"]["90_0_percentile"] - 18) < 1, output)
+        self.assertTrue(abs(output["write"]["95_0_percentile"] - 19) < 1, output)
+        self.assertTrue(output["write"]["99_0_percentile"] is None, output)
+        self.assertTrue(output["write"]["99_9_percentile"] is None, output)
 
-        self.failUnlessEqual(len(ss.latencies["cancel"]), 10)
-        self.failUnless(abs(output["cancel"]["mean"] - 9) < 1, output)
-        self.failUnless(output["cancel"]["01_0_percentile"] is None, output)
-        self.failUnless(abs(output["cancel"]["10_0_percentile"] - 2) < 1, output)
-        self.failUnless(abs(output["cancel"]["50_0_percentile"] - 10) < 1, output)
-        self.failUnless(abs(output["cancel"]["90_0_percentile"] - 18) < 1, output)
-        self.failUnless(output["cancel"]["95_0_percentile"] is None, output)
-        self.failUnless(output["cancel"]["99_0_percentile"] is None, output)
-        self.failUnless(output["cancel"]["99_9_percentile"] is None, output)
+        self.assertThat(ss.latencies["cancel"], HasLength(10))
+        self.assertTrue(abs(output["cancel"]["mean"] - 9) < 1, output)
+        self.assertTrue(output["cancel"]["01_0_percentile"] is None, output)
+        self.assertTrue(abs(output["cancel"]["10_0_percentile"] - 2) < 1, output)
+        self.assertTrue(abs(output["cancel"]["50_0_percentile"] - 10) < 1, output)
+        self.assertTrue(abs(output["cancel"]["90_0_percentile"] - 18) < 1, output)
+        self.assertTrue(output["cancel"]["95_0_percentile"] is None, output)
+        self.assertTrue(output["cancel"]["99_0_percentile"] is None, output)
+        self.assertTrue(output["cancel"]["99_9_percentile"] is None, output)
 
-        self.failUnlessEqual(len(ss.latencies["get"]), 1)
-        self.failUnless(output["get"]["mean"] is None, output)
-        self.failUnless(output["get"]["01_0_percentile"] is None, output)
-        self.failUnless(output["get"]["10_0_percentile"] is None, output)
-        self.failUnless(output["get"]["50_0_percentile"] is None, output)
-        self.failUnless(output["get"]["90_0_percentile"] is None, output)
-        self.failUnless(output["get"]["95_0_percentile"] is None, output)
-        self.failUnless(output["get"]["99_0_percentile"] is None, output)
-        self.failUnless(output["get"]["99_9_percentile"] is None, output)
+        self.assertThat(ss.latencies["get"], HasLength(1))
+        self.assertTrue(output["get"]["mean"] is None, output)
+        self.assertTrue(output["get"]["01_0_percentile"] is None, output)
+        self.assertTrue(output["get"]["10_0_percentile"] is None, output)
+        self.assertTrue(output["get"]["50_0_percentile"] is None, output)
+        self.assertTrue(output["get"]["90_0_percentile"] is None, output)
+        self.assertTrue(output["get"]["95_0_percentile"] is None, output)
+        self.assertTrue(output["get"]["99_0_percentile"] is None, output)
+        self.assertTrue(output["get"]["99_9_percentile"] is None, output)
 
 
 immutable_schemas = strategies.sampled_from(list(ALL_IMMUTABLE_SCHEMAS))
@@ -3557,7 +3557,7 @@ class ShareFileTests(unittest.TestCase):
 
 mutable_schemas = strategies.sampled_from(list(ALL_MUTABLE_SCHEMAS))
 
-class MutableShareFileTests(unittest.TestCase):
+class MutableShareFileTests(SyncTestCase):
     """
     Tests for allmydata.storage.mutable.MutableShareFile.
     """

From 6b0fa64236fa8decc2b877163a437ff29d74052a Mon Sep 17 00:00:00 2001
From: "Fon E. Noel NFEBE"
Date: Fri, 25 Nov 2022 12:15:58 +0100
Subject: [PATCH 04/54] Clean up test_storage.py after refactor

This PR cleans up erroneous changes resulting from
1d85a2c5cf40a4e52bbb024e2edba4fcd883caa1 and adds a few improvements,
such as calling `super` implementations and making sure classes with
functions returning deferreds use `AsyncTestCase`.

Signed-off-by: Fon E. Noel NFEBE
---
 src/allmydata/test/test_storage.py | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)

diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py
index 3d35ec55f..f7d5ae919 100644
--- a/src/allmydata/test/test_storage.py
+++ b/src/allmydata/test/test_storage.py
@@ -436,7 +436,7 @@ class RemoteBucket(object):
         return defer.maybeDeferred(_call)
 
 
-class BucketProxy(SyncTestCase):
+class BucketProxy(AsyncTestCase):
     def make_bucket(self, name, size):
         basedir = os.path.join("storage", "BucketProxy", name)
         incoming = os.path.join(basedir, "tmp", "bucket")
@@ -563,10 +563,7 @@ class Server(AsyncTestCase):
         self.sparent = LoggingServiceParent()
         self.sparent.startService()
         self._lease_secret = itertools.count()
-
-    def tearDown(self):
-        super(Server, self).tearDown()
-        return self.sparent.stopService()
+        self.addCleanup(self.sparent.stopService())
 
     def workdir(self, name):
         basedir = os.path.join("storage", "Server", name)
@@ -1284,10 +1281,10 @@ class Server(AsyncTestCase):
 class MutableServer(SyncTestCase):
 
     def setUp(self):
+        super(MutableServer, self).setUp()
         self.sparent = LoggingServiceParent()
         self._lease_secret = itertools.count()
-    def tearDown(self):
-        return self.sparent.stopService()
+        self.addCleanup(self.sparent.stopService())
 
     def workdir(self, name):
         basedir = os.path.join("storage", "MutableServer", name)
@@ -1903,8 +1900,9 @@ class MutableServer(SyncTestCase):
         self.assertThat({}, Equals(read_data))
 
 
-class MDMFProxies(SyncTestCase, ShouldFailMixin):
+class MDMFProxies(AsyncTestCase, ShouldFailMixin):
     def setUp(self):
+        super(MDMFProxies, self).setUp()
         self.sparent = LoggingServiceParent()
         self._lease_secret = itertools.count()
         self.ss = self.create("MDMFProxies storage test server")
@@ -1935,6 +1933,7 @@ class MDMFProxies(SyncTestCase, ShouldFailMixin):
 
 
     def tearDown(self):
+        super(MDMFProxies, self).tearDown()
        self.sparent.stopService()
        shutil.rmtree(self.workdir("MDMFProxies storage test server"))
 
@@ -2150,7 +2149,7 @@ class MDMFProxies(SyncTestCase, ShouldFailMixin):
         tws[0] = (testvs, [(0, share)], None)
         readv = []
         results = write(storage_index, self.secrets, tws, readv)
-        self.assertFalse(results[0])
+        self.assertTrue(results[0])
 
 
     def test_read(self):
@@ -2438,7 +2437,7 @@ class MDMFProxies(SyncTestCase, ShouldFailMixin):
         def _check_success(results):
             self.assertThat(results, HasLength(2))
             res, d = results
-            self.assertFalse(results)
+            self.assertTrue(results)
 
         mw = self._make_new_mw(b"si1", 0)
         mw.set_checkstring(b"this is a lie")
@@ -2918,7 +2917,7 @@ class MDMFProxies(SyncTestCase, ShouldFailMixin):
         d.addCallback(lambda ignored:
             mr.get_encprivkey())
         d.addCallback(lambda encprivkey:
-            self.assertThat(encprivkey, self.encprivkey, Equals(encprivkey)))
+            self.assertThat(encprivkey, Equals(self.encprivkey), encprivkey))
         d.addCallback(lambda ignored:
             mr.get_verification_key())
         d.addCallback(lambda verification_key:
@@ -3325,7 +3324,7 @@ class MDMFProxies(SyncTestCase, ShouldFailMixin):
             self.assertTrue(results[0])
             read = self.ss.slot_readv
             self.assertThat(read(b"si1", [0], [(1, 8)]),
-                            {0: [struct.pack(">Q", 1)]})
+                            Equals({0: [struct.pack(">Q", 1)]}))
             self.assertThat(read(b"si1", [0], [(9, len(data) - 9)]),
                             Equals({0: [data[9:]]}))
         d.addCallback(_then_again)
@@ -3335,10 +3334,10 @@ class Stats(SyncTestCase):
 
     def setUp(self):
+        super(Stats, self).setUp()
         self.sparent = LoggingServiceParent()
         self._lease_secret = itertools.count()
-    def tearDown(self):
-        return self.sparent.stopService()
+        self.addCleanup(self.sparent.stopService())
 
     def workdir(self, name):
         basedir = os.path.join("storage", "Server", name)
@@ -3418,7 +3417,7 @@ class Stats(SyncTestCase):
 
 immutable_schemas = strategies.sampled_from(list(ALL_IMMUTABLE_SCHEMAS))
 
-class ShareFileTests(unittest.TestCase):
+class ShareFileTests(SyncTestCase):
     """Tests for allmydata.storage.immutable.ShareFile."""
 
     def get_sharefile(self, **kwargs):

From b193ad3ed42527d9118cc7d82d142d648a21a5c1 Mon Sep 17 00:00:00 2001
From: "Fon E. Noel NFEBE"
Date: Wed, 30 Nov 2022 16:53:20 +0100
Subject: [PATCH 05/54] Correct addCleanup reference

Some test_storage.py classes contain calls to cleanup methods instead of
references. This commit fixes that.

Signed-off-by: Fon E. Noel NFEBE
---
 src/allmydata/test/test_storage.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py
index f7d5ae919..2234b5af2 100644
--- a/src/allmydata/test/test_storage.py
+++ b/src/allmydata/test/test_storage.py
@@ -563,7 +563,7 @@ class Server(AsyncTestCase):
         self.sparent = LoggingServiceParent()
         self.sparent.startService()
         self._lease_secret = itertools.count()
-        self.addCleanup(self.sparent.stopService())
+        self.addCleanup(self.sparent.stopService)
 
     def workdir(self, name):
         basedir = os.path.join("storage", "Server", name)
@@ -1284,7 +1284,7 @@ class MutableServer(SyncTestCase):
         super(MutableServer, self).setUp()
         self.sparent = LoggingServiceParent()
         self._lease_secret = itertools.count()
-        self.addCleanup(self.sparent.stopService())
+        self.addCleanup(self.sparent.stopService)
 
     def workdir(self, name):
         basedir = os.path.join("storage", "MutableServer", name)
@@ -3337,7 +3337,7 @@ class Stats(SyncTestCase):
         super(Stats, self).setUp()
         self.sparent = LoggingServiceParent()
         self._lease_secret = itertools.count()
-        self.addCleanup(self.sparent.stopService())
+        self.addCleanup(self.sparent.stopService)
 
     def workdir(self, name):
         basedir = os.path.join("storage", "Server", name)

From badba97ff20a961153ddd86490c9646e042df172 Mon Sep 17 00:00:00 2001
From: dlee
Date: Fri, 17 Feb 2023 16:20:29 -0600
Subject: [PATCH 06/54] Type annotations added for wormholetesting.py

---
 src/allmydata/test/cli/wormholetesting.py | 35 +++++++++++------------
 1 file changed, 17 insertions(+), 18 deletions(-)

diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py
index 744f9d75a..7cf9d7eff 100644
--- a/src/allmydata/test/cli/wormholetesting.py
+++ b/src/allmydata/test/cli/wormholetesting.py
@@ -32,8 +32,7 @@ For example::
 
 from __future__ import annotations
 
-from typing import Iterator, Optional, List, Tuple
-from collections.abc import Awaitable
+from typing import Iterator, Optional, List, Tuple, Any, TextIO
 from inspect import getargspec
 from itertools import count
 from sys import stderr
@@ -66,18 +65,18 @@ class MemoryWormholeServer(object):
 
     def create(
         self,
-        appid,
-        relay_url,
-        reactor,
-        versions={},
-        delegate=None,
-        journal=None,
-        tor=None,
-        timing=None,
-        stderr=stderr,
-        _eventual_queue=None,
-        _enable_dilate=False,
-    ):
+        appid: str,
+        relay_url: str,
+        reactor: Any,
+        versions: Any={},
+        delegate: Optional[Any]=None,
+        journal: Optional[Any]=None,
+        tor: Optional[Any]=None,
+        timing: Optional[Any]=None,
+        stderr: TextIO=stderr,
+        _eventual_queue: Optional[Any]=None,
+        _enable_dilate: bool=False,
+    )-> _MemoryWormhole:
         """
         Create a wormhole. It will be able to connect to other wormholes created
         by this instance (and constrained by the normal appid/relay_url
@@ -184,7 +183,7 @@ class _WormholeApp(object):
 
         return code
 
-    def wait_for_wormhole(self, code: WormholeCode) -> Awaitable[_MemoryWormhole]:
+    def wait_for_wormhole(self, code: WormholeCode) -> Deferred[_MemoryWormhole]:
         """
         Return a ``Deferred`` which fires with the next wormhole to be
         associated with the given code. This is used to let the first end of a wormhole
@@ -262,7 +261,7 @@ class _MemoryWormhole(object):
             return d
         return succeed(self._code)
 
-    def get_welcome(self):
+    def get_welcome(self) -> Deferred[str]:
         return succeed("welcome")
 
     def send_message(self, payload: WormholeMessage) -> None:
@@ -276,8 +275,8 @@ class _MemoryWormhole(object):
         )
         d = self._view.wormhole_by_code(self._code, exclude=self)
 
-        def got_wormhole(wormhole):
-            msg = wormhole._payload.get()
+        def got_wormhole(wormhole: _MemoryWormhole) -> Deferred[_MemoryWormhole]:
+            msg: Deferred[_MemoryWormhole] = wormhole._payload.get()
             return msg
 
         d.addCallback(got_wormhole)

From 86dbcb21ce4f27e779b3d8febc633c1cbf3fd97e Mon Sep 17 00:00:00 2001
From: dlee
Date: Fri, 17 Feb 2023 16:24:32 -0600
Subject: [PATCH 07/54] Refactored verify function to update deprecated
 getargspec function with getfullargspec and maintained strictness

---
 src/allmydata/test/cli/wormholetesting.py | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py
index 7cf9d7eff..9ce199545 100644
--- a/src/allmydata/test/cli/wormholetesting.py
+++ b/src/allmydata/test/cli/wormholetesting.py
@@ -33,7 +33,7 @@ For example::
 from __future__ import annotations
 
 from typing import Iterator, Optional, List, Tuple, Any, TextIO
-from inspect import getargspec
+from inspect import getfullargspec
 from itertools import count
 from sys import stderr
 
@@ -133,18 +133,24 @@ class TestingHelper(object):
     return wormhole
 
 
-def _verify():
+def _verify() -> None:
     """
     Roughly confirm that the in-memory wormhole creation function matches the
     interface of the real implementation.
     """
     # Poor man's interface verification.
-    a = getargspec(create)
-    b = getargspec(MemoryWormholeServer.create)
+    a = getfullargspec(create)
+    b = getfullargspec(MemoryWormholeServer.create)
+
     # I know it has a `self` argument at the beginning. That's okay.
    b = b._replace(args=b.args[1:])
-    assert a == b, "{} != {}".format(a, b)
+
+    # Just compare the same information to check function signature
+    assert a.varkw == b.varkw
+    assert a.args == b.args
+    assert a.varargs == b.varargs
+    assert a.kwonlydefaults == b.kwonlydefaults
 
 _verify()

From be9d76e2b8cffda206afe066fe00be2db8dd6759 Mon Sep 17 00:00:00 2001
From: dlee
Date: Fri, 17 Feb 2023 16:24:52 -0600
Subject: [PATCH 08/54] Added newsfragment

---
 newsfragments/3970.minor | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 newsfragments/3970.minor

diff --git a/newsfragments/3970.minor b/newsfragments/3970.minor
new file mode 100644
index 000000000..e69de29bb

From af51b022284f2b7284a806de7beb2223f5ad9961 Mon Sep 17 00:00:00 2001
From: dlee
Date: Mon, 27 Feb 2023 15:05:52 -0600
Subject: [PATCH 09/54] Revert wait_for_wormhole function return type back to
 Awaitable for forward compatibility when we move to async def

---
 src/allmydata/test/cli/wormholetesting.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py
index 9ce199545..b30b92fe1 100644
--- a/src/allmydata/test/cli/wormholetesting.py
+++ b/src/allmydata/test/cli/wormholetesting.py
@@ -38,7 +38,7 @@ from itertools import count
 from sys import stderr
 
 from attrs import frozen, define, field, Factory
-from twisted.internet.defer import Deferred, DeferredQueue, succeed
+from twisted.internet.defer import Deferred, DeferredQueue, succeed, Awaitable
 from wormhole._interfaces import IWormhole
 from wormhole.wormhole import create
 from zope.interface import implementer
@@ -189,7 +189,7 @@ class _WormholeApp(object):
 
         return code
 
-    def wait_for_wormhole(self, code: WormholeCode) -> Deferred[_MemoryWormhole]:
+    def wait_for_wormhole(self, code: WormholeCode) -> Awaitable[_MemoryWormhole]:
         """
         Return a ``Deferred`` which fires with the next wormhole to be
         associated with the given code. This is used to let the first end of a wormhole
@@ -281,8 +281,8 @@ class _MemoryWormhole(object):
         )
         d = self._view.wormhole_by_code(self._code, exclude=self)
 
-        def got_wormhole(wormhole: _MemoryWormhole) -> Deferred[_MemoryWormhole]:
-            msg: Deferred[_MemoryWormhole] = wormhole._payload.get()
+        def got_wormhole(wormhole: _MemoryWormhole) -> Deferred[WormholeMessage]:
+            msg: Deferred[WormholeMessage] = wormhole._payload.get()
             return msg
 
         d.addCallback(got_wormhole)

From 582876197a724dd8c24b06160345d122832e03b6 Mon Sep 17 00:00:00 2001
From: dlee
Date: Mon, 27 Feb 2023 15:14:58 -0600
Subject: [PATCH 10/54] Added default check to verify to ensure strictness

---
 src/allmydata/test/cli/wormholetesting.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py
index b30b92fe1..d4e53a342 100644
--- a/src/allmydata/test/cli/wormholetesting.py
+++ b/src/allmydata/test/cli/wormholetesting.py
@@ -151,6 +151,7 @@ def _verify() -> None:
     assert a.args == b.args
     assert a.varargs == b.varargs
     assert a.kwonlydefaults == b.kwonlydefaults
+    assert a.defaults == b.defaults
 
 _verify()

From 7c3f6cb4c7fcba278dea93d62a3ddea381835a7f Mon Sep 17 00:00:00 2001
From: Jean-Paul Calderone
Date: Tue, 28 Feb 2023 07:55:43 -0500
Subject: [PATCH 11/54] Fix inverted assertion

---
 src/allmydata/test/test_storage.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py
index 2234b5af2..434b42c0d 100644
--- a/src/allmydata/test/test_storage.py
+++ b/src/allmydata/test/test_storage.py
@@ -2870,7 +2870,7 @@ class MDMFProxies(AsyncTestCase, ShouldFailMixin):
         mr = MDMFSlotReadProxy(self.storage_server, b"si1", 0)
         d = mr.is_sdmf()
         d.addCallback(lambda issdmf:
-            self.assertFalse(issdmf))
+            self.assertTrue(issdmf))
         return d

From 450eed78688142b78151725a10da4d569f53106f Mon Sep 17 00:00:00 2001
From: Itamar Turner-Trauring
Date: Wed, 8 Mar 2023 11:31:58 -0500
Subject: [PATCH 12/54] Test writing at an offset.
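
For reference, the check added below exercises the partial-write path
roughly like this (a sketch reusing the same mutable-file API as the test
body; `node` stands in for the test's `newnode`):

    # b"abcdef" with b"XY" written at offset 2 becomes b"abXYef"
    await node.overwrite(MutableData(b"abcdef"))
    version = await node.get_mutable_version()
    await version.update(MutableData(b"XY"), 2)
    assert await node.download_best_version() == b"abXYef"
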
---
 src/allmydata/test/test_system.py | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/src/allmydata/test/test_system.py b/src/allmydata/test/test_system.py
index 10a64c1fe..e68f367cd 100644
--- a/src/allmydata/test/test_system.py
+++ b/src/allmydata/test/test_system.py
@@ -33,6 +33,7 @@ from allmydata.util import log, base32
 from allmydata.util.encodingutil import quote_output, unicode_to_argv
 from allmydata.util.fileutil import abspath_expanduser_unicode
 from allmydata.util.consumer import MemoryConsumer, download_to_data
+from allmydata.util.deferredutil import async_to_deferred
 from allmydata.interfaces import IDirectoryNode, IFileNode, \
      NoSuchChildError, NoSharesError, SDMF_VERSION, MDMF_VERSION
 from allmydata.monitor import Monitor
@@ -657,7 +658,23 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
             self.failUnlessEqual(res, NEWERDATA)
         d.addCallback(_check_download_5)
 
-        def _corrupt_shares(res):
+        @async_to_deferred
+        async def _check_write_at_offset(newnode):
+            log.msg("writing at offset")
+            start = b"abcdef"
+            expected = b"abXYef"
+            uri = self._mutable_node_1.get_uri()
+            newnode = self.clients[0].create_node_from_uri(uri)
+            await newnode.overwrite(MutableData(start))
+            version = await newnode.get_mutable_version()
+            await version.update(MutableData(b"XY"), 2)
+            result = await newnode.download_best_version()
+            self.assertEqual(result, expected)
+            # Revert to previous version
+            await newnode.overwrite(MutableData(NEWERDATA))
+        d.addCallback(_check_write_at_offset)
+
+        def _corrupt_shares(_res):
             # run around and flip bits in all but k of the shares, to test
             # the hash checks
             shares = self._find_all_shares(self.basedir)

From 5dc108dfe87b53966bc252afbf1a7c6c77e9c7df Mon Sep 17 00:00:00 2001
From: Itamar Turner-Trauring
Date: Wed, 8 Mar 2023 11:38:31 -0500
Subject: [PATCH 13/54] Test large immutable upload and download.

---
 integration/test_get_put.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/integration/test_get_put.py b/integration/test_get_put.py
index 76c6ee600..65020429e 100644
--- a/integration/test_get_put.py
+++ b/integration/test_get_put.py
@@ -53,17 +53,20 @@ def test_put_from_stdin(alice, get_put_alias, tmpdir):
 def test_get_to_stdout(alice, get_put_alias, tmpdir):
     """
     It's possible to upload a file, and then download it to stdout.
+
+    We test with a large file this time.
     """
     tempfile = tmpdir.join("file")
+    large_data = DATA * 1_000_000
     with tempfile.open("wb") as f:
-        f.write(DATA)
+        f.write(large_data)
     cli(alice, "put", str(tempfile), "getput:tostdout")
 
     p = Popen(
         ["tahoe", "--node-directory", alice.node_dir, "get", "getput:tostdout", "-"],
         stdout=PIPE
     )
-    assert p.stdout.read() == DATA
+    assert p.stdout.read() == large_data
     assert p.wait() == 0

From 7bdfed6434c4129bfe766a72b8d6b38fe3d1349e Mon Sep 17 00:00:00 2001
From: Itamar Turner-Trauring
Date: Wed, 8 Mar 2023 11:55:30 -0500
Subject: [PATCH 14/54] News fragment.

---
 newsfragments/3959.minor | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 newsfragments/3959.minor

diff --git a/newsfragments/3959.minor b/newsfragments/3959.minor
new file mode 100644
index 000000000..e69de29bb

From a61e41d5f9d1c4aae258c25055f13d807ea26720 Mon Sep 17 00:00:00 2001
From: Itamar Turner-Trauring
Date: Wed, 8 Mar 2023 14:58:52 -0500
Subject: [PATCH 15/54] Document the motivation.
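
The comment added below contrasts the two APIs involved (a quick sketch,
reusing names from the surrounding test):

    # a complete replacement of the mutable file's contents
    await node.overwrite(MutableData(NEWERDATA))
    # versus a partial write at an offset via the mutable-version API
    version = await node.get_mutable_version()
    await version.update(MutableData(b"XY"), 2)
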
---
 src/allmydata/test/test_system.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/allmydata/test/test_system.py b/src/allmydata/test/test_system.py
index e68f367cd..d11a6e866 100644
--- a/src/allmydata/test/test_system.py
+++ b/src/allmydata/test/test_system.py
@@ -658,6 +658,8 @@ class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
             self.failUnlessEqual(res, NEWERDATA)
         d.addCallback(_check_download_5)
 
+        # The previous checks upload a complete replacement. This uses a
+        # different API that is supposed to do a partial write at an offset.
         @async_to_deferred
         async def _check_write_at_offset(newnode):
             log.msg("writing at offset")

From 8ccbd37d29906cef62d8db22573878534a783fdd Mon Sep 17 00:00:00 2001
From: dlee
Date: Wed, 8 Mar 2023 15:16:03 -0600
Subject: [PATCH 16/54] Fix implicit re-export error by importing IWormhole
 from wormhole library directly

---
 src/allmydata/test/cli/test_invite.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/allmydata/test/cli/test_invite.py b/src/allmydata/test/cli/test_invite.py
index 07756eeed..94d4395ff 100644
--- a/src/allmydata/test/cli/test_invite.py
+++ b/src/allmydata/test/cli/test_invite.py
@@ -19,7 +19,8 @@ from ...util.jsonbytes import dumps_bytes
 from ..common_util import run_cli
 from ..no_network import GridTestMixin
 from .common import CLITestMixin
-from .wormholetesting import IWormhole, MemoryWormholeServer, TestingHelper, memory_server
+from .wormholetesting import MemoryWormholeServer, TestingHelper, memory_server
+from wormhole._interfaces import IWormhole
 
 # Logically:
 #   JSONable = dict[str, Union[JSONable, None, int, float, str, list[JSONable]]]

From 10b3eabed41baedd47e3b4f9ce55aec92699003a Mon Sep 17 00:00:00 2001
From: dlee
Date: Wed, 8 Mar 2023 15:19:08 -0600
Subject: [PATCH 17/54] Apply per file flags corresponding to --strict to
 wormholetesting.py

---
 mypy.ini                                  | 2 +-
 src/allmydata/test/cli/wormholetesting.py | 1 +
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/mypy.ini b/mypy.ini
index e6e7d16ff..27e9f6154 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -7,4 +7,4 @@ show_error_codes = True
 warn_unused_configs =True
 no_implicit_optional = True
 warn_redundant_casts = True
-strict_equality = True
\ No newline at end of file
+strict_equality = True
diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py
index d4e53a342..a0050a75b 100644
--- a/src/allmydata/test/cli/wormholetesting.py
+++ b/src/allmydata/test/cli/wormholetesting.py
@@ -29,6 +29,7 @@ For example::
         import wormhole
         run(peerA(wormhole))
 """
+# mypy: warn-unused-configs, disallow-any-generics, disallow-subclassing-any, disallow-untyped-calls, disallow-untyped-defs, disallow-incomplete-defs, check-untyped-defs, disallow-untyped-decorators, warn-redundant-casts, warn-unused-ignores, warn-return-any, no-implicit-reexport, strict-equality, strict-concatenate
 
 from __future__ import annotations

From 4f47a18c6af89e92c81641c9bcc96bb30398c355 Mon Sep 17 00:00:00 2001
From: dlee
Date: Wed, 8 Mar 2023 15:29:50 -0600
Subject: [PATCH 18/54] Comments added for inline mypy config. Individual
 flags making up --strict can only be used on a per-module basis.
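
For context, an inline per-module mypy configuration is just a specially
formed comment near the top of the module, read as if the flags had been
passed on the command line for that file only. A minimal illustration
(these two flags are an example, not the full --strict set used here):

    # mypy: disallow-untyped-defs, warn-return-any
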
---
 src/allmydata/test/cli/wormholetesting.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py
index a0050a75b..6fb2b791c 100644
--- a/src/allmydata/test/cli/wormholetesting.py
+++ b/src/allmydata/test/cli/wormholetesting.py
@@ -30,6 +30,10 @@ For example::
         run(peerA(wormhole))
 """
 # mypy: warn-unused-configs, disallow-any-generics, disallow-subclassing-any, disallow-untyped-calls, disallow-untyped-defs, disallow-incomplete-defs, check-untyped-defs, disallow-untyped-decorators, warn-redundant-casts, warn-unused-ignores, warn-return-any, no-implicit-reexport, strict-equality, strict-concatenate
+# This inline mypy config applies a per-file mypy config for this file.
+# It applies the '--strict' list of flags to this file.
+# If you want to test using the CLI, remove the inline config above and run:
+# "mypy --follow-imports silent --strict src/allmydata/test/cli/wormholetesting.py"
 
 from __future__ import annotations

From ccf12897f2913de4415580dc322c8231e8c49042 Mon Sep 17 00:00:00 2001
From: Itamar Turner-Trauring
Date: Fri, 10 Mar 2023 09:02:08 -0500
Subject: [PATCH 19/54] Add content limits to server.

---
 newsfragments/3965.minor             |  0
 src/allmydata/storage/http_server.py | 17 ++++++++++++++---
 2 files changed, 14 insertions(+), 3 deletions(-)
 create mode 100644 newsfragments/3965.minor

diff --git a/newsfragments/3965.minor b/newsfragments/3965.minor
new file mode 100644
index 000000000..e69de29bb
diff --git a/src/allmydata/storage/http_server.py b/src/allmydata/storage/http_server.py
index c6c3ab615..fd7fd1187 100644
--- a/src/allmydata/storage/http_server.py
+++ b/src/allmydata/storage/http_server.py
@@ -606,7 +606,10 @@ class HTTPServer(object):
     async def allocate_buckets(self, request, authorization, storage_index):
         """Allocate buckets."""
         upload_secret = authorization[Secrets.UPLOAD]
-        info = await self._read_encoded(request, _SCHEMAS["allocate_buckets"])
+        # It's just a list of up to ~256 shares, shouldn't use many bytes.
+        info = await self._read_encoded(
+            request, _SCHEMAS["allocate_buckets"], max_size=8192
+        )
 
         # We do NOT validate the upload secret for existing bucket uploads.
         # Another upload may be happening in parallel, with a different upload
@@ -773,7 +776,11 @@ class HTTPServer(object):
         except KeyError:
             raise _HTTPError(http.NOT_FOUND)
 
-        info = await self._read_encoded(request, _SCHEMAS["advise_corrupt_share"])
+        # The reason can be a string with explanation, so in theory it could be
+        # longish?
+        info = await self._read_encoded(
+            request, _SCHEMAS["advise_corrupt_share"], max_size=32768,
+        )
         bucket.advise_corrupt_share(info["reason"].encode("utf-8"))
         return b""
 
@@ -872,7 +879,11 @@ class HTTPServer(object):
         }:
             raise _HTTPError(http.NOT_FOUND)
 
-        info = await self._read_encoded(request, _SCHEMAS["advise_corrupt_share"])
+        # The reason can be a string with explanation, so in theory it could be
+        # longish?
+        info = await self._read_encoded(
+            request, _SCHEMAS["advise_corrupt_share"], max_size=32768
+        )
         self._storage_server.advise_corrupt_share(
             b"mutable", storage_index, share_number, info["reason"].encode("utf-8")
         )

From 7a387a054eb5c9b86ec20c6b95b0489a0034af13 Mon Sep 17 00:00:00 2001
From: "Fon E. Noel NFEBE"
Date: Sat, 11 Mar 2023 23:58:54 +0100
Subject: [PATCH 20/54] Fix more inverted assertions

Just like in 7c3f6cb4c7fcba278dea93d62a3ddea381835a7f, this commit
corrects some wrongly inverted assertions inside `test/test_storage.py`.

Signed-off-by: Fon E. Noel NFEBE
---
 src/allmydata/test/test_storage.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py
index af4d549bf..655753d90 100644
--- a/src/allmydata/test/test_storage.py
+++ b/src/allmydata/test/test_storage.py
@@ -1816,7 +1816,7 @@ class MutableServer(SyncTestCase):
         prefix = si[:2]
         prefixdir = os.path.join(self.workdir("test_remove"), "shares", prefix)
         bucketdir = os.path.join(prefixdir, si)
-        self.assertThat(prefixdir, Contains(os.path.exists(prefixdir)))
+        self.assertTrue(os.path.exists(prefixdir), prefixdir)
         self.assertFalse(os.path.exists(bucketdir), bucketdir)
 
     def test_writev_without_renew_lease(self):
@@ -2425,7 +2425,7 @@ class MDMFProxies(AsyncTestCase, ShouldFailMixin):
         # any point during the process, it should fail to write when we
         # tell it to write.
         def _check_failure(results):
-            self.assertThat(results, Equals(2))
+            self.assertThat(results, HasLength(2))
             res, d = results
             self.assertFalse(res)
 
@@ -3191,7 +3191,7 @@ class MDMFProxies(AsyncTestCase, ShouldFailMixin):
         d.addCallback(lambda ignored:
             mr.get_verinfo())
         def _check_verinfo(verinfo):
-            self.assertThat(verinfo)
+            self.assertTrue(verinfo)
             self.assertThat(verinfo, HasLength(9))
             (seqnum,
              root_hash,

From f9acb56e82602081c09f2db8f1eab9db24ee3ddb Mon Sep 17 00:00:00 2001
From: "Fon E. Noel NFEBE"
Date: Sun, 12 Mar 2023 00:16:38 +0100
Subject: [PATCH 21/54] Fix wrong expected val in assertion

This is a follow up to 7a387a054eb5c9b86ec20c6b95b0489a0034af13

Signed-off-by: Fon E. Noel NFEBE
---
 src/allmydata/test/test_storage.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py
index 655753d90..c1d6004e8 100644
--- a/src/allmydata/test/test_storage.py
+++ b/src/allmydata/test/test_storage.py
@@ -1547,8 +1547,8 @@ class MutableServer(SyncTestCase):
              },
             [(0,12), (20,5)],
             )
-        self.assertThat(answer, (False,
-                        Equals({0: [b"000000000011", b"22222"],
+        self.assertThat(answer, Equals((False,
+                        {0: [b"000000000011", b"22222"],
                          1: [b"", b""],
                          2: [b"", b""],
                         })))

From 74ff8cd08041a1107b05771778310449bf4d99f8 Mon Sep 17 00:00:00 2001
From: dlee
Date: Mon, 13 Mar 2023 11:04:52 -0500
Subject: [PATCH 22/54] Per-file configuration for wormholetesting.py moved
 from inline mypy configuration to mypy.ini file

---
 mypy.ini                                  | 16 ++++++++++++++++
 src/allmydata/test/cli/wormholetesting.py |  5 -----
 2 files changed, 16 insertions(+), 5 deletions(-)

diff --git a/mypy.ini b/mypy.ini
index 27e9f6154..c391c5594 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -8,3 +8,19 @@ warn_unused_configs =True
 no_implicit_optional = True
 warn_redundant_casts = True
 strict_equality = True
+
+[mypy-allmydata.test.cli.wormholetesting]
+warn_unused_configs = True
+disallow_any_generics = True
+disallow_subclassing_any = True
+disallow_untyped_calls = True
+disallow_untyped_defs = True
+disallow_incomplete_defs = True
+check_untyped_defs = True
+disallow_untyped_decorators = True
+warn_redundant_casts = True
+warn_unused_ignores = True
+warn_return_any = True
+no_implicit_reexport = True
+strict_equality = True
+strict_concatenate = True
diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py
index 6fb2b791c..d4e53a342 100644
--- a/src/allmydata/test/cli/wormholetesting.py
+++ b/src/allmydata/test/cli/wormholetesting.py
@@ -29,11 +29,6 @@ For example::
         import wormhole
         run(peerA(wormhole))
 """
-# mypy: warn-unused-configs, disallow-any-generics, disallow-subclassing-any, disallow-untyped-calls, disallow-untyped-defs, disallow-incomplete-defs, check-untyped-defs, disallow-untyped-decorators, warn-redundant-casts, warn-unused-ignores, warn-return-any, no-implicit-reexport, strict-equality, strict-concatenate
-# This inline mypy config applies a per-file mypy config for this file.
-# It applies the '--strict' list of flags to this file.
-# If you want to test using the CLI, remove the inline config above and run:
-# "mypy --follow-imports silent --strict src/allmydata/test/cli/wormholetesting.py"
 
 from __future__ import annotations

From 61c835c8a05c15b7eabe29b453633b7c4da022e8 Mon Sep 17 00:00:00 2001
From: dlee
Date: Mon, 13 Mar 2023 11:17:01 -0500
Subject: [PATCH 23/54] Added missing space between return type

---
 src/allmydata/test/cli/wormholetesting.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py
index d4e53a342..be94a7981 100644
--- a/src/allmydata/test/cli/wormholetesting.py
+++ b/src/allmydata/test/cli/wormholetesting.py
@@ -76,7 +76,7 @@ class MemoryWormholeServer(object):
         stderr: TextIO=stderr,
         _eventual_queue: Optional[Any]=None,
         _enable_dilate: bool=False,
-    )-> _MemoryWormhole:
+    ) -> _MemoryWormhole:
         """
         Create a wormhole. It will be able to connect to other wormholes created
         by this instance (and constrained by the normal appid/relay_url

From b58dd2bb3bed375258f611eb0af39f6c08f64684 Mon Sep 17 00:00:00 2001
From: dlee
Date: Mon, 13 Mar 2023 12:27:53 -0500
Subject: [PATCH 24/54] Remove flags that are unused

---
 mypy.ini | 2 --
 1 file changed, 2 deletions(-)

diff --git a/mypy.ini b/mypy.ini
index c391c5594..7acc0ddc5 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -10,7 +10,6 @@ warn_redundant_casts = True
 strict_equality = True
 
 [mypy-allmydata.test.cli.wormholetesting]
-warn_unused_configs = True
 disallow_any_generics = True
 disallow_subclassing_any = True
 disallow_untyped_calls = True
@@ -18,7 +17,6 @@ disallow_untyped_defs = True
 disallow_incomplete_defs = True
 check_untyped_defs = True
 disallow_untyped_decorators = True
-warn_redundant_casts = True
 warn_unused_ignores = True
 warn_return_any = True
 no_implicit_reexport = True

From 041a634d27f1f2adfdc82471e60192aaecb1fbfc Mon Sep 17 00:00:00 2001
From: dlee
Date: Mon, 13 Mar 2023 13:08:32 -0500
Subject: [PATCH 25/54] Fix private interface import to test_invite

---
 src/allmydata/test/cli/test_invite.py     | 4 ++--
 src/allmydata/test/cli/wormholetesting.py | 4 +++-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/allmydata/test/cli/test_invite.py b/src/allmydata/test/cli/test_invite.py
index 94d4395ff..31992a54d 100644
--- a/src/allmydata/test/cli/test_invite.py
+++ b/src/allmydata/test/cli/test_invite.py
@@ -19,8 +19,8 @@ from ...util.jsonbytes import dumps_bytes
 from ..common_util import run_cli
 from ..no_network import GridTestMixin
 from .common import CLITestMixin
-from .wormholetesting import MemoryWormholeServer, TestingHelper, memory_server
-from wormhole._interfaces import IWormhole
+from .wormholetesting import MemoryWormholeServer, TestingHelper, memory_server, IWormhole
+
 
 # Logically:
 #   JSONable = dict[str, Union[JSONable, None, int, float, str, list[JSONable]]]
diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py
index be94a7981..9fbe8b63e 100644
--- a/src/allmydata/test/cli/wormholetesting.py
+++ b/src/allmydata/test/cli/wormholetesting.py
@@ -32,6 +32,8 @@ For example::
 
 from __future__ import annotations
 
+__all__ = ['IWormhole']
+
 from typing import Iterator, Optional, List, Tuple, Any, TextIO
 from inspect import getfullargspec
 from itertools import count
@@ -76,7 +78,7 @@ class MemoryWormholeServer(object):
         stderr: TextIO=stderr,
         _eventual_queue: Optional[Any]=None,
         _enable_dilate: bool=False,
-    ) -> _MemoryWormhole:
+    )-> _MemoryWormhole:
         """
         Create a wormhole. It will be able to connect to other wormholes created
         by this instance (and constrained by the normal appid/relay_url

From 2bb96d8452c6fc4eaad990088c63314fd54e6aed Mon Sep 17 00:00:00 2001
From: Jean-Paul Calderone
Date: Mon, 13 Mar 2023 15:19:07 -0400
Subject: [PATCH 26/54] There are new autobahn releases; remove the upper
 bound.

---
 setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/setup.py b/setup.py
index 53fc42c62..82ff45764 100644
--- a/setup.py
+++ b/setup.py
@@ -118,7 +118,7 @@ install_requires = [
     "attrs >= 18.2.0",
 
     # WebSocket library for twisted and asyncio
-    "autobahn < 22.4.1", # remove this when 22.4.3 is released
+    "autobahn",
 
     # Support for Python 3 transition
     "future >= 0.18.2",

From 568e1b53177f33e53dae4d54b03c606b693c3319 Mon Sep 17 00:00:00 2001
From: Jean-Paul Calderone
Date: Mon, 13 Mar 2023 15:19:43 -0400
Subject: [PATCH 27/54] Replace the mach-nix-based package with a
 nixpkgs-based package

The built-in nixpkgs `buildPythonPackage` doesn't do metadata discovery
so we have to duplicate a lot of the package metadata. However, mach-nix
is unmaintained and incompatible with newer versions of nixpkgs.
---
 .circleci/config.yml |  12 +++--
 default.nix          |  73 +++++----------------------
 nix/pycddl.nix       |  22 +++++++++
 nix/sources.json     |  52 ++++++++------------
 nix/tahoe-lafs.nix   | 112 +++++++++++++++++++++++++++++++++++++++++++
 tests.nix            |  88 ----------------------------------
 6 files changed, 174 insertions(+), 185 deletions(-)
 create mode 100644 nix/pycddl.nix
 create mode 100644 nix/tahoe-lafs.nix
 delete mode 100644 tests.nix

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 152d56810..d07383b84 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -70,14 +70,18 @@ workflows:
       - "oraclelinux-8":
           {}
 
-      - "nixos":
-          name: "NixOS 21.05"
-          nixpkgs: "21.05"
-
       - "nixos":
           name: "NixOS 21.11"
           nixpkgs: "21.11"
 
+      - "nixos":
+          name: "NixOS 22.11"
+          nixpkgs: "22.11"
+
+      - "nixos":
+          name: "NixOS unstable"
+          nixpkgs: "unstable"
+
       # Eventually, test against PyPy 3.8
       #- "pypy27-buster":
       #    {}
diff --git a/default.nix b/default.nix
index e4f2dd4d4..59903b1e2 100644
--- a/default.nix
+++ b/default.nix
@@ -21,16 +21,13 @@ let
   sources = import nix/sources.nix;
 in
 {
-  pkgsVersion ? "nixpkgs-21.11" # a string which chooses a nixpkgs from the
+  pkgsVersion ? "nixpkgs-22.11" # a string which chooses a nixpkgs from the
                                 # niv-managed sources data
 
 , pkgs ? import sources.${pkgsVersion} { } # nixpkgs itself
 
-, pypiData ? sources.pypi-deps-db # the pypi package database snapshot to use
-                                  # for dependency resolution
-
-, pythonVersion ? "python39" # a string choosing the python derivation from
-                             # nixpkgs to target
+, pythonVersion ? "python310" # a string choosing the python derivation from
+                              # nixpkgs to target
 
 , extras ? [ "tor" "i2p" ] # a list of strings identifying tahoe-lafs extras,
                            # the dependencies of which the resulting package
                            # will also depend on. Include all of the runtime
                            # extras by default because the incremental cost of
                            # including them is a lot smaller than the cost of
                            # re-building the whole thing to add them.
 
-, mach-nix ? import sources.mach-nix {   # the mach-nix package to use to build
-                                         # the tahoe-lafs package
-  inherit pkgs pypiData;
-  python = pythonVersion;
-}
 }:
 
-# The project name, version, and most other metadata are automatically
-# extracted from the source. Some requirements are not properly extracted
-# and those cases are handled below. The version can only be extracted if
-# `setup.py update_version` has been run (this is not at all ideal but it
-# seems difficult to fix) - so for now just be sure to run that first.
-mach-nix.buildPythonPackage rec {
-  # Define the location of the Tahoe-LAFS source to be packaged. Clean up all
-  # as many of the non-source files (eg the `.git` directory, `~` backup
-  # files, nix's own `result` symlink, etc) as possible to avoid needing to
-  # re-build when files that make no difference to the package have changed.
-  src = pkgs.lib.cleanSource ./.;
-
+with pkgs.${pythonVersion}.pkgs;
+callPackage ./nix/tahoe-lafs.nix {
   # Select whichever package extras were requested.
   inherit extras;
 
-  # Define some extra requirements that mach-nix does not automatically detect
-  # from inspection of the source. We typically don't need to put version
-  # constraints on any of these requirements. The pypi-deps-db we're
-  # operating with makes dependency resolution deterministic so as long as it
-  # works once it will always work. It could be that in the future we update
-  # pypi-deps-db and an incompatibility arises - in which case it would make
-  # sense to apply some version constraints here.
-  requirementsExtra = ''
-    # mach-nix does not yet support pyproject.toml which means it misses any
-    # build-time requirements of our dependencies which are declared in such a
-    # file. Tell it about them here.
-    setuptools_rust
-
-    # mach-nix does not yet parse environment markers (e.g. "python > '3.0'")
-    # correctly. It misses all of our requirements which have an environment marker.
-    # Duplicate them here.
-    foolscap
-    eliot
-    pyrsistent
-    collections-extended
-  '';
-
-  # Specify where mach-nix should find packages for our Python dependencies.
-  # There are some reasonable defaults so we only need to specify certain
-  # packages where the default configuration runs into some issue.
-  providers = {
-  };
-
-  # Define certain overrides to the way Python dependencies are built.
-  _ = {
-    # Remove a click-default-group patch for a test suite problem which no
-    # longer applies because the project apparently no longer has a test suite
-    # in its source distribution.
-    click-default-group.patches = [];
-  };
-
-  passthru.meta.mach-nix = {
-    inherit providers _;
-  };
+  # Define the location of the Tahoe-LAFS source to be packaged. Clean up as
+  # many of the non-source files (eg the `.git` directory, `~` backup files,
+  # nix's own `result` symlink, etc) as possible to avoid needing to re-build
+  # when files that make no difference to the package have changed.
+  tahoe-lafs-src = pkgs.lib.cleanSource ./.;
 
+  # pycddl isn't packaged in nixpkgs so supply our own package of it.
+ pycddl = callPackage ./nix/pycddl.nix { }; } diff --git a/nix/pycddl.nix b/nix/pycddl.nix new file mode 100644 index 000000000..0f6a0329e --- /dev/null +++ b/nix/pycddl.nix @@ -0,0 +1,22 @@ +{ lib, fetchPypi, buildPythonPackage, rustPlatform }: +buildPythonPackage rec { + pname = "pycddl"; + version = "0.4.0"; + format = "pyproject"; + + src = fetchPypi { + inherit pname version; + sha256 = "sha256-w0CGbPeiXyS74HqZXyiXhvaAMUaIj5onwjl9gWKAjqY="; + }; + + nativeBuildInputs = with rustPlatform; [ + maturinBuildHook + cargoSetupHook + ]; + + cargoDeps = rustPlatform.fetchCargoTarball { + inherit src; + name = "${pname}-${version}"; + hash = "sha256-g96eeaqN9taPED4u+UKUcoitf5aTGFrW2/TOHoHEVHs="; + }; +} diff --git a/nix/sources.json b/nix/sources.json index 18aa18e3f..bcac22174 100644 --- a/nix/sources.json +++ b/nix/sources.json @@ -1,16 +1,4 @@ { - "mach-nix": { - "branch": "switch-to-nix-pypi-fetcher-2", - "description": "Create highly reproducible python environments", - "homepage": "", - "owner": "PrivateStorageio", - "repo": "mach-nix", - "rev": "f6d1a1841d8778c199326f95d0703c16bee2f8c4", - "sha256": "0krc4yhnpbzc4yhja9frnmym2vqm5zyacjnqb3fq9z9gav8vs9ls", - "type": "tarball", - "url": "https://github.com/PrivateStorageio/mach-nix/archive/f6d1a1841d8778c199326f95d0703c16bee2f8c4.tar.gz", - "url_template": "https://github.com///archive/.tar.gz" - }, "niv": { "branch": "master", "description": "Easy dependency management for Nix projects", @@ -23,18 +11,6 @@ "url": "https://github.com/nmattia/niv/archive/5830a4dd348d77e39a0f3c4c762ff2663b602d4c.tar.gz", "url_template": "https://github.com///archive/.tar.gz" }, - "nixpkgs-21.05": { - "branch": "nixos-21.05", - "description": "Nix Packages collection", - "homepage": "", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "0fd9ee1aa36ce865ad273f4f07fdc093adeb5c00", - "sha256": "1mr2qgv5r2nmf6s3gqpcjj76zpsca6r61grzmqngwm0xlh958smx", - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/0fd9ee1aa36ce865ad273f4f07fdc093adeb5c00.tar.gz", - "url_template": "https://github.com///archive/.tar.gz" - }, "nixpkgs-21.11": { "branch": "nixos-21.11", "description": "Nix Packages collection", @@ -47,16 +23,28 @@ "url": "https://github.com/NixOS/nixpkgs/archive/838eefb4f93f2306d4614aafb9b2375f315d917f.tar.gz", "url_template": "https://github.com///archive/.tar.gz" }, - "pypi-deps-db": { - "branch": "master", - "description": "Probably the most complete python dependency database", + "nixpkgs-22.11": { + "branch": "nixos-22.11", + "description": "Nix Packages collection", "homepage": "", - "owner": "DavHau", - "repo": "pypi-deps-db", - "rev": "5440c9c76f6431f300fb6a1ecae762a5444de5f6", - "sha256": "08r3iiaxzw9v2gq15y1m9bwajshyyz9280g6aia7mkgnjs9hnd1n", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "970402e6147c49603f4d06defe44d27fe51884ce", + "sha256": "1v0ljy7wqq14ad3gd1871fgvd4psr7dy14q724k0wwgxk7inbbwh", "type": "tarball", - "url": "https://github.com/DavHau/pypi-deps-db/archive/5440c9c76f6431f300fb6a1ecae762a5444de5f6.tar.gz", + "url": "https://github.com/nixos/nixpkgs/archive/970402e6147c49603f4d06defe44d27fe51884ce.tar.gz", + "url_template": "https://github.com///archive/.tar.gz" + }, + "nixpkgs-unstable": { + "branch": "master", + "description": "Nix Packages collection", + "homepage": "", + "owner": "nixos", + "repo": "nixpkgs", + "rev": "d0c9a536331227ab883b4f6964be638fa436d81f", + "sha256": "1gg6v5rk1p26ciygdg262zc5vqws753rvgcma5rim2s6gyfrjaq1", + "type": "tarball", + "url": 
"https://github.com/nixos/nixpkgs/archive/d0c9a536331227ab883b4f6964be638fa436d81f.tar.gz", "url_template": "https://github.com///archive/.tar.gz" } } diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix new file mode 100644 index 000000000..af0e4bf4d --- /dev/null +++ b/nix/tahoe-lafs.nix @@ -0,0 +1,112 @@ +{ buildPythonPackage +, tahoe-lafs-src +, extras + +# always dependencies +, attrs +, autobahn +, cbor2 +, click +, collections-extended +, cryptography +, distro +, eliot +, filelock +, foolscap +, future +, klein +, magic-wormhole +, netifaces +, psutil +, pycddl +, pyrsistent +, pyutil +, six +, treq +, twisted +, werkzeug +, zfec +, zope_interface + +# tor extra dependencies +, txtorcon + +# i2p extra dependencies +, txi2p-tahoe + +# test dependencies +, beautifulsoup4 +, fixtures +, hypothesis +, mock +, paramiko +, prometheus-client +, pytest +, pytest-timeout +, pytest-twisted +, tenacity +, testtools +, towncrier +}: +let + pname = "tahoe-lafs"; + version = "1.18.0.post1"; + + pickExtraDependencies = deps: extras: builtins.foldl' (accum: extra: accum ++ deps.${extra}) [] extras; + + pythonExtraDependencies = { + tor = [ txtorcon ]; + i2p = [ txi2p-tahoe ]; + }; + + pythonPackageDependencies = [ + attrs + autobahn + cbor2 + click + collections-extended + cryptography + distro + eliot + filelock + foolscap + future + klein + magic-wormhole + netifaces + psutil + pycddl + pyrsistent + pyutil + six + treq + twisted + (twisted.passthru.optional-dependencies.tls) + (twisted.passthru.optional-dependencies.conch) + werkzeug + zfec + zope_interface + ] ++ pickExtraDependencies pythonExtraDependencies extras; + + pythonCheckDependencies = [ + beautifulsoup4 + fixtures + hypothesis + mock + paramiko + prometheus-client + pytest + pytest-timeout + pytest-twisted + tenacity + testtools + towncrier + ]; +in +buildPythonPackage { + inherit pname version; + src = tahoe-lafs-src; + buildInputs = pythonPackageDependencies; + checkInputs = pythonCheckDependencies; + checkPhase = "TAHOE_LAFS_HYPOTHESIS_PROFILE=ci python -m twisted.trial -j $NIX_BUILD_CORES allmydata"; +} diff --git a/tests.nix b/tests.nix deleted file mode 100644 index f8ed678f3..000000000 --- a/tests.nix +++ /dev/null @@ -1,88 +0,0 @@ -let - sources = import nix/sources.nix; -in -# See default.nix for documentation about parameters. -{ pkgsVersion ? "nixpkgs-21.11" -, pkgs ? import sources.${pkgsVersion} { } -, pypiData ? sources.pypi-deps-db -, pythonVersion ? "python39" -, mach-nix ? import sources.mach-nix { - inherit pkgs pypiData; - python = pythonVersion; - } -}@args: -let - # We would like to know the test requirements but mach-nix does not directly - # expose this information to us. However, it is perfectly capable of - # determining it if we ask right... This is probably not meant to be a - # public mach-nix API but we pinned mach-nix so we can deal with mach-nix - # upgrade breakage in our own time. - mach-lib = import "${sources.mach-nix}/mach_nix/nix/lib.nix" { - inherit pkgs; - lib = pkgs.lib; - }; - tests_require = (mach-lib.extract "python39" ./. "extras_require" ).extras_require.test; - - # Get the Tahoe-LAFS package itself. This does not include test - # requirements and we don't ask for test requirements so that we can just - # re-use the normal package if it is already built. - tahoe-lafs = import ./. 
args; - - # If we want to get tahoe-lafs into a Python environment with a bunch of - # *other* Python modules and let them interact in the usual way then we have - # to ask mach-nix for tahoe-lafs and those other Python modules in the same - # way - i.e., using `requirements`. The other tempting mechanism, - # `packagesExtra`, inserts an extra layer of Python environment and prevents - # normal interaction between Python modules (as well as usually producing - # file collisions in the packages that are both runtime and test - # dependencies). To get the tahoe-lafs we just built into the environment, - # put it into nixpkgs using an overlay and tell mach-nix to get tahoe-lafs - # from nixpkgs. - overridesPre = [(self: super: { inherit tahoe-lafs; })]; - providers = tahoe-lafs.meta.mach-nix.providers // { tahoe-lafs = "nixpkgs"; }; - - # Make the Python environment in which we can run the tests. - python-env = mach-nix.mkPython { - # Get the packaging fixes we already know we need from putting together - # the runtime package. - inherit (tahoe-lafs.meta.mach-nix) _; - # Share the runtime package's provider configuration - combined with our - # own that causes the right tahoe-lafs to be picked up. - inherit providers overridesPre; - requirements = '' - # Here we pull in the Tahoe-LAFS package itself. - tahoe-lafs - - # Unfortunately mach-nix misses all of the Python dependencies of the - # tahoe-lafs satisfied from nixpkgs. Drag them in here. This gives a - # bit of a pyrrhic flavor to the whole endeavor but maybe mach-nix will - # fix this soon. - # - # https://github.com/DavHau/mach-nix/issues/123 - # https://github.com/DavHau/mach-nix/pull/386 - ${tahoe-lafs.requirements} - - # And then all of the test-only dependencies. - ${builtins.concatStringsSep "\n" tests_require} - - # txi2p-tahoe is another dependency with an environment marker that - # mach-nix doesn't automatically pick up. - txi2p-tahoe - ''; - }; -in -# Make a derivation that runs the unit test suite. -pkgs.runCommand "tahoe-lafs-tests" { } '' - export TAHOE_LAFS_HYPOTHESIS_PROFILE=ci - ${python-env}/bin/python -m twisted.trial -j $NIX_BUILD_CORES allmydata - - # It's not cool to put the whole _trial_temp into $out because it has weird - # files in it we don't want in the store. Plus, even all of the less weird - # files are mostly just trash that's not meaningful if the test suite passes - # (which is the only way we get $out anyway). - # - # The build log itself is typically available from `nix-store --read-log` so - # we don't need to record that either. 
- echo "passed" >$out - -'' From f1be1ca1de497f4b3ebb5d5795803a5a331dcba9 Mon Sep 17 00:00:00 2001 From: dlee Date: Mon, 13 Mar 2023 14:53:25 -0500 Subject: [PATCH 28/54] Added more elements to export list in wormholetesting.py --- src/allmydata/test/cli/wormholetesting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py index 9fbe8b63e..99e26e64b 100644 --- a/src/allmydata/test/cli/wormholetesting.py +++ b/src/allmydata/test/cli/wormholetesting.py @@ -32,7 +32,7 @@ For example:: from __future__ import annotations -__all__ = ['IWormhole'] +__all__ = ['MemoryWormholeServer', 'TestingHelper', 'memory_server', 'IWormhole'] from typing import Iterator, Optional, List, Tuple, Any, TextIO from inspect import getfullargspec From fa2ba64d4d7c7f4393ab2378b7a12dd5f1b6aa08 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 16:02:57 -0400 Subject: [PATCH 29/54] Also supply the i2p extra dependency, txi2p --- default.nix | 3 ++- nix/tahoe-lafs.nix | 4 ++-- nix/txi2p.nix | 14 ++++++++++++++ 3 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 nix/txi2p.nix diff --git a/default.nix b/default.nix index 59903b1e2..c5f49a372 100644 --- a/default.nix +++ b/default.nix @@ -48,6 +48,7 @@ callPackage ./nix/tahoe-lafs.nix { # when files that make no difference to the package have changed. tahoe-lafs-src = pkgs.lib.cleanSource ./.; - # pycddl isn't packaged in nixpkgs so supply our own package of it. + # Some dependencies aren't packaged in nixpkgs so supply our own packages. pycddl = callPackage ./nix/pycddl.nix { }; + txi2p = callPackage ./nix/txi2p.nix { }; } diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix index af0e4bf4d..386e3adc9 100644 --- a/nix/tahoe-lafs.nix +++ b/nix/tahoe-lafs.nix @@ -32,7 +32,7 @@ , txtorcon # i2p extra dependencies -, txi2p-tahoe +, txi2p # test dependencies , beautifulsoup4 @@ -56,7 +56,7 @@ let pythonExtraDependencies = { tor = [ txtorcon ]; - i2p = [ txi2p-tahoe ]; + i2p = [ txi2p ]; }; pythonPackageDependencies = [ diff --git a/nix/txi2p.nix b/nix/txi2p.nix new file mode 100644 index 000000000..a3a5fea3a --- /dev/null +++ b/nix/txi2p.nix @@ -0,0 +1,14 @@ +{ fetchPypi, buildPythonPackage, parsley, twisted, unittestCheckHook }: +buildPythonPackage rec { + pname = "txi2p-tahoe"; + version = "0.3.7"; + + src = fetchPypi { + inherit pname version; + hash = "sha256-+Vs9zaFS+ACI14JNxEme93lnWmncdZyFAmnTH0yhOiY="; + }; + + propagatedBuildInputs = [ twisted parsley ]; + checkInputs = [ unittestCheckHook ]; + pythonImportsCheck = [ "parsley" "ometa"]; +} From 02904a363b0bf73119ad05bfc0a889231eb53f78 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 16:19:07 -0400 Subject: [PATCH 30/54] Drop nixpkgs 21.11 - it is missing some stuff we need Not only some nixpkgs facilities but it also includes a rustc that's too old to build pycddl. 
--- .circleci/config.yml | 6 +----- nix/sources.json | 12 ------------ nix/tahoe-lafs.nix | 14 ++++++++++++-- nix/txi2p.nix | 7 ++++++- 4 files changed, 19 insertions(+), 20 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index d07383b84..82bb263f9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -70,13 +70,9 @@ workflows: - "oraclelinux-8": {} - - "nixos": - name: "NixOS 21.11" - nixpkgs: "21.11" - - "nixos": name: "NixOS 22.11" - nixpkgs: "22.11" + nixpkgs: "21.11" - "nixos": name: "NixOS unstable" diff --git a/nix/sources.json b/nix/sources.json index bcac22174..ddf05d39d 100644 --- a/nix/sources.json +++ b/nix/sources.json @@ -11,18 +11,6 @@ "url": "https://github.com/nmattia/niv/archive/5830a4dd348d77e39a0f3c4c762ff2663b602d4c.tar.gz", "url_template": "https://github.com///archive/.tar.gz" }, - "nixpkgs-21.11": { - "branch": "nixos-21.11", - "description": "Nix Packages collection", - "homepage": "", - "owner": "NixOS", - "repo": "nixpkgs", - "rev": "838eefb4f93f2306d4614aafb9b2375f315d917f", - "sha256": "1bm8cmh1wx4h8b4fhbs75hjci3gcrpi7k1m1pmiy3nc0gjim9vkg", - "type": "tarball", - "url": "https://github.com/NixOS/nixpkgs/archive/838eefb4f93f2306d4614aafb9b2375f315d917f.tar.gz", - "url_template": "https://github.com///archive/.tar.gz" - }, "nixpkgs-22.11": { "branch": "nixos-22.11", "description": "Nix Packages collection", diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix index 386e3adc9..11698f611 100644 --- a/nix/tahoe-lafs.nix +++ b/nix/tahoe-lafs.nix @@ -34,6 +34,15 @@ # i2p extra dependencies , txi2p +# twisted extra dependencies - if there is overlap with our dependencies we +# have to skip them since we can't have a name in the argument set twice. +, appdirs +, bcrypt +, idna +, pyasn1 +, pyopenssl +, service-identity + # test dependencies , beautifulsoup4 , fixtures @@ -81,8 +90,9 @@ let six treq twisted - (twisted.passthru.optional-dependencies.tls) - (twisted.passthru.optional-dependencies.conch) + # Get the dependencies for the Twisted extras we depend on, too. 
+ twisted.passthru.optional-dependencies.tls + twisted.passthru.optional-dependencies.conch werkzeug zfec zope_interface diff --git a/nix/txi2p.nix b/nix/txi2p.nix index a3a5fea3a..c6b28aad4 100644 --- a/nix/txi2p.nix +++ b/nix/txi2p.nix @@ -1,4 +1,9 @@ -{ fetchPypi, buildPythonPackage, parsley, twisted, unittestCheckHook }: +{ fetchPypi +, buildPythonPackage +, parsley +, twisted +, unittestCheckHook +}: buildPythonPackage rec { pname = "txi2p-tahoe"; version = "0.3.7"; From b73045d93c1144ea1ef2ce9fb5129ce221e36943 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 16:21:33 -0400 Subject: [PATCH 31/54] fix ci configuration --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 82bb263f9..b7009c2af 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -72,7 +72,7 @@ workflows: - "nixos": name: "NixOS 22.11" - nixpkgs: "21.11" + nixpkgs: "22.11" - "nixos": name: "NixOS unstable" From edd8e99178a711ae2d1c6765288872f3a6cbb0b9 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 16:36:53 -0400 Subject: [PATCH 32/54] no more pypi-deps-db or mach-nix --- default.nix | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/default.nix b/default.nix index c5f49a372..21a17bdef 100644 --- a/default.nix +++ b/default.nix @@ -1,8 +1,7 @@ let # sources.nix contains information about which versions of some of our - # dependencies we should use. since we use it to pin nixpkgs and the PyPI - # package database, roughly all the rest of our dependencies are *also* - # pinned - indirectly. + # dependencies we should use. since we use it to pin nixpkgs, all the rest + # of our dependencies are *also* pinned - indirectly. # # sources.nix is managed using a tool called `niv`. as an example, to # update to the most recent version of nixpkgs from the 21.11 maintenance @@ -10,11 +9,6 @@ let # # niv update nixpkgs-21.11 # - # or, to update the PyPI package database -- which is necessary to make any - # newly released packages visible -- you likewise run: - # - # niv update pypi-deps-db - # # niv also supports chosing a specific revision, following a different # branch, etc. find complete documentation for the tool at # https://github.com/nmattia/niv From 93cd2aa354dee1777bdce719338fc61ce1209b04 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 16:43:22 -0400 Subject: [PATCH 33/54] re-enable nix-based test suite runs --- .circleci/config.yml | 2 +- nix/tahoe-lafs.nix | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index b7009c2af..8b6dc8347 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -438,7 +438,7 @@ jobs: cache_if_able nix-build \ --cores 8 \ --argstr pkgsVersion "nixpkgs-<>" \ - tests.nix + nix/tests/ typechecks: docker: diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix index 11698f611..ebb66a65e 100644 --- a/nix/tahoe-lafs.nix +++ b/nix/tahoe-lafs.nix @@ -2,6 +2,9 @@ , tahoe-lafs-src , extras +# control how the test suite is run +, doCheck ? 
false + # always dependencies , attrs , autobahn @@ -117,6 +120,11 @@ buildPythonPackage { inherit pname version; src = tahoe-lafs-src; buildInputs = pythonPackageDependencies; + + inherit doCheck; checkInputs = pythonCheckDependencies; - checkPhase = "TAHOE_LAFS_HYPOTHESIS_PROFILE=ci python -m twisted.trial -j $NIX_BUILD_CORES allmydata"; + checkPhase = '' + export TAHOE_LAFS_HYPOTHESIS_PROFILE=ci + python -m twisted.trial -j $NIX_BUILD_CORES allmydata + ''; } From f59c6a3acfaa6f2e60455ff5a91cb67015ab4a5c Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 17:01:57 -0400 Subject: [PATCH 34/54] Get our dependencies at runtime, too. --- nix/tahoe-lafs.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix index ebb66a65e..6d743c5b2 100644 --- a/nix/tahoe-lafs.nix +++ b/nix/tahoe-lafs.nix @@ -119,7 +119,7 @@ in buildPythonPackage { inherit pname version; src = tahoe-lafs-src; - buildInputs = pythonPackageDependencies; + propagatedBuildInputs = pythonPackageDependencies; inherit doCheck; checkInputs = pythonCheckDependencies; From 17a2c32e1f7c5eaf75340417da5741445e50b628 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 17:02:10 -0400 Subject: [PATCH 35/54] Avoid colliding with the "extra" package in nixpkgs :/ --- default.nix | 14 +++++++------- nix/tahoe-lafs.nix | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/default.nix b/default.nix index 21a17bdef..87f398ca5 100644 --- a/default.nix +++ b/default.nix @@ -23,18 +23,18 @@ in , pythonVersion ? "python310" # a string choosing the python derivation from # nixpkgs to target -, extras ? [ "tor" "i2p" ] # a list of strings identifying tahoe-lafs extras, - # the dependencies of which the resulting package - # will also depend on. Include all of the runtime - # extras by default because the incremental cost of - # including them is a lot smaller than the cost of - # re-building the whole thing to add them. +, extrasNames ? [ "tor" "i2p" ] # a list of strings identifying tahoe-lafs extras, + # the dependencies of which the resulting + # package will also depend on. Include all of the + # runtime extras by default because the incremental + # cost of including them is a lot smaller than the + # cost of re-building the whole thing to add them. }: with pkgs.${pythonVersion}.pkgs; callPackage ./nix/tahoe-lafs.nix { # Select whichever package extras were requested. - inherit extras; + inherit extrasNames; # Define the location of the Tahoe-LAFS source to be packaged. Clean up as # many of the non-source files (eg the `.git` directory, `~` backup files, diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix index 6d743c5b2..ec1c83f73 100644 --- a/nix/tahoe-lafs.nix +++ b/nix/tahoe-lafs.nix @@ -1,6 +1,6 @@ { buildPythonPackage , tahoe-lafs-src -, extras +, extrasNames # control how the test suite is run , doCheck ? 
false @@ -99,7 +99,7 @@ let werkzeug zfec zope_interface - ] ++ pickExtraDependencies pythonExtraDependencies extras; + ] ++ pickExtraDependencies pythonExtraDependencies extrasNames; pythonCheckDependencies = [ beautifulsoup4 From 1e0e5304d7d7c8f9300fd9338b76a3ebf4195dcc Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 17:02:50 -0400 Subject: [PATCH 36/54] actually add the test expression --- nix/tests/default.nix | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 nix/tests/default.nix diff --git a/nix/tests/default.nix b/nix/tests/default.nix new file mode 100644 index 000000000..0990c9b2b --- /dev/null +++ b/nix/tests/default.nix @@ -0,0 +1,4 @@ +# Build the package with the test suite enabled. +(import ../../. {}).override { + doCheck = true; +} From 0d11c6c07655f97ebedb102608fd9cd5b13a9f84 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 17:06:06 -0400 Subject: [PATCH 37/54] package metadata --- nix/tahoe-lafs.nix | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix index ec1c83f73..43f46092d 100644 --- a/nix/tahoe-lafs.nix +++ b/nix/tahoe-lafs.nix @@ -127,4 +127,11 @@ buildPythonPackage { export TAHOE_LAFS_HYPOTHESIS_PROFILE=ci python -m twisted.trial -j $NIX_BUILD_CORES allmydata ''; + + meta = with lib; { + homepage = "https://tahoe-lafs.org/"; + description = "secure, decentralized, fault-tolerant file store"; + # Also TGPPL + license = licenses.gpl2Plus; + }; } From 1b9936bd1b89e8b253aa003a7e129e72adbb100e Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 17:10:29 -0400 Subject: [PATCH 38/54] get lib :/ --- nix/tahoe-lafs.nix | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix index 43f46092d..62fba48b1 100644 --- a/nix/tahoe-lafs.nix +++ b/nix/tahoe-lafs.nix @@ -1,4 +1,5 @@ -{ buildPythonPackage +{ lib +, buildPythonPackage , tahoe-lafs-src , extrasNames From 1c926aeb869817c0a2aaa76786075b5459c396a2 Mon Sep 17 00:00:00 2001 From: dlee Date: Mon, 13 Mar 2023 16:23:28 -0500 Subject: [PATCH 39/54] Add space to return type --- src/allmydata/test/cli/wormholetesting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/allmydata/test/cli/wormholetesting.py b/src/allmydata/test/cli/wormholetesting.py index 99e26e64b..eb6249a0d 100644 --- a/src/allmydata/test/cli/wormholetesting.py +++ b/src/allmydata/test/cli/wormholetesting.py @@ -78,7 +78,7 @@ class MemoryWormholeServer(object): stderr: TextIO=stderr, _eventual_queue: Optional[Any]=None, _enable_dilate: bool=False, - )-> _MemoryWormhole: + ) -> _MemoryWormhole: """ Create a wormhole. It will be able to connect to other wormholes created by this instance (and constrained by the normal appid/relay_url From 6e6fc2d3070172dc9537b3379c305c070f0f41ea Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 19:02:54 -0400 Subject: [PATCH 40/54] The Nix test expression includes a package build, so just do that --- .circleci/config.yml | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8b6dc8347..a7dba614b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -412,29 +412,13 @@ jobs: --run 'python setup.py update_version' - "run": - name: "Build" + name: "Test" command: | # CircleCI build environment looks like it has a zillion and a # half cores. 
Don't let Nix autodetect this high core count # because it blows up memory usage and fails the test run. Pick a # number of cores that suites the build environment we're paying - # for (the free one!). - # - # Also, let it run more than one job at a time because we have to - # build a couple simple little dependencies that don't take - # advantage of multiple cores and we get a little speedup by doing - # them in parallel. - source .circleci/lib.sh - cache_if_able nix-build \ - --cores 3 \ - --max-jobs 2 \ - --argstr pkgsVersion "nixpkgs-<>" - - - "run": - name: "Test" - command: | - # Let it go somewhat wild for the test suite itself - source .circleci/lib.sh + # for (the free one!).h cache_if_able nix-build \ --cores 8 \ --argstr pkgsVersion "nixpkgs-<>" \ From 99559638b93b2ac08d6740d26e2bfbe5dee43fc5 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 19:03:26 -0400 Subject: [PATCH 41/54] remove some repetition in the package definition --- default.nix | 12 ++++++--- nix/tahoe-lafs.nix | 64 ++++------------------------------------------ 2 files changed, 13 insertions(+), 63 deletions(-) diff --git a/default.nix b/default.nix index 87f398ca5..3ecc88ec1 100644 --- a/default.nix +++ b/default.nix @@ -31,7 +31,13 @@ in # cost of re-building the whole thing to add them. }: -with pkgs.${pythonVersion}.pkgs; +with (pkgs.${pythonVersion}.override { + packageOverrides = self: super: { + # Some dependencies aren't packaged in nixpkgs so supply our own packages. + pycddl = self.callPackage ./nix/pycddl.nix { }; + txi2p = self.callPackage ./nix/txi2p.nix { }; + }; +}).pkgs; callPackage ./nix/tahoe-lafs.nix { # Select whichever package extras were requested. inherit extrasNames; @@ -42,7 +48,5 @@ callPackage ./nix/tahoe-lafs.nix { # when files that make no difference to the package have changed. tahoe-lafs-src = pkgs.lib.cleanSource ./.; - # Some dependencies aren't packaged in nixpkgs so supply our own packages. - pycddl = callPackage ./nix/pycddl.nix { }; - txi2p = callPackage ./nix/txi2p.nix { }; + doCheck = false; } diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix index 62fba48b1..380260c70 100644 --- a/nix/tahoe-lafs.nix +++ b/nix/tahoe-lafs.nix @@ -1,65 +1,11 @@ { lib +, pythonPackages , buildPythonPackage , tahoe-lafs-src , extrasNames # control how the test suite is run -, doCheck ? false - -# always dependencies -, attrs -, autobahn -, cbor2 -, click -, collections-extended -, cryptography -, distro -, eliot -, filelock -, foolscap -, future -, klein -, magic-wormhole -, netifaces -, psutil -, pycddl -, pyrsistent -, pyutil -, six -, treq -, twisted -, werkzeug -, zfec -, zope_interface - -# tor extra dependencies -, txtorcon - -# i2p extra dependencies -, txi2p - -# twisted extra dependencies - if there is overlap with our dependencies we -# have to skip them since we can't have a name in the argument set twice. 
-, appdirs -, bcrypt -, idna -, pyasn1 -, pyopenssl -, service-identity - -# test dependencies -, beautifulsoup4 -, fixtures -, hypothesis -, mock -, paramiko -, prometheus-client -, pytest -, pytest-timeout -, pytest-twisted -, tenacity -, testtools -, towncrier +, doCheck }: let pname = "tahoe-lafs"; @@ -67,12 +13,12 @@ let pickExtraDependencies = deps: extras: builtins.foldl' (accum: extra: accum ++ deps.${extra}) [] extras; - pythonExtraDependencies = { + pythonExtraDependencies = with pythonPackages; { tor = [ txtorcon ]; i2p = [ txi2p ]; }; - pythonPackageDependencies = [ + pythonPackageDependencies = with pythonPackages; [ attrs autobahn cbor2 @@ -102,7 +48,7 @@ let zope_interface ] ++ pickExtraDependencies pythonExtraDependencies extrasNames; - pythonCheckDependencies = [ + pythonCheckDependencies = with pythonPackages; [ beautifulsoup4 fixtures hypothesis From d648592a871520415196c6a1ee3b68081392a3da Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Mon, 13 Mar 2023 19:43:16 -0400 Subject: [PATCH 42/54] get the helper ... --- .circleci/config.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a7dba614b..b1e121337 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -418,7 +418,8 @@ jobs: # half cores. Don't let Nix autodetect this high core count # because it blows up memory usage and fails the test run. Pick a # number of cores that suites the build environment we're paying - # for (the free one!).h + # for (the free one!). + source .circleci/lib.sh cache_if_able nix-build \ --cores 8 \ --argstr pkgsVersion "nixpkgs-<>" \ From d7018905b9537befb47753da92ad3f975f9951de Mon Sep 17 00:00:00 2001 From: Itamar Turner-Trauring Date: Tue, 14 Mar 2023 09:57:29 -0400 Subject: [PATCH 43/54] Switch away from using stdin, it's flaky on Windows. --- integration/test_get_put.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/integration/test_get_put.py b/integration/test_get_put.py index 65020429e..1b6c30072 100644 --- a/integration/test_get_put.py +++ b/integration/test_get_put.py @@ -3,7 +3,7 @@ Integration tests for getting and putting files, including reading from stdin and stdout. """ -from subprocess import Popen, PIPE, check_output +from subprocess import Popen, PIPE, check_output, check_call import sys import pytest @@ -53,23 +53,38 @@ def test_put_from_stdin(alice, get_put_alias, tmpdir): def test_get_to_stdout(alice, get_put_alias, tmpdir): """ It's possible to upload a file, and then download it to stdout. - - We test with large file, this time. """ tempfile = tmpdir.join("file") - large_data = DATA * 1_000_000 with tempfile.open("wb") as f: - f.write(large_data) + f.write(DATA) cli(alice, "put", str(tempfile), "getput:tostdout") p = Popen( ["tahoe", "--node-directory", alice.node_dir, "get", "getput:tostdout", "-"], stdout=PIPE ) - assert p.stdout.read() == large_data + assert p.stdout.read() == DATA assert p.wait() == 0 +def test_large_file(alice, get_put_alias, tmp_path): + """ + It's possible to upload and download a larger file. + + We avoid stdin/stdout since that's flaky on Windows. 
+ """ + tempfile = tmp_path / "file" + with tempfile.open("wb") as f: + f.write(DATA * 1_000_000) + cli(alice, "put", str(tempfile), "getput:largefile") + + outfile = tmp_path / "out" + check_call( + ["tahoe", "--node-directory", alice.node_dir, "get", "getput:largefile", str(outfile)], + ) + assert outfile.read_bytes() == tempfile.read_bytes() + + @pytest.mark.skipif( sys.platform.startswith("win"), reason="reconfigure() has issues on Windows" From ea5928ce537a97b7fa850dc26553c12566e3dc5f Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Tue, 14 Mar 2023 10:19:27 -0400 Subject: [PATCH 44/54] news fragment --- newsfragments/3987.minor | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 newsfragments/3987.minor diff --git a/newsfragments/3987.minor b/newsfragments/3987.minor new file mode 100644 index 000000000..e69de29bb From ff50bfe5c4d8e05c02a3fa58d754779855988375 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Tue, 14 Mar 2023 10:19:49 -0400 Subject: [PATCH 45/54] Accept all the arguments default.nix accepts, too --- nix/tests/default.nix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nix/tests/default.nix b/nix/tests/default.nix index 0990c9b2b..2eb490718 100644 --- a/nix/tests/default.nix +++ b/nix/tests/default.nix @@ -1,4 +1,4 @@ # Build the package with the test suite enabled. -(import ../../. {}).override { +args@{...}: (import ../../. args).override { doCheck = true; } From 10414e80eda31e956631d9f336858a58cc5fdad9 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Tue, 14 Mar 2023 10:25:02 -0400 Subject: [PATCH 46/54] Remove some unnecessary hierarchy I thought `default.nix` was handled specially for the purposes of automatic parameter population but it isn't. Instead, you just need this `args@{...}` pattern. --- .circleci/config.yml | 2 +- nix/{tests/default.nix => tests.nix} | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename nix/{tests/default.nix => tests.nix} (60%) diff --git a/.circleci/config.yml b/.circleci/config.yml index b1e121337..ab0573a3f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -423,7 +423,7 @@ jobs: cache_if_able nix-build \ --cores 8 \ --argstr pkgsVersion "nixpkgs-<>" \ - nix/tests/ + nix/tests.nix typechecks: docker: diff --git a/nix/tests/default.nix b/nix/tests.nix similarity index 60% rename from nix/tests/default.nix rename to nix/tests.nix index 2eb490718..42ca9f882 100644 --- a/nix/tests/default.nix +++ b/nix/tests.nix @@ -1,4 +1,4 @@ # Build the package with the test suite enabled. -args@{...}: (import ../../. args).override { +args@{...}: (import ../. 
args).override {
  doCheck = true;
}

From 505032d0cac83111341304335c88d2ee95afdd9f Mon Sep 17 00:00:00 2001
From: Jean-Paul Calderone
Date: Tue, 14 Mar 2023 20:38:46 -0400
Subject: [PATCH 47/54] a note about what this is and what's going on upstream

---
 nix/pycddl.nix | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/nix/pycddl.nix b/nix/pycddl.nix
index 0f6a0329e..563936cbb 100644
--- a/nix/pycddl.nix
+++ b/nix/pycddl.nix
@@ -1,3 +1,10 @@
+# package https://gitlab.com/tahoe-lafs/pycddl
+#
+# also in the process of being pushed upstream
+# https://github.com/NixOS/nixpkgs/pull/221220
+#
+# we should switch to the upstream package when it is available from our
+# minimum version of nixpkgs
 { lib, fetchPypi, buildPythonPackage, rustPlatform }:
 buildPythonPackage rec {
   pname = "pycddl";

From 324a5ba397bfe211311cf7db1da7f70adc14b85d Mon Sep 17 00:00:00 2001
From: Jean-Paul Calderone
Date: Tue, 14 Mar 2023 20:40:08 -0400
Subject: [PATCH 48/54] give the reader a hint about the interpretation of `./.`

---
 default.nix | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/default.nix b/default.nix
index 3ecc88ec1..b87a6730a 100644
--- a/default.nix
+++ b/default.nix
@@ -42,10 +42,11 @@ callPackage ./nix/tahoe-lafs.nix {
   # Select whichever package extras were requested.
   inherit extrasNames;
 
-  # Define the location of the Tahoe-LAFS source to be packaged.  Clean up as
-  # many of the non-source files (eg the `.git` directory, `~` backup files,
-  # nix's own `result` symlink, etc) as possible to avoid needing to re-build
-  # when files that make no difference to the package have changed.
+  # Define the location of the Tahoe-LAFS source to be packaged (the same
+  # directory that contains this file).  Clean up as many of the non-source
+  # files (eg the `.git` directory, `~` backup files, nix's own `result`
+  # symlink, etc) as possible to avoid needing to re-build when files that
+  # make no difference to the package have changed.
   tahoe-lafs-src = pkgs.lib.cleanSource ./.;
 
   doCheck = false;

From aaaec9a69dbed5c0b97566d3be8bde79fd9765e2 Mon Sep 17 00:00:00 2001
From: Jean-Paul Calderone
Date: Wed, 15 Mar 2023 15:42:52 -0400
Subject: [PATCH 49/54] package update instructions

---
 nix/pycddl.nix | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/nix/pycddl.nix b/nix/pycddl.nix
index 563936cbb..703f00595 100644
--- a/nix/pycddl.nix
+++ b/nix/pycddl.nix
@@ -4,7 +4,29 @@
 # https://github.com/NixOS/nixpkgs/pull/221220
 #
 # we should switch to the upstream package when it is available from our
-# minimum version of nixpkgs
+# minimum version of nixpkgs.
+#
+# if you need to update this package to a new pycddl release then
+#
+# 1. change the value given to `buildPythonPackage` for `version` to match
+#    the new release
+#
+# 2. change the value given to `fetchPypi` for `sha256` to `lib.fakeHash`
+#
+# 3. run `nix-build`
+#
+# 4. there will be an error about a hash mismatch. change the value given to
+#    `fetchPypi` for `sha256` to the "actual" hash value reported.
+#
+# 5. change the value given to `cargoDeps` for `hash` to `lib.fakeHash`.
+#
+# 6. run `nix-build`
+#
+# 7. there will be an error about a hash mismatch. change the value given to
+#    `cargoDeps` for `hash` to the "actual" hash value reported.
+#
+# 8. run `nix-build`. it should succeed. if it does not, seek assistance.
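+#
+# as a rough illustration (the values involved are hypothetical, not real
+# pycddl hashes), step 2 temporarily leaves the source block looking like:
+#
+#   src = fetchPypi {
+#     inherit pname version;
+#     sha256 = lib.fakeHash;
+#   };
+#
+# and step 4 swaps `lib.fakeHash` for the "actual" `sha256-...` value copied
+# out of nix's hash mismatch error.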
+#
 { lib, fetchPypi, buildPythonPackage, rustPlatform }:
 buildPythonPackage rec {
   pname = "pycddl";

From 2a8867f6cf0ca52ef62361b049babafffc956705 Mon Sep 17 00:00:00 2001
From: Jean-Paul Calderone
Date: Wed, 15 Mar 2023 15:47:43 -0400
Subject: [PATCH 50/54] more packaging instructions

---
 nix/txi2p.nix | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/nix/txi2p.nix b/nix/txi2p.nix
index c6b28aad4..3464b7b3d 100644
--- a/nix/txi2p.nix
+++ b/nix/txi2p.nix
@@ -1,3 +1,23 @@
+# package https://github.com/tahoe-lafs/txi2p
+#
+# if you need to update this package to a new txi2p release then
+#
+# 1. change the value given to `buildPythonPackage` for `version` to match
+#    the new release
+#
+# 2. change the value given to `fetchPypi` for `sha256` to `lib.fakeHash`
+#
+# 3. run `nix-build`
+#
+# 4. there will be an error about a hash mismatch. change the value given to
+#    `fetchPypi` for `sha256` to the "actual" hash value reported.
+#
+# 5. if there are new runtime dependencies then add them to the argument list
+#    at the top. if there are new test dependencies add them to the
+#    `checkInputs` list.
+#
+# 6. run `nix-build`. it should succeed. if it does not, seek assistance.
+#
 { fetchPypi
 , buildPythonPackage
 , parsley
 , twisted
 , unittestCheckHook
 }:
 buildPythonPackage rec {
   pname = "txi2p-tahoe";
   version = "0.3.7";

From a3ebd21b25c29fe8a871ae53967e0ed3f29be5d4 Mon Sep 17 00:00:00 2001
From: Jean-Paul Calderone
Date: Fri, 17 Mar 2023 15:30:14 -0400
Subject: [PATCH 51/54] implement retry ourselves, don't depend on tenacity

---
 setup.py                          |  1 -
 src/allmydata/test/test_iputil.py | 55 ++++++++++++++++++++++++-------
 2 files changed, 44 insertions(+), 12 deletions(-)

diff --git a/setup.py b/setup.py
index 82ff45764..152c49f0e 100644
--- a/setup.py
+++ b/setup.py
@@ -413,7 +413,6 @@ setup(name="tahoe-lafs", # also set in __init__.py
           "beautifulsoup4",
           "html5lib",
           "junitxml",
-          "tenacity",
           # Pin old version until
           # https://github.com/paramiko/paramiko/issues/1961 is fixed.
           "paramiko < 2.9",
diff --git a/src/allmydata/test/test_iputil.py b/src/allmydata/test/test_iputil.py
index 081c80ee3..c060fcc04 100644
--- a/src/allmydata/test/test_iputil.py
+++ b/src/allmydata/test/test_iputil.py
@@ -4,18 +4,14 @@ Tests for allmydata.util.iputil.
 Ported to Python 3.
 """
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-from future.utils import PY2, native_str
-if PY2:
-    from builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
+from __future__ import annotations
 
 import os, socket
 import gc
 
+from functools import wraps
+from typing import TypeVar, Callable
+from typing_extensions import TypeAlias
+
 from testtools.matchers import (
     MatchesAll,
     IsInstance,
@@ -25,8 +21,6 @@ from testtools.matchers import (
 
 from twisted.trial import unittest
 
-from tenacity import retry, stop_after_attempt
-
 from foolscap.api import Tub
 
 from allmydata.util import iputil, gcutil
@@ -39,6 +33,45 @@ from .common import (
     SyncTestCase,
 )
 
+T = TypeVar("T")
+
+TestFunction: TypeAlias = Callable[[], T]
+Decorator: TypeAlias = Callable[[TestFunction[T]], TestFunction[T]]
+
+def retry(stop: Callable[[], bool]) -> Decorator[T]:
+    """
+    Call a function until the predicate says to stop or the function stops
+    raising an exception.
+
+    :param stop: A callable to call after the decorated function raises an
+        exception.  The decorated function will be called again if ``stop``
+        returns ``False``.
+
+    :return: A decorator function.
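+
+    A minimal usage sketch (the decorated test method is hypothetical)::
+
+        class SomeFlakyTests(unittest.TestCase):
+            # give the flaky test up to three attempts before failing
+            @retry(stop_after_attempt(3))
+            def test_sometimes_flaky(self):
+                ...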
+ """ + def decorate(f: TestFunction[T]) -> TestFunction[T]: + @wraps(f) + def decorator(self) -> T: + while True: + try: + return f(self) + except Exception: + if stop(): + raise + return decorator + return decorate + +def stop_after_attempt(limit: int) -> Callable[[], bool]: + """ + Stop after ``limit`` calls. + """ + counter = 0 + def check(): + nonlocal counter + counter += 1 + return counter < limit + return check + class ListenOnUsed(unittest.TestCase): """Tests for listenOnUnused.""" @@ -127,7 +160,7 @@ class GetLocalAddressesSyncTests(SyncTestCase): IsInstance(list), AllMatch( MatchesAll( - IsInstance(native_str), + IsInstance(str), MatchesPredicate( lambda addr: socket.inet_pton(socket.AF_INET, addr), "%r is not an IPv4 address.", From a9f34655686764e633be4a6ccb8f2d79b841a291 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Fri, 17 Mar 2023 15:31:07 -0400 Subject: [PATCH 52/54] news fragment --- newsfragments/3989.installation | 1 + 1 file changed, 1 insertion(+) create mode 100644 newsfragments/3989.installation diff --git a/newsfragments/3989.installation b/newsfragments/3989.installation new file mode 100644 index 000000000..a2155b65c --- /dev/null +++ b/newsfragments/3989.installation @@ -0,0 +1 @@ +tenacity is no longer a dependency. From 5cf892b441daf77ad5efada4a785dfa4a0e2ecf6 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Fri, 17 Mar 2023 15:32:13 -0400 Subject: [PATCH 53/54] Also remove it from the Nix packaging --- nix/tahoe-lafs.nix | 1 - 1 file changed, 1 deletion(-) diff --git a/nix/tahoe-lafs.nix b/nix/tahoe-lafs.nix index 380260c70..5986db420 100644 --- a/nix/tahoe-lafs.nix +++ b/nix/tahoe-lafs.nix @@ -58,7 +58,6 @@ let pytest pytest-timeout pytest-twisted - tenacity testtools towncrier ]; From 6a4346587cf06f7603572796daf4851bd98a1415 Mon Sep 17 00:00:00 2001 From: Jean-Paul Calderone Date: Fri, 17 Mar 2023 15:46:27 -0400 Subject: [PATCH 54/54] Fix the type annotations --- src/allmydata/test/test_iputil.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/src/allmydata/test/test_iputil.py b/src/allmydata/test/test_iputil.py index c060fcc04..26274830f 100644 --- a/src/allmydata/test/test_iputil.py +++ b/src/allmydata/test/test_iputil.py @@ -11,7 +11,6 @@ import gc from functools import wraps from typing import TypeVar, Callable -from typing_extensions import TypeAlias from testtools.matchers import ( MatchesAll, IsInstance, @@ -33,12 +32,10 @@ from .common import ( SyncTestCase, ) -T = TypeVar("T") +T = TypeVar("T", contravariant=True) +U = TypeVar("U", covariant=True) -TestFunction: TypeAlias = Callable[[], T] -Decorator: TypeAlias = Callable[[TestFunction[T]], TestFunction[T]] - -def retry(stop: Callable[[], bool]) -> Decorator[T]: +def retry(stop: Callable[[], bool]) -> Callable[[Callable[[T], U]], Callable[[T], U]]: """ Call a function until the predicate says to stop or the function stops raising an exception. @@ -49,9 +46,9 @@ def retry(stop: Callable[[], bool]) -> Decorator[T]: :return: A decorator function. """ - def decorate(f: TestFunction[T]) -> TestFunction[T]: + def decorate(f: Callable[[T], U]) -> Callable[[T], U]: @wraps(f) - def decorator(self) -> T: + def decorator(self: T) -> U: while True: try: return f(self)