immutable: remove the last bits of code (only test code or unused code) which did something with plaintext hashes or plaintext hash trees

This commit is contained in:
Zooko O'Whielacronx 2008-12-19 08:18:07 -07:00
parent d67a3fe4b1
commit 7b285ebcb1
3 changed files with 10 additions and 39 deletions
src/allmydata

@@ -18,13 +18,13 @@ the beginning of the share data.
0x04: segment size
0x08: data size
0x0c: offset of data (=00 00 00 24)
-0x10: offset of plaintext_hash_tree
+0x10: offset of plaintext_hash_tree UNUSED
0x14: offset of crypttext_hash_tree
0x18: offset of block_hashes
0x1c: offset of share_hashes
0x20: offset of uri_extension_length + uri_extension
0x24: start of data
-? : start of plaintext_hash_tree
+? : start of plaintext_hash_tree UNUSED
? : start of crypttext_hash_tree
? : start of block_hashes
? : start of share_hashes
@@ -43,7 +43,7 @@ limitations described in #346.
0x04: segment size
0x0c: data size
0x14: offset of data (=00 00 00 00 00 00 00 44)
-0x1c: offset of plaintext_hash_tree
+0x1c: offset of plaintext_hash_tree UNUSED
0x24: offset of crypttext_hash_tree
0x2c: offset of block_hashes
0x34: offset of share_hashes
@@ -92,7 +92,7 @@ class WriteBucketProxy:
x = 0x24
offsets['data'] = x
x += data_size
-offsets['plaintext_hash_tree'] = x
+offsets['plaintext_hash_tree'] = x # UNUSED
x += self._segment_hash_size
offsets['crypttext_hash_tree'] = x
x += self._segment_hash_size
@@ -110,7 +110,7 @@ class WriteBucketProxy:
segment_size,
data_size,
offsets['data'],
-offsets['plaintext_hash_tree'],
+offsets['plaintext_hash_tree'], # UNUSED
offsets['crypttext_hash_tree'],
offsets['block_hashes'],
offsets['share_hashes'],
@@ -143,17 +143,6 @@ class WriteBucketProxy:
len(data), self._segment_size)
return self._write(offset, data)
-def put_plaintext_hashes(self, hashes):
-offset = self._offsets['plaintext_hash_tree']
-assert isinstance(hashes, list)
-data = "".join(hashes)
-precondition(len(data) == self._segment_hash_size,
-len(data), self._segment_hash_size)
-precondition(offset+len(data) <= self._offsets['crypttext_hash_tree'],
-offset, len(data), offset+len(data),
-self._offsets['crypttext_hash_tree'])
-return self._write(offset, data)
def put_crypttext_hashes(self, hashes):
offset = self._offsets['crypttext_hash_tree']
assert isinstance(hashes, list)
@@ -220,7 +209,7 @@ class WriteBucketProxy_v2(WriteBucketProxy):
x = 0x44
offsets['data'] = x
x += data_size
-offsets['plaintext_hash_tree'] = x
+offsets['plaintext_hash_tree'] = x # UNUSED
x += self._segment_hash_size
offsets['crypttext_hash_tree'] = x
x += self._segment_hash_size
@@ -238,7 +227,7 @@ class WriteBucketProxy_v2(WriteBucketProxy):
segment_size,
data_size,
offsets['data'],
-offsets['plaintext_hash_tree'],
+offsets['plaintext_hash_tree'], # UNUSED
offsets['crypttext_hash_tree'],
offsets['block_hashes'],
offsets['share_hashes'],
@@ -306,7 +295,7 @@ class ReadBucketProxy:
self._fieldstruct = fieldstruct
for field in ( 'data',
-'plaintext_hash_tree',
+'plaintext_hash_tree', # UNUSED
'crypttext_hash_tree',
'block_hashes',
'share_hashes',
@@ -333,13 +322,6 @@ class ReadBucketProxy:
return [ s[i:i+HASH_SIZE]
for i in range(0, len(s), HASH_SIZE) ]
-def get_plaintext_hashes(self):
-offset = self._offsets['plaintext_hash_tree']
-size = self._offsets['crypttext_hash_tree'] - offset
-d = self._read(offset, size)
-d.addCallback(self._str2l)
-return d
def get_crypttext_hashes(self):
offset = self._offsets['crypttext_hash_tree']
size = self._offsets['block_hashes'] - offset

@@ -313,11 +313,6 @@ class IStorageBucketReader(Interface):
@return: ShareData
"""
-def get_plaintext_hashes():
-"""
-@return: ListOf(Hash)
-"""
def get_crypttext_hashes():
"""
@return: ListOf(Hash)

@@ -134,8 +134,8 @@ class BucketProxy(unittest.TestCase):
def _do_test_readwrite(self, name, header_size, wbp_class, rbp_class):
# Let's pretend each share has 100 bytes of data, and that there are
-# 4 segments (25 bytes each), and 8 shares total. So the three
-# per-segment merkle trees (plaintext_hash_tree, crypttext_hash_tree,
+# 4 segments (25 bytes each), and 8 shares total. So the two
+# per-segment merkle trees (crypttext_hash_tree,
# block_hashes) will have 4 leaves and 7 nodes each. The per-share
# merkle tree (share_hashes) has 8 leaves and 15 nodes, and we need 3
# nodes. Furthermore, let's assume the uri_extension is 500 bytes
@@ -146,8 +146,6 @@ class BucketProxy(unittest.TestCase):
sharesize = header_size + 100 + 7*32 + 7*32 + 7*32 + 3*(2+32) + 4+500
-plaintext_hashes = [hashutil.tagged_hash("plain", "bar%d" % i)
-for i in range(7)]
crypttext_hashes = [hashutil.tagged_hash("crypt", "bar%d" % i)
for i in range(7)]
block_hashes = [hashutil.tagged_hash("block", "bar%d" % i)
@@ -170,7 +168,6 @@ class BucketProxy(unittest.TestCase):
d.addCallback(lambda res: bp.put_block(1, "b"*25))
d.addCallback(lambda res: bp.put_block(2, "c"*25))
d.addCallback(lambda res: bp.put_block(3, "d"*20))
-d.addCallback(lambda res: bp.put_plaintext_hashes(plaintext_hashes))
d.addCallback(lambda res: bp.put_crypttext_hashes(crypttext_hashes))
d.addCallback(lambda res: bp.put_block_hashes(block_hashes))
d.addCallback(lambda res: bp.put_share_hashes(share_hashes))
@@ -197,9 +194,6 @@ class BucketProxy(unittest.TestCase):
d1.addCallback(lambda res: rbp.get_block(3))
d1.addCallback(lambda res: self.failUnlessEqual(res, "d"*20))
-d1.addCallback(lambda res: rbp.get_plaintext_hashes())
-d1.addCallback(lambda res:
-self.failUnlessEqual(res, plaintext_hashes))
d1.addCallback(lambda res: rbp.get_crypttext_hashes())
d1.addCallback(lambda res:
self.failUnlessEqual(res, crypttext_hashes))