storage: add version number to share data. Closes #90.

Brian Warner 2007-09-04 09:00:24 -07:00
parent fb65aadd82
commit 277e720f7c
3 changed files with 24 additions and 21 deletions


@@ -48,7 +48,7 @@ def dump_share(config, out=sys.stdout, err=sys.stderr):
     f = storage.ShareFile(config['filename'])
     # use a ReadBucketProxy to parse the bucket and find the uri extension
     bp = storage.ReadBucketProxy(None)
-    offsets = bp._parse_offsets(f.read_share_data(0, 8*4))
+    offsets = bp._parse_offsets(f.read_share_data(0, 0x24))
     seek = offsets['uri_extension']
     length = struct.unpack(">L", f.read_share_data(seek, 4))[0]
     seek += 4
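For context, dump_share then uses these offsets to locate and read the URI extension: the section begins with a four-byte big-endian length, followed by that many bytes. A minimal standalone sketch of that logic, assuming f is a storage.ShareFile as above (read_uri_extension is a hypothetical helper restating the surrounding code, not a project API):

    import struct

    def read_uri_extension(f, offsets):
        # the uri_extension section starts with a 4-byte big-endian length...
        seek = offsets['uri_extension']
        (length,) = struct.unpack(">L", f.read_share_data(seek, 4))
        # ...followed by that many bytes of uri_extension data
        return f.read_share_data(seek + 4, length)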


@@ -8,7 +8,7 @@ from zope.interface import implements
 from allmydata.interfaces import RIStorageServer, RIBucketWriter, \
      RIBucketReader, IStorageBucketWriter, IStorageBucketReader, HASH_SIZE
 from allmydata.util import fileutil, idlib, mathutil
-from allmydata.util.assertutil import precondition
+from allmydata.util.assertutil import precondition, _assert
 
 # storage/
 # storage/shares/incoming
@@ -377,15 +377,16 @@ Share data is written into a single file. At the start of the file, there is
 a series of four-byte big-endian offset values, which indicate where each
 section starts. Each offset is measured from the beginning of the file.
 
-0x00: segment size
-0x04: data size
-0x08: offset of data (=00 00 00 1c)
-0x0c: offset of plaintext_hash_tree
-0x10: offset of crypttext_hash_tree
-0x14: offset of block_hashes
-0x18: offset of share_hashes
-0x1c: offset of uri_extension_length + uri_extension
-0x20: start of data
+0x00: version number (=00 00 00 01)
+0x04: segment size
+0x08: data size
+0x0c: offset of data (=00 00 00 24)
+0x10: offset of plaintext_hash_tree
+0x14: offset of crypttext_hash_tree
+0x18: offset of block_hashes
+0x1c: offset of share_hashes
+0x20: offset of uri_extension_length + uri_extension
+0x24: start of data
 ? : start of plaintext_hash_tree
 ? : start of crypttext_hash_tree
 ? : start of block_hashes
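For reference, the new header is nine big-endian uint32 fields, 9*4 = 36 = 0x24 bytes in total, which is where the 0x24 constants throughout this commit come from. A sketch of building such a header with the stdlib struct module (V1_HEADER and pack_v1_header are illustrative names; this mirrors the WriteBucketProxy code below but is not the project's API):

    import struct

    V1_HEADER = ">LLLLLLLLL"          # nine big-endian 4-byte fields
    assert struct.calcsize(V1_HEADER) == 0x24

    def pack_v1_header(segment_size, data_size, offsets):
        # field order matches the layout table above
        return struct.pack(V1_HEADER,
                           1,  # version number (=00 00 00 01)
                           segment_size,
                           data_size,
                           offsets['data'],
                           offsets['plaintext_hash_tree'],
                           offsets['crypttext_hash_tree'],
                           offsets['block_hashes'],
                           offsets['share_hashes'],
                           offsets['uri_extension'])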
@@ -422,7 +423,7 @@ class WriteBucketProxy:
         self._uri_extension_size = uri_extension_size
 
         offsets = self._offsets = {}
-        x = 0x20
+        x = 0x24
         offsets['data'] = x
         x += data_size
         offsets['plaintext_hash_tree'] = x
@@ -435,7 +436,8 @@
         x += self._share_hash_size
         offsets['uri_extension'] = x
 
-        offset_data = struct.pack(">LLLLLLLL",
+        offset_data = struct.pack(">LLLLLLLLL",
+                                  1, # version number
                                   segment_size,
                                   data_size,
                                   offsets['data'],
@@ -445,7 +447,7 @@
                                   offsets['share_hashes'],
                                   offsets['uri_extension'],
                                   )
-        assert len(offset_data) == 8*4
+        assert len(offset_data) == 0x24
         self._offset_data = offset_data
 
     def start(self):
@@ -542,16 +544,17 @@ class ReadBucketProxy:
 
     def start(self):
         # TODO: for small shares, read the whole bucket in start()
-        d = self._read(0, 8*4)
+        d = self._read(0, 0x24)
         d.addCallback(self._parse_offsets)
         return d
 
     def _parse_offsets(self, data):
-        precondition(len(data) == 8*4)
+        precondition(len(data) == 0x24)
         self._offsets = {}
-        self._segment_size = struct.unpack(">L", data[0:4])[0]
-        self._data_size = struct.unpack(">L", data[4:8])[0]
-        x = 0x08
+        (version, self._segment_size, self._data_size) = \
+                  struct.unpack(">LLL", data[0:0xc])
+        _assert(version == 1)
+        x = 0x0c
         for field in ( 'data',
                        'plaintext_hash_tree',
                        'crypttext_hash_tree',
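Read back, _parse_offsets now peels off the three fixed fields, rejects unknown versions, and walks the six offset fields starting at 0x0c. The same loop as a standalone sketch (parse_v1_header is an illustrative name; plain asserts stand in for allmydata's precondition/_assert helpers):

    import struct

    def parse_v1_header(data):
        assert len(data) == 0x24
        (version, segment_size, data_size) = struct.unpack(">LLL", data[0:0xc])
        assert version == 1
        # six offsets follow the three fixed fields, one uint32 each
        offsets = {}
        x = 0x0c
        for field in ('data', 'plaintext_hash_tree', 'crypttext_hash_tree',
                      'block_hashes', 'share_hashes', 'uri_extension'):
            (offsets[field],) = struct.unpack(">L", data[x:x+4])
            x += 4
        return (segment_size, data_size, offsets)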


@@ -102,7 +102,7 @@ class BucketProxy(unittest.TestCase):
         # nodes. Furthermore, let's assume the uri_extension is 500 bytes
         # long. That should make the whole share:
         #
-        # 0x1c + 100 + 7*32 + 7*32 + 7*32 + 3*(2+32) + 4+500 = 1406 bytes long
+        # 0x24 + 100 + 7*32 + 7*32 + 7*32 + 3*(2+32) + 4+500 = 1414 bytes long
 
         plaintext_hashes = [hashutil.tagged_hash("plain", "bar%d" % i)
                             for i in range(7)]
@@ -114,7 +114,7 @@
                         for i in (1,9,13)]
         uri_extension = "s" + "E"*498 + "e"
 
-        bw, rb, final = self.make_bucket("test_readwrite", 1406)
+        bw, rb, final = self.make_bucket("test_readwrite", 1414)
         bp = WriteBucketProxy(rb,
                               data_size=95,
                               segment_size=25,
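The updated constant checks out against the comment's arithmetic: the header grew by 8 bytes (0x1c -> 0x24), so the expected share length grows from 1406 to 1414. A throwaway sanity check, not part of the test suite:

    header       = 0x24              # nine 4-byte header fields
    share_data   = 100               # share data, per the comment above
    hash_trees   = 3 * (7 * 32)      # plaintext, crypttext, and block hash trees
    share_hashes = 3 * (2 + 32)      # three (index, hash) pairs
    uri_ext      = 4 + 500           # length prefix + uri_extension
    assert header + share_data + hash_trees + share_hashes + uri_ext == 1414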