import os, re, weakref, stat, struct

from foolscap import Referenceable
from twisted.application import service
from twisted.internet import defer
from zope.interface import implements

from allmydata.interfaces import RIStorageServer, RIBucketWriter, \
     RIBucketReader, IStorageBucketWriter, IStorageBucketReader, HASH_SIZE
from allmydata.util import fileutil, idlib, mathutil
from allmydata.util.assertutil import precondition

# Storage layout on disk:
#
# store/
# store/incoming   # temp files named $STORAGEINDEX/$SHARENUM, moved to
#                  # store/$STORAGEINDEX/$SHARENUM when the writer closes
# store/$STORAGEINDEX
# store/$STORAGEINDEX/$SHARENUM   # each share is a single file (see the
#                                 # share layout description below)

# $SHARENUM matches this regex. It is anchored and requires at least one
# digit, so stray files in a bucket directory are not mistaken for shares
# (remote_get_buckets calls int() on every matching name).
NUM_RE = re.compile("^[0-9]+$")

class BucketWriter(Referenceable):
    implements(RIBucketWriter)

    def __init__(self, ss, incominghome, finalhome, size):
        self.ss = ss
        self.incominghome = incominghome
        self.finalhome = finalhome
        self._size = size
        self.closed = False
        self.throw_out_all_data = False
        # touch the file, so later callers will see that we're working on it
        f = open(self.incominghome, 'ab')
        f.close()

    def allocated_size(self):
        return self._size

    def remote_write(self, offset, data):
        precondition(not self.closed)
        precondition(offset >= 0)
        precondition(offset+len(data) <= self._size)
        if self.throw_out_all_data:
            return
        # Open in read/write mode rather than append mode: with 'ab' the
        # seek() would be ignored and every write would land at the end of
        # the file. The file already exists because __init__ touched it.
        f = open(self.incominghome, 'rb+')
        f.seek(offset)
        f.write(data)
        f.close()

    def remote_close(self):
        precondition(not self.closed)
        fileutil.rename(self.incominghome, self.finalhome)
        self.closed = True
        filelen = os.stat(self.finalhome)[stat.ST_SIZE]
        self.ss.bucket_writer_closed(self, filelen)

class BucketReader(Referenceable):
    implements(RIBucketReader)

    def __init__(self, home):
        self.home = home

    def remote_read(self, offset, length):
        f = open(self.home, 'rb')
        f.seek(offset)
        return f.read(length)

class StorageServer(service.MultiService, Referenceable):
    implements(RIStorageServer)
    name = 'storageserver'

    def __init__(self, storedir, sizelimit=None, no_storage=False):
        service.MultiService.__init__(self)
        fileutil.make_dirs(storedir)
        self.storedir = storedir
        self.sizelimit = sizelimit
        self.no_storage = no_storage
        self.incomingdir = os.path.join(storedir, 'incoming')
        self._clean_incomplete()
        fileutil.make_dirs(self.incomingdir)
        self._active_writers = weakref.WeakKeyDictionary()

        self.measure_size()

    def _clean_incomplete(self):
        fileutil.rm_dir(self.incomingdir)

    def measure_size(self):
        self.consumed = fileutil.du(self.storedir)

    def allocated_size(self):
        space = self.consumed
        for bw in self._active_writers:
            space += bw.allocated_size()
        return space

    def remote_allocate_buckets(self, storage_index, sharenums, allocated_size,
                                canary):
        alreadygot = set()
        bucketwriters = {} # k: shnum, v: BucketWriter
        si_s = idlib.b2a(storage_index)
        space_per_bucket = allocated_size
        no_limits = self.sizelimit is None
        yes_limits = not no_limits
        if yes_limits:
            remaining_space = self.sizelimit - self.allocated_size()
        for shnum in sharenums:
            incominghome = os.path.join(self.incomingdir, si_s, "%d" % shnum)
            finalhome = os.path.join(self.storedir, si_s, "%d" % shnum)
            if os.path.exists(incominghome) or os.path.exists(finalhome):
                alreadygot.add(shnum)
            elif no_limits or remaining_space >= space_per_bucket:
                fileutil.make_dirs(os.path.join(self.incomingdir, si_s))
                bw = BucketWriter(self, incominghome, finalhome,
                                  space_per_bucket)
                if self.no_storage:
                    bw.throw_out_all_data = True
                bucketwriters[shnum] = bw
                self._active_writers[bw] = 1
                if yes_limits:
                    remaining_space -= space_per_bucket
            else:
                # not enough space to accept this bucket
                pass

        if bucketwriters:
            fileutil.make_dirs(os.path.join(self.storedir, si_s))

        return alreadygot, bucketwriters

    def bucket_writer_closed(self, bw, consumed_size):
        self.consumed += consumed_size
        del self._active_writers[bw]

    def remote_get_buckets(self, storage_index):
        bucketreaders = {} # k: sharenum, v: BucketReader
        storagedir = os.path.join(self.storedir, idlib.b2a(storage_index))
        try:
            for f in os.listdir(storagedir):
                if NUM_RE.match(f):
                    br = BucketReader(os.path.join(storagedir, f))
                    bucketreaders[int(f)] = br
        except OSError:
            # Commonly caused by there being no buckets at all.
            pass

        return bucketreaders
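
# A hypothetical client-side sketch of the allocate/write/read flow (not part
# of this module), assuming 'server' is a foolscap RemoteReference to the
# RIStorageServer above, with the usual foolscap convention that the remote_
# prefix is dropped from method names:
#
#   d = server.callRemote("allocate_buckets", storage_index,
#                         set([0, 1]), 1000, canary)
#   def _got((alreadygot, bucketwriters)):
#       pass  # write each share through its RIBucketWriter, then close it
#   d.addCallback(_got)
#
#   d = server.callRemote("get_buckets", storage_index)
#   # -> dict mapping sharenum to RIBucketReader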

"""
Share data is written into a single file. The file starts with a header of
eight four-byte big-endian fields: the segment size, the total data size, and
six offsets which indicate where each section starts. Each offset is measured
from the beginning of the file.

0x00: segment size
0x04: data size
0x08: offset of data (=00 00 00 20)
0x0c: offset of plaintext_hash_tree
0x10: offset of crypttext_hash_tree
0x14: offset of block_hashes
0x18: offset of share_hashes
0x1c: offset of uri_extension_length + uri_extension
0x20: start of data
?   : start of plaintext_hash_tree
?   : start of crypttext_hash_tree
?   : start of block_hashes
?   : start of share_hashes
      each share_hash is written as a two-byte (big-endian) hashnum
      followed by the 32-byte SHA-256 hash. We only store the hashes
      necessary to validate the share hash root.
?   : start of uri_extension_length (four-byte big-endian value)
?   : start of uri_extension
"""

def allocated_size(data_size, num_segments, num_share_hashes,
                   uri_extension_size):
    # the offsets do not depend upon segment_size, so pass 0 for it
    wbp = WriteBucketProxy(None, data_size, 0, num_segments, num_share_hashes,
                           uri_extension_size)
    uri_extension_starts_at = wbp._offsets['uri_extension']
    return uri_extension_starts_at + 4 + uri_extension_size
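
# A worked example of the layout above (hypothetical numbers, HASH_SIZE=32):
# for data_size=700, num_segments=6, num_share_hashes=5, uri_extension_size=300,
# WriteBucketProxy rounds 6 segments up to 8, so each hash tree occupies
# (2*8-1)*32 = 480 bytes and the share hash chain occupies 5*34 = 170 bytes:
#
#   offsets['data']                 =   32  (0x20, end of the 8-field header)
#   offsets['plaintext_hash_tree']  =  732  (32 + 700)
#   offsets['crypttext_hash_tree']  = 1212  (732 + 480)
#   offsets['block_hashes']         = 1692  (1212 + 480)
#   offsets['share_hashes']         = 2172  (1692 + 480)
#   offsets['uri_extension']        = 2342  (2172 + 170)
#
#   allocated_size(700, 6, 5, 300)  = 2342 + 4 + 300 = 2646 bytes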

class WriteBucketProxy:
    implements(IStorageBucketWriter)

    def __init__(self, rref, data_size, segment_size, num_segments,
                 num_share_hashes, uri_extension_size):
        self._rref = rref
        self._data_size = data_size
        self._segment_size = segment_size
        self._num_segments = num_segments

        effective_segments = mathutil.next_power_of_k(num_segments, 2)
        self._segment_hash_size = (2*effective_segments - 1) * HASH_SIZE
        # how many share hashes are included in each share? This will be
        # about log2(num_shares).
        self._share_hash_size = num_share_hashes * (2+HASH_SIZE)
        # we commit to not sending a uri extension larger than this
        self._uri_extension_size = uri_extension_size

        offsets = self._offsets = {}
        x = 0x20
        offsets['data'] = x
        x += data_size
        offsets['plaintext_hash_tree'] = x
        x += self._segment_hash_size
        offsets['crypttext_hash_tree'] = x
        x += self._segment_hash_size
        offsets['block_hashes'] = x
        x += self._segment_hash_size
        offsets['share_hashes'] = x
        x += self._share_hash_size
        offsets['uri_extension'] = x

        offset_data = struct.pack(">LLLLLLLL",
                                  segment_size,
                                  data_size,
                                  offsets['data'],
                                  offsets['plaintext_hash_tree'],
                                  offsets['crypttext_hash_tree'],
                                  offsets['block_hashes'],
                                  offsets['share_hashes'],
                                  offsets['uri_extension'],
                                  )
        assert len(offset_data) == 8*4
        self._offset_data = offset_data

    def start(self):
        return self._write(0, self._offset_data)

    def put_block(self, segmentnum, data):
        offset = self._offsets['data'] + segmentnum * self._segment_size
        assert offset + len(data) <= self._offsets['uri_extension']
        assert isinstance(data, str)
        if segmentnum < self._num_segments-1:
            precondition(len(data) == self._segment_size,
                         len(data), self._segment_size)
        else:
            precondition(len(data) == (self._data_size -
                                       (self._segment_size *
                                        (self._num_segments - 1))),
                         len(data), self._segment_size)
        return self._write(offset, data)

    def put_plaintext_hashes(self, hashes):
        offset = self._offsets['plaintext_hash_tree']
        assert isinstance(hashes, list)
        data = "".join(hashes)
        precondition(len(data) == self._segment_hash_size,
                     len(data), self._segment_hash_size)
        precondition(offset+len(data) <= self._offsets['crypttext_hash_tree'],
                     offset, len(data), offset+len(data),
                     self._offsets['crypttext_hash_tree'])
        return self._write(offset, data)

    def put_crypttext_hashes(self, hashes):
        offset = self._offsets['crypttext_hash_tree']
        assert isinstance(hashes, list)
        data = "".join(hashes)
        precondition(len(data) == self._segment_hash_size,
                     len(data), self._segment_hash_size)
        precondition(offset + len(data) <= self._offsets['block_hashes'],
                     offset, len(data), offset+len(data),
                     self._offsets['block_hashes'])
        return self._write(offset, data)

    def put_block_hashes(self, blockhashes):
        offset = self._offsets['block_hashes']
        assert isinstance(blockhashes, list)
        data = "".join(blockhashes)
        precondition(len(data) == self._segment_hash_size,
                     len(data), self._segment_hash_size)
        precondition(offset + len(data) <= self._offsets['share_hashes'],
                     offset, len(data), offset+len(data),
                     self._offsets['share_hashes'])
        return self._write(offset, data)

    def put_share_hashes(self, sharehashes):
        # sharehashes is a list of (index, hash) tuples, so they get stored
        # as 2+32=34 bytes each
        offset = self._offsets['share_hashes']
        assert isinstance(sharehashes, list)
        data = "".join([struct.pack(">H", hashnum) + hashvalue
                        for hashnum, hashvalue in sharehashes])
        precondition(len(data) == self._share_hash_size,
                     len(data), self._share_hash_size)
        precondition(offset + len(data) <= self._offsets['uri_extension'],
                     offset, len(data), offset+len(data),
                     self._offsets['uri_extension'])
        return self._write(offset, data)

    def put_uri_extension(self, data):
        offset = self._offsets['uri_extension']
        assert isinstance(data, str)
        precondition(len(data) <= self._uri_extension_size,
                     len(data), self._uri_extension_size)
        length = struct.pack(">L", len(data))
        return self._write(offset, length+data)

    def _write(self, offset, data):
        # TODO: for small shares, buffer the writes and do just a single call
        return self._rref.callRemote("write", offset, data)

    def close(self):
        return self._rref.callRemote("close")
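
# A hypothetical caller-side sketch of the write sequence (not part of this
# module): given 'rref', a RemoteReference to the RIBucketWriter returned by
# allocate_buckets, an uploader would do roughly:
#
#   wbp = WriteBucketProxy(rref, data_size, segment_size, num_segments,
#                          num_share_hashes, uri_extension_size)
#   d = wbp.start()                       # write the 32-byte offset header
#   # then, in order: put_block() for each segment, put_plaintext_hashes(),
#   # put_crypttext_hashes(), put_block_hashes(), put_share_hashes(),
#   # put_uri_extension(), and finally close(). Each call returns a Deferred.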

class ReadBucketProxy:
    implements(IStorageBucketReader)

    def __init__(self, rref):
        self._rref = rref
        self._started = False

    def startIfNecessary(self):
        if self._started:
            return defer.succeed(self)
        d = self.start()
        d.addCallback(lambda res: self)
        return d

    def start(self):
        # TODO: for small shares, read the whole bucket in start()
        d = self._read(0, 8*4)
        d.addCallback(self._parse_offsets)
        return d

    def _parse_offsets(self, data):
        precondition(len(data) == 8*4)
        self._offsets = {}
        self._segment_size = struct.unpack(">L", data[0:4])[0]
        self._data_size = struct.unpack(">L", data[4:8])[0]
        x = 0x08
        for field in ( 'data',
                       'plaintext_hash_tree',
                       'crypttext_hash_tree',
                       'block_hashes',
                       'share_hashes',
                       'uri_extension',
                       ):
            offset = struct.unpack(">L", data[x:x+4])[0]
            x += 4
            self._offsets[field] = offset
        return self._offsets

    def get_block(self, blocknum):
        num_segments = mathutil.div_ceil(self._data_size, self._segment_size)
        if blocknum < num_segments-1:
            size = self._segment_size
        else:
            size = self._data_size % self._segment_size
            if size == 0:
                size = self._segment_size
        offset = self._offsets['data'] + blocknum * self._segment_size
        return self._read(offset, size)

    def _str2l(self, s):
        """ split string (pulled from storage) into a list of blockids """
        return [ s[i:i+HASH_SIZE]
                 for i in range(0, len(s), HASH_SIZE) ]

    def get_plaintext_hashes(self):
        offset = self._offsets['plaintext_hash_tree']
        size = self._offsets['crypttext_hash_tree'] - offset
        d = self._read(offset, size)
        d.addCallback(self._str2l)
        return d

    def get_crypttext_hashes(self):
        offset = self._offsets['crypttext_hash_tree']
        size = self._offsets['block_hashes'] - offset
        d = self._read(offset, size)
        d.addCallback(self._str2l)
        return d

    def get_block_hashes(self):
        offset = self._offsets['block_hashes']
        size = self._offsets['share_hashes'] - offset
        d = self._read(offset, size)
        d.addCallback(self._str2l)
        return d

    def get_share_hashes(self):
        offset = self._offsets['share_hashes']
        size = self._offsets['uri_extension'] - offset
        assert size % (2+HASH_SIZE) == 0
        d = self._read(offset, size)
        def _unpack_share_hashes(data):
            assert len(data) == size
            hashes = []
            for i in range(0, size, 2+HASH_SIZE):
                hashnum = struct.unpack(">H", data[i:i+2])[0]
                hashvalue = data[i+2:i+2+HASH_SIZE]
                hashes.append( (hashnum, hashvalue) )
            return hashes
        d.addCallback(_unpack_share_hashes)
        return d

    def get_uri_extension(self):
        offset = self._offsets['uri_extension']
        d = self._read(offset, 4)
        def _got_length(data):
            length = struct.unpack(">L", data)[0]
            return self._read(offset+4, length)
        d.addCallback(_got_length)
        return d

    def _read(self, offset, length):
        return self._rref.callRemote("read", offset, length)
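
# A hypothetical caller-side sketch of the read sequence (not part of this
# module): given 'rref', a RemoteReference to an RIBucketReader obtained from
# get_buckets, a downloader would do roughly:
#
#   rbp = ReadBucketProxy(rref)
#   d = rbp.startIfNecessary()            # fetch and parse the offset header
#   d.addCallback(lambda rbp: rbp.get_block(0))
#   # get_block_hashes(), get_share_hashes(), and get_uri_extension() return
#   # Deferreds in the same way, once start() has completed.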