import os, re

from foolscap import Referenceable
from twisted.application import service
from zope.interface import implements

from allmydata.interfaces import RIStorageServer, RIBucketWriter, \
     RIBucketReader
from allmydata import interfaces
from allmydata.util import bencode, fileutil, idlib
from allmydata.util.assertutil import precondition

# Storage layout on disk:
#
# store/
# store/incoming  # temp dirs named $STORAGEINDEX/$SHARENUM which will be
#                 # moved to store/$STORAGEINDEX/$SHARENUM on success
# store/$STORAGEINDEX
# store/$STORAGEINDEX/$SHARENUM
# store/$STORAGEINDEX/$SHARENUM/blocksize
# store/$STORAGEINDEX/$SHARENUM/data
# store/$STORAGEINDEX/$SHARENUM/plaintext_hashes
# store/$STORAGEINDEX/$SHARENUM/crypttext_hashes
# store/$STORAGEINDEX/$SHARENUM/blockhashes
# store/$STORAGEINDEX/$SHARENUM/sharehashes
# store/$STORAGEINDEX/$SHARENUM/uri_extension
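#
# For example (hypothetical values): share 3 of a file whose storage index
# encodes (via idlib.b2a) to "ahs6wkxu" would be uploaded into
# store/incoming/ahs6wkxu/3/ and renamed to store/ahs6wkxu/3/ on close.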

# $SHARENUM matches this regex:
NUM_RE = re.compile("^[0-9]+$")

class BucketWriter(Referenceable):
    implements(RIBucketWriter)

    def __init__(self, incominghome, finalhome, blocksize):
        self.incominghome = incominghome
        self.finalhome = finalhome
        self.blocksize = blocksize
        self.closed = False
        self._next_segnum = 0
        fileutil.make_dirs(incominghome)
        self._write_file('blocksize', str(blocksize))

    def _write_file(self, fname, data):
        # open/write/close explicitly rather than relying on refcounting
        # to flush the data to disk
        f = open(os.path.join(self.incominghome, fname), 'wb')
        f.write(data)
        f.close()

    def remote_put_block(self, segmentnum, data):
        precondition(not self.closed)
        # All blocks but the last will be of size self.blocksize; the last
        # one may be short, and we don't know the total number of segments,
        # so we can't tell which is which.
        assert len(data) <= self.blocksize
        assert segmentnum == self._next_segnum # must write in sequence
        self._next_segnum = segmentnum + 1
        f = fileutil.open_or_create(os.path.join(self.incominghome, 'data'))
        f.seek(self.blocksize*segmentnum)
        f.write(data)
        f.close()
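
    # The expected call sequence is therefore strictly sequential (a sketch;
    # the sizes are hypothetical, with blocksize=4096):
    #   put_block(0, <4096 bytes>)
    #   put_block(1, <4096 bytes>)
    #   put_block(2, <the short final block>)
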
    def remote_put_plaintext_hashes(self, hashes):
        precondition(not self.closed)
        # TODO: verify the length of hashes.
        # TODO: tighten foolscap schema to require exactly 32 bytes.
        self._write_file('plaintext_hashes', ''.join(hashes))

    def remote_put_crypttext_hashes(self, hashes):
        precondition(not self.closed)
        # TODO: verify the length of hashes.
        # TODO: tighten foolscap schema to require exactly 32 bytes.
        self._write_file('crypttext_hashes', ''.join(hashes))

    def remote_put_block_hashes(self, blockhashes):
        precondition(not self.closed)
        # TODO: verify the length of blockhashes.
        # TODO: tighten foolscap schema to require exactly 32 bytes.
        self._write_file('blockhashes', ''.join(blockhashes))

    def remote_put_share_hashes(self, sharehashes):
        precondition(not self.closed)
        self._write_file('sharehashes', bencode.bencode(sharehashes))

    def remote_put_uri_extension(self, data):
        precondition(not self.closed)
        self._write_file('uri_extension', data)

    def remote_close(self):
        precondition(not self.closed)
        # TODO: assert or check the completeness and consistency of the data
        # that has been written
        fileutil.make_dirs(os.path.dirname(self.finalhome))
        fileutil.rename(self.incominghome, self.finalhome)
        try:
            os.rmdir(os.path.dirname(self.incominghome))
        except OSError:
            # Perhaps the directory wasn't empty. In any case, ignore the
            # error.
            pass
        self.closed = True
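
# Example BucketWriter lifecycle (a sketch -- in practice these methods are
# invoked remotely via foolscap, and the paths and sizes here are
# hypothetical):
#   bw = BucketWriter("store/incoming/SI/0", "store/SI/0", blocksize=4096)
#   bw.remote_put_block(0, "a" * 4096)
#   bw.remote_put_block(1, "the short final block")
#   bw.remote_put_block_hashes(["h" * 32, "g" * 32])
#   bw.remote_close()   # commits the share by renaming it into store/SI/0
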
def str2l(s):
    """ split string (pulled from storage) into a list of blockids """
    return [ s[i:i+interfaces.HASH_SIZE]
             for i in range(0, len(s), interfaces.HASH_SIZE) ]
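
# For example, assuming interfaces.HASH_SIZE == 32 (the size the TODOs above
# want the foolscap schema to enforce):
#   str2l("a"*32 + "b"*32) == ["a"*32, "b"*32]
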
class BucketReader(Referenceable):
    implements(RIBucketReader)

    def __init__(self, home):
        self.home = home
        self.blocksize = int(self._read_file('blocksize'))

    def _read_file(self, fname):
        f = open(os.path.join(self.home, fname), 'rb')
        data = f.read()
        f.close()
        return data

    def remote_get_block(self, blocknum):
        f = open(os.path.join(self.home, 'data'), 'rb')
        f.seek(self.blocksize * blocknum)
        data = f.read(self.blocksize) # this might be short for the last block
        f.close()
        return data

    def remote_get_plaintext_hashes(self):
        return str2l(self._read_file('plaintext_hashes'))

    def remote_get_crypttext_hashes(self):
        return str2l(self._read_file('crypttext_hashes'))

    def remote_get_block_hashes(self):
        return str2l(self._read_file('blockhashes'))

    def remote_get_share_hashes(self):
        hashes = bencode.bdecode(self._read_file('sharehashes'))
        # tuples come through bdecode(bencode()) as lists, which violates the
        # schema
        return [tuple(i) for i in hashes]

    def remote_get_uri_extension(self):
        return self._read_file('uri_extension')
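
# Example BucketReader usage (a sketch; the path is hypothetical):
#   br = BucketReader("store/SI/0")
#   block0 = br.remote_get_block(0)
#   blockhashes = br.remote_get_block_hashes()
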
class StorageServer(service.MultiService, Referenceable):
    implements(RIStorageServer)
    name = 'storageserver'

    def __init__(self, storedir):
        fileutil.make_dirs(storedir)
        self.storedir = storedir
        self.incomingdir = os.path.join(storedir, 'incoming')
        self._clean_incomplete()
        fileutil.make_dirs(self.incomingdir)
        service.MultiService.__init__(self)

    def _clean_incomplete(self):
        fileutil.rm_dir(self.incomingdir)

    def remote_allocate_buckets(self, storage_index, sharenums, sharesize,
                                blocksize, canary):
        alreadygot = set()
        bucketwriters = {} # k: shnum, v: BucketWriter
        for shnum in sharenums:
            incominghome = os.path.join(self.incomingdir,
                                        idlib.b2a(storage_index),
                                        "%d" % shnum)
            finalhome = os.path.join(self.storedir,
                                     idlib.b2a(storage_index),
                                     "%d" % shnum)
            if os.path.exists(incominghome) or os.path.exists(finalhome):
                alreadygot.add(shnum)
            else:
                bucketwriters[shnum] = BucketWriter(incominghome, finalhome,
                                                    blocksize)

        return alreadygot, bucketwriters
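
    # For example (hypothetical values): a client wanting shares {0, 1, 2}
    # calls allocate_buckets(SI, set([0, 1, 2]), sharesize, blocksize,
    # canary). If share 1 is already present on disk, the result is
    # (set([1]), {0: <BucketWriter>, 2: <BucketWriter>}).
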
    def remote_get_buckets(self, storage_index):
        bucketreaders = {} # k: sharenum, v: BucketReader
        storagedir = os.path.join(self.storedir, idlib.b2a(storage_index))
        try:
            for f in os.listdir(storagedir):
                if NUM_RE.match(f):
                    bucketreaders[int(f)] = BucketReader(os.path.join(storagedir, f))
        except OSError:
            # Commonly caused by there being no buckets at all.
            pass

        return bucketreaders
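
    # For example (hypothetical): if shares 0 and 5 of SI are on disk, this
    # returns {0: <BucketReader>, 5: <BucketReader>}; an unknown storage
    # index yields {}.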