from zope.interface import Interface
from foolscap.schema import StringConstraint, ListOf, TupleOf, SetOf, DictOf, \
     ChoiceOf
from foolscap import RemoteInterface, Referenceable

HASH_SIZE=32

Hash = StringConstraint(maxLength=HASH_SIZE,
                        minLength=HASH_SIZE) # binary format 32-byte SHA256 hash
Nodeid = StringConstraint(maxLength=20,
                          minLength=20) # binary format 20-byte SHA1 hash
FURL = StringConstraint(1000)
Verifierid = StringConstraint(20)
URI = StringConstraint(300) # kind of arbitrary
MAX_BUCKETS = 200 # per peer
ShareData = StringConstraint(100000)

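# Illustrative only (not part of the schema used by these interfaces): the
# StringConstraint(minLength == maxLength) idiom above pins a binary field to
# an exact size, so a hypothetical 16-byte key field would look like this.
ExampleKey = StringConstraint(maxLength=16, minLength=16) # binary 16-byte key
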
class RIIntroducerClient(RemoteInterface):
    def new_peers(furls=SetOf(FURL)):
        return None

class RIIntroducer(RemoteInterface):
    def hello(node=RIIntroducerClient, furl=FURL):
        return None

class RIClient(RemoteInterface):
    def get_versions():
        """Return a tuple of (my_version, oldest_supported) strings.

        Each string can be parsed by an allmydata.util.version.Version
        instance, and then compared. The first goal is to make sure that
        nodes are not confused by speaking to an incompatible peer. The
        second goal is to enable the development of backwards-compatibility
        code.

        This method is likely to change in incompatible ways until we get the
        whole compatibility scheme nailed down.
        """
        return TupleOf(str, str)
    def get_service(name=str):
        return Referenceable
    def get_nodeid():
        return Nodeid

class RIBucketWriter(RemoteInterface):
    def put_block(segmentnum=int, data=ShareData):
        """@param data: For most segments, this data will be 'blocksize'
        bytes in length. The last segment might be shorter.
        """
        return None

    def put_block_hashes(blockhashes=ListOf(Hash, maxLength=2**20)):
        return None

    def put_share_hashes(sharehashes=ListOf(TupleOf(int, Hash), maxLength=2**20)):
        return None

    def close():
        """
        If the data that has been written is incomplete or inconsistent then
        the server will throw the data away, else it will store it for future
        retrieval.
        """
        return None

class RIBucketReader(RemoteInterface):
    def get_block(blocknum=int):
        """Most blocks will be the same size. The last block might be shorter
        than the others.
        """
        return ShareData
    def get_block_hashes():
        return ListOf(Hash, maxLength=2**20)
    def get_share_hashes():
        return ListOf(TupleOf(int, Hash), maxLength=2**20)

class RIStorageServer(RemoteInterface):
    def allocate_buckets(verifierid=Verifierid,
                         sharenums=SetOf(int, maxLength=MAX_BUCKETS),
                         sharesize=int, blocksize=int, canary=Referenceable):
        """
        @param canary: If the canary is lost before close(), the bucket is deleted.
        @return: tuple of (alreadygot, allocated), where alreadygot is what we
                 already have and allocated is what we hereby agree to accept
        """
        return TupleOf(SetOf(int, maxLength=MAX_BUCKETS),
                       DictOf(int, RIBucketWriter, maxKeys=MAX_BUCKETS))

    def get_buckets(verifierid=Verifierid):
        return DictOf(int, RIBucketReader, maxKeys=MAX_BUCKETS)

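# A hedged client-side sketch (the helper name is hypothetical): once the
# Deferred returned by allocate_buckets() fires with the (alreadygot,
# allocated) tuple described above, an uploader can work out which of its
# desired share numbers found no home on this server.
def _example_shares_without_homes(sharenums, alreadygot, allocated):
    # sharenums: the share numbers we asked this server to accept
    # alreadygot: share numbers the server already holds
    # allocated: dict mapping share number -> RIBucketWriter
    return set(sharenums) - set(alreadygot) - set(allocated.keys())
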
# hm, we need a solution for forward references in schemas
from foolscap.schema import Any
RIMutableDirectoryNode_ = Any() # TODO: how can we avoid this?

class RIMutableDirectoryNode(RemoteInterface):
    def list():
        return ListOf( TupleOf(str, # name, relative to directory
                               ChoiceOf(RIMutableDirectoryNode_, Verifierid)),
                       maxLength=100,
                       )

    def get(name=str):
        return ChoiceOf(RIMutableDirectoryNode_, Verifierid)

    def add_directory(name=str):
        return RIMutableDirectoryNode_

    def add_file(name=str, uri=URI):
        return None

    def remove(name=str):
        return None

    # need more to move directories

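# A hedged sketch of consuming the schema above from the client side: remote
# methods are invoked through foolscap's callRemote() and return Deferreds.
# 'node' here is a hypothetical RemoteReference to an RIMutableDirectoryNode.
def _example_print_listing(node):
    d = node.callRemote("list")
    def _show(entries):
        # each entry is (name, child), where child is either a directory
        # node or a Verifierid, per the ChoiceOf() above
        for (name, child) in entries:
            print name, child
    d.addCallback(_show)
    return d
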
class ICodecEncoder(Interface):
    def set_params(data_size, required_shares, max_shares):
        """Set up the parameters of this encoder.

        This prepares the encoder to perform an operation that converts a
        single block of data into a number of shares, such that a future
        ICodecDecoder can use a subset of these shares to recover the
        original data. This operation is invoked by calling encode(). Once
        the encoding parameters are set up, the encode operation can be
        invoked multiple times.

        set_params() prepares the encoder to accept blocks of input data that
        are exactly 'data_size' bytes in length. The encoder will be prepared
        to produce 'max_shares' shares for each encode() operation (although
        see the 'desired_share_ids' argument for a way to spend less CPU).
        The encoding math will be chosen such that the decoder can get by
        with as few as 'required_shares' of these shares and still reproduce
        the original data. For example, set_params(1000, 5, 5) offers no
        redundancy at all, whereas set_params(1000, 1, 10) provides 10x
        redundancy.

        Numerical Restrictions: 'data_size' is required to be an integral
        multiple of 'required_shares'. In general, the caller should choose
        required_shares and max_shares based upon their reliability
        requirements and the number of peers available (the total storage
        space used is roughly equal to max_shares*data_size/required_shares),
        then choose data_size to achieve the memory footprint desired (larger
        data_size means more efficient operation, smaller data_size means
        smaller memory footprint).

        In addition, 'max_shares' must be equal to or greater than
        'required_shares'. Of course, setting them to be equal causes
        encode() to degenerate into a particularly slow form of the 'split'
        utility.

        See encode() for more details about how these parameters are used.

        set_params() must be called before any other ICodecEncoder methods
        may be invoked.
        """

    def get_encoder_type():
        """Return a short string that describes the type of this encoder.

        There is required to be a global table of encoder classes. This
        method returns an index into this table; the value at this index is
        an encoder class, and this encoder is an instance of that class.
        """

    def get_serialized_params(): # TODO: maybe, maybe not
        """Return a string that describes the parameters of this encoder.

        This string can be passed to the decoder to prepare it for handling
        the encoded shares we create. It might contain more information than
        was presented to set_params(), if there is some flexibility of
        parameter choice.

        This string is intended to be embedded in the URI, so there are
        several restrictions on its contents. At the moment I'm thinking that
        this means it may contain hex digits and hyphens, and nothing else.
        The idea is that the URI contains something like '%s:%s:%s' %
        (encoder.get_encoder_name(), encoder.get_serialized_params(),
        b2a(verifierid)), and this is enough information to construct a
        compatible decoder.
        """

    def get_block_size():
        """Return the length of the shares that encode() will produce.
        """

    def encode_proposal(data, desired_share_ids=None):
        """Encode some data.

        'data' must be a string (or other buffer object), and len(data) must
        be equal to the 'data_size' value passed earlier to set_params().

        This will return a Deferred that will fire with two lists. The first
        is a list of shares, each of which is a string (or other buffer
        object) such that len(share) is the same as what get_share_size()
        returned earlier. The second is a list of shareids, in which each is
        an integer. The lengths of the two lists will always be equal to each
        other. The user should take care to keep each share closely
        associated with its shareid, as one is useless without the other.

        The length of this output list will normally be the same as the value
        provided to the 'max_shares' parameter of set_params(). This may be
        different if 'desired_share_ids' is provided.

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.
        """

    def encode(inshares, desired_share_ids=None):
        """Encode some data. This may be called multiple times. Each call is
        independent.

        inshares is a sequence of length required_shares, containing buffers
        (i.e. strings), where each buffer contains the next contiguous
        non-overlapping segment of the input data. Each buffer is required to
        be the same length, and the sum of the lengths of the buffers is
        required to be exactly the data_size promised by set_params(). (This
        implies that the data has to be padded before being passed to
        encode(), unless of course it already happens to be an even multiple
        of required_shares in length.) See the sketch after this class for
        one way a caller might perform that segmentation.

        ALSO: the requirement to break up your data into 'required_shares'
        chunks before calling encode() feels a bit surprising, at least from
        the point of view of a user who doesn't know how FEC works. It feels
        like an implementation detail that has leaked outside the
        abstraction barrier. Can you imagine a use case in which the data to
        be encoded might already be available in pre-segmented chunks, such
        that it is faster or less work to make encode() take a list rather
        than splitting a single string?

        ALSO ALSO: I think 'inshares' is a misleading term, since encode()
        is supposed to *produce* shares, so what it *accepts* should be
        something other than shares. Other places in this interface use the
        word 'data' for that-which-is-not-shares.. maybe we should use that
        term?

        'desired_share_ids', if provided, is required to be a sequence of
        ints, each of which is required to be >= 0 and < max_shares. If not
        provided, encode() will produce 'max_shares' shares, as if
        'desired_share_ids' were set to range(max_shares). You might use this
        if you initially thought you were going to use 10 peers, started
        encoding, and then two of the peers dropped out: you could use
        desired_share_ids= to skip the work (both memory and CPU) of
        producing shares for the peers which are no longer available.

        For each call, encode() will return a Deferred that fires with two
        lists, one containing shares and the other containing the shareids.
        The get_share_size() method can be used to determine the length of
        the share strings returned by encode(). Each shareid is a small
        integer, exactly as passed into 'desired_share_ids' (or
        range(max_shares), if desired_share_ids was not provided).

        The shares and their corresponding shareids are required to be kept
        together during storage and retrieval. Specifically, the share data
        is useless by itself: the decoder needs to be told which share is
        which by providing it with both the shareid and the actual share
        data.

        This function will allocate an amount of memory roughly equal to::

         (max_shares - required_shares) * get_share_size()

        When combined with the memory that the caller must allocate to
        provide the input data, this leads to a memory footprint roughly
        equal to the size of the resulting encoded shares (i.e. the
        expansion factor times the size of the input segment).
        """

    # rejected ideas:
    #
    # returning a list of (shareidN,shareN) tuples instead of a pair of
    # lists (shareids..,shares..). Brian thought the tuples would
    # encourage users to keep the share and shareid together throughout
    # later processing, Zooko pointed out that the code to iterate
    # through two lists is not really more complicated than using a list
    # of tuples and there's also a performance improvement
    #
    # having 'data_size' not required to be an integral multiple of
    # 'required_shares'. Doing this would require encode() to perform
    # padding internally, and we'd prefer to have any padding be done
    # explicitly by the caller. Yes, it is an abstraction leak, but
    # hopefully not an onerous one.

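# A minimal sketch (not part of the interface) of the caller-side padding and
# segmentation that ICodecEncoder.encode() expects: pad the data to an even
# multiple of required_shares, then split it into that many equal-length
# buffers. The helper name is hypothetical.
def _example_segment_data(data, required_shares):
    remainder = len(data) % required_shares
    if remainder:
        # pad with NUL bytes so the chunks come out equal-length
        data = data + "\x00" * (required_shares - remainder)
    chunk_size = len(data) // required_shares
    return [data[i*chunk_size:(i+1)*chunk_size]
            for i in range(required_shares)]
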
class ICodecDecoder(Interface):
    def set_serialized_params(params):
        """Set up the parameters of this decoder, from a string returned by
        encoder.get_serialized_params()."""

    def get_needed_shares():
        """Return the number of shares needed to reconstruct the data.
        set_serialized_params() is required to be called before this."""

    def decode(some_shares, their_shareids):
        """Decode a partial list of shares into data.

        'some_shares' is required to be a sequence of buffers of sharedata, a
        subset of the shares returned by ICodecEncoder.encode(). Each share
        is required to be of the same length. The i'th element of
        their_shareids is required to be the shareid of the i'th buffer in
        some_shares.

        This returns a Deferred which fires with a sequence of buffers. This
        sequence will contain all of the segments of the original data, in
        order. The sum of the lengths of all of the buffers will be the
        'data_size' value passed into the original ICodecEncoder.set_params()
        call. To get back the single original input block of data, use
        ''.join(output_buffers), or you may wish to simply write them in
        order to an output file.

        Note that some of the elements in the result sequence may be
        references to the elements of the some_shares input sequence. In
        particular, this means that if those share objects are mutable (e.g.
        arrays) and if they are changed, then both the input (the
        'some_shares' parameter) and the output (the value given when the
        deferred is triggered) will change.

        The length of 'some_shares' is required to be exactly the value of
        'required_shares' passed into the original ICodecEncoder.set_params()
        call.
        """

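# A hedged sketch of the encoder/decoder contract described above: the
# 'encoder' and 'decoder' instances and the pre-segmented 'segments' list are
# hypothetical, and both encode() and decode() return Twisted Deferreds, so
# the steps are chained with callbacks.
def _example_codec_round_trip(encoder, decoder, segments):
    decoder.set_serialized_params(encoder.get_serialized_params())
    d = encoder.encode(segments)
    def _decode(res):
        shares, shareids = res
        # pretend every share beyond the first 'required_shares' was lost
        k = decoder.get_needed_shares()
        return decoder.decode(shares[:k], shareids[:k])
    d.addCallback(_decode)
    # the decoder fires with the data segments, in order; join them to
    # recover the (possibly padded) input block
    d.addCallback(lambda buffers: ''.join(buffers))
    return d
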
class IEncoder(Interface):
    """I take a file-like object that provides a sequence of bytes and a list
    of shareholders, then encrypt, encode, hash, and deliver shares to those
    shareholders. I will compute all the Merkle hash trees that are necessary
    to validate the data that eventually comes back from the shareholders. I
    provide the root hash of the hash tree, and the encoding parameters, both
    of which must be included in the URI.

    I do not choose shareholders, that is left to the IUploader. I must be
    given a dict of RemoteReferences to storage buckets that are ready and
    willing to receive data.
    """

    def setup(infile):
        """I take a file-like object (providing seek, tell, and read) from
        which all the plaintext data that is to be uploaded can be read. I
        will seek to the beginning of the file before reading any data.
        setup() must be called before making any other calls, in particular
        before calling get_reservation_size().
        """

    def get_share_size():
        """I return the size of the data that will be stored on each
        shareholder. This is the aggregate amount of data that will be sent
        to the shareholder, summed over all the put_block() calls I will
        ever make.

        TODO: this might also include some amount of overhead, like the size
        of all the hashes. We need to decide whether this is useful or not.

        It is useful to determine this size before asking potential
        shareholders whether they will grant a lease or not, since their
        answers will depend upon how much space we need.
        """

    def get_block_size(): # TODO: can we avoid exposing this?
        """I return the size of the individual blocks that will be delivered
        to a shareholder's put_block() method. By knowing this, the
        shareholder will be able to keep all blocks in a single file and
        still provide random access when reading them.
        """

    def set_shareholders(shareholders):
        """I take a dictionary that maps share identifiers (small integers,
        starting at 0) to RemoteReferences that provide RIBucketWriter. This
        must be called before start().
        """

    def start():
        """I start the upload. This process involves reading data from the
        input file, encrypting it, encoding the pieces, uploading the shares
        to the shareholders, then sending the hash trees.

        I return a Deferred that fires with the root hash.
        """

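# A hedged sketch of how an uploader might drive an IEncoder provider; the
# 'encoder', 'infile', and 'buckets' objects are hypothetical stand-ins.
def _example_drive_encoder(encoder, infile, buckets):
    encoder.setup(infile)              # must happen before any other call
    encoder.set_shareholders(buckets)  # dict: share number -> RIBucketWriter
    return encoder.start()             # Deferred that fires with the root hash
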
class IDecoder(Interface):
    """I take a list of shareholders and some setup information, then
    download, validate, decode, and decrypt data from them, writing the
    results to an output file.

    I do not locate the shareholders, that is left to the IDownloader. I must
    be given a dict of RemoteReferences to storage buckets that are ready to
    send data.
    """

    def setup(outfile):
        """I take a file-like object (providing write and close) to which all
        the plaintext data will be written.

        TODO: producer/consumer. Maybe write() should return a Deferred that
        indicates when it will accept more data? But probably having the
        IDecoder be a producer is easier to glue to IConsumer pieces.
        """

    def set_shareholders(shareholders):
        """I take a dictionary that maps share identifiers (small integers)
        to RemoteReferences that provide RIBucketReader. This must be called
        before start()."""

    def start():
        """I start the download. This process involves retrieving data and
        hash chains from the shareholders, using the hashes to validate the
        data, decoding the shares into segments, decrypting the segments,
        then writing the resulting plaintext to the output file.

        I return a Deferred that will fire (with self) when the download is
        complete.
        """

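# The matching sketch for the download side; again 'decoder', 'outfile', and
# 'buckets' are hypothetical stand-ins for real objects.
def _example_drive_decoder(decoder, outfile, buckets):
    decoder.setup(outfile)             # where the recovered plaintext goes
    decoder.set_shareholders(buckets)  # dict: share number -> RIBucketReader
    return decoder.start()             # Deferred that fires (with self) when done
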
class IDownloadTarget(Interface):
    def open():
        """Called before any calls to write() or close()."""
    def write(data):
        """Output some data to the target."""
    def close():
        """Inform the target that there is no more data to be written."""
    def fail():
        """fail() is called to indicate that the download has failed. No
        further methods will be invoked on the IDownloadTarget after fail()."""
    def register_canceller(cb):
        """The FileDownloader uses this to register a no-argument function
        that the target can call to cancel the download. Once this canceller
        is invoked, no further calls to write() or close() will be made."""
    def finish():
        """When the FileDownloader is done, this finish() function will be
        called. Whatever it returns will be returned to the invoker of
        Downloader.download.
        """

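# A minimal in-memory IDownloadTarget sketch, purely illustrative (a real
# implementation would also declare that it provides the interface and would
# usually write to a file rather than accumulate strings).
class _ExampleStringTarget:
    def __init__(self):
        self.pieces = []
        self.canceller = None
    def open(self):
        pass
    def write(self, data):
        self.pieces.append(data)
    def close(self):
        pass
    def fail(self):
        self.pieces = None
    def register_canceller(self, cb):
        self.canceller = cb
    def finish(self):
        # this return value is handed back from Downloader.download()
        return ''.join(self.pieces)
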
class IDownloader(Interface):
    def download(uri, target):
        """Perform a CHK download, sending the data to the given target.
        'target' must provide IDownloadTarget."""

class IUploadable(Interface):
    def get_filehandle():
        """Return a filehandle from which the data to be uploaded can be
        read. It must implement .read, .seek, and .tell (since the latter two
        are used to determine the length of the data)."""
    def close_filehandle(f):
        """The upload is finished. This provides the same filehandle as was
        returned by get_filehandle. This is an appropriate place to close the
        filehandle."""

class IUploader(Interface):
    def upload(uploadable):
        """Upload the file. 'uploadable' must implement IUploadable. This
        returns a Deferred which fires with the URI of the file."""

    def upload_ssk(write_capability, new_version, uploadable):
        """TODO: how should this work?"""

    def upload_data(data):
        """Like upload(), but accepts a string."""

    def upload_filename(filename):
        """Like upload(), but accepts an absolute pathname."""

    def upload_filehandle(filehandle):
        """Like upload(), but accepts an open filehandle."""

class IWorkQueue(Interface):
    """Each filetable root is associated with a work queue, which is
    persisted on disk and contains idempotent actions that need to be
    performed. After each action is completed, it is removed from the queue.

    The queue is broken up into several sections. First are the 'upload'
    steps. After this are the 'add_subpath' commands. The last section has
    the 'unlink' steps. Somewhere in here are the 'retain' steps.. maybe
    interspersed with 'upload', maybe after 'add_subpath' and before
    'unlink'.

    The general idea is that the processing of the work queue could be
    interrupted at any time, in the middle of a step, and the next time the
    application is started, the step can be re-started without problems. The
    placement of the 'retain' commands depends upon how long we might expect
    the app to be offline.

    tempfiles: the workqueue has a special directory where temporary files
    are stored. create_tempfile() generates these files, while steps like
    add_upload_chk() use them. The add_delete_tempfile() step will delete the
    tempfile. All tempfiles are deleted when the workqueue becomes empty,
    since at that point none of them can still be referenced.

    boxes: there is another special directory where named slots (called
    'boxes') hold serialized INode specifications (the strings which are
    returned by INode.serialize_node()). Boxes are created by calling
    create_boxname(). Boxes are filled either at the time of creation or by
    steps like add_upload_chk(). Boxes are used by steps like add_addpath()
    and add_retain_uri_from_box(). Boxes are deleted by add_delete_box(), as
    well as when the workqueue becomes empty. A sketch of a typical sequence
    of steps appears after this class.
    """

    def create_tempfile(suffix=""):
        """Return (f, filename), where 'f' is an open filehandle, and
        'filename' is a string that can be passed to other workqueue steps to
        refer to that same file later. NOTE: 'filename' is not an absolute
        path, rather it will be interpreted relative to some directory known
        only by the workqueue."""
    def create_boxname(contents=None):
        """Return a unique box name (as a string). If 'contents' are
        provided, it must be an instance that provides INode, and the
        serialized form of the node will be written into the box. Otherwise
        the boxname can be used by steps like add_upload_chk to hold the
        generated uri."""

    def add_upload_chk(source_filename, stash_uri_in_boxname):
        """This step uploads a file to the grid and obtains a content-based
        URI which can be used to later retrieve the same contents ('CHK'
        mode). This URI includes unlink rights. It does not mark the file for
        retention.

        Non-absolute filenames are interpreted relative to the workqueue's
        special just-for-tempfiles directory.

        When the upload is complete, the resulting URI is stashed in a 'box'
        with the specified name. This is basically a local variable. A later
        'add_subpath' step will reference this boxname and retrieve the URI.
        """

    def add_upload_ssk(write_capability, previous_version, source_filename):
        """This step uploads a file to the grid in a way that replaces the
        previous version and does not require a change to the ID referenced
        by the parent.
        """

    def add_vdrive_update_handle(handle, source_filename):
        """Arrange for a vdrive server to be notified that the given handle
        has been updated with the contents of the given tempfile. This will
        send a set_handle() message to the vdrive."""

    def add_retain_ssk(read_capability):
        """Arrange for the given SSK to be kept alive."""

    def add_unlink_ssk(write_capability):
        """Stop keeping the given SSK alive."""

    def add_retain_uri_from_box(boxname):
        """When executed, this step retrieves the URI from the given box and
        marks it for retention: this adds it to a list of all URIs that this
        system cares about, which will initiate filechecking/repair for the
        file."""

    def add_addpath(boxname, path):
        """When executed, this step pulls a node specification from 'boxname'
        and figures out which subtrees must be modified to allow that node to
        live at the 'path' (which is an absolute path). This will probably
        cause one or more 'add_modify_subtree' or 'add_modify_redirection'
        steps to be added to the workqueue.
        """

    def add_deletepath(path):
        """When executed, finds the subtree that contains the node at 'path'
        and modifies it (and any necessary parent subtrees) to delete that
        path. This will probably cause one or more 'add_modify_subtree' or
        'add_modify_redirection' steps to be added to the workqueue.
        """

    def add_modify_subtree(subtree_node, localpath, new_node_boxname,
                           new_subtree_boxname=None):
        """When executed, this step retrieves the subtree specified by
        'subtree_node', pulls a node specification out of 'new_node_boxname',
        then modifies the subtree such that a subtree-relative 'localpath'
        points to the new node. If 'new_node_boxname' is None, this deletes
        the given path. It then serializes the subtree in its new form, and
        optionally puts a node that describes the new subtree in
        'new_subtree_boxname' for use by another add_modify_subtree step.

        The idea is that 'subtree_node' will refer to a CHKDirectorySubTree,
        and 'new_node_boxname' will contain the CHKFileNode that points to a
        newly-uploaded file. When the CHKDirectorySubTree is modified, it
        acquires a new URI, which will be stuffed (in the form of a
        CHKDirectorySubTreeNode) into 'new_subtree_boxname'. A subsequent
        step would then read from 'new_subtree_boxname' and modify some other
        subtree with the contents.

        If 'subtree_node' refers to a redirection subtree like
        LocalFileRedirection or VdriveRedirection, then 'localpath' is
        ignored, because redirection subtrees don't consume path components
        and have no internal directory structure (they just have the one
        redirection target). Redirection subtrees generally retain a constant
        identity, so it is unlikely that 'new_subtree_boxname' will be used.
        """

    def add_unlink_uri(uri):
        """When executed, this step will unlink the data referenced by the
        given URI: the unlink rights are used to tell any shareholders to
        unlink the file (possibly deleting it), and the URI is removed from
        the list that this system cares about, cancelling filechecking/repair
        for the file.

        All 'unlink' steps are pushed to the end of the queue.
        """

    def add_delete_tempfile(filename):
        """This step will delete a tempfile created by create_tempfile."""

    def add_delete_box(boxname):
        """When executed, this step deletes the given box."""


    # methods for use in unit tests

    def flush():
        """Execute all steps in the WorkQueue right away. Return a Deferred
        that fires (with self) when the queue is empty.
        """

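# The sketch referenced from the IWorkQueue docstring: a plausible sequence
# of queued steps for uploading a local tempfile and linking it into the
# vdrive at an absolute path. 'workqueue' and 'tempfile_name' are
# hypothetical; the workqueue itself decides the final execution order of
# the sections described above.
def _example_queue_chk_upload(workqueue, tempfile_name, path):
    boxname = workqueue.create_boxname()
    workqueue.add_upload_chk(tempfile_name, boxname)  # stash the URI in the box
    workqueue.add_retain_uri_from_box(boxname)        # keep the file alive
    workqueue.add_addpath(boxname, path)              # link it at 'path'
    workqueue.add_delete_box(boxname)
    workqueue.add_delete_tempfile(tempfile_name)
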
class NotCapableError(Exception):
    """You have tried to write to a read-only node."""


class RIControlClient(RemoteInterface):

    def wait_for_client_connections(num_clients=int):
        """Do not return until we have connections to at least NUM_CLIENTS
        storage servers.
        """

    def upload_from_file_to_uri(filename=str):
        """Upload a file to the grid. This accepts a filename (which must be
        absolute) that points to a file on the node's local disk. The node
        will read the contents of this file, upload it to the grid, then
        return the URI at which it was uploaded.
        """
        return URI

    def download_from_uri_to_file(uri=URI, filename=str):
        """Download a file from the grid, placing it on the node's local disk
        at the given filename (which must be absolute[?]). Returns the
        absolute filename where the file was written."""
        return str

    # debug stuff

    def get_memory_usage():
        """Return a dict that describes the amount of memory currently in
        use. The keys are 'VmPeak', 'VmSize', and 'VmData'. The values are
        integers, measuring memory consumption in bytes."""
        return DictOf(str, int)