import os, time, math

from zope.interface import implements
from twisted.internet import defer
import simplejson
from allmydata.mutable.common import NotMutableError
from allmydata.mutable.node import MutableFileNode
from allmydata.interfaces import IMutableFileNode, IDirectoryNode, \
     IURI, IFileNode, IMutableFileURI, IVerifierURI, IFilesystemNode, \
     ExistingChildError, ICheckable
from allmydata.checker_results import DeepCheckResults, \
     DeepCheckAndRepairResults
from allmydata.util import hashutil, mathutil, base32, log
from allmydata.util.hashutil import netstring
from allmydata.util.limiter import ConcurrencyLimiter
from allmydata.uri import NewDirectoryURI
from pycryptopp.cipher.aes import AES

def split_netstring(data, numstrings, allow_leftover=False):
    """like string.split(), but extracts netstrings. If allow_leftover=False,
    returns numstrings elements, and throws ValueError if there was leftover
    data. If allow_leftover=True, returns numstrings+1 elements, in which the
    last element is the leftover data (possibly an empty string)"""
    elements = []
    assert numstrings >= 0
    while data:
        colon = data.index(":")
        length = int(data[:colon])
        string = data[colon+1:colon+1+length]
        assert len(string) == length
        elements.append(string)
        assert data[colon+1+length] == ","
        data = data[colon+1+length+1:]
        if len(elements) == numstrings:
            break
    if len(elements) < numstrings:
        raise ValueError("ran out of netstrings")
    if allow_leftover:
        return tuple(elements + [data])
    if data:
        raise ValueError("leftover data in netstrings")
    return tuple(elements)
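
# A small, runnable illustration (not used elsewhere in this module):
# netstring() from allmydata.util.hashutil produces the length-prefixed
# format that split_netstring() consumes, e.g. netstring("abc") == "3:abc,".
def _split_netstring_example():
    data = netstring("abc") + netstring("hello")  # "3:abc,5:hello,"
    assert split_netstring(data, 2) == ("abc", "hello")
    # with allow_leftover=True, the unparsed tail comes back as an extra
    # element instead of raising ValueError:
    assert split_netstring(data, 1, True) == ("abc", "5:hello,")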

class Deleter:
    def __init__(self, node, name, must_exist=True):
        self.node = node
        self.name = name
        self.must_exist = must_exist

    def modify(self, old_contents):
        children = self.node._unpack_contents(old_contents)
        if self.name not in children:
            if self.must_exist:
                raise KeyError(self.name)
            self.old_child = None
            # nothing to delete; leave the contents as-is
            return None
        self.old_child, metadata = children[self.name]
        del children[self.name]
        new_contents = self.node._pack_contents(children)
        return new_contents

class MetadataSetter:
    def __init__(self, node, name, metadata):
        self.node = node
        self.name = name
        self.metadata = metadata

    def modify(self, old_contents):
        children = self.node._unpack_contents(old_contents)
        children[self.name] = (children[self.name][0], self.metadata)
        new_contents = self.node._pack_contents(children)
        return new_contents

class Adder:
    def __init__(self, node, entries=None, overwrite=True):
        self.node = node
        if entries is None:
            entries = []
        self.entries = entries
        self.overwrite = overwrite

    def set_node(self, name, node, metadata):
        self.entries.append( [name, node, metadata] )

    def modify(self, old_contents):
        children = self.node._unpack_contents(old_contents)
        now = time.time()
        for e in self.entries:
            if len(e) == 2:
                name, child = e
                new_metadata = None
            else:
                assert len(e) == 3
                name, child, new_metadata = e
            assert isinstance(name, unicode)
            if name in children:
                if not self.overwrite:
                    raise ExistingChildError("child '%s' already exists" % name)
                metadata = children[name][1].copy()
            else:
                metadata = {"ctime": now,
                            "mtime": now}
            if new_metadata is None:
                # update timestamps
                if "ctime" not in metadata:
                    metadata["ctime"] = now
                metadata["mtime"] = now
            else:
                # just replace it
                metadata = new_metadata.copy()
            children[name] = (child, metadata)
        new_contents = self.node._pack_contents(children)
        return new_contents
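
# The modifier classes above (Deleter, MetadataSetter, Adder) all follow the
# same pattern: capture the arguments, then let the dirnode pass the bound
# modify() method to its backing MutableFileNode, which invokes it with the
# current packed contents and stores whatever new contents it returns. A
# minimal usage sketch ('dirnode' and u"old-name" are hypothetical; the
# NewDirectoryNode methods below normally do this for you):
#
#   deleter = Deleter(dirnode, u"old-name", must_exist=False)
#   d = dirnode._node.modify(deleter.modify)
#   d.addCallback(lambda ignored: deleter.old_child)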

class NewDirectoryNode:
    implements(IDirectoryNode, ICheckable)
    filenode_class = MutableFileNode

    def __init__(self, client):
        self._client = client
        self._most_recent_size = None

    def __repr__(self):
        return "<%s %s %s>" % (self.__class__.__name__,
                               self.is_readonly() and "RO" or "RW",
                               hasattr(self, '_uri') and self._uri.abbrev())

    def init_from_uri(self, myuri):
        self._uri = IURI(myuri)
        self._node = self.filenode_class(self._client)
        self._node.init_from_uri(self._uri.get_filenode_uri())
        return self

    def create(self, keypair_generator=None):
        """
        Returns a deferred that eventually fires with self once the directory
        has been created (distributed across a set of storage servers).
        """
        # first we create a MutableFileNode with empty_contents, then use its
        # URI to create our own.
        self._node = self.filenode_class(self._client)
        empty_contents = self._pack_contents({})
        d = self._node.create(empty_contents, keypair_generator)
        d.addCallback(self._filenode_created)
        return d

    def _filenode_created(self, res):
        self._uri = NewDirectoryURI(IMutableFileURI(self._node.get_uri()))
        return self

    def get_size(self):
        # return the size of our backing mutable file, in bytes, if we've
        # fetched it.
        return self._most_recent_size

    def _set_size(self, data):
        self._most_recent_size = len(data)
        return data

    def _read(self):
        d = self._node.download_best_version()
        d.addCallback(self._set_size)
        d.addCallback(self._unpack_contents)
        return d

    def _encrypt_rwcap(self, rwcap):
        assert isinstance(rwcap, str)
        IV = os.urandom(16)
        key = hashutil.mutable_rwcap_key_hash(IV, self._node.get_writekey())
        cryptor = AES(key)
        crypttext = cryptor.process(rwcap)
        mac = hashutil.hmac(key, IV + crypttext)
        assert len(mac) == 32
        return IV + crypttext + mac

    def _decrypt_rwcapdata(self, encwrcap):
        IV = encwrcap[:16]
        crypttext = encwrcap[16:-32]
        mac = encwrcap[-32:]
        key = hashutil.mutable_rwcap_key_hash(IV, self._node.get_writekey())
        if mac != hashutil.hmac(key, IV+crypttext):
            raise hashutil.IntegrityCheckError("HMAC does not match, crypttext is corrupted")
        cryptor = AES(key)
        plaintext = cryptor.process(crypttext)
        return plaintext
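
    # Layout of an encrypted rwcap entry, as produced by _encrypt_rwcap()
    # above (sizes in bytes; a sketch of the format, with AES as supplied
    # by pycryptopp):
    #
    #   [ IV (16) | AES(key, rwcap) | HMAC(key, IV + crypttext) (32) ]
    #
    # where key = hashutil.mutable_rwcap_key_hash(IV, writekey).
    # _decrypt_rwcapdata() recomputes the HMAC and refuses to return the
    # plaintext unless it matches.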

    def _create_node(self, child_uri):
        return self._client.create_node_from_uri(child_uri)

    def _unpack_contents(self, data):
        # the directory is serialized as a list of netstrings, one per child.
        # Each child is serialized as a list of four netstrings: (name,
        # rocap, rwcap, metadata), in which the name, rocap, and metadata
        # are in cleartext. The 'name' is UTF-8 encoded. The rwcap is
        # formatted as:
        #  pack("16ss32s", iv, AES(H(writekey+iv), plaintext_rwcap), mac)
        assert isinstance(data, str)
        # an empty directory is serialized as an empty string
        if data == "":
            return {}
        writeable = not self.is_readonly()
        children = {}
        while len(data) > 0:
            entry, data = split_netstring(data, 1, True)
            name, rocap, rwcapdata, metadata_s = split_netstring(entry, 4)
            name = name.decode("utf-8")
            if writeable:
                rwcap = self._decrypt_rwcapdata(rwcapdata)
                child = self._create_node(rwcap)
            else:
                child = self._create_node(rocap)
            metadata = simplejson.loads(metadata_s)
            assert isinstance(metadata, dict)
            children[name] = (child, metadata)
        return children

    def _pack_contents(self, children):
        # expects children in the same format as _unpack_contents
        assert isinstance(children, dict)
        entries = []
        for name in sorted(children.keys()):
            child, metadata = children[name]
            assert isinstance(name, unicode)
            assert (IFileNode.providedBy(child)
                    or IMutableFileNode.providedBy(child)
                    or IDirectoryNode.providedBy(child)), (name, child)
            assert isinstance(metadata, dict)
            rwcap = child.get_uri() # might be RO if the child is not writeable
            rocap = child.get_readonly_uri()
            entry = "".join([netstring(name.encode("utf-8")),
                             netstring(rocap),
                             netstring(self._encrypt_rwcap(rwcap)),
                             netstring(simplejson.dumps(metadata))])
            entries.append(netstring(entry))
        return "".join(entries)
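
    # Serialization sketch (hypothetical child named u"foo"): each child
    # becomes one netstring whose body is itself four netstrings, and the
    # packed directory is just the concatenation of those entries:
    #
    #   entry  = netstring("foo") + netstring(rocap) \
    #            + netstring(encrypted_rwcap) + netstring(metadata_json)
    #   packed = netstring(entry) + netstring(next_entry) + ...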

    def is_readonly(self):
        return self._node.is_readonly()

    def is_mutable(self):
        return self._node.is_mutable()

    def get_uri(self):
        return self._uri.to_string()

    def get_readonly_uri(self):
        return self._uri.get_readonly().to_string()

    def get_verifier(self):
        return self._uri.get_verifier().to_string()

    def get_storage_index(self):
        return self._uri._filenode_uri.storage_index

    def check(self, verify=False):
        """Perform a file check. See IChecker.check for details."""
        return self._node.check(verify)

    def check_and_repair(self, verify=False):
        return self._node.check_and_repair(verify)

    def list(self):
        """I return a Deferred that fires with a dictionary mapping child
        name to a tuple of (IFileNode or IDirectoryNode, metadata)."""
        return self._read()

    def has_child(self, name):
        """I return a Deferred that fires with a boolean, True if there
        exists a child of the given name, False if not."""
        assert isinstance(name, unicode)
        d = self._read()
        d.addCallback(lambda children: name in children)
        return d

    def _get(self, children, name):
        child = children.get(name)
        if child is None:
            raise KeyError(name)
        return child[0]

    def get(self, name):
        """I return a Deferred that fires with the named child node,
        which is either an IFileNode or an IDirectoryNode."""
        assert isinstance(name, unicode)
        d = self._read()
        d.addCallback(self._get, name)
        return d

    def get_metadata_for(self, name):
        assert isinstance(name, unicode)
        d = self._read()
        d.addCallback(lambda children: children[name][1])
        return d

    def set_metadata_for(self, name, metadata):
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        assert isinstance(metadata, dict)
        s = MetadataSetter(self, name, metadata)
        d = self._node.modify(s.modify)
        d.addCallback(lambda res: self)
        return d

    def get_child_at_path(self, path):
        """Transform a child path into an IDirectoryNode or IFileNode.

        I perform a recursive series of 'get' operations to find the named
        descendant node. I return a Deferred that fires with the node, or
        errbacks with KeyError if the node could not be found.

        The path can be either a single string (slash-separated) or a list
        of path-name elements.
        """
        if not path:
            return defer.succeed(self)
        if not isinstance(path, (list, tuple)):
            path = path.split("/")
        for p in path:
            assert isinstance(p, unicode)
        childname = path[0]
        remaining_path = path[1:]
        d = self.get(childname)
        if remaining_path:
            def _got(node):
                return node.get_child_at_path(remaining_path)
            d.addCallback(_got)
        return d
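
    # e.g. (hypothetical names) get_child_at_path(u"a/b/c") and
    # get_child_at_path([u"a", u"b", u"c"]) are equivalent: each path
    # element triggers one get() on the directory located by the previous
    # step.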

    def set_uri(self, name, child_uri, metadata=None, overwrite=True):
        """I add a child (by URI) at the specific name. I return a Deferred
        that fires with the child node when the operation finishes. I will
        replace any existing child of the same name unless overwrite=False,
        in which case I errback with ExistingChildError.

        The child_uri could be for a file, or for a directory (either
        read-write or read-only, using a URI that came from get_uri() ).

        If this directory node is read-only, the Deferred will errback with a
        NotMutableError."""
        assert isinstance(name, unicode)
        child_node = self._create_node(child_uri)
        d = self.set_node(name, child_node, metadata, overwrite)
        d.addCallback(lambda res: child_node)
        return d

    def set_children(self, entries, overwrite=True):
        # this takes URIs
        a = Adder(self, overwrite=overwrite)
        for e in entries:
            if len(e) == 2:
                name, child_uri = e
                metadata = None
            else:
                assert len(e) == 3
                name, child_uri, metadata = e
            assert isinstance(name, unicode)
            a.set_node(name, self._create_node(child_uri), metadata)
        return self._node.modify(a.modify)

    def set_node(self, name, child, metadata=None, overwrite=True):
        """I add a child at the specific name. I return a Deferred that fires
        when the operation finishes. This Deferred will fire with the child
        node that was just added. I will replace any existing child of the
        same name unless overwrite=False.

        If this directory node is read-only, the Deferred will errback with a
        NotMutableError."""
        if self.is_readonly():
            return defer.fail(NotMutableError())
        assert isinstance(name, unicode)
        assert IFilesystemNode.providedBy(child), child
        a = Adder(self, overwrite=overwrite)
        a.set_node(name, child, metadata)
        d = self._node.modify(a.modify)
        d.addCallback(lambda res: child)
        return d

    def set_nodes(self, entries, overwrite=True):
        if self.is_readonly():
            return defer.fail(NotMutableError())
        a = Adder(self, entries, overwrite=overwrite)
        d = self._node.modify(a.modify)
        d.addCallback(lambda res: None)
        return d

    def add_file(self, name, uploadable, metadata=None, overwrite=True):
        """I upload a file (using the given IUploadable), then attach the
        resulting FileNode to the directory at the given name. I return a
        Deferred that fires (with the IFileNode of the uploaded file) when
        the operation completes."""
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        d = self._client.upload(uploadable)
        d.addCallback(lambda results: results.uri)
        d.addCallback(self._client.create_node_from_uri)
        d.addCallback(lambda node:
                      self.set_node(name, node, metadata, overwrite))
        return d

    def delete(self, name):
        """I remove the child at the specific name. I return a Deferred that
        fires (with the node just removed) when the operation finishes."""
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        deleter = Deleter(self, name)
        d = self._node.modify(deleter.modify)
        d.addCallback(lambda res: deleter.old_child)
        return d

    def create_empty_directory(self, name, overwrite=True):
        """I create and attach an empty directory at the given name. I return
        a Deferred that fires (with the new directory node) when the
        operation finishes."""
        assert isinstance(name, unicode)
        if self.is_readonly():
            return defer.fail(NotMutableError())
        d = self._client.create_empty_dirnode()
        def _created(child):
            entries = [(name, child, None)]
            a = Adder(self, entries, overwrite=overwrite)
            d = self._node.modify(a.modify)
            d.addCallback(lambda res: child)
            return d
        d.addCallback(_created)
        return d

    def move_child_to(self, current_child_name, new_parent,
                      new_child_name=None, overwrite=True):
        """I take one of my children and move it to a new parent. The child
        is referenced by name. On the new parent, the child will live under
        'new_child_name', which defaults to 'current_child_name'. I return a
        Deferred that fires when the operation finishes."""
        assert isinstance(current_child_name, unicode)
        if self.is_readonly() or new_parent.is_readonly():
            return defer.fail(NotMutableError())
        if new_child_name is None:
            new_child_name = current_child_name
        assert isinstance(new_child_name, unicode)
        d = self.get(current_child_name)
        def sn(child):
            return new_parent.set_node(new_child_name, child,
                                       overwrite=overwrite)
        d.addCallback(sn)
        d.addCallback(lambda child: self.delete(current_child_name))
        return d

    def build_manifest(self):
        """Return a frozenset of verifier-capability strings for all nodes
        (directories and files) reachable from this one."""

        # this is just a tree-walker, except that following each edge
        # requires a Deferred. We use a ConcurrencyLimiter to make sure the
        # fan-out doesn't cause problems.

        manifest = set()
        manifest.add(self.get_verifier())
        limiter = ConcurrencyLimiter(10) # allow 10 in parallel

        d = self._build_manifest_from_node(self, manifest, limiter)
        def _done(res):
            # LIT nodes have no verifier-capability: their data is stored
            # inside the URI itself, so there is no need to refresh anything.
            # They indicate this by returning None from their get_verifier
            # method. We need to remove any such Nones from our set. We also
            # want to convert all these caps into strings.
            return frozenset([IVerifierURI(cap).to_string()
                              for cap in manifest
                              if cap is not None])
        d.addCallback(_done)
        return d

    def _build_manifest_from_node(self, node, manifest, limiter):
        d = limiter.add(node.list)
        def _got_list(res):
            dl = []
            for name, (child, metadata) in res.iteritems():
                verifier = child.get_verifier()
                if verifier not in manifest:
                    manifest.add(verifier)
                    if IDirectoryNode.providedBy(child):
                        dl.append(self._build_manifest_from_node(child,
                                                                 manifest,
                                                                 limiter))
            if dl:
                return defer.DeferredList(dl)
        d.addCallback(_got_list)
        return d

    def deep_stats(self):
        stats = DeepStats()
        # we track verifier caps, to avoid double-counting children for which
        # we've got both a write-cap and a read-cap
        found = set()
        found.add(self.get_verifier())

        limiter = ConcurrencyLimiter(10)

        d = self._add_deepstats_from_node(self, found, stats, limiter)
        d.addCallback(lambda res: stats.get_results())
        return d

    def _add_deepstats_from_node(self, node, found, stats, limiter):
        d = limiter.add(node.list)
        def _got_list(children):
            dl = []
            dirsize_bytes = node.get_size()
            dirsize_children = len(children)
            stats.add("count-directories")
            stats.add("size-directories", dirsize_bytes)
            stats.max("largest-directory", dirsize_bytes)
            stats.max("largest-directory-children", dirsize_children)
            for name, (child, metadata) in children.iteritems():
                verifier = child.get_verifier()
                if verifier in found:
                    continue
                found.add(verifier)
                if IDirectoryNode.providedBy(child):
                    dl.append(self._add_deepstats_from_node(child, found,
                                                            stats, limiter))
                elif IMutableFileNode.providedBy(child):
                    stats.add("count-files")
                    stats.add("count-mutable-files")
                    # TODO: update the servermap, compute a size, add it to
                    # size-mutable-files, max it into "largest-mutable-file"
                elif IFileNode.providedBy(child): # CHK and LIT
                    stats.add("count-files")
                    size = child.get_size()
                    stats.histogram("size-files-histogram", size)
                    if child.get_uri().startswith("URI:LIT:"):
                        stats.add("count-literal-files")
                        stats.add("size-literal-files", size)
                    else:
                        stats.add("count-immutable-files")
                        stats.add("size-immutable-files", size)
                        stats.max("largest-immutable-file", size)
            if dl:
                return defer.DeferredList(dl)
        d.addCallback(_got_list)
        return d

    def deep_check(self, verify=False):
        return self.deep_check_base(verify, False)

    def deep_check_and_repair(self, verify=False):
        return self.deep_check_base(verify, True)

    def deep_check_base(self, verify, repair):
        # shallow-check each object first, then traverse children
        root_si = self._node.get_storage_index()
        self._lp = log.msg(format="deep-check starting (%(si)s),"
                           " verify=%(verify)s, repair=%(repair)s",
                           si=base32.b2a(root_si), verify=verify,
                           repair=repair)
        if repair:
            results = DeepCheckAndRepairResults(root_si)
        else:
            results = DeepCheckResults(root_si)
        found = set()
        limiter = ConcurrencyLimiter(10)

        d = self._add_deepcheck_from_node([], self, results, found, limiter,
                                          verify, repair)
        def _done(res):
            log.msg("deep-check done", parent=self._lp)
            return results
        d.addCallback(_done)
        return d

    def _add_deepcheck_from_node(self, path, node, results, found, limiter,
                                 verify, repair):
        verifier = node.get_verifier()
        if verifier in found:
            # avoid loops
            return None
        found.add(verifier)

        if repair:
            d = limiter.add(node.check_and_repair, verify)
            d.addCallback(results.add_check_and_repair, path)
        else:
            d = limiter.add(node.check, verify)
            d.addCallback(results.add_check, path)

        # TODO: stats: split the DeepStats.foo calls out of
        # _add_deepstats_from_node into a separate non-recursing method, call
        # it from both here and _add_deepstats_from_node.

        if IDirectoryNode.providedBy(node):
            d.addCallback(lambda res: node.list())
            def _got_children(children):
                dl = []
                for name, (child, metadata) in children.iteritems():
                    childpath = path + [name]
                    d2 = self._add_deepcheck_from_node(childpath, child,
                                                       results,
                                                       found, limiter,
                                                       verify, repair)
                    if d2:
                        dl.append(d2)
                if dl:
                    return defer.DeferredList(dl, fireOnOneErrback=True)
            d.addCallback(_got_children)
        return d

class DeepStats:
    def __init__(self):
        self.stats = {}
        for k in ["count-immutable-files",
                  "count-mutable-files",
                  "count-literal-files",
                  "count-files",
                  "count-directories",
                  "size-immutable-files",
                  #"size-mutable-files",
                  "size-literal-files",
                  "size-directories",
                  "largest-directory",
                  "largest-directory-children",
                  "largest-immutable-file",
                  #"largest-mutable-file",
                  ]:
            self.stats[k] = 0
        self.histograms = {}
        for k in ["size-files-histogram"]:
            self.histograms[k] = {} # maps (min,max) to count
        self.buckets = [ (0,0), (1,3) ]
        self.root = math.sqrt(10)

    def add(self, key, value=1):
        self.stats[key] += value

    def max(self, key, value):
        self.stats[key] = max(self.stats[key], value)

    def which_bucket(self, size):
        # return (min,max) such that min <= size <= max
        # values are from the set (0,0), (1,3), (4,10), (11,31), (32,100),
        # (101,316), (317, 1000), etc: two per decade
        assert size >= 0
        i = 0
        while True:
            if i >= len(self.buckets):
                # extend the list
                new_lower = self.buckets[i-1][1]+1
                new_upper = int(mathutil.next_power_of_k(new_lower, self.root))
                self.buckets.append( (new_lower, new_upper) )
            maybe = self.buckets[i]
            if maybe[0] <= size <= maybe[1]:
                return maybe
            i += 1
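
    # Worked example (values follow from the comment in which_bucket):
    # the boundaries grow by a factor of roughly sqrt(10) per bucket, so
    # the first buckets are (0,0), (1,3), (4,10), (11,31), (32,100),
    # (101,316); which_bucket(5) returns (4,10) and which_bucket(150)
    # returns (101,316).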

    def histogram(self, key, size):
        bucket = self.which_bucket(size)
        h = self.histograms[key]
        if bucket not in h:
            h[bucket] = 0
        h[bucket] += 1

    def get_results(self):
        stats = self.stats.copy()
        for key in self.histograms:
            h = self.histograms[key]
            out = [ (bucket[0], bucket[1], h[bucket]) for bucket in h ]
            out.sort()
            stats[key] = out
        return stats
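
# Illustrative result shape (hypothetical numbers): after a deep_stats()
# walk, get_results() returns a plain dict in which each histogram is
# flattened into sorted (min, max, count) triples, e.g.
#
#   {"count-files": 3, "count-directories": 1, ...,
#    "size-files-histogram": [(1, 3, 2), (4, 10, 1)]}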

# use client.create_dirnode() to make one of these