2007-11-02 06:46:47 +00:00
|
|
|
|
2010-06-16 03:14:50 +00:00
|
|
|
import time, math, unicodedata
|
2007-11-02 06:46:47 +00:00
|
|
|
|
|
|
|
from zope.interface import implements
|
|
|
|
from twisted.internet import defer
|
2009-05-22 00:38:23 +00:00
|
|
|
from foolscap.api import fireEventually
|
2007-11-02 06:46:47 +00:00
|
|
|
import simplejson
|
2010-01-27 06:44:30 +00:00
|
|
|
from allmydata.mutable.common import NotWriteableError
|
2008-12-07 15:20:08 +00:00
|
|
|
from allmydata.mutable.filenode import MutableFileNode
|
2010-01-27 06:44:30 +00:00
|
|
|
from allmydata.unknown import UnknownNode, strip_prefix_for_ro
|
2009-11-20 07:52:55 +00:00
|
|
|
from allmydata.interfaces import IFilesystemNode, IDirectoryNode, IFileNode, \
|
|
|
|
IImmutableFileNode, IMutableFileNode, \
|
2009-07-03 01:07:49 +00:00
|
|
|
ExistingChildError, NoSuchChildError, ICheckable, IDeepCheckable, \
|
2010-05-27 19:45:29 +00:00
|
|
|
MustBeDeepImmutableError, CapConstraintError, ChildOfWrongTypeError
|
2009-01-06 20:37:03 +00:00
|
|
|
from allmydata.check_results import DeepCheckResults, \
|
2008-09-07 19:44:56 +00:00
|
|
|
DeepCheckAndRepairResults
|
2008-10-22 00:03:07 +00:00
|
|
|
from allmydata.monitor import Monitor
|
2008-08-12 04:23:38 +00:00
|
|
|
from allmydata.util import hashutil, mathutil, base32, log
|
2010-07-12 00:30:15 +00:00
|
|
|
from allmydata.util.encodingutil import quote_output
|
2009-10-17 19:28:29 +00:00
|
|
|
from allmydata.util.assertutil import precondition
|
2009-07-02 22:25:37 +00:00
|
|
|
from allmydata.util.netstring import netstring, split_netstring
|
2009-12-01 22:44:35 +00:00
|
|
|
from allmydata.util.consumer import download_to_data
|
2009-11-12 00:22:33 +00:00
|
|
|
from allmydata.uri import LiteralFileURI, from_string, wrap_dirnode_cap
|
2007-12-04 00:27:46 +00:00
|
|
|
from pycryptopp.cipher.aes import AES
|
2009-10-17 18:00:05 +00:00
|
|
|
from allmydata.util.dictutil import AuxValueDict
|
2009-07-05 21:23:45 +00:00
|
|
|
|
2010-01-27 07:03:09 +00:00
|
|
|
|
2010-06-01 04:54:28 +00:00
|
|
|
def update_metadata(metadata, new_metadata, now):
    """Merge 'new_metadata' into 'metadata' and refresh the timestamps.

    If 'new_metadata' is given it replaces everything in 'metadata' except
    the 'tahoe' sub-dict, which is system-owned and always carried over from
    the old metadata. The 'tahoe' sub-dict's 'linkmotime' is set to 'now';
    'linkcrtime' is preserved if present, otherwise seeded from a legacy
    top-level 'ctime' field (Tahoe < 1.4.0) or from 'now'.
    Returns the resulting metadata dict (may be the same object passed in)."""

    if metadata is None:
        metadata = {}

    # Remember the legacy 'ctime' (if any) before new_metadata can clobber it.
    legacy_ctime = metadata.get('ctime')

    if new_metadata is not None:
        # Replace all user metadata, but never let the caller supply the
        # system-owned 'tahoe' sub-dict: keep the old one instead.
        merged = new_metadata.copy()
        merged.pop('tahoe', None)
        if 'tahoe' in metadata:
            merged['tahoe'] = metadata['tahoe']
        metadata = merged

    # Refresh the system timestamps.
    sysmd = metadata.get('tahoe', {})
    if 'linkcrtime' not in sysmd:
        # In Tahoe < 1.4.0 the word 'ctime' meant what Tahoe >= 1.4.0 calls
        # 'linkcrtime'. The legacy field is only consulted when it was in the
        # old metadata and 'tahoe:linkcrtime' was not.
        sysmd['linkcrtime'] = legacy_ctime if legacy_ctime is not None else now

    sysmd['linkmotime'] = now
    metadata['tahoe'] = sysmd

    return metadata
|
|
|
|
|
2010-06-17 04:14:11 +00:00
|
|
|
|
|
|
|
# 'x' at the end of a variable name indicates that it holds a Unicode string that may not
|
|
|
|
# be NFC-normalized.
|
|
|
|
|
2010-06-16 03:14:50 +00:00
|
|
|
def normalize(namex):
    """Canonicalize a (possibly non-NFC) unicode name to Unicode NFC form,
    so that directory entries compare and hash consistently."""
    nfc_namex = unicodedata.normalize('NFC', namex)
    return nfc_namex
|
2010-06-01 04:54:28 +00:00
|
|
|
|
2010-01-27 07:03:09 +00:00
|
|
|
# TODO: {Deleter,MetadataSetter,Adder}.modify all start by unpacking the
|
|
|
|
# contents and end by repacking them. It might be better to apply them to
|
|
|
|
# the unpacked contents.
|
|
|
|
|
2008-04-18 02:57:04 +00:00
|
|
|
class Deleter:
    """A modify-callback that removes the child named 'namex' from a
    directory's packed contents. Presumably passed to the backing mutable
    file's modify() machinery -- TODO confirm against caller."""

    def __init__(self, node, namex, must_exist=True, must_be_directory=False, must_be_file=False):
        self.node = node
        # Names are stored NFC-normalized; normalize on the way in.
        self.name = normalize(namex)
        self.must_exist = must_exist
        self.must_be_directory = must_be_directory
        self.must_be_file = must_be_file

    def modify(self, old_contents, servermap, first_time):
        """Unpack 'old_contents', delete self.name, and return the re-packed
        contents. Returns None (meaning: no change needed) when the child is
        absent and must_exist is False or this is a retry."""
        children = self.node._unpack_contents(old_contents)
        if self.name not in children:
            # Only complain on the first attempt: on a retry the child may
            # already have been deleted by our earlier (partially
            # successful) attempt.
            if first_time and self.must_exist:
                raise NoSuchChildError(self.name)
            self.old_child = None
            return None
        self.old_child, metadata = children[self.name]

        # Unknown children can be removed regardless of must_be_directory or must_be_file.
        if self.must_be_directory and IFileNode.providedBy(self.old_child):
            raise ChildOfWrongTypeError("delete required a directory, not a file")
        if self.must_be_file and IDirectoryNode.providedBy(self.old_child):
            raise ChildOfWrongTypeError("delete required a file, not a directory")

        del children[self.name]
        new_contents = self.node._pack_contents(children)
        return new_contents
|
|
|
|
|
2010-01-27 06:44:30 +00:00
|
|
|
|
2008-04-18 02:57:04 +00:00
|
|
|
class MetadataSetter:
    """A modify-callback that replaces the metadata of an existing child,
    merging via update_metadata() so system timestamps stay consistent."""

    def __init__(self, node, namex, metadata, create_readonly_node=None):
        self.node = node
        # Names are stored NFC-normalized; normalize on the way in.
        self.name = normalize(namex)
        self.metadata = metadata
        # Optional callable used to downgrade the child to read-only when
        # the merged metadata carries a true 'no-write' flag.
        self.create_readonly_node = create_readonly_node

    def modify(self, old_contents, servermap, first_time):
        """Unpack 'old_contents', update self.name's metadata, and return
        the re-packed contents. Raises NoSuchChildError if absent."""
        children = self.node._unpack_contents(old_contents)
        name = self.name
        if name not in children:
            raise NoSuchChildError(name)

        now = time.time()
        child = children[name][0]

        # Copy before merging so we never mutate the unpacked entry in place.
        metadata = update_metadata(children[name][1].copy(), self.metadata, now)
        if self.create_readonly_node and metadata.get('no-write', False):
            child = self.create_readonly_node(child, name)

        children[name] = (child, metadata)
        new_contents = self.node._pack_contents(children)
        return new_contents
|
|
|
|
|
|
|
|
|
|
|
|
class Adder:
    """A modify-callback that adds (or overwrites) one or more children in a
    directory's packed contents.

    'overwrite' may be True (always overwrite), False (raise
    ExistingChildError on collision), or the string "only-files"
    (overwrite files but refuse to overwrite directories)."""

    def __init__(self, node, entries=None, overwrite=True, create_readonly_node=None):
        self.node = node
        if entries is None:
            entries = {}
        precondition(isinstance(entries, dict), entries)
        # keys of 'entries' may not be normalized.
        self.entries = entries
        self.overwrite = overwrite
        # Optional callable used to downgrade a child to read-only when its
        # merged metadata carries a true 'no-write' flag.
        self.create_readonly_node = create_readonly_node

    def set_node(self, namex, node, metadata):
        # Queue one (node, metadata) entry for addition; namex is normalized
        # later, in modify().
        precondition(IFilesystemNode.providedBy(node), node)
        self.entries[namex] = (node, metadata)

    def modify(self, old_contents, servermap, first_time):
        """Unpack 'old_contents', apply all queued entries, and return the
        re-packed contents."""
        children = self.node._unpack_contents(old_contents)
        now = time.time()
        for (namex, (child, new_metadata)) in self.entries.iteritems():
            name = normalize(namex)
            precondition(IFilesystemNode.providedBy(child), child)

            # Strictly speaking this is redundant because we would raise the
            # error again in _pack_normalized_children.
            child.raise_error()

            metadata = None
            if name in children:
                if not self.overwrite:
                    raise ExistingChildError("child %s already exists" % quote_output(name, encoding='utf-8'))

                # "only-files" permits overwriting files but not directories.
                if self.overwrite == "only-files" and IDirectoryNode.providedBy(children[name][0]):
                    raise ExistingChildError("child %s already exists" % quote_output(name, encoding='utf-8'))
                # Overwriting: carry the old metadata into the merge.
                metadata = children[name][1].copy()

            metadata = update_metadata(metadata, new_metadata, now)
            if self.create_readonly_node and metadata.get('no-write', False):
                child = self.create_readonly_node(child, name)

            children[name] = (child, metadata)
        new_contents = self.node._pack_contents(children)
        return new_contents
|
|
|
|
|
2010-07-14 06:02:55 +00:00
|
|
|
def _encrypt_rw_uri(writekey, rw_uri):
    """Superencrypt a child's write-cap string with the directory's writekey.

    Returns salt + AES(key, rw_uri) + mac, where the salt is derived from
    rw_uri, the AES key from (salt, writekey), and the mac is an HMAC over
    salt + ciphertext."""
    precondition(isinstance(rw_uri, str), rw_uri)
    precondition(isinstance(writekey, str), writekey)

    salt = hashutil.mutable_rwcap_salt_hash(rw_uri)
    key = hashutil.mutable_rwcap_key_hash(salt, writekey)
    cryptor = AES(key)
    crypttext = cryptor.process(rw_uri)
    # The MAC is not checked by readers in Tahoe >= 1.3.0, but we still
    # produce it for the sake of older readers.
    mac = hashutil.hmac(key, salt + crypttext)
    assert len(mac) == 32
    return salt + crypttext + mac
|
|
|
|
|
2010-07-14 06:02:55 +00:00
|
|
|
def pack_children(childrenx, writekey, deep_immutable=False):
    """Normalize the child names in 'childrenx' (whose keys may be non-NFC)
    and pack them into a single directory-contents string.

    Every entry must carry a metadata dict; see _pack_normalized_children
    for the wire format and the meaning of writekey/deep_immutable."""
    # initial_children must have metadata (i.e. {} instead of None)
    children = {}
    for (namex, (node, metadata)) in childrenx.iteritems():
        precondition(isinstance(metadata, dict),
                     "directory creation requires metadata to be a dict, not None", metadata)
        children[normalize(namex)] = (node, metadata)

    return _pack_normalized_children(children, writekey=writekey, deep_immutable=deep_immutable)
|
2010-06-18 00:02:49 +00:00
|
|
|
|
|
|
|
|
2010-07-14 06:02:55 +00:00
|
|
|
# Pre-computed netstring encoding of the empty string, used as the writecap
# field for read-only entries.
ZERO_LEN_NETSTR=netstring('')

def _pack_normalized_children(children, writekey, deep_immutable=False):
    """Take a dict that maps:
         children[unicode_nfc_name] = (IFileSystemNode, metadata_dict)
    and pack it into a single string, for use as the contents of the backing
    file. This is the same format as is returned by _unpack_contents. I also
    accept an AuxValueDict, in which case I'll use the auxilliary cached data
    as the pre-packed entry, which is faster than re-packing everything each
    time.

    If writekey is provided then I will superencrypt the child's writecap with
    writekey.

    If deep_immutable is True, I will require that all my children are deeply
    immutable, and will raise a MustBeDeepImmutableError if not.
    """
    precondition((writekey is None) or isinstance(writekey, str), writekey)

    has_aux = isinstance(children, AuxValueDict)
    entries = []
    # Entries are sorted by name so the packed form is deterministic.
    for name in sorted(children.keys()):
        assert isinstance(name, unicode)
        entry = None
        (child, metadata) = children[name]
        child.raise_error()
        if deep_immutable and not child.is_allowed_in_immutable_directory():
            raise MustBeDeepImmutableError("child %s is not allowed in an immutable directory" %
                                           quote_output(name, encoding='utf-8'), name)
        if has_aux:
            # Reuse the cached pre-packed entry when available.
            entry = children.get_aux(name)
        if not entry:
            assert IFilesystemNode.providedBy(child), (name,child)
            assert isinstance(metadata, dict)
            rw_uri = child.get_write_uri()
            if rw_uri is None:
                rw_uri = ""
            assert isinstance(rw_uri, str), rw_uri

            # should be prevented by MustBeDeepImmutableError check above
            assert not (rw_uri and deep_immutable)

            ro_uri = child.get_readonly_uri()
            if ro_uri is None:
                ro_uri = ""
            assert isinstance(ro_uri, str), ro_uri
            if writekey is not None:
                writecap = netstring(_encrypt_rw_uri(writekey, rw_uri))
            else:
                writecap = ZERO_LEN_NETSTR
            # Wire format per entry: four netstrings (utf-8 name, ro_uri,
            # encrypted writecap, JSON metadata), themselves netstring-wrapped.
            entry = "".join([netstring(name.encode("utf-8")),
                             netstring(strip_prefix_for_ro(ro_uri, deep_immutable)),
                             writecap,
                             netstring(simplejson.dumps(metadata))])
        entries.append(netstring(entry))
    return "".join(entries)
|
|
|
|
|
2009-07-17 01:01:03 +00:00
|
|
|
class DirectoryNode:
|
2008-09-10 20:44:58 +00:00
|
|
|
implements(IDirectoryNode, ICheckable, IDeepCheckable)
|
2007-11-02 06:46:47 +00:00
|
|
|
filenode_class = MutableFileNode
|
|
|
|
|
Overhaul IFilesystemNode handling, to simplify tests and use POLA internally.
* stop using IURI as an adapter
* pass cap strings around instead of URI instances
* move filenode/dirnode creation duties from Client to new NodeMaker class
* move other Client duties to KeyGenerator, SecretHolder, History classes
* stop passing Client reference to dirnode/filenode constructors
- pass less-powerful references instead, like StorageBroker or Uploader
* always create DirectoryNodes by wrapping a filenode (mutable for now)
* remove some specialized mock classes from unit tests
Detailed list of changes (done one at a time, then merged together)
always pass a string to create_node_from_uri(), not an IURI instance
always pass a string to IFilesystemNode constructors, not an IURI instance
stop using IURI() as an adapter, switch on cap prefix in create_node_from_uri()
client.py: move SecretHolder code out to a separate class
test_web.py: hush pyflakes
client.py: move NodeMaker functionality out into a separate object
LiteralFileNode: stop storing a Client reference
immutable Checker: remove Client reference, it only needs a SecretHolder
immutable Upload: remove Client reference, leave SecretHolder and StorageBroker
immutable Repairer: replace Client reference with StorageBroker and SecretHolder
immutable FileNode: remove Client reference
mutable.Publish: stop passing Client
mutable.ServermapUpdater: get StorageBroker in constructor, not by peeking into Client reference
MutableChecker: reference StorageBroker and History directly, not through Client
mutable.FileNode: removed unused indirection to checker classes
mutable.FileNode: remove Client reference
client.py: move RSA key generation into a separate class, so it can be passed to the nodemaker
move create_mutable_file() into NodeMaker
test_dirnode.py: stop using FakeClient mockups, use NoNetworkGrid instead. This simplifies the code, but takes longer to run (17s instead of 6s). This should come down later when other cleanups make it possible to use simpler (non-RSA) fake mutable files for dirnode tests.
test_mutable.py: clean up basedir names
client.py: move create_empty_dirnode() into NodeMaker
dirnode.py: get rid of DirectoryNode.create
remove DirectoryNode.init_from_uri, refactor NodeMaker for customization, simplify test_web's mock Client to match
stop passing Client to DirectoryNode, make DirectoryNode.create_with_mutablefile the normal DirectoryNode constructor, start removing client from NodeMaker
remove Client from NodeMaker
move helper status into History, pass History to web.Status instead of Client
test_mutable.py: fix minor typo
2009-08-15 11:02:56 +00:00
|
|
|
    def __init__(self, filenode, nodemaker, uploader):
        """Wrap 'filenode' (the backing file holding the packed children) as
        a directory node. 'nodemaker' creates child nodes from caps;
        'uploader' is stored for later use -- not exercised in this chunk."""
        assert IFileNode.providedBy(filenode), filenode
        # Guard against double-wrapping: the backing node must not itself be
        # a directory.
        assert not IDirectoryNode.providedBy(filenode), filenode
        self._node = filenode
        filenode_cap = filenode.get_cap()
        # The dirnode's own cap is the filenode's cap wrapped in a
        # directory-flavored cap.
        self._uri = wrap_dirnode_cap(filenode_cap)
        self._nodemaker = nodemaker
        self._uploader = uploader
|
2008-05-08 20:21:14 +00:00
|
|
|
|
2007-12-03 21:52:42 +00:00
|
|
|
    def __repr__(self):
        # e.g. "<DirectoryNode RW-MUT URI:DIR2:abcde...>". The hasattr guard
        # keeps repr() safe on a partially-constructed instance.
        return "<%s %s-%s %s>" % (self.__class__.__name__,
                                  self.is_readonly() and "RO" or "RW",
                                  self.is_mutable() and "MUT" or "IMM",
                                  hasattr(self, '_uri') and self._uri.abbrev())
|
2007-11-02 06:46:47 +00:00
|
|
|
|
2008-05-08 20:21:14 +00:00
|
|
|
    def get_size(self):
        """Return the size of our backing mutable file, in bytes, if we've
        fetched it. Otherwise return None. This returns synchronously."""
        return self._node.get_size()
|
2008-05-08 20:21:14 +00:00
|
|
|
|
2009-11-18 19:16:24 +00:00
|
|
|
    def get_current_size(self):
        """Calculate the size of our backing mutable file, in bytes. Returns
        a Deferred that fires with the result."""
        return self._node.get_current_size()
|
2008-05-08 20:21:14 +00:00
|
|
|
|
2007-11-02 06:46:47 +00:00
|
|
|
    def _read(self):
        """Fetch the backing file's contents and return a Deferred that
        fires with the unpacked children dict (see _unpack_contents)."""
        if self._node.is_mutable():
            # use the IMutableFileNode API.
            d = self._node.download_best_version()
        else:
            d = download_to_data(self._node)
        d.addCallback(self._unpack_contents)
        return d
|
|
|
|
|
|
|
|
    def _decrypt_rwcapdata(self, encwrcap):
        """Decrypt one child's superencrypted writecap field.

        Layout is salt(16) + ciphertext + mac(32); the mac is ignored here
        (see the note in _encrypt_rw_uri). Inverse of _encrypt_rw_uri."""
        salt = encwrcap[:16]
        crypttext = encwrcap[16:-32]
        key = hashutil.mutable_rwcap_key_hash(salt, self._node.get_writekey())
        cryptor = AES(key)
        plaintext = cryptor.process(crypttext)
        return plaintext
|
|
|
|
|
2010-01-27 06:44:30 +00:00
|
|
|
    def _create_and_validate_node(self, rw_uri, ro_uri, name):
        # name is just for error reporting
        # Children of an immutable directory must themselves be deeply
        # immutable, hence deep_immutable=not self.is_mutable().
        node = self._nodemaker.create_from_cap(rw_uri, ro_uri,
                                               deep_immutable=not self.is_mutable(),
                                               name=name)
        # Propagate any error captured in the node (e.g. an unknown or
        # disallowed cap) immediately.
        node.raise_error()
        return node
|
2007-11-02 06:46:47 +00:00
|
|
|
|
2010-06-01 04:54:28 +00:00
|
|
|
    def _create_readonly_node(self, node, name):
        # name is just for error reporting
        # Already read-only (and known): nothing to do.
        if not node.is_unknown() and node.is_readonly():
            return node
        # Otherwise rebuild the node from its read-only cap alone.
        return self._create_and_validate_node(None, node.get_readonly_uri(), name=name)
|
|
|
|
|
2007-11-02 06:46:47 +00:00
|
|
|
    def _unpack_contents(self, data):
        """Parse the packed directory contents 'data' and return an
        AuxValueDict mapping NFC-normalized unicode name -> (child node,
        metadata dict), with each entry's raw packed form cached as aux data
        for fast re-packing."""
        # the directory is serialized as a list of netstrings, one per child.
        # Each child is serialized as a list of four netstrings: (name, ro_uri,
        # rwcapdata, metadata), in which the name, ro_uri, metadata are in
        # cleartext. The 'name' is UTF-8 encoded, and should be normalized to NFC.
        # The rwcapdata is formatted as:
        # pack("16ss32s", iv, AES(H(writekey+iv), plaintext_rw_uri), mac)
        assert isinstance(data, str), (repr(data), type(data))
        # an empty directory is serialized as an empty string
        if data == "":
            return AuxValueDict()
        writeable = not self.is_readonly()
        mutable = self.is_mutable()
        children = AuxValueDict()
        position = 0
        while position < len(data):
            entries, position = split_netstring(data, 1, position)
            entry = entries[0]
            (namex_utf8, ro_uri, rwcapdata, metadata_s), subpos = split_netstring(entry, 4)
            # Immutable directories must not carry writecaps at all.
            if not mutable and len(rwcapdata) > 0:
                raise ValueError("the rwcapdata field of a dirnode in an immutable directory was not empty")

            # A name containing characters that are unassigned in one version of Unicode might
            # not be normalized wrt a later version. See the note in section 'Normalization Stability'
            # at <http://unicode.org/policies/stability_policy.html>.
            # Therefore we normalize names going both in and out of directories.
            name = normalize(namex_utf8.decode("utf-8"))

            rw_uri = ""
            if writeable:
                rw_uri = self._decrypt_rwcapdata(rwcapdata)

            # Since the encryption uses CTR mode, it currently leaks the length of the
            # plaintext rw_uri -- and therefore whether it is present, i.e. whether the
            # dirnode is writeable (ticket #925). By stripping trailing spaces in
            # Tahoe >= 1.6.0, we may make it easier for future versions to plug this leak.
            # ro_uri is treated in the same way for consistency.
            # rw_uri and ro_uri will be either None or a non-empty string.

            rw_uri = rw_uri.rstrip(' ') or None
            ro_uri = ro_uri.rstrip(' ') or None

            try:
                child = self._create_and_validate_node(rw_uri, ro_uri, name)
                if mutable or child.is_allowed_in_immutable_directory():
                    metadata = simplejson.loads(metadata_s)
                    assert isinstance(metadata, dict)
                    # NOTE(review): this plain assignment appears redundant --
                    # set_with_aux on the next line stores the same value plus
                    # the aux entry; possibly a merge artifact. Confirm before
                    # removing.
                    children[name] = (child, metadata)
                    children.set_with_aux(name, (child, metadata), auxilliary=entry)
                else:
                    # Drop (but log) mutable children found inside an
                    # immutable directory.
                    log.msg(format="mutable cap for child %(name)s unpacked from an immutable directory",
                            name=quote_output(name, encoding='utf-8'),
                            facility="tahoe.webish", level=log.UNUSUAL)
            except CapConstraintError, e:
                # Skip (but log) children whose caps violate constraints.
                log.msg(format="unmet constraint on cap for child %(name)s unpacked from a directory:\n"
                               "%(message)s", message=e.args[0], name=quote_output(name, encoding='utf-8'),
                        facility="tahoe.webish", level=log.UNUSUAL)

        return children
|
|
|
|
|
|
|
|
    def _pack_contents(self, children):
        # expects children in the same format as _unpack_contents returns
        return _pack_normalized_children(children, self._node.get_writekey())
|
2007-11-02 06:46:47 +00:00
|
|
|
|
|
|
|
    def is_readonly(self):
        # Delegates to the backing file node.
        return self._node.is_readonly()
|
2010-01-27 06:44:30 +00:00
|
|
|
|
2007-11-02 06:46:47 +00:00
|
|
|
    def is_mutable(self):
        # Delegates to the backing file node.
        return self._node.is_mutable()
|
|
|
|
|
2010-01-27 06:44:30 +00:00
|
|
|
    def is_unknown(self):
        # A DirectoryNode is always a known node type.
        return False
|
|
|
|
|
|
|
|
    def is_allowed_in_immutable_directory(self):
        # Only a directory backed by an immutable file may live inside an
        # immutable directory.
        return not self._node.is_mutable()
|
|
|
|
|
|
|
|
    def raise_error(self):
        # A constructed DirectoryNode carries no latent error (unlike
        # UnknownNode), so there is nothing to raise.
        pass
|
|
|
|
|
2007-11-02 06:46:47 +00:00
|
|
|
    def get_uri(self):
        # String form of our (possibly writeable) directory cap.
        return self._uri.to_string()
|
|
|
|
|
2010-01-27 06:44:30 +00:00
|
|
|
    def get_write_uri(self):
        # None for a read-only dirnode, else the writecap string.
        if self.is_readonly():
            return None
        return self._uri.to_string()
|
|
|
|
|
2007-12-03 21:52:42 +00:00
|
|
|
    def get_readonly_uri(self):
        # String form of the read-only (diminished) directory cap.
        return self._uri.get_readonly().to_string()
|
|
|
|
|
2009-11-11 22:25:42 +00:00
|
|
|
    def get_cap(self):
        # The cap object itself (not its string form).
        return self._uri
|
2010-01-27 06:44:30 +00:00
|
|
|
|
2009-11-11 22:25:42 +00:00
|
|
|
    def get_readcap(self):
        # The read-only (diminished) cap object.
        return self._uri.get_readonly()
|
2010-01-27 06:44:30 +00:00
|
|
|
|
2008-12-08 19:44:11 +00:00
|
|
|
    def get_verify_cap(self):
        # Verify cap: allows integrity checking without read access.
        return self._uri.get_verify_cap()
|
2010-01-27 06:44:30 +00:00
|
|
|
|
2009-01-23 04:44:49 +00:00
|
|
|
    def get_repair_cap(self):
        if self._node.is_readonly():
            return None # readonly (mutable) dirnodes are not yet repairable
        # Repairing a writeable dirnode requires the full (write) cap.
        return self._uri
|
|
|
|
|
2008-08-12 23:14:07 +00:00
|
|
|
    def get_storage_index(self):
        # Storage index identifying this dirnode's shares on the grid.
        return self._uri.get_storage_index()
|
2008-08-12 23:14:07 +00:00
|
|
|
|
2009-02-18 02:32:43 +00:00
|
|
|
    def check(self, monitor, verify=False, add_lease=False):
        """Perform a file check. See IChecker.check for details."""
        # Checking a dirnode means checking its backing file.
        return self._node.check(monitor, verify, add_lease)
|
|
|
|
    def check_and_repair(self, monitor, verify=False, add_lease=False):
        # Check-and-repair delegates to the backing file as well.
        return self._node.check_and_repair(monitor, verify, add_lease)
|
2007-11-02 06:46:47 +00:00
|
|
|
|
|
|
|
    def list(self):
        """I return a Deferred that fires with a dictionary mapping child
        name to a tuple of (IFilesystemNode, metadata)."""
        return self._read()
|
|
|
|
|
2010-06-16 03:14:50 +00:00
|
|
|
def has_child(self, namex):
    """I return a Deferred that fires with a boolean, True if there
    exists a child of the given name, False if not."""
    name = normalize(namex)
    d = self._read()
    # use the 'in' operator instead of the deprecated dict.has_key()
    # (removed entirely in Python 3)
    d.addCallback(lambda children: name in children)
    return d
def _get(self, children, name):
|
|
|
|
child = children.get(name)
|
|
|
|
if child is None:
|
2008-10-27 20:15:25 +00:00
|
|
|
raise NoSuchChildError(name)
|
2007-12-03 21:52:42 +00:00
|
|
|
return child[0]
|
|
|
|
|
2008-10-03 00:52:03 +00:00
|
|
|
def _get_with_metadata(self, children, name):
|
|
|
|
child = children.get(name)
|
|
|
|
if child is None:
|
2008-10-27 20:15:25 +00:00
|
|
|
raise NoSuchChildError(name)
|
2008-10-03 00:52:03 +00:00
|
|
|
return child
|
|
|
|
|
2010-06-16 03:14:50 +00:00
|
|
|
def get(self, namex):
    """I return a Deferred that fires with the named child node,
    which is an IFilesystemNode."""
    # normalize the requested (unicode) name before lookup
    name = normalize(namex)
    d = self._read()
    # _get raises NoSuchChildError if the name is absent
    d.addCallback(self._get, name)
    return d
def get_child_and_metadata(self, namex):
    """I return a Deferred that fires with the (node, metadata) pair for
    the named child. The node is an IFilesystemNode, and the metadata
    is a dictionary."""
    # normalize the requested (unicode) name before lookup
    name = normalize(namex)
    d = self._read()
    # _get_with_metadata raises NoSuchChildError if the name is absent
    d.addCallback(self._get_with_metadata, name)
    return d
def get_metadata_for(self, namex):
    """I return a Deferred that fires with the metadata dictionary of
    the named child.

    NOTE(review): a missing child surfaces here as a bare KeyError from
    the dict lookup, not the NoSuchChildError raised by get() and
    get_child_and_metadata() -- confirm whether callers depend on that
    difference before unifying."""
    name = normalize(namex)
    d = self._read()
    d.addCallback(lambda children: children[name][1])
    return d
def set_metadata_for(self, namex, metadata):
    """I replace the metadata of the named child with the given dict,
    returning a Deferred that fires with this dirnode when the write
    completes. Fails with NotWriteableError if I am read-only."""
    name = normalize(namex)
    if self.is_readonly():
        return defer.fail(NotWriteableError())
    assert isinstance(metadata, dict)
    # MetadataSetter performs the read-modify-write under _node.modify
    s = MetadataSetter(self, name, metadata,
                       create_readonly_node=self._create_readonly_node)
    d = self._node.modify(s.modify)
    d.addCallback(lambda res: self)
    return d
def get_child_at_path(self, pathx):
    """Transform a child path into an IFilesystemNode.

    I perform a recursive series of 'get' operations to find the named
    descendant node. I return a Deferred that fires with the node, or
    errbacks (with NoSuchChildError from the per-step lookup) if the
    node could not be found.

    The path can be either a single string (slash-separated) or a list of
    path-name elements.
    """
    d = self.get_child_and_metadata_at_path(pathx)
    # discard the metadata; callers of this method only want the node
    d.addCallback(lambda (node, metadata): node)
    return d
def get_child_and_metadata_at_path(self, pathx):
    """Transform a child path into an IFilesystemNode and
    a metadata dictionary from the last edge that was traversed.
    """

    # an empty path names this directory itself, with empty metadata
    if not pathx:
        return defer.succeed((self, {}))
    # accept either a slash-separated string or a pre-split sequence
    if not isinstance(pathx, (list, tuple)):
        pathx = pathx.split("/")
    for segment in pathx:
        assert isinstance(segment, unicode), segment
    childnamex = pathx[0]
    remaining_pathx = pathx[1:]
    if remaining_pathx:
        # more than one component left: fetch the first child, then
        # recurse into it with the rest of the path
        d = self.get(childnamex)
        d.addCallback(lambda node:
                      node.get_child_and_metadata_at_path(remaining_pathx))
        return d
    # final component: resolve it in this directory
    return self.get_child_and_metadata(childnamex)
def set_uri(self, namex, writecap, readcap, metadata=None, overwrite=True):
    """I attach a child (given by its cap strings) at the given name,
    returning a Deferred that fires with the newly-created child node."""
    precondition(isinstance(writecap, (str,type(None))), writecap)
    precondition(isinstance(readcap, (str,type(None))), readcap)

    # We now allow packing unknown nodes, provided they are valid
    # for this type of directory.
    child_node = self._create_and_validate_node(writecap, readcap, namex)
    d = self.set_node(namex, child_node, metadata, overwrite)
    d.addCallback(lambda res: child_node)
    return d
def set_children(self, entries, overwrite=True):
    # this takes URIs
    """I attach several children at once, in a single modify pass.

    'entries' maps unicode childname to either (writecap, readcap) or
    (writecap, readcap, metadata). Returns a Deferred that fires with
    this dirnode."""
    a = Adder(self, overwrite=overwrite,
              create_readonly_node=self._create_readonly_node)
    for (namex, e) in entries.iteritems():
        assert isinstance(namex, unicode), namex
        # each entry is a 2-tuple (no metadata) or a 3-tuple
        if len(e) == 2:
            writecap, readcap = e
            metadata = None
        else:
            assert len(e) == 3
            writecap, readcap, metadata = e
        precondition(isinstance(writecap, (str,type(None))), writecap)
        precondition(isinstance(readcap, (str,type(None))), readcap)

        # We now allow packing unknown nodes, provided they are valid
        # for this type of directory.
        child_node = self._create_and_validate_node(writecap, readcap, namex)
        a.set_node(namex, child_node, metadata)
    # one read-modify-write applies all of the accumulated additions
    d = self._node.modify(a.modify)
    d.addCallback(lambda ign: self)
    return d
def set_node(self, namex, child, metadata=None, overwrite=True):
    """I add a child at the specific name. I return a Deferred that fires
    when the operation finishes. This Deferred will fire with the child
    node that was just added. I will replace any existing child of the
    same name.

    If this directory node is read-only, the Deferred will errback with a
    NotWriteableError."""

    # validate the child once, up front; the duplicate 'assert' that used
    # to follow the read-only check was redundant with this precondition
    # (and asserts are stripped under python -O), so it has been removed
    precondition(IFilesystemNode.providedBy(child), child)

    if self.is_readonly():
        return defer.fail(NotWriteableError())
    a = Adder(self, overwrite=overwrite,
              create_readonly_node=self._create_readonly_node)
    a.set_node(namex, child, metadata)
    d = self._node.modify(a.modify)
    d.addCallback(lambda res: child)
    return d
def set_nodes(self, entries, overwrite=True):
    """I attach several existing child nodes at once; 'entries' is the
    dict consumed by Adder. Returns a Deferred firing with this
    dirnode, or failing with NotWriteableError if I am read-only."""
    precondition(isinstance(entries, dict), entries)
    if self.is_readonly():
        return defer.fail(NotWriteableError())
    a = Adder(self, entries, overwrite=overwrite,
              create_readonly_node=self._create_readonly_node)
    d = self._node.modify(a.modify)
    d.addCallback(lambda res: self)
    return d
def add_file(self, namex, uploadable, metadata=None, overwrite=True):
    """I upload a file (using the given IUploadable), then attach the
    resulting FileNode to the directory at the given name. I return a
    Deferred that fires (with the IFileNode of the uploaded file) when
    the operation completes."""
    name = normalize(namex)
    # fail early, before spending the upload, if we cannot write anyway
    if self.is_readonly():
        return defer.fail(NotWriteableError())
    d = self._uploader.upload(uploadable)
    # wrap the freshly-uploaded cap in a node (readcap only: None writecap)
    d.addCallback(lambda results:
                  self._create_and_validate_node(results.uri, None, name))
    d.addCallback(lambda node:
                  self.set_node(name, node, metadata, overwrite))
    return d
def delete(self, namex, must_exist=True, must_be_directory=False, must_be_file=False):
    """I remove the child at the specific name. I return a Deferred that
    fires (with the node just removed) when the operation finishes."""
    if self.is_readonly():
        return defer.fail(NotWriteableError())
    # Deleter enforces the must_* constraints inside the modify pass
    deleter = Deleter(self, namex, must_exist=must_exist,
                      must_be_directory=must_be_directory, must_be_file=must_be_file)
    d = self._node.modify(deleter.modify)
    # deleter.old_child is captured during the modify pass
    d.addCallback(lambda res: deleter.old_child)
    return d
# XXX: Too many arguments? Worthwhile to break into mutable/immutable?
def create_subdirectory(self, namex, initial_children=None, overwrite=True,
                        mutable=True, mutable_version=None, metadata=None):
    """I create a new (mutable or immutable) subdirectory, pre-populated
    with 'initial_children', and attach it at the given name. I return a
    Deferred that fires with the new child dirnode, or fails with
    NotWriteableError if I am read-only."""
    name = normalize(namex)
    if self.is_readonly():
        return defer.fail(NotWriteableError())
    # use a None sentinel instead of a mutable {} default argument, so a
    # single dict object is never shared between calls
    if initial_children is None:
        initial_children = {}
    if mutable:
        if mutable_version:
            d = self._nodemaker.create_new_mutable_directory(initial_children,
                                                             version=mutable_version)
        else:
            d = self._nodemaker.create_new_mutable_directory(initial_children)
    else:
        # mutable version doesn't make sense for immmutable directories.
        assert mutable_version is None
        d = self._nodemaker.create_immutable_directory(initial_children)
    def _created(child):
        # attach the freshly-created directory under 'name'
        entries = {name: (child, metadata)}
        a = Adder(self, entries, overwrite=overwrite,
                  create_readonly_node=self._create_readonly_node)
        d = self._node.modify(a.modify)
        d.addCallback(lambda res: child)
        return d
    d.addCallback(_created)
    return d
def move_child_to(self, current_child_namex, new_parent,
                  new_child_namex=None, overwrite=True):
    """I take one of my children and move them to a new parent. The child
    is referenced by name. On the new parent, the child will live under
    'new_child_name', which defaults to 'current_child_name'. I return a
    Deferred that fires when the operation finishes."""

    # both ends must be writeable: we add to new_parent and delete here
    if self.is_readonly() or new_parent.is_readonly():
        return defer.fail(NotWriteableError())

    current_child_name = normalize(current_child_namex)
    if new_child_namex is None:
        new_child_namex = current_child_name
    d = self.get(current_child_name)
    def sn(child):
        return new_parent.set_node(new_child_namex, child,
                                   overwrite=overwrite)
    d.addCallback(sn)
    # only delete the original once the new link has been written
    d.addCallback(lambda child: self.delete(current_child_name))
    return d
def deep_traverse(self, walker):
    """Perform a recursive walk, using this dirnode as a root, notifying
    the 'walker' instance of everything I encounter.

    I call walker.enter_directory(parent, children) once for each dirnode
    I visit, immediately after retrieving the list of children. I pass in
    the parent dirnode and the dict of childname->(childnode,metadata).
    This function should *not* traverse the children: I will do that.
    enter_directory() is most useful for the deep-stats number that
    counts how large a directory is.

    I call walker.add_node(node, path) for each node (both files and
    directories) I can reach. Most work should be done here.

    I avoid loops by keeping track of verifier-caps and refusing to call
    walker.add_node() or traverse a node that I've seen before. This
    means that any file or directory will only be given to the walker
    once. If files or directories are referenced multiple times by a
    directory structure, this may appear to under-count or miss some of
    them.

    I return a Monitor which can be used to wait for the operation to
    finish, learn about its progress, or cancel the operation.
    """

    # this is just a tree-walker, except that following each edge
    # requires a Deferred. We used to use a ConcurrencyLimiter to limit
    # fanout to 10 simultaneous operations, but the memory load of the
    # queued operations was excessive (in one case, with 330k dirnodes,
    # it caused the process to run into the 3.0GB-ish per-process 32bit
    # linux memory limit, and crashed). So we use a single big Deferred
    # chain, and do a strict depth-first traversal, one node at a time.
    # This can be slower, because we aren't pipelining directory reads,
    # but it brought the memory footprint down by roughly 50%.

    monitor = Monitor()
    walker.set_monitor(monitor)

    # seed the seen-set with our own verify-cap so the root is not revisited
    found = set([self.get_verify_cap()])
    d = self._deep_traverse_dirnode(self, [], walker, monitor, found)
    d.addCallback(lambda ignored: walker.finish())
    d.addBoth(monitor.finish)
    # outcomes are delivered through the monitor; swallow the Deferred's
    # result so any failure is not reported as an unhandled error
    d.addErrback(lambda f: None)

    return monitor
def _deep_traverse_dirnode(self, node, path, walker, monitor, found):
    # process this directory, then walk its children
    monitor.raise_if_cancelled()
    # add_node may raise synchronously; maybeDeferred keeps the chain safe
    d = defer.maybeDeferred(walker.add_node, node, path)
    d.addCallback(lambda ignored: node.list())
    d.addCallback(self._deep_traverse_dirnode_children, node, path,
                  walker, monitor, found)
    return d
def _deep_traverse_dirnode_children(self, children, parent, path,
                                    walker, monitor, found):
    # walk one directory's children: report files first, then recurse
    # into subdirectories
    monitor.raise_if_cancelled()
    d = defer.maybeDeferred(walker.enter_directory, parent, children)
    # we process file-like children first, so we can drop their FileNode
    # objects as quickly as possible. Tests suggest that a FileNode (held
    # in the client's nodecache) consumes about 2440 bytes. dirnodes (not
    # in the nodecache) seem to consume about 2000 bytes.
    dirkids = []
    filekids = []
    for name, (child, metadata) in sorted(children.iteritems()):
        childpath = path + [name]
        if isinstance(child, UnknownNode):
            # unknown nodes are reported to the walker but never traversed
            walker.add_node(child, childpath)
            continue
        verifier = child.get_verify_cap()
        # allow LIT files (for which verifier==None) to be processed
        if (verifier is not None) and (verifier in found):
            continue
        found.add(verifier)
        if IDirectoryNode.providedBy(child):
            dirkids.append( (child, childpath) )
        else:
            filekids.append( (child, childpath) )
    for i, (child, childpath) in enumerate(filekids):
        # default-arg binding captures each child/childpath pair per lambda
        d.addCallback(lambda ignored, child=child, childpath=childpath:
                      walker.add_node(child, childpath))
        # to work around the Deferred tail-recursion problem
        # (specifically the defer.succeed flavor) requires us to avoid
        # doing more than 158 LIT files in a row. We insert a turn break
        # once every 100 files (LIT or CHK) to preserve some stack space
        # for other code. This is a different expression of the same
        # Twisted problem as in #237.
        if i % 100 == 99:
            d.addCallback(lambda ignored: fireEventually())
    for (child, childpath) in dirkids:
        d.addCallback(lambda ignored, child=child, childpath=childpath:
                      self._deep_traverse_dirnode(child, childpath,
                                                  walker, monitor,
                                                  found))
    return d
def build_manifest(self):
    """Return a Monitor, with a ['status'] that will be a list of (path,
    cap) tuples, for all nodes (directories and files) reachable from
    this one."""
    walker = ManifestWalker(self)
    return self.deep_traverse(walker)
def start_deep_stats(self):
    """Start a recursive stats-gathering walk; returns the Monitor
    produced by deep_traverse()."""
    # Since deep_traverse tracks verifier caps, we avoid double-counting
    # children for which we've got both a write-cap and a read-cap
    return self.deep_traverse(DeepStats(self))
def start_deep_check(self, verify=False, add_lease=False):
    """Start a recursive check (without repair) of every reachable node;
    returns the Monitor produced by deep_traverse()."""
    return self.deep_traverse(DeepChecker(self, verify, repair=False, add_lease=add_lease))
def start_deep_check_and_repair(self, verify=False, add_lease=False):
    """Start a recursive check-and-repair of every reachable node;
    returns the Monitor produced by deep_traverse()."""
    return self.deep_traverse(DeepChecker(self, verify, repair=True, add_lease=add_lease))
class DeepStats:
    """Deep-traversal walker that accumulates counts, sizes, and a
    file-size histogram for every node it is shown."""

    def __init__(self, origin):
        # origin: the root node the traversal started from
        self.origin = origin
        self.stats = {}
        # all counters start at zero
        for k in ["count-immutable-files",
                  "count-mutable-files",
                  "count-literal-files",
                  "count-files",
                  "count-directories",
                  "count-unknown",
                  "size-immutable-files",
                  #"size-mutable-files",
                  "size-literal-files",
                  "size-directories",
                  "largest-directory",
                  "largest-directory-children",
                  "largest-immutable-file",
                  #"largest-mutable-file",
                  ]:
            self.stats[k] = 0
        self.histograms = {}
        for k in ["size-files-histogram"]:
            self.histograms[k] = {} # maps (min,max) to count
        # bucket boundaries grow by a factor of sqrt(10): two per decade
        self.buckets = [ (0,0), (1,3)]
        self.root = math.sqrt(10)

    def set_monitor(self, monitor):
        # publish our live results object through the monitor
        self.monitor = monitor
        monitor.origin_si = self.origin.get_storage_index()
        monitor.set_status(self.get_results())

    def add_node(self, node, childpath):
        # classify one node and bump the matching counters
        if isinstance(node, UnknownNode):
            self.add("count-unknown")
        elif IDirectoryNode.providedBy(node):
            self.add("count-directories")
        elif IMutableFileNode.providedBy(node):
            self.add("count-files")
            self.add("count-mutable-files")
            # TODO: update the servermap, compute a size, add it to
            # size-mutable-files, max it into "largest-mutable-file"
        elif IImmutableFileNode.providedBy(node): # CHK and LIT
            self.add("count-files")
            size = node.get_size()
            self.histogram("size-files-histogram", size)
            # distinguish LIT from CHK by parsing the cap string
            theuri = from_string(node.get_uri())
            if isinstance(theuri, LiteralFileURI):
                self.add("count-literal-files")
                self.add("size-literal-files", size)
            else:
                self.add("count-immutable-files")
                self.add("size-immutable-files", size)
                self.max("largest-immutable-file", size)

    def enter_directory(self, parent, children):
        # record the directory's serialized size (when known) and fanout
        dirsize_bytes = parent.get_size()
        if dirsize_bytes is not None:
            self.add("size-directories", dirsize_bytes)
            self.max("largest-directory", dirsize_bytes)
        dirsize_children = len(children)
        self.max("largest-directory-children", dirsize_children)

    def add(self, key, value=1):
        # increment a counter
        self.stats[key] += value

    def max(self, key, value):
        # track a running maximum
        self.stats[key] = max(self.stats[key], value)

    def which_bucket(self, size):
        # return (min,max) such that min <= size <= max
        # values are from the set (0,0), (1,3), (4,10), (11,31), (32,100),
        # (101,316), (317, 1000), etc: two per decade
        assert size >= 0
        i = 0
        while True:
            if i >= len(self.buckets):
                # extend the list
                new_lower = self.buckets[i-1][1]+1
                new_upper = int(mathutil.next_power_of_k(new_lower, self.root))
                self.buckets.append( (new_lower, new_upper) )
            maybe = self.buckets[i]
            if maybe[0] <= size <= maybe[1]:
                return maybe
            i += 1

    def histogram(self, key, size):
        # increment the histogram bucket that covers 'size'
        bucket = self.which_bucket(size)
        h = self.histograms[key]
        if bucket not in h:
            h[bucket] = 0
        h[bucket] += 1

    def get_results(self):
        # snapshot: counters plus each histogram flattened into sorted
        # (min, max, count) triples
        stats = self.stats.copy()
        for key in self.histograms:
            h = self.histograms[key]
            out = [ (bucket[0], bucket[1], h[bucket]) for bucket in h ]
            out.sort()
            stats[key] = out
        return stats

    def finish(self):
        return self.get_results()
2008-11-19 22:03:47 +00:00
|
|
|
class ManifestWalker(DeepStats):
    """DeepStats subclass that additionally records a manifest of
    (path, cap) tuples plus the sets of storage indexes and
    verify-caps encountered."""

    def __init__(self, origin):
        DeepStats.__init__(self, origin)
        self.manifest = []
        self.storage_index_strings = set()
        self.verifycaps = set()

    def add_node(self, node, path):
        # record this node's cap under its (tuple-ized) path
        self.manifest.append( (tuple(path), node.get_uri()) )
        si = node.get_storage_index()
        # LIT nodes have no storage index; skip them
        if si:
            self.storage_index_strings.add(base32.b2a(si))
        v = node.get_verify_cap()
        if v:
            self.verifycaps.add(v.to_string())
        # still accumulate the normal DeepStats counters
        return DeepStats.add_node(self, node, path)

    def get_results(self):
        # bundle the manifest data together with the usual stats
        stats = DeepStats.get_results(self)
        return {"manifest": self.manifest,
                "verifycaps": self.verifycaps,
                "storage-index": self.storage_index_strings,
                "stats": stats,
                }
2008-09-10 08:45:04 +00:00
|
|
|
|
|
|
|
class DeepChecker:
    """Deep-traversal walker that checks (and optionally repairs) every
    node it is shown, while also gathering DeepStats."""

    def __init__(self, root, verify, repair, add_lease):
        root_si = root.get_storage_index()
        # LIT roots have no storage index; log an empty string instead
        if root_si:
            root_si_base32 = base32.b2a(root_si)
        else:
            root_si_base32 = ""
        self._lp = log.msg(format="deep-check starting (%(si)s),"
                           " verify=%(verify)s, repair=%(repair)s",
                           si=root_si_base32, verify=verify, repair=repair)
        self._verify = verify
        self._repair = repair
        self._add_lease = add_lease
        # the results container depends on whether we will repair
        if repair:
            self._results = DeepCheckAndRepairResults(root_si)
        else:
            self._results = DeepCheckResults(root_si)
        self._stats = DeepStats(root)

    def set_monitor(self, monitor):
        # publish the (live) results object through the monitor
        self.monitor = monitor
        monitor.set_status(self._results)

    def add_node(self, node, childpath):
        # check (or check-and-repair) one node, record the outcome, and
        # feed the node to the stats gatherer
        if self._repair:
            d = node.check_and_repair(self.monitor, self._verify, self._add_lease)
            d.addCallback(self._results.add_check_and_repair, childpath)
        else:
            d = node.check(self.monitor, self._verify, self._add_lease)
            d.addCallback(self._results.add_check, childpath)
        d.addCallback(lambda ignored: self._stats.add_node(node, childpath))
        return d

    def enter_directory(self, parent, children):
        return self._stats.enter_directory(parent, children)

    def finish(self):
        log.msg("deep-check done", parent=self._lp)
        # fold the gathered stats into the check results before returning
        self._results.update_stats(self._stats.get_results())
        return self._results
2008-05-08 20:21:14 +00:00
|
|
|
|
2007-11-02 06:46:47 +00:00
|
|
|
# use client.create_dirnode() to make one of these
|
|
|
|
|
|
|
|
|