from zope.interface import implements
from twisted.internet import defer
from allmydata.storage.server import si_b2a
from allmydata.util import log, consumer
from allmydata.util.assertutil import precondition
from allmydata.interfaces import IEncryptedUploadable
from allmydata.immutable import upload

class Repairer(log.PrefixingLogMixin):
    """I generate any shares which were not available and upload them to
    servers.

    Which servers? Well, I just use the normal upload process, so any servers
    that will take shares. In fact, I even believe servers if they say that
    they already have shares even if attempts to download those shares would
    fail because the shares are corrupted.

    My process of uploading replacement shares proceeds in a segment-wise
    fashion -- first I ask servers if they can hold the new shares, and wait
    until enough have agreed then I download the first segment of the file
    and upload the first block of each replacement share, and only after all
    those blocks have been uploaded do I download the second segment of the
    file and upload the second block of each replacement share to its
    respective server. (I do it this way in order to minimize the amount of
    downloading I have to do and the amount of memory I have to use at any
    one time.)

    If any of the servers to which I am uploading replacement shares fails to
    accept the blocks during this process, then I just stop using that
    server, abandon any share-uploads that were going to that server, and
    proceed to finish uploading the remaining shares to their respective
    servers. At the end of my work, I produce an object which satisfies the
    ICheckAndRepairResults interface (by firing the deferred that I returned
    from start() and passing that check-and-repair-results object).

    Before I send any new request to a server, I always ask the 'monitor'
    object that was passed into my constructor whether this task has been
    cancelled (by invoking its raise_if_cancelled() method).
    """
    # NOTE: the docstring above was previously placed *after* the
    # implements() call, which made it a no-op string expression instead of
    # the class __doc__. It is now the first statement in the class body.

    implements(IEncryptedUploadable)

    def __init__(self, filenode, storage_broker, secret_holder, monitor):
        # Derive a short log prefix from the file's storage index so that
        # concurrent repairs of different files can be told apart in logs.
        logprefix = si_b2a(filenode.get_storage_index())[:5]
        log.PrefixingLogMixin.__init__(self, "allmydata.immutable.repairer",
                                       prefix=logprefix)
        self._filenode = filenode
        self._storage_broker = storage_broker
        self._secret_holder = secret_holder
        self._monitor = monitor
        # Read cursor into the file's plaintext/ciphertext, advanced by
        # read_encrypted() as the uploader consumes data.
        self._offset = 0

    def start(self):
        """Begin the repair.

        Fetches the file's segment size and verify-cap parameters, then
        re-uploads the ciphertext through the normal CHK upload process
        (with this object acting as the IEncryptedUploadable). Returns a
        Deferred that fires with the uploader's results.
        """
        self.log("starting repair")
        d = self._filenode.get_segment_size()
        def _got_segsize(segsize):
            vcap = self._filenode.get_verify_cap()
            k = vcap.needed_shares
            N = vcap.total_shares
            # Per ticket #1212
            # (http://tahoe-lafs.org/trac/tahoe-lafs/ticket/1212)
            # servers-of-happiness is deliberately disabled for repair.
            happy = 0
            self._encodingparams = (k, happy, N, segsize)
            ul = upload.CHKUploader(self._storage_broker, self._secret_holder)
            return ul.start(self) # I am the IEncryptedUploadable
        d.addCallback(_got_segsize)
        return d


    # methods to satisfy the IEncryptedUploader interface
    # (From the perspective of an uploader I am an IEncryptedUploadable.)
    def set_upload_status(self, upload_status):
        self.upload_status = upload_status

    def get_size(self):
        # The filenode knows its size; it must, or the repair cannot proceed.
        size = self._filenode.get_size()
        assert size is not None
        return defer.succeed(size)

    def get_all_encoding_parameters(self):
        # (k, happy, N, segsize), computed in start() before the uploader
        # ever asks for them.
        return defer.succeed(self._encodingparams)

    def read_encrypted(self, length, hash_only):
        """Returns a deferred which eventually fires with the requested
        ciphertext, as a list of strings."""
        precondition(length) # please don't ask to read 0 bytes
        mc = consumer.MemoryConsumer()
        d = self._filenode.read(mc, self._offset, length)
        # Advance the cursor immediately; reads are issued sequentially.
        self._offset += length
        d.addCallback(lambda ign: mc.chunks)
        return d

    def get_storage_index(self):
        return self._filenode.get_storage_index()

    def close(self):
        # Nothing to release: the MemoryConsumer is per-read and the
        # filenode is owned by our caller.
        pass