Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2024-12-19)
mutable/publish.py: raise FileTooLargeError instead of an ugly assertion when the SDMF restrictions are exceeded
parent 5289064dcf
commit f4496bd553
@@ -5,7 +5,7 @@ from itertools import count
 from zope.interface import implements
 from twisted.internet import defer
 from twisted.python import failure
-from allmydata.interfaces import IPublishStatus
+from allmydata.interfaces import IPublishStatus, FileTooLargeError
 from allmydata.util import base32, hashutil, mathutil, idlib, log
 from allmydata import hashtree, codec, storage
 from pycryptopp.cipher.aes import AES
@@ -136,6 +136,10 @@ class Publish:
         # 5: when enough responses are back, we're done

         self.log("starting publish, datalen is %s" % len(newdata))
+        if len(newdata) > self.MAX_SEGMENT_SIZE:
+            raise FileTooLargeError("SDMF is limited to one segment, and "
+                                    "%d > %d" % (len(newdata),
+                                                 self.MAX_SEGMENT_SIZE))
         self._status.set_size(len(newdata))
         self._status.set_status("Started")
         self._started = time.time()
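For context, here is a minimal standalone sketch of the behavior change in Publish.publish (a hedged reconstruction: the MAX_SEGMENT_SIZE value and the surrounding method skeleton are illustrative; only the exception name and the message text come from the hunk above). Before this commit, oversized data tripped a bare assertion; now callers get a descriptive, catchable exception:

    class FileTooLargeError(Exception):
        """Raised when data exceeds what the mutable-file format can hold."""

    class Publish:
        # Illustrative value only; SDMF stores the whole file in one segment.
        MAX_SEGMENT_SIZE = 3500000

        def publish(self, newdata):
            # old behavior (roughly): assert len(newdata) <= self.MAX_SEGMENT_SIZE
            if len(newdata) > self.MAX_SEGMENT_SIZE:
                raise FileTooLargeError("SDMF is limited to one segment, and "
                                        "%d > %d" % (len(newdata),
                                                     self.MAX_SEGMENT_SIZE))
            # ... continue with encoding and share placement ...

The remaining hunks update the test suite (the Filenode test case, apparently from test_mutable.py) to exercise the new exception.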
@@ -10,7 +10,8 @@ from allmydata.util.idlib import shortnodeid_b2a
 from allmydata.util.hashutil import tagged_hash
 from allmydata.util.fileutil import make_dirs
 from allmydata.encode import NotEnoughSharesError
-from allmydata.interfaces import IURI, IMutableFileURI, IUploadable
+from allmydata.interfaces import IURI, IMutableFileURI, IUploadable, \
+     FileTooLargeError
 from foolscap.eventual import eventually, fireEventually
 from foolscap.logging import log
 import sha
@@ -339,6 +340,21 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
         d.addCallback(_created)
         return d

+    def test_create_with_too_large_contents(self):
+        BIG = "a" * (Publish.MAX_SEGMENT_SIZE+1)
+        d = self.shouldFail(FileTooLargeError, "too_large",
+                            "SDMF is limited to one segment, and %d > %d" %
+                            (len(BIG), Publish.MAX_SEGMENT_SIZE),
+                            self.client.create_mutable_file, BIG)
+        d.addCallback(lambda res: self.client.create_mutable_file("small"))
+        def _created(n):
+            return self.shouldFail(FileTooLargeError, "too_large_2",
+                                   "SDMF is limited to one segment, and %d > %d" %
+                                   (len(BIG), Publish.MAX_SEGMENT_SIZE),
+                                   n.overwrite, BIG)
+        d.addCallback(_created)
+        return d
+
     def failUnlessCurrentSeqnumIs(self, n, expected_seqnum):
         d = n.get_servermap(MODE_READ)
         d.addCallback(lambda servermap: servermap.best_recoverable_version())
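The shouldFail helper used here comes from the project's test mixins (testutil.ShouldFailMixin); its implementation is not shown in this diff. A minimal sketch of what the calls above imply (the signature and failure-message wording are assumptions inferred from usage, assuming the mixin is combined with a unittest.TestCase as in the class declaration):

    from twisted.internet import defer
    from twisted.python import failure

    class ShouldFailMixin:
        def shouldFail(self, expected_failure, which, substring,
                       callable, *args, **kwargs):
            # Run the callable, expect it to fail with expected_failure,
            # and optionally check that the message contains substring.
            d = defer.maybeDeferred(callable, *args, **kwargs)
            def _done(res):
                if isinstance(res, failure.Failure):
                    res.trap(expected_failure)
                    if substring:
                        self.failUnless(substring in str(res),
                                        "%s: substring '%s' not in '%s'"
                                        % (which, substring, str(res)))
                else:
                    self.fail("%s was supposed to raise %s, not get %r"
                              % (which, expected_failure, res))
            d.addBoth(_done)
            return d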
@@ -355,6 +371,8 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
             return None
         def _error_modifier(old_contents):
             raise ValueError("oops")
+        def _toobig_modifier(old_contents):
+            return "b" * (Publish.MAX_SEGMENT_SIZE+1)
         calls = []
         def _ucw_error_modifier(old_contents):
             # simulate an UncoordinatedWriteError once
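These modifier functions follow the contract that n.modify expects (inferred from this test, not spelled out in the diff): each receives the file's current contents and returns the new contents to publish, or None to decline the write. For example:

    def _append_line(old_contents):
        return old_contents + "line3"   # publish old contents plus a suffix

    def _leave_alone(old_contents):
        return None                     # no write is performed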
@@ -387,6 +405,14 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
         d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
         d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2))

+        d.addCallback(lambda res:
+                      self.shouldFail(FileTooLargeError, "toobig_modifier",
+                                      "SDMF is limited to one segment",
+                                      n.modify, _toobig_modifier))
+        d.addCallback(lambda res: n.download_best_version())
+        d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
+        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2))
+
         d.addCallback(lambda res: n.modify(_ucw_error_modifier))
         d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
         d.addCallback(lambda res: n.download_best_version())
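The hunk above also pins down the failure semantics: when a modifier returns oversized contents, the publish is refused, and the file's contents ("line1line2") and sequence number (2) on the grid are unchanged. Application code can therefore treat FileTooLargeError as a clean, recoverable refusal. A hedged sketch of that pattern (the node API names are taken from the tests above; the error-handling policy itself is illustrative):

    from allmydata.interfaces import FileTooLargeError

    def append_suffix(node, suffix):
        # node is a mutable-file node as used in the tests above
        def _append(old_contents):
            return old_contents + suffix
        d = node.modify(_append)
        def _too_big(f):
            f.trap(FileTooLargeError)
            # the publish was refused; the file on the grid is unchanged
            return None
        d.addErrback(_too_big)
        return d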