#620: storage: allow mutable shares to be deleted, with a writev where new_length=0

Brian Warner 2009-02-10 23:37:56 -07:00
parent 6a5f28f47d
commit 13a3ef5ec1
4 changed files with 74 additions and 18 deletions

View File

@@ -207,7 +207,7 @@ class RIStorageServer(RemoteInterface):
         can be used to pre-allocate space for a series of upcoming writes, or
         truncate existing data. If the container is growing, new_length will
         be applied before datav. If the container is shrinking, it will be
-        applied afterwards.
+        applied afterwards. If new_length==0, the share will be deleted.
         The read vector is used to extract data from all known shares,
         *before* any writes have been applied. The same vector is used for
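
The docstring change above is the whole client-visible contract: a test-and-write whose new_length is 0 now deletes the share. As a rough illustration (not part of the commit), a client holding a foolscap reference to an RIStorageServer and the slot's three secrets could delete one share as sketched below; the remote method name and vector formats are taken from the interface and the test further down, while the helper itself is hypothetical.

# Hypothetical helper, for illustration only.
def delete_mutable_share(rref, storage_index, write_enabler,
                         renew_secret, cancel_secret, sharenum):
    secrets = (write_enabler, renew_secret, cancel_secret)
    # empty test vector, empty data vector, new_length=0 -> delete this share
    tw_vectors = {sharenum: ([], [], 0)}
    read_vector = []  # nothing to read back
    d = rref.callRemote("slot_testv_and_readv_and_writev",
                        storage_index, secrets, tw_vectors, read_vector)
    # fires with (testv_ok, read_data); read_data still describes the shares
    # as they existed *before* the write was applied
    return d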

View File

@@ -34,6 +34,7 @@ class RemoteServiceConnector:
     "storage": { "http://allmydata.org/tahoe/protocols/storage/v1" :
                  { "maximum-immutable-share-size": 2**32,
                    "tolerates-immutable-read-overrun": False,
+                   "delete-mutable-shares-with-zero-length-writev": False,
                    },
       "application-version": "unknown: no get_version()",
     },

View File

@@ -963,6 +963,7 @@ class StorageServer(service.MultiService, Referenceable):
         version = { "http://allmydata.org/tahoe/protocols/storage/v1" :
                     { "maximum-immutable-share-size": remaining_space,
                       "tolerates-immutable-read-overrun": True,
+                      "delete-mutable-shares-with-zero-length-writev": True,
                      },
                     "application-version": str(allmydata.__version__),
                     }
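
The two version dictionaries follow Tahoe's usual capability-flag pattern: the client-side defaults above leave the new flag at False, and the storage server announces True once it actually implements the behaviour. A client that wants to rely on zero-length-writev deletion would check the announced version first; a minimal sketch (not part of the commit, helper name assumed):

STORAGE_V1 = "http://allmydata.org/tahoe/protocols/storage/v1"

def supports_zero_length_writev_delete(version):
    # 'version' is the dict returned by the server's get_version(); servers
    # that predate this commit simply omit the flag, so default to False.
    v1 = version.get(STORAGE_V1, {})
    return bool(v1.get("delete-mutable-shares-with-zero-length-writev", False))
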
@@ -1218,27 +1219,37 @@ class StorageServer(service.MultiService, Referenceable):
         for sharenum, share in shares.items():
             read_data[sharenum] = share.readv(read_vector)
+        ownerid = 1 # TODO
+        expire_time = time.time() + 31*24*60*60 # one month
+        lease_info = LeaseInfo(ownerid,
+                               renew_secret, cancel_secret,
+                               expire_time, self.my_nodeid)
         if testv_is_good:
             # now apply the write vectors
             for sharenum in test_and_write_vectors:
                 (testv, datav, new_length) = test_and_write_vectors[sharenum]
-                if sharenum not in shares:
-                    # allocate a new share
-                    allocated_size = 2000 # arbitrary, really
-                    share = self._allocate_slot_share(bucketdir, secrets,
-                                                      sharenum,
-                                                      allocated_size,
-                                                      owner_num=0)
-                    shares[sharenum] = share
-                shares[sharenum].writev(datav, new_length)
-            # and update the leases on all shares
-            ownerid = 1 # TODO
-            expire_time = time.time() + 31*24*60*60 # one month
-            lease_info = LeaseInfo(ownerid,
-                                   renew_secret, cancel_secret,
-                                   expire_time, self.my_nodeid)
-            for share in shares.values():
-                share.add_or_renew_lease(lease_info)
+                if new_length == 0:
+                    if sharenum in shares:
+                        shares[sharenum].unlink()
+                else:
+                    if sharenum not in shares:
+                        # allocate a new share
+                        allocated_size = 2000 # arbitrary, really
+                        share = self._allocate_slot_share(bucketdir, secrets,
+                                                          sharenum,
+                                                          allocated_size,
+                                                          owner_num=0)
+                        shares[sharenum] = share
+                    shares[sharenum].writev(datav, new_length)
+                    # and update the lease
+                    shares[sharenum].add_or_renew_lease(lease_info)
+            if new_length == 0:
+                # delete empty bucket directories
+                if not os.listdir(bucketdir):
+                    os.rmdir(bucketdir)
         # all done
         self.add_latency("writev", time.time() - start)
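
The server-side change has two parts: inside the loop, a write vector with new_length == 0 unlinks the share file instead of writing to it (and skips lease renewal, since there is nothing left to lease), and after the loop the now-empty bucket directory is removed. A stand-alone sketch of that cleanup rule, using hypothetical paths in place of the server's MutableShareFile objects:

import os

# Illustration only; the real code goes through MutableShareFile.unlink()
# and the server's own bucketdir bookkeeping.
def remove_share_and_maybe_bucket(bucketdir, sharenum):
    sharefile = os.path.join(bucketdir, "%d" % sharenum)
    if os.path.exists(sharefile):
        os.unlink(sharefile)          # delete the share's backing file
    if not os.listdir(bucketdir):     # last share gone: drop the empty bucket
        os.rmdir(bucketdir)

Note that, as the diff is written, the post-loop check reuses new_length from the final loop iteration, so the empty-bucket cleanup only triggers when the last entry in test_and_write_vectors was a deletion.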

View File

@@ -1150,6 +1150,50 @@ class MutableServer(unittest.TestCase):
         self.failUnlessRaises(IndexError,
                               ss.remote_cancel_lease, "si2", "nonsecret")
+
+    def test_remove(self):
+        ss = self.create("test_remove")
+        self.allocate(ss, "si1", "we1", self._lease_secret.next(),
+                      set([0,1,2]), 100)
+        readv = ss.remote_slot_readv
+        writev = ss.remote_slot_testv_and_readv_and_writev
+        secrets = ( self.write_enabler("we1"),
+                    self.renew_secret("we1"),
+                    self.cancel_secret("we1") )
+        # delete sh0 by setting its size to zero
+        answer = writev("si1", secrets,
+                        {0: ([], [], 0)},
+                        [])
+        # the answer should mention all the shares that existed before the
+        # write
+        self.failUnlessEqual(answer, (True, {0:[],1:[],2:[]}) )
+        # but a new read should show only sh1 and sh2
+        self.failUnlessEqual(readv("si1", [], [(0,10)]),
+                             {1: [""], 2: [""]})
+
+        # delete sh1 by setting its size to zero
+        answer = writev("si1", secrets,
+                        {1: ([], [], 0)},
+                        [])
+        self.failUnlessEqual(answer, (True, {1:[],2:[]}) )
+        self.failUnlessEqual(readv("si1", [], [(0,10)]),
+                             {2: [""]})
+
+        # delete sh2 by setting its size to zero
+        answer = writev("si1", secrets,
+                        {2: ([], [], 0)},
+                        [])
+        self.failUnlessEqual(answer, (True, {2:[]}) )
+        self.failUnlessEqual(readv("si1", [], [(0,10)]),
+                             {})
+
+        # and the bucket directory should now be gone
+        si = base32.b2a("si1")
+        # note: this is a detail of the storage server implementation, and
+        # may change in the future
+        prefix = si[:2]
+        prefixdir = os.path.join(self.workdir("test_remove"), "shares", prefix)
+        bucketdir = os.path.join(prefixdir, si)
+        self.failUnless(os.path.exists(prefixdir))
+        self.failIf(os.path.exists(bucketdir))
+
 class Stats(unittest.TestCase):