storage: remove update_write_enabler method, it won't serve the desired purpose, and I have a better scheme in mind. See #489 for details

Brian Warner 2008-07-21 17:28:28 -07:00
parent cbadcc86cc
commit afda2a43e4
3 changed files with 0 additions and 74 deletions

View File

@@ -226,24 +226,6 @@ class RIStorageServer(RemoteInterface):
        """
        return TupleOf(bool, DictOf(int, ReadData))

    def update_write_enabler(storage_index=StorageIndex,
                             old_write_enabler=WriteEnablerSecret,
                             new_write_enabler=WriteEnablerSecret):
        """
        Replace the write-enabler on a given bucket. This is used when a
        share has been moved from one server to another, causing the secret
        (which is scoped to a given server's nodeid) to become invalid. The
        client discovers this when it gets a BadWriteEnablerError, and the
        string body of the exception will contain a message that includes the
        nodeid that was used for the old secret.

        The client should compute the old write-enabler secret, and send it
        in conjunction with the new one. The server will then update the
        share to record the new write-enabler instead of the old one. The
        client can then retry its writev call.
        """
        return None

class IStorageBucketWriter(Interface):
    def put_block(segmentnum=int, data=ShareData):
        """@param data: For most segments, this data will be 'blocksize'

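For context, the docstring above describes the recovery flow this interface was meant to enable: derive the old per-server secret, swap it for the new one, then retry the write. A minimal client-side sketch of that flow follows; it is illustrative only, not code from the Tahoe-LAFS tree. The derive_write_enabler() helper, the error-message parsing, and the server.writev()/server.update_write_enabler() call shapes are simplified stand-ins for the real remote methods in RIStorageServer.

import base64
import hashlib
import re

class BadWriteEnablerError(Exception):
    # Stand-in for the server-side exception; per the test below, its message
    # names the base32 nodeid that recorded the existing write-enabler.
    pass

def derive_write_enabler(client_secret, server_nodeid, storage_index):
    # Hypothetical derivation: write-enablers are scoped to a single server's
    # nodeid, which is why a share copied to a different server no longer
    # matches the secret the client computes for that server.
    return hashlib.sha256(client_secret + server_nodeid + storage_index).digest()

def writev_with_enabler_repair(server, client_secret, storage_index,
                               tw_vectors, read_vector):
    new_we = derive_write_enabler(client_secret, server.nodeid, storage_index)
    try:
        return server.writev(storage_index, new_we, tw_vectors, read_vector)
    except BadWriteEnablerError as e:
        # The error text includes the nodeid used for the old secret, so the
        # client can re-derive the old write-enabler, ask the server to swap
        # it for the new one, and then retry the original write.
        old_nodeid_b32 = re.search(r"recorded by nodeid '([a-z2-7]+)'",
                                   str(e)).group(1)
        old_nodeid = base64.b32decode(old_nodeid_b32.upper())
        old_we = derive_write_enabler(client_secret, old_nodeid, storage_index)
        server.update_write_enabler(storage_index, old_we, new_we)
        return server.writev(storage_index, new_we, tw_vectors, read_vector)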
View File

@@ -700,16 +700,6 @@ class MutableShareFile:
                  (idlib.nodeid_b2a(write_enabler_nodeid),)
            raise BadWriteEnablerError(msg)

    def update_write_enabler(self, old_write_enabler, new_write_enabler,
                             my_nodeid, si_s):
        self.check_write_enabler(old_write_enabler, si_s)
        f = open(self.home, 'rb+')
        f.seek(0)
        header = struct.pack(">32s20s32s",
                             self.MAGIC, my_nodeid, new_write_enabler)
        f.write(header)
        f.close()

    def check_testv(self, testv):
        test_good = True
        f = open(self.home, 'rb+')
@@ -1199,15 +1189,6 @@ class StorageServer(service.MultiService, Referenceable):
        self.add_latency("readv", time.time() - start)
        return datavs

    def remote_update_write_enabler(self, storage_index,
                                    old_write_enabler, new_write_enabler):
        si_s = si_b2a(storage_index)
        for sf in self._iter_share_files(storage_index):
            if not isinstance(sf, MutableShareFile):
                continue
            sf.update_write_enabler(old_write_enabler, new_write_enabler,
                                    self.my_nodeid, si_s)

# the code before here runs on the storage server, not the client
# the code beyond here runs on the client, not the storage server

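For reference, the struct format ">32s20s32s" in the removed method implies that a mutable share file begins with a 32-byte magic string, the 20-byte nodeid of the server that recorded the write-enabler, and the 32-byte write-enabler itself. The sketch below shows reading and rewriting that fixed-size header under that assumption; the function and constant names are hypothetical, not part of the storage.py API.

import struct

# ">32s20s32s": 32-byte magic, 20-byte server nodeid, 32-byte write-enabler.
MUTABLE_HEADER_FORMAT = ">32s20s32s"
MUTABLE_HEADER_SIZE = struct.calcsize(MUTABLE_HEADER_FORMAT)  # 84 bytes

def read_mutable_header(path):
    # Read only the fixed-size header at the front of a mutable share file.
    with open(path, "rb") as f:
        return struct.unpack(MUTABLE_HEADER_FORMAT, f.read(MUTABLE_HEADER_SIZE))

def rewrite_write_enabler(path, magic, my_nodeid, new_write_enabler):
    # Mirrors what the removed method did: after the old write-enabler has
    # been checked, overwrite the header in place at offset 0, leaving the
    # share data that follows it untouched.
    with open(path, "rb+") as f:
        f.seek(0)
        f.write(struct.pack(MUTABLE_HEADER_FORMAT,
                            magic, my_nodeid, new_write_enabler))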
View File

@@ -1077,43 +1077,6 @@ class MutableServer(unittest.TestCase):
        self.failUnlessRaises(IndexError,
                              ss.remote_cancel_lease, "si2", "nonsecret")

    def test_update_write_enabler(self):
        ss = self.create("test_update_write_enabler", sizelimit=1000*1000)
        secrets = ( self.write_enabler("we1"),
                    self.renew_secret("we1-0"),
                    self.cancel_secret("we1-0") )
        old_write_enabler = secrets[0]
        new_write_enabler = self.write_enabler("we2")
        new_secrets = (new_write_enabler, secrets[1], secrets[2])
        data = "".join([ ("%d" % i) * 10 for i in range(10) ])
        write = ss.remote_slot_testv_and_readv_and_writev
        read = ss.remote_slot_readv
        update_write_enabler = ss.remote_update_write_enabler

        rc = write("si1", secrets, {0: ([], [(0,data)], None)}, [])
        self.failUnlessEqual(rc, (True, {}))
        rc = write("si1", secrets, {0: ([], [(1,data)], None)}, [])
        self.failUnlessEqual(rc[0], True)

        f = self.failUnlessRaises(BadWriteEnablerError,
                                  write, "si1", new_secrets,
                                  {}, [])
        self.failUnless("The write enabler was recorded by nodeid 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'." in f, f)

        ss.setNodeID("\xff" * 20)
        rc = update_write_enabler("si1", old_write_enabler, new_write_enabler)
        self.failUnlessEqual(rc, None)

        f = self.failUnlessRaises(BadWriteEnablerError,
                                  write, "si1", secrets,
                                  {}, [])
        self.failUnless("The write enabler was recorded by nodeid '77777777777777777777777777777777'." in f, f)

        rc = write("si1", new_secrets, {0: ([], [(2,data)], None)}, [])
        self.failUnlessEqual(rc[0], True)

class Stats(unittest.TestCase):