Use IStorageServer instead of RemoteReference in a lot of places

This commit is contained in:
Jean-Paul Calderone 2019-05-31 13:41:07 -04:00
parent 46a2065357
commit 72cf590320
13 changed files with 120 additions and 108 deletions

View File

@ -123,9 +123,9 @@ class ControlServer(Referenceable, service.Service):
return results
server = everyone_left.pop(0)
server_name = server.get_longname()
connection = server.get_rref()
storage_server = server.get_storage_server()
start = time.time()
d = connection.callRemote("get_buckets", "\x00"*16)
d = storage_server.get_buckets("\x00" * 16)
def _done(ignored):
stop = time.time()
elapsed = stop - start

View File

@ -496,16 +496,19 @@ class Checker(log.PrefixingLogMixin):
that we want to track and report whether or not each server
responded.)"""
rref = s.get_rref()
storage_server = s.get_storage_server()
lease_seed = s.get_lease_seed()
if self._add_lease:
renew_secret = self._get_renewal_secret(lease_seed)
cancel_secret = self._get_cancel_secret(lease_seed)
d2 = rref.callRemote("add_lease", storageindex,
renew_secret, cancel_secret)
d2 = storage_server.add_lease(
storageindex,
renew_secret,
cancel_secret,
)
d2.addErrback(self._add_lease_failed, s.get_name(), storageindex)
d = rref.callRemote("get_buckets", storageindex)
d = storage_server.get_buckets(storageindex)
def _wrap_results(res):
return (res, True)

View File

@ -139,7 +139,7 @@ class ShareFinder(object):
# TODO: get the timer from a Server object, it knows best
self.overdue_timers[req] = reactor.callLater(self.OVERDUE_TIMEOUT,
self.overdue, req)
d = server.get_rref().callRemote("get_buckets", self._storage_index)
d = server.get_storage_server().get_buckets(self._storage_index)
d.addBoth(incidentally, self._request_retired, req)
d.addCallbacks(self._got_response, self._got_error,
callbackArgs=(server, req, d_ev, time_sent, lp),
@ -221,5 +221,3 @@ class ShareFinder(object):
self.log(format="got error from [%(name)s]",
name=server.get_name(), failure=f,
level=log.UNUSUAL, parent=lp, umid="zUKdCw")

View File

@ -54,7 +54,7 @@ class CHKCheckerAndUEBFetcher(object):
def _get_all_shareholders(self, storage_index):
dl = []
for s in self._peer_getter(storage_index):
d = s.get_rref().callRemote("get_buckets", storage_index)
d = s.get_storage_server().get_buckets(storage_index)
d.addCallbacks(self._got_response, self._got_error,
callbackArgs=(s,))
dl.append(d)

View File

@ -261,20 +261,21 @@ class ServerTracker(object):
return self._server.get_name()
def query(self, sharenums):
rref = self._server.get_rref()
d = rref.callRemote("allocate_buckets",
self.storage_index,
self.renew_secret,
self.cancel_secret,
sharenums,
self.allocated_size,
canary=Referenceable())
storage_server = self._server.get_storage_server()
d = storage_server.allocate_buckets(
self.storage_index,
self.renew_secret,
self.cancel_secret,
sharenums,
self.allocated_size,
canary=Referenceable(),
)
d.addCallback(self._buckets_allocated)
return d
def ask_about_existing_shares(self):
rref = self._server.get_rref()
return rref.callRemote("get_buckets", self.storage_index)
storage_server = self._server.get_storage_server()
return storage_server.get_buckets(self.storage_index)
def _buckets_allocated(self, alreadygot_and_buckets):
#log.msg("%s._got_reply(%s)" % (self, (alreadygot, buckets)))
@ -415,7 +416,7 @@ class Tahoe2ServerSelector(log.PrefixingLogMixin):
# field) from getting large shares (for files larger than about
# 12GiB). See #439 for details.
def _get_maxsize(server):
v0 = server.get_rref().version
v0 = server.get_version()
v1 = v0["http://allmydata.org/tahoe/protocols/storage/v1"]
return v1["maximum-immutable-share-size"]

View File

@ -230,7 +230,7 @@ class SDMFSlotWriteProxy(object):
"""
def __init__(self,
shnum,
rref, # a remote reference to a storage server
storage_server, # an IStorageServer
storage_index,
secrets, # (write_enabler, renew_secret, cancel_secret)
seqnum, # the sequence number of the mutable file
@ -239,7 +239,7 @@ class SDMFSlotWriteProxy(object):
segment_size,
data_length): # the length of the original file
self.shnum = shnum
self._rref = rref
self._storage_server = storage_server
self._storage_index = storage_index
self._secrets = secrets
self._seqnum = seqnum
@ -541,12 +541,13 @@ class SDMFSlotWriteProxy(object):
tw_vectors = {}
tw_vectors[self.shnum] = (self._testvs, datavs, None)
return self._rref.callRemote("slot_testv_and_readv_and_writev",
self._storage_index,
self._secrets,
tw_vectors,
# TODO is it useful to read something?
self._readvs)
return self._storage_server.slot_testv_and_readv_and_writev(
self._storage_index,
self._secrets,
tw_vectors,
# TODO is it useful to read something?
self._readvs,
)
MDMFHEADER = ">BQ32sBBQQ QQQQQQQQ"
@ -729,7 +730,7 @@ class MDMFSlotWriteProxy(object):
# disruption.
def __init__(self,
shnum,
rref, # a remote reference to a storage server
storage_server, # an IStorageServer
storage_index,
secrets, # (write_enabler, renew_secret, cancel_secret)
seqnum, # the sequence number of the mutable file
@ -738,7 +739,7 @@ class MDMFSlotWriteProxy(object):
segment_size,
data_length): # the length of the original file
self.shnum = shnum
self._rref = rref
self._storage_server = storage_server
self._storage_index = storage_index
self._seqnum = seqnum
self._required_shares = required_shares
@ -1159,11 +1160,12 @@ class MDMFSlotWriteProxy(object):
self._testvs = [(0, len(new_checkstring), "eq", new_checkstring)]
on_success = _first_write
tw_vectors[self.shnum] = (self._testvs, datavs, None)
d = self._rref.callRemote("slot_testv_and_readv_and_writev",
self._storage_index,
self._secrets,
tw_vectors,
self._readv)
d = self._storage_server.slot_testv_and_readv_and_writev(
self._storage_index,
self._secrets,
tw_vectors,
self._readv,
)
def _result(results):
if isinstance(results, failure.Failure) or not results[0]:
# Do nothing; the write was unsuccessful.
@ -1189,13 +1191,13 @@ class MDMFSlotReadProxy(object):
it is valid) to eliminate some of the need to fetch it from servers.
"""
def __init__(self,
rref,
storage_server,
storage_index,
shnum,
data="",
data_is_everything=False):
# Start the initialization process.
self._rref = rref
self._storage_server = storage_server
self._storage_index = storage_index
self.shnum = shnum
@ -1752,10 +1754,11 @@ class MDMFSlotReadProxy(object):
results = {self.shnum: results}
return defer.succeed(results)
else:
return self._rref.callRemote("slot_readv",
self._storage_index,
[self.shnum],
readvs)
return self._storage_server.slot_readv(
self._storage_index,
[self.shnum],
readvs,
)
def is_sdmf(self):

View File

@ -269,7 +269,7 @@ class Publish(object):
secrets = (write_enabler, renew_secret, cancel_secret)
writer = writer_class(shnum,
server.get_rref(),
server.get_storage_server(),
self._storage_index,
secrets,
self._new_seqnum,
@ -471,7 +471,7 @@ class Publish(object):
secrets = (write_enabler, renew_secret, cancel_secret)
writer = writer_class(shnum,
server.get_rref(),
server.get_storage_server(),
self._storage_index,
secrets,
self._new_seqnum,

View File

@ -309,7 +309,7 @@ class Retrieve(object):
if key in self.servermap.proxies:
reader = self.servermap.proxies[key]
else:
reader = MDMFSlotReadProxy(server.get_rref(),
reader = MDMFSlotReadProxy(server.get_storage_server(),
self._storage_index, shnum, None)
reader.server = server
self.readers[shnum] = reader
@ -906,9 +906,13 @@ class Retrieve(object):
def notify_server_corruption(self, server, shnum, reason):
rref = server.get_rref()
rref.callRemoteOnly("advise_corrupt_share",
"mutable", self._storage_index, shnum, reason)
storage_server = server.get_storage_server()
storage_server.advise_corrupt_share(
"mutable",
self._storage_index,
shnum,
reason,
)
def _try_to_validate_privkey(self, enc_privkey, reader, server):

View File

@ -592,7 +592,7 @@ class ServermapUpdater(object):
return d
def _do_read(self, server, storage_index, shnums, readv):
ss = server.get_rref()
ss = server.get_storage_server()
if self._add_lease:
# send an add-lease message in parallel. The results are handled
# separately. This is sent before the slot_readv() so that we can
@ -601,11 +601,14 @@ class ServermapUpdater(object):
# add_lease is synchronous).
renew_secret = self._node.get_renewal_secret(server)
cancel_secret = self._node.get_cancel_secret(server)
d2 = ss.callRemote("add_lease", storage_index,
renew_secret, cancel_secret)
d2 = ss.add_lease(
storage_index,
renew_secret,
cancel_secret,
)
# we ignore success
d2.addErrback(self._add_lease_failed, server, storage_index)
d = ss.callRemote("slot_readv", storage_index, shnums, readv)
d = ss.slot_readv(storage_index, shnums, readv)
return d
@ -638,7 +641,7 @@ class ServermapUpdater(object):
lp = self.log(format="got result from [%(name)s], %(numshares)d shares",
name=server.get_name(),
numshares=len(datavs))
ss = server.get_rref()
ss = server.get_storage_server()
now = time.time()
elapsed = now - started
def _done_processing(ignored=None):
@ -796,9 +799,13 @@ class ServermapUpdater(object):
def notify_server_corruption(self, server, shnum, reason):
rref = server.get_rref()
rref.callRemoteOnly("advise_corrupt_share",
"mutable", self._storage_index, shnum, reason)
ss = server.get_storage_server()
ss.advise_corrupt_share(
"mutable",
self._storage_index,
shnum,
reason,
)
def _got_signature_one_share(self, results, shnum, server, lp):
@ -1220,5 +1227,3 @@ class ServermapUpdater(object):
def _fatal_error(self, f):
self.log("fatal error", failure=f, level=log.WEIRD, umid="1cNvlw")
self._done_deferred.errback(f)

View File

@ -32,6 +32,9 @@ from allmydata.util import fileutil, idlib, hashutil
from allmydata.util.hashutil import permute_server_hash
from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.interfaces import IStorageBroker, IServer
from allmydata.storage_client import (
_StorageServer,
)
from .common import (
TEST_RSA_KEY_SIZE,
SameProcessStreamEndpointAssigner,
@ -166,6 +169,10 @@ class NoNetworkServer(object):
return "nickname"
def get_rref(self):
return self.rref
def get_storage_server(self):
if self.rref is None:
return None
return _StorageServer(lambda: self.rref)
def get_version(self):
return self.rref.version

View File

@ -601,8 +601,8 @@ class DownloadTest(_Base, unittest.TestCase):
# that they're old and can't handle reads that overrun the length of
# the share. This exercises a different code path.
for s in self.c0.storage_broker.get_connected_servers():
rref = s.get_rref()
v1 = rref.version["http://allmydata.org/tahoe/protocols/storage/v1"]
v = s.get_version()
v1 = v["http://allmydata.org/tahoe/protocols/storage/v1"]
v1["tolerates-immutable-read-overrun"] = False
n = self.c0.create_node_from_uri(immutable_uri)
@ -1178,8 +1178,8 @@ class DownloadV2(_Base, unittest.TestCase):
# that they're old and can't handle reads that overrun the length of
# the share. This exercises a different code path.
for s in self.c0.storage_broker.get_connected_servers():
rref = s.get_rref()
v1 = rref.version["http://allmydata.org/tahoe/protocols/storage/v1"]
v = s.get_version()
v1 = v["http://allmydata.org/tahoe/protocols/storage/v1"]
v1["tolerates-immutable-read-overrun"] = False
# upload a file
@ -1198,8 +1198,8 @@ class DownloadV2(_Base, unittest.TestCase):
self.c0 = self.g.clients[0]
for s in self.c0.storage_broker.get_connected_servers():
rref = s.get_rref()
v1 = rref.version["http://allmydata.org/tahoe/protocols/storage/v1"]
v = s.get_version()
v1 = v["http://allmydata.org/tahoe/protocols/storage/v1"]
v1["tolerates-immutable-read-overrun"] = False
# upload a file

View File

@ -16,6 +16,9 @@ from allmydata.interfaces import NotEnoughSharesError
from allmydata.immutable.upload import Data
from allmydata.immutable.downloader import finder
from .no_network import (
NoNetworkServer,
)
class MockShareHashTree(object):
def needed_hashes(self):
@ -106,19 +109,6 @@ class TestShareFinder(unittest.TestCase):
eventually(_give_buckets_and_hunger_again)
return d
class MockIServer(object):
def __init__(self, serverid, rref):
self.serverid = serverid
self.rref = rref
def get_serverid(self):
return self.serverid
def get_rref(self):
return self.rref
def get_name(self):
return "name-%s" % self.serverid
def get_version(self):
return self.rref.version
class MockStorageBroker(object):
def __init__(self, servers):
self.servers = servers
@ -136,9 +126,9 @@ class TestShareFinder(unittest.TestCase):
mockserver1 = MockServer({1: MockBuckets(), 2: MockBuckets()})
mockserver2 = MockServer({})
mockserver3 = MockServer({3: MockBuckets()})
servers = [ MockIServer("ms1", mockserver1),
MockIServer("ms2", mockserver2),
MockIServer("ms3", mockserver3), ]
servers = [ NoNetworkServer("ms1", mockserver1),
NoNetworkServer("ms2", mockserver2),
NoNetworkServer("ms3", mockserver3), ]
mockstoragebroker = MockStorageBroker(servers)
mockdownloadstatus = MockDownloadStatus()
mocknode = MockNode(check_reneging=True, check_fetch_failed=True)

View File

@ -32,6 +32,9 @@ from allmydata.test.common import LoggingServiceParent, ShouldFailMixin
from allmydata.test.common_web import WebRenderingMixin
from allmydata.test.no_network import NoNetworkServer
from allmydata.web.storage import StorageStatus, remove_prefix
from allmydata.storage_client import (
_StorageServer,
)
class Marker(object):
pass
@ -162,7 +165,8 @@ class Bucket(unittest.TestCase):
class RemoteBucket(object):
def __init__(self):
def __init__(self, target):
self.target = target
self.read_count = 0
self.write_count = 0
@ -188,8 +192,7 @@ class BucketProxy(unittest.TestCase):
fileutil.make_dirs(os.path.join(basedir, "tmp"))
bw = BucketWriter(self, incoming, final, size, self.make_lease(),
FakeCanary())
rb = RemoteBucket()
rb.target = bw
rb = RemoteBucket(bw)
return bw, rb, final
def make_lease(self):
@ -261,8 +264,7 @@ class BucketProxy(unittest.TestCase):
# now read everything back
def _start_reading(res):
br = BucketReader(self, sharefname)
rb = RemoteBucket()
rb.target = br
rb = RemoteBucket(br)
server = NoNetworkServer("abc", None)
rbp = rbp_class(rb, server, storage_index="")
self.failUnlessIn("to peer", repr(rbp))
@ -1374,8 +1376,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
self.sparent = LoggingServiceParent()
self._lease_secret = itertools.count()
self.ss = self.create("MDMFProxies storage test server")
self.rref = RemoteBucket()
self.rref.target = self.ss
self.rref = RemoteBucket(self.ss)
self.storage_server = _StorageServer(lambda: self.rref)
self.secrets = (self.write_enabler("we_secret"),
self.renew_secret("renew_secret"),
self.cancel_secret("cancel_secret"))
@ -1605,7 +1607,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
empty=False):
# Some tests need SDMF shares to verify that we can still
# read them. This method writes one, which resembles but is not
assert self.rref
write = self.ss.remote_slot_testv_and_readv_and_writev
share = self.build_test_sdmf_share(empty)
testvs = [(0, 1, "eq", "")]
@ -1618,7 +1619,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
def test_read(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
# Check that every method equals what we expect it to.
d = defer.succeed(None)
def _check_block_and_salt(block_and_salt):
@ -1690,7 +1691,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
def test_read_with_different_tail_segment_size(self):
self.write_test_share_to_server("si1", tail_segment=True)
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
d = mr.get_block_and_salt(5)
def _check_tail_segment(results):
block, salt = results
@ -1702,7 +1703,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
def test_get_block_with_invalid_segnum(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
d = defer.succeed(None)
d.addCallback(lambda ignored:
self.shouldFail(LayoutInvalid, "test invalid segnum",
@ -1713,7 +1714,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
def test_get_encoding_parameters_first(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
d = mr.get_encoding_parameters()
def _check_encoding_parameters(args):
(k, n, segment_size, datalen) = args
@ -1727,7 +1728,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
def test_get_seqnum_first(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
d = mr.get_seqnum()
d.addCallback(lambda seqnum:
self.failUnlessEqual(seqnum, 0))
@ -1736,7 +1737,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
def test_get_root_hash_first(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
d = mr.get_root_hash()
d.addCallback(lambda root_hash:
self.failUnlessEqual(root_hash, self.root_hash))
@ -1745,7 +1746,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
def test_get_checkstring_first(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
d = mr.get_checkstring()
d.addCallback(lambda checkstring:
self.failUnlessEqual(checkstring, self.checkstring))
@ -2060,7 +2061,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
# size of 6, we know that it has 6 byte segments, which will
# be split into blocks of 2 bytes because our FEC k
# parameter is 3.
mw = MDMFSlotWriteProxy(share, self.rref, si, self.secrets, 0, 3, 10,
mw = MDMFSlotWriteProxy(share, self.storage_server, si, self.secrets, 0, 3, 10,
6, datalength)
return mw
@ -2263,7 +2264,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
d.addCallback(lambda ignored:
mw.finish_publishing())
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
def _check_block_and_salt(block_and_salt):
(block, salt) = block_and_salt
self.failUnlessEqual(block, self.block)
@ -2331,7 +2332,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
# since it will encounter them on the grid. Callers use the
# is_sdmf method to test this.
self.write_sdmf_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
d = mr.is_sdmf()
d.addCallback(lambda issdmf:
self.failUnless(issdmf))
@ -2342,7 +2343,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
# The slot read proxy should, naturally, know how to tell us
# about data in the SDMF format
self.write_sdmf_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
d = defer.succeed(None)
d.addCallback(lambda ignored:
mr.is_sdmf())
@ -2413,7 +2414,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
# read more segments than that. The reader should know this and
# complain if we try to do that.
self.write_sdmf_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
d = defer.succeed(None)
d.addCallback(lambda ignored:
mr.is_sdmf())
@ -2435,7 +2436,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
mdmf_data = self.build_test_mdmf_share()
self.write_test_share_to_server("si1")
def _make_mr(ignored, length):
mr = MDMFSlotReadProxy(self.rref, "si1", 0, mdmf_data[:length])
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0, mdmf_data[:length])
return mr
d = defer.succeed(None)
@ -2496,7 +2497,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
sdmf_data = self.build_test_sdmf_share()
self.write_sdmf_share_to_server("si1")
def _make_mr(ignored, length):
mr = MDMFSlotReadProxy(self.rref, "si1", 0, sdmf_data[:length])
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0, sdmf_data[:length])
return mr
d = defer.succeed(None)
@ -2562,7 +2563,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
# unrelated to the actual handling of the content of the file.
# The reader should behave intelligently in these cases.
self.write_test_share_to_server("si1", empty=True)
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
# We should be able to get the encoding parameters, and they
# should be correct.
d = defer.succeed(None)
@ -2588,7 +2589,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
def test_read_with_empty_sdmf_file(self):
self.write_sdmf_share_to_server("si1", empty=True)
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
# We should be able to get the encoding parameters, and they
# should be correct
d = defer.succeed(None)
@ -2614,7 +2615,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
def test_verinfo_with_sdmf_file(self):
self.write_sdmf_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
# We should be able to get the version information.
d = defer.succeed(None)
d.addCallback(lambda ignored:
@ -2655,7 +2656,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
def test_verinfo_with_mdmf_file(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr = MDMFSlotReadProxy(self.storage_server, "si1", 0)
d = defer.succeed(None)
d.addCallback(lambda ignored:
mr.get_verinfo())
@ -2701,7 +2702,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
# set the way we want them for the tests below.
data = self.build_test_sdmf_share()
sdmfr = SDMFSlotWriteProxy(0,
self.rref,
self.storage_server,
"si1",
self.secrets,
0, 3, 10, 36, 36)
@ -2744,7 +2745,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
# write, we need to set the checkstring correctly. When we
# don't, no write should occur.
sdmfw = SDMFSlotWriteProxy(0,
self.rref,
self.storage_server,
"si1",
self.secrets,
1, 3, 10, 36, 36)