Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2025-06-22 08:50:07 +00:00)
storage: improve logging a bit
src/allmydata/storage.py

@@ -9,7 +9,7 @@ from zope.interface import implements
 from allmydata.interfaces import RIStorageServer, RIBucketWriter, \
      RIBucketReader, IStorageBucketWriter, IStorageBucketReader, HASH_SIZE, \
      BadWriteEnablerError
-from allmydata.util import fileutil, idlib, mathutil
+from allmydata.util import fileutil, idlib, mathutil, log
 from allmydata.util.assertutil import precondition, _assert

 class DataTooLargeError(Exception):
@@ -257,7 +257,7 @@ class MutableShareFile:
     MAX_SIZE = 2*1000*1000*1000 # 2GB, kind of arbitrary
     # TODO: decide upon a policy for max share size

-    def __init__(self, filename):
+    def __init__(self, filename, parent=None):
         self.home = filename
         if os.path.exists(self.home):
             # we don't cache anything, just check the magic
@@ -268,7 +268,10 @@ class MutableShareFile:
              data_length, extra_least_offset) = \
              struct.unpack(">32s20s32sQQ", data)
             assert magic == self.MAGIC
+        self.parent = parent # for logging

+    def log(self, *args, **kwargs):
+        return self.parent.log(*args, **kwargs)

     def create(self, my_nodeid, write_enabler):
         assert not os.path.exists(self.home)
@@ -555,7 +558,7 @@ class MutableShareFile:
         # f.close()
         # return data_length

-    def check_write_enabler(self, write_enabler):
+    def check_write_enabler(self, write_enabler, si_s):
         f = open(self.home, 'rb+')
         (real_write_enabler, write_enabler_nodeid) = \
          self._read_write_enabler_and_nodeid(f)
@@ -563,6 +566,11 @@ class MutableShareFile:
         if write_enabler != real_write_enabler:
             # accomodate share migration by reporting the nodeid used for the
             # old write enabler.
+            self.log(format="bad write enabler on SI %(si)s,"
+                     " recorded by nodeid %(nodeid)s",
+                     facility="tahoe.storage",
+                     level=log.WEIRD,
+                     si=si_s, nodeid=idlib.nodeid_b2a(write_enabler_nodeid))
             msg = "The write enabler was recorded by nodeid '%s'." % \
                   (idlib.nodeid_b2a(write_enabler_nodeid),)
             raise BadWriteEnablerError(msg)
@@ -615,11 +623,11 @@ class EmptyShare:
                 break
         return test_good

-def create_mutable_sharefile(filename, my_nodeid, write_enabler):
-    ms = MutableShareFile(filename)
+def create_mutable_sharefile(filename, my_nodeid, write_enabler, parent):
+    ms = MutableShareFile(filename, parent)
     ms.create(my_nodeid, write_enabler)
     del ms
-    return MutableShareFile(filename)
+    return MutableShareFile(filename, parent)


 class StorageServer(service.MultiService, Referenceable):
@@ -640,8 +648,9 @@ class StorageServer(service.MultiService, Referenceable):
         self._active_writers = weakref.WeakKeyDictionary()
         self.measure_size()

-    def log(self, msg):
-        #self.parent.log(msg)
+    def log(self, *args, **kwargs):
+        if self.parent:
+            return self.parent.log(*args, **kwargs)
         return

     def setNodeID(self, nodeid):
@@ -737,7 +746,7 @@ class StorageServer(service.MultiService, Referenceable):
             header = f.read(32)
             f.close()
             if header[:32] == MutableShareFile.MAGIC:
-                sf = MutableShareFile(filename)
+                sf = MutableShareFile(filename, self)
                 # note: if the share has been migrated, the renew_lease()
                 # call will throw an exception, with information to help the
                 # client update the lease.
@@ -765,7 +774,7 @@ class StorageServer(service.MultiService, Referenceable):
             header = f.read(32)
             f.close()
             if header[:32] == MutableShareFile.MAGIC:
-                sf = MutableShareFile(filename)
+                sf = MutableShareFile(filename, self)
                 # note: if the share has been migrated, the renew_lease()
                 # call will throw an exception, with information to help the
                 # client update the lease.
@@ -857,8 +866,8 @@ class StorageServer(service.MultiService, Referenceable):
                 except ValueError:
                     continue
                 filename = os.path.join(bucketdir, sharenum_s)
-                msf = MutableShareFile(filename)
-                msf.check_write_enabler(write_enabler)
+                msf = MutableShareFile(filename, self)
+                msf.check_write_enabler(write_enabler, si_s)
                 shares[sharenum] = msf
         # write_enabler is good for all existing shares.

@@ -917,7 +926,8 @@ class StorageServer(service.MultiService, Referenceable):
         my_nodeid = self.my_nodeid
         fileutil.make_dirs(bucketdir)
         filename = os.path.join(bucketdir, "%d" % sharenum)
-        share = create_mutable_sharefile(filename, my_nodeid, write_enabler)
+        share = create_mutable_sharefile(filename, my_nodeid, write_enabler,
+                                         self)
         return share

     def remote_slot_readv(self, storage_index, shares, readv):
@@ -934,7 +944,7 @@ class StorageServer(service.MultiService, Referenceable):
                 continue
             if sharenum in shares or not shares:
                 filename = os.path.join(bucketdir, sharenum_s)
-                msf = MutableShareFile(filename)
+                msf = MutableShareFile(filename, self)
                 datavs[sharenum] = msf.readv(readv)
         return datavs

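Taken together, the storage.py changes above form a small logging-delegation chain: a MutableShareFile forwards its log() calls to the StorageServer that created it (passed in as parent), and the StorageServer forwards to its own service parent when one is attached. The sketch below illustrates that pattern with stand-in classes; the class names and the print-based sink are illustrative assumptions, not the real Tahoe-LAFS API.

# Illustrative sketch of the parent-delegation logging pattern in the diff.
# LogSink / ServerStandIn / ShareStandIn are hypothetical stand-ins; the real
# classes are the node/service owning allmydata.util.log, StorageServer, and
# MutableShareFile.

class LogSink:
    def log(self, *args, **kwargs):
        # the real code would hand this to allmydata.util.log.msg(...)
        print("LOG", args, kwargs)

class ServerStandIn:
    def __init__(self, parent=None):
        self.parent = parent            # logging-aware parent, may be None

    def log(self, *args, **kwargs):
        if self.parent:                 # mirrors StorageServer.log above
            return self.parent.log(*args, **kwargs)
        return                          # no parent attached: message dropped

class ShareStandIn:
    def __init__(self, filename, parent=None):
        self.home = filename
        self.parent = parent            # the server, kept "for logging"

    def log(self, *args, **kwargs):
        # delegate upward, like MutableShareFile.log above
        return self.parent.log(*args, **kwargs)

# wiring: sink <- server <- share
server = ServerStandIn(parent=LogSink())
share = ShareStandIn("sh7", parent=server)
share.log(format="bad write enabler on SI %(si)s", si="b32si", level="WEIRD")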
src/allmydata/test/common.py

@@ -3,9 +3,11 @@ import os
 from zope.interface import implements
 from twisted.internet import defer
 from twisted.python import failure
+from twisted.application import service
 from allmydata import uri, dirnode
 from allmydata.interfaces import IURI, IMutableFileNode, IFileNode
 from allmydata.encode import NotEnoughPeersError
+from allmydata.util import log

 class FakeCHKFileNode:
     """I provide IFileNode, but all of my data is stored in a class-level
@@ -115,3 +117,7 @@ class NonGridDirectoryNode(dirnode.NewDirectoryNode):
     look inside the dirnodes and check their contents.
     """
     filenode_class = FakeMutableFileNode
+
+class LoggingServiceParent(service.MultiService):
+    def log(self, *args, **kwargs):
+        return log.msg(*args, **kwargs)
src/allmydata/test/test_storage.py

@@ -1,7 +1,6 @@

 from twisted.trial import unittest

-from twisted.application import service
 from twisted.internet import defer
 from foolscap import Referenceable
 import time, os.path, stat
@@ -11,6 +10,7 @@ from allmydata.util import fileutil, hashutil, idlib
 from allmydata.storage import BucketWriter, BucketReader, \
      WriteBucketProxy, ReadBucketProxy, StorageServer, MutableShareFile
 from allmydata.interfaces import BadWriteEnablerError
+from allmydata.test.common import LoggingServiceParent

 class Bucket(unittest.TestCase):
     def make_workdir(self, name):
@@ -178,7 +178,7 @@ class BucketProxy(unittest.TestCase):
 class Server(unittest.TestCase):

     def setUp(self):
-        self.sparent = service.MultiService()
+        self.sparent = LoggingServiceParent()
         self._lease_secret = itertools.count()
     def tearDown(self):
         return self.sparent.stopService()
@@ -444,7 +444,7 @@ class Server(unittest.TestCase):
 class MutableServer(unittest.TestCase):

     def setUp(self):
-        self.sparent = service.MultiService()
+        self.sparent = LoggingServiceParent()
         self._lease_secret = itertools.count()
     def tearDown(self):
         return self.sparent.stopService()
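The test changes give each storage server under test a service parent whose log() reaches allmydata.util.log.msg: setUp() builds a LoggingServiceParent instead of a bare service.MultiService, so storage-layer log calls made during a test are actually recorded. A minimal sketch of that wiring, using a hypothetical generic Twisted service child rather than the real StorageServer constructor (whose arguments are not shown in this diff):

# Hedged sketch of the test wiring; ChildService is hypothetical, but the
# delegation shape matches StorageServer.log in the diff above.
from twisted.application import service
from allmydata.test.common import LoggingServiceParent

class ChildService(service.Service):
    def log(self, *args, **kwargs):
        if self.parent:
            return self.parent.log(*args, **kwargs)

sparent = LoggingServiceParent()
child = ChildService()
child.setServiceParent(sparent)      # Twisted sets child.parent = sparent
child.log(format="hello from %(who)s", who="child", facility="tahoe.test")
# ...which ends up in allmydata.util.log.msg via LoggingServiceParent.log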