mirror of https://github.com/tahoe-lafs/tahoe-lafs.git
use 'with open' correctly
parent 0a43409adc
commit 4b0c730adf
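The change applied throughout this commit is the same in every hunk: a bare open() whose return value is read or written inline is only closed whenever the file object happens to be finalized, and an explicit f.close() is skipped entirely if an earlier line raises. A with-statement closes the file on every exit path. A minimal standalone sketch of the two shapes (the function names here are illustrative, not code from this repository):

# Old shape: nothing closes the handle explicitly; closing is left to the
# interpreter's finalization of the file object.
def read_url_old(path):
    return open(path, "r").read().strip()

# New shape: the context manager closes the file on return *and* on error.
def read_url_new(path):
    with open(path, "r") as f:
        return f.read().strip()

# What the with-statement guarantees, spelled out with try/finally:
def read_url_explicit(path):
    f = open(path, "r")
    try:
        return f.read().strip()
    finally:
        f.close()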
@@ -38,7 +38,8 @@ class FileStoreOptions(BaseOptions):
                 raise usage.UsageError(msg)
         else:
             node_url_file = os.path.join(self['node-directory'], "node.url")
-            self['node-url'] = open(node_url_file, "r").read().strip()
+            with open(node_url_file, "r") as f:
+                self['node-url'] = f.read().strip()
         if self['node-url'][-1] != "/":
             self['node-url'] += "/"

@@ -50,7 +50,6 @@ class ShareFile(object):
             # it. Also construct the metadata.
             assert not os.path.exists(self.home)
             fileutil.make_dirs(os.path.dirname(self.home))
-            f = open(self.home, 'wb')
             # The second field -- the four-byte share data length -- is no
             # longer used as of Tahoe v1.3.0, but we continue to write it in
             # there in case someone downgrades a storage server from >=
@@ -60,15 +59,14 @@ class ShareFile(object):
             # the largest length that can fit into the field. That way, even
             # if this does happen, the old < v1.3.0 server will still allow
             # clients to read the first part of the share.
-            f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
-            f.close()
+            with open(self.home, 'wb') as f:
+                f.write(struct.pack(">LLL", 1, min(2**32-1, max_size), 0))
             self._lease_offset = max_size + 0x0c
             self._num_leases = 0
         else:
-            f = open(self.home, 'rb')
-            filesize = os.path.getsize(self.home)
-            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
-            f.close()
+            with open(self.home, 'rb') as f:
+                filesize = os.path.getsize(self.home)
+                (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
             if version != 1:
                 msg = "sharefile %s had version %d but we wanted 1" % \
                       (filename, version)
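The header written and read above is three big-endian 32-bit fields (struct format ">LLL"): the container version (1), the legacy share-data length (saturated at 2**32-1 and unused since v1.3.0, per the comments), and the lease count. That is 12 bytes, 0x0c, which is why the lease area starts at max_size + 0x0c. A standalone sketch packing and unpacking such a header, using only what the hunk itself shows:

import struct

HEADER = ">LLL"                          # version, legacy data length, lease count
HEADER_SIZE = struct.calcsize(HEADER)    # 12 bytes == 0x0c

def pack_header(max_size):
    # Saturate the legacy length field at 2**32-1, as the code above does.
    return struct.pack(HEADER, 1, min(2**32 - 1, max_size), 0)

def unpack_header(raw):
    return struct.unpack(HEADER, raw[:HEADER_SIZE])

hdr = pack_header(1000)
assert unpack_header(hdr) == (1, 1000, 0)
assert HEADER_SIZE == 0x0c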
@@ -88,21 +86,20 @@ class ShareFile(object):
         actuallength = max(0, min(length, self._lease_offset-seekpos))
         if actuallength == 0:
             return ""
-        f = open(self.home, 'rb')
-        f.seek(seekpos)
-        return f.read(actuallength)
+        with open(self.home, 'rb') as f:
+            f.seek(seekpos)
+            return f.read(actuallength)

     def write_share_data(self, offset, data):
         length = len(data)
         precondition(offset >= 0, offset)
         if self._max_size is not None and offset+length > self._max_size:
             raise DataTooLargeError(self._max_size, offset, length)
-        f = open(self.home, 'rb+')
-        real_offset = self._data_offset+offset
-        f.seek(real_offset)
-        assert f.tell() == real_offset
-        f.write(data)
-        f.close()
+        with open(self.home, 'rb+') as f:
+            real_offset = self._data_offset+offset
+            f.seek(real_offset)
+            assert f.tell() == real_offset
+            f.write(data)

     def _write_lease_record(self, f, lease_number, lease_info):
         offset = self._lease_offset + lease_number * self.LEASE_SIZE
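In read_share_data above, the return happens inside the with-block. That is fine: leaving the block for any reason (return, exception, or falling off the end) runs the file's __exit__ and closes it before the caller sees the data. A standalone sketch demonstrating this (all names here are illustrative):

import os, tempfile

captured = {}

def read_slice(path, seekpos, length):
    # Returning from inside the with-block still runs the file's __exit__,
    # so the handle is closed before the caller gets the bytes.
    with open(path, "rb") as f:
        captured["f"] = f          # kept only so we can inspect .closed below
        f.seek(seekpos)
        return f.read(length)

fd, path = tempfile.mkstemp()
os.write(fd, b"0123456789")
os.close(fd)
assert read_slice(path, 2, 4) == b"2345"
assert captured["f"].closed        # closed by the with-statement on return
os.unlink(path)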
@@ -124,20 +121,19 @@ class ShareFile(object):

     def get_leases(self):
         """Yields a LeaseInfo instance for all leases."""
-        f = open(self.home, 'rb')
-        (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
-        f.seek(self._lease_offset)
-        for i in range(num_leases):
-            data = f.read(self.LEASE_SIZE)
-            if data:
-                yield LeaseInfo().from_immutable_data(data)
+        with open(self.home, 'rb') as f:
+            (version, unused, num_leases) = struct.unpack(">LLL", f.read(0xc))
+            f.seek(self._lease_offset)
+            for i in range(num_leases):
+                data = f.read(self.LEASE_SIZE)
+                if data:
+                    yield LeaseInfo().from_immutable_data(data)

     def add_lease(self, lease_info):
-        f = open(self.home, 'rb+')
-        num_leases = self._read_num_leases(f)
-        self._write_lease_record(f, num_leases, lease_info)
-        self._write_num_leases(f, num_leases+1)
-        f.close()
+        with open(self.home, 'rb+') as f:
+            num_leases = self._read_num_leases(f)
+            self._write_lease_record(f, num_leases, lease_info)
+            self._write_num_leases(f, num_leases+1)

     def renew_lease(self, renew_secret, new_expire_time):
         for i,lease in enumerate(self.get_leases()):
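One subtlety in get_leases() above: it is a generator, so the file is opened on the first next() and held open across yields. A caller that iterates to completion gets the file closed by the with-block as the loop ends; a caller that abandons the generator early (for example by returning out of the loop, as renew_lease appears to do once it finds a matching lease) leaves the close to generator finalization, which is prompt under CPython's reference counting but may be delayed on other interpreters unless .close() is called. A standalone sketch of the same shape, with an illustrative record size and format:

import struct

RECORD_SIZE = 8      # illustrative; ShareFile.LEASE_SIZE is a different fixed size
RECORD_FMT = ">Q"    # illustrative payload: one big-endian 64-bit integer

def iter_records(path, offset, count):
    # Generator version of the pattern above: the file opens on the first
    # next() and is closed when the with-block is exited -- i.e. when the
    # generator is exhausted, .close()d, or finalized.
    with open(path, "rb") as f:
        f.seek(offset)
        for _ in range(count):
            data = f.read(RECORD_SIZE)
            if len(data) == RECORD_SIZE:
                yield struct.unpack(RECORD_FMT, data)[0]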
@@ -146,9 +142,8 @@ class ShareFile(object):
                 if new_expire_time > lease.expiration_time:
                     # yes
                     lease.expiration_time = new_expire_time
-                    f = open(self.home, 'rb+')
-                    self._write_lease_record(f, i, lease)
-                    f.close()
+                    with open(self.home, 'rb+') as f:
+                        self._write_lease_record(f, i, lease)
                 return
         raise IndexError("unable to renew non-existent lease")

@@ -181,12 +176,11 @@ class ShareFile(object):
         # the same order as they were added, so that if we crash while
         # doing this, we won't lose any non-cancelled leases.
         leases = [l for l in leases if l] # remove the cancelled leases
-        f = open(self.home, 'rb+')
-        for i,lease in enumerate(leases):
-            self._write_lease_record(f, i, lease)
-        self._write_num_leases(f, len(leases))
-        self._truncate_leases(f, len(leases))
-        f.close()
+        with open(self.home, 'rb+') as f:
+            for i, lease in enumerate(leases):
+                self._write_lease_record(f, i, lease)
+            self._write_num_leases(f, len(leases))
+            self._truncate_leases(f, len(leases))
         space_freed = self.LEASE_SIZE * num_leases_removed
         if not len(leases):
             space_freed += os.stat(self.home)[stat.ST_SIZE]
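The lease-cancellation path above compacts the lease area in place: the surviving records are rewritten from slot 0 upward in their original order, the stored count is updated, and the file is truncated so no stale trailing records remain. A standalone sketch of that compaction pattern for fixed-size records (the layout constants here are illustrative, not the actual ShareFile layout):

import struct

RECORD_SIZE = 8
COUNT_OFFSET = 0      # illustrative: a 4-byte record count at the front
DATA_OFFSET = 4       # records start right after the count

def compact_records(path, keep):
    # Rewrite the surviving records in order, update the count, then truncate
    # so nothing stale is left past the last record.
    with open(path, "rb+") as f:
        for i, record in enumerate(keep):
            f.seek(DATA_OFFSET + i * RECORD_SIZE)
            f.write(record)
        f.seek(COUNT_OFFSET)
        f.write(struct.pack(">L", len(keep)))
        f.truncate(DATA_OFFSET + len(keep) * RECORD_SIZE)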
@@ -332,13 +332,12 @@ class NoNetworkGrid(service.MultiService):
         if write_config:
             from twisted.internet import reactor
             _, port_endpoint = self.port_assigner.assign(reactor)
-            f = open(tahoe_cfg_path, "w")
-            f.write("[node]\n")
-            f.write("nickname = client-%d\n" % i)
-            f.write("web.port = {}\n".format(port_endpoint))
-            f.write("[storage]\n")
-            f.write("enabled = false\n")
-            f.close()
+            with open(tahoe_cfg_path, "w") as f:
+                f.write("[node]\n")
+                f.write("nickname = client-%d\n" % i)
+                f.write("web.port = {}\n".format(port_endpoint))
+                f.write("[storage]\n")
+                f.write("enabled = false\n")
         else:
             _assert(os.path.exists(tahoe_cfg_path), tahoe_cfg_path=tahoe_cfg_path)

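The harness above writes a minimal client configuration by hand: a [node] section with a nickname and web.port, plus a [storage] section that disables storage, all inside one with-block so the file is flushed and closed before any node code reads it. A standalone sketch in the same shape (the helper names here are hypothetical), with a quick check that the result parses as INI:

import configparser

def write_minimal_client_config(path, i, port_endpoint):
    # Same shape as the test harness: one write per line, one with-block.
    with open(path, "w") as f:
        f.write("[node]\n")
        f.write("nickname = client-%d\n" % i)
        f.write("web.port = {}\n".format(port_endpoint))
        f.write("[storage]\n")
        f.write("enabled = false\n")

def storage_enabled(path):
    cfg = configparser.ConfigParser()
    cfg.read(path)
    return cfg.getboolean("storage", "enabled")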
@@ -523,12 +522,14 @@ class GridTestMixin(object):
     def copy_shares(self, uri):
         shares = {}
         for (shnum, serverid, sharefile) in self.find_uri_shares(uri):
-            shares[sharefile] = open(sharefile, "rb").read()
+            with open(sharefile, "rb") as f:
+                shares[sharefile] = f.read()
         return shares

     def restore_all_shares(self, shares):
         for sharefile, data in shares.items():
-            open(sharefile, "wb").write(data)
+            with open(sharefile, "wb") as f:
+                f.write(data)

     def delete_share(self, sharenum_and_serverid_and_sharefile):
         (shnum, serverid, sharefile) = sharenum_and_serverid_and_sharefile
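copy_shares and restore_all_shares above are the whole-file version of the same fix: open(path, "rb").read() leaves closing to the interpreter's finalizer (and under Python 3 such GC-closed files emit ResourceWarning when warnings are enabled), while open(path, "wb").write(data) relies on finalization to flush, which may be delayed on interpreters without prompt finalization. The two-line with form costs nothing. A standalone pair of helpers in the same spirit (the names are hypothetical):

def read_file(path):
    # Read a whole file; the handle is closed before the data is returned.
    with open(path, "rb") as f:
        return f.read()

def write_file(path, data):
    # Write a whole file; the with-block flushes and closes before returning.
    with open(path, "wb") as f:
        f.write(data)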
@@ -547,16 +548,20 @@ class GridTestMixin(object):

     def corrupt_share(self, sharenum_and_serverid_and_sharefile, corruptor_function):
         (shnum, serverid, sharefile) = sharenum_and_serverid_and_sharefile
-        sharedata = open(sharefile, "rb").read()
+        with open(sharefile, "rb") as f:
+            sharedata = f.read()
         corruptdata = corruptor_function(sharedata)
-        open(sharefile, "wb").write(corruptdata)
+        with open(sharefile, "wb") as f:
+            f.write(corruptdata)

     def corrupt_shares_numbered(self, uri, shnums, corruptor, debug=False):
         for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri):
             if i_shnum in shnums:
-                sharedata = open(i_sharefile, "rb").read()
+                with open(i_sharefile, "rb") as f:
+                    sharedata = f.read()
                 corruptdata = corruptor(sharedata, debug=debug)
-                open(i_sharefile, "wb").write(corruptdata)
+                with open(i_sharefile, "wb") as f:
+                    f.write(corruptdata)

     def corrupt_all_shares(self, uri, corruptor, debug=False):
         for (i_shnum, i_serverid, i_sharefile) in self.find_uri_shares(uri):
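The corrupt_* helpers above all follow a read-modify-write cycle: read the whole share, pass the bytes through a caller-supplied corruptor, write the result back. A hypothetical corruptor matching the signature used here (it takes the raw share bytes plus an optional debug flag and returns modified bytes; the offset chosen is arbitrary, real tests target specific fields):

def flip_one_bit(sharedata, debug=False):
    # Return a copy of the share with a single bit flipped.
    offset = len(sharedata) // 2           # arbitrary position inside the share
    corrupted = bytearray(sharedata)
    corrupted[offset] ^= 0x01              # flip the low bit of that byte
    if debug:
        print("flipped bit at offset %d" % offset)
    return bytes(corrupted)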