use 'with open' for more file-opens

meejah 2019-12-21 00:03:38 -07:00
parent 9bcc465f76
commit 4c3d0ea6cc
12 changed files with 155 additions and 173 deletions
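Every hunk below makes the same mechanical change: a file handle that was opened and closed by hand (or never closed at all, as in pickle.load(open(...))) becomes a "with open(...)" context manager, which closes the file even when the body raises. Under CPython the leaked handles were usually reclaimed promptly by reference counting, but that is an implementation detail and does not hold on other interpreters such as PyPy. A minimal before/after sketch of the idiom (the file name and variable names are illustrative, not taken from this diff):

    # before: if load() raises, f is never explicitly closed
    f = open("state.pickle", "rb")
    state = pickle.load(f)
    f.close()

    # after: f is closed on normal exit and on exception
    with open("state.pickle", "rb") as f:
        state = pickle.load(f)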

@@ -34,7 +34,8 @@ class Blacklist(object):
         try:
             if self.last_mtime is None or current_mtime > self.last_mtime:
                 self.entries.clear()
-                for line in open(self.blacklist_fn, "r").readlines():
-                    line = line.strip()
-                    if not line or line.startswith("#"):
-                        continue
+                with open(self.blacklist_fn, "r") as f:
+                    for line in f.readlines():
+                        line = line.strip()
+                        if not line or line.startswith("#"):
+                            continue

@@ -19,7 +19,8 @@ def get_memory_usage():
                   "VmData")
     stats = {}
     try:
-        for line in open("/proc/self/status", "r").readlines():
-            name, right = line.split(":",2)
-            if name in stat_names:
-                assert right.endswith(" kB\n")
+        with open("/proc/self/status", "r") as f:
+            for line in f.readlines():
+                name, right = line.split(":",2)
+                if name in stat_names:
+                    assert right.endswith(" kB\n")

@@ -31,7 +31,8 @@ class AccountFileChecker(object):
         self.passwords = {}
         self.pubkeys = {}
         self.rootcaps = {}
-        for line in open(abspath_expanduser_unicode(accountfile), "r"):
-            line = line.strip()
-            if line.startswith("#") or not line:
-                continue
+        with open(abspath_expanduser_unicode(accountfile), "r") as f:
+            for line in f.readlines():
+                line = line.strip()
+                if line.startswith("#") or not line:
+                    continue

@@ -131,14 +131,14 @@ def get_aliases(nodedir):
     aliasfile = os.path.join(nodedir, "private", "aliases")
     rootfile = os.path.join(nodedir, "private", "root_dir.cap")
     try:
-        f = open(rootfile, "r")
-        rootcap = f.read().strip()
-        if rootcap:
-            aliases[DEFAULT_ALIAS] = rootcap
+        with open(rootfile, "r") as f:
+            rootcap = f.read().strip()
+            if rootcap:
+                aliases[DEFAULT_ALIAS] = rootcap
     except EnvironmentError:
         pass
     try:
-        f = codecs.open(aliasfile, "r", "utf-8")
-        for line in f.readlines():
-            line = line.strip()
-            if line.startswith("#") or not line:
+        with codecs.open(aliasfile, "r", "utf-8") as f:
+            for line in f.readlines():
+                line = line.strip()
+                if line.startswith("#") or not line:

@@ -250,8 +250,8 @@ class JSONStatsGatherer(StdOutStatsGatherer):
         self.jsonfile = os.path.join(basedir, "stats.json")
         if os.path.exists(self.jsonfile):
-            f = open(self.jsonfile, 'rb')
             try:
-                self.gathered_stats = json.load(f)
+                with open(self.jsonfile, 'rb') as f:
+                    self.gathered_stats = json.load(f)
             except Exception:
                 print("Error while attempting to load stats file %s.\n"
@@ -259,7 +259,6 @@ class JSONStatsGatherer(StdOutStatsGatherer):
                       " or delete it if no backup is available.\n" %
                       quote_local_unicode_path(self.jsonfile))
                 raise
-            f.close()
         else:
             self.gathered_stats = {}
@@ -272,9 +271,8 @@ class JSONStatsGatherer(StdOutStatsGatherer):
     def dump_json(self):
         tmp = "%s.tmp" % (self.jsonfile,)
-        f = open(tmp, 'wb')
-        json.dump(self.gathered_stats, f)
-        f.close()
+        with open(tmp, 'wb') as f:
+            json.dump(self.gathered_stats, f)
         if os.path.exists(self.jsonfile):
             os.unlink(self.jsonfile)
         os.rename(tmp, self.jsonfile)

@@ -191,9 +191,8 @@ class ShareCrawler(service.MultiService):
         # of the last bucket to be processed, or
         # None if we are sleeping between cycles
         try:
-            f = open(self.statefile, "rb")
-            state = pickle.load(f)
-            f.close()
+            with open(self.statefile, "rb") as f:
+                state = pickle.load(f)
         except Exception:
             state = {"version": 1,
                      "last-cycle-finished": None,
@@ -230,9 +229,8 @@ class ShareCrawler(service.MultiService):
             last_complete_prefix = self.prefixes[lcpi]
         self.state["last-complete-prefix"] = last_complete_prefix
         tmpfile = self.statefile + ".tmp"
-        f = open(tmpfile, "wb")
-        pickle.dump(self.state, f)
-        f.close()
+        with open(tmpfile, "wb") as f:
+            pickle.dump(self.state, f)
         fileutil.move_into_place(tmpfile, self.statefile)

     def startService(self):

@@ -84,9 +84,8 @@ class LeaseCheckingCrawler(ShareCrawler):
         # initialize history
         if not os.path.exists(self.historyfile):
             history = {} # cyclenum -> dict
-            f = open(self.historyfile, "wb")
-            pickle.dump(history, f)
-            f.close()
+            with open(self.historyfile, "wb") as f:
+                pickle.dump(history, f)

     def create_empty_cycle_dict(self):
         recovered = self.create_empty_recovered_dict()
@@ -303,14 +302,14 @@ class LeaseCheckingCrawler(ShareCrawler):
         # copy() needs to become a deepcopy
         h["space-recovered"] = s["space-recovered"].copy()

-        history = pickle.load(open(self.historyfile, "rb"))
+        with open(self.historyfile, "rb") as f:
+            history = pickle.load(f)
         history[cycle] = h
         while len(history) > 10:
             oldcycles = sorted(history.keys())
             del history[oldcycles[0]]
-        f = open(self.historyfile, "wb")
-        pickle.dump(history, f)
-        f.close()
+        with open(self.historyfile, "wb") as f:
+            pickle.dump(history, f)

     def get_state(self):
         """In addition to the crawler state described in
@@ -379,7 +378,8 @@ class LeaseCheckingCrawler(ShareCrawler):
         progress = self.get_progress()
         state = ShareCrawler.get_state(self) # does a shallow copy
-        history = pickle.load(open(self.historyfile, "rb"))
+        with open(self.historyfile, "rb") as f:
+            history = pickle.load(f)
         state["history"] = history

         if not progress["cycle-in-progress"]:

@@ -57,7 +57,7 @@ class MutableShareFile(object):
         self.home = filename
         if os.path.exists(self.home):
             # we don't cache anything, just check the magic
-            f = open(self.home, 'rb')
-            data = f.read(self.HEADER_SIZE)
+            with open(self.home, 'rb') as f:
+                data = f.read(self.HEADER_SIZE)
             (magic,
              write_enabler_nodeid, write_enabler,
@@ -80,17 +80,17 @@ class MutableShareFile(object):
                                   + data_length)
             assert extra_lease_offset == self.DATA_OFFSET # true at creation
             num_extra_leases = 0
-            f = open(self.home, 'wb')
-            header = struct.pack(">32s20s32sQQ",
-                                 self.MAGIC, my_nodeid, write_enabler,
-                                 data_length, extra_lease_offset,
-                                 )
-            leases = ("\x00"*self.LEASE_SIZE) * 4
-            f.write(header + leases)
-            # data goes here, empty after creation
-            f.write(struct.pack(">L", num_extra_leases))
-            # extra leases go here, none at creation
-            f.close()
+            with open(self.home, 'wb') as f:
+                header = struct.pack(
+                    ">32s20s32sQQ",
+                    self.MAGIC, my_nodeid, write_enabler,
+                    data_length, extra_lease_offset,
+                )
+                leases = ("\x00" * self.LEASE_SIZE) * 4
+                f.write(header + leases)
+                # data goes here, empty after creation
+                f.write(struct.pack(">L", num_extra_leases))
+                # extra leases go here, none at creation

     def unlink(self):
         os.unlink(self.home)
@@ -261,10 +261,9 @@ class MutableShareFile(object):
     def get_leases(self):
         """Yields a LeaseInfo instance for all leases."""
-        f = open(self.home, 'rb')
-        for i, lease in self._enumerate_leases(f):
-            yield lease
-        f.close()
+        with open(self.home, 'rb') as f:
+            for i, lease in self._enumerate_leases(f):
+                yield lease

     def _enumerate_leases(self, f):
         for i in range(self._get_num_lease_slots(f)):
@@ -277,18 +276,17 @@ class MutableShareFile(object):
     def add_lease(self, lease_info):
         precondition(lease_info.owner_num != 0) # 0 means "no lease here"
-        f = open(self.home, 'rb+')
-        num_lease_slots = self._get_num_lease_slots(f)
-        empty_slot = self._get_first_empty_lease_slot(f)
-        if empty_slot is not None:
-            self._write_lease_record(f, empty_slot, lease_info)
-        else:
-            self._write_lease_record(f, num_lease_slots, lease_info)
-        f.close()
+        with open(self.home, 'rb+') as f:
+            num_lease_slots = self._get_num_lease_slots(f)
+            empty_slot = self._get_first_empty_lease_slot(f)
+            if empty_slot is not None:
+                self._write_lease_record(f, empty_slot, lease_info)
+            else:
+                self._write_lease_record(f, num_lease_slots, lease_info)

     def renew_lease(self, renew_secret, new_expire_time):
         accepting_nodeids = set()
-        f = open(self.home, 'rb+')
-        for (leasenum,lease) in self._enumerate_leases(f):
-            if timing_safe_compare(lease.renew_secret, renew_secret):
-                # yup. See if we need to update the owner time.
+        with open(self.home, 'rb+') as f:
+            for (leasenum,lease) in self._enumerate_leases(f):
+                if timing_safe_compare(lease.renew_secret, renew_secret):
+                    # yup. See if we need to update the owner time.
@@ -296,10 +294,8 @@ class MutableShareFile(object):
-                # yes
-                lease.expiration_time = new_expire_time
-                self._write_lease_record(f, leasenum, lease)
-                f.close()
-                return
-            accepting_nodeids.add(lease.nodeid)
-        f.close()
+                    # yes
+                    lease.expiration_time = new_expire_time
+                    self._write_lease_record(f, leasenum, lease)
+                    return
+                accepting_nodeids.add(lease.nodeid)
         # Return the accepting_nodeids set, to give the client a chance to
         # update the leases on a share which has been migrated from its
         # original server to a new one.
@@ -333,7 +329,7 @@ class MutableShareFile(object):
                                 cancel_secret="\x00"*32,
                                 expiration_time=0,
                                 nodeid="\x00"*20)
-        f = open(self.home, 'rb+')
-        for (leasenum,lease) in self._enumerate_leases(f):
-            accepting_nodeids.add(lease.nodeid)
-            if timing_safe_compare(lease.cancel_secret, cancel_secret):
+        with open(self.home, 'rb+') as f:
+            for (leasenum,lease) in self._enumerate_leases(f):
+                accepting_nodeids.add(lease.nodeid)
+                if timing_safe_compare(lease.cancel_secret, cancel_secret):
@@ -372,10 +368,9 @@ class MutableShareFile(object):
     def readv(self, readv):
         datav = []
-        f = open(self.home, 'rb')
-        for (offset, length) in readv:
-            datav.append(self._read_share_data(f, offset, length))
-        f.close()
+        with open(self.home, 'rb') as f:
+            for (offset, length) in readv:
+                datav.append(self._read_share_data(f, offset, length))
         return datav

     # def remote_get_length(self):
@@ -385,10 +380,9 @@ class MutableShareFile(object):
     #     return data_length

     def check_write_enabler(self, write_enabler, si_s):
-        f = open(self.home, 'rb+')
-        (real_write_enabler, write_enabler_nodeid) = \
-            self._read_write_enabler_and_nodeid(f)
-        f.close()
+        with open(self.home, 'rb+') as f:
+            (real_write_enabler, write_enabler_nodeid) = \
+                self._read_write_enabler_and_nodeid(f)
         # avoid a timing attack
         #if write_enabler != real_write_enabler:
         if not timing_safe_compare(write_enabler, real_write_enabler):
@@ -405,17 +399,16 @@ class MutableShareFile(object):
     def check_testv(self, testv):
         test_good = True
-        f = open(self.home, 'rb+')
-        for (offset, length, operator, specimen) in testv:
-            data = self._read_share_data(f, offset, length)
-            if not testv_compare(data, operator, specimen):
-                test_good = False
-                break
-        f.close()
+        with open(self.home, 'rb+') as f:
+            for (offset, length, operator, specimen) in testv:
+                data = self._read_share_data(f, offset, length)
+                if not testv_compare(data, operator, specimen):
+                    test_good = False
+                    break
         return test_good

     def writev(self, datav, new_length):
-        f = open(self.home, 'rb+')
-        for (offset, data) in datav:
-            self._write_share_data(f, offset, data)
-        if new_length is not None:
+        with open(self.home, 'rb+') as f:
+            for (offset, data) in datav:
+                self._write_share_data(f, offset, data)
+            if new_length is not None:
@@ -425,7 +418,6 @@ class MutableShareFile(object):
             # TODO: if we're going to shrink the share file when the
             # share data has shrunk, then call
             # self._change_container_size() here.
-        f.close()

 def testv_compare(a, op, b):
     assert op in ("lt", "le", "eq", "ne", "ge", "gt")

@@ -317,9 +317,8 @@ class StorageServer(service.MultiService, Referenceable):
     def _iter_share_files(self, storage_index):
         for shnum, filename in self._get_bucket_shares(storage_index):
-            f = open(filename, 'rb')
-            header = f.read(32)
-            f.close()
+            with open(filename, 'rb') as f:
+                header = f.read(32)
             if header[:32] == MutableShareFile.MAGIC:
                 sf = MutableShareFile(filename, self)
                 # note: if the share has been migrated, the renew_lease()
@@ -682,7 +681,7 @@ class StorageServer(service.MultiService, Referenceable):
         # windows can't handle colons in the filename
         fn = os.path.join(self.corruption_advisory_dir,
                           "%s--%s-%d" % (now, si_s, shnum)).replace(":","")
-        f = open(fn, "w")
-        f.write("report: Share Corruption\n")
-        f.write("type: %s\n" % share_type)
-        f.write("storage_index: %s\n" % si_s)
+        with open(fn, "w") as f:
+            f.write("report: Share Corruption\n")
+            f.write("type: %s\n" % share_type)
+            f.write("storage_index: %s\n" % si_s)
@@ -690,7 +689,6 @@ class StorageServer(service.MultiService, Referenceable):
             f.write("\n")
             f.write(reason)
             f.write("\n")
-        f.close()
         log.msg(format=("client claims corruption in (%(share_type)s) " +
                         "%(si)s-%(shnum)d: %(reason)s"),
                 share_type=share_type, si=si_s, shnum=shnum, reason=reason,

@@ -4,9 +4,8 @@ from allmydata.storage.mutable import MutableShareFile
 from allmydata.storage.immutable import ShareFile

 def get_share_file(filename):
-    f = open(filename, "rb")
-    prefix = f.read(32)
-    f.close()
+    with open(filename, "rb") as f:
+        prefix = f.read(32)
     if prefix == MutableShareFile.MAGIC:
         return MutableShareFile(filename)
     # otherwise assume it's immutable

@@ -13,15 +13,12 @@ class UnknownConfigError(Exception):
 def get_config(tahoe_cfg):
     config = SafeConfigParser()
-    f = open(tahoe_cfg, "rb")
-    try:
+    with open(tahoe_cfg, "rb") as f:
         # Skip any initial Byte Order Mark. Since this is an ordinary file, we
         # don't need to handle incomplete reads, and can assume seekability.
         if f.read(3) != '\xEF\xBB\xBF':
             f.seek(0)
         config.readfp(f)
-    finally:
-        f.close()
     return config

 def set_config(config, section, option, value):
@@ -31,11 +28,8 @@ def set_config(config, section, option, value):
     assert config.get(section, option) == value

 def write_config(tahoe_cfg, config):
-    f = open(tahoe_cfg, "wb")
-    try:
-        config.write(f)
-    finally:
-        f.close()
+    with open(tahoe_cfg, "wb") as f:
+        config.write(f)

 def validate_config(fname, cfg, valid_config):
     """

@@ -235,7 +235,7 @@ def _get_linux_distro():
         return (_distname, _version)

     try:
-        etclsbrel = open("/etc/lsb-release", "rU")
-        for line in etclsbrel:
-            m = _distributor_id_file_re.search(line)
-            if m:
+        with open("/etc/lsb-release", "rU") as etclsbrel:
+            for line in etclsbrel:
+                m = _distributor_id_file_re.search(line)
+                if m: