mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2025-01-18 10:46:24 +00:00)
for some reason this makes 'the occasional error' happen all the time?
commit 194011946c
parent 25ef11efc8
@@ -132,8 +132,8 @@ class DownloadNode(object):
     def stop(self):
         # called by the Terminator at shutdown, mostly for tests
         if self._active_segment:
-            self._active_segment.stop()
-            self._active_segment = None
+            seg, self._active_segment = self._active_segment, None
+            seg.stop()
         self._sharefinder.stop()
 
     # things called by outside callers, via CiphertextFileNode. get_segment()
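
The swap-then-stop pattern above clears self._active_segment before calling stop(), so any code that stop() reenters (directly or through a callback) can no longer mistake the shutting-down segment for the active one. A minimal runnable sketch of the idea, using hypothetical Segment/Node stand-ins rather than the real downloader classes:

    class Segment:
        def __init__(self, node):
            self.node = node

        def stop(self):
            # stop() may synchronously reenter the node; with the
            # swap-then-stop ordering the node has already forgotten
            # us, so reentrant code never sees a half-stopped segment
            assert self.node._active_segment is not self

    class Node:
        def __init__(self):
            self._active_segment = Segment(self)

        def stop(self):
            if self._active_segment:
                # clear the attribute first, then stop the old segment
                seg, self._active_segment = self._active_segment, None
                seg.stop()

    Node().stop()  # the assertion inside Segment.stop() holds
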
@@ -410,11 +410,11 @@ class DownloadNode(object):
 
     def fetch_failed(self, sf, f):
         assert sf is self._active_segment
-        self._active_segment = None
         # deliver error upwards
         for (d,c,seg_ev) in self._extract_requests(sf.segnum):
             seg_ev.error(now())
             eventually(self._deliver, d, c, f)
+        self._active_segment = None
         self._start_new_segment()
 
     def process_blocks(self, segnum, blocks):
@@ -434,6 +434,7 @@ class DownloadNode(object):
                 eventually(self._deliver, d, c, result)
             else:
                 (offset, segment, decodetime) = result
+                self._active_segment = None
                 for (d,c,seg_ev) in self._extract_requests(segnum):
                     # when we have two requests for the same segment, the
                     # second one will not be "activated" before the data is
@@ -446,7 +447,6 @@ class DownloadNode(object):
                 seg_ev.deliver(when, offset, len(segment), decodetime)
                 eventually(self._deliver, d, c, result)
             self._download_status.add_misc_event("process_block", start, now())
-            self._active_segment = None
             self._start_new_segment()
         d.addBoth(_deliver)
         d.addErrback(log.err, "unhandled error during process_blocks",
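
Both fetch_failed and process_blocks hand results upward through eventually, which (as in foolscap) queues the call to run only after the current stack unwinds. Moving the self._active_segment = None assignment therefore only changes what the synchronous code in those loops observes; the queued deliveries always run after the whole method has returned. A toy sketch of that deferral, with a hand-rolled queue standing in for the real eventually:

    from collections import deque

    _queue = deque()

    def eventually(fn, *args):
        # defer fn until the current call stack has unwound,
        # roughly what foolscap's eventually() provides
        _queue.append((fn, args))

    def run_pending():
        while _queue:
            fn, args = _queue.popleft()
            fn(*args)

    observed = []

    class Node:
        def __init__(self):
            self._active_segment = "seg-3"

        def fetch_failed(self):
            for req in ("req-a", "req-b"):
                # queued, not run: by the time this fires, the method
                # below has already cleared _active_segment
                eventually(lambda r=req: observed.append((r, self._active_segment)))
            self._active_segment = None

    node = Node()
    node.fetch_failed()
    run_pending()
    print(observed)  # [('req-a', None), ('req-b', None)]
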
@@ -536,8 +536,8 @@ class DownloadNode(object):
         # self._active_segment might be None in rare circumstances, so make
         # sure we tolerate it
         if self._active_segment and self._active_segment.segnum not in segnums:
-            self._active_segment.stop()
-            self._active_segment = None
+            seg, self._active_segment = self._active_segment, None##True # XXX None for real
+            seg.stop()
             self._start_new_segment()
 
     # called by ShareFinder to choose hashtree sizes in CommonShares, and by