crawler: fix problems on windows and our slow cygwin slave

Brian Warner 2009-02-18 22:24:31 -07:00
parent 193889f793
commit b949ea4f32
2 changed files with 15 additions and 4 deletions


@@ -1,5 +1,5 @@
-import os, time, struct, pickle
+import sys, os, time, struct, pickle
 from twisted.internet import reactor
 from twisted.application import service
 from allmydata.storage.server import si_b2a
@@ -89,6 +89,9 @@ class ShareCrawler(service.MultiService):
         f = open(tmpfile, "wb")
         pickle.dump(state, f)
         f.close()
+        if "win32" in sys.platform.lower():
+            # sigh, stupid windows can't use this technique
+            os.unlink(self.statefile)
         os.rename(tmpfile, self.statefile)

     def startService(self):
@@ -181,3 +184,4 @@ class ShareCrawler(service.MultiService):
     def yielding(self, sleep_time):
         pass
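The crawler.py hunk above works around os.rename() refusing to overwrite an existing destination on Windows, whereas on POSIX the rename replaces it atomically; that is why the state file is unlinked first on win32 only. A minimal standalone sketch of the same write-to-tempfile-then-rename pattern, where save_state() and the file names are illustrative stand-ins rather than the crawler's actual code:

import os, sys, pickle

def save_state(state, statefile):
    # Write the new state to a sibling temp file first, so a crash while
    # pickling never leaves a truncated state file behind.
    tmpfile = statefile + ".tmp"
    f = open(tmpfile, "wb")
    try:
        pickle.dump(state, f)
    finally:
        f.close()
    if "win32" in sys.platform.lower():
        # os.rename() on Windows refuses to overwrite an existing file, so
        # remove the old copy first (giving up atomicity on that platform).
        if os.path.exists(statefile):
            os.unlink(statefile)
    os.rename(tmpfile, statefile)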


@@ -1,5 +1,6 @@
 import time
+import sys
 import os.path
 from twisted.trial import unittest
 from twisted.application import service
@@ -285,15 +286,20 @@ class Basic(unittest.TestCase, StallMixin, pollmixin.PollMixin):
         # seconds), the overhead is enough to make a nominal 50% usage more
         # like 30%. Forcing sleep_time to 0 only gets us 67% usage.
         # who knows what will happen on our slower buildslaves. I'll ditch
         # the cycles>1 test first.
+        # the windows/cygwin buildslaves, which are slow (even by windows
+        # standards) and have low-resolution timers, get more like 7% usage.
+        # On windows I'll extend the allowable range.
+        min_ok = 20
+        if "cygwin" in sys.platform.lower() or "win32" in sys.platform.lower():
+            min_ok = 3
         start = time.time()
         d = self.stall(delay=4.0)
         def _done(res):
             elapsed = time.time() - start
             percent = 100.0 * c.accumulated / elapsed
-            self.failUnless(20 < percent < 70, "crawler got %d%%" % percent)
+            self.failUnless(min_ok < percent < 70, "crawler got %d%%" % percent)
             self.failUnless(c.cycles >= 1, c.cycles)
         d.addCallback(_done)
         return d
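The test hunk above only widens the acceptable CPU-usage range on Windows and Cygwin, whose buildslaves are slow and whose timers are too coarse for the nominal 50% target to be measurable. A small sketch of the same platform check; allowed_cpu_range() is an illustrative helper, not something in the test:

import sys

def allowed_cpu_range():
    # sys.platform is "win32" on native Windows and starts with "cygwin"
    # under Cygwin, so this check catches both kinds of buildslave.
    min_ok, max_ok = 20, 70
    if "cygwin" in sys.platform.lower() or "win32" in sys.platform.lower():
        # coarse timers and slow machines push the measured usage far below
        # the nominal 50% target, so accept a much smaller lower bound
        min_ok = 3
    return min_ok, max_ok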
@@ -318,3 +324,4 @@ class Basic(unittest.TestCase, StallMixin, pollmixin.PollMixin):
             return c.first_cycle_finished
         d = self.poll(_check)
         return d