2007-03-08 02:16:06 +00:00
|
|
|
|
2007-12-07 01:38:42 +00:00
|
|
|
import os, time
|
2007-03-08 02:16:06 +00:00
|
|
|
from zope.interface import implements
|
|
|
|
from twisted.application import service
|
2007-09-20 23:55:33 +00:00
|
|
|
from twisted.internet import defer
|
2007-03-08 02:16:06 +00:00
|
|
|
from foolscap import Referenceable
|
|
|
|
from allmydata.interfaces import RIControlClient
|
2007-09-27 01:16:15 +00:00
|
|
|
from allmydata.util import testutil, fileutil, mathutil
|
2007-12-14 09:05:31 +00:00
|
|
|
from allmydata import upload, download
|
2007-07-09 02:07:54 +00:00
|
|
|
from twisted.python import log
|
|
|
|
|
|
|
|
def get_memory_usage():
    """Return a dict mapping VM-stat names to byte counts for this process.

    Reads VmPeak, VmSize, and VmData from /proc/self/status (values are
    reported there in kB and converted to bytes here).  On platforms
    without a compatible /proc/self/status, returns {'VmSize': 0,
    'VmPeak': 0} so callers can still index those two keys.
    """
    # this is obviously linux-specific
    stat_names = ("VmPeak",
                  "VmSize",
                  #"VmHWM",
                  "VmData")
    stats = {}
    try:
        f = open("/proc/self/status", "r")
        try:
            for line in f.readlines():
                # maxsplit=1: a value containing ':' must not produce a
                # third field (the original used maxsplit=2, which could
                # raise ValueError on unpack and wipe collected stats)
                name, right = line.split(":", 1)
                if name in stat_names:
                    assert right.endswith(" kB\n")
                    right = right[:-4]
                    stats[name] = int(right) * 1024
        finally:
            f.close() # the original leaked this file handle
    except Exception:
        # Probably not on (a compatible version of) Linux
        stats['VmSize'] = 0
        stats['VmPeak'] = 0
    return stats
|
|
|
|
|
|
|
|
def log_memory_usage(where=""):
    """Log the current VmSize/VmPeak (from get_memory_usage) via twisted's log.

    'where' is an optional tag appended to the message to identify the
    call site.
    """
    usage = get_memory_usage()
    message = "VmSize: %9d VmPeak: %9d %s" % (usage["VmSize"],
                                              usage["VmPeak"],
                                              where)
    log.msg(message)
|
2007-03-08 02:16:06 +00:00
|
|
|
|
|
|
|
|
2007-05-30 00:39:39 +00:00
|
|
|
class ControlServer(Referenceable, service.Service, testutil.PollMixin):
    """Remotely-callable debug/testing controls for a client node.

    Published over foolscap (RIControlClient); self.parent is the client
    node this service is attached to.
    """
    implements(RIControlClient)

    def remote_wait_for_client_connections(self, num_clients):
        # Fires when the parent node reports connections to at least
        # num_clients peers.
        return self.parent.debug_wait_for_client_connections(num_clients)

    def remote_upload_from_file_to_uri(self, filename):
        """Upload the local file 'filename'; the Deferred fires with its URI."""
        uploader = self.parent.getServiceNamed("uploader")
        u = upload.FileName(filename)
        d = uploader.upload(u)
        d.addCallback(lambda results: results.uri)
        return d

    def remote_download_from_uri_to_file(self, uri, filename):
        """Download 'uri' into the local file 'filename'; fires with filename."""
        downloader = self.parent.getServiceNamed("downloader")
        d = downloader.download_to_filename(uri, filename)
        d.addCallback(lambda res: filename)
        return d

    def remote_speed_test(self, count, size, mutable):
        """Run a SpeedTest of 'count' files of 'size' bytes each.

        Fires with the (upload_time, download_time) tuple from SpeedTest.
        """
        assert size > 8
        log.msg("speed_test: count=%d, size=%d, mutable=%s" % (count, size,
                                                               mutable))
        st = SpeedTest(self.parent, count, size, mutable)
        return st.run()

    def remote_get_memory_usage(self):
        # see the module-level get_memory_usage() for the dict format
        return get_memory_usage()

    def remote_measure_peer_response_time(self):
        """Ping each connected storage server; fires with {peerid: avg_seconds}."""
        # I'd like to average together several pings, but I don't want this
        # phase to take more than 10 seconds. Expect worst-case latency to be
        # 300ms.
        results = {}
        conns = self.parent.introducer_client.get_all_connections_for("storage")
        everyone = [(peerid,rref) for (peerid, service_name, rref) in conns]
        if not everyone:
            # No storage servers connected: nothing to measure.  The
            # original divided by len(everyone) below and raised
            # ZeroDivisionError in this case.
            return defer.succeed(results)
        num_pings = int(mathutil.div_ceil(10, (len(everyone) * 0.3)))
        everyone = everyone * num_pings
        d = self._do_one_ping(None, everyone, results)
        return d

    def _do_one_ping(self, res, everyone_left, results):
        # Pop one (peerid, connection) pair, time a get_versions round-trip,
        # then chain to the next pair; when the list is exhausted, average
        # the accumulated per-peer timings.
        if not everyone_left:
            return results
        peerid, connection = everyone_left.pop(0)
        start = time.time()
        d = connection.callRemote("get_versions")
        def _done(ignored):
            stop = time.time()
            elapsed = stop - start
            if peerid in results:
                results[peerid].append(elapsed)
            else:
                results[peerid] = [elapsed]
        d.addCallback(_done)
        d.addCallback(self._do_one_ping, everyone_left, results)
        def _average(res):
            averaged = {}
            for peerid,times in results.iteritems():
                averaged[peerid] = sum(times) / len(times)
            return averaged
        d.addCallback(_average)
        return d
|
|
|
|
|
2007-09-21 01:52:44 +00:00
|
|
|
class SpeedTest:
    """Times the upload and download of 'count' files of 'size' bytes each.

    'mutable' selects the upload mode: "create" makes a new mutable file
    per test file, "upload" replaces the contents of a single mutable
    slot, anything else uses the regular immutable uploader.
    """
    def __init__(self, parent, count, size, mutable):
        # parent: the client node; must provide basedir, upload(),
        # create_mutable_file(), and create_node_from_uri()
        self.parent = parent
        self.count = count
        self.size = size
        self.mutable_mode = mutable
        self.uris = {}  # maps file index -> uri recorded during do_upload
        self.basedir = os.path.join(self.parent.basedir, "_speed_test_data")

    def run(self):
        """Run the whole test; fires with (upload_time, download_time)."""
        self.create_data()
        d = self.do_upload()
        d.addCallback(lambda res: self.do_download())
        d.addBoth(self.do_cleanup)  # remove test files on success or failure
        d.addCallback(lambda res: (self.upload_time, self.download_time))
        return d

    def create_data(self):
        """Write 'count' files of 'size' bytes each under self.basedir.

        Each file starts with 8 random bytes (so the files are distinct)
        followed by zeros.
        """
        fileutil.make_dirs(self.basedir)
        for i in range(self.count):
            s = self.size
            fn = os.path.join(self.basedir, str(i))
            if os.path.exists(fn):
                os.unlink(fn)
            # "wb": the data is binary (os.urandom); the original opened in
            # text mode, which corrupts the data on Windows
            f = open(fn, "wb")
            try:
                f.write(os.urandom(8))
                s -= 8
                while s > 0:
                    chunk = min(s, 4096)
                    f.write("\x00" * chunk)
                    s -= chunk
            finally:
                f.close()

    def do_upload(self):
        """Upload all test files, recording URIs in self.uris.

        Sets self.upload_time.  In "upload" mode a single mutable slot is
        created first (its creation time is deliberately excluded from
        the measurement).
        """
        d = defer.succeed(None)
        def _create_slot(res):
            d1 = self.parent.create_mutable_file("")
            def _created(n):
                self._n = n
            d1.addCallback(_created)
            return d1
        if self.mutable_mode == "upload":
            d.addCallback(_create_slot)
        def _start(res):
            # timer starts after slot creation, before the first upload
            self._start = time.time()
        d.addCallback(_start)

        def _record_uri(uri, i):
            self.uris[i] = uri
        def _upload_one_file(ignored, i):
            # serially upload file i, then chain to i+1
            if i >= self.count:
                return
            fn = os.path.join(self.basedir, str(i))
            if self.mutable_mode == "create":
                data = open(fn,"rb").read()
                d1 = self.parent.create_mutable_file(data)
                d1.addCallback(lambda n: n.get_uri())
            elif self.mutable_mode == "upload":
                data = open(fn,"rb").read()
                d1 = self._n.replace(data)
                d1.addCallback(lambda res: self._n.get_uri())
            else:
                up = upload.FileName(fn)
                d1 = self.parent.upload(up)
                d1.addCallback(lambda results: results.uri)
            d1.addCallback(_record_uri, i)
            d1.addCallback(_upload_one_file, i+1)
            return d1
        d.addCallback(_upload_one_file, 0)
        def _upload_done(ignored):
            stop = time.time()
            self.upload_time = stop - self._start
        d.addCallback(_upload_done)
        return d

    def do_download(self):
        """Serially download every recorded URI, discarding the data.

        Sets self.download_time.
        """
        start = time.time()
        d = defer.succeed(None)
        def _download_one_file(ignored, i):
            if i >= self.count:
                return
            n = self.parent.create_node_from_uri(self.uris[i])
            d1 = n.download(download.FileHandle(Discard()))
            d1.addCallback(_download_one_file, i+1)
            return d1
        d.addCallback(_download_one_file, 0)
        def _download_done(ignored):
            stop = time.time()
            self.download_time = stop - start
        d.addCallback(_download_done)
        return d

    def do_cleanup(self, res):
        # remove the local test files; passes the incoming result (or
        # Failure) through unchanged, since it is hooked up via addBoth
        for i in range(self.count):
            fn = os.path.join(self.basedir, str(i))
            os.unlink(fn)
        return res
|
|
|
|
|
|
|
|
class Discard:
    """A write-only, filehandle-like sink that throws away everything
    written to it."""

    # download_to_filehandle explicitly does not close the filehandle it was
    # given: that is reserved for the provider of the filehandle. Therefore
    # the lack of a close() method on this otherwise filehandle-like object
    # is a part of the test.

    def write(self, data):
        pass
|