misc/spacetime: add munin plugins, add everything to .deb
@@ -15,7 +15,8 @@ DEBNAME := $(firstword $(DEB_PACKAGES))
 STAGING_DIR := $(CURDIR)/debian/$(DEBNAME)

 DEB_INSTALL_DOCS_ALL := COPYING.GPL COPYING.TGPPL.html CREDITS \
-  NEWS README relnotes.txt docs
+  NEWS README relnotes.txt docs misc/spacetime
+DEB_COMPRESS_EXCLUDE := .tac


 # we overwrite the setuptools-generated /usr/bin/tahoe (located in
misc/munin/tahoe_diskusage (new file, 39 lines)
@@ -0,0 +1,39 @@
#! /usr/bin/python

# This is a munin plugin which pulls data from the server in
# misc/spacetime/diskwatcher.tac . It produces a graph of how much disk space
# is being used per unit time. The plugin should be configured with env_url=
# pointing at the diskwatcher.tac webport.

import os, sys, urllib, simplejson

if len(sys.argv) > 1 and sys.argv[1] == "config":
    print """\
graph_title Tahoe Disk Usage Measurement
graph_vlabel bytes per second
graph_category tahoe
graph_info This graph shows the estimated disk usage per unit time, totalled across all storage servers
rate_1hr.label (one hour sample)
rate_1hr.draw LINE1
rate_1day.label (one day sample)
rate_1day.draw LINE1
rate_2wk.label (two week sample)
rate_2wk.draw LINE2
rate_4wk.label (four week sample)
rate_4wk.draw LINE2"""
    sys.exit(0)

url = os.environ["url"]
timespans = simplejson.load(urllib.urlopen(url))

data = dict([(name, growth)
             for (name, timespan, growth, timeleft) in timespans])
# growth is in bytes per second
if "1hr" in data:
    print "rate_1hr.value", data["1hr"]
if "1day" in data:
    print "rate_1day.value", data["1day"]
if "2wk" in data:
    print "rate_2wk.value", data["2wk"]
if "4wk" in data:
    print "rate_4wk.value", data["4wk"]
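The plugin reads the diskwatcher URL from a url environment variable, as the env_url= note in its header comment describes. A minimal sketch of the matching munin-node plugin configuration, assuming munin's usual env.* plugin-conf.d syntax; the file path, host, and port below are placeholders, not part of this commit:

    # /etc/munin/plugin-conf.d/tahoe  (hypothetical path)
    [tahoe_*]
    env.url http://localhost:8077/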
misc/munin/tahoe_doomsday (new file, 40 lines)
@@ -0,0 +1,40 @@
#! /usr/bin/python

# This is a munin plugin which pulls data from the server in
# misc/spacetime/diskwatcher.tac . It produces a graph of how much time is
# left before the grid fills up. The plugin should be configured with
# env_url= pointing at the diskwatcher.tac webport.

import os, sys, urllib, simplejson

if len(sys.argv) > 1 and sys.argv[1] == "config":
    print """\
graph_title Tahoe Remaining Space Predictor
graph_vlabel days remaining
graph_category tahoe
graph_info This graph shows the estimated number of days left until storage space is exhausted
days_1hr.label days left (one hour sample)
days_1hr.draw LINE1
days_1day.label days left (one day sample)
days_1day.draw LINE1
days_2wk.label days left (two week sample)
days_2wk.draw LINE2
days_4wk.label days left (four week sample)
days_4wk.draw LINE2"""
    sys.exit(0)

url = os.environ["url"]
timespans = simplejson.load(urllib.urlopen(url))

data = dict([(name, timeleft)
             for (name, timespan, growth, timeleft) in timespans])
# timeleft is in seconds
DAY = 24*60*60
if "1hr" in data:
    print "days_1hr.value", data["1hr"]/DAY
if "1day" in data:
    print "days_1day.value", data["1day"]/DAY
if "2wk" in data:
    print "days_2wk.value", data["2wk"]/DAY
if "4wk" in data:
    print "days_4wk.value", data["4wk"]/DAY
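Both plugins unpack the webport's JSON response as a list of (name, timespan, growth, timeleft) rows. A minimal sketch of that shape, written in the same Python 2 style as the plugins above; the response string and its numbers are invented purely for illustration:

    # Illustrative only: the row layout is inferred from the tuple unpacking
    # in the plugins above, and the values are made up.
    import simplejson

    body = '[["1hr", 3600, 1200.5, 864000.0], ["1day", 86400, 950.0, 1080000.0]]'
    for (name, timespan, growth, timeleft) in simplejson.loads(body):
        # growth is in bytes per second, timeleft is in seconds
        print "%s: %.1f bytes/sec, about %.1f days left" % \
              (name, growth, timeleft / (24*60*60))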
@@ -15,7 +15,8 @@ DEBNAME := $(firstword $(DEB_PACKAGES))
 STAGING_DIR := $(CURDIR)/debian/$(DEBNAME)

 DEB_INSTALL_DOCS_ALL := COPYING.GPL COPYING.TGPPL.html CREDITS \
-  NEWS README relnotes.txt docs
+  NEWS README relnotes.txt docs misc/spacetime
+DEB_COMPRESS_EXCLUDE := .tac


 # we overwrite the setuptools-generated /usr/bin/tahoe (located in
misc/spacetime/diskwatcher.tac
@@ -2,11 +2,15 @@
 """
 Run this tool with twistd in its own directory, with a file named 'urls.txt'
-describing which nodes to query. It will request disk-usage numbers from the
-nodes once per hour (or slower), and store them in a local database. It will
-compute usage-per-unit time values over several time ranges and make them
-available through an HTTP query (using ./webport). It will also provide an
-estimate of how much time is left before the grid's storage is exhausted.
+describing which nodes to query. Make sure to copy diskwatcher.py into the
+same directory. It will request disk-usage numbers from the nodes once per
+hour (or slower), and store them in a local database. It will compute
+usage-per-unit time values over several time ranges and make them available
+through an HTTP query (using ./webport). It will also provide an estimate of
+how much time is left before the grid's storage is exhausted.
+
+There are munin plugins (named tahoe_doomsday and tahoe_diskusage) to graph
+the values this tool computes.

 Each line of urls.txt points to a single node. Each node should have its own
 dedicated disk: if multiple nodes share a disk, only list one of them in
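A minimal deployment sketch following the docstring above; the working directory, the path to the Tahoe source tree, and the node URL are placeholders, and the exact URL format expected in urls.txt is not spelled out in this hunk:

    mkdir diskwatcher && cd diskwatcher
    cp /path/to/tahoe/misc/spacetime/diskwatcher.tac .
    cp /path/to/tahoe/misc/spacetime/diskwatcher.py .
    echo http://STORAGE-NODE:PORT/ >> urls.txt   # placeholder; one node URL per line
    twistd -y diskwatcher.tac                    # twistd's standard way to launch a .tac file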