# NOTE: this Makefile requires GNU make
default: build
PYTHON=python
export PYTHON
# setup.py will extend sys.path to include our support/lib/... directory
# itself. It will also create it in the beginning of the 'develop' command.
PP=$(shell $(PYTHON) setup.py -q show_pythonpath)
RUNPP=$(PYTHON) setup.py run_with_pythonpath
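# RUNPP is used below to run commands with that support/lib/... directory on
# PYTHONPATH. As an illustrative sketch (the script name is hypothetical; the
# -p/-c flags are the same ones the real targets later in this file use):
#
#   python setup.py run_with_pythonpath -p -c "python misc/my-script.py"
#
# 'make show-pp' (defined below) prints the computed PP value.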
.PHONY: make-version build
# The 'darcsver' setup.py command comes in the 'darcsver' package:
# http://pypi.python.org/pypi/darcsver It is necessary only if you want to
# automatically produce a new _version.py file from the current darcs history.
make-version:
$(PYTHON) ./setup.py darcsver --count-all-patches
# We want src/allmydata/_version.py to be up-to-date, but it's a fairly
# expensive operation (about 6 seconds on a just-before-0.7.0 tree, probably
# because of the 332 patches since the last tag), and we've removed the need
# for an explicit 'build' step by removing the C code from src/allmydata and
# by running everything in place. It would be neat to do:
#
#src/allmydata/_version.py: _darcs/patches
# $(MAKE) make-version
#
# since that would update the embedded version string each time new darcs
# patches were pulled, but 1) this would break non-darcs trees (i.e. building
# from an exported tarball), and 2) without an obligatory 'build' step this
# rule wouldn't be run frequently enough anyway.
#
# So instead, I'll just make sure that we update the version at least once
# when we first start using the tree, and again whenever an explicit
# 'make-version' is run, since then at least the developer has some means to
# update things. It would be nice if 'make clean' deleted any
# automatically-generated _version.py too, so that 'make clean; make all'
# could be usable as a "what the heck is going on, get me back to a clean
# state", but we need 'make clean' to work on non-darcs trees without
# destroying useful information.
.built:
$(MAKE) build
src/allmydata/_version.py:
$(MAKE) make-version
# cf. ticket #455: there is a problem in the intersection of setuptools,
# twisted's setup.py, and nevow's setup.py. A Tahoe build, to satisfy its
# dependencies, may try to build both Twisted and Nevow. If both of these
# occur during the same invocation of 'setup.py develop', then the Nevow
# build will fail with an "ImportError: No module named components". Running
# the build a second time will succeed. Until there is a new version of
# setuptools which properly sandboxes sys.modules (or a new version of nevow
# which doesn't import twisted during its build, or a new version of twisted
# which doesn't import itself during its build), we just build tahoe twice
# and ignore the errors from the first pass. Updated 16-Sep-2008: now we need
# three invocations.
build: src/allmydata/_version.py
$(MAKE) build-once || $(MAKE) build-once || $(MAKE) build-once
# setuptools has a bug (Issue17, see tahoe #229 for details) that causes it
# to mishandle dependencies that are installed in non-site-directories,
# including the /var/lib/ place that debian's python-support system uses. We
# add this debian/ubuntu-specific directory (via $SITEDIRARG) to the setup.py
# command line to work around this. Some day this will probably be fixed in
# setuptools.
build-once:
$(PYTHON) setup.py build_tahoe
touch .built
# 'make install' will do the following:
# build+install tahoe (probably to /usr/lib/pythonN.N/site-packages)
# 'make install PREFIX=/usr/local/stow/tahoe-N.N' will do the same, but to
# a different location
install: src/allmydata/_version.py
ifdef PREFIX
mkdir -p $(PREFIX)
$(PYTHON) ./setup.py install --single-version-externally-managed \
--prefix=$(PREFIX) --record=./tahoe.files
else
$(PYTHON) ./setup.py install --single-version-externally-managed
endif
# TESTING
.PHONY: signal-error-deps test test-figleaf figleaf-output
signal-error-deps:
@echo
@echo
@echo "ERROR: Not all of Tahoe's dependencies are in place. Please see docs/install.html for help on installing dependencies."
@echo
@echo
exit 1
check-auto-deps:
$(PYTHON) setup.py -q check_auto_deps || $(MAKE) signal-error-deps
.checked-deps:
$(MAKE) check-auto-deps
touch .checked-deps
# you can use 'make test TEST=allmydata.test.test_introducer' to run just
# test_introducer. TEST=allmydata.test.test_client.Basic.test_permute works
# too.
TEST=allmydata
# use 'make test TRIALARGS=--reporter=bwverbose' from buildbot, to
# suppress the ansi color sequences
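# TEST= and TRIALARGS= can be combined; for example, to run a single test
# module under the black-and-white verbose reporter:
#
#   make test TEST=allmydata.test.test_introducer TRIALARGS=--reporter=bwverbose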
test: build src/allmydata/_version.py
$(PYTHON) setup.py trial -a "$(TRIALARGS) $(TEST)"
quicktest: .built .checked-deps
$(PYTHON) setup.py trial -a "$(TRIALARGS) $(TEST)"
fuse-test: .built .checked-deps
$(RUNPP) -d contrib/fuse -p -c runtests.py
test-figleaf: build src/allmydata/_version.py
rm -f .figleaf
$(PYTHON) setup.py trial -a "--reporter=bwverbose-figleaf $(TEST)"
quicktest-figleaf: src/allmydata/_version.py
rm -f .figleaf
$(PYTHON) setup.py trial -a "--reporter=bwverbose-figleaf $(TEST)"
figleaf-output:
$(RUNPP) -p -c "misc/figleaf2html -d coverage-html -r src -x misc/figleaf.excludes"
@echo "now point your browser at coverage-html/index.html"
# after doing test-figleaf and figleaf-output, point your browser at
# coverage-html/index.html
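# A complete coverage pass therefore looks like, for example:
#
#   make test-figleaf
#   make figleaf-output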
.PHONY: upload-figleaf .figleaf.el pyflakes count-lines
.PHONY: check-memory check-memory-once clean
# 'upload-figleaf' is meant to be run with an UPLOAD_TARGET=host:/dir setting
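# For example (hypothetical host and paths; UPLOAD_HOST and COVERAGEDIR are
# required as well, as enforced below):
#
#   make upload-figleaf UPLOAD_TARGET=example.com:/var/www/coverage-html UPLOAD_HOST=example.com COVERAGEDIR=/var/www/coverage-html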
ifdef UPLOAD_TARGET
ifndef UPLOAD_HOST
$(error UPLOAD_HOST must be set when using UPLOAD_TARGET)
endif
ifndef COVERAGEDIR
$(error COVERAGEDIR must be set when using UPLOAD_TARGET)
endif
upload-figleaf:
rsync -a coverage-html/ $(UPLOAD_TARGET)
ssh $(UPLOAD_HOST) make update-tahoe-figleaf COVERAGEDIR=$(COVERAGEDIR)
else
upload-figleaf:
echo "this target is meant to be run with UPLOAD_TARGET=host:/path/"
false
endif
.figleaf.el: .figleaf
$(RUNPP) -p -c "misc/figleaf2el.py .figleaf src"
pyflakes:
$(PYTHON) -OOu `which pyflakes` src/allmydata |sort |uniq
count-lines:
@echo -n "files: "
@find src -name '*.py' |grep -v /build/ |wc --lines
@echo -n "lines: "
@cat `find src -name '*.py' |grep -v /build/` |wc --lines
@echo -n "TODO: "
@grep TODO `find src -name '*.py' |grep -v /build/` | wc --lines
check-memory: .built
rm -rf _test_memory
$(RUNPP) -p -c "src/allmydata/test/check_memory.py upload"
$(RUNPP) -p -c "src/allmydata/test/check_memory.py upload-self"
$(RUNPP) -p -c "src/allmydata/test/check_memory.py upload-POST"
$(RUNPP) -p -c "src/allmydata/test/check_memory.py download"
$(RUNPP) -p -c "src/allmydata/test/check_memory.py download-GET"
$(RUNPP) -p -c "src/allmydata/test/check_memory.py download-GET-slow"
$(RUNPP) -p -c "src/allmydata/test/check_memory.py receive"
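# check-memory-once runs just one of the modes exercised above; for example:
#
#   make check-memory-once MODE=upload-POST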
check-memory-once: .built
rm -rf _test_memory
$(RUNPP) -p -c "src/allmydata/test/check_memory.py $(MODE)"
# The check-speed target uses a pre-established client node to run a canned
# set of performance tests against a test network that is also
# pre-established (probably on a remote machine). Provide it with the path to
# a local directory where this client node has been created (and populated
# with the necessary FURLs of the test network). This target will start that
# client with the current code and then run the tests. Afterwards it will
# stop the client.
#
# The 'sleep 5' is in there to give the new client a chance to connect to its
# storage servers, since check_speed.py has no good way of doing that itself.
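#
# For example (hypothetical path to a previously-created client directory):
#
#   make check-speed TESTCLIENTDIR=~/speed-client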
check-speed: .built
if [ -z '$(TESTCLIENTDIR)' ]; then exit 1; fi
@echo "stopping any leftover client code"
-$(PYTHON) bin/tahoe stop $(TESTCLIENTDIR)
$(PYTHON) bin/tahoe start $(TESTCLIENTDIR)
sleep 5
$(PYTHON) src/allmydata/test/check_speed.py $(TESTCLIENTDIR)
$(PYTHON) bin/tahoe stop $(TESTCLIENTDIR)
# The check-grid target also uses a pre-established client node, along with a
# long-term directory that contains some well-known files. See the docstring
# in src/allmydata/test/check_grid.py to see how to set this up.
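# For example (hypothetical path, prepared as that docstring describes):
#
#   make check-grid TESTCLIENTDIR=~/grid-client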
check-grid: .built
if [ -z '$(TESTCLIENTDIR)' ]; then exit 1; fi
$(PYTHON) src/allmydata/test/check_grid.py $(TESTCLIENTDIR) bin/tahoe
# 'make repl' is a simple-to-type command to get a Python interpreter loop
# from which you can type 'import allmydata'
repl:
$(RUNPP) -p
test-darcs-boringfile:
$(MAKE)
$(PYTHON) misc/test-darcs-boringfile.py
test-clean:
find . |grep -vEe"allfiles.tmp|src/allmydata/_(version|auto_deps).py|src/allmydata_tahoe.egg-info" |sort >allfiles.tmp.old
$(MAKE)
$(MAKE) clean
find . |grep -vEe"allfiles.tmp|src/allmydata/_(version|auto_deps).py|src/allmydata_tahoe.egg-info" |sort >allfiles.tmp.new
diff allfiles.tmp.old allfiles.tmp.new
clean:
rm -rf build _trial_temp _test_memory .checked-deps .built
rm -f debian
rm -f `find src/allmydata -name '*.so' -or -name '*.pyc'`
rm -rf tahoe_deps.egg-info allmydata_tahoe.egg-info
rm -rf support dist
rm -rf setuptools*.egg *.pyc darcsver*.egg pyutil*.egg
rm -rf misc/dependencies/build misc/dependencies/temp
rm -rf misc/dependencies/tahoe_deps.egg-info
find-trailing-spaces:
$(PYTHON) misc/find-trailing-spaces.py -r src
# The test-desert-island target grabs the tahoe-deps tarball, unpacks it,
# does a build, then asserts that the build did not try to download anything
# as it ran. Invoke this on a new tree, or after a 'clean', to make sure the
# support/lib/ directory is gone.
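# For example, starting from a freshly cleaned tree:
#
#   make clean
#   make test-desert-island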
fetch-and-unpack-deps:
test -f tahoe-deps.tar.gz || wget http://allmydata.org/source/tahoe/deps/tahoe-deps.tar.gz
rm -rf tahoe-deps
tar xf tahoe-deps.tar.gz
test-desert-island:
$(MAKE) fetch-and-unpack-deps
$(MAKE) 2>&1 | tee make.out
$(PYTHON) misc/check-build.py make.out no-downloads
# TARBALL GENERATION
.PHONY: tarballs upload-tarballs
tarballs:
$(MAKE) make-version
$(PYTHON) setup.py sdist --formats=bztar,gztar,zip
$(PYTHON) setup.py sdist --sumo --formats=bztar,gztar,zip
upload-tarballs:
for f in dist/allmydata-tahoe-*; do \
xfer-client --furlfile ~/.tahoe-tarball-upload.furl $$f; \
done
# DEBIAN PACKAGING
VER=$(shell $(PYTHON) misc/get-version.py)
DEBCOMMENTS="'make deb' build"
show-version:
@echo $(VER)
show-pp:
@echo $(PP)
.PHONY: setup-deb deb-ARCH is-known-debian-arch
.PHONY: deb-etch deb-sid
.PHONY: deb-edgy deb-feisty deb-gutsy deb-hardy
deb-sid:
$(MAKE) deb-ARCH ARCH=sid
deb-feisty:
$(MAKE) deb-ARCH ARCH=feisty
# edgy uses the feisty control files for now
deb-edgy:
$(MAKE) deb-ARCH ARCH=edgy TAHOE_ARCH=feisty
# etch uses the feisty control files for now
deb-etch:
$(MAKE) deb-ARCH ARCH=etch TAHOE_ARCH=feisty
# same with gutsy, the process has been nicely stable for a while now
deb-gutsy:
$(MAKE) deb-ARCH ARCH=gutsy TAHOE_ARCH=feisty
deb-hardy:
$(MAKE) deb-ARCH ARCH=hardy TAHOE_ARCH=feisty
# we know how to handle the following debian architectures
KNOWN_DEBIAN_ARCHES := etch sid edgy feisty gutsy hardy
ifeq ($(findstring x-$(ARCH)-x,$(foreach arch,$(KNOWN_DEBIAN_ARCHES),"x-$(arch)-x")),)
is-known-debian-arch:
@echo "ARCH must be set when using setup-deb or deb-ARCH"
@echo "I know how to handle:" $(KNOWN_DEBIAN_ARCHES)
false
else
is-known-debian-arch:
true
endif
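# For example, either of the following builds packages for a listed arch
# (the deb-sid and deb-edgy shorthand targets above do exactly this):
#
#   make deb-ARCH ARCH=sid
#   make deb-ARCH ARCH=edgy TAHOE_ARCH=feisty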
ifndef TAHOE_ARCH
TAHOE_ARCH=$(ARCH)
endif
setup-deb: is-known-debian-arch
rm -f debian
ln -s misc/$(TAHOE_ARCH)/debian debian
chmod +x debian/rules
# etch (current debian stable) has python-simplejson-1.3, which doesn't
# support indent=
# sid (debian unstable) currently has python-simplejson 1.7.1
# edgy has 1.3, which doesn't support indent=
# feisty has 1.4, which supports indent= but emits a deprecation warning
# gutsy has 1.7.1
#
# we need 1.4 or newer
deb-ARCH: is-known-debian-arch setup-deb
fakeroot debian/rules binary
@echo
@echo "The newly built .deb packages are in the parent directory from here."
.PHONY: increment-deb-version
.PHONY: deb-edgy-head deb-feisty-head deb-gutsy-head deb-hardy-head
.PHONY: deb-etch-head deb-sid-head
# The buildbot runs the following targets after each change, to produce
# up-to-date tahoe .debs. These steps do not create .debs for anything else.
increment-deb-version: make-version
debchange --newversion $(VER) $(DEBCOMMENTS)
deb-sid-head:
$(MAKE) setup-deb ARCH=sid
$(MAKE) increment-deb-version
fakeroot debian/rules binary
deb-edgy-head:
$(MAKE) setup-deb ARCH=edgy TAHOE_ARCH=feisty
$(MAKE) increment-deb-version
fakeroot debian/rules binary
deb-feisty-head:
$(MAKE) setup-deb ARCH=feisty
$(MAKE) increment-deb-version
fakeroot debian/rules binary
deb-etch-head:
$(MAKE) setup-deb ARCH=etch TAHOE_ARCH=feisty
$(MAKE) increment-deb-version
fakeroot debian/rules binary
deb-gutsy-head:
$(MAKE) setup-deb ARCH=gutsy TAHOE_ARCH=feisty
$(MAKE) increment-deb-version
fakeroot debian/rules binary
deb-hardy-head:
$(MAKE) setup-deb ARCH=hardy TAHOE_ARCH=feisty
$(MAKE) increment-deb-version
fakeroot debian/rules binary
# These targets provide for windows native builds
.PHONY: windows-exe windows-installer windows-installer-upload
windows-exe: .built
$(RUNPP) -c "$(MAKE) -C windows windows-exe"
windows-installer:
$(RUNPP) -c "$(MAKE) -C windows windows-installer"
windows-installer-upload:
$(RUNPP) -c "$(MAKE) -C windows windows-installer-upload"
# These targets provide for mac native builds
.PHONY: mac-exe mac-upload mac-cleanup mac-dbg
mac-exe: .built
$(MAKE) -C mac clean
VERSION=$(VER) $(RUNPP) -c "$(MAKE) -C mac build"
mac-dist:
VERSION=$(VER) $(MAKE) -C mac diskimage
mac-upload:
VERSION=$(VER) $(MAKE) -C mac upload
mac-cleanup:
VERSION=$(VER) $(MAKE) -C mac cleanup
mac-dbg:
cd mac && $(PP) $(PYTHON)w allmydata_tahoe.py
# This target runs a stats gatherer server
.PHONY: stats-gatherer-run
stats-gatherer-run:
$(RUNPP) -d stats_gatherer -p ../src/allmydata/stats.py
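
# The gatherer prints its FURL and listens for connections; a client node
# configured with that FURL (a stats_gatherer.furl file in its base
# directory, per the stats subsystem) reports its counters to the gatherer,
# which periodically writes the collected data to a pickle file. For example:
#
#   make stats-gatherer-run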