"""
A simple stats gathering system.

Code which wishes to offer runtime statistics for monitoring reports them
to a central StatsProvider, either by calling
stats_provider.count('stat.name', delta) to increment a counter, or by
registering an object as a stats producer with
stats_provider.register_producer(obj). The provider queries each
registered producer whenever it is itself queried, and reports both its
internal 'counters' and the gathered 'stats'.

Ported to Python 3.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future.utils import PY2
if PY2:
    from future.builtins import filter, map, zip, ascii, chr, hex, input, next, oct, open, pow, round, super, bytes, dict, list, object, range, str, max, min  # noqa: F401
    # Python 2 has no time.process_time(); time.clock() is the closest
    # available equivalent.
    from time import clock as process_time
else:
    from time import process_time
import time
from twisted.application import service
from twisted.application.internet import TimerService
from zope.interface import implementer
from foolscap.api import eventually
from allmydata.util import log, dictutil
from allmydata.interfaces import IStatsProducer


@implementer(IStatsProducer)
class CPUUsageMonitor(service.MultiService):
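    """
    Sample this process's CPU usage once per POLL_INTERVAL and expose it,
    via get_stats(), as 1-, 5-, and 15-minute moving averages plus a
    running total.
    """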
    # retain HISTORY_LENGTH+1 samples: at one sample per minute, just
    # enough for a 15-minute average
    HISTORY_LENGTH = 15
    POLL_INTERVAL = 60  # type: float

    def __init__(self):
        service.MultiService.__init__(self)
        # we don't use process_time() here, because the constructor is run by
        # the twistd parent process (as it loads the .tac file), whereas the
        # rest of the program will be run by the child process, after twistd
        # forks. Instead, set self.initial_cpu as soon as the reactor starts
        # up.
        self.initial_cpu = 0.0 # just in case
        eventually(self._set_initial_cpu)
        self.samples = []
        # we provide 1min, 5min, and 15min moving averages
        TimerService(self.POLL_INTERVAL, self.check).setServiceParent(self)

    def _set_initial_cpu(self):
        self.initial_cpu = process_time()

    def check(self):
        now_wall = time.time()
        now_cpu = process_time()
        self.samples.append( (now_wall, now_cpu) )
        while len(self.samples) > self.HISTORY_LENGTH+1:
            self.samples.pop(0)

    def _average_N_minutes(self, size):
        """Return the fraction of a CPU used over roughly the last 'size'
        poll intervals (i.e. minutes), or None if we do not yet have
        enough samples."""
        if len(self.samples) < size+1:
            return None
        first = -size-1
        elapsed_wall = self.samples[-1][0] - self.samples[first][0]
        elapsed_cpu = self.samples[-1][1] - self.samples[first][1]
        fraction = elapsed_cpu / elapsed_wall
        return fraction

    def get_stats(self):
        """Return cpu_monitor.* moving averages (omitting any for which we
        lack samples) plus the total CPU time consumed since startup."""
        s = {}
        avg = self._average_N_minutes(1)
        if avg is not None:
            s["cpu_monitor.1min_avg"] = avg
        avg = self._average_N_minutes(5)
        if avg is not None:
            s["cpu_monitor.5min_avg"] = avg
        avg = self._average_N_minutes(15)
        if avg is not None:
            s["cpu_monitor.15min_avg"] = avg
        now_cpu = process_time()
        s["cpu_monitor.total"] = now_cpu - self.initial_cpu
        return s


class StatsProvider(service.MultiService):
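    """
    A central place for code to report statistics for monitoring: call
    count('stat.name', delta) to increment a named counter, or register
    an IStatsProducer with register_producer(obj) to have its get_stats()
    output merged into ours.

    A minimal sketch of typical use (the counter name is illustrative,
    and 'node' is whatever node object the caller already has; it is
    stored but not otherwise used here):

        provider = StatsProvider(node)
        provider.count('downloader.files_downloaded')
        provider.get_stats()
        # => {'counters': {...}, 'stats': {'cpu_monitor.1min_avg': ...}}
    """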

    def __init__(self, node):
        service.MultiService.__init__(self)
        self.node = node
        self.counters = dictutil.UnicodeKeyDict()
        self.stats_producers = []
        self.cpu_monitor = CPUUsageMonitor()
        self.cpu_monitor.setServiceParent(self)
        self.register_producer(self.cpu_monitor)

    def count(self, name, delta=1):
        val = self.counters.setdefault(name, 0)
        self.counters[name] = val + delta

    def register_producer(self, stats_producer):
        self.stats_producers.append(IStatsProducer(stats_producer))

    def get_stats(self):
        stats = {}
        for sp in self.stats_producers:
            stats.update(sp.get_stats())
        ret = { 'counters': self.counters, 'stats': stats }
        log.msg(format='get_stats() -> %(stats)s', stats=ret, level=log.NOISY)
        return ret
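

# Example (illustrative only, not used by this module): any object may be
# registered as a producer so long as it declares IStatsProducer and
# implements get_stats(). A hypothetical producer might look like:
#
#     @implementer(IStatsProducer)
#     class QueueDepthMonitor(object):
#         def __init__(self, queue):
#             self.queue = queue
#         def get_stats(self):
#             return {"queue_monitor.depth": len(self.queue)}
#
#     provider.register_producer(QueueDepthMonitor(my_queue))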