Mirror of https://github.com/tahoe-lafs/tahoe-lafs.git (synced 2024-12-23 14:52:26 +00:00)
setup: remove a convenience copy of figleaf, to ease inclusion into Ubuntu Karmic Koala
We need to carefully document the licence of figleaf in order to get Tahoe-LAFS into Ubuntu Karmic Koala. However, figleaf isn't really a part of Tahoe-LAFS per se -- it is just a "convenience copy" of a development tool. The quickest way to make Tahoe-LAFS acceptable for Karmic, then, is to remove figleaf from the Tahoe-LAFS tarball itself. People who want to run figleaf on Tahoe-LAFS (as everyone should want to) can install figleaf themselves. I haven't tested this -- there may be incompatibilities between upstream figleaf and the copy that we had here...
This commit is contained in:
parent: abdf8a6f10
commit: aaaa633f18
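Note: with the convenience copy gone, developers who want coverage must install upstream figleaf themselves; the in-tree scripts now just do a bare "import figleaf" (see the diff below). Purely as an illustration of the compatibility concern mentioned above -- not something this commit adds -- a tool script could guard that import like this:

    # hypothetical defensive import; NOT part of this change
    try:
        import figleaf            # upstream figleaf, installed separately
    except ImportError:
        figleaf = None            # coverage support is simply unavailable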
@@ -55,8 +55,7 @@ def write_el(r2, source):
     out.write(" results)\n")
     out.close()
 
-#import figleaf
-from allmydata.util import figleaf
+import figleaf
 
 def examine_source(filename):
     f = open(filename, "r")
@@ -1,3 +0,0 @@
-#! /usr/bin/env python
-from allmydata.util import figleaf_htmlizer
-figleaf_htmlizer.main()
@@ -59,7 +59,7 @@ from twisted.trial.reporter import TreeReporter, VerboseTextReporter
 # in printSummary. To include import, we have to start in our own import and
 # finish in printSummary.
 
-from allmydata.util import figleaf
+import figleaf
 figleaf.start()
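For reference, a minimal usage sketch of the high-level API documented in the figleaf module below (assumes an upstream figleaf install; work() is just a placeholder):

    import figleaf

    def work():
        # placeholder workload whose executed lines we want recorded
        return sum(range(10))

    figleaf.start()                          # begin tracing; the Python stdlib is ignored by default
    try:
        work()
    finally:
        figleaf.stop()                       # stop tracing
        figleaf.write_coverage('.figleaf')   # merge results into the aggregate '.figleaf' pickle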
@@ -1,400 +0,0 @@
#! /usr/bin/env python
"""
figleaf is another tool to trace code coverage (yes, in Python ;).

figleaf uses the sys.settrace hook to record which statements are
executed by the CPython interpreter; this record can then be saved
into a file, or otherwise communicated back to a reporting script.

figleaf differs from the gold standard of coverage tools
('coverage.py') in several ways. First and foremost, figleaf uses the
same criterion for "interesting" lines of code as the sys.settrace
function, which obviates some of the complexity in coverage.py (but
does mean that your "loc" count goes down). Second, figleaf does not
record code executed in the Python standard library, which results in
a significant speedup. And third, the format in which the coverage
format is saved is very simple and easy to work with.

You might want to use figleaf if you're recording coverage from
multiple types of tests and need to aggregate the coverage in
interesting ways, and/or control when coverage is recorded.
coverage.py is a better choice for command-line execution, and its
reporting is a fair bit nicer.

Command line usage: ::

   figleaf.py <python file to execute> <args to python file>

The figleaf output is saved into the file '.figleaf', which is an
*aggregate* of coverage reports from all figleaf runs from this
directory. '.figleaf' contains a pickled dictionary of sets; the keys
are source code filenames, and the sets contain all line numbers
executed by the Python interpreter. See the docs or command-line
programs in bin/ for more information.

High level API: ::

 * ``start(ignore_lib=True)`` -- start recording code coverage.
 * ``stop()`` -- stop recording code coverage.
 * ``get_trace_obj()`` -- return the (singleton) trace object.
 * ``get_info()`` -- get the coverage dictionary

Classes & functions worth knowing about, i.e. a lower level API:

 * ``get_lines(fp)`` -- return the set of interesting lines in the fp.
 * ``combine_coverage(d1, d2)`` -- combine coverage info from two dicts.
 * ``read_coverage(filename)`` -- load the coverage dictionary
 * ``write_coverage(filename)`` -- write the coverage out.
 * ``annotate_coverage(...)`` -- annotate a Python file with its coverage info.

Known problems:

 -- module docstrings are *covered* but not found.

AUTHOR: C. Titus Brown, titus@idyll.org

'figleaf' is Copyright (C) 2006. It will be released under the BSD license.
"""
import sys
import os
import threading
from cPickle import dump, load

### import builtin sets if in > 2.4, otherwise use 'sets' module.
# we require 2.4 or later
assert set


from token import tok_name, NEWLINE, STRING, INDENT, DEDENT, COLON
import parser, types, symbol

def get_token_name(x):
    """
    Utility to help pretty-print AST symbols/Python tokens.
    """
    if symbol.sym_name.has_key(x):
        return symbol.sym_name[x]
    return tok_name.get(x, '-')

class LineGrabber:
    """
    Count 'interesting' lines of Python in source files, where
    'interesting' is defined as 'lines that could possibly be
    executed'.

    @CTB this badly needs to be refactored... once I have automated
    tests ;)
    """
    def __init__(self, fp):
        """
        Count lines of code in 'fp'.
        """
        self.lines = set()

        self.ast = parser.suite(fp.read())
        self.tree = parser.ast2tuple(self.ast, True)

        self.find_terminal_nodes(self.tree)

    def find_terminal_nodes(self, tup):
        """
        Recursively eat an AST in tuple form, finding the first line
        number for "interesting" code.
        """
        (sym, rest) = tup[0], tup[1:]

        line_nos = set()
        if type(rest[0]) == types.TupleType: ### node

            for x in rest:
                token_line_no = self.find_terminal_nodes(x)
                if token_line_no is not None:
                    line_nos.add(token_line_no)

            if symbol.sym_name[sym] in ('stmt', 'suite', 'lambdef',
                                        'except_clause') and line_nos:
                # store the line number that this statement started at
                self.lines.add(min(line_nos))
            elif symbol.sym_name[sym] in ('if_stmt',):
                # add all lines under this
                self.lines.update(line_nos)
            elif symbol.sym_name[sym] in ('global_stmt',): # IGNORE
                return
            else:
                if line_nos:
                    return min(line_nos)

        else: ### leaf
            if sym not in (NEWLINE, STRING, INDENT, DEDENT, COLON) and \
               tup[1] != 'else':
                return tup[2]
        return None

    def pretty_print(self, tup=None, indent=0):
        """
        Pretty print the AST.
        """
        if tup is None:
            tup = self.tree

        s = tup[1]

        if type(s) == types.TupleType:
            print ' '*indent, get_token_name(tup[0])
            for x in tup[1:]:
                self.pretty_print(x, indent+1)
        else:
            print ' '*indent, get_token_name(tup[0]), tup[1:]

def get_lines(fp):
    """
    Return the set of interesting lines in the source code read from
    this file handle.
    """
    l = LineGrabber(fp)
    return l.lines

class CodeTracer:
    """
    Basic code coverage tracking, using sys.settrace.
    """
    def __init__(self, ignore_prefixes=[]):
        self.c = {}
        self.started = False
        self.ignore_prefixes = ignore_prefixes

    def start(self):
        """
        Start recording.
        """
        if not self.started:
            self.started = True

            sys.settrace(self.g)
            if hasattr(threading, 'settrace'):
                threading.settrace(self.g)

    def stop(self):
        if self.started:
            sys.settrace(None)
            if hasattr(threading, 'settrace'):
                threading.settrace(None)

            self.started = False

    def g(self, f, e, a):
        """
        global trace function.
        """
        if e is 'call':
            for p in self.ignore_prefixes:
                if f.f_code.co_filename.startswith(p):
                    return

            return self.t

    def t(self, f, e, a):
        """
        local trace function.
        """

        if e is 'line':
            self.c[(f.f_code.co_filename, f.f_lineno)] = 1
        return self.t

    def clear(self):
        """
        wipe out coverage info
        """

        self.c = {}

    def gather_files(self):
        """
        Return the dictionary of lines of executed code; the dict
        contains items (k, v), where 'k' is the filename and 'v'
        is a set of line numbers.
        """
        files = {}
        for (filename, line) in self.c.keys():
            d = files.get(filename, set())
            d.add(line)
            files[filename] = d

        return files
def combine_coverage(d1, d2):
    """
    Given two coverage dictionaries, combine the recorded coverage
    and return a new dictionary.
    """
    keys = set(d1.keys())
    keys.update(set(d2.keys()))

    new_d = {}
    for k in keys:
        v = d1.get(k, set())
        v2 = d2.get(k, set())

        s = set(v)
        s.update(v2)
        new_d[k] = s

    return new_d

def write_coverage(filename, combine=True):
    """
    Write the current coverage info out to the given filename. If
    'combine' is false, destroy any previously recorded coverage info.
    """
    if _t is None:
        return

    d = _t.gather_files()

    # combine?
    if combine:
        old = {}
        fp = None
        try:
            fp = open(filename)
        except IOError:
            pass

        if fp:
            old = load(fp)
            fp.close()
            d = combine_coverage(d, old)

    # ok, save.
    outfp = open(filename, 'w')
    try:
        dump(d, outfp)
    finally:
        outfp.close()

def read_coverage(filename):
    """
    Read a coverage dictionary in from the given file.
    """
    fp = open(filename)
    try:
        d = load(fp)
    finally:
        fp.close()

    return d

def annotate_coverage(in_fp, out_fp, covered, all_lines,
                      mark_possible_lines=False):
    """
    A simple example coverage annotator that outputs text.
    """
    for i, line in enumerate(in_fp):
        i = i + 1

        if i in covered:
            symbol = '>'
        elif i in all_lines:
            symbol = '!'
        else:
            symbol = ' '

        symbol2 = ''
        if mark_possible_lines:
            symbol2 = ' '
            if i in all_lines:
                symbol2 = '-'

        out_fp.write('%s%s %s' % (symbol, symbol2, line,))

#######################

#
# singleton functions/top-level API
#

_t = None

def start(ignore_python_lib=True, ignore_prefixes=[]):
    """
    Start tracing code coverage. If 'ignore_python_lib' is True,
    ignore all files that live below the same directory as the 'os'
    module.
    """
    global _t
    if _t is None:
        ignore_prefixes = ignore_prefixes[:]
        if ignore_python_lib:
            ignore_prefixes.append(os.path.realpath(os.path.dirname(os.__file__)))
        _t = CodeTracer(ignore_prefixes)

    _t.start()

def stop():
    """
    Stop tracing code coverage.
    """
    global _t
    if _t is not None:
        _t.stop()

def get_trace_obj():
    """
    Return the (singleton) trace object, if it exists.
    """
    return _t

def get_info():
    """
    Get the coverage dictionary from the trace object.
    """
    if _t:
        return _t.gather_files()

#############

def display_ast():
    l = LineGrabber(open(sys.argv[1]))
    l.pretty_print()

def main():
    """
    Execute the given Python file with coverage, making it look like it is
    __main__.
    """
    ignore_pylibs = False

    def print_help():
        print 'Usage: figleaf [-i] <program-to-profile> <program-options>'
        print ''
        print 'Options:'
        print '  -i   Ignore Python standard libraries when calculating coverage'

    args = sys.argv[1:]

    if len(args) < 1:
        print_help()
        raise SystemExit()
    elif len(args) > 2 and args[0] == '-i':
        ignore_pylibs = True

        ## Make sure to strip off the -i or --ignore-python-libs option if it exists
        args = args[1:]

    ## Reset system args so that the subsequently exec'd file can read from sys.argv
    sys.argv = args

    sys.path[0] = os.path.dirname(args[0])

    cwd = os.getcwd()

    start(ignore_pylibs) # START code coverage

    import __main__
    try:
        execfile(args[0], __main__.__dict__)
    finally:
        stop() # STOP code coverage

    write_coverage(os.path.join(cwd, '.figleaf'))
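As the docstring above says, '.figleaf' is just a pickled dictionary mapping source filenames to sets of executed line numbers, so it can be inspected without the htmlizer below. A small sketch (not part of this commit):

    from cPickle import load

    fp = open('.figleaf', 'rb')
    try:
        coverage = load(fp)       # dict: filename -> set of executed line numbers
    finally:
        fp.close()

    for filename in sorted(coverage.keys()):
        print "%5d executed lines in %s" % (len(coverage[filename]), filename)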
@@ -1,354 +0,0 @@
#! /usr/bin/env python
import sys
import pickle
import figleaf
import os
import re

from twisted.python import usage

class RenderOptions(usage.Options):
    optParameters = [
        ("exclude-patterns", "x", None, "file containing regexp patterns to exclude"),
        ("output-directory", "d", "html", "Directory for HTML output"),
        ("root", "r", None, "only pay attention to modules under this directory"),
        ("old-coverage", "o", None, "figleaf pickle from previous build"),
        ]

    def opt_root(self, value):
        self["root"] = os.path.abspath(value)
        if not self["root"].endswith("/"):
            self["root"] += "/"

    def parseArgs(self, *filenames):
        self.filenames = [".figleaf"]
        if filenames:
            self.filenames = list(filenames)

class Renderer:

    def run(self):
        self.opts = opts = RenderOptions()
        opts.parseOptions()

        ### load

        coverage = {}
        for filename in opts.filenames:
            d = figleaf.read_coverage(filename)
            coverage = figleaf.combine_coverage(coverage, d)

        if not coverage:
            sys.exit(-1)

        self.old_coverage = None
        if opts["old-coverage"]:
            try:
                f = open(opts["old-coverage"], "rb")
                self.old_coverage = pickle.load(f)
            except EnvironmentError:
                pass

        self.load_exclude_patterns(opts["exclude-patterns"])
        ### make directory
        self.prepare_reportdir(opts["output-directory"])
        self.report_as_html(coverage, opts["output-directory"], opts["root"])

    def load_exclude_patterns(self, f):
        self.exclude_patterns = []
        if not f:
            return
        for line in open(f, "r").readlines():
            line = line.rstrip()
            if line and not line.startswith('#'):
                self.exclude_patterns.append(re.compile(line))

    def prepare_reportdir(self, dirname='html'):
        try:
            os.mkdir(dirname)
        except OSError: # already exists
            pass

    def check_excludes(self, fn):
        for pattern in self.exclude_patterns:
            if pattern.search(fn):
                return True
        return False
    def make_display_filename(self, fn):
        root = self.opts["root"]
        if not root:
            return fn
        display_filename = fn[len(root):]
        assert not display_filename.startswith("/")
        assert display_filename.endswith(".py")
        display_filename = display_filename[:-3] # trim .py
        display_filename = display_filename.replace("/", ".")
        return display_filename

    def report_as_html(self, coverage, directory, root=None):
        ### now, output.

        keys = coverage.keys()
        info_dict = {}
        for k in keys:
            if self.check_excludes(k):
                continue
            if k.endswith('figleaf.py'):
                continue
            if not k.startswith("/"):
                continue
            if root and not k.startswith(root):
                continue

            display_filename = self.make_display_filename(k)
            info = self.process_file(k, display_filename, coverage)
            if info:
                info_dict[k] = info

        ### print a summary, too.
        #print info_dict

        info_dict_items = info_dict.items()

        def sort_by_pcnt(a, b):
            a_cmp = (-a[1][4], a[1][5])
            b_cmp = (-b[1][4], b[1][5])
            return cmp(a_cmp,b_cmp)

        def sort_by_uncovered(a, b):
            a_cmp = ( -(a[1][0] - a[1][1]), a[1][5])
            b_cmp = ( -(b[1][0] - b[1][1]), b[1][5])
            return cmp(a_cmp, b_cmp)

        def sort_by_delta(a, b):
            # files which lost coverage line should appear first, followed by
            # files which gained coverage
            a_cmp = (-a[1][3], -a[1][2], a[1][5])
            b_cmp = (-b[1][3], -b[1][2], b[1][5])
            return cmp(a_cmp, b_cmp)

        info_dict_items.sort(sort_by_uncovered)

        summary_lines = sum([ v[0] for (k, v) in info_dict_items])
        summary_cover = sum([ v[1] for (k, v) in info_dict_items])
        summary_added = sum([ v[2] for (k, v) in info_dict_items])
        summary_removed = sum([ v[3] for (k, v) in info_dict_items])
        summary_pcnt = 0
        if summary_lines:
            summary_pcnt = float(summary_cover) * 100. / float(summary_lines)
        self.summary = (summary_lines, summary_cover,
                        summary_added, summary_removed,
                        summary_pcnt)


        pcnts = [ float(v[1]) * 100. / float(v[0]) for (k, v) in info_dict_items if v[0] ]
        pcnt_90 = [ x for x in pcnts if x >= 90 ]
        pcnt_75 = [ x for x in pcnts if x >= 75 ]
        pcnt_50 = [ x for x in pcnts if x >= 50 ]

        stats_fp = open('%s/stats.out' % (directory,), 'w')
        self.write_stats(stats_fp, "total files: %d" % len(pcnts))
        self.write_stats(stats_fp, "total source lines: %d" % summary_lines)
        self.write_stats(stats_fp, "total covered lines: %d" % summary_cover)
        self.write_stats(stats_fp,
                         "total uncovered lines: %d" % (summary_lines - summary_cover))
        if self.old_coverage is not None:
            self.write_stats(stats_fp, "lines added: %d" % summary_added)
            self.write_stats(stats_fp, "lines removed: %d" % summary_removed)
        self.write_stats(stats_fp,
                         "total coverage percentage: %.1f" % summary_pcnt)
        stats_fp.close()

        ## index.html
        index_fp = open('%s/index.html' % (directory,), 'w')
        # summary info
        index_fp.write('<title>figleaf code coverage report</title>\n')
        index_fp.write('<h2>Summary</h2> %d files total: %d files > '
                       '90%%, %d files > 75%%, %d files > 50%%<p>'
                       % (len(pcnts), len(pcnt_90),
                          len(pcnt_75), len(pcnt_50)))

        # sorted by number of lines that aren't covered
        index_fp.write('<h3>Sorted by Lines Uncovered</h3>\n')
        self.emit_table(index_fp, info_dict_items, show_totals=True)

        if self.old_coverage is not None:
            index_fp.write('<h3>Sorted by Coverage Added/Lost</h3>\n')
            info_dict_items.sort(sort_by_delta)
            self.emit_table(index_fp, info_dict_items, show_totals=False)

        # sorted by module name
        index_fp.write('<h3>Sorted by Module Name (alphabetical)</h3>\n')
        info_dict_items.sort()
        self.emit_table(index_fp, info_dict_items, show_totals=False)

        index_fp.close()

        return len(info_dict)
    def process_file(self, k, display_filename, coverage):

        try:
            pyfile = open(k)
        except IOError:
            return

        source_lines = figleaf.get_lines(pyfile)

        have_old_coverage = False
        if self.old_coverage and k in self.old_coverage:
            have_old_coverage = True
            old_coverage = self.old_coverage[k]

        # ok, got all the info. now annotate file ==> html.

        covered = coverage[k]
        n_covered = n_lines = 0
        n_added = n_removed = 0

        pyfile = open(k)
        output = []
        for i, line in enumerate(pyfile):
            i += 1 # coverage info is 1-based

            if i in covered:
                color = "green"
                n_covered += 1
                n_lines += 1
            elif i in source_lines:
                color = "red"
                n_lines += 1
            else:
                color = "black"

            delta = " "
            if have_old_coverage:
                if i in covered and i not in old_coverage:
                    delta = "+"
                    n_added += 1
                elif i in old_coverage and i not in covered:
                    delta = "-"
                    n_removed += 1

            line = self.escape_html(line.rstrip())
            output.append('<font color="%s">%s%4d. %s</font>' %
                          (color, delta, i, line.rstrip()))

        try:
            pcnt = n_covered * 100. / n_lines
        except ZeroDivisionError:
            pcnt = 0

        html_outfile = self.make_html_filename(display_filename)
        directory = self.opts["output-directory"]
        html_outfp = open(os.path.join(directory, html_outfile), 'w')
        html_outfp.write('source file: <b>%s</b><br>\n' % (k,))
        html_outfp.write('file stats: <b>%d lines, %d executed: %.1f%% covered</b><br>\n' % (n_lines, n_covered, pcnt))
        if have_old_coverage:
            html_outfp.write('coverage versus previous test: <b>%d lines added, %d lines removed</b><br>\n'
                             % (n_added, n_removed))

        html_outfp.write('<pre>\n')
        for line in output:
            html_outfp.write(line + "\n")
        html_outfp.write('</pre>\n')
        html_outfp.close()

        return (n_lines, n_covered, n_added, n_removed, pcnt, display_filename)
    def emit_table(self, index_fp, items, show_totals):
        have_old_coverage = self.old_coverage is not None
        if have_old_coverage:
            index_fp.write('<table border=1><tr><th>Filename</th>'
                           '<th># lines</th><th># covered</th>'
                           '<th># uncovered</th>'
                           '<th># added</th>'
                           '<th># removed</th>'
                           '<th>% covered</th></tr>\n')
        else:
            index_fp.write('<table border=1><tr><th>Filename</th>'
                           '<th># lines</th><th># covered</th>'
                           '<th># uncovered</th>'
                           '<th>% covered</th></tr>\n')
        if show_totals:
            (summary_lines, summary_cover,
             summary_added, summary_removed,
             summary_pcnt) = self.summary
            if have_old_coverage:
                index_fp.write('<tr><td><b>totals:</b></td>'
                               '<td><b>%d</b></td>' # lines
                               '<td><b>%d</b></td>' # cover
                               '<td><b>%d</b></td>' # uncover
                               '<td><b>%d</b></td>' # added
                               '<td><b>%d</b></td>' # removed
                               '<td><b>%.1f%%</b></td>'
                               '</tr>'
                               '<tr></tr>\n'
                               % (summary_lines, summary_cover,
                                  (summary_lines - summary_cover),
                                  summary_added, summary_removed,
                                  summary_pcnt,))
            else:
                index_fp.write('<tr><td><b>totals:</b></td>'
                               '<td><b>%d</b></td>'
                               '<td><b>%d</b></td>'
                               '<td><b>%d</b></td>'
                               '<td><b>%.1f%%</b></td>'
                               '</tr>'
                               '<tr></tr>\n'
                               % (summary_lines, summary_cover,
                                  (summary_lines - summary_cover),
                                  summary_pcnt,))

        for filename, stuff in items:
            self.emit_table_row(index_fp, stuff)

        index_fp.write('</table>\n')

    def emit_table_row(self, index_fp, info):
        (n_lines, n_covered, n_added, n_removed,
         percent_covered, display_filename) = info
        html_outfile = self.make_html_filename(display_filename)

        if self.old_coverage is not None:
            index_fp.write('<tr><td><a href="./%s">%s</a></td>'
                           '<td>%d</td>' # lines
                           '<td>%d</td>' # covered
                           '<td>%d</td>' # uncovered
                           '<td>%d</td>' # added
                           '<td>%d</td>' # removed
                           '<td>%.1f</td>'
                           '</tr>\n'
                           % (html_outfile, display_filename, n_lines,
                              n_covered, (n_lines - n_covered),
                              n_added, n_removed,
                              percent_covered,))
        else:
            index_fp.write('<tr><td><a href="./%s">%s</a></td>'
                           '<td>%d</td>'
                           '<td>%d</td>'
                           '<td>%d</td>'
                           '<td>%.1f</td>'
                           '</tr>\n'
                           % (html_outfile, display_filename, n_lines,
                              n_covered, (n_lines - n_covered),
                              percent_covered,))

    def make_html_filename(self, orig):
        return orig + ".html"

    def escape_html(self, s):
        s = s.replace("&", "&amp;")
        s = s.replace("<", "&lt;")
        s = s.replace(">", "&gt;")
        s = s.replace('"', "&quot;")
        return s

    def write_stats(self, stats_fp, line):
        stats_fp.write(line + "\n")
        print line

def main():
    r = Renderer()
    r.run()
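For reference, a hypothetical way the removed htmlizer could be driven, using the options defined by RenderOptions above (the paths and the argv[0] name are only examples):

    import sys
    from allmydata.util import figleaf_htmlizer   # the module this commit deletes

    # equivalent to: figleaf2html -d coverage-html -r /path/to/src .figleaf
    sys.argv = ["figleaf2html",
                "-d", "coverage-html",    # output-directory
                "-r", "/path/to/src",     # only report modules under this root
                ".figleaf"]               # coverage pickle(s) to load
    figleaf_htmlizer.main()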