parse_llvm_coverage.py

#!/usr/bin/env python
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
  5. """Parse an LLVM coverage report to generate useable results."""
import argparse
import json
import os
import re
import subprocess
import sys
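
# Example invocation (file names and key/property values here are
# hypothetical, shown only to illustrate the flags defined in main()):
#   ./parse_llvm_coverage.py \
#       --report coverage_report.txt \
#       --nanobench nanobench_coverage.json \
#       --key arch x86_64 compiler Clang \
#       --properties gitHash abc123 \
#       --linebyline coverage_by_line.json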


def _fix_filename(filename):
  """Return a filename which we can use to identify the file.

  The file paths printed by llvm-cov take the form:

    /path/to/repo/out/dir/../../src/filename.cpp

  And then they're truncated to 22 characters with leading ellipses:

    ...../../src/filename.cpp

  This makes it really tough to determine whether the file actually belongs in
  the Skia repo. This function strips out the leading junk so that, if the file
  exists in the repo, the returned string matches the end of some relative path
  in the repo. This doesn't guarantee correctness, but it's about as close as
  we can get.
  """
  return filename.split('..')[-1].lstrip('./')
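
# Illustration of _fix_filename(), using the paths from its docstring; both
# forms reduce to the same repo-relative name:
#   _fix_filename('/path/to/repo/out/dir/../../src/filename.cpp')
#       -> 'src/filename.cpp'
#   _fix_filename('...../../src/filename.cpp')
#       -> 'src/filename.cpp'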


def _file_in_repo(filename, all_files):
  """Return the name of the checked-in file matching the given filename.

  Use suffix matching to determine which checked-in files the given filename
  matches. If there are no matches or multiple matches, return None.
  """
  new_file = _fix_filename(filename)
  matched = []
  for f in all_files:
    if f.endswith(new_file):
      matched.append(f)
  if len(matched) == 1:
    return matched[0]
  elif len(matched) > 1:
    print >> sys.stderr, ('WARNING: multiple matches for %s; skipping:\n\t%s'
                          % (new_file, '\n\t'.join(matched)))
  return None
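
# Illustration of the suffix matching, with a hypothetical file list:
#   all_files = ['src/core/SkCanvas.cpp', 'tests/CanvasTest.cpp']
#   _file_in_repo('...../../src/core/SkCanvas.cpp', all_files)
#       -> 'src/core/SkCanvas.cpp'
# because exactly one checked-in path ends with the fixed-up name.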


def _get_per_file_per_line_coverage(report):
  """Return a dict whose keys are file names and values are coverage data.

  Values are lists which take the form (lineno, coverage, code).
  """
  all_files = []
  for root, dirs, files in os.walk(os.getcwd()):
    if 'third_party/externals' in root:
      continue
    files = [f for f in files if not (f[0] == '.' or f.endswith('.pyc'))]
    dirs[:] = [d for d in dirs if not d[0] == '.']
    for name in files:
      all_files.append(os.path.join(root[(len(os.getcwd()) + 1):], name))
  all_files.sort()
  lines = report.splitlines()
  current_file = None
  file_lines = []
  files = {}
  not_checked_in = '%'  # Use this as the file name for not-checked-in files.
  for line in lines:
    m = re.match('([a-zA-Z0-9\./_-]+):', line)
    if m:
      if current_file and current_file != not_checked_in:
        files[current_file] = file_lines
      match_filename = _file_in_repo(m.groups()[0], all_files)
      current_file = match_filename or not_checked_in
      file_lines = []
    else:
      if current_file != not_checked_in:
        skip = re.match('^\s{2}-+$|^\s{2}\|.+$', line)
        if line and not skip:
          cov, linenum, code = line.split('|', 2)
          cov = cov.strip()
          if cov:
            cov = int(cov)
          else:
            cov = None  # We don't care about coverage for this line.
          linenum = int(linenum.strip())
          assert linenum == len(file_lines) + 1
          file_lines.append((linenum, cov, code.decode('utf-8', 'replace')))
  return files
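
# The parsing logic above implies report text of roughly this shape
# (illustrative; column widths vary):
#   ...../../src/filename.cpp:
#          1|      1|  someCoveredLine();
#           |      2|  // no execution count recorded for this line
# i.e. a '<filename>:' header followed by 'hit count|line number|source'
# rows, with separator rows ('  ----') and continuation rows ('  |...')
# skipped. Line numbers are expected to be consecutive starting at 1 within
# each file (enforced by the assert).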


def _testname(filename):
  """Transform the file name into an ingestible test name."""
  return re.sub(r'[^a-zA-Z0-9]', '_', filename)
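
# e.g. (hypothetical path):
#   _testname('src/core/SkCanvas.cpp') -> 'src_core_SkCanvas_cpp'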


def _nanobench_json(results, properties, key):
  """Return the results in JSON format like that produced by nanobench."""
  rv = {}
  # Copy over the properties first, then set the 'key' and 'results' keys,
  # in order to avoid bad formatting in case the user passes in a properties
  # dict containing those keys.
  rv.update(properties)
  rv['key'] = key
  rv['results'] = {
      _testname(f): {
          'coverage': {
              'percent': percent,
              'lines_not_covered': not_covered_lines,
              'options': {
                  'fullname': f,
                  'dir': os.path.dirname(f),
                  'source_type': 'coverage',
              },
          },
      } for percent, not_covered_lines, f in results
  }
  return rv
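
# Sketch of the structure this produces for a single results entry
# (75.0, 10, 'src/core/SkCanvas.cpp') with key={'arch': 'x86_64'} and
# properties={'gitHash': 'abc123'}; all values are hypothetical:
#   {
#     'gitHash': 'abc123',
#     'key': {'arch': 'x86_64'},
#     'results': {
#       'src_core_SkCanvas_cpp': {
#         'coverage': {
#           'percent': 75.0,
#           'lines_not_covered': 10,
#           'options': {
#             'fullname': 'src/core/SkCanvas.cpp',
#             'dir': 'src/core',
#             'source_type': 'coverage',
#           },
#         },
#       },
#     },
#   }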


def _parse_key_value(kv_list):
  """Return a dict whose key/value pairs are derived from the given list.

  For example:

    ['k1', 'v1', 'k2', 'v2']

  becomes:

    {'k1': 'v1',
     'k2': 'v2'}
  """
  if len(kv_list) % 2 != 0:
    raise Exception('Invalid key/value pairs: %s' % kv_list)
  rv = {}
  for i in xrange(len(kv_list) / 2):
    rv[kv_list[i*2]] = kv_list[i*2+1]
  return rv


def _get_per_file_summaries(line_by_line):
  """Summarize the full line-by-line coverage report by file."""
  per_file = []
  for filepath, lines in line_by_line.iteritems():
    total_lines = 0
    covered_lines = 0
    for _, cov, _ in lines:
      if cov is not None:
        total_lines += 1
        if cov > 0:
          covered_lines += 1
    if total_lines > 0:
      per_file.append((float(covered_lines)/float(total_lines)*100.0,
                       total_lines - covered_lines,
                       filepath))
  return per_file
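
# Illustration with hypothetical line-by-line data: a file whose lines are
#   [(1, None, '...'), (2, 5, '...'), (3, 0, '...')]
# has two instrumented lines, one of which was hit, so its summary tuple is
#   (50.0, 1, filepath)
# i.e. (percent covered, number of uncovered lines, path).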


def main():
  """Generate useful data from a coverage report."""
  # Parse args.
  parser = argparse.ArgumentParser()
  parser.add_argument('--report', help='input file; an llvm coverage report.',
                      required=True)
  parser.add_argument('--nanobench', help='output file for nanobench data.')
  parser.add_argument(
      '--key', metavar='key_or_value', nargs='+',
      help='key/value pairs identifying this bot.')
  parser.add_argument(
      '--properties', metavar='key_or_value', nargs='+',
      help='key/value pairs representing properties of this build.')
  parser.add_argument('--linebyline',
                      help='output file for line-by-line JSON data.')
  args = parser.parse_args()

  if args.nanobench and not (args.key and args.properties):
    raise Exception('--key and --properties are required with --nanobench')

  with open(args.report) as f:
    report = f.read()

  line_by_line = _get_per_file_per_line_coverage(report)

  if args.linebyline:
    with open(args.linebyline, 'w') as f:
      json.dump(line_by_line, f)

  if args.nanobench:
    # Parse the key and properties for use in the nanobench JSON output.
    key = _parse_key_value(args.key)
    properties = _parse_key_value(args.properties)

    # Get per-file summaries.
    per_file = _get_per_file_summaries(line_by_line)

    # Write results.
    format_results = _nanobench_json(per_file, properties, key)
    with open(args.nanobench, 'w') as f:
      json.dump(format_results, f)


if __name__ == '__main__':
  main()