#!/usr/bin/python3
#
# Examine build performance test results
#
# Copyright (c) 2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
import argparse
import json
import logging
import os
import re
import sys
from collections import namedtuple, OrderedDict
from operator import attrgetter
from xml.etree import ElementTree as ET

# Import oe libs
scripts_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(scripts_path, 'lib'))
import scriptpath
from build_perf import print_table
from build_perf.report import (metadata_xml_to_json, results_xml_to_json,
                               aggregate_data, aggregate_metadata,
                               measurement_stats, AggregateTestData)
from build_perf import html
from buildstats import BuildStats, diff_buildstats, BSVerDiff

scriptpath.add_oe_lib_path()

from oeqa.utils.git import GitRepo, GitError

# Setup logging
logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
log = logging.getLogger('oe-build-perf-report')

# Container class for tested revisions
TestedRev = namedtuple('TestedRev', 'commit commit_number tags')

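# Illustrative only: with the default tag name pattern used by this script,
# a TestedRev instance might look like (hypothetical values):
#   TestedRev(commit='decafbad', commit_number='12345',
#             tags=['builder1/master/qemux86/12345-gdecafbad/0'])
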
def get_test_runs(repo, tag_name, **kwargs):
    """Get a sorted list of test runs, matching given pattern"""
    # First, get field names from the tag name pattern
    field_names = [m.group(1) for m in re.finditer(r'{(\w+)}', tag_name)]
    undef_fields = [f for f in field_names if f not in kwargs.keys()]

    # Fields for formatting tag name pattern
    str_fields = dict([(f, '*') for f in field_names])
    str_fields.update(kwargs)

    # Get a list of all matching tags
    tag_pattern = tag_name.format(**str_fields)
    tags = repo.run_cmd(['tag', '-l', tag_pattern]).splitlines()
    log.debug("Found %d tags matching pattern '%s'", len(tags), tag_pattern)

    # Parse undefined fields from tag names
    str_fields = dict([(f, r'(?P<{}>[\w\-.()]+)'.format(f)) for f in field_names])
    str_fields['branch'] = r'(?P<branch>[\w\-.()/]+)'
    str_fields['commit'] = '(?P<commit>[0-9a-f]{7,40})'
    str_fields['commit_number'] = '(?P<commit_number>[0-9]{1,7})'
    str_fields['tag_number'] = '(?P<tag_number>[0-9]{1,5})'
    # Escape parentheses in fields in order to not mess up the regexp
    fixed_fields = dict([(k, v.replace('(', r'\(').replace(')', r'\)')) for k, v in kwargs.items()])
    str_fields.update(fixed_fields)
    tag_re = re.compile(tag_name.format(**str_fields))

    # Parse fields from tags
    revs = []
    for tag in tags:
        m = tag_re.match(tag)
        groups = m.groupdict()
        revs.append([groups[f] for f in undef_fields] + [tag])

    # Return field names and a sorted list of revs
    return undef_fields, sorted(revs)

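# Illustrative sketch of what get_test_runs() does with the default tag name
# pattern (values below are hypothetical):
#   pattern: '{hostname}/{branch}/{machine}/{commit_number}-g{commit}/{tag_number}'
#   with hostname='builder1' the glob passed to 'git tag -l' becomes
#     'builder1/*/*/*-g*/*'
#   and the regexp built above then captures branch, machine, commit_number,
#   commit and tag_number from each matching tag name.
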
def list_test_revs(repo, tag_name, verbosity, **kwargs):
    """Print a table of all tested revisions"""
    valid_kwargs = dict([(k, v) for k, v in kwargs.items() if v is not None])
    fields, revs = get_test_runs(repo, tag_name, **valid_kwargs)
    ignore_fields = ['tag_number']
    if verbosity < 2:
        extra_fields = ['COMMITS', 'TEST RUNS']
        ignore_fields.extend(['commit_number', 'commit'])
    else:
        extra_fields = ['TEST RUNS']

    print_fields = [i for i, f in enumerate(fields) if f not in ignore_fields]

    # Build table rows, starting with the header
    rows = [[fields[i].upper() for i in print_fields] + extra_fields]

    prev = [''] * len(print_fields)
    prev_commit = None
    commit_cnt = 0
    commit_field = fields.index('commit')
    for rev in revs:
        # Only use fields that we want to print
        cols = [rev[i] for i in print_fields]

        if cols != prev:
            commit_cnt = 1
            test_run_cnt = 1
            new_row = [''] * (len(print_fields) + len(extra_fields))

            for i in print_fields:
                if cols[i] != prev[i]:
                    break
            new_row[i:-len(extra_fields)] = cols[i:]
            rows.append(new_row)
        else:
            if rev[commit_field] != prev_commit:
                commit_cnt += 1
            test_run_cnt += 1

        if verbosity < 2:
            new_row[-2] = commit_cnt
        new_row[-1] = test_run_cnt

        prev = cols
        prev_commit = rev[commit_field]

    print_table(rows)

def get_test_revs(repo, tag_name, **kwargs):
    """Get list of all tested revisions"""
    fields, runs = get_test_runs(repo, tag_name, **kwargs)

    revs = {}
    commit_i = fields.index('commit')
    commit_num_i = fields.index('commit_number')
    for run in runs:
        commit = run[commit_i]
        commit_num = run[commit_num_i]
        tag = run[-1]
        if commit not in revs:
            revs[commit] = TestedRev(commit, commit_num, [tag])
        else:
            assert commit_num == revs[commit].commit_number, "Commit numbers do not match"
            revs[commit].tags.append(tag)

    # Return as a list, sorted by commit number
    revs = sorted(revs.values(), key=attrgetter('commit_number'))
    log.debug("Found %d tested revisions:\n    %s", len(revs),
              "\n    ".join(['{} ({})'.format(rev.commit_number, rev.commit) for rev in revs]))
    return revs

def rev_find(revs, attr, val):
    """Search a list of TestedRev for a matching attribute value"""
    for i, rev in enumerate(revs):
        if getattr(rev, attr) == val:
            return i
    raise ValueError("Unable to find '{}' value '{}'".format(attr, val))

def is_xml_format(repo, commit):
    """Check if the commit contains xml (or json) data"""
    if repo.rev_parse(commit + ':results.xml'):
        log.debug("Detected report in xml format in %s", commit)
        return True
    else:
        log.debug("No xml report in %s, assuming json formatted results", commit)
        return False

def read_results(repo, tags, xml=True):
    """Read result files from repo"""

    def parse_xml_stream(data):
        """Parse multiple concatenated XML objects"""
        objs = []
        xml_d = ""
        for line in data.splitlines():
            if xml_d and line.startswith('<?xml version='):
                objs.append(ET.fromstring(xml_d))
                xml_d = line
            else:
                xml_d += line
        objs.append(ET.fromstring(xml_d))
        return objs

    def parse_json_stream(data):
        """Parse multiple concatenated JSON objects"""
        objs = []
        json_d = ""
        for line in data.splitlines():
            if line == '}{':
                json_d += '}'
                objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
                json_d = '{'
            else:
                json_d += line
        objs.append(json.loads(json_d, object_pairs_hook=OrderedDict))
        return objs

    num_revs = len(tags)

    # Optimize by reading all data with one git command
    log.debug("Loading raw result data from %d tags, %s...", num_revs, tags[0])
    if xml:
        git_objs = [tag + ':metadata.xml' for tag in tags] + [tag + ':results.xml' for tag in tags]
        data = parse_xml_stream(repo.run_cmd(['show'] + git_objs + ['--']))
        return ([metadata_xml_to_json(e) for e in data[0:num_revs]],
                [results_xml_to_json(e) for e in data[num_revs:]])
    else:
        git_objs = [tag + ':metadata.json' for tag in tags] + [tag + ':results.json' for tag in tags]
        data = parse_json_stream(repo.run_cmd(['show'] + git_objs + ['--']))
        return data[0:num_revs], data[num_revs:]

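# Sketch of the input the stream parsers above handle: 'git show A:results.json
# B:results.json' concatenates the files into one stream, so the JSON parser
# splits on a lone '}{' line (the end of one document followed by the start of
# the next), and the XML parser splits on each new '<?xml version=' declaration.
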
def get_data_item(data, key):
    """Nested getitem lookup"""
    for k in key.split('.'):
        data = data[k]
    return data

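# For example, with a hypothetical metadata dict:
#   meta = {'layers': {'meta': {'commit': 'decafbad'}}}
#   get_data_item(meta, 'layers.meta.commit')  # -> 'decafbad'
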
def metadata_diff(metadata_l, metadata_r):
    """Prepare a metadata diff for printing"""
    keys = [('Hostname', 'hostname', 'hostname'),
            ('Branch', 'branch', 'layers.meta.branch'),
            ('Commit number', 'commit_num', 'layers.meta.commit_count'),
            ('Commit', 'commit', 'layers.meta.commit'),
            ('Number of test runs', 'testrun_count', 'testrun_count')
            ]

    def _metadata_diff(key):
        """Diff metadata from two test reports"""
        try:
            val1 = get_data_item(metadata_l, key)
        except KeyError:
            val1 = '(N/A)'
        try:
            val2 = get_data_item(metadata_r, key)
        except KeyError:
            val2 = '(N/A)'
        return val1, val2

    metadata = OrderedDict()
    for title, key, key_json in keys:
        value_l, value_r = _metadata_diff(key_json)
        metadata[key] = {'title': title,
                         'value_old': value_l,
                         'value': value_r}
    return metadata

def print_diff_report(metadata_l, data_l, metadata_r, data_r):
    """Print differences between two data sets"""
    # First, print general metadata
    print("\nTEST METADATA:\n==============")
    meta_diff = metadata_diff(metadata_l, metadata_r)
    rows = []
    row_fmt = ['{:{wid}} ', '{:<{wid}} ', '{:<{wid}}']
    rows = [['', 'CURRENT COMMIT', 'COMPARING WITH']]
    for key, val in meta_diff.items():
        # Shorten commit hashes
        if key == 'commit':
            rows.append([val['title'] + ':', val['value'][:20], val['value_old'][:20]])
        else:
            rows.append([val['title'] + ':', val['value'], val['value_old']])
    print_table(rows, row_fmt)

    # Print test results
    print("\nTEST RESULTS:\n=============")
    tests = list(data_l['tests'].keys())
    # Append tests that are only present in the 'right' set
    tests += [t for t in list(data_r['tests'].keys()) if t not in tests]

    # Prepare data to be printed
    rows = []
    row_fmt = ['{:8}', '{:{wid}}', '{:{wid}}', ' {:>{wid}}', ' {:{wid}} ', '{:{wid}}',
               ' {:>{wid}}', ' {:>{wid}}']
    num_cols = len(row_fmt)
    for test in tests:
        test_l = data_l['tests'][test] if test in data_l['tests'] else None
        test_r = data_r['tests'][test] if test in data_r['tests'] else None
        pref = ' '
        if test_l is None:
            pref = '+'
        elif test_r is None:
            pref = '-'
        descr = test_l['description'] if test_l else test_r['description']
        heading = "{} {}: {}".format(pref, test, descr)

        rows.append([heading])

        # Generate the list of measurements
        meas_l = test_l['measurements'] if test_l else {}
        meas_r = test_r['measurements'] if test_r else {}
        measurements = list(meas_l.keys())
        measurements += [m for m in list(meas_r.keys()) if m not in measurements]

        for meas in measurements:
            m_pref = ' '
            if meas in meas_l:
                stats_l = measurement_stats(meas_l[meas], 'l.')
            else:
                stats_l = measurement_stats(None, 'l.')
                m_pref = '+'
            if meas in meas_r:
                stats_r = measurement_stats(meas_r[meas], 'r.')
            else:
                stats_r = measurement_stats(None, 'r.')
                m_pref = '-'
            stats = stats_l.copy()
            stats.update(stats_r)

            absdiff = stats['val_cls'](stats['r.mean'] - stats['l.mean'])
            reldiff = "{:+.1f} %".format(absdiff * 100 / stats['l.mean'])
            if stats['r.mean'] > stats['l.mean']:
                absdiff = '+' + str(absdiff)
            else:
                absdiff = str(absdiff)
            rows.append(['', m_pref, stats['name'] + ' ' + stats['quantity'],
                         str(stats['l.mean']), '->', str(stats['r.mean']),
                         absdiff, reldiff])
        rows.append([''] * num_cols)

    print_table(rows, row_fmt)
    print()

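# Illustrative shape of the text report produced above (placeholders, not
# real field values): each test gets a heading row, followed by one row per
# measurement:
#   ' test1: <description>'
#   '   <name> <quantity>   <l.mean> -> <r.mean>   <absdiff>   <reldiff>'
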
class BSSummary(object):
    def __init__(self, bs1, bs2):
        self.tasks = {'count': bs2.num_tasks,
                      'change': '{:+d}'.format(bs2.num_tasks - bs1.num_tasks)}
        self.top_consumer = None
        self.top_decrease = None
        self.top_increase = None
        self.ver_diff = OrderedDict()

        tasks_diff = diff_buildstats(bs1, bs2, 'cputime')

        # Get top consumers of resources
        tasks_diff = sorted(tasks_diff, key=attrgetter('value2'))
        self.top_consumer = tasks_diff[-5:]

        # Get biggest increase and decrease in resource usage
        tasks_diff = sorted(tasks_diff, key=attrgetter('absdiff'))
        self.top_decrease = tasks_diff[0:5]
        self.top_increase = tasks_diff[-5:]

        # Compare recipe versions and prepare data for display
        ver_diff = BSVerDiff(bs1, bs2)
        if ver_diff:
            if ver_diff.new:
                self.ver_diff['New recipes'] = [(n, r.evr) for n, r in ver_diff.new.items()]
            if ver_diff.dropped:
                self.ver_diff['Dropped recipes'] = [(n, r.evr) for n, r in ver_diff.dropped.items()]
            if ver_diff.echanged:
                self.ver_diff['Epoch changed'] = [(n, "{} &rarr; {}".format(r.left.evr, r.right.evr)) for n, r in ver_diff.echanged.items()]
            if ver_diff.vchanged:
                self.ver_diff['Version changed'] = [(n, "{} &rarr; {}".format(r.left.version, r.right.version)) for n, r in ver_diff.vchanged.items()]
            if ver_diff.rchanged:
                self.ver_diff['Revision changed'] = [(n, "{} &rarr; {}".format(r.left.evr, r.right.evr)) for n, r in ver_diff.rchanged.items()]

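# Illustrative only: after construction, self.ver_diff maps section titles to
# lists of (recipe, change) tuples, e.g. (made-up values):
#   {'Version changed': [('busybox', '1.24.1 &rarr; 1.25.0')]}
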
def print_html_report(data, id_comp, buildstats):
    """Print report in html format"""
    # Handle metadata
    metadata = metadata_diff(data[id_comp].metadata, data[-1].metadata)

    # Generate list of tests
    tests = []
    for test in data[-1].results['tests'].keys():
        test_r = data[-1].results['tests'][test]
        new_test = {'name': test_r['name'],
                    'description': test_r['description'],
                    'status': test_r['status'],
                    'measurements': [],
                    'err_type': test_r.get('err_type'),
                    }
        # Limit length of err output shown
        if 'message' in test_r:
            lines = test_r['message'].splitlines()
            if len(lines) > 20:
                new_test['message'] = '...\n' + '\n'.join(lines[-20:])
            else:
                new_test['message'] = test_r['message']

        # Generate the list of measurements
        for meas in test_r['measurements'].keys():
            meas_r = test_r['measurements'][meas]
            meas_type = 'time' if meas_r['type'] == 'sysres' else 'size'
            new_meas = {'name': meas_r['name'],
                        'legend': meas_r['legend'],
                        'description': meas_r['name'] + ' ' + meas_type,
                        }
            samples = []

            # Run through all revisions in our data
            for meta, test_data in data:
                if (test not in test_data['tests'] or
                        meas not in test_data['tests'][test]['measurements']):
                    samples.append(measurement_stats(None))
                    continue
                test_i = test_data['tests'][test]
                meas_i = test_i['measurements'][meas]
                commit_num = get_data_item(meta, 'layers.meta.commit_count')
                samples.append(measurement_stats(meas_i))
                samples[-1]['commit_num'] = commit_num

            absdiff = samples[-1]['val_cls'](samples[-1]['mean'] - samples[id_comp]['mean'])
            new_meas['absdiff'] = absdiff
            new_meas['absdiff_str'] = str(absdiff) if absdiff < 0 else '+' + str(absdiff)
            new_meas['reldiff'] = "{:+.1f} %".format(absdiff * 100 / samples[id_comp]['mean'])
            new_meas['samples'] = samples
            new_meas['value'] = samples[-1]
            new_meas['value_type'] = samples[-1]['val_cls']

            # Compare buildstats
            bs_key = test + '.' + meas
            rev = metadata['commit_num']['value']
            comp_rev = metadata['commit_num']['value_old']
            if (rev in buildstats and bs_key in buildstats[rev] and
                    comp_rev in buildstats and bs_key in buildstats[comp_rev]):
                new_meas['buildstats'] = BSSummary(buildstats[comp_rev][bs_key],
                                                   buildstats[rev][bs_key])

            new_test['measurements'].append(new_meas)
        tests.append(new_test)

    # Chart options
    chart_opts = {'haxis': {'min': get_data_item(data[0][0], 'layers.meta.commit_count'),
                            'max': get_data_item(data[-1][0], 'layers.meta.commit_count')}
                  }

    print(html.template.render(title="Build Perf Test Report",
                               metadata=metadata, test_data=tests,
                               chart_opts=chart_opts))

def get_buildstats(repo, notes_ref, revs, outdir=None):
    """Get the buildstats from git notes"""
    full_ref = 'refs/notes/' + notes_ref
    if not repo.rev_parse(full_ref):
        log.error("No buildstats found, please try running "
                  "'git fetch origin %s:%s' to fetch them from the remote",
                  full_ref, full_ref)
        return

    missing = False
    buildstats = {}
    log.info("Parsing buildstats from 'refs/notes/%s'", notes_ref)
    for rev in revs:
        buildstats[rev.commit_number] = {}
        log.debug('Dumping buildstats for %s (%s)', rev.commit_number,
                  rev.commit)
        for tag in rev.tags:
            log.debug('    %s', tag)
            try:
                bs_all = json.loads(repo.run_cmd(['notes', '--ref', notes_ref,
                                                  'show', tag + '^0']))
            except GitError:
                log.warning("Buildstats not found for %s", tag)
                bs_all = {}
                missing = True

            for measurement, bs in bs_all.items():
                # Write out onto disk
                if outdir:
                    tag_base, run_id = tag.rsplit('/', 1)
                    tag_base = tag_base.replace('/', '_')
                    bs_dir = os.path.join(outdir, measurement, tag_base)
                    if not os.path.exists(bs_dir):
                        os.makedirs(bs_dir)
                    with open(os.path.join(bs_dir, run_id + '.json'), 'w') as f:
                        json.dump(bs, f, indent=2)

                # Read buildstats into a dict
                _bs = BuildStats.from_json(bs)
                if measurement not in buildstats[rev.commit_number]:
                    buildstats[rev.commit_number][measurement] = _bs
                else:
                    buildstats[rev.commit_number][measurement].aggregate(_bs)

    if missing:
        log.info("Buildstats were missing for some test runs, please "
                 "run 'git fetch origin %s:%s' and try again",
                 full_ref, full_ref)

    return buildstats

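# The returned mapping is keyed by commit number, then by measurement name,
# with aggregated BuildStats objects as values, e.g. (hypothetical key names):
#   buildstats['12345']['test1.time'] -> BuildStats aggregated over test runs
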
def auto_args(repo, args):
    """Guess arguments, if not defined by the user"""
    # Get the latest commit in the repo
    log.debug("Guessing arguments from the latest commit")
    msg = repo.run_cmd(['log', '-1', '--branches', '--remotes', '--format=%b'])
    for line in msg.splitlines():
        split = line.split(':', 1)
        if len(split) != 2:
            continue
        key = split[0]
        val = split[1].strip()
        if key == 'hostname':
            log.debug("Using hostname %s", val)
            args.hostname = val
        elif key == 'branch':
            log.debug("Using branch %s", val)
            args.branch = val

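# auto_args() above parses 'key: value' lines from the body of the latest
# commit, e.g. a commit message body like (hypothetical values):
#   hostname: builder1.example.com
#   branch: master
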
def parse_args(argv):
    """Parse command line arguments"""
    description = """
Examine build performance test results from a Git repository"""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description=description)

    parser.add_argument('--debug', '-d', action='store_true',
                        help="Verbose logging")
    parser.add_argument('--repo', '-r', required=True,
                        help="Results repository (local git clone)")
    parser.add_argument('--list', '-l', action='count',
                        help="List available test runs")
    parser.add_argument('--html', action='store_true',
                        help="Generate report in html format")
    group = parser.add_argument_group('Tag and revision')
    group.add_argument('--tag-name', '-t',
                       default='{hostname}/{branch}/{machine}/{commit_number}-g{commit}/{tag_number}',
                       help="Tag name (pattern) for finding results")
    group.add_argument('--hostname', '-H')
    group.add_argument('--branch', '-B', default='master')
    group.add_argument('--machine', default='qemux86')
    group.add_argument('--history-length', default=25, type=int,
                       help="Number of tested revisions to plot in html report")
    group.add_argument('--commit',
                       help="Revision to search for")
    group.add_argument('--commit-number',
                       help="Revision number to search for, redundant if "
                            "--commit is specified")
    group.add_argument('--commit2',
                       help="Revision to compare with")
    group.add_argument('--commit-number2',
                       help="Revision number to compare with, redundant if "
                            "--commit2 is specified")
    parser.add_argument('--dump-buildstats', nargs='?', const='.',
                        help="Dump buildstats of the tests")

    return parser.parse_args(argv)

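# Example invocations (repository path and commit hash are hypothetical):
#   oe-build-perf-report -r ~/perf-results --list
#   oe-build-perf-report -r ~/perf-results --html > report.html
#   oe-build-perf-report -r ~/perf-results --commit decafbad
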
def main(argv=None):
    """Script entry point"""
    args = parse_args(argv)
    if args.debug:
        log.setLevel(logging.DEBUG)

    repo = GitRepo(args.repo)

    if args.list:
        list_test_revs(repo, args.tag_name, args.list, hostname=args.hostname)
        return 0

    # Determine which hostname to use
    if not args.hostname:
        auto_args(repo, args)

    revs = get_test_revs(repo, args.tag_name, hostname=args.hostname,
                         branch=args.branch, machine=args.machine)
    if len(revs) < 2:
        log.error("%d tested revisions found, unable to generate report",
                  len(revs))
        return 1

    # Pick revisions
    if args.commit:
        if args.commit_number:
            log.warning("Ignoring --commit-number as --commit was specified")
        index1 = rev_find(revs, 'commit', args.commit)
    elif args.commit_number:
        index1 = rev_find(revs, 'commit_number', args.commit_number)
    else:
        index1 = len(revs) - 1

    if args.commit2:
        if args.commit_number2:
            log.warning("Ignoring --commit-number2 as --commit2 was specified")
        index2 = rev_find(revs, 'commit', args.commit2)
    elif args.commit_number2:
        index2 = rev_find(revs, 'commit_number', args.commit_number2)
    else:
        if index1 > 0:
            index2 = index1 - 1
        else:
            log.error("Unable to determine the other commit, use "
                      "--commit2 or --commit-number2 to specify it")
            return 1

    index_l = min(index1, index2)
    index_r = max(index1, index2)

    rev_l = revs[index_l]
    rev_r = revs[index_r]
    log.debug("Using 'left' revision %s (%s), %s test runs:\n    %s",
              rev_l.commit_number, rev_l.commit, len(rev_l.tags),
              '\n    '.join(rev_l.tags))
    log.debug("Using 'right' revision %s (%s), %s test runs:\n    %s",
              rev_r.commit_number, rev_r.commit, len(rev_r.tags),
              '\n    '.join(rev_r.tags))

    # Check report format used in the repo (assume all reports in the same fmt)
    xml = is_xml_format(repo, revs[index_r].tags[-1])

    if args.html:
        index_0 = max(0, min(index_l, index_r - args.history_length))
        rev_range = range(index_0, index_r + 1)
    else:
        # We do not need a range of commits for the text report (no graphs)
        index_0 = index_l
        rev_range = (index_l, index_r)

    # Read raw data
    log.debug("Reading %d revisions, starting from %s (%s)",
              len(rev_range), revs[index_0].commit_number, revs[index_0].commit)
    raw_data = [read_results(repo, revs[i].tags, xml) for i in rev_range]

    data = []
    for raw_m, raw_d in raw_data:
        data.append(AggregateTestData(aggregate_metadata(raw_m),
                                      aggregate_data(raw_d)))

    # Read buildstats only when needed
    buildstats = None
    if args.dump_buildstats or args.html:
        outdir = 'oe-build-perf-buildstats' if args.dump_buildstats else None
        notes_ref = 'buildstats/{}/{}/{}'.format(args.hostname, args.branch,
                                                 args.machine)
        buildstats = get_buildstats(repo, notes_ref, [rev_l, rev_r], outdir)

    # Print report
    if not args.html:
        print_diff_report(data[0].metadata, data[0].results,
                          data[1].metadata, data[1].results)
    else:
        # Re-map the 'left' list index to the data table where index_0 maps to 0
        print_html_report(data, index_l - index_0, buildstats)

    return 0

if __name__ == "__main__":
    sys.exit(main())