oe-build-perf-test

#!/usr/bin/env python3
#
# Build performance test script
#
# Copyright (c) 2016, Intel Corporation.
#
# SPDX-License-Identifier: GPL-2.0-only
#
import argparse
import errno
import fcntl
import json
import logging
import os
import re
import shutil
import sys
from datetime import datetime

sys.path.insert(0, os.path.dirname(os.path.realpath(__file__)) + '/lib')
import scriptpath
scriptpath.add_oe_lib_path()
scriptpath.add_bitbake_lib_path()
import oeqa.buildperf
from oeqa.buildperf import (BuildPerfTestLoader, BuildPerfTestResult,
                            BuildPerfTestRunner, KernelDropCaches)
from oeqa.utils.commands import runCmd
from oeqa.utils.metadata import metadata_from_bb, write_metadata_file

# Set-up logging
LOG_FORMAT = '[%(asctime)s] %(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT,
                    datefmt='%Y-%m-%d %H:%M:%S')
log = logging.getLogger()
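
# NOTE: the root logger configured above writes to stderr at INFO level; a
# per-run file handler is attached by setup_file_logging() once the output
# directory is known (see main() below).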


def acquire_lock(lock_f):
    """Acquire flock on file"""
    log.debug("Acquiring lock %s", os.path.abspath(lock_f.name))
    try:
        fcntl.flock(lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError as err:
        if err.errno == errno.EAGAIN:
            return False
        raise
    log.debug("Lock acquired")
    return True
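
# NOTE: fcntl.flock() with LOCK_EX | LOCK_NB fails immediately with EAGAIN
# when another process already holds the lock, instead of blocking; the lock
# itself is released automatically when the file object is closed, i.e. at
# process exit.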


def pre_run_sanity_check():
    """Sanity check of build environment"""
    build_dir = os.environ.get("BUILDDIR")
    if not build_dir:
        log.error("BUILDDIR not set. Please run the build environment setup "
                  "script.")
        return False
    if os.getcwd() != build_dir:
        log.error("Please run this script under BUILDDIR (%s)", build_dir)
        return False

    ret = runCmd('which bitbake', ignore_status=True)
    if ret.status:
        log.error("bitbake command not found")
        return False
    return True


def setup_file_logging(log_file):
    """Set up logging to a file"""
    log_dir = os.path.dirname(log_file)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    formatter = logging.Formatter(LOG_FORMAT)
    handler = logging.FileHandler(log_file)
    handler.setFormatter(formatter)
    log.addHandler(handler)


def archive_build_conf(out_dir):
    """Archive build/conf to test results"""
    src_dir = os.path.join(os.environ['BUILDDIR'], 'conf')
    tgt_dir = os.path.join(out_dir, 'build', 'conf')
    os.makedirs(os.path.dirname(tgt_dir))
    shutil.copytree(src_dir, tgt_dir)
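
# NOTE: shutil.copytree() requires that tgt_dir does not exist yet, and the
# os.makedirs() call above creates only its parent ('build' inside out_dir),
# so this is expected to run once per fresh results directory.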


def update_globalres_file(result_obj, filename, metadata):
    """Write results to globalres csv file"""
    # Map test names to time and size columns in globalres
    # The tuples represent index and length of times and sizes
    # respectively
    gr_map = {'test1': ((0, 1), (8, 1)),
              'test12': ((1, 1), (None, None)),
              'test13': ((2, 1), (9, 1)),
              'test2': ((3, 1), (None, None)),
              'test3': ((4, 3), (None, None)),
              'test4': ((7, 1), (10, 2))}

    values = ['0'] * 12
    for status, test, _ in result_obj.all_results():
        if status in ['ERROR', 'SKIPPED']:
            continue
        (t_ind, t_len), (s_ind, s_len) = gr_map[test.name]
        if t_ind is not None:
            values[t_ind:t_ind + t_len] = test.times
        if s_ind is not None:
            values[s_ind:s_ind + s_len] = test.sizes

    log.debug("Writing globalres log to %s", filename)
    rev_info = metadata['layers']['meta']
    with open(filename, 'a') as fobj:
        fobj.write('{},{}:{},{},'.format(metadata['hostname'],
                                         rev_info['branch'],
                                         rev_info['commit'],
                                         rev_info['commit']))
        fobj.write(','.join(values) + '\n')
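
# NOTE: given the column mapping above, an appended globalres row looks like
# (illustrative):
#   <hostname>,<branch>:<commit>,<commit>,v0,v1,...,v11
# where columns v0-v7 hold measured times and v8-v11 hold sizes.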


def parse_args(argv):
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    parser.add_argument('-D', '--debug', action='store_true',
                        help='Enable debug level logging')
    parser.add_argument('--globalres-file',
                        type=os.path.abspath,
                        help="Append results to 'globalres' csv file")
    parser.add_argument('--lock-file', default='./oe-build-perf.lock',
                        metavar='FILENAME', type=os.path.abspath,
                        help="Lock file to use")
    parser.add_argument('-o', '--out-dir', default='results-{date}',
                        type=os.path.abspath,
                        help="Output directory for test results")
    parser.add_argument('-x', '--xml', action='store_true',
                        help='Enable JUnit xml output')
    parser.add_argument('--log-file',
                        default='{out_dir}/oe-build-perf-test.log',
                        help="Log file of this script")
    parser.add_argument('--run-tests', nargs='+', metavar='TEST',
                        help="List of tests to run")

    return parser.parse_args(argv)
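
# Example invocations (illustrative; test names are resolved against the
# oeqa.buildperf module by the loader in main()):
#   oe-build-perf-test --globalres-file globalres.csv
#   oe-build-perf-test --run-tests <TEST> [<TEST> ...] -o my-results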


def main(argv=None):
    """Script entry point"""
    args = parse_args(argv)

    # Set-up log file
    out_dir = args.out_dir.format(date=datetime.now().strftime('%Y%m%d%H%M%S'))
    setup_file_logging(args.log_file.format(out_dir=out_dir))

    if args.debug:
        log.setLevel(logging.DEBUG)

    lock_f = open(args.lock_file, 'w')
    if not acquire_lock(lock_f):
        log.error("Another instance of this script is running, exiting...")
        return 1

    if not pre_run_sanity_check():
        return 1

    # Check our capability to drop caches and ask for the sudo password
    # if needed
    KernelDropCaches.check()
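    # NOTE: dropping the kernel page cache requires root privileges, so any
    # password prompt happens here, up front, rather than in the middle of a
    # timed test run.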

    # Load build perf tests
    loader = BuildPerfTestLoader()
    if args.run_tests:
        suite = loader.loadTestsFromNames(args.run_tests, oeqa.buildperf)
    else:
        suite = loader.loadTestsFromModule(oeqa.buildperf)

    # Save test metadata
    metadata = metadata_from_bb()
    log.info("Testing Git revision branch:commit %s:%s (%s)",
             metadata['layers']['meta']['branch'],
             metadata['layers']['meta']['commit'],
             metadata['layers']['meta']['commit_count'])
    if args.xml:
        write_metadata_file(os.path.join(out_dir, 'metadata.xml'), metadata)
    else:
        with open(os.path.join(out_dir, 'metadata.json'), 'w') as fobj:
            json.dump(metadata, fobj, indent=2)
    archive_build_conf(out_dir)

    runner = BuildPerfTestRunner(out_dir, verbosity=2)

    # Suppress logger output to stderr so that the output from unittest
    # is not mixed with occasional logger output
    log.handlers[0].setLevel(logging.CRITICAL)

    # Run actual tests
    result = runner.run(suite)

    # Restore logger output to stderr
    log.handlers[0].setLevel(log.level)

    if args.xml:
        result.write_results_xml()
    else:
        result.write_results_json()
    result.write_buildstats_json()
    if args.globalres_file:
        update_globalres_file(result, args.globalres_file, metadata)

    if result.wasSuccessful():
        return 0

    return 2


if __name__ == '__main__':
    sys.exit(main())