runner.py

#
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
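"""Test runner and result classes for the OEQA core framework.

Provides OETestRunner and OETestResult, which route unittest output through
the test context's logger, plus OETestResultJSONHelper for writing results
out as testresults.json.
"""
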
import os
import time
import unittest
import logging
import re
import json
import sys

from unittest import TextTestResult as _TestResult
from unittest import TextTestRunner as _TestRunner

class OEStreamLogger(object):
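    """File-like stream that forwards unittest runner output to a logger.

    Partial progress lines (those containing '...') are buffered until the
    rest of the line arrives, so each test's status is logged as one record.
    """
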
    def __init__(self, logger):
        self.logger = logger
        self.buffer = ""

    def write(self, msg):
        if len(msg) > 1 and msg[0] != '\n':
            if '...' in msg:
                self.buffer += msg
            elif self.buffer:
                self.buffer += msg
                self.logger.log(logging.INFO, self.buffer)
                self.buffer = ""
            else:
                self.logger.log(logging.INFO, msg)

    def flush(self):
        for handler in self.logger.handlers:
            handler.flush()

class OETestResult(_TestResult):
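    """TextTestResult that records timings, successes and captured output.

    The result object is injected into the test context (tc.results) so that
    the TestDepends decorator can see the results of earlier tests.
    """
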
    def __init__(self, tc, *args, **kwargs):
        super(OETestResult, self).__init__(*args, **kwargs)

        self.successes = []
        self.starttime = {}
        self.endtime = {}
        self.progressinfo = {}

        # Inject into tc so that TestDepends decorator can see results
        tc.results = self

        self.tc = tc

        # stdout and stderr for each test case
        self.logged_output = {}

    def startTest(self, test):
        # May have been set by concurrencytest
        if test.id() not in self.starttime:
            self.starttime[test.id()] = time.time()
        super(OETestResult, self).startTest(test)

    def stopTest(self, test):
        self.endtime[test.id()] = time.time()
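        # self.buffer is unittest's output-buffering flag; when it is set,
        # sys.stdout/sys.stderr are still the per-test StringIO capture
        # objects at this point, so their contents can be saved per case.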
        if self.buffer:
            self.logged_output[test.id()] = (
                    sys.stdout.getvalue(), sys.stderr.getvalue())
        super(OETestResult, self).stopTest(test)
        if test.id() in self.progressinfo:
            self.tc.logger.info(self.progressinfo[test.id()])

        # Print the errors/failures early to aid/speed debugging; it's a pain
        # to wait until selftest finishes to see them.
        for t in ['failures', 'errors', 'skipped', 'expectedFailures']:
            for (scase, msg) in getattr(self, t):
                if test.id() == scase.id():
                    self.tc.logger.info(str(msg))
                    break

    def logSummary(self, component, context_msg=''):
        elapsed_time = self.tc._run_end_time - self.tc._run_start_time
        self.tc.logger.info("SUMMARY:")
        self.tc.logger.info("%s (%s) - Ran %d test%s in %.3fs" % (component,
            context_msg, self.testsRun, self.testsRun != 1 and "s" or "",
            elapsed_time))

        if self.wasSuccessful():
            msg = "%s - OK - All required tests passed" % component
        else:
            msg = "%s - FAIL - Required tests failed" % component
        msg += " (successes=%d, skipped=%d, failures=%d, errors=%d)" % (
            len(self.successes), len(self.skipped), len(self.failures), len(self.errors))
        self.tc.logger.info(msg)

    def _getTestResultDetails(self, case):
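        """Return (status, message) for a case by searching the result lists.

        Handles results recorded against "setUpModule (...)" or
        "setUpClass (...)" ids as well as against the case itself.
        """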
        result_types = {'failures': 'FAILED', 'errors': 'ERROR', 'skipped': 'SKIPPED',
                        'expectedFailures': 'EXPECTEDFAIL', 'successes': 'PASSED'}

        for rtype in result_types:
            found = False
            for (scase, msg) in getattr(self, rtype):
                if case.id() == scase.id():
                    found = True
                    break
                scase_str = str(scase.id())

                # When a failure happens at the module or class level, the id is a
                # "setUpModule (...)" or "setUpClass (...)" string, so parse it and
                # check whether it refers to this case's module or class.
                m = re.search(r"^setUpModule \((?P<module_name>.*)\)$", scase_str)
                if m:
                    if case.__class__.__module__ == m.group('module_name'):
                        found = True
                        break

                m = re.search(r"^setUpClass \((?P<class_name>.*)\)$", scase_str)
                if m:
                    class_name = "%s.%s" % (case.__class__.__module__,
                                            case.__class__.__name__)
                    if class_name == m.group('class_name'):
                        found = True
                        break

            if found:
                return result_types[rtype], msg

        return 'UNKNOWN', None

    def addSuccess(self, test):
        # Added so we can keep track of successes too
        self.successes.append((test, None))
        super(OETestResult, self).addSuccess(test)

    def logDetails(self, json_file_dir=None, configuration=None, result_id=None,
                   dump_streams=False):
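        """Log a per-case result line for every registered test case.

        If json_file_dir is given, the collected results are also written to
        testresults.json via OETestResultJSONHelper.
        """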
        self.tc.logger.info("RESULTS:")

        result = {}
        logs = {}
        if hasattr(self.tc, "extraresults"):
            result = self.tc.extraresults

        for case_name in self.tc._registry['cases']:
            case = self.tc._registry['cases'][case_name]

            (status, log) = self._getTestResultDetails(case)

            t = ""
            if case.id() in self.starttime and case.id() in self.endtime:
                t = " (" + "{0:.2f}".format(self.endtime[case.id()] - self.starttime[case.id()]) + "s)"

            if status not in logs:
                logs[status] = []
            logs[status].append("RESULTS - %s: %s%s" % (case.id(), status, t))
            report = {'status': status}
            if log:
                report['log'] = log
            if dump_streams and case.id() in self.logged_output:
                (stdout, stderr) = self.logged_output[case.id()]
                report['stdout'] = stdout
                report['stderr'] = stderr
            result[case.id()] = report

        for i in ['PASSED', 'SKIPPED', 'EXPECTEDFAIL', 'ERROR', 'FAILED', 'UNKNOWN']:
            if i not in logs:
                continue
            for l in logs[i]:
                self.tc.logger.info(l)

        if json_file_dir:
            tresultjsonhelper = OETestResultJSONHelper()
            tresultjsonhelper.dump_testresult_file(json_file_dir, configuration, result_id, result)

    def wasSuccessful(self):
        # Override as unexpected successes aren't failures for us
        return (len(self.failures) == len(self.errors) == 0)

class OEListTestsResult(object):
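    """Minimal result object returned by OETestRunner.list_tests()."""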
    def wasSuccessful(self):
        return True

class OETestRunner(_TestRunner):
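    """TextTestRunner that streams its output through the context logger.

    Besides running suites it can list the discovered tests by name, class
    or module (see list_tests()).
    """
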
    streamLoggerClass = OEStreamLogger

    def __init__(self, tc, *args, **kwargs):
        kwargs['stream'] = self.streamLoggerClass(tc.logger)
        super(OETestRunner, self).__init__(*args, **kwargs)
        self.tc = tc
        self.resultclass = OETestResult

    def _makeResult(self):
        return self.resultclass(self.tc, self.stream, self.descriptions,
                                self.verbosity)

    def _walk_suite(self, suite, func):
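        """Recursively visit every TestCase in the suite, calling func on each
        case and counting the visited cases in self._walked_cases."""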
        for obj in suite:
            if isinstance(obj, unittest.suite.TestSuite):
                if len(obj._tests):
                    self._walk_suite(obj, func)
            elif isinstance(obj, unittest.case.TestCase):
                func(self.tc.logger, obj)
                self._walked_cases = self._walked_cases + 1

    def _list_tests_name(self, suite):
        from oeqa.core.decorator.oetag import OETestTag

        self._walked_cases = 0

        def _list_cases(logger, case):
            oetag = None

            if hasattr(case, 'decorators'):
                for d in case.decorators:
                    if isinstance(d, OETestTag):
                        oetag = d.oetag

            logger.info("%s\t\t%s" % (oetag, case.id()))

        self.tc.logger.info("Listing all available tests:")
        self._walked_cases = 0
        self.tc.logger.info("id\ttag\t\ttest")
        self.tc.logger.info("-" * 80)
        self._walk_suite(suite, _list_cases)
        self.tc.logger.info("-" * 80)
        self.tc.logger.info("Total found:\t%s" % self._walked_cases)

    def _list_tests_class(self, suite):
        self._walked_cases = 0

        curr = {}
        def _list_classes(logger, case):
            if 'module' not in curr or curr['module'] != case.__module__:
                curr['module'] = case.__module__
                logger.info(curr['module'])

            if 'class' not in curr or curr['class'] != case.__class__.__name__:
                curr['class'] = case.__class__.__name__
                logger.info(" -- %s" % curr['class'])

            logger.info(" -- -- %s" % case._testMethodName)

        self.tc.logger.info("Listing all available test classes:")
        self._walk_suite(suite, _list_classes)

    def _list_tests_module(self, suite):
        self._walked_cases = 0

        listed = []
        def _list_modules(logger, case):
            if case.__module__ not in listed:
                if case.__module__.startswith('_'):
                    logger.info("%s (hidden)" % case.__module__)
                else:
                    logger.info(case.__module__)
                listed.append(case.__module__)

        self.tc.logger.info("Listing all available test modules:")
        self._walk_suite(suite, _list_modules)

    def list_tests(self, suite, display_type):
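        """List the tests in the suite by name, class or module and return an
        OEListTestsResult (listing always counts as success)."""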
        if display_type == 'name':
            self._list_tests_name(suite)
        elif display_type == 'class':
            self._list_tests_class(suite)
        elif display_type == 'module':
            self._list_tests_module(suite)

        return OEListTestsResult()

class OETestResultJSONHelper(object):
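    """Writes aggregated test results to a JSON file (testresults.json)."""
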
    testresult_filename = 'testresults.json'

    def _get_existing_testresults_if_available(self, write_dir):
        testresults = {}
        file = os.path.join(write_dir, self.testresult_filename)
        if os.path.exists(file):
            with open(file, "r") as f:
                testresults = json.load(f)
        return testresults

    def _write_file(self, write_dir, file_name, file_content):
        file_path = os.path.join(write_dir, file_name)
        with open(file_path, 'w') as the_file:
            the_file.write(file_content)

    def dump_testresult_file(self, write_dir, configuration, result_id, test_result):
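        # Note: 'bb' (the bitbake module) is not imported at the top of this
        # file; this helper is expected to run inside a bitbake/OEQA
        # environment where bb is already available on sys.path.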
        bb.utils.mkdirhier(write_dir)
        lf = bb.utils.lockfile(os.path.join(write_dir, 'jsontestresult.lock'))
        test_results = self._get_existing_testresults_if_available(write_dir)
        test_results[result_id] = {'configuration': configuration, 'result': test_result}
        json_testresults = json.dumps(test_results, sort_keys=True, indent=4)
        self._write_file(write_dir, self.testresult_filename, json_testresults)
        bb.utils.unlockfile(lf)
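
# Illustrative usage sketch (not part of the module): the directory,
# configuration dictionary and result id below are made-up examples of what a
# caller running inside a bitbake environment might pass.
#
#   helper = OETestResultJSONHelper()
#   helper.dump_testresult_file(
#       write_dir='/tmp/oeqa-results',
#       configuration={'TEST_TYPE': 'runtime', 'MACHINE': 'qemux86-64'},
#       result_id='runtime_qemux86-64_core-image-minimal',
#       test_result={'some.test.case': {'status': 'PASSED'}})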