# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (c) 2016 Google, Inc
#
from contextlib import contextmanager
import doctest
import glob
import multiprocessing
import os
import sys
import unittest

from io import StringIO

from patman import command

use_concurrent = True
try:
    from concurrencytest.concurrencytest import ConcurrentTestSuite
    from concurrencytest.concurrencytest import fork_for_tests
except ImportError:
    use_concurrent = False


def RunTestCoverage(prog, filter_fname, exclude_list, build_dir, required=None,
                    extra_args=None):
    """Run tests and check that we get 100% coverage

    Args:
        prog: Program to run (will be passed a '-t' argument to run tests)
        filter_fname: Normally all *.py files in the program's directory will
            be included. If this is not None, then it is used to filter the
            list so that only filenames that don't contain filter_fname are
            included.
        exclude_list: List of file patterns to exclude from the coverage
            calculation
        build_dir: Build directory, used to locate libfdt.py
        required: Set of module names which must be in the coverage report
        extra_args (str): Extra arguments to pass to the tool before the
            -t/test arg

    Raises:
        ValueError if the code coverage is not 100%
    """
    # This uses the build output from sandbox_spl to get _libfdt.so
    path = os.path.dirname(prog)
    if filter_fname:
        glob_list = glob.glob(os.path.join(path, '*.py'))
        glob_list = [fname for fname in glob_list if filter_fname in fname]
    else:
        glob_list = []
    glob_list += exclude_list
    glob_list += ['*libfdt.py', '*site-packages*', '*dist-packages*']
    glob_list += ['*concurrencytest*']
    test_cmd = 'test' if 'binman' in prog or 'patman' in prog else '-t'
    prefix = ''
    if build_dir:
        prefix = 'PYTHONPATH=$PYTHONPATH:%s/sandbox_spl/tools ' % build_dir
    cmd = ('%spython3-coverage run '
           '--omit "%s" %s %s %s -P1' % (prefix, ','.join(glob_list),
                                         prog, extra_args or '', test_cmd))
    os.system(cmd)
    stdout = command.Output('python3-coverage', 'report')
    lines = stdout.splitlines()
    ok = True
    if required:
        # Convert '/path/to/name.py' to just the module name 'name'
        test_set = set([os.path.splitext(os.path.basename(line.split()[0]))[0]
                        for line in lines if '/etype/' in line])
        missing_list = required
        missing_list.discard('__init__')
        missing_list.difference_update(test_set)
        if missing_list:
            print('Missing tests for %s' % (', '.join(missing_list)))
            print(stdout)
            ok = False

    coverage = lines[-1].split(' ')[-1]
    print(coverage)
    if coverage != '100%':
        print(stdout)
        print("Type 'python3-coverage html' to get a report in "
              'htmlcov/index.html')
        print('Coverage error: %s, but should be 100%%' % coverage)
        ok = False
    if not ok:
        raise ValueError('Test coverage failure')
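
# Illustrative call only: the program path, build directory and required
# module names below are hypothetical, not part of this file:
#
#     RunTestCoverage('tools/binman/binman', None, [], 'build-sandbox_spl',
#                     required={'section', 'blob'})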


# Use this to suppress stdout/stderr output:
# with capture_sys_output() as (stdout, stderr):
#   ...do something...
@contextmanager
def capture_sys_output():
    """Temporarily redirect sys.stdout/sys.stderr into StringIO buffers"""
    capture_out, capture_err = StringIO(), StringIO()
    old_out, old_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
    finally:
        sys.stdout, sys.stderr = old_out, old_err
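
# A minimal sketch of capture_sys_output() in a test case (the class below is
# illustrative, not part of this module):
#
#     class ExampleTest(unittest.TestCase):
#         def test_output_captured(self):
#             with capture_sys_output() as (stdout, stderr):
#                 print('hello')
#             self.assertEqual('hello\n', stdout.getvalue())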


def ReportResult(toolname: str, test_name: str, result: unittest.TestResult):
    """Report the results from a suite of tests

    Args:
        toolname: Name of the tool that ran the tests
        test_name: Name of test that was run, or None for all
        result: A unittest.TestResult object containing the results
    """
    # Remove errors which just indicate a missing test. Since Python v3.5, if
    # an ImportError or AttributeError occurs while traversing a test name, a
    # synthetic test that raises that error when run is returned. These errors
    # are included in the errors accumulated by result.errors.
    if test_name:
        errors = []
        for test, err in result.errors:
            if ("has no attribute '%s'" % test_name) not in err:
                errors.append((test, err))
            else:
                # Don't count the synthetic missing-test error as a run test
                result.testsRun -= 1
        result.errors = errors

    print(result)
    for test, err in result.errors:
        print(test.id(), err)
    for test, err in result.failures:
        print(test.id(), err)
    if result.skipped:
        print('%d %s test%s SKIPPED:' % (len(result.skipped), toolname,
                                         's' if len(result.skipped) > 1 else ''))
        for skip_info in result.skipped:
            print('%s: %s' % (skip_info[0], skip_info[1]))
    if result.errors or result.failures:
        print('%s tests FAILED' % toolname)
        return 1
    return 0
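
# Sketch of a tool's exit path using ReportResult() (the tool name and flow
# are illustrative):
#
#     result = unittest.TestResult()
#     ...run the test suites, adding their results to 'result'...
#     sys.exit(ReportResult('binman', test_name, result))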


def RunTestSuites(result, debug, verbosity, test_preserve_dirs, processes,
                  test_name, toolpath, test_class_list):
    """Run a series of test suites and collect the results

    Args:
        result: A unittest.TestResult object to add the results to
        debug: True to enable debugging, which shows a full stack trace on
            error
        verbosity: Verbosity level to use (0-4)
        test_preserve_dirs: True to preserve the input directory used by tests
            so that it can be examined afterwards (only useful for debugging
            tests). If a single test is selected (in args[0]) it also preserves
            the output directory for this test. Both directories are displayed
            on the command line.
        processes: Number of processes to use to run tests (None=same as #CPUs)
        test_name: Name of test to run, or None for all
        toolpath: List of paths to use for tools
        test_class_list: List of test classes to run
    """
    # Run the doctests for any modules that provide them (none at present)
    for module in []:
        suite = doctest.DocTestSuite(module)
        suite.run(result)

    sys.argv = [sys.argv[0]]
    if debug:
        sys.argv.append('-D')
    if verbosity:
        sys.argv.append('-v%d' % verbosity)
    if toolpath:
        for path in toolpath:
            sys.argv += ['--toolpath', path]

    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    for module in test_class_list:
        # Tell the test module about our arguments, if it is interested
        if hasattr(module, 'setup_test_args'):
            setup_test_args = getattr(module, 'setup_test_args')
            setup_test_args(
                preserve_indir=test_preserve_dirs,
                preserve_outdirs=test_preserve_dirs and test_name is not None,
                toolpath=toolpath, verbosity=verbosity)
        if test_name:
            try:
                suite.addTests(loader.loadTestsFromName(test_name, module))
            except AttributeError:
                continue
        else:
            suite.addTests(loader.loadTestsFromTestCase(module))
    if use_concurrent and processes != 1:
        concurrent_suite = ConcurrentTestSuite(
            suite, fork_for_tests(processes or multiprocessing.cpu_count()))
        concurrent_suite.run(result)
    else:
        suite.run(result)
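
# RunTestSuites() and ReportResult() are typically combined like this (the
# test classes named here are hypothetical, not defined in this module):
#
#     result = unittest.TestResult()
#     RunTestSuites(result, debug=True, verbosity=1, test_preserve_dirs=False,
#                   processes=None, test_name=None, toolpath=[],
#                   test_class_list=[TestFunctional, TestDtb])
#     ReportResult('binman', None, result)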