# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from __future__ import print_function

import argparse
import codecs
import contextlib
import json
import logging
import os
import platform
import subprocess
import sys
import tempfile
import traceback

logging.basicConfig(level=logging.INFO)

# Add src/testing/ into sys.path for importing xvfb and test_env.
sys.path.append(
    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
import test_env
if sys.platform.startswith('linux'):
  import xvfb

# Unfortunately we need to copy these variables from ../test_env.py.
# Importing it and using its get_sandbox_env breaks test runs on Linux
# (it seems to unset DISPLAY).
CHROME_SANDBOX_ENV = 'CHROME_DEVEL_SANDBOX'
CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'

SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.abspath(
    os.path.join(SCRIPT_DIR, os.path.pardir, os.path.pardir))

# Use result_sink.py in //build/util/lib/results/ for uploading the
# results of non-isolated script tests.
BUILD_UTIL_DIR = os.path.join(SRC_DIR, 'build', 'util')
sys.path.insert(0, BUILD_UTIL_DIR)
try:
  from lib.results import result_sink
  from lib.results import result_types
except ImportError:
  # Some build-time scripts import this file and run into issues with
  # result_sink's dependency on requests, since we can't depend on vpython
  # at build time. So silently swallow the error in that case.
  result_sink = None

# run_web_tests.py returns the number of failures as the return
# code, but caps the return code at 101 to avoid overflowing or colliding
# with reserved values from the shell.
MAX_FAILURES_EXIT_STATUS = 101

# Exit code to indicate an infrastructure issue.
INFRA_FAILURE_EXIT_CODE = 87

# The ACL might be explicitly set or inherited.
CORRECT_ACL_VARIANTS = [
    'APPLICATION PACKAGE AUTHORITY'
    '\\ALL RESTRICTED APPLICATION PACKAGES:(OI)(CI)(RX)',
    'APPLICATION PACKAGE AUTHORITY'
    '\\ALL RESTRICTED APPLICATION PACKAGES:(I)(OI)(CI)(RX)',
]


# pylint: disable=useless-object-inheritance
def set_lpac_acls(acl_dir, is_test_script=False):
  """Sets LPAC ACLs on a directory. Windows 10 only."""
  if platform.release() != '10':
    return
  try:
    existing_acls = subprocess.check_output(['icacls', acl_dir],
                                            stderr=subprocess.STDOUT,
                                            universal_newlines=True)
  except subprocess.CalledProcessError as e:
    logging.error('Failed to retrieve existing ACLs for directory %s', acl_dir)
    logging.error('Command output: %s', e.output)
    sys.exit(e.returncode)
  acls_correct = False
  for acl in CORRECT_ACL_VARIANTS:
    if acl in existing_acls:
      acls_correct = True
  if not acls_correct:
    try:
      subprocess.check_output(
          ['icacls', acl_dir, '/grant', '*S-1-15-2-2:(OI)(CI)(RX)'],
          stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
      logging.error('Failed to apply ACLs to directory %s', acl_dir)
      logging.error('Command output: %s', e.output)
      sys.exit(e.returncode)
  if not is_test_script:
    return
  # Bots running on LUCI use hardlinks that do not have correct ACLs, so these
  # must be manually overridden here.
  with temporary_file() as tempfile_path:
    subprocess.check_output(
        ['icacls', acl_dir, '/save', tempfile_path, '/t', '/q', '/c'],
        stderr=subprocess.STDOUT)
    # ACL files look like this, e.g. for c:\a\b\c\d\Release_x64:
    #
    # Release_x64
    # D:AI(A;OICI;0x1200a9;;;S-1-15-2-2)(A;OICIID;FA;;;BA)
    # Release_x64\icudtl_extra.dat
    # D:AI(A;ID;0x1200a9;;;S-1-15-2-2)(A;ID;FA;;;BA)(A;ID;0x1301bf;;;BU)
    #
    # The file alternates a path line with that path's ACL line, so read it
    # two lines at a time.
    with codecs.open(tempfile_path, encoding='utf_16_le') as aclfile:
      for filename in aclfile:
        acl = next(aclfile).strip()
        full_filename = os.path.abspath(
            os.path.join(acl_dir, os.pardir, filename.strip()))
        if 'S-1-15-2-2' in acl:
          continue
        if os.path.isdir(full_filename):
          continue
        subprocess.check_output(
            ['icacls', full_filename, '/grant', '*S-1-15-2-2:(RX)'],
            stderr=subprocess.STDOUT)
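
# Illustrative usage only; the output directory path below is hypothetical.
# The call is a no-op on anything other than Windows 10:
#
#   set_lpac_acls(r'C:\src\out\Release_x64', is_test_script=True)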


def run_script(argv, funcs):
  def parse_json(path):
    with open(path) as f:
      return json.load(f)

  parser = argparse.ArgumentParser()
  # TODO(phajdan.jr): Make build-config-fs required after passing it in recipe.
  parser.add_argument('--build-config-fs')
  parser.add_argument('--paths', type=parse_json, default={})
  # Properties describe the environment of the build and are the same for
  # every script invocation.
  parser.add_argument('--properties', type=parse_json, default={})
  # Args contains per-invocation arguments that potentially change the
  # behavior of the script.
  parser.add_argument('--args', type=parse_json, default=[])

  subparsers = parser.add_subparsers()

  run_parser = subparsers.add_parser('run')
  run_parser.add_argument(
      '--output', type=argparse.FileType('w'), required=True)
  run_parser.add_argument('--filter-file', type=argparse.FileType('r'))
  run_parser.set_defaults(func=funcs['run'])

  compile_targets_parser = subparsers.add_parser('compile_targets')
  compile_targets_parser.add_argument(
      '--output', type=argparse.FileType('w'), required=True)
  compile_targets_parser.set_defaults(func=funcs['compile_targets'])

  args = parser.parse_args(argv)
  return args.func(args)
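
# Sketch of typical usage (illustrative; the handler names are hypothetical):
# callers pass a dict mapping the 'run' and 'compile_targets' subcommands to
# handler functions, e.g.
#
#   def main_run(args): ...
#   def main_compile_targets(args): ...
#
#   if __name__ == '__main__':
#     sys.exit(run_script(sys.argv[1:], {
#         'run': main_run,
#         'compile_targets': main_compile_targets,
#     }))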


def run_command(argv, env=None, cwd=None):
  print('Running %r in %r (env: %r)' % (argv, cwd, env), file=sys.stderr)
  rc = test_env.run_command(argv, env=env, cwd=cwd)
  print('Command %r returned exit code %d' % (argv, rc), file=sys.stderr)
  return rc


@contextlib.contextmanager
def temporary_file():
  fd, path = tempfile.mkstemp()
  os.close(fd)
  try:
    yield path
  finally:
    os.remove(path)
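
# Minimal usage sketch (illustrative): the yielded path exists for the
# duration of the with-block and is removed afterwards, even if the body
# raises:
#
#   with temporary_file() as path:
#     with open(path, 'w') as f:
#       f.write('scratch data')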


def record_local_script_results(name, output_fd, failures, valid):
  """Records the results of a script test to a local JSON file and to RDB.

  For legacy reasons, local script tests (i.e. script tests that run
  locally and that don't conform to the isolated-test API) are expected to
  record their results using a specific format. This method encapsulates
  that format and also uploads those results to ResultDB.

  Args:
    name: Name of the script test.
    output_fd: A .write()-supporting file descriptor to write results to.
    failures: List of strings representing test failures.
    valid: Whether the results are valid.
  """
  local_script_results = {
      'valid': valid,
      'failures': failures,
  }
  json.dump(local_script_results, output_fd)

  if not result_sink:
    return
  result_sink_client = result_sink.TryInitClient()
  if not result_sink_client:
    return
  status = result_types.PASS
  if not valid:
    status = result_types.UNKNOWN
  elif failures:
    status = result_types.FAIL
  test_log = '\n'.join(failures)
  result_sink_client.Post(name, status, None, test_log, None)
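
# The local JSON written above is simply, for example (illustrative):
#
#   {"valid": true, "failures": ["some_test_name"]}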


def parse_common_test_results(json_results, test_separator='/'):
  def convert_trie_to_flat_paths(trie, prefix=None):
    # Also see blinkpy.web_tests.layout_package.json_results_generator.
    result = {}
    for name, data in trie.items():
      if prefix:
        name = prefix + test_separator + name
      if data and 'actual' not in data and 'expected' not in data:
        result.update(convert_trie_to_flat_paths(data, name))
      else:
        result[name] = data
    return result

  results = {
      'passes': {},
      'unexpected_passes': {},
      'failures': {},
      'unexpected_failures': {},
      'flakes': {},
      'unexpected_flakes': {},
  }

  # TODO(dpranke): crbug.com/357866 - we should simplify the handling of
  # both the return code and the parsing of the actual results, below.

  passing_statuses = ('PASS', 'SLOW', 'NEEDSREBASELINE')

  for test, result in convert_trie_to_flat_paths(
      json_results['tests']).items():
    key = 'unexpected_' if result.get('is_unexpected') else ''
    data = result['actual']
    actual_results = data.split()
    last_result = actual_results[-1]
    expected_results = result['expected'].split()

    if (len(actual_results) > 1 and
        (last_result in expected_results or last_result in passing_statuses)):
      key += 'flakes'
    elif last_result in passing_statuses:
      key += 'passes'
      # TODO(dpranke): crbug.com/357867 ... Why are we assigning result
      # instead of actual_result here? Do we even need these things to be
      # hashes, or just lists?
      data = result
    else:
      key += 'failures'
    results[key][test] = data

  return results
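
# Shape sketch (illustrative, abridged): given a results trie like
#
#   {'tests': {'foo': {'bar.html': {'actual': 'FAIL PASS',
#                                   'expected': 'PASS',
#                                   'is_unexpected': False}}}}
#
# the test is flattened to the key 'foo/bar.html' and lands in the 'flakes'
# bucket, because there were multiple tries and the last result matches an
# expected/passing status.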


def write_interrupted_test_results_to(filepath, test_start_time):
  """Writes a test results JSON file* to filepath.

  This JSON file is formatted to explain that something went wrong.

  *src/docs/testing/json_test_results_format.md

  Args:
    filepath: A path to a file to write the output to.
    test_start_time: The start time of the test run expressed as a
        floating-point offset in seconds from the UNIX epoch.
  """
  with open(filepath, 'w') as fh:
    output = {
        'interrupted': True,
        'num_failures_by_type': {},
        'seconds_since_epoch': test_start_time,
        'tests': {},
        'version': 3,
    }
    json.dump(output, fh)


def get_gtest_summary_passes(output):
  """Returns a mapping from test name to whether the test passed.

  Only partially parses the format. This code is based on code in tools/build,
  specifically
  https://chromium.googlesource.com/chromium/tools/build/+/17fef98756c5f250b20bf716829a0004857235ff/scripts/slave/recipe_modules/test_utils/util.py#189
  """
  if not output:
    return {}

  mapping = {}
  for cur_iteration_data in output.get('per_iteration_data', []):
    for test_fullname, results in cur_iteration_data.items():
      # |results| is a list with one entry per test try. The last entry is
      # the final result.
      last_result = results[-1]
      if last_result['status'] == 'SUCCESS':
        mapping[test_fullname] = True
      elif last_result['status'] != 'SKIPPED':
        mapping[test_fullname] = False
  return mapping
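
# Shape sketch (illustrative, abridged): given gtest JSON output like
#
#   {'per_iteration_data': [
#       {'Suite.PassingTest': [{'status': 'SUCCESS'}],
#        'Suite.FlakyTest': [{'status': 'FAILURE'}, {'status': 'SUCCESS'}],
#        'Suite.SkippedTest': [{'status': 'SKIPPED'}]}]}
#
# the result is {'Suite.PassingTest': True, 'Suite.FlakyTest': True};
# skipped tests are omitted from the mapping.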


def extract_filter_list(filter_list):
  """Helper for isolated script test wrappers. Parses the
  --isolated-script-test-filter command line argument. Currently, double-colon
  ('::') is used as the separator between test names, because a single colon
  may be used in the names of perf benchmarks, which contain URLs.
  """
  return filter_list.split('::')
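
# For example (illustrative):
#
#   extract_filter_list('MySuite.TestA::MySuite.TestB')
#   # -> ['MySuite.TestA', 'MySuite.TestB']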


class BaseIsolatedScriptArgsAdapter(object):
  """The base class for all script adapters that need to translate flags
  set by the isolated script test contract into the specific test script's
  flags.
  """

  def __init__(self):
    self._parser = argparse.ArgumentParser()
    self._options = None
    self._rest_args = None
    self._parser.add_argument(
        '--isolated-outdir', type=str,
        required=False,
        help='value of $ISOLATED_OUTDIR from swarming task')
    self._parser.add_argument(
        '--isolated-script-test-output', type=str,
        required=False,
        help='path to write test results JSON object to')
    self._parser.add_argument(
        '--isolated-script-test-filter', type=str,
        required=False)
    self._parser.add_argument(
        '--isolated-script-test-repeat', type=int,
        required=False)
    self._parser.add_argument(
        '--isolated-script-test-launcher-retry-limit', type=int,
        required=False)
    self._parser.add_argument(
        '--isolated-script-test-also-run-disabled-tests',
        default=False, action='store_true', required=False)
    self._parser.add_argument(
        '--xvfb',
        help='start xvfb. Ignored on unsupported platforms',
        action='store_true')

    # This argument is ignored for now.
    self._parser.add_argument(
        '--isolated-script-test-chartjson-output', type=str)
    # This argument is ignored for now.
    self._parser.add_argument('--isolated-script-test-perf-output', type=str)

    self.add_extra_arguments(self._parser)

  def add_extra_arguments(self, parser):
    pass

  def parse_args(self, args=None):
    self._options, self._rest_args = self._parser.parse_known_args(args)

  @property
  def parser(self):
    return self._parser

  @property
  def options(self):
    return self._options

  @property
  def rest_args(self):
    return self._rest_args

  def generate_test_output_args(self, output):
    del output  # unused
    raise RuntimeError('this method is not yet implemented')

  def generate_test_filter_args(self, test_filter_str):
    del test_filter_str  # unused
    raise RuntimeError('this method is not yet implemented')

  def generate_test_repeat_args(self, repeat_count):
    del repeat_count  # unused
    raise RuntimeError('this method is not yet implemented')

  def generate_test_launcher_retry_limit_args(self, retry_limit):
    del retry_limit  # unused
    raise RuntimeError('this method is not yet implemented')

  def generate_test_also_run_disabled_tests_args(self):
    raise RuntimeError('this method is not yet implemented')

  def generate_sharding_args(self, total_shards, shard_index):
    del total_shards, shard_index  # unused
    raise RuntimeError('this method is not yet implemented')

  def select_python_executable(self):
    return sys.executable

  def generate_isolated_script_cmd(self):
    isolated_script_cmd = [self.select_python_executable()] + self.rest_args

    # Augment test output args if needed.
    if self.options.isolated_script_test_output:
      isolated_script_cmd += self.generate_test_output_args(
          self.options.isolated_script_test_output)

    # Augment test filter args if needed.
    if self.options.isolated_script_test_filter:
      isolated_script_cmd += self.generate_test_filter_args(
          self.options.isolated_script_test_filter)

    # Augment test repeat args if needed.
    if self.options.isolated_script_test_repeat is not None:
      isolated_script_cmd += self.generate_test_repeat_args(
          self.options.isolated_script_test_repeat)

    # Augment test launcher retry limit args if needed.
    if self.options.isolated_script_test_launcher_retry_limit is not None:
      isolated_script_cmd += self.generate_test_launcher_retry_limit_args(
          self.options.isolated_script_test_launcher_retry_limit)

    # Augment "also run disabled tests" args if needed.
    if self.options.isolated_script_test_also_run_disabled_tests:
      isolated_script_cmd += self.generate_test_also_run_disabled_tests_args()

    # Augment sharding args if needed, based on the gtest sharding
    # environment variables set by swarming.
    env = os.environ.copy()
    total_shards = None
    shard_index = None
    if 'GTEST_TOTAL_SHARDS' in env:
      total_shards = int(env['GTEST_TOTAL_SHARDS'])
    if 'GTEST_SHARD_INDEX' in env:
      shard_index = int(env['GTEST_SHARD_INDEX'])
    if total_shards is not None and shard_index is not None:
      isolated_script_cmd += self.generate_sharding_args(
          total_shards, shard_index)

    return isolated_script_cmd

  def clean_up_after_test_run(self):
    pass

  def do_pre_test_run_tasks(self):
    pass

  def do_post_test_run_tasks(self):
    pass

  def run_test(self):
    self.parse_args()
    cmd = self.generate_isolated_script_cmd()

    self.do_pre_test_run_tasks()

    env = os.environ.copy()

    # Assume we want to set up the sandbox environment variables all the
    # time; doing so is harmless on non-Linux platforms and is needed
    # all the time on Linux.
    env[CHROME_SANDBOX_ENV] = CHROME_SANDBOX_PATH
    valid = True
    try:
      env['CHROME_HEADLESS'] = '1'
      print('Running command: %s\nwith env: %r' % (' '.join(cmd), env))
      sys.stdout.flush()
      if self.options.xvfb and sys.platform.startswith('linux'):
        exit_code = xvfb.run_executable(cmd, env)
      else:
        exit_code = test_env.run_command(cmd, env=env, log=False)
      print('Command returned exit code %d' % exit_code)
      sys.stdout.flush()
      self.do_post_test_run_tasks()
      return exit_code
    except Exception:
      traceback.print_exc()
      valid = False
    finally:
      self.clean_up_after_test_run()

    if not valid:
      failures = ['(entire test suite)']
      with open(self.options.isolated_script_test_output, 'w') as fp:
        json.dump({
            'valid': valid,
            'failures': failures,
        }, fp)

    return 1
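

# A minimal subclass sketch (illustrative; MyHarnessAdapter and the flag
# strings below are hypothetical, not part of this module): adapters override
# the generate_* hooks to translate the isolated-script flags into the
# wrapped harness's own flags, then call run_test().
#
#   class MyHarnessAdapter(BaseIsolatedScriptArgsAdapter):
#     def generate_test_output_args(self, output):
#       return ['--write-full-results-to=%s' % output]
#
#     def generate_test_filter_args(self, test_filter_str):
#       return ['--test-filter=%s' % test_filter_str]
#
#   if __name__ == '__main__':
#     sys.exit(MyHarnessAdapter().run_test())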