#!/usr/bin/env vpython3
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Script to generate the majority of the JSON files in the src/testing/buildbot
directory. Maintaining these files by hand is too unwieldy.
"""

import argparse
import ast
import collections
import copy
import difflib
import functools
import glob
import itertools
import json
import os
import six
import string
import sys

import buildbot_json_magic_substitutions as magic_substitutions

# pylint: disable=super-with-arguments,useless-super-delegation

THIS_DIR = os.path.dirname(os.path.abspath(__file__))

BROWSER_CONFIG_TO_TARGET_SUFFIX_MAP = {
    'android-chromium': '_android_chrome',
    'android-chromium-monochrome': '_android_monochrome',
    'android-weblayer': '_android_weblayer',
    'android-webview': '_android_webview',
}


class BBGenErr(Exception):
  def __init__(self, message):
    super(BBGenErr, self).__init__(message)


# This class is only present to accommodate certain machines on
# chromium.android.fyi which run certain tests as instrumentation
# tests, but not as gtests. If this discrepancy were fixed then the
# notion could be removed.
class TestSuiteTypes(object):  # pylint: disable=useless-object-inheritance
  GTEST = 'gtest'


class BaseGenerator(object):  # pylint: disable=useless-object-inheritance
  def __init__(self, bb_gen):
    self.bb_gen = bb_gen

  def generate(self, waterfall, tester_name, tester_config, input_tests):
    raise NotImplementedError()

  def sort(self, tests):
    raise NotImplementedError()


def custom_cmp(a, b):
  return int(a > b) - int(a < b)


def cmp_tests(a, b):
  # Prefer to compare based on the "test" key.
  val = custom_cmp(a['test'], b['test'])
  if val != 0:
    return val
  if 'name' in a and 'name' in b:
    return custom_cmp(a['name'], b['name'])  # pragma: no cover
  if 'name' not in a and 'name' not in b:
    return 0  # pragma: no cover
  # Prefer to put variants of the same test after the first one.
  if 'name' in a:
    return 1
  # 'name' is in b.
  return -1  # pragma: no cover
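
# For example, sorting with functools.cmp_to_key(cmp_tests) orders entries
# primarily by their 'test' key, with named variants of the same test placed
# after the unnamed entry (entries here are illustrative):
#   [{'test': 'foo_unittests'},
#    {'test': 'foo_unittests', 'name': 'foo_unittests_variant'},
#    {'test': 'gl_tests'}]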


class GPUTelemetryTestGenerator(BaseGenerator):
  def __init__(self, bb_gen, is_android_webview=False, is_cast_streaming=False):
    super(GPUTelemetryTestGenerator, self).__init__(bb_gen)
    self._is_android_webview = is_android_webview
    self._is_cast_streaming = is_cast_streaming

  def generate(self, waterfall, tester_name, tester_config, input_tests):
    isolated_scripts = []
    for test_name, test_config in sorted(input_tests.items()):
      # Variants allow more than one definition for a given test, and are
      # defined in array format from resolve_variants().
      if not isinstance(test_config, list):
        test_config = [test_config]

      for config in test_config:
        test = self.bb_gen.generate_gpu_telemetry_test(waterfall, tester_name,
                                                       tester_config, test_name,
                                                       config,
                                                       self._is_android_webview,
                                                       self._is_cast_streaming)
        if test:
          isolated_scripts.append(test)
    return isolated_scripts

  def sort(self, tests):
    return sorted(tests, key=lambda x: x['name'])


class GTestGenerator(BaseGenerator):
  def __init__(self, bb_gen):
    super(GTestGenerator, self).__init__(bb_gen)

  def generate(self, waterfall, tester_name, tester_config, input_tests):
    # The relative ordering of some of the tests is important to
    # minimize differences compared to the handwritten JSON files, since
    # Python's sorts are stable and there are some tests with the same
    # key (see gles2_conform_d3d9_test and similar variants). Avoid
    # losing the order by avoiding coalescing the dictionaries into one.
    gtests = []
    for test_name, test_config in sorted(input_tests.items()):
      # Variants allow more than one definition for a given test, and are
      # defined in array format from resolve_variants().
      if not isinstance(test_config, list):
        test_config = [test_config]
      for config in test_config:
        test = self.bb_gen.generate_gtest(
            waterfall, tester_name, tester_config, test_name, config)
        if test:
          # generate_gtest may veto the test generation on this tester.
          gtests.append(test)
    return gtests

  def sort(self, tests):
    return sorted(tests, key=functools.cmp_to_key(cmp_tests))


class IsolatedScriptTestGenerator(BaseGenerator):
  def __init__(self, bb_gen):
    super(IsolatedScriptTestGenerator, self).__init__(bb_gen)

  def generate(self, waterfall, tester_name, tester_config, input_tests):
    isolated_scripts = []
    for test_name, test_config in sorted(input_tests.items()):
      # Variants allow more than one definition for a given test, and are
      # defined in array format from resolve_variants().
      if not isinstance(test_config, list):
        test_config = [test_config]
      for config in test_config:
        test = self.bb_gen.generate_isolated_script_test(
            waterfall, tester_name, tester_config, test_name, config)
        if test:
          isolated_scripts.append(test)
    return isolated_scripts

  def sort(self, tests):
    return sorted(tests, key=lambda x: x['name'])


class ScriptGenerator(BaseGenerator):
  def __init__(self, bb_gen):
    super(ScriptGenerator, self).__init__(bb_gen)

  def generate(self, waterfall, tester_name, tester_config, input_tests):
    scripts = []
    for test_name, test_config in sorted(input_tests.items()):
      test = self.bb_gen.generate_script_test(
          waterfall, tester_name, tester_config, test_name, test_config)
      if test:
        scripts.append(test)
    return scripts

  def sort(self, tests):
    return sorted(tests, key=lambda x: x['name'])


class JUnitGenerator(BaseGenerator):
  def __init__(self, bb_gen):
    super(JUnitGenerator, self).__init__(bb_gen)

  def generate(self, waterfall, tester_name, tester_config, input_tests):
    scripts = []
    for test_name, test_config in sorted(input_tests.items()):
      test = self.bb_gen.generate_junit_test(
          waterfall, tester_name, tester_config, test_name, test_config)
      if test:
        scripts.append(test)
    return scripts

  def sort(self, tests):
    return sorted(tests, key=lambda x: x['test'])


class SkylabGenerator(BaseGenerator):
  def __init__(self, bb_gen):
    super(SkylabGenerator, self).__init__(bb_gen)

  def generate(self, waterfall, tester_name, tester_config, input_tests):
    scripts = []
    for test_name, test_config in sorted(input_tests.items()):
      for config in test_config:
        test = self.bb_gen.generate_skylab_test(waterfall, tester_name,
                                                tester_config, test_name,
                                                config)
        if test:
          scripts.append(test)
    return scripts

  def sort(self, tests):
    return sorted(tests, key=lambda x: x['test'])


def check_compound_references(other_test_suites=None,
                              sub_suite=None,
                              suite=None,
                              target_test_suites=None,
                              test_type=None,
                              **kwargs):
  """Ensure compound references don't target other compounds."""
  del kwargs
  if sub_suite in other_test_suites or sub_suite in target_test_suites:
    raise BBGenErr('%s may not refer to other composition type test '
                   'suites (error found while processing %s)' %
                   (test_type, suite))


def check_basic_references(basic_suites=None,
                           sub_suite=None,
                           suite=None,
                           **kwargs):
  """Ensure the test has a basic suite reference."""
  del kwargs
  if sub_suite not in basic_suites:
    raise BBGenErr('Unable to find reference to %s while processing %s' %
                   (sub_suite, suite))


def check_conflicting_definitions(basic_suites=None,
                                  seen_tests=None,
                                  sub_suite=None,
                                  suite=None,
                                  test_type=None,
                                  **kwargs):
  """Ensure that if a test is reachable via multiple basic suites,
  all of them have an identical definition of the tests.
  """
  del kwargs
  for test_name in basic_suites[sub_suite]:
    if (test_name in seen_tests and
        basic_suites[sub_suite][test_name] !=
        basic_suites[seen_tests[test_name]][test_name]):
      raise BBGenErr('Conflicting test definitions for %s from %s '
                     'and %s in %s (error found while processing %s)'
                     % (test_name, seen_tests[test_name], sub_suite,
                        test_type, suite))
    seen_tests[test_name] = sub_suite


def check_matrix_identifier(sub_suite=None,
                            suite=None,
                            suite_def=None,
                            all_variants=None,
                            **kwargs):
  """Ensure 'identifier' is defined for each variant."""
  del kwargs
  sub_suite_config = suite_def[sub_suite]
  for variant in sub_suite_config.get('variants', []):
    if isinstance(variant, str):
      if variant not in all_variants:
        raise BBGenErr('Missing variant definition for %s in variants.pyl'
                       % variant)
      variant = all_variants[variant]

    if not 'identifier' in variant:
      raise BBGenErr('Missing required identifier field in matrix '
                     'compound suite %s, %s' % (suite, sub_suite))
    if variant['identifier'] == '':
      raise BBGenErr('Identifier field can not be "" in matrix '
                     'compound suite %s, %s' % (suite, sub_suite))
    if variant['identifier'].strip() != variant['identifier']:
      raise BBGenErr('Identifier field can not have leading and trailing '
                     'whitespace in matrix compound suite %s, %s' %
                     (suite, sub_suite))
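
# For example, each entry under a matrix compound suite's 'variants' list must
# either name an entry in variants.pyl or be a dict carrying a non-empty
# 'identifier' with no leading/trailing whitespace, e.g. (values illustrative):
#   {
#     'identifier': 'some_variant',
#     'args': ['--some-flag'],
#   }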


class BBJSONGenerator(object):  # pylint: disable=useless-object-inheritance
  def __init__(self, args):
    self.this_dir = THIS_DIR
    self.args = args
    self.waterfalls = None
    self.test_suites = None
    self.exceptions = None
    self.mixins = None
    self.gn_isolate_map = None
    self.variants = None

  @staticmethod
  def parse_args(argv):
    # RawTextHelpFormatter allows for styling of help statement
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)

    group = parser.add_mutually_exclusive_group()
    group.add_argument(
        '-c',
        '--check',
        action='store_true',
        help=
        'Do consistency checks of configuration and generated files and then '
        'exit. Used during presubmit. '
        'Causes the tool to not generate any files.')
    group.add_argument(
        '--query',
        type=str,
        help=(
            "Returns raw JSON information of buildbots and tests.\n" +
            "Examples:\n" + "  List all bots (all info):\n" +
            "    --query bots\n\n" +
            "  List all bots and only their associated tests:\n" +
            "    --query bots/tests\n\n" +
            "  List all information about 'bot1' " +
            "(make sure you have quotes):\n" + "    --query bot/'bot1'\n\n" +
            "  List tests running for 'bot1' (make sure you have quotes):\n" +
            "    --query bot/'bot1'/tests\n\n" + "  List all tests:\n" +
            "    --query tests\n\n" +
            "  List all tests and the bots running them:\n" +
            "    --query tests/bots\n\n" +
            "  List all tests that satisfy multiple parameters\n" +
            "  (separation of parameters by '&' symbol):\n" +
            "    --query tests/'device_os:Android&device_type:hammerhead'\n\n" +
            "  List all tests that run with a specific flag:\n" +
            "    --query bots/'--test-launcher-print-test-stdio=always'\n\n" +
            "  List specific test (make sure you have quotes):\n"
            "    --query test/'test1'\n\n"
            "  List all bots running 'test1' " +
            "(make sure you have quotes):\n" + "    --query test/'test1'/bots"))
    parser.add_argument(
        '-n',
        '--new-files',
        action='store_true',
        help=
        'Write output files as .new.json. Useful during development so old and '
        'new files can be looked at side-by-side.')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Increases verbosity. Affects consistency checks.')
    parser.add_argument('waterfall_filters',
                        metavar='waterfalls',
                        type=str,
                        nargs='*',
                        help='Optional list of waterfalls to generate.')
    parser.add_argument(
        '--pyl-files-dir',
        type=os.path.realpath,
        help='Path to the directory containing the input .pyl files.')
    parser.add_argument(
        '--json',
        metavar='JSON_FILE_PATH',
        help='Outputs results into a json file. Only works with query function.'
    )
    parser.add_argument('--isolate-map-file',
                        metavar='PATH',
                        help='path to additional isolate map files.',
                        default=[],
                        action='append',
                        dest='isolate_map_files')
    parser.add_argument(
        '--infra-config-dir',
        help='Path to the LUCI services configuration directory',
        default=os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', '..', 'infra',
                         'config')))
    args = parser.parse_args(argv)
    if args.json and not args.query:
      parser.error(
          "The --json flag can only be used with --query.")  # pragma: no cover
    args.infra_config_dir = os.path.abspath(args.infra_config_dir)
    return args

  def generate_abs_file_path(self, relative_path):
    return os.path.join(self.this_dir, relative_path)

  def print_line(self, line):
    # Exists so that tests can mock
    print(line)  # pragma: no cover

  def read_file(self, relative_path):
    with open(self.generate_abs_file_path(relative_path)) as fp:
      return fp.read()

  def write_file(self, relative_path, contents):
    with open(self.generate_abs_file_path(relative_path), 'wb') as fp:
      fp.write(contents.encode('utf-8'))

  def pyl_file_path(self, filename):
    if self.args and self.args.pyl_files_dir:
      return os.path.join(self.args.pyl_files_dir, filename)
    return filename

  # pylint: disable=inconsistent-return-statements
  def load_pyl_file(self, filename):
    try:
      return ast.literal_eval(self.read_file(
          self.pyl_file_path(filename)))
    except (SyntaxError, ValueError) as e:  # pragma: no cover
      six.raise_from(
          BBGenErr('Failed to parse pyl file "%s": %s' % (filename, e)),
          e)  # pragma: no cover
  # pylint: enable=inconsistent-return-statements

  # TODO(kbr): require that os_type be specified for all bots in waterfalls.pyl.
  # Currently it is only mandatory for bots which run GPU tests. Change these to
  # use [] instead of .get().
  def is_android(self, tester_config):
    return tester_config.get('os_type') == 'android'

  def is_chromeos(self, tester_config):
    return tester_config.get('os_type') == 'chromeos'

  def is_fuchsia(self, tester_config):
    return tester_config.get('os_type') == 'fuchsia'

  def is_lacros(self, tester_config):
    return tester_config.get('os_type') == 'lacros'

  def is_linux(self, tester_config):
    return tester_config.get('os_type') == 'linux'

  def is_mac(self, tester_config):
    return tester_config.get('os_type') == 'mac'

  def is_win(self, tester_config):
    return tester_config.get('os_type') == 'win'

  def is_win64(self, tester_config):
    return (tester_config.get('os_type') == 'win' and
            tester_config.get('browser_config') == 'release_x64')

  def add_variant_to_test_name(self, test_name, variant_id):
    return '{} {}'.format(test_name, variant_id)

  def remove_variant_from_test_name(self, test_name, variant_id):
    return test_name.split(variant_id)[0].strip()
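
  # For example (identifiers illustrative):
  #   add_variant_to_test_name('foo_tests', 'SOME_VARIANT')
  #     -> 'foo_tests SOME_VARIANT'
  #   remove_variant_from_test_name('foo_tests SOME_VARIANT', 'SOME_VARIANT')
  #     -> 'foo_tests'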

  def get_exception_for_test(self, test_name, test_config):
    # gtests may have both "test" and "name" fields, and usually, if the "name"
    # field is specified, it means that the same test is being repurposed
    # multiple times with different command line arguments. To handle this case,
    # prefer to lookup per the "name" field of the test itself, as opposed to
    # the "test_name", which is actually the "test" field.
    if 'name' in test_config:
      return self.exceptions.get(test_config['name'])
    return self.exceptions.get(test_name)

  def should_run_on_tester(self, waterfall, tester_name, test_name,
                           test_config):
    # Currently, the only reason a test should not run on a given tester is that
    # it's in the exceptions. (Once the GPU waterfall generation script is
    # incorporated here, the rules will become more complex.)
    exception = self.get_exception_for_test(test_name, test_config)
    if not exception:
      return True
    remove_from = None
    remove_from = exception.get('remove_from')
    if remove_from:
      if tester_name in remove_from:
        return False
      # TODO(kbr): this code path was added for some tests (including
      # android_webview_unittests) on one machine (Nougat Phone
      # Tester) which exists with the same name on two waterfalls,
      # chromium.android and chromium.fyi; the tests are run on one
      # but not the other. Once the bots are all uniquely named (a
      # different ongoing project) this code should be removed.
      # TODO(kbr): add coverage.
      return (tester_name + ' ' + waterfall['name']
              not in remove_from)  # pragma: no cover
    return True

  def get_test_modifications(self, test, test_name, tester_name):
    exception = self.get_exception_for_test(test_name, test)
    if not exception:
      return None
    return exception.get('modifications', {}).get(tester_name)

  def get_test_replacements(self, test, test_name, tester_name):
    exception = self.get_exception_for_test(test_name, test)
    if not exception:
      return None
    return exception.get('replacements', {}).get(tester_name)

  def merge_command_line_args(self, arr, prefix, splitter):
    prefix_len = len(prefix)
    idx = 0
    first_idx = -1
    accumulated_args = []
    while idx < len(arr):
      flag = arr[idx]
      delete_current_entry = False
      if flag.startswith(prefix):
        arg = flag[prefix_len:]
        accumulated_args.extend(arg.split(splitter))
        if first_idx < 0:
          first_idx = idx
        else:
          delete_current_entry = True
      if delete_current_entry:
        del arr[idx]
      else:
        idx += 1
    if first_idx >= 0:
      arr[first_idx] = prefix + splitter.join(accumulated_args)
    return arr
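
  # For example, with prefix '--enable-features=' and splitter ',', the list
  # (feature names illustrative)
  #   ['--enable-features=FeatureA', '--foo', '--enable-features=FeatureB']
  # is collapsed in place to:
  #   ['--enable-features=FeatureA,FeatureB', '--foo']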

  def maybe_fixup_args_array(self, arr):
    # The incoming array of strings may be an array of command line
    # arguments. To make it easier to turn on certain features per-bot or
    # per-test-suite, look specifically for certain flags and merge them
    # appropriately.
    #   --enable-features=Feature1 --enable-features=Feature2
    # are merged to:
    #   --enable-features=Feature1,Feature2
    # and:
    #   --extra-browser-args=arg1 --extra-browser-args=arg2
    # are merged to:
    #   --extra-browser-args=arg1 arg2
    arr = self.merge_command_line_args(arr, '--enable-features=', ',')
    arr = self.merge_command_line_args(arr, '--extra-browser-args=', ' ')
    arr = self.merge_command_line_args(arr, '--test-launcher-filter-file=', ';')
    return arr

  def substitute_magic_args(self, test_config, tester_name, tester_config):
    """Substitutes any magic substitution args present in |test_config|.

    Substitutions are done in-place.

    See buildbot_json_magic_substitutions.py for more information on this
    feature.

    Args:
      test_config: A dict containing a configuration for a specific test on
          a specific builder, e.g. the output of update_and_cleanup_test.
      tester_name: A string containing the name of the tester that
          |test_config| came from.
      tester_config: A dict containing the configuration for the builder that
          |test_config| is for.
    """
    substituted_array = []
    for arg in test_config.get('args', []):
      if arg.startswith(magic_substitutions.MAGIC_SUBSTITUTION_PREFIX):
        function = arg.replace(
            magic_substitutions.MAGIC_SUBSTITUTION_PREFIX, '')
        if hasattr(magic_substitutions, function):
          substituted_array.extend(
              getattr(magic_substitutions, function)(test_config, tester_name,
                                                     tester_config))
        else:
          raise BBGenErr(
              'Magic substitution function %s does not exist' % function)
      else:
        substituted_array.append(arg)
    if substituted_array:
      test_config['args'] = self.maybe_fixup_args_array(substituted_array)

  def dictionary_merge(self, a, b, path=None, update=True):
    """http://stackoverflow.com/questions/7204805/
        python-dictionaries-of-dictionaries-merge

    merges b into a
    """
    if path is None:
      path = []
    for key in b:
      if key in a:
        if isinstance(a[key], dict) and isinstance(b[key], dict):
          self.dictionary_merge(a[key], b[key], path + [str(key)])
        elif a[key] == b[key]:
          pass  # same leaf value
        elif isinstance(a[key], list) and isinstance(b[key], list):
          # Args arrays are lists of strings. Just concatenate them,
          # and don't sort them, in order to keep some needed
          # arguments adjacent (like --timeout-ms [arg], etc.)
          if all(isinstance(x, str)
                 for x in itertools.chain(a[key], b[key])):
            a[key] = self.maybe_fixup_args_array(a[key] + b[key])
          else:
            # TODO(kbr): this only works properly if the two arrays are
            # the same length, which is currently always the case in the
            # swarming dimension_sets that we have to merge. It will fail
            # to merge / override 'args' arrays which are different
            # length.
            for idx in range(len(b[key])):
              try:
                a[key][idx] = self.dictionary_merge(a[key][idx], b[key][idx],
                                                    path + [str(key), str(idx)],
                                                    update=update)
              except (IndexError, TypeError) as e:
                six.raise_from(
                    BBGenErr('Error merging lists by key "%s" from source %s '
                             'into target %s at index %s. Verify target list '
                             'length is equal or greater than source' %
                             (str(key), str(b), str(a), str(idx))), e)
        elif update:
          if b[key] is None:
            del a[key]
          else:
            a[key] = b[key]
        else:
          raise BBGenErr('Conflict at %s' % '.'.join(
              path + [str(key)]))  # pragma: no cover
      elif b[key] is not None:
        a[key] = b[key]
    return a
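
  # For example (keys and values illustrative):
  #   dictionary_merge({'args': ['--a'], 'shards': 2},
  #                    {'args': ['--b'], 'shards': None})
  # mutates and returns the first dict as {'args': ['--a', '--b']}: lists of
  # strings are concatenated, and a None value in b removes the key from a.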

  def initialize_args_for_test(
      self, generated_test, tester_config, additional_arg_keys=None):
    args = []
    args.extend(generated_test.get('args', []))
    args.extend(tester_config.get('args', []))

    def add_conditional_args(key, fn):
      val = generated_test.pop(key, [])
      if fn(tester_config):
        args.extend(val)

    add_conditional_args('desktop_args', lambda cfg: not self.is_android(cfg))
    add_conditional_args('lacros_args', self.is_lacros)
    add_conditional_args('linux_args', self.is_linux)
    add_conditional_args('android_args', self.is_android)
    add_conditional_args('chromeos_args', self.is_chromeos)
    add_conditional_args('mac_args', self.is_mac)
    add_conditional_args('win_args', self.is_win)
    add_conditional_args('win64_args', self.is_win64)

    for key in additional_arg_keys or []:
      args.extend(generated_test.pop(key, []))
      args.extend(tester_config.get(key, []))

    if args:
      generated_test['args'] = self.maybe_fixup_args_array(args)
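
  # For example, a test entry with 'android_args': ['--foo'] (flag name
  # illustrative) contributes '--foo' to 'args' only when the tester's os_type
  # is 'android'; the platform-specific key itself is always popped from the
  # generated test.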

  def initialize_swarming_dictionary_for_test(self, generated_test,
                                              tester_config):
    if 'swarming' not in generated_test:
      generated_test['swarming'] = {}
    if not 'can_use_on_swarming_builders' in generated_test['swarming']:
      generated_test['swarming'].update({
          'can_use_on_swarming_builders': tester_config.get('use_swarming',
                                                            True)
      })
    if 'swarming' in tester_config:
      if ('dimension_sets' not in generated_test['swarming'] and
          'dimension_sets' in tester_config['swarming']):
        generated_test['swarming']['dimension_sets'] = copy.deepcopy(
            tester_config['swarming']['dimension_sets'])
      self.dictionary_merge(generated_test['swarming'],
                            tester_config['swarming'])

    # Apply any platform-specific Swarming dimensions after the generic ones.
    if 'android_swarming' in generated_test:
      if self.is_android(tester_config):  # pragma: no cover
        self.dictionary_merge(
            generated_test['swarming'],
            generated_test['android_swarming'])  # pragma: no cover
      del generated_test['android_swarming']  # pragma: no cover
    if 'chromeos_swarming' in generated_test:
      if self.is_chromeos(tester_config):  # pragma: no cover
        self.dictionary_merge(
            generated_test['swarming'],
            generated_test['chromeos_swarming'])  # pragma: no cover
      del generated_test['chromeos_swarming']  # pragma: no cover

  def clean_swarming_dictionary(self, swarming_dict):
    # Clean out redundant entries from a test's "swarming" dictionary.
    # This is really only needed to retain 100% parity with the
    # handwritten JSON files, and can be removed once all the files are
    # autogenerated.
    if 'shards' in swarming_dict:
      if swarming_dict['shards'] == 1:  # pragma: no cover
        del swarming_dict['shards']  # pragma: no cover
    if 'hard_timeout' in swarming_dict:
      if swarming_dict['hard_timeout'] == 0:  # pragma: no cover
        del swarming_dict['hard_timeout']  # pragma: no cover
    if not swarming_dict.get('can_use_on_swarming_builders', False):
      # Remove all other keys.
      for k in list(swarming_dict):  # pragma: no cover
        if k != 'can_use_on_swarming_builders':  # pragma: no cover
          del swarming_dict[k]  # pragma: no cover

  def update_and_cleanup_test(self, test, test_name, tester_name, tester_config,
                              waterfall):
    # Apply swarming mixins.
    test = self.apply_all_mixins(
        test, waterfall, tester_name, tester_config)
    # See if there are any exceptions that need to be merged into this
    # test's specification.
    modifications = self.get_test_modifications(test, test_name, tester_name)
    if modifications:
      test = self.dictionary_merge(test, modifications)
    if 'swarming' in test:
      self.clean_swarming_dictionary(test['swarming'])
    # Ensure all Android Swarming tests run only on userdebug builds if another
    # build type was not specified.
    if 'swarming' in test and self.is_android(tester_config):
      for d in test['swarming'].get('dimension_sets', []):
        if d.get('os') == 'Android' and not d.get('device_os_type'):
          d['device_os_type'] = 'userdebug'
    self.replace_test_args(test, test_name, tester_name)

    return test

  def replace_test_args(self, test, test_name, tester_name):
    replacements = self.get_test_replacements(
        test, test_name, tester_name) or {}
    valid_replacement_keys = ['args', 'non_precommit_args', 'precommit_args']
    for key, replacement_dict in replacements.items():
      if key not in valid_replacement_keys:
        raise BBGenErr(
            'Given replacement key %s for %s on %s is not in the list of valid '
            'keys %s' % (key, test_name, tester_name, valid_replacement_keys))
      for replacement_key, replacement_val in replacement_dict.items():
        found_key = False
        for i, test_key in enumerate(test.get(key, [])):
          # Handle both the key/value being replaced being defined as two
          # separate items or as key=value.
          if test_key == replacement_key:
            found_key = True
            # Handle flags without values.
            if replacement_val == None:
              del test[key][i]
            else:
              test[key][i+1] = replacement_val
            break
          if test_key.startswith(replacement_key + '='):
            found_key = True
            if replacement_val == None:
              del test[key][i]
            else:
              test[key][i] = '%s=%s' % (replacement_key, replacement_val)
            break
        if not found_key:
          raise BBGenErr('Could not find %s in existing list of values for key '
                         '%s in %s on %s' % (replacement_key, key, test_name,
                                             tester_name))
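
  # For example, a 'replacements' exception entry such as (values
  # illustrative):
  #   {'args': {'--some-flag': 'new-value'}}
  # rewrites an existing '--some-flag=old-value' (or the value item following
  # a bare '--some-flag') to use 'new-value'; a replacement value of None
  # deletes the flag entry instead, for flags that take no value.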

  def add_common_test_properties(self, test, tester_config):
    if self.is_chromeos(tester_config) and tester_config.get('use_swarming',
                                                             True):
      # The presence of the "device_type" dimension indicates that the tests
      # are targeting CrOS hardware and so need the special trigger script.
      dimension_sets = test['swarming']['dimension_sets']
      if all('device_type' in ds for ds in dimension_sets):
        test['trigger_script'] = {
            'script': '//testing/trigger_scripts/chromeos_device_trigger.py',
        }

  def add_logdog_butler_cipd_package(self, tester_config, result):
    if not tester_config.get('skip_cipd_packages', False):
      cipd_packages = result['swarming'].get('cipd_packages', [])
      already_added = len([
          package for package in cipd_packages
          if package.get('cipd_package', "").find('logdog/butler') > 0
      ]) > 0
      if not already_added:
        cipd_packages.append({
            'cipd_package':
                'infra/tools/luci/logdog/butler/${platform}',
            'location':
                'bin',
            'revision':
                'git_revision:ff387eadf445b24c935f1cf7d6ddd279f8a6b04c',
        })
        result['swarming']['cipd_packages'] = cipd_packages

  def add_android_presentation_args(self, tester_config, test_name, result):
    args = result.get('args', [])
    bucket = tester_config.get('results_bucket', 'chromium-result-details')
    args.append('--gs-results-bucket=%s' % bucket)
    if (result['swarming']['can_use_on_swarming_builders'] and not
        tester_config.get('skip_merge_script', False)):
      result['merge'] = {
          'args': [
              '--bucket',
              bucket,
              '--test-name',
              result.get('name', test_name)
          ],
          'script': '//build/android/pylib/results/presentation/'
                    'test_results_presentation.py',
      }
    if not tester_config.get('skip_output_links', False):
      result['swarming']['output_links'] = [
          {
              'link': [
                  'https://luci-logdog.appspot.com/v/?s',
                  '=android%2Fswarming%2Flogcats%2F',
                  '${TASK_ID}%2F%2B%2Funified_logcats',
              ],
              'name': 'shard #${SHARD_INDEX} logcats',
          },
      ]
    if args:
      result['args'] = args

  def generate_gtest(self, waterfall, tester_name, tester_config, test_name,
                     test_config):
    if not self.should_run_on_tester(
        waterfall, tester_name, test_name, test_config):
      return None
    result = copy.deepcopy(test_config)
    if 'test' in result:
      if 'name' not in result:
        result['name'] = test_name
    else:
      result['test'] = test_name
    self.initialize_swarming_dictionary_for_test(result, tester_config)

    self.initialize_args_for_test(
        result, tester_config, additional_arg_keys=['gtest_args'])
    if self.is_android(tester_config) and tester_config.get(
        'use_swarming', True):
      if not test_config.get('use_isolated_scripts_api', False):
        # TODO(https://crbug.com/1137998) make Android presentation work with
        # isolated scripts in test_results_presentation.py merge script
        self.add_android_presentation_args(tester_config, test_name, result)
        result['args'] = result.get('args', []) + ['--recover-devices']
      self.add_logdog_butler_cipd_package(tester_config, result)

    result = self.update_and_cleanup_test(
        result, test_name, tester_name, tester_config, waterfall)
    self.add_common_test_properties(result, tester_config)
    self.substitute_magic_args(result, tester_name, tester_config)

    if not result.get('merge'):
      # TODO(https://crbug.com/958376): Consider adding the ability to not have
      # this default.
      if test_config.get('use_isolated_scripts_api', False):
        merge_script = 'standard_isolated_script_merge'
      else:
        merge_script = 'standard_gtest_merge'

      result['merge'] = {
          'script': '//testing/merge_scripts/%s.py' % merge_script,
          'args': [],
      }
    return result

  def generate_isolated_script_test(self, waterfall, tester_name, tester_config,
                                    test_name, test_config):
    if not self.should_run_on_tester(waterfall, tester_name, test_name,
                                     test_config):
      return None
    result = copy.deepcopy(test_config)
    result['isolate_name'] = result.get('isolate_name', test_name)
    result['name'] = result.get('name', test_name)
    self.initialize_swarming_dictionary_for_test(result, tester_config)
    self.initialize_args_for_test(result, tester_config)
    if self.is_android(tester_config) and tester_config.get(
        'use_swarming', True):
      if tester_config.get('use_android_presentation', False):
        # TODO(https://crbug.com/1137998) make Android presentation work with
        # isolated scripts in test_results_presentation.py merge script
        self.add_android_presentation_args(tester_config, test_name, result)
      self.add_logdog_butler_cipd_package(tester_config, result)
    result = self.update_and_cleanup_test(
        result, test_name, tester_name, tester_config, waterfall)
    self.add_common_test_properties(result, tester_config)
    self.substitute_magic_args(result, tester_name, tester_config)

    if not result.get('merge'):
      # TODO(https://crbug.com/958376): Consider adding the ability to not have
      # this default.
      result['merge'] = {
          'script': '//testing/merge_scripts/standard_isolated_script_merge.py',
          'args': [],
      }
    return result

  def generate_script_test(self, waterfall, tester_name, tester_config,
                           test_name, test_config):
    # TODO(https://crbug.com/953072): Remove this check whenever a better
    # long-term solution is implemented.
    if (waterfall.get('forbid_script_tests', False) or
        waterfall['machines'][tester_name].get('forbid_script_tests', False)):
      raise BBGenErr('Attempted to generate a script test on tester ' +
                     tester_name + ', which explicitly forbids script tests')
    if not self.should_run_on_tester(waterfall, tester_name, test_name,
                                     test_config):
      return None
    result = {
        'name': test_name,
        'script': test_config['script']
    }
    result = self.update_and_cleanup_test(
        result, test_name, tester_name, tester_config, waterfall)
    self.substitute_magic_args(result, tester_name, tester_config)
    return result

  def generate_junit_test(self, waterfall, tester_name, tester_config,
                          test_name, test_config):
    if not self.should_run_on_tester(waterfall, tester_name, test_name,
                                     test_config):
      return None
    result = copy.deepcopy(test_config)
    result.update({
        'name': test_name,
        'test': test_config.get('test', test_name),
    })
    self.initialize_args_for_test(result, tester_config)
    result = self.update_and_cleanup_test(
        result, test_name, tester_name, tester_config, waterfall)
    self.substitute_magic_args(result, tester_name, tester_config)
    return result

  def generate_skylab_test(self, waterfall, tester_name, tester_config,
                           test_name, test_config):
    if not self.should_run_on_tester(waterfall, tester_name, test_name,
                                     test_config):
      return None
    result = copy.deepcopy(test_config)
    result.update({
        'test': test_name,
    })
    self.initialize_args_for_test(result, tester_config)
    result = self.update_and_cleanup_test(result, test_name, tester_name,
                                          tester_config, waterfall)
    self.substitute_magic_args(result, tester_name, tester_config)
    return result

  def substitute_gpu_args(self, tester_config, swarming_config, args):
    substitutions = {
        # Any machine in waterfalls.pyl which desires to run GPU tests
        # must provide the os_type key.
        'os_type': tester_config['os_type'],
        'gpu_vendor_id': '0',
        'gpu_device_id': '0',
    }
    dimension_set = swarming_config['dimension_sets'][0]
    if 'gpu' in dimension_set:
      # First remove the driver version, then split into vendor and device.
      gpu = dimension_set['gpu']
      if gpu != 'none':
        gpu = gpu.split('-')[0].split(':')
        substitutions['gpu_vendor_id'] = gpu[0]
        substitutions['gpu_device_id'] = gpu[1]
    return [string.Template(arg).safe_substitute(substitutions) for arg in args]
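
  # For example, a swarming 'gpu' dimension of '8086:1912-23.21.13.8816'
  # (values illustrative) yields gpu_vendor_id '8086' and gpu_device_id
  # '1912', so an arg such as '--vendor-id=${gpu_vendor_id}' (flag name
  # illustrative) is rewritten to '--vendor-id=8086'.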

  def generate_gpu_telemetry_test(self, waterfall, tester_name, tester_config,
                                  test_name, test_config, is_android_webview,
                                  is_cast_streaming):
    # These are all just specializations of isolated script tests with
    # a bunch of boilerplate command line arguments added.

    # The step name must end in 'test' or 'tests' in order for the
    # results to automatically show up on the flakiness dashboard.
    # (At least, this was true some time ago.) Continue to use this
    # naming convention for the time being to minimize changes.
    step_name = test_config.get('name', test_name)
    variant_id = test_config.get('variant_id')
    if variant_id:
      step_name = self.remove_variant_from_test_name(step_name, variant_id)
    if not (step_name.endswith('test') or step_name.endswith('tests')):
      step_name = '%s_tests' % step_name
    if variant_id:
      step_name = self.add_variant_to_test_name(step_name, variant_id)
    if 'name' in test_config:
      test_config['name'] = step_name
    result = self.generate_isolated_script_test(
        waterfall, tester_name, tester_config, step_name, test_config)
    if not result:
      return None
    result['isolate_name'] = test_config.get(
        'isolate_name',
        self.get_default_isolate_name(tester_config, is_android_webview))

    # Populate test_id_prefix.
    gn_entry = self.gn_isolate_map[result['isolate_name']]
    result['test_id_prefix'] = 'ninja:%s/' % gn_entry['label']

    args = result.get('args', [])
    test_to_run = result.pop('telemetry_test_name', test_name)

    # TODO(skbug.com/12149): Remove this once Gold-based tests no longer clobber
    # earlier results on retry attempts.
    is_gold_based_test = False
    for a in args:
      if '--git-revision' in a:
        is_gold_based_test = True
        break
    if is_gold_based_test:
      for a in args:
        if '--test-filter' in a or '--isolated-script-test-filter' in a:
          raise RuntimeError(
              '--test-filter/--isolated-script-test-filter are currently not '
              'supported for Gold-based GPU tests. See skbug.com/12100 and '
              'skbug.com/12149 for more details.')

    # These tests upload and download results from cloud storage and therefore
    # aren't idempotent yet. https://crbug.com/549140.
    result['swarming']['idempotent'] = False

    # The GPU tests act much like integration tests for the entire browser, and
    # tend to uncover flakiness bugs more readily than other test suites. In
    # order to surface any flakiness more readily to the developer of the CL
    # which is introducing it, we disable retries with patch on the commit
    # queue.
    result['should_retry_with_patch'] = False

    browser = ''
    if is_cast_streaming:
      browser = 'cast-streaming-shell'
    elif is_android_webview:
      browser = 'android-webview-instrumentation'
    else:
      browser = tester_config['browser_config']

    # Most platforms require --enable-logging=stderr to get useful browser logs.
    # However, this actively messes with logging on CrOS (because Chrome's
    # stderr goes nowhere on CrOS) AND --log-level=0 is required for some reason
    # in order to see JavaScript console messages. See
    # https://chromium.googlesource.com/chromium/src.git/+/HEAD/docs/chrome_os_logging.md
    logging_arg = '--log-level=0' if self.is_chromeos(
        tester_config) else '--enable-logging=stderr'

    args = [
        test_to_run,
        '--show-stdout',
        '--browser=%s' % browser,
        # --passthrough displays more of the logging in Telemetry when
        # run via typ, in particular some of the warnings about tests
        # being expected to fail, but passing.
        '--passthrough',
        '-v',
        '--extra-browser-args=%s --js-flags=--expose-gc' % logging_arg,
    ] + args
    result['args'] = self.maybe_fixup_args_array(self.substitute_gpu_args(
        tester_config, result['swarming'], args))
    return result

  def get_default_isolate_name(self, tester_config, is_android_webview):
    if self.is_android(tester_config):
      if is_android_webview:
        return 'telemetry_gpu_integration_test_android_webview'
      return (
          'telemetry_gpu_integration_test' +
          BROWSER_CONFIG_TO_TARGET_SUFFIX_MAP[tester_config['browser_config']])
    if self.is_fuchsia(tester_config):
      return 'telemetry_gpu_integration_test_fuchsia'
    return 'telemetry_gpu_integration_test'
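
  # For example, an Android tester whose browser_config is 'android-chromium'
  # maps to 'telemetry_gpu_integration_test_android_chrome' via
  # BROWSER_CONFIG_TO_TARGET_SUFFIX_MAP.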

  def get_test_generator_map(self):
    return {
        'android_webview_gpu_telemetry_tests':
            GPUTelemetryTestGenerator(self, is_android_webview=True),
        'cast_streaming_tests':
            GPUTelemetryTestGenerator(self, is_cast_streaming=True),
        'gpu_telemetry_tests':
            GPUTelemetryTestGenerator(self),
        'gtest_tests':
            GTestGenerator(self),
        'isolated_scripts':
            IsolatedScriptTestGenerator(self),
        'junit_tests':
            JUnitGenerator(self),
        'scripts':
            ScriptGenerator(self),
        'skylab_tests':
            SkylabGenerator(self),
    }

  def get_test_type_remapper(self):
    return {
        # These are a specialization of isolated_scripts with a bunch of
        # boilerplate command line arguments added to each one.
        'android_webview_gpu_telemetry_tests': 'isolated_scripts',
        'cast_streaming_tests': 'isolated_scripts',
        'gpu_telemetry_tests': 'isolated_scripts',
    }

  def check_composition_type_test_suites(self, test_type,
                                         additional_validators=None):
    """Pre-pass to catch errors reliably for compound/matrix suites."""
    validators = [check_compound_references,
                  check_basic_references,
                  check_conflicting_definitions]
    if additional_validators:
      validators += additional_validators

    target_suites = self.test_suites.get(test_type, {})
    other_test_type = ('compound_suites'
                       if test_type == 'matrix_compound_suites'
                       else 'matrix_compound_suites')
    other_suites = self.test_suites.get(other_test_type, {})
    basic_suites = self.test_suites.get('basic_suites', {})

    for suite, suite_def in target_suites.items():
      if suite in basic_suites:
        raise BBGenErr('%s names may not duplicate basic test suite names '
                       '(error found while processing %s)'
                       % (test_type, suite))

      seen_tests = {}
      for sub_suite in suite_def:
        for validator in validators:
          validator(
              basic_suites=basic_suites,
              other_test_suites=other_suites,
              seen_tests=seen_tests,
              sub_suite=sub_suite,
              suite=suite,
              suite_def=suite_def,
              target_test_suites=target_suites,
              test_type=test_type,
              all_variants=self.variants
          )

  def flatten_test_suites(self):
    new_test_suites = {}
    test_types = ['basic_suites', 'compound_suites', 'matrix_compound_suites']
    for category in test_types:
      for name, value in self.test_suites.get(category, {}).items():
        new_test_suites[name] = value
    self.test_suites = new_test_suites

  def resolve_test_id_prefixes(self):
    for suite in self.test_suites['basic_suites'].values():
      for key, test in suite.items():
        assert isinstance(test, dict)

        # This assumes the recipe logic which prefers 'test' to 'isolate_name'
        # https://source.chromium.org/chromium/chromium/tools/build/+/main:scripts/slave/recipe_modules/chromium_tests/generators.py;l=89;drc=14c062ba0eb418d3c4623dde41a753241b9df06b
        # TODO(crbug.com/1035124): clean this up.
        isolate_name = test.get('test') or test.get('isolate_name') or key
        gn_entry = self.gn_isolate_map.get(isolate_name)
        if gn_entry:
          label = gn_entry['label']

          if label.count(':') != 1:
            raise BBGenErr(
                'Malformed GN label "%s" in gn_isolate_map for key "%s",'
                ' implicit names (like //f/b meaning //f/b:b) are disallowed.' %
                (label, isolate_name))
          if label.split(':')[1] != isolate_name:
            raise BBGenErr(
                'gn_isolate_map key name "%s" doesn\'t match GN target name in'
                ' label "%s" see http://crbug.com/1071091 for details.' %
                (isolate_name, label))

          test['test_id_prefix'] = 'ninja:%s/' % label
        else:  # pragma: no cover
          # Some tests do not have an entry in gn_isolate_map.pyl, such as
          # telemetry tests.
          # TODO(crbug.com/1035304): require an entry in gn_isolate_map.
          pass

  def resolve_composition_test_suites(self):
    self.check_composition_type_test_suites('compound_suites')

    compound_suites = self.test_suites.get('compound_suites', {})
    # check_composition_type_test_suites() checks that all basic suites
    # referenced by compound suites exist.
    basic_suites = self.test_suites.get('basic_suites')

    for name, value in compound_suites.items():
      # Resolve this to a dictionary.
      full_suite = {}
      for entry in value:
        suite = basic_suites[entry]
        full_suite.update(suite)
      compound_suites[name] = full_suite
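
  # For example, a compound suite defined as ['suite_a', 'suite_b'] (names
  # illustrative) resolves to a single dict containing every test from both
  # basic suites; check_composition_type_test_suites() has already ensured
  # that a test reachable through both carries an identical definition.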

  def resolve_variants(self, basic_test_definition, variants, mixins):
    """ Merge variant-defined configurations to each test case definition in a
    test suite.

    The output maps a unique test name to an array of configurations because
    there may exist more than one definition for a test name using variants.
    The test name is referenced while mapping machines to test suites, so
    unpacking the array is done by the generators.

    Args:
      basic_test_definition: a {} defined test suite in the format
        test_name:test_config
      variants: an [] of {} defining configurations to be applied to each test
        case in the basic test_definition
      mixins: an [] of mixin names applied to each generated test case in
        addition to any mixins the variant itself defines

    Returns:
      a {} of test_name:[{}], where each {} is a merged configuration
    """
    # Each test in a basic test suite will have a definition per variant.
    test_suite = {}
    for test_name, test_config in basic_test_definition.items():
      definitions = []
      for variant in variants:
        # Unpack the variant from variants.pyl if it's string based.
        if isinstance(variant, str):
          variant = self.variants[variant]

        # If 'enabled' is set to False, we will not use this variant;
        # otherwise, if the variant doesn't include the 'enabled' key or
        # 'enabled' is set to True, we will use this variant.
        if not variant.get('enabled', True):
          continue

        # Clone a copy of test_config so that we can have a uniquely updated
        # version of it per variant.
        cloned_config = copy.deepcopy(test_config)
        # The variant definition needs to be re-used for each test, so we'll
        # create a clone and work with it as well.
        cloned_variant = copy.deepcopy(variant)

        cloned_config['args'] = (cloned_config.get('args', []) +
                                 cloned_variant.get('args', []))
        cloned_config['mixins'] = (cloned_config.get('mixins', []) +
                                   cloned_variant.get('mixins', []) + mixins)

        basic_swarming_def = cloned_config.get('swarming', {})
        variant_swarming_def = cloned_variant.get('swarming', {})
        if basic_swarming_def and variant_swarming_def:
          if ('dimension_sets' in basic_swarming_def and
              'dimension_sets' in variant_swarming_def):
            # Retain the swarming dimension set merge behavior when both the
            # variant and the basic test configuration define it.
            self.dictionary_merge(basic_swarming_def, variant_swarming_def)
            # Remove dimension_sets from the variant definition, so that it
            # does not replace what's been done by dictionary_merge in the
            # update call below.
            del variant_swarming_def['dimension_sets']

        # Update the swarming definition with whatever is defined for swarming
        # by the variant.
        basic_swarming_def.update(variant_swarming_def)
        cloned_config['swarming'] = basic_swarming_def

        # Copy all skylab fields defined by the variant.
        skylab_config = cloned_variant.get('skylab')
        if skylab_config:
          for k, v in skylab_config.items():
            # cros_chrome_version is the ash chrome version in the cros img
            # in the variant of cros_board. We don't want to include it in
            # the final json files, so remove it.
            if k == 'cros_chrome_version':
              continue
            cloned_config[k] = v

        # The identifier is used to make the name of the test unique.
        # Generators in the recipe uniquely identify a test by its name, so
        # we don't want to have the same name for each variant.
        cloned_config['name'] = self.add_variant_to_test_name(
            cloned_config.get('name') or test_name,
            cloned_variant['identifier'])

        # Attach the variant identifier to the test config so downstream
        # generators can make modifications based on the original name. This
        # is mainly used in generate_gpu_telemetry_test().
        cloned_config['variant_id'] = cloned_variant['identifier']

        definitions.append(cloned_config)
      test_suite[test_name] = definitions
    return test_suite
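
  # Illustrative sketch of resolve_variants() (hypothetical names): with
  #   basic_test_definition = {'foo_tests': {'args': ['--a']}}
  #   variants = [{'identifier': 'v1', 'args': ['--b']},
  #               {'identifier': 'v2', 'enabled': False}]
  #   mixins = []
  # the result is {'foo_tests': [<one merged config>]}: the disabled 'v2'
  # variant is skipped, the surviving config gets args ['--a', '--b'], an
  # empty mixins list, a 'variant_id' of 'v1', and a name composed by
  # add_variant_to_test_name() from 'foo_tests' and the 'v1' identifier.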

  def resolve_matrix_compound_test_suites(self):
    self.check_composition_type_test_suites('matrix_compound_suites',
                                            [check_matrix_identifier])

    matrix_compound_suites = self.test_suites.get('matrix_compound_suites', {})
    # check_composition_type_test_suites() checks that all basic suites
    # referenced by matrix suites exist.
    basic_suites = self.test_suites.get('basic_suites')

    for test_name, matrix_config in matrix_compound_suites.items():
      full_suite = {}
      for test_suite, mtx_test_suite_config in matrix_config.items():
        basic_test_def = copy.deepcopy(basic_suites[test_suite])

        if 'variants' in mtx_test_suite_config:
          mixins = mtx_test_suite_config.get('mixins', [])
          result = self.resolve_variants(basic_test_def,
                                         mtx_test_suite_config['variants'],
                                         mixins)
          full_suite.update(result)
        else:
          suite = basic_suites[test_suite]
          full_suite.update(suite)
      matrix_compound_suites[test_name] = full_suite
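
  # Illustrative sketch (hypothetical names): a matrix compound suite such as
  #   'foo_matrix': {'foo_tests': {'variants': ['v1'], 'mixins': ['m1']}}
  # expands 'foo_tests' once per enabled variant via resolve_variants(), while
  # an entry without a 'variants' key is copied in unchanged, just like a
  # member of a regular compound suite.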

  def link_waterfalls_to_test_suites(self):
    for waterfall in self.waterfalls:
      for tester_name, tester in waterfall['machines'].items():
        for suite, value in tester.get('test_suites', {}).items():
          if not value in self.test_suites:
            # Hard / impossible to cover this in the unit test.
            raise self.unknown_test_suite(
                value, tester_name, waterfall['name'])  # pragma: no cover
          tester['test_suites'][suite] = self.test_suites[value]

  def load_configuration_files(self):
    self.waterfalls = self.load_pyl_file('waterfalls.pyl')
    self.test_suites = self.load_pyl_file('test_suites.pyl')
    self.exceptions = self.load_pyl_file('test_suite_exceptions.pyl')
    self.mixins = self.load_pyl_file('mixins.pyl')
    self.gn_isolate_map = self.load_pyl_file('gn_isolate_map.pyl')
    for isolate_map in self.args.isolate_map_files:
      isolate_map = self.load_pyl_file(isolate_map)
      duplicates = set(isolate_map).intersection(self.gn_isolate_map)
      if duplicates:
        raise BBGenErr('Duplicate targets in isolate map files: %s.' %
                       ', '.join(duplicates))
      self.gn_isolate_map.update(isolate_map)
    self.variants = self.load_pyl_file('variants.pyl')

  def resolve_configuration_files(self):
    self.resolve_test_id_prefixes()
    self.resolve_composition_test_suites()
    self.resolve_matrix_compound_test_suites()
    self.flatten_test_suites()
    self.link_waterfalls_to_test_suites()

  def unknown_bot(self, bot_name, waterfall_name):
    return BBGenErr(
        'Unknown bot name "%s" on waterfall "%s"' % (bot_name, waterfall_name))

  def unknown_test_suite(self, suite_name, bot_name, waterfall_name):
    return BBGenErr(
        'Test suite %s from machine %s on waterfall %s not present in '
        'test_suites.pyl' % (suite_name, bot_name, waterfall_name))

  def unknown_test_suite_type(self, suite_type, bot_name, waterfall_name):
    return BBGenErr(
        'Unknown test suite type ' + suite_type + ' in bot ' + bot_name +
        ' on waterfall ' + waterfall_name)

  def apply_all_mixins(self, test, waterfall, builder_name, builder):
    """Applies all present swarming mixins to the test for a given builder.

    Checks in the waterfall, builder, and test objects for mixins.
    """
    def valid_mixin(mixin_name):
      """Asserts that the mixin is valid."""
      if mixin_name not in self.mixins:
        raise BBGenErr("bad mixin %s" % mixin_name)

    def must_be_list(mixins, typ, name):
      """Asserts that given mixins are a list."""
      if not isinstance(mixins, list):
        raise BBGenErr("'%s' in %s '%s' must be a list" % (mixins, typ, name))

    test_name = test.get('name')
    remove_mixins = set()
    if 'remove_mixins' in builder:
      must_be_list(builder['remove_mixins'], 'builder', builder_name)
      for rm in builder['remove_mixins']:
        valid_mixin(rm)
        remove_mixins.add(rm)
    if 'remove_mixins' in test:
      must_be_list(test['remove_mixins'], 'test', test_name)
      for rm in test['remove_mixins']:
        valid_mixin(rm)
        remove_mixins.add(rm)
      del test['remove_mixins']

    if 'mixins' in waterfall:
      must_be_list(waterfall['mixins'], 'waterfall', waterfall['name'])
      for mixin in waterfall['mixins']:
        if mixin in remove_mixins:
          continue
        valid_mixin(mixin)
        test = self.apply_mixin(self.mixins[mixin], test, builder)

    if 'mixins' in builder:
      must_be_list(builder['mixins'], 'builder', builder_name)
      for mixin in builder['mixins']:
        if mixin in remove_mixins:
          continue
        valid_mixin(mixin)
        test = self.apply_mixin(self.mixins[mixin], test, builder)

    if not 'mixins' in test:
      return test

    if not test_name:
      test_name = test.get('test')
    if not test_name:  # pragma: no cover
      # Not the best name, but we should say something.
      test_name = str(test)

    must_be_list(test['mixins'], 'test', test_name)
    for mixin in test['mixins']:
      # We don't bother checking if the given mixin is in remove_mixins here
      # since this is already the lowest level, so if a mixin is added here
      # that we don't want, we can just delete its entry.
      valid_mixin(mixin)
      test = self.apply_mixin(self.mixins[mixin], test, builder)
    del test['mixins']
    return test
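
  # Note on ordering (descriptive only): waterfall-level mixins are applied
  # first, then builder-level mixins, then mixins listed on the test itself.
  # 'remove_mixins' entries declared on the builder or the test only suppress
  # the waterfall- and builder-level applications above; test-level mixins
  # that aren't wanted can simply be deleted from the test entry.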

  def apply_mixin(self, mixin, test, builder):
    """Applies a mixin to a test.

    Mixins will not override an existing key. This is to ensure exceptions can
    override a setting a mixin applies.

    Swarming dimensions are handled in a special way. Instead of specifying
    'dimension_sets', which is how normal test suites specify their dimensions,
    you specify a 'dimensions' key, which maps to a dictionary. This dictionary
    is then applied to every dimension set in the test.
    """
    new_test = copy.deepcopy(test)
    mixin = copy.deepcopy(mixin)

    if 'swarming' in mixin:
      swarming_mixin = mixin['swarming']
      new_test.setdefault('swarming', {})
      # Copy over any explicit dimension sets first so that they will be
      # updated by any subsequent 'dimensions' entries.
      if 'dimension_sets' in swarming_mixin:
        existing_dimension_sets = new_test['swarming'].setdefault(
            'dimension_sets', [])
        # Appending to the existing list could potentially result in different
        # behavior depending on the order the mixins were applied, but that's
        # already the case for other parts of mixins, so trust that the user
        # will verify that the generated output is correct before submitting.
        for dimension_set in swarming_mixin['dimension_sets']:
          if dimension_set not in existing_dimension_sets:
            existing_dimension_sets.append(dimension_set)
        del swarming_mixin['dimension_sets']
      if 'dimensions' in swarming_mixin:
        new_test['swarming'].setdefault('dimension_sets', [{}])
        for dimension_set in new_test['swarming']['dimension_sets']:
          dimension_set.update(swarming_mixin['dimensions'])
        del swarming_mixin['dimensions']
      # python dict update doesn't do recursion at all. Just hard code the
      # nested update we need (mixin['swarming'] shouldn't clobber
      # test['swarming'], but should update it).
      new_test['swarming'].update(swarming_mixin)
      del mixin['swarming']

    if '$mixin_append' in mixin:
      # Values specified under $mixin_append should be appended to existing
      # lists, rather than replacing them.
      mixin_append = mixin['$mixin_append']
      del mixin['$mixin_append']

      # Append swarming named cache and delete swarming key, since it's under
      # another layer of dict.
      if 'named_caches' in mixin_append.get('swarming', {}):
        new_test['swarming'].setdefault('named_caches', [])
        new_test['swarming']['named_caches'].extend(
            mixin_append['swarming']['named_caches'])
        if len(mixin_append['swarming']) > 1:
          raise BBGenErr('Only named_caches is supported under swarming key in '
                         '$mixin_append, but there are: %s' %
                         sorted(mixin_append['swarming'].keys()))
        del mixin_append['swarming']

      for key in mixin_append:
        new_test.setdefault(key, [])
        if not isinstance(mixin_append[key], list):
          raise BBGenErr(
              'Key "' + key + '" in $mixin_append must be a list.')
        if not isinstance(new_test[key], list):
          raise BBGenErr(
              'Cannot apply $mixin_append to non-list "' + key + '".')
        new_test[key].extend(mixin_append[key])

      args = new_test.get('args', [])
      # Array so we can assign to it in a nested scope.
      args_need_fixup = [False]
      if 'args' in mixin_append:
        args_need_fixup[0] = True

      def add_conditional_args(key, fn):
        val = new_test.pop(key, [])
        if val and fn(builder):
          args.extend(val)
          args_need_fixup[0] = True

      add_conditional_args('desktop_args', lambda cfg: not self.is_android(cfg))
      add_conditional_args('lacros_args', self.is_lacros)
      add_conditional_args('linux_args', self.is_linux)
      add_conditional_args('android_args', self.is_android)
      add_conditional_args('chromeos_args', self.is_chromeos)
      add_conditional_args('mac_args', self.is_mac)
      add_conditional_args('win_args', self.is_win)
      add_conditional_args('win64_args', self.is_win64)

      if args_need_fixup[0]:
        new_test['args'] = self.maybe_fixup_args_array(args)

    new_test.update(mixin)
    return new_test
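
  # Illustrative sketch (hypothetical mixin, not copied from mixins.pyl):
  # applying
  #   {'swarming': {'dimensions': {'os': 'Ubuntu-18.04'}},
  #    '$mixin_append': {'args': ['--extra']}}
  # adds 'os': 'Ubuntu-18.04' to every existing dimension set of the test and
  # appends '--extra' to the test's args (then runs them through
  # maybe_fixup_args_array()), rather than replacing either list.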

  def generate_output_tests(self, waterfall):
    """Generates the tests for a waterfall.

    Args:
      waterfall: a dictionary parsed from a master pyl file

    Returns:
      A dictionary mapping builders to test specs
    """
    return {
        name: self.get_tests_for_config(waterfall, name, config)
        for name, config in waterfall['machines'].items()
    }

  def get_tests_for_config(self, waterfall, name, config):
    generator_map = self.get_test_generator_map()
    test_type_remapper = self.get_test_type_remapper()

    tests = {}
    # Copy only well-understood entries in the machine's configuration
    # verbatim into the generated JSON.
    if 'additional_compile_targets' in config:
      tests['additional_compile_targets'] = config[
          'additional_compile_targets']

    for test_type, input_tests in config.get('test_suites', {}).items():
      if test_type not in generator_map:
        raise self.unknown_test_suite_type(
            test_type, name, waterfall['name'])  # pragma: no cover
      test_generator = generator_map[test_type]
      # Let multiple kinds of generators generate the same kinds
      # of tests. For example, gpu_telemetry_tests are a
      # specialization of isolated_scripts.
      new_tests = test_generator.generate(
          waterfall, name, config, input_tests)
      remapped_test_type = test_type_remapper.get(test_type, test_type)
      tests[remapped_test_type] = test_generator.sort(
          tests.get(remapped_test_type, []) + new_tests)

    return tests

  def jsonify(self, all_tests):
    return json.dumps(
        all_tests, indent=2, separators=(',', ': '),
        sort_keys=True) + '\n'

  def generate_outputs(self):  # pragma: no cover
    self.load_configuration_files()
    self.resolve_configuration_files()
    filters = self.args.waterfall_filters
    result = collections.defaultdict(dict)

    required_fields = ('name',)
    for waterfall in self.waterfalls:
      for field in required_fields:
        # Verify required fields
        if field not in waterfall:
          raise BBGenErr("Waterfall %s has no %s" % (waterfall['name'], field))

      # Handle filter flag, if specified
      if filters and waterfall['name'] not in filters:
        continue

      # Join config files and hardcoded values together
      all_tests = self.generate_output_tests(waterfall)
      result[waterfall['name']] = all_tests

    # Add do not edit warning
    for tests in result.values():
      tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
      tests['AAAAA2 See generate_buildbot_json.py to make changes'] = {}

    return result

  def write_json_result(self, result):  # pragma: no cover
    suffix = '.json'
    if self.args.new_files:
      suffix = '.new' + suffix

    for filename, contents in result.items():
      jsonstr = self.jsonify(contents)
      self.write_file(self.pyl_file_path(filename + suffix), jsonstr)

  def get_valid_bot_names(self):
    # Extract bot names from infra/config/generated/luci/luci-milo.cfg.
    # NOTE: This reference can cause issues; if a file changes there, the
    # presubmit here won't be run by default. A manually maintained list there
    # tries to run presubmit here when luci-milo.cfg is changed. If any other
    # references to configs outside of this directory are added, please change
    # their presubmit to run `generate_buildbot_json.py -c`, so that the tree
    # never ends up in an invalid state.

    # Get the generated project.pyl so we can check if we should be enforcing
    # that the specs are for builders that actually exist.
    # If not, return None to indicate that we won't enforce that builders in
    # waterfalls.pyl are defined in LUCI.
    project_pyl_path = os.path.join(self.args.infra_config_dir, 'generated',
                                    'project.pyl')
    if os.path.exists(project_pyl_path):
      settings = ast.literal_eval(self.read_file(project_pyl_path))
      if not settings.get('validate_source_side_specs_have_builder', True):
        return None

    bot_names = set()
    milo_configs = glob.glob(
        os.path.join(self.args.infra_config_dir, 'generated', 'luci',
                     'luci-milo*.cfg'))
    for c in milo_configs:
      for l in self.read_file(c).splitlines():
        if (not 'name: "buildbucket/luci.chromium.' in l and
            not 'name: "buildbucket/luci.chrome.' in l):
          continue
        # l looks like
        # `name: "buildbucket/luci.chromium.try/win_chromium_dbg_ng"`
        # Extract the win_chromium_dbg_ng part.
        bot_names.add(l[l.rindex('/') + 1:l.rindex('"')])
    return bot_names
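
  # For example, for the line quoted in the comment above,
  #   name: "buildbucket/luci.chromium.try/win_chromium_dbg_ng"
  # the slice between the last '/' and the closing '"' yields the bot name
  # 'win_chromium_dbg_ng'.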

  def get_builders_that_do_not_actually_exist(self):
    # Some of the bots on the chromium.fyi waterfall in particular
    # are defined only to be mirrored into trybots, and don't actually
    # exist on any of the waterfalls or consoles.
    return [
        # chromium.fyi
        'linux-blink-optional-highdpi-rel-dummy',
        'mac10.13-blink-rel-dummy',
        'mac10.14-blink-rel-dummy',
        'mac10.15-blink-rel-dummy',
        'mac11.0-blink-rel-dummy',
        'mac11.0.arm64-blink-rel-dummy',
    ]

  def get_internal_waterfalls(self):
    # Similar to get_builders_that_do_not_actually_exist above, but for
    # waterfalls defined in internal configs.
    return [
        'chrome', 'chrome.pgo', 'internal.chrome.fyi', 'internal.chromeos.fyi',
        'internal.soda'
    ]

  def check_input_file_consistency(self, verbose=False):
    self.check_input_files_sorting(verbose)

    self.load_configuration_files()
    self.check_composition_type_test_suites('compound_suites')
    self.check_composition_type_test_suites('matrix_compound_suites',
                                            [check_matrix_identifier])
    self.resolve_test_id_prefixes()
    self.flatten_test_suites()

    # All bots should exist.
    bot_names = self.get_valid_bot_names()
    builders_that_dont_exist = self.get_builders_that_do_not_actually_exist()
    if bot_names is not None:
      internal_waterfalls = self.get_internal_waterfalls()
      for waterfall in self.waterfalls:
        # TODO(crbug.com/991417): Remove the need for this exception.
        if waterfall['name'] in internal_waterfalls:
          continue  # pragma: no cover
        for bot_name in waterfall['machines']:
          if bot_name in builders_that_dont_exist:
            continue  # pragma: no cover
          if bot_name not in bot_names:
            if waterfall['name'] in [
                'client.v8.chromium', 'client.v8.fyi', 'tryserver.v8'
            ]:
              # TODO(thakis): Remove this once these bots move to luci.
              continue  # pragma: no cover
            if waterfall['name'] in ['tryserver.webrtc',
                                     'webrtc.chromium.fyi.experimental']:
              # These waterfalls have their bot configs in a different repo,
              # so we don't know about their bot names.
              continue  # pragma: no cover
            if waterfall['name'] in ['client.devtools-frontend.integration',
                                     'tryserver.devtools-frontend',
                                     'chromium.devtools-frontend']:
              continue  # pragma: no cover
            if waterfall['name'] in ['client.openscreen.chromium']:
              continue  # pragma: no cover
            raise self.unknown_bot(bot_name, waterfall['name'])

    # All test suites must be referenced.
    suites_seen = set()
    generator_map = self.get_test_generator_map()
    for waterfall in self.waterfalls:
      for bot_name, tester in waterfall['machines'].items():
        for suite_type, suite in tester.get('test_suites', {}).items():
          if suite_type not in generator_map:
            raise self.unknown_test_suite_type(suite_type, bot_name,
                                               waterfall['name'])
          if suite not in self.test_suites:
            raise self.unknown_test_suite(suite, bot_name, waterfall['name'])
          suites_seen.add(suite)
    # Since we didn't resolve the configuration files, this set
    # includes both composition test suites and regular ones.
    resolved_suites = set()
    for suite_name in suites_seen:
      suite = self.test_suites[suite_name]
      for sub_suite in suite:
        resolved_suites.add(sub_suite)
      resolved_suites.add(suite_name)
    # At this point, every key in test_suites.pyl should be referenced.
    missing_suites = set(self.test_suites.keys()) - resolved_suites
    if missing_suites:
      raise BBGenErr('The following test suites were unreferenced by bots on '
                     'the waterfalls: ' + str(missing_suites))

    # All test suite exceptions must refer to bots on the waterfall.
    all_bots = set()
    missing_bots = set()
    for waterfall in self.waterfalls:
      for bot_name, tester in waterfall['machines'].items():
        all_bots.add(bot_name)
        # In order to disambiguate between bots with the same name on
        # different waterfalls, support has been added to various
        # exceptions for concatenating the waterfall name after the bot
        # name.
        all_bots.add(bot_name + ' ' + waterfall['name'])
    for exception in self.exceptions.values():
      removals = (exception.get('remove_from', []) +
                  exception.get('remove_gtest_from', []) +
                  list(exception.get('modifications', {}).keys()))
      for removal in removals:
        if removal not in all_bots:
          missing_bots.add(removal)

    missing_bots = missing_bots - set(builders_that_dont_exist)
    if missing_bots:
      raise BBGenErr('The following nonexistent machines were referenced in '
                     'the test suite exceptions: ' + str(missing_bots))

    # All mixins must be referenced.
    seen_mixins = set()
    for waterfall in self.waterfalls:
      seen_mixins = seen_mixins.union(waterfall.get('mixins', set()))
      for bot_name, tester in waterfall['machines'].items():
        seen_mixins = seen_mixins.union(tester.get('mixins', set()))
    for suite in self.test_suites.values():
      if isinstance(suite, list):
        # Don't care about this, it's a composition, which shouldn't include a
        # swarming mixin.
        continue
      for test in suite.values():
        assert isinstance(test, dict)
        seen_mixins = seen_mixins.union(test.get('mixins', set()))
    for variant in self.variants:
      # Unpack the variant from variants.pyl if it's string based.
      if isinstance(variant, str):
        variant = self.variants[variant]
      seen_mixins = seen_mixins.union(variant.get('mixins', set()))

    missing_mixins = set(self.mixins.keys()) - seen_mixins
    if missing_mixins:
      raise BBGenErr('The following mixins are unreferenced: %s. They must be'
                     ' referenced in a waterfall, machine, or test suite.' % (
                         str(missing_mixins)))

    # All variants must be referenced.
    seen_variants = set()
    for suite in self.test_suites.values():
      if isinstance(suite, list):
        continue
      for test in suite.values():
        if isinstance(test, dict):
          for variant in test.get('variants', []):
            if isinstance(variant, str):
              seen_variants.add(variant)

    missing_variants = set(self.variants.keys()) - seen_variants
    if missing_variants:
      raise BBGenErr('The following variants were unreferenced: %s. They must '
                     'be referenced in a matrix test suite under the variants '
                     'key.' % str(missing_variants))

  def type_assert(self, node, typ, filename, verbose=False):
    """Asserts that the Python AST node |node| is of type |typ|.

    If verbose is set, it prints out some helpful context lines, showing where
    exactly the error occurred in the file.
    """
    if not isinstance(node, typ):
      if verbose:
        lines = [""] + self.read_file(filename).splitlines()

        context = 2
        lines_start = max(node.lineno - context, 0)
        # Add one to include the last line
        lines_end = min(node.lineno + context, len(lines)) + 1
        lines = (
            ['== %s ==\n' % filename] +
            ["<snip>\n"] +
            ['%d %s' % (lines_start + i, line) for i, line in enumerate(
                lines[lines_start:lines_start + context])] +
            ['-' * 80 + '\n'] +
            ['%d %s' % (node.lineno, lines[node.lineno])] +
            ['-' * (node.col_offset + 3) + '^' + '-' * (
                80 - node.col_offset - 4) + '\n'] +
            ['%d %s' % (node.lineno + 1 + i, line) for i, line in enumerate(
                lines[node.lineno + 1:lines_end])] +
            ["<snip>\n"]
        )
        # Print out a useful message when a type assertion fails.
        for l in lines:
          self.print_line(l.strip())

      node_dumped = ast.dump(node, annotate_fields=False)
      # If the node is huge, truncate it so everything fits in a terminal
      # window.
      if len(node_dumped) > 60:  # pragma: no cover
        node_dumped = node_dumped[:30] + ' <SNIP> ' + node_dumped[-30:]
      raise BBGenErr(
          'Invalid .pyl file %r. Python AST node %r on line %s expected to'
          ' be %s, is %s' % (
              filename, node_dumped,
              node.lineno, typ, type(node)))

  def check_ast_list_formatted(self, keys, filename, verbose,
                               check_sorting=True):
    """Checks if a list of AST keys is correctly formatted.

    Currently only checks to ensure they're correctly sorted, and that there
    are no duplicates.

    Args:
      keys: A Python list of AST nodes.

            It's a list of AST nodes instead of a list of strings because
            when verbose is set, it tries to print out context of where the
            diffs are in the file.
      filename: The name of the file this node is from.
      verbose: If set, print out diff information about how the keys are
               incorrectly formatted.
      check_sorting: If true, checks if the list is sorted.
    Returns:
      True if the keys are correctly formatted.
    """
    if not keys:
      return True

    assert isinstance(keys[0], ast.Str)

    keys_strs = [k.s for k in keys]
    # Keys to diff against. Used below.
    keys_to_diff_against = None
    # If the list is properly formatted.
    list_formatted = True

    # Duplicates are always bad.
    if len(set(keys_strs)) != len(keys_strs):
      list_formatted = False
      keys_to_diff_against = list(collections.OrderedDict.fromkeys(keys_strs))

    if check_sorting and sorted(keys_strs) != keys_strs:
      list_formatted = False

    if list_formatted:
      return True

    if verbose:
      line_num = keys[0].lineno
      keys = [k.s for k in keys]
      if check_sorting:
        # If we have duplicates, sorting this will take care of it anyways.
        keys_to_diff_against = sorted(set(keys))
      # else, keys_to_diff_against is set above already

      self.print_line('=' * 80)
      self.print_line('(First line of keys is %s)' % line_num)
      for line in difflib.context_diff(
          keys, keys_to_diff_against,
          fromfile='current (%r)' % filename, tofile='sorted', lineterm=''):
        self.print_line(line)
      self.print_line('=' * 80)
    return False

  def check_ast_dict_formatted(self, node, filename, verbose):
    """Checks if an AST dictionary's keys are correctly formatted.

    Just a simple wrapper around check_ast_list_formatted.

    Args:
      node: An AST node. Assumed to be a dictionary.
      filename: The name of the file this node is from.
      verbose: If set, print out diff information about how the keys are
               incorrectly formatted.
    Returns:
      True if the dictionary is correctly formatted.
    """
    keys = []
    # The keys of this dict are ordered as ordered in the file; normal python
    # dictionary keys are given an arbitrary order, but since we parsed the
    # file itself, the order as given in the file is preserved.
    for key in node.keys:
      self.type_assert(key, ast.Str, filename, verbose)
      keys.append(key)
    return self.check_ast_list_formatted(keys, filename, verbose)

  def check_input_files_sorting(self, verbose=False):
    # TODO(https://crbug.com/886993): Add the ability for this script to
    # actually format the files, rather than just complain if they're
    # incorrectly formatted.
    bad_files = set()

    def parse_file(filename):
      """Parses and validates a .pyl file.

      Returns an AST node representing the value in the pyl file."""
      parsed = ast.parse(self.read_file(self.pyl_file_path(filename)))

      # Must be a module.
      self.type_assert(parsed, ast.Module, filename, verbose)
      module = parsed.body

      # Only one expression in the module.
      self.type_assert(module, list, filename, verbose)
      if len(module) != 1:  # pragma: no cover
        raise BBGenErr('Invalid .pyl file %s' % filename)
      expr = module[0]
      self.type_assert(expr, ast.Expr, filename, verbose)

      return expr.value

    # Handle this separately
    filename = 'waterfalls.pyl'
    value = parse_file(filename)
    # Value should be a list.
    self.type_assert(value, ast.List, filename, verbose)

    keys = []
    for elm in value.elts:
      self.type_assert(elm, ast.Dict, filename, verbose)
      waterfall_name = None
      for key, val in zip(elm.keys, elm.values):
        self.type_assert(key, ast.Str, filename, verbose)
        if key.s == 'machines':
          if not self.check_ast_dict_formatted(val, filename, verbose):
            bad_files.add(filename)
        if key.s == "name":
          self.type_assert(val, ast.Str, filename, verbose)
          waterfall_name = val
      assert waterfall_name
      keys.append(waterfall_name)

    if not self.check_ast_list_formatted(keys, filename, verbose):
      bad_files.add(filename)

    for filename in (
        'mixins.pyl',
        'test_suites.pyl',
        'test_suite_exceptions.pyl',
    ):
      value = parse_file(filename)
      # Value should be a dictionary.
      self.type_assert(value, ast.Dict, filename, verbose)

      if not self.check_ast_dict_formatted(
          value, filename, verbose):
        bad_files.add(filename)

      if filename == 'test_suites.pyl':
        expected_keys = ['basic_suites',
                         'compound_suites',
                         'matrix_compound_suites']
        actual_keys = [node.s for node in value.keys]
        assert all(key in expected_keys for key in actual_keys), (
            'Invalid %r file; expected keys %r, got %r' % (
                filename, expected_keys, actual_keys))
        suite_dicts = list(value.values)
        # There are at most three suite groups, so there should be at most
        # three values.
        assert len(suite_dicts) <= 3
        for suite_group in suite_dicts:
          if not self.check_ast_dict_formatted(
              suite_group, filename, verbose):
            bad_files.add(filename)

        for key, suite in zip(value.keys, value.values):
          # The compound suites are checked in
          # 'check_composition_type_test_suites()'
          if key.s == 'basic_suites':
            for group in suite.values:
              if not self.check_ast_dict_formatted(group, filename, verbose):
                bad_files.add(filename)
            break

      elif filename == 'test_suite_exceptions.pyl':
        # Check the values for each test.
        for test in value.values:
          for kind, node in zip(test.keys, test.values):
            if isinstance(node, ast.Dict):
              if not self.check_ast_dict_formatted(node, filename, verbose):
                bad_files.add(filename)
            elif kind.s == 'remove_from':
              # Don't care about sorting; these are usually grouped, since the
              # same bug can affect multiple builders. Do want to make sure
              # there aren't duplicates.
              if not self.check_ast_list_formatted(node.elts, filename, verbose,
                                                   check_sorting=False):
                bad_files.add(filename)

    if bad_files:
      raise BBGenErr(
          'The following files have invalid keys: %s\n. They are either '
          'unsorted, or have duplicates. Re-run this with --verbose to see '
          'more details.' % ', '.join(bad_files))

  def check_output_file_consistency(self, verbose=False):
    self.load_configuration_files()
    # All waterfalls/bucket .json files must have been written
    # by this script already.
    self.resolve_configuration_files()
    ungenerated_files = set()
    outputs = self.generate_outputs()
    for filename, expected_contents in outputs.items():
      expected = self.jsonify(expected_contents)
      file_path = filename + '.json'
      current = self.read_file(self.pyl_file_path(file_path))
      if expected != current:
        ungenerated_files.add(filename)
        if verbose:  # pragma: no cover
          self.print_line('File ' + filename +
                          '.json did not have the following expected '
                          'contents:')
          for line in difflib.unified_diff(
              expected.splitlines(),
              current.splitlines(),
              fromfile='expected', tofile='current'):
            self.print_line(line)

    if ungenerated_files:
      raise BBGenErr(
          'The following files have not been properly '
          'autogenerated by generate_buildbot_json.py: ' +
          ', '.join([filename + '.json' for filename in ungenerated_files]))

    for builder_group, builders in outputs.items():
      for builder, step_types in builders.items():
        for step_data in step_types.get('gtest_tests', []):
          step_name = step_data.get('name', step_data['test'])
          self._check_swarming_config(builder_group, builder, step_name,
                                      step_data)
        for step_data in step_types.get('isolated_scripts', []):
          step_name = step_data.get('name', step_data['isolate_name'])
          self._check_swarming_config(builder_group, builder, step_name,
                                      step_data)

  def _check_swarming_config(self, filename, builder, step_name, step_data):
    # TODO(crbug.com/1203436): Ensure all swarming tests specify os and cpu,
    # not just mac tests.
    if ('mac' in builder.lower()
        and step_data['swarming']['can_use_on_swarming_builders']):
      dimension_sets = step_data['swarming'].get('dimension_sets')
      if not dimension_sets:
        raise BBGenErr('%s: %s / %s : os and cpu must be specified for mac '
                       'swarmed tests' % (filename, builder, step_name))
      for s in dimension_sets:
        if not s.get('os') or not s.get('cpu'):
          raise BBGenErr('%s: %s / %s : os and cpu must be specified for mac '
                         'swarmed tests' % (filename, builder, step_name))

  def check_consistency(self, verbose=False):
    self.check_input_file_consistency(verbose)  # pragma: no cover
    self.check_output_file_consistency(verbose)  # pragma: no cover

  def does_test_match(self, test_info, params_dict):
    """Checks to see if the test matches the parameters given.

    Compares the provided test_info with the params_dict to see
    if the bot matches the parameters given. If so, returns True.
    Otherwise, returns False.

    Args:
      test_info (dict): Information about a specific bot provided
                        in the format shown in waterfalls.pyl
      params_dict (dict): Dictionary of parameters and their values
                          to look for in the bot
        Ex: {
          'device_os': 'android',
          '--flag': True,
          'mixins': ['mixin1', 'mixin2'],
          'ex_key': 'ex_value'
        }
    """
    DIMENSION_PARAMS = ['device_os', 'device_type', 'os',
                        'kvm', 'pool', 'integrity']  # dimension parameters
    SWARMING_PARAMS = ['shards', 'hard_timeout', 'idempotent',
                       'can_use_on_swarming_builders']
    for param in params_dict:
      # if dimension parameter
      if param in DIMENSION_PARAMS or param in SWARMING_PARAMS:
        if not 'swarming' in test_info:
          return False
        swarming = test_info['swarming']
        if param in SWARMING_PARAMS:
          if not param in swarming:
            return False
          if not str(swarming[param]) == params_dict[param]:
            return False
        else:
          if not 'dimension_sets' in swarming:
            return False
          d_set = swarming['dimension_sets']
          # only looking at the first dimension set
          if not param in d_set[0]:
            return False
          if not d_set[0][param] == params_dict[param]:
            return False

      # if flag
      elif param.startswith('--'):
        if not 'args' in test_info:
          return False
        if not param in test_info['args']:
          return False

      # not a dimension parameter/flag/mixin
      else:
        if not param in test_info:
          return False
        if not test_info[param] == params_dict[param]:
          return False
    return True
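
  # Illustrative sketch (hypothetical test_info): with
  #   test_info = {'swarming': {'shards': 2,
  #                             'dimension_sets': [{'device_os': 'MMB29Q'}]},
  #                'args': ['--enable-features=Foo']}
  # a params_dict of {'shards': '2', 'device_os': 'MMB29Q',
  # '--enable-features=Foo': True} matches, because swarming values are
  # compared as strings, dimension parameters are only looked up in the first
  # dimension set, and '--' keys are checked against 'args'.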

  def error_msg(self, msg):
    """Prints an error message.

    In addition to a tailored error message, also prints
    out where the user can find more help. Then, the program exits.
    """
    self.print_line(msg + (' If you need more information, ' +
                           'please run with -h or --help to see valid '
                           'commands.'))
    sys.exit(1)

  def find_bots_that_run_test(self, test, bots):
    matching_bots = []
    for bot in bots:
      bot_info = bots[bot]
      tests = self.flatten_tests_for_bot(bot_info)
      for test_info in tests:
        test_name = ""
        if 'name' in test_info:
          test_name = test_info['name']
        elif 'test' in test_info:
          test_name = test_info['test']
        if not test_name == test:
          continue
        matching_bots.append(bot)
    return matching_bots

  def find_tests_with_params(self, tests, params_dict):
    matching_tests = []
    for test_name in tests:
      test_info = tests[test_name]
      if not self.does_test_match(test_info, params_dict):
        continue
      if not test_name in matching_tests:
        matching_tests.append(test_name)
    return matching_tests

  def flatten_waterfalls_for_query(self, waterfalls):
    bots = {}
    for waterfall in waterfalls:
      waterfall_tests = self.generate_output_tests(waterfall)
      for bot in waterfall_tests:
        bot_info = waterfall_tests[bot]
        bots[bot] = bot_info
    return bots

  def flatten_tests_for_bot(self, bot_info):
    """Returns a list of flattened tests.

    Returns a list of tests not grouped by test category
    for a specific bot.
    """
    TEST_CATS = self.get_test_generator_map().keys()
    tests = []
    for test_cat in TEST_CATS:
      if not test_cat in bot_info:
        continue
      test_cat_tests = bot_info[test_cat]
      tests = tests + test_cat_tests
    return tests

  def flatten_tests_for_query(self, test_suites):
    """Returns a flattened dictionary of tests.

    Returns a dictionary of tests associated with their
    configuration, not grouped by their test suite.
    """
    tests = {}
    for test_suite in test_suites.values():
      for test in test_suite:
        test_info = test_suite[test]
        test_name = test
        if 'name' in test_info:
          test_name = test_info['name']
        tests[test_name] = test_info
    return tests

  def parse_query_filter_params(self, params):
    """Parses the filter parameters.

    Creates a dictionary from the parameters provided
    to filter the bot array.
    """
    params_dict = {}
    for p in params:
      # flag
      if p.startswith("--"):
        params_dict[p] = True
      else:
        pair = p.split(":")
        if len(pair) != 2:
          self.error_msg('Invalid command.')
        # regular parameters
        if pair[1].lower() == "true":
          params_dict[pair[0]] = True
        elif pair[1].lower() == "false":
          params_dict[pair[0]] = False
        else:
          params_dict[pair[0]] = pair[1]
    return params_dict
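
  # For example, the filter string 'device_os:android&--flag' is split on '&'
  # by the caller and parsed here into
  #   {'device_os': 'android', '--flag': True}
  # with the values 'true' and 'false' (any case) converted to Python
  # booleans.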

  def get_test_suites_dict(self, bots):
    """Returns a dictionary of bots and their tests.

    Returns a dictionary of bots and a list of their associated tests.
    """
    test_suite_dict = dict()
    for bot in bots:
      bot_info = bots[bot]
      tests = self.flatten_tests_for_bot(bot_info)
      test_suite_dict[bot] = tests
    return test_suite_dict

  def output_query_result(self, result, json_file=None):
    """Outputs the result of the query.

    If a json file parameter name is provided, then
    the result is output into the json file. If not,
    then the result is printed to the console.
    """
    output = json.dumps(result, indent=2)
    if json_file:
      self.write_file(json_file, output)
    else:
      self.print_line(output)

  # pylint: disable=inconsistent-return-statements
  def query(self, args):
    """Queries tests or bots.

    Depending on the arguments provided, outputs a json of
    tests or bots matching the appropriate optional parameters provided.
    """
    # split up the query statement
    query = args.query.split('/')
    self.load_configuration_files()
    self.resolve_configuration_files()

    # flatten the bots json
    tests = self.test_suites
    bots = self.flatten_waterfalls_for_query(self.waterfalls)

    cmd_class = query[0]

    # For queries starting with 'bots'
    if cmd_class == "bots":
      if len(query) == 1:
        return self.output_query_result(bots, args.json)
      # query with specific parameters
      if len(query) == 2:
        if query[1] == 'tests':
          test_suites_dict = self.get_test_suites_dict(bots)
          return self.output_query_result(test_suites_dict, args.json)
        self.error_msg("This query should be in the format: bots/tests.")
      else:
        self.error_msg("This query should have 0 or 1 '/', found %s instead."
                       % str(len(query) - 1))

    # For queries starting with 'bot'
    elif cmd_class == "bot":
      if not len(query) == 2 and not len(query) == 3:
        self.error_msg("Command should have 1 or 2 '/', found %s instead."
                       % str(len(query) - 1))
      bot_id = query[1]
      if not bot_id in bots:
        self.error_msg("No bot named '" + bot_id + "' found.")
      bot_info = bots[bot_id]
      if len(query) == 2:
        return self.output_query_result(bot_info, args.json)
      if not query[2] == 'tests':
        self.error_msg("The query should be in the format: "
                       "bot/<bot-name>/tests.")
      bot_tests = self.flatten_tests_for_bot(bot_info)
      return self.output_query_result(bot_tests, args.json)

    # For queries starting with 'tests'
    elif cmd_class == "tests":
      if not len(query) == 1 and not len(query) == 2:
        self.error_msg("The query should have 0 or 1 '/', found %s instead."
                       % str(len(query) - 1))
      flattened_tests = self.flatten_tests_for_query(tests)
      if len(query) == 1:
        return self.output_query_result(flattened_tests, args.json)
      # create the params dict
      params = query[1].split('&')
      params_dict = self.parse_query_filter_params(params)
      matching_tests = self.find_tests_with_params(flattened_tests,
                                                   params_dict)
      return self.output_query_result(matching_tests)

    # For queries starting with 'test'
    elif cmd_class == "test":
      if not len(query) == 2 and not len(query) == 3:
        self.error_msg("The query should have 1 or 2 '/', found %s instead."
                       % str(len(query) - 1))
      test_id = query[1]
      if len(query) == 2:
        flattened_tests = self.flatten_tests_for_query(tests)
        for test in flattened_tests:
          if test == test_id:
            return self.output_query_result(flattened_tests[test], args.json)
        self.error_msg("There is no test named %s." % test_id)
      if not query[2] == 'bots':
        self.error_msg("The query should be in the format: "
                       "test/<test-name>/bots")
      bots_for_test = self.find_bots_that_run_test(test_id, bots)
      return self.output_query_result(bots_for_test)

    else:
      self.error_msg("Your command did not match any valid commands. "
                     "Try starting with 'bots', 'bot', 'tests', or 'test'.")
  # pylint: enable=inconsistent-return-statements
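
  # Query shapes handled above (names are placeholders):
  #   'bots'                     -> all bots and their test specs
  #   'bots/tests'               -> per-bot flattened test lists
  #   'bot/<bot-name>'           -> the spec for one bot
  #   'bot/<bot-name>/tests'     -> flattened tests for one bot
  #   'tests'                    -> all tests keyed by name
  #   'tests/<key:value&--flag>' -> test names matching the filter params
  #   'test/<test-name>'         -> the config for one test
  #   'test/<test-name>/bots'    -> bots that run the given test
  # The string arrives via args.query (presumably a --query flag defined in
  # parse_args, which is not shown in this section).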

  def main(self):  # pragma: no cover
    if self.args.check:
      self.check_consistency(verbose=self.args.verbose)
    elif self.args.query:
      self.query(self.args)
    else:
      self.write_json_result(self.generate_outputs())
    return 0


if __name__ == "__main__":  # pragma: no cover
  generator = BBJSONGenerator(BBJSONGenerator.parse_args(sys.argv[1:]))
  sys.exit(generator.main())