- # Copyright (C) 2010 Google Inc. All rights reserved.
- #
- # Redistribution and use in source and binary forms, with or without
- # modification, are permitted provided that the following conditions are
- # met:
- #
- # * Redistributions of source code must retain the above copyright
- # notice, this list of conditions and the following disclaimer.
- # * Redistributions in binary form must reproduce the above
- # copyright notice, this list of conditions and the following disclaimer
- # in the documentation and/or other materials provided with the
- # distribution.
- # * Neither the Google name nor the names of its
- # contributors may be used to endorse or promote products derived from
- # this software without specific prior written permission.
- #
- # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- """Abstract base class for Port classes.
- The Port classes encapsulate Port-specific (platform-specific) behavior
- in the web test infrastructure.
- """
- import time
- import collections
- import json
- import logging
- import optparse
- import re
- import sys
- import tempfile
- from collections import defaultdict
- import six
- from six.moves import zip_longest
- from urllib.parse import urljoin
- from blinkpy.common import exit_codes
- from blinkpy.common import find_files
- from blinkpy.common import read_checksum_from_png
- from blinkpy.common import path_finder
- from blinkpy.common.memoized import memoized
- from blinkpy.common.system.executive import ScriptError
- from blinkpy.common.system.path import abspath_to_uri
- from blinkpy.w3c.wpt_manifest import WPTManifest, MANIFEST_NAME
- from blinkpy.web_tests.layout_package.bot_test_expectations import BotTestExpectationsFactory
- from blinkpy.web_tests.models.test_configuration import TestConfiguration
- from blinkpy.web_tests.models.test_run_results import TestRunException
- from blinkpy.web_tests.models.typ_types import (
- TestExpectations,
- ResultType,
- SerializableTypHost,
- )
- from blinkpy.web_tests.port import driver
- from blinkpy.web_tests.port import server_process
- from blinkpy.web_tests.port.factory import PortFactory
- from blinkpy.web_tests.servers import apache_http
- from blinkpy.web_tests.servers import pywebsocket
- from blinkpy.web_tests.servers import wptserve
- from blinkpy.web_tests.skia_gold import blink_skia_gold_properties as sgp
- from blinkpy.web_tests.skia_gold import blink_skia_gold_session_manager as sgsm
- _log = logging.getLogger(__name__)
- # Path relative to the build directory.
- CONTENT_SHELL_FONTS_DIR = "test_fonts"
- FONT_FILES = [
- [[CONTENT_SHELL_FONTS_DIR], 'Ahem.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Arimo-Bold.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Arimo-BoldItalic.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Arimo-Italic.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Arimo-Regular.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Cousine-Bold.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Cousine-BoldItalic.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Cousine-Italic.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Cousine-Regular.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'DejaVuSans.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'GardinerModBug.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'GardinerModCat.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Garuda.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Gelasio-Bold.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Gelasio-BoldItalic.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Gelasio-Italic.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Gelasio-Regular.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Lohit-Devanagari.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Lohit-Gurmukhi.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Lohit-Tamil.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'MuktiNarrow.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'NotoColorEmoji.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'NotoSansCJK-VF.otf.ttc', None],
- [[CONTENT_SHELL_FONTS_DIR], 'NotoSansKhmer-Regular.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'NotoSansSymbols2-Regular.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'NotoSansTibetan-Regular.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Tinos-Bold.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Tinos-BoldItalic.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Tinos-Italic.ttf', None],
- [[CONTENT_SHELL_FONTS_DIR], 'Tinos-Regular.ttf', None],
- ]
- # This is the fingerprint of wpt's certificate found in
- # blinkpy/third_party/wpt/certs. The following line is updated by
- # update_cert.py.
- WPT_FINGERPRINT = 'Nxvaj3+bY3oVrTc+Jp7m3E3sB1n3lXtnMDCyBsqEXiY='
- # One for 127.0.0.1.sxg.pem
- SXG_FINGERPRINT = '55qC1nKu2A88ESbFmk5sTPQS/ScG+8DD7P+2bgFA9iM='
- # And one for external/wpt/signed-exchange/resources/127.0.0.1.sxg.pem
- SXG_WPT_FINGERPRINT = '0Rt4mT6SJXojEMHTnKnlJ/hBKMBcI4kteBlhR1eTTdk='
- # A conservative rule for names that are valid for file or directory names.
- VALID_FILE_NAME_REGEX = re.compile(r'^[\w\-=]+$')
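- # Illustrative examples: names like 'highdpi' or 'disable-layout-ng'
- # match, while names containing spaces or slashes do not.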
- # This subdirectory lives inside the results directory and holds
- # all the disk artifacts created by web tests.
- ARTIFACTS_SUB_DIR = 'layout-test-results'
- class Port(object):
- """Abstract class for Port-specific hooks for the web_test package."""
- # Subclasses override this. This should indicate the basic implementation
- # part of the port name, e.g., 'mac', 'win', 'gtk'; there is one unique
- # value per class.
- # FIXME: Rename this to avoid confusion with the "full port name".
- port_name = None
- # Test paths use forward slash as separator on all platforms.
- TEST_PATH_SEPARATOR = '/'
- ALL_BUILD_TYPES = ('debug', 'release')
- CONTENT_SHELL_NAME = 'content_shell'
- # Update the first line in third_party/blink/web_tests/TestExpectations and
- # the documentation in docs/testing/web_test_expectations.md when this list
- # changes.
- ALL_SYSTEMS = (
- ('mac10.13', 'x86'),
- ('mac10.14', 'x86'),
- ('mac10.15', 'x86'),
- ('mac11', 'x86'),
- ('mac11-arm64', 'arm64'),
- ('mac12', 'x86_64'),
- ('mac12-arm64', 'arm64'),
- ('win10.20h2', 'x86'),
- ('win11', 'x64'),
- ('trusty', 'x86_64'),
- ('fuchsia', 'x86_64'),
- )
- CONFIGURATION_SPECIFIER_MACROS = {
- 'mac': [
- 'mac10.13', 'mac10.14', 'mac10.15', 'mac11', 'mac11-arm64',
- 'mac12', 'mac12-arm64'
- ],
- 'win': ['win10.20h2', 'win11'],
- 'linux': ['trusty'],
- 'fuchsia': ['fuchsia'],
- }
- # List of ports open on the host that the tests will connect to. When tests
- # run on a separate machine (Android and Fuchsia) these ports need to be
- # forwarded back to the host.
- # 8000, 8080 and 8443 are for http/https tests;
- # 8880 is for websocket tests (see apache_http.py and pywebsocket.py).
- # 8001, 8081, 8444, and 8445 are for http/https WPT;
- # 9001 and 9444 are for websocket WPT (see wptserve.py).
- SERVER_PORTS = [8000, 8001, 8080, 8081, 8443, 8444, 8445, 8880, 9001, 9444]
- FALLBACK_PATHS = {}
- SUPPORTED_VERSIONS = []
- # URL to the build requirements page.
- BUILD_REQUIREMENTS_URL = ''
- # The suffixes of baseline files (not extensions).
- BASELINE_SUFFIX = '-expected'
- BASELINE_MISMATCH_SUFFIX = '-expected-mismatch'
- # All of the non-reftest baseline extensions we use.
- BASELINE_EXTENSIONS = ('.wav', '.txt', '.png')
- FLAG_EXPECTATIONS_PREFIX = 'FlagExpectations'
- # The following is used for concatenating WebDriver test names.
- WEBDRIVER_SUBTEST_SEPARATOR = '>>'
- # The following is used for concatenating WebDriver test names in pytest format.
- WEBDRIVER_SUBTEST_PYTEST_SEPARATOR = '::'
- # The following two constants must match. When adding a new WPT root, also
- # remember to add an alias rule to //third_party/wpt_tools/wpt.config.json.
- # WPT_DIRS maps WPT roots on the file system to URL prefixes on wptserve.
- # The order matters: '/' MUST be the last URL prefix.
- WPT_DIRS = collections.OrderedDict([
- ('wpt_internal', '/wpt_internal/'),
- ('external/wpt', '/'),
- ])
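- # For example, web_tests/external/wpt/dom/foo.html is served by
- # wptserve at /dom/foo.html, while web_tests/wpt_internal/dom/foo.html
- # is served at /wpt_internal/dom/foo.html (illustrative paths).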
- # WPT_REGEX captures: 1. the root directory of WPT relative to web_tests
- # (without a trailing slash), 2. the path of the test within WPT (without a
- # leading slash).
- WPT_REGEX = re.compile(
- r'^(?:virtual/[^/]+/)?(external/wpt|wpt_internal)/(.*)$')
- # This regex parses the WPT-style fuzzy match syntax. For actual WPT
- # tests, this is not needed since this information is contained in the
- # manifest. However, we reuse this syntax for some non-WPT tests as well.
- WPT_FUZZY_REGEX = re.compile(
- r'<(?:html:)?meta\s+name=(?:fuzzy|"fuzzy")\s+content='
- r'"(?:(.+):)?(?:maxDifference=)?(?:(\d+)-)?(\d+);(?:totalPixels=)?(?:(\d+)-)?(\d+)"\s*/?>'
- )
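- # Illustrative example: matched against
- #   <meta name="fuzzy" content="maxDifference=0-2;totalPixels=0-40">
- # the captured groups are (None, '0', '2', '0', '40'); the optional
- # first group holds a per-reference prefix when one is present.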
- # Because this is an abstract base class, arguments to functions may be
- # unused in this class - pylint: disable=unused-argument
- @classmethod
- def latest_platform_fallback_path(cls):
- return cls.FALLBACK_PATHS[cls.SUPPORTED_VERSIONS[-1]]
- @classmethod
- def determine_full_port_name(cls, host, options, port_name):
- """Return a fully-specified port name that can be used to construct objects."""
- # Subclasses will usually override this.
- assert port_name.startswith(cls.port_name)
- return port_name
- def __init__(self, host, port_name, options=None, **kwargs):
- # This value is the "full port name", and may be different from
- # cls.port_name by having version modifiers appended to it.
- self._name = port_name
- # These are default values that should be overridden in subclasses.
- self._version = ''
- self._architecture = 'x86'
- # FIXME: Ideally we'd have a package-wide way to get a well-formed
- # options object that had all of the necessary options defined on it.
- self._options = options or optparse.Values()
- self.host = host
- self._executive = host.executive
- self._filesystem = host.filesystem
- self._path_finder = path_finder.PathFinder(host.filesystem)
- self._http_server = None
- self._websocket_server = None
- self._wpt_server = None
- self._image_differ = None
- self.server_process_constructor = server_process.ServerProcess # This can be overridden for testing.
- self._http_lock = None # FIXME: Why does this live on the port object?
- self._dump_reader = None
- if not hasattr(options, 'configuration') or not options.configuration:
- self.set_option_default('configuration',
- self.default_configuration())
- if not hasattr(options, 'target') or not options.target:
- self.set_option_default('target', self._options.configuration)
- if not hasattr(options, 'no_virtual_tests'):
- self.set_option_default('no_virtual_tests', False)
- self._test_configuration = None
- self._results_directory = None
- self._virtual_test_suites = None
- self._used_expectation_files = None
- self._skia_gold_temp_dir = None
- self._skia_gold_session_manager = None
- self._skia_gold_properties = None
- def __del__(self):
- if self._skia_gold_temp_dir:
- self._filesystem.rmtree(self._skia_gold_temp_dir,
- ignore_errors=True)
- def __str__(self):
- return 'Port{name=%s, version=%s, architecture=%s, test_configuration=%s}' % (
- self._name, self._version, self._architecture,
- self._test_configuration)
- def get_platform_tags(self):
- """Returns system condition tags that are used to find active expectations
- for a test run on a specific system."""
- return frozenset([
- self._options.configuration.lower(), self._version, self.port_name,
- self._architecture
- ])
- @memoized
- def flag_specific_config_name(self):
- """Returns the name of the flag-specific configuration if it's specified in
- --flag-specific option, or None. The name must be defined in
- FlagSpecificConfig or an AssertionError will be raised.
- """
- config_name = self.get_option('flag_specific')
- if config_name:
- configs = self._flag_specific_configs()
- assert config_name in configs, '{} is not defined in FlagSpecificConfig'.format(
- config_name)
- return config_name
- return None
- @memoized
- def _flag_specific_configs(self):
- """Reads configuration from FlagSpecificConfig and returns a dictionary from name to args."""
- config_file = self._filesystem.join(self.web_tests_dir(),
- 'FlagSpecificConfig')
- if not self._filesystem.exists(config_file):
- return {}
- try:
- json_configs = json.loads(
- self._filesystem.read_text_file(config_file))
- except ValueError as error:
- raise ValueError('{} is not a valid JSON file: {}'.format(
- config_file, error))
- configs = {}
- for config in json_configs:
- name = config['name']
- args = config['args']
- if not VALID_FILE_NAME_REGEX.match(name):
- raise ValueError(
- '{}: name "{}" contains invalid characters'.format(
- config_file, name))
- if name in configs:
- raise ValueError('{} contains duplicated name {}.'.format(
- config_file, name))
- if args in configs.values():
- raise ValueError(
- '{}: name "{}" has the same args as another entry.'.format(
- config_file, name))
- configs[name] = args
- return configs
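- # A minimal sketch of the FlagSpecificConfig format accepted above
- # (the names and flags here are hypothetical):
- # [
- #   {"name": "highdpi", "args": ["--force-device-scale-factor=1.5"]},
- #   {"name": "future-js", "args": ["--js-flags=--future"]}
- # ]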
- def _specified_additional_driver_flags(self):
- """Returns the list of additional driver flags specified by the user in
- the following ways, concatenated:
- 1. Flags in web_tests/additional-driver-flag.setting.
- 2. Flags expanded from --flag-specific=<name> based on the flag-specific config.
- 3. Zero or more flags passed by --additional-driver-flag.
- """
- flags = []
- flag_file = self._filesystem.join(self.web_tests_dir(),
- 'additional-driver-flag.setting')
- if self._filesystem.exists(flag_file):
- flags = self._filesystem.read_text_file(flag_file).split()
- flag_specific_option = self.flag_specific_config_name()
- if flag_specific_option:
- flags += self._flag_specific_configs()[flag_specific_option]
- flags += self.get_option('additional_driver_flag', [])
- return flags
- def additional_driver_flags(self):
- flags = self._specified_additional_driver_flags()
- if self.driver_name() == self.CONTENT_SHELL_NAME:
- flags += [
- '--run-web-tests',
- '--ignore-certificate-errors-spki-list=' + WPT_FINGERPRINT +
- ',' + SXG_FINGERPRINT + ',' + SXG_WPT_FINGERPRINT,
- # Required for WebTransport tests.
- '--origin-to-force-quic-on=web-platform.test:11000',
- '--user-data-dir'
- ]
- if self.get_option('nocheck_sys_deps', False):
- flags.append('--disable-system-font-check')
- # If we're already repeating the tests more than once, then we're not
- # particularly concerned with speed. Resetting the shell between tests
- # increases test run time by 2-5X, but provides more consistent results
- # (fewer state leaks between tests).
- if (self.get_option('reset_shell_between_tests')
- or (self.get_option('repeat_each')
- and self.get_option('repeat_each') > 1)
- or (self.get_option('iterations')
- and self.get_option('iterations') > 1)):
- flags += ['--reset-shell-between-tests']
- return flags
- def supports_per_test_timeout(self):
- return False
- def default_smoke_test_only(self):
- return False
- def _default_timeout_ms(self):
- return 6000
- def timeout_ms(self):
- timeout_ms = self._default_timeout_ms()
- if self.get_option('configuration') == 'Debug':
- # Debug is about 5x slower than Release.
- return 5 * timeout_ms
- if self._build_has_dcheck_always_on():
- # Release with DCHECK is also slower than pure Release.
- return 2 * timeout_ms
- return timeout_ms
- def skia_gold_temp_dir(self):
- return self._skia_gold_temp_dir
- def skia_gold_properties(self):
- if not self._skia_gold_properties:
- self._skia_gold_properties = sgp.BlinkSkiaGoldProperties(
- self._options)
- return self._skia_gold_properties
- def skia_gold_session_manager(self):
- if not self._skia_gold_session_manager:
- self._skia_gold_temp_dir = self._filesystem.mkdtemp()
- self._skia_gold_session_manager = sgsm.BlinkSkiaGoldSessionManager(
- str(self._skia_gold_temp_dir), self.skia_gold_properties())
- return self._skia_gold_session_manager
- def skia_gold_json_keys(self):
- return {
- 'configuration': self._options.configuration.lower(),
- 'version': self._version,
- 'port': self.port_name,
- 'architecture': self._architecture,
- 'ignore': '1',
- }
- @memoized
- def _build_has_dcheck_always_on(self):
- args_gn_file = self._build_path('args.gn')
- if not self._filesystem.exists(args_gn_file):
- _log.error('Unable to find %s', args_gn_file)
- return False
- contents = self._filesystem.read_text_file(args_gn_file)
- return bool(
- re.search(r'^\s*dcheck_always_on\s*=\s*true\s*(#.*)?$', contents,
- re.MULTILINE))
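- # For example, an args.gn containing the line
- #   dcheck_always_on = true
- # (optionally followed by a '#' comment) makes this return True.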
- def driver_stop_timeout(self):
- """Returns the amount of time in seconds to wait before killing the process in driver.stop()."""
- # We want to wait for at least 3 seconds, but if we are really slow, we
- # want to be slow on cleanup as well (for things like ASAN, Valgrind, etc.)
- return (3.0 * float(self.get_option('timeout_ms', '0')) /
- self._default_timeout_ms())
- def default_batch_size(self):
- """Returns the default batch size to use for this port."""
- if self.get_option('enable_sanitizer'):
- # ASAN/MSAN/TSAN use more memory than regular content_shell. Their
- # memory usage may also grow over time, up to a certain point.
- # Relaunching the driver periodically helps keep it under control.
- return 40
- # The default batch size is 100, to guard against resource leaks.
- return 100
- def default_child_processes(self):
- """Returns the number of child processes to use for this port."""
- return self._executive.cpu_count()
- def default_max_locked_shards(self):
- """Returns the number of "locked" shards to run in parallel (like the http tests)."""
- max_locked_shards = int(self.default_child_processes()) // 4
- if not max_locked_shards:
- return 1
- return max_locked_shards
- def baseline_version_dir(self):
- """Returns the absolute path to the platform-and-version-specific results."""
- baseline_search_paths = self.baseline_search_path()
- return baseline_search_paths[0]
- def baseline_flag_specific_dir(self):
- """If --flag-specific is specified, returns the absolute path to the flag-specific
- platform-independent results. Otherwise returns None."""
- config_name = self.flag_specific_config_name()
- if not config_name:
- return None
- return self._filesystem.join(self.web_tests_dir(), 'flag-specific',
- config_name)
- def baseline_search_path(self):
- return (self.get_option('additional_platform_directory', []) +
- self._flag_specific_baseline_search_path() +
- self._compare_baseline() +
- list(self.default_baseline_search_path()))
- def default_baseline_search_path(self):
- """Returns a list of absolute paths to directories to search under for baselines.
- The directories are searched in order.
- """
- return [self._absolute_baseline_path(platform_dir)
- for platform_dir in self.FALLBACK_PATHS[self.version()]]
- @memoized
- def _compare_baseline(self):
- factory = PortFactory(self.host)
- target_port = self.get_option('compare_port')
- if target_port:
- return factory.get(target_port).default_baseline_search_path()
- return []
- def _check_file_exists(self,
- path_to_file,
- file_description,
- override_step=None,
- more_logging=True):
- """Verifies that the file is present where expected, or logs an error.
- Args:
- path_to_file: The path at which the file is expected.
- file_description: The (human-friendly) name or description of the
- file you're looking for (e.g., "HTTP Server"). Used for error logging.
- override_step: An optional string to be logged if the check fails.
- more_logging: Whether or not to log the error messages.
- Returns:
- True if the file exists, else False.
- """
- if not self._filesystem.exists(path_to_file):
- if more_logging:
- _log.error('Unable to find %s', file_description)
- _log.error(' at %s', path_to_file)
- if override_step:
- _log.error(' %s', override_step)
- _log.error('')
- return False
- return True
- def check_build(self, needs_http, printer):
- if not self._check_file_exists(self._path_to_driver(), 'test driver'):
- return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
- if not self._check_driver_build_up_to_date(
- self.get_option('configuration')):
- return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
- if not self._check_file_exists(self._path_to_image_diff(),
- 'image_diff'):
- return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
- if self._dump_reader and not self._dump_reader.check_is_functional():
- return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
- if needs_http and not self.check_httpd():
- return exit_codes.UNEXPECTED_ERROR_EXIT_STATUS
- return exit_codes.OK_EXIT_STATUS
- def check_sys_deps(self):
- """Checks whether the system is properly configured.
- Most checks happen during invocation of the driver prior to running
- tests. This can be overridden to run custom checks.
- Returns:
- An exit status code.
- """
- return exit_codes.OK_EXIT_STATUS
- def check_httpd(self):
- httpd_path = self.path_to_apache()
- if httpd_path:
- try:
- env = self.setup_environ_for_server()
- if self._executive.run_command(
- [httpd_path, '-v'], env=env, return_exit_code=True) != 0:
- _log.error('httpd seems broken. Cannot run http tests.')
- return False
- return True
- except OSError as e:
- _log.error('while trying to run: ' + httpd_path)
- _log.error('httpd launch error: ' + repr(e))
- _log.error('No httpd found. Cannot run http tests.')
- return False
- def do_text_results_differ(self, expected_text, actual_text):
- return expected_text != actual_text
- def do_audio_results_differ(self, expected_audio, actual_audio):
- return expected_audio != actual_audio
- def diff_image(self,
- expected_contents,
- actual_contents,
- max_channel_diff=None,
- max_pixels_diff=None):
- """Compares two images and returns an (image diff, error string) pair.
- If an error occurs (like image_diff isn't found, or crashes), we log an
- error and return True (for a diff).
- """
- # If only one of them exists, return that one.
- if not actual_contents and not expected_contents:
- return (None, None, None)
- if not actual_contents:
- return (expected_contents, None, None)
- if not expected_contents:
- return (actual_contents, None, None)
- tempdir = self._filesystem.mkdtemp()
- expected_filename = self._filesystem.join(str(tempdir), 'expected.png')
- self._filesystem.write_binary_file(expected_filename,
- expected_contents)
- actual_filename = self._filesystem.join(str(tempdir), 'actual.png')
- self._filesystem.write_binary_file(actual_filename, actual_contents)
- diff_filename = self._filesystem.join(str(tempdir), 'diff.png')
- executable = self._path_to_image_diff()
- # Although we are handed 'old', 'new', image_diff wants 'new', 'old'.
- command = [
- executable, '--diff', actual_filename, expected_filename,
- diff_filename
- ]
- # Tells image_diff to allow a tolerance when calculating the pixel
- # diff, to account for variance when the tests are run on an actual
- # GPU.
- if self.get_option('fuzzy_diff'):
- command.append('--fuzzy-diff')
- # The max_channel_diff and max_pixels_diff arguments are used by WPT
- # tests for fuzzy reftests. See
- # https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
- if max_channel_diff is not None:
- command.append('--fuzzy-max-channel-diff={}'.format('-'.join(
- map(str, max_channel_diff))))
- if max_pixels_diff is not None:
- command.append('--fuzzy-max-pixels-diff={}'.format('-'.join(
- map(str, max_pixels_diff))))
- result = None
- stats = None
- err_str = None
- def handle_output(output):
- if output:
- match = re.search(
- "Found pixels_different: (\d+), max_channel_diff: (\d+)",
- output)
- _log.debug(output)
- if match:
- return {
- "maxDifference": int(match.group(2)),
- "totalPixels": int(match.group(1))
- }
- return None
- try:
- output = self._executive.run_command(command)
- stats = handle_output(output)
- except ScriptError as error:
- if error.exit_code == 1:
- result = self._filesystem.read_binary_file(diff_filename)
- stats = handle_output(error.output)
- else:
- err_str = 'Image diff returned an exit code of %s. See http://crbug.com/278596' % error.exit_code
- except OSError as error:
- err_str = 'error running image diff: %s' % error
- finally:
- self._filesystem.rmtree(str(tempdir))
- return (result, stats, err_str or None)
- def driver_name(self):
- if self.get_option('driver_name'):
- return self.get_option('driver_name')
- return self.CONTENT_SHELL_NAME
- def expected_baselines_by_extension(self, test_name):
- """Returns a dict mapping baseline suffix to relative path for each baseline in a test.
- For reftests, it returns ".==" or ".!=" instead of the suffix.
- """
- # FIXME: The name similarity between this and expected_baselines()
- # below is unfortunate. We should probably rename them both.
- baseline_dict = {}
- reference_files = self.reference_files(test_name)
- if reference_files:
- # FIXME: How should this handle more than one type of reftest?
- baseline_dict['.' + reference_files[0][0]] = \
- self.relative_test_filename(reference_files[0][1])
- for extension in self.BASELINE_EXTENSIONS:
- path = self.expected_filename(
- test_name, extension, return_default=False)
- baseline_dict[extension] = self.relative_test_filename(
- path) if path else path
- return baseline_dict
- def output_filename(self, test_name, suffix, extension):
- """Generates the output filename for a test.
- This method gives a proper filename for various outputs of a test,
- including baselines and actual results. Usually, the output filename
- follows the pattern: test_name_without_ext+suffix+extension, but when
- the test name contains query strings, e.g. external/wpt/foo.html?wss,
- test_name_without_ext is mangled to be external/wpt/foo_wss.
- Use this method instead of writing another ad-hoc mangling.
- Args:
- test_name: The name of a test.
- suffix: A suffix string to add before the extension
- (e.g. "-expected").
- extension: The extension of the output file (starting with .).
- Returns:
- A string, the output filename.
- """
- # WPT names might contain query strings, e.g. external/wpt/foo.html?wss,
- # in which case we mangle test_name_root (the part of a path before the
- # last extension point) to external/wpt/foo_wss, and the output filename
- # becomes external/wpt/foo_wss-expected.txt.
- index = test_name.find('?')
- if index != -1:
- test_name_root, _ = self._filesystem.splitext(test_name[:index])
- query_part = test_name[index:]
- test_name_root += self._filesystem.sanitize_filename(query_part)
- else:
- test_name_root, _ = self._filesystem.splitext(test_name)
- return test_name_root + suffix + extension
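- # Examples, following the mangling described above:
- #   output_filename('fast/dom/foo.html', '-expected', '.txt')
- #     -> 'fast/dom/foo-expected.txt'
- #   output_filename('external/wpt/foo.html?wss', '-expected', '.txt')
- #     -> 'external/wpt/foo_wss-expected.txt'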
- def expected_baselines(self,
- test_name,
- extension,
- all_baselines=False,
- match=True):
- """Given a test name, finds where the baseline results are located.
- Return values will be in the format appropriate for the current
- platform (e.g., "\\" for path separators on Windows). If the results
- file is not found, then None will be returned for the directory,
- but the expected relative pathname will still be returned.
- This routine is generic but lives here since it is used in
- conjunction with the other baseline and filename routines that are
- platform specific.
- Args:
- test_name: Name of test file (usually a relative path under web_tests/)
- extension: File extension of the expected results, including dot;
- e.g. '.txt' or '.png'. This should not be None, but may be an
- empty string.
- all_baselines: If True, return an ordered list of all baseline paths
- for the given platform. If False, return only the first one.
- match: Whether the baseline is a match or a mismatch.
- Returns:
- A list of (baseline_dir, results_filename) pairs, where
- baseline_dir - abs path to the top of the results tree (or test
- tree)
- results_filename - relative path from top of tree to the results
- file
- (port.join() of the two gives you the full path to the file,
- unless None was returned.)
- """
- baseline_filename = self.output_filename(
- test_name,
- self.BASELINE_SUFFIX if match else self.BASELINE_MISMATCH_SUFFIX,
- extension)
- baseline_search_path = self.baseline_search_path()
- baselines = []
- for baseline_dir in baseline_search_path:
- if self._filesystem.exists(
- self._filesystem.join(baseline_dir, baseline_filename)):
- baselines.append((baseline_dir, baseline_filename))
- if not all_baselines and baselines:
- return baselines
- baseline_dir = self.generic_baselines_dir()
- if self._filesystem.exists(
- self._filesystem.join(baseline_dir, baseline_filename)):
- baselines.append((baseline_dir, baseline_filename))
- if baselines:
- return baselines
- return [(None, baseline_filename)]
- def expected_filename(self,
- test_name,
- extension,
- return_default=True,
- fallback_base_for_virtual=True,
- match=True,
- look_for_same_folder_reference_file=False):
- """Given a test name, returns an absolute path to its expected results.
- If no expected results are found in any of the searched directories,
- the directory in which the test itself is located will be returned.
- The return value is in the format appropriate for the platform
- (e.g., "\\" for path separators on windows).
- This routine is generic but is implemented here to live alongside
- the other baseline and filename manipulation routines.
- Args:
- test_name: Name of test file (usually a relative path under web_tests/)
- extension: File extension of the expected results, including dot;
- e.g. '.txt' or '.png'. This should not be None, but may be an
- empty string.
- return_default: If True, returns the path to the generic expectation
- if nothing else is found; if False, returns None.
- fallback_base_for_virtual: For virtual test only. When no virtual
- specific baseline is found, if this parameter is True, fallback
- to find baselines of the base test; if False, depending on
- |return_default|, returns the generic virtual baseline or None.
- match: Whether the baseline is a match or a mismatch.
- look_for_same_folder_reference_file: For reference test only. Returns
- the reference file if found in the same folder of the test file.
- Returns:
- An absolute path to its expected results, or None if not found.
- """
- # The [0] means the first expected baseline (which is the one to be
- # used) in the fallback paths.
- baseline_dir, baseline_filename = self.expected_baselines(
- test_name, extension, match=match)[0]
- if baseline_dir:
- return self._filesystem.join(baseline_dir, baseline_filename)
- if look_for_same_folder_reference_file:
- path = self._filesystem.join(self.web_tests_dir(),
- baseline_filename)
- if self._filesystem.exists(path):
- return path
- if fallback_base_for_virtual:
- actual_test_name = self.lookup_virtual_test_base(test_name)
- if actual_test_name:
- return self.expected_filename(
- actual_test_name,
- extension,
- return_default,
- match=match,
- look_for_same_folder_reference_file=look_for_same_folder_reference_file
- )
- if return_default:
- return self._filesystem.join(self.generic_baselines_dir(),
- baseline_filename)
- return None
- def fallback_expected_filename(self, test_name, extension):
- """Given a test name, returns an absolute path to its next fallback baseline.
- Args:
- same as expected_filename()
- Returns:
- An absolute path to the next fallback baseline, or None if not found.
- """
- baselines = self.expected_baselines(
- test_name, extension, all_baselines=True)
- if len(baselines) < 2:
- actual_test_name = self.lookup_virtual_test_base(test_name)
- if actual_test_name:
- if len(baselines) == 0:
- return self.fallback_expected_filename(
- actual_test_name, extension)
- # In this case, baselines[0] is the current baseline of the
- # virtual test, so the first base test baseline is the fallback
- # baseline of the virtual test.
- return self.expected_filename(
- actual_test_name, extension, return_default=False)
- return None
- baseline_dir, baseline_filename = baselines[1]
- if baseline_dir:
- return self._filesystem.join(baseline_dir, baseline_filename)
- return None
- def expected_checksum(self, test_name):
- """Returns the checksum of the image we expect the test to produce,
- or None if it is a text-only test.
- """
- png_path = self.expected_filename(test_name, '.png')
- if self._filesystem.exists(png_path):
- with self._filesystem.open_binary_file_for_reading(
- png_path) as filehandle:
- return read_checksum_from_png.read_checksum(filehandle)
- return None
- def expected_image(self, test_name):
- """Returns the image we expect the test to produce."""
- baseline_path = self.expected_filename(test_name, '.png')
- if not self._filesystem.exists(baseline_path):
- return None
- return self._filesystem.read_binary_file(baseline_path)
- def expected_audio(self, test_name):
- baseline_path = self.expected_filename(test_name, '.wav')
- if not self._filesystem.exists(baseline_path):
- return None
- return self._filesystem.read_binary_file(baseline_path)
- def expected_text(self, test_name):
- """Returns the text output we expect the test to produce, or None
- if we don't expect there to be any text output.
- End-of-line characters are normalized to '\n'.
- """
- # FIXME: DRT output is actually utf-8, but since we don't decode the
- # output from DRT (instead treating it as a binary string), we read the
- # baselines as a binary string, too.
- baseline_path = self.expected_filename(test_name, '.txt')
- if not self._filesystem.exists(baseline_path):
- return None
- text = self._filesystem.read_binary_file(baseline_path)
- return text.replace(b'\r\n', b'\n')
- def expected_subtest_failure(self, test_name):
- baseline = self.expected_text(test_name)
- if baseline:
- baseline = baseline.decode('utf8', 'replace')
- if re.search(r"^(FAIL|NOTRUN|TIMEOUT)", baseline, re.MULTILINE):
- return True
- return False
- def expected_harness_error(self, test_name):
- baseline = self.expected_text(test_name)
- if baseline:
- baseline = baseline.decode('utf8', 'replace')
- if re.search(r"^Harness Error\.", baseline, re.MULTILINE):
- return True
- return False
- def reference_files(self, test_name):
- """Returns a list of expectation (== or !=) and filename pairs"""
- # Try to find -expected.* or -expected-mismatch.* in the same directory.
- reftest_list = []
- for expectation in ('==', '!='):
- for extension in Port.supported_file_extensions:
- path = self.expected_filename(
- test_name,
- extension,
- match=(expectation == '=='),
- look_for_same_folder_reference_file=True
- )
- if self._filesystem.exists(path):
- reftest_list.append((expectation, path))
- if reftest_list:
- return reftest_list
- # Try to extract information from MANIFEST.json.
- match = self.WPT_REGEX.match(test_name)
- if not match:
- return []
- wpt_path = match.group(1)
- path_in_wpt = match.group(2)
- for expectation, ref_path_in_wpt in self.wpt_manifest(
- wpt_path).extract_reference_list(path_in_wpt):
- ref_absolute_path = self._filesystem.join(
- self.web_tests_dir(), wpt_path + ref_path_in_wpt)
- reftest_list.append((expectation, ref_absolute_path))
- return reftest_list
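- # For example, for a test fast/foo.html with a fast/foo-expected.html
- # reference next to it, this returns a single-entry list like
- # [('==', '/absolute/path/to/fast/foo-expected.html')] (illustrative).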
- def tests(self, paths=None):
- """Returns all tests or tests matching supplied paths.
- Args:
- paths: Array of paths to match. If supplied, this function will only
- return tests matching at least one path in paths.
- Returns:
- An array of test paths and test names. The latter are web platform
- tests that don't correspond to file paths but are valid tests,
- for instance a file path test.any.js could correspond to two test
- names: test.any.html and test.any.worker.html.
- """
- tests = self.real_tests(paths)
- if paths:
- if not self._options.no_virtual_tests:
- tests.extend(self._virtual_tests_matching_paths(paths))
- if (any(wpt_path in path for wpt_path in self.WPT_DIRS
- for path in paths)
- # TODO(robertma): Remove this special case when external/wpt is moved to wpt.
- or any('external' in path for path in paths)):
- tests.extend(self._wpt_test_urls_matching_paths(paths))
- else:
- # '/' is used instead of filesystem.sep as the WPT manifest always
- # uses '/' for paths (it is not OS dependent).
- wpt_tests = [
- wpt_path + '/' + test for wpt_path in self.WPT_DIRS
- for test in self.wpt_manifest(wpt_path).all_urls()
- ]
- tests_by_dir = defaultdict(list)
- for test in tests + wpt_tests:
- dirname = self._filesystem.dirname(test) + '/'
- tests_by_dir[dirname].append(test)
- if not self._options.no_virtual_tests:
- tests.extend(self._all_virtual_tests(tests_by_dir))
- tests.extend(wpt_tests)
- return tests
- def real_tests_from_dict(self, paths, tests_by_dir):
- """Find all real tests in paths, using results saved in dict."""
- files = []
- for path in paths:
- if self._has_supported_extension_for_all(path):
- # only append the file when it is in tests_by_dir
- dirname = self._filesystem.dirname(path) + '/'
- if path in tests_by_dir.get(dirname, []):
- files.append(path)
- continue
- path = path + '/' if path[-1] != '/' else path
- for key, value in tests_by_dir.items():
- if key.startswith(path):
- files.extend(value)
- return files
- def real_tests(self, paths):
- """Find all real tests in paths except WPT."""
- # When collecting test cases, skip these directories.
- skipped_directories = set([
- 'platform', 'resources', 'support', 'script-tests', 'reference',
- 'reftest', 'SmokeTests'
- ])
- # Also ignore all WPT directories. Note that this is only an
- # optimization; is_non_wpt_test_file should skip WPT regardless.
- skipped_directories |= set(self.WPT_DIRS)
- files = find_files.find(self._filesystem, self.web_tests_dir(), paths, skipped_directories,
- lambda _, dirname, filename: self.is_non_wpt_test_file(dirname, filename),
- self.test_key)
- return [self.relative_test_filename(f) for f in files]
- @staticmethod
- def is_reference_html_file(filesystem, dirname, filename):
- # TODO(robertma): We probably do not need prefixes/suffixes other than
- # -expected{-mismatch} any more. Or worse, there might be actual tests
- # with these prefixes/suffixes.
- if filename.startswith('ref-') or filename.startswith('notref-'):
- return True
- filename_without_ext, _ = filesystem.splitext(filename)
- for suffix in ['-expected', '-expected-mismatch', '-ref', '-notref']:
- if filename_without_ext.endswith(suffix):
- return True
- return False
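- # Illustrative examples: 'ref-foo.html', 'foo-expected.html' and
- # 'bar-notref.xht' are all treated as reference files; 'foo.html' is not.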
- # When collecting test cases, we include any file with these extensions.
- supported_file_extensions = set([
- '.html',
- '.xml',
- '.xhtml',
- '.xht',
- '.pl',
- '.htm',
- '.php',
- '.svg',
- '.mht',
- '.pdf',
- ])
- def _has_supported_extension_for_all(self, filename):
- extension = self._filesystem.splitext(filename)[1]
- if 'inspector-protocol' in filename and extension == '.js':
- return True
- if 'devtools' in filename and extension == '.js':
- return True
- return extension in self.supported_file_extensions
- def _has_supported_extension(self, filename):
- """Returns True if filename is one of the file extensions we want to run a test on."""
- extension = self._filesystem.splitext(filename)[1]
- return extension in self.supported_file_extensions
- def is_non_wpt_test_file(self, dirname, filename):
- # Convert dirname to a relative path to web_tests with slashes
- # normalized and ensure it has a trailing slash.
- normalized_test_dir = self.relative_test_filename(
- dirname) + self.TEST_PATH_SEPARATOR
- if any(
- normalized_test_dir.startswith(d + self.TEST_PATH_SEPARATOR)
- for d in self.WPT_DIRS):
- return False
- extension = self._filesystem.splitext(filename)[1]
- if 'inspector-protocol' in dirname and extension == '.js':
- return True
- if 'devtools' in dirname and extension == '.js':
- return True
- return (self._has_supported_extension(filename)
- and not Port.is_reference_html_file(self._filesystem, dirname,
- filename))
- @memoized
- def wpt_manifest(self, path):
- assert path in self.WPT_DIRS
- # Convert '/' to the platform-specific separator.
- path = self._filesystem.normpath(path)
- self._filesystem.maybe_make_directory(
- self._filesystem.join(self.web_tests_dir(), path))
- manifest_path = self._filesystem.join(self.web_tests_dir(), path,
- MANIFEST_NAME)
- if not self._filesystem.exists(manifest_path) or self.get_option(
- 'manifest_update', False):
- _log.debug('Generating MANIFEST.json for %s...', path)
- WPTManifest.ensure_manifest(self, path)
- return WPTManifest(self.host, manifest_path)
- def is_wpt_crash_test(self, test_name):
- """Returns whether a WPT test is a crashtest.
- See https://web-platform-tests.org/writing-tests/crashtest.html.
- """
- match = self.WPT_REGEX.match(test_name)
- if not match:
- return False
- wpt_path = match.group(1)
- path_in_wpt = match.group(2)
- return self.wpt_manifest(wpt_path).is_crash_test(path_in_wpt)
- def is_slow_wpt_test(self, test_name):
- # When DCHECK is enabled, idlharness tests run 5-6x slower due to the
- # amount of JavaScript they use (most web_tests run very little JS).
- # This causes flaky timeouts for a lot of them, as a 0.5-1s test becomes
- # close to the default 6s timeout.
- if (self.is_wpt_idlharness_test(test_name)
- and self._build_has_dcheck_always_on()):
- return True
- match = self.WPT_REGEX.match(test_name)
- if not match:
- return False
- wpt_path = match.group(1)
- path_in_wpt = match.group(2)
- return self.wpt_manifest(wpt_path).is_slow_test(path_in_wpt)
- def extract_wpt_pac(self, test_name):
- match = self.WPT_REGEX.match(test_name)
- if not match:
- return None
- wpt_path = match.group(1)
- path_in_wpt = match.group(2)
- pac = self.wpt_manifest(wpt_path).extract_test_pac(path_in_wpt)
- if pac is None:
- return None
- hosts_and_ports = self.create_driver(0).WPT_HOST_AND_PORTS
- return urljoin(
- "http://{}:{}".format(hosts_and_ports[0], hosts_and_ports[1]),
- urljoin(path_in_wpt, pac))
- def get_wpt_fuzzy_metadata(self, test_name):
- """Returns the WPT-style fuzzy metadata for the given test.
- The metadata is a pair of lists, (maxDifference, totalPixels), where
- each list is a [min, max] range, inclusive. If the test has no fuzzy metadata,
- returns (None, None).
- See https://web-platform-tests.org/writing-tests/reftests.html#fuzzy-matching
- """
- match = self.WPT_REGEX.match(test_name)
- if match:
- # This is an actual WPT test, so we can get the metadata from the manifest.
- wpt_path = match.group(1)
- path_in_wpt = match.group(2)
- return self.wpt_manifest(wpt_path).extract_fuzzy_metadata(
- path_in_wpt)
- # This is not a WPT test, so we will parse the metadata ourselves.
- if not self.test_isfile(test_name):
- return (None, None)
- # We use a safe encoding because some test files are incompatible with utf-8.
- test_file = self.read_test(test_name, "latin-1")
- if not test_file:
- return (None, None)
- # We only take the first match which is in line with what we do for WPT tests.
- fuzzy_match = self.WPT_FUZZY_REGEX.search(test_file)
- if not fuzzy_match:
- return (None, None)
- _, max_diff_min, max_diff_max, tot_pix_min, tot_pix_max = \
- fuzzy_match.groups()
- if not max_diff_min:
- max_diff_min = max_diff_max
- if not tot_pix_min:
- tot_pix_min = tot_pix_max
- return ([int(max_diff_min),
- int(max_diff_max)], [int(tot_pix_min),
- int(tot_pix_max)])
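- # For example, a non-WPT test containing
- #   <meta name="fuzzy" content="maxDifference=0-2;totalPixels=0-40">
- # yields ([0, 2], [0, 40]); for a single-value range such as
- # "maxDifference=2;totalPixels=40" the min falls back to the max,
- # giving ([2, 2], [40, 40]).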
- def get_file_path_for_wpt_test(self, test_name):
- """Returns the real file path for the given WPT test.
- Or None if the test is not a WPT.
- """
- match = self.WPT_REGEX.match(test_name)
- if not match:
- return None
- wpt_path = match.group(1)
- path_in_wpt = match.group(2)
- file_path_in_wpt = self.wpt_manifest(wpt_path).file_path_for_test_url(
- path_in_wpt)
- if not file_path_in_wpt:
- return None
- return self._filesystem.join(wpt_path, file_path_in_wpt)
- def test_key(self, test_name):
- """Turns a test name into a pair of sublists: the natural sort key of the
- dirname, and the natural sort key of the basename.
- This can be used when sorting paths so that files in a directory are
- kept together rather than being mixed in with files in
- subdirectories.
- """
- dirname, basename = self.split_test(test_name)
- return (self._natural_sort_key(dirname + self.TEST_PATH_SEPARATOR),
- self._natural_sort_key(basename))
- def _natural_sort_key(self, string_to_split):
- """Turns a string into a list of string and number chunks.
- For example: "z23a" -> ["z", 23, "a"]
- This can be used to implement "natural sort" order. See:
- http://www.codinghorror.com/blog/2007/12/sorting-for-humans-natural-sort-order.html
- http://nedbatchelder.com/blog/200712.html#e20071211T054956
- """
- def tryint(val):
- try:
- return int(val)
- except ValueError:
- return val
- return [tryint(chunk) for chunk in re.split(r'(\d+)', string_to_split)]
- def read_test(self, test_name, encoding="utf8"):
- """Returns the contents of the given test according to the given encoding.
- If no corresponding file can be found, returns None instead.
- Warning: some tests are in utf8-incompatible encodings.
- """
- path = self.abspath_for_test(test_name)
- if self._filesystem.isfile(path):
- return self._filesystem.read_binary_file(path).decode(encoding)
- base = self.lookup_virtual_test_base(test_name)
- if not base:
- return None
- path = self.abspath_for_test(base)
- if self._filesystem.isfile(path):
- return self._filesystem.read_binary_file(path).decode(encoding)
- return None
- @memoized
- def test_isfile(self, test_name):
- """Returns True if the test name refers to an existing test file."""
- # Used by test_expectations.py to apply rules to a file.
- if self._filesystem.isfile(self.abspath_for_test(test_name)):
- return True
- base = self.lookup_virtual_test_base(test_name)
- return base and self._filesystem.isfile(self.abspath_for_test(base))
- @memoized
- def test_isdir(self, test_name):
- """Returns True if the test name refers to an existing directory of tests."""
- # Used by test_expectations.py to apply rules to whole directories.
- if self._filesystem.isdir(self.abspath_for_test(test_name)):
- return True
- base = self.lookup_virtual_test_base(test_name)
- return base and self._filesystem.isdir(self.abspath_for_test(base))
- @memoized
- def test_exists(self, test_name):
- """Returns True if the test name refers to an existing test directory or file."""
- # Used by lint_test_expectations.py to determine if an entry refers to a
- # valid test.
- if self.is_wpt_test(test_name):
- # A virtual WPT test must have valid virtual prefix and base.
- if test_name.startswith('virtual/'):
- return bool(self.lookup_virtual_test_base(test_name))
- # Otherwise treat any WPT test as existing regardless of its real
- # existence on the file system.
- # TODO(crbug.com/959958): Actually check existence of WPT tests.
- return True
- return self.test_isfile(test_name) or self.test_isdir(test_name)
- def split_test(self, test_name):
- """Splits a test name into the 'directory' part and the 'basename' part."""
- index = test_name.rfind(self.TEST_PATH_SEPARATOR)
- if index < 1:
- return ('', test_name)
- return (test_name[0:index], test_name[index:])
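- # Note that the basename keeps its leading separator:
- #   split_test('fast/css/foo.html') -> ('fast/css', '/foo.html')
- #   split_test('foo.html') -> ('', 'foo.html')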
- def normalize_test_name(self, test_name):
- """Returns a normalized version of the test name or test directory."""
- if test_name.endswith('/'):
- return test_name
- if self.test_isdir(test_name):
- return test_name + '/'
- return test_name
- def driver_cmd_line(self):
- """Prints the DRT (DumpRenderTree) command that will be used."""
- return self.create_driver(0).cmd_line([])
- def update_baseline(self, baseline_path, data):
- """Updates the baseline for a test.
- Args:
- baseline_path: the actual path to use for baseline, not the path to
- the test. This function is used to update either generic or
- platform-specific baselines, but we can't infer which here.
- data: contents of the baseline.
- """
- self._filesystem.write_binary_file(baseline_path, data)

    def _path_from_chromium_base(self, *comps):
        return self._path_finder.path_from_chromium_base(*comps)

    def _perf_tests_dir(self):
        return self._path_finder.perf_tests_dir()

    def web_tests_dir(self):
        custom_web_tests_dir = self.get_option('layout_tests_directory')
        if custom_web_tests_dir:
            return self._filesystem.abspath(custom_web_tests_dir)
        return self._path_finder.web_tests_dir()

    def generic_baselines_dir(self):
        return self._filesystem.join(self.web_tests_dir(), "platform",
                                     "generic")

    def skips_test(self, test):
        """Checks whether the given test is skipped for this port.

        Returns True if:
        - the test is a manual test
        - the port runs smoke tests only and the test is not in the list
        - the test is marked as Skip in NeverFixTests
        - the test is a virtual test not intended to run on this platform.
        """
        return (self.is_manual_test(test)
                or self.skipped_due_to_smoke_tests(test)
                or self.skipped_in_never_fix_tests(test)
                or self.virtual_test_skipped_due_to_platform_config(test))

    @memoized
    def _tests_from_file(self, filename):
        tests = set()
        file_contents = self._filesystem.read_text_file(filename)
        for line in file_contents.splitlines():
            line = line.strip()
            if line.startswith('#') or not line:
                continue
            tests.add(line)
        return tests
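
    # Example (illustrative): the expected file format is one test name per
    # line, with blank lines and '#' comments ignored, e.g.
    #   # Smoke tests for CSS.
    #   fast/css/test.html
    #   external/wpt/dom/nodes/Node-cloneNode.html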

    def is_manual_test(self, test):
        """Returns True if the test is a WPT manual test, which is skipped."""
        return self.is_wpt_test(test) and '-manual.' in test

    def skipped_due_to_smoke_tests(self, test):
        """Checks if the test is skipped based on the set of smoke tests.

        Returns True if this port runs only smoke tests, and the test is not
        in the smoke tests file; returns False otherwise.
        """
        if not self.default_smoke_test_only():
            return False
        smoke_test_filename = self.path_to_smoke_tests_file()
        if not self._filesystem.exists(smoke_test_filename):
            return False
        smoke_tests = self._tests_from_file(smoke_test_filename)
        return test not in smoke_tests

    def path_to_smoke_tests_file(self):
        # Historically we had only one smoke tests list; that one is now the
        # default.
        return self._filesystem.join(self.web_tests_dir(), 'SmokeTests',
                                     'Default.txt')

    def skipped_in_never_fix_tests(self, test):
        """Checks if the test is marked as Skip in NeverFixTests for this port.

        A Skip in NeverFixTests indicates that we will never fix the failure
        and permanently skip the test. Only Skip lines are allowed in
        NeverFixTests. Some lines in NeverFixTests are platform-specific.

        Note: this will not work with skipped directories. See also the same
        issue with update_all_test_expectations_files in test_importer.py.
        """
        # Note: The parsing logic here (reading the file, constructing a
        # parser, etc.) is very similar to blinkpy/w3c/test_copier.py.
        path = self.path_to_never_fix_tests_file()
        contents = self._filesystem.read_text_file(path)
        test_expectations = TestExpectations(tags=self.get_platform_tags())
        test_expectations.parse_tagged_list(contents)
        return ResultType.Skip in test_expectations.expectations_for(
            test).results

    def path_to_never_fix_tests_file(self):
        return self._filesystem.join(self.web_tests_dir(), 'NeverFixTests')

    def virtual_test_skipped_due_to_platform_config(self, test):
        """Checks if the virtual test is skipped based on the platform config.

        Returns True if the virtual test is not intended to run on this port,
        due to the platform config in VirtualTestSuites; returns False
        otherwise.
        """
        suite = self._lookup_virtual_suite(test)
        if suite is not None:
            return self.operating_system() not in suite.platforms
        return False

    def name(self):
        """Returns a name that uniquely identifies this particular type of port.

        This is the full port name including both base port name and version,
        and can be passed to PortFactory.get() to instantiate a port.
        """
        return self._name

    def operating_system(self):
        raise NotImplementedError

    def version(self):
        """Returns a string indicating the version of a given platform.

        For example, "win10" or "trusty". This is used to help identify the
        exact port when parsing test expectations, determining search paths,
        and logging information.
        """
        return self._version

    def architecture(self):
        return self._architecture

    def python3_command(self):
        """Returns the correct command to use to run python3.

        This exists because Windows has inconsistent behavior between the bots
        and local developer machines, such that determining which python3 name
        to use is non-trivial. See https://crbug.com/1155616.

        Once blinkpy runs under python3, this can be removed in favour of
        callers using sys.executable.
        """
        if six.PY3:
            # Prefer sys.executable when the current script runs under
            # python3. The current script might be running with vpython3 and
            # in that case using the same executable will share the same
            # virtualenv.
            return sys.executable
        return 'python3'

    def get_option(self, name, default_value=None):
        return getattr(self._options, name, default_value)

    def set_option_default(self, name, default_value):
        return self._options.ensure_value(name, default_value)

    def relative_test_filename(self, filename):
        """Returns a Unix-style path for a filename relative to web_tests.

        Ports may legitimately return absolute paths here if no relative path
        makes sense.
        """
        # Ports that run on windows need to override this method to deal with
        # filenames with backslashes in them.
        if filename.startswith(self.web_tests_dir()):
            return self.host.filesystem.relpath(filename,
                                                self.web_tests_dir())
        else:
            return self.host.filesystem.abspath(filename)
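
    # Example (illustrative, assuming web_tests_dir() is
    # '/src/third_party/blink/web_tests'):
    #   relative_test_filename(
    #       '/src/third_party/blink/web_tests/fast/css/a.html')
    #   -> 'fast/css/a.html'
    # while a filename outside web_tests is returned as an absolute path.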

    @memoized
    def abspath_for_test(self, test_name):
        """Returns the full path to the file for a given test name.

        This is the inverse of relative_test_filename().
        """
        return self._filesystem.join(self.web_tests_dir(), test_name)

    @memoized
    def args_for_test(self, test_name):
        args = self._lookup_virtual_test_args(test_name)

        pac_url = self.extract_wpt_pac(test_name)
        if pac_url is not None:
            args.append("--proxy-pac-url=" + pac_url)

        tracing_categories = self.get_option('enable_tracing')
        if tracing_categories:
            args.append('--trace-startup=' + tracing_categories)
            # Do not finish the trace until the test is finished.
            args.append('--trace-startup-duration=0')
            # Append the current time to the output file name to ensure that
            # the subsequent repetitions of the test do not overwrite older
            # trace files.
            current_time = time.strftime("%Y-%m-%d-%H-%M-%S")
            file_name = 'trace_layout_test_{}_{}.json'.format(
                self._filesystem.sanitize_filename(test_name), current_time)
            args.append('--trace-startup-file=' + file_name)
        return args

    @memoized
    def name_for_test(self, test_name):
        test_base = self.lookup_virtual_test_base(test_name)
        if test_base and not self._filesystem.exists(
                self.abspath_for_test(test_name)):
            return test_base
        return test_name

    def bot_test_times_path(self):
        # TODO(crbug.com/1030434): For the not_site_per_process_blink_web_tests
        # step on linux, an exception is raised when merging the bot times json
        # files. This happens whenever they are outputted into the results
        # directory. Temporarily we will return the bot times json file
        # relative to the target directory.
        return self._build_path('webkit_test_times', 'bot_times_ms.json')

    def results_directory(self):
        """Returns the absolute path to the directory that stores all web test
        output files. It may include a subdirectory for artifacts, and it may
        store performance test results."""
        if not self._results_directory:
            option_val = self.get_option(
                'results_directory') or self.default_results_directory()
            assert not self._filesystem.basename(
                option_val) == 'layout-test-results', (
                    'crbug.com/1026494, crbug.com/1027708: The '
                    'layout-test-results sub directory should not be passed '
                    'as part of the --results-directory command line '
                    'argument.')
            self._results_directory = self._filesystem.abspath(option_val)
        return self._results_directory

    def artifacts_directory(self):
        """Returns the path to the artifacts subdirectory of the results
        directory. This directory stores test artifacts, which may include
        actual and expected output from web tests."""
        return self._filesystem.join(self.results_directory(),
                                     ARTIFACTS_SUB_DIR)

    def perf_results_directory(self):
        return self.results_directory()

    def inspector_build_directory(self):
        return self._build_path('gen', 'third_party', 'devtools-frontend',
                                'src', 'front_end')

    def generated_sources_directory(self):
        return self._build_path('gen')

    def apache_config_directory(self):
        return self._path_finder.path_from_blink_tools('apache_config')

    def default_results_directory(self):
        """Returns the absolute path to the build directory."""
        return self._build_path()

    @memoized
    def typ_host(self):
        return SerializableTypHost()

    def setup_test_run(self):
        """Performs port-specific work at the beginning of a test run."""
        # Delete the disk cache if any to ensure a clean test run.
        dump_render_tree_binary_path = self._path_to_driver()
        cachedir = self._filesystem.dirname(dump_render_tree_binary_path)
        cachedir = self._filesystem.join(cachedir, 'cache')
        if self._filesystem.exists(cachedir):
            self._filesystem.rmtree(cachedir)

        if self._dump_reader:
            self._filesystem.maybe_make_directory(
                self._dump_reader.crash_dumps_directory())

    def num_workers(self, requested_num_workers):
        """Returns the number of available workers (possibly less than the
        number requested)."""
        return requested_num_workers

    def clean_up_test_run(self):
        """Performs port-specific work at the end of a test run."""
        if self._image_differ:
            self._image_differ.stop()
            self._image_differ = None

    def setup_environ_for_server(self):
        # We intentionally copy only a subset of the environment when
        # launching subprocesses to ensure consistent test results.
        clean_env = {}
        variables_to_copy = [
            'CHROME_DEVEL_SANDBOX',
            'CHROME_IPC_LOGGING',
            'ASAN_OPTIONS',
            'TSAN_OPTIONS',
            'MSAN_OPTIONS',
            'LSAN_OPTIONS',
            'UBSAN_OPTIONS',
            'VALGRIND_LIB',
            'VALGRIND_LIB_INNER',
            'TMPDIR',
        ]
        if 'TMPDIR' not in self.host.environ:
            self.host.environ['TMPDIR'] = tempfile.gettempdir()
        # CGIs are run directory-relative so they need an absolute TMPDIR.
        self.host.environ['TMPDIR'] = self._filesystem.abspath(
            self.host.environ['TMPDIR'])
        if self.host.platform.is_linux() or self.host.platform.is_freebsd():
            variables_to_copy += [
                'XAUTHORITY', 'HOME', 'LANG', 'LD_LIBRARY_PATH',
                'DBUS_SESSION_BUS_ADDRESS', 'XDG_DATA_DIRS', 'XDG_RUNTIME_DIR'
            ]
            clean_env['DISPLAY'] = self.host.environ.get('DISPLAY', ':1')
        if self.host.platform.is_mac():
            variables_to_copy += [
                'HOME',
            ]
        if self.host.platform.is_win():
            variables_to_copy += [
                'PATH',
            ]

        for variable in variables_to_copy:
            if variable in self.host.environ:
                clean_env[variable] = self.host.environ[variable]

        for string_variable in self.get_option('additional_env_var', []):
            [name, value] = string_variable.split('=', 1)
            clean_env[name] = value

        if self.host.platform.is_linux() and not self.use_system_httpd():
            # Set up LD_LIBRARY_PATH when we are using httpd built from 3pp.
            path_to_libs = self._filesystem.join(self.apache_server_root(),
                                                 'lib')
            if clean_env.get('LD_LIBRARY_PATH'):
                clean_env['LD_LIBRARY_PATH'] = (
                    path_to_libs + ':' + clean_env['LD_LIBRARY_PATH'])
            else:
                clean_env['LD_LIBRARY_PATH'] = path_to_libs

        return clean_env
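
    # Example (illustrative, assuming the corresponding command-line flag is
    # --additional-env-var): each value is a 'NAME=VALUE' string split on the
    # first '=', so '--additional-env-var=LANG=en_US.UTF-8' places LANG into
    # the clean server environment built above.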

    def show_results_html_file(self, results_filename):
        """Displays the given HTML file in a user's browser."""
        return self.host.user.open_url(
            abspath_to_uri(self.host.platform, results_filename))

    def create_driver(self, worker_number, no_timeout=False):
        """Returns a newly created Driver subclass for starting/stopping the
        test driver.
        """
        return self._driver_class()(self, worker_number, no_timeout=no_timeout)

    def requires_http_server(self):
        # Does the port require an HTTP server for running tests? This could
        # be the case when the tests aren't run on the host platform.
        return False

    def start_http_server(self,
                          additional_dirs,
                          number_of_drivers,
                          output_dir=''):
        """Start a web server. Raise an error if it can't start or is already
        running.

        Ports can stub this out if they don't need a web server to be running.
        """
        assert not self._http_server, 'Already running an http server.'
        output_dir = output_dir or self.artifacts_directory()

        server = apache_http.ApacheHTTP(
            self,
            output_dir,
            additional_dirs=additional_dirs,
            number_of_servers=(number_of_drivers * 4))
        server.start()
        self._http_server = server

    def start_websocket_server(self, output_dir=''):
        """Start a websocket server. Raise an error if it can't start or is
        already running.

        Ports can stub this out if they don't need a websocket server to be
        running.
        """
        assert not self._websocket_server, 'Already running a websocket server.'
        output_dir = output_dir or self.artifacts_directory()

        server = pywebsocket.PyWebSocket(
            self,
            output_dir,
            python_executable=self._options.python_executable)
        server.start()
        self._websocket_server = server

    @staticmethod
    def is_wpt_test(test):
        """Whether a test is considered a web-platform-tests test."""
        return Port.WPT_REGEX.match(test)

    @staticmethod
    def is_wpt_idlharness_test(test_file):
        """Returns whether a WPT test is (probably) an idlharness test.

        There are no rules in WPT that can be used to identify idlharness
        tests without examining the file contents (which would be expensive).
        This method utilizes a filename heuristic, based on the convention of
        including 'idlharness' in the appropriate test names.
        """
        match = Port.WPT_REGEX.match(test_file)
        if not match:
            return False
        filename = match.group(2).split('/')[-1]
        return 'idlharness' in filename
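
    # Example (illustrative, assuming WPT_REGEX matches paths under
    # external/wpt/ and wpt_internal/ and captures the in-WPT path as
    # group 2):
    #   is_wpt_idlharness_test('external/wpt/webusb/idlharness.https.any.js')
    #   -> True, while a non-WPT path or a filename without 'idlharness'
    #   returns False.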

    @staticmethod
    def should_use_wptserve(test):
        return Port.is_wpt_test(test)

    def start_wptserve(self, output_dir=''):
        """Starts a WPT web server.

        Raises an error if it can't start or is already running.
        """
        assert not self._wpt_server, 'Already running a WPT server.'
        output_dir = output_dir or self.artifacts_directory()
        # We currently don't support any output mechanism for the WPT server.
        server = wptserve.WPTServe(self, output_dir)
        server.start()
        self._wpt_server = server

    def stop_wptserve(self):
        """Shuts down the WPT server if it is running."""
        if self._wpt_server:
            self._wpt_server.stop()
            self._wpt_server = None

    def http_server_requires_http_protocol_options_unsafe(self):
        httpd_path = self.path_to_apache()
        intentional_syntax_error = 'INTENTIONAL_SYNTAX_ERROR'
        # yapf: disable
        cmd = [
            httpd_path,
            '-t',
            '-f', self.path_to_apache_config_file(),
            '-C', 'ServerRoot "%s"' % self.apache_server_root(),
            '-C', 'HttpProtocolOptions Unsafe',
            '-C', intentional_syntax_error
        ]
        # yapf: enable
        env = self.setup_environ_for_server()

        def error_handler(err):
            pass

        output = self._executive.run_command(
            cmd, env=env, error_handler=error_handler)
        # If apache complains about the intentional error, it apparently
        # accepted the HttpProtocolOptions directive, and we should add it.
        return intentional_syntax_error in output

    def http_server_supports_ipv6(self):
        # Apache < 2.4 on win32 does not support IPv6.
        return not self.host.platform.is_win()

    def stop_http_server(self):
        """Shuts down the http server if it is running."""
        if self._http_server:
            self._http_server.stop()
            self._http_server = None

    def stop_websocket_server(self):
        """Shuts down the websocket server if it is running."""
        if self._websocket_server:
            self._websocket_server.stop()
            self._websocket_server = None

    #
    # TEST EXPECTATION-RELATED METHODS
    #

    def test_configuration(self):
        """Returns the current TestConfiguration for the port."""
        if not self._test_configuration:
            self._test_configuration = TestConfiguration(
                self._version, self._architecture,
                self._options.configuration.lower())
        return self._test_configuration

    # FIXME: Belongs on a Platform object.
    @memoized
    def all_test_configurations(self):
        """Returns a list of TestConfiguration instances, representing all
        available test configurations for this port.
        """
        return self._generate_all_test_configurations()

    # FIXME: Belongs on a Platform object.
    def configuration_specifier_macros(self):
        """Ports may provide a way to abbreviate configuration specifiers to
        conveniently refer to them as one term or alias specific values to
        more generic ones. For example:

        (win10, win11) -> win  # Abbreviate all Windows versions into one namesake.
        (precise, trusty) -> linux  # Change specific name of Linux distro to a more generic term.

        Returns a dictionary, each key representing a macro term ('win', for
        example), and value being a list of valid configuration specifiers
        (such as ['win10', 'win11']).
        """
        return self.CONFIGURATION_SPECIFIER_MACROS

    def _generate_all_test_configurations(self):
        """Returns a sequence of the TestConfigurations the port supports."""
        # By default, we assume we want to test every graphics type in
        # every configuration on every system.
        test_configurations = []
        for version, architecture in self.ALL_SYSTEMS:
            for build_type in self.ALL_BUILD_TYPES:
                test_configurations.append(
                    TestConfiguration(version, architecture, build_type))
        return test_configurations
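
    # Example (illustrative, assuming ALL_SYSTEMS contains ('win10', 'x86')
    # and ALL_BUILD_TYPES contains 'debug' and 'release'): the result would
    # include TestConfiguration('win10', 'x86', 'debug') and
    # TestConfiguration('win10', 'x86', 'release'), i.e. the cross product of
    # systems and build types.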

    def _flag_specific_expectations_path(self):
        config_name = self.flag_specific_config_name()
        if config_name:
            return self.path_to_flag_specific_expectations_file(config_name)

    def _flag_specific_baseline_search_path(self):
        dir = self.baseline_flag_specific_dir()
        return [dir] if dir else []

    def expectations_dict(self):
        """Returns an OrderedDict of name -> expectations strings.

        The names are expected to be (but not required to be) paths in the
        filesystem. If the name is a path, the file can be considered
        updatable for things like rebaselining, so don't use names that are
        paths if they're not paths.

        Generally speaking the ordering should be files in the filesystem in
        cascade order (TestExpectations followed by Skipped, if the port
        honors both formats), then any built-in expectations (e.g., from
        compile-time exclusions), then --additional-expectations options.
        """
        # FIXME: rename this to test_expectations() once all the callers are
        # updated to know about the ordered dict.
        expectations = collections.OrderedDict()
        default_expectations_files = set(self.default_expectations_files())
        ignore_default = self.get_option('ignore_default_expectations', False)
        for path in self.used_expectations_files():
            is_default = path in default_expectations_files
            if ignore_default and is_default:
                continue
            path_exists = self._filesystem.exists(path)
            if is_default:
                if path_exists:
                    expectations[path] = self._filesystem.read_text_file(path)
            else:
                if path_exists:
                    _log.debug(
                        "reading additional_expectations from path '%s'",
                        path)
                    expectations[path] = self._filesystem.read_text_file(path)
                else:
                    # TODO(rmhasan): Fix additional expectation paths for
                    # not_site_per_process_blink_web_tests, then change this
                    # back to raising exceptions for incorrect expectation
                    # paths.
                    _log.warning(
                        "additional_expectations path '%s' does not exist",
                        path)
        return expectations

    def all_expectations_dict(self):
        """Returns an OrderedDict of name -> expectations strings."""
        expectations = self.expectations_dict()

        flag_path = self._filesystem.join(self.web_tests_dir(),
                                          self.FLAG_EXPECTATIONS_PREFIX)
        if not self._filesystem.exists(flag_path):
            return expectations

        for (_, _, filenames) in self._filesystem.walk(flag_path):
            if 'README.txt' in filenames:
                filenames.remove('README.txt')
            if 'PRESUBMIT.py' in filenames:
                filenames.remove('PRESUBMIT.py')
            for filename in filenames:
                path = self._filesystem.join(flag_path, filename)
                try:
                    expectations[path] = self._filesystem.read_text_file(path)
                except UnicodeDecodeError:
                    _log.error('Failed to read expectations file: \'%s\'',
                               path)
                    raise

        return expectations

    def bot_expectations(self):
        if not self.get_option('ignore_flaky_tests'):
            return {}

        full_port_name = self.determine_full_port_name(
            self.host, self._options, self.port_name)
        builder_category = self.get_option('ignore_builder_category',
                                           'layout')
        step_names = ['blink_web_tests', 'blink_wpt_tests']
        retval = {}
        for step_name in step_names:
            factory = BotTestExpectationsFactory(self.host.builders,
                                                 step_name)
            # FIXME: This only grabs the release builders' flakiness data. If
            # we're running debug, we should grab the debug builders' data.
            expectations = factory.expectations_for_port(full_port_name,
                                                         builder_category)
            if not expectations:
                continue

            ignore_mode = self.get_option('ignore_flaky_tests')
            if ignore_mode == 'very-flaky' or ignore_mode == 'maybe-flaky':
                retval.update(
                    expectations.flakes_by_path(ignore_mode == 'very-flaky'))
            elif ignore_mode == 'unexpected':
                retval.update(expectations.unexpected_results_by_path())
            else:
                _log.warning("Unexpected ignore mode: '%s'.", ignore_mode)
        return retval

    def default_expectations_files(self):
        """Returns a list of paths to expectations files that apply by default.

        There are other "test expectations" files that may be applied if
        the --additional-expectations flag is passed; those aren't included
        here.
        """
        return filter(None, [
            self.path_to_generic_test_expectations_file(),
            self.path_to_webdriver_expectations_file(),
            self._filesystem.join(self.web_tests_dir(), 'NeverFixTests'),
            self._filesystem.join(self.web_tests_dir(),
                                  'StaleTestExpectations'),
            self._filesystem.join(self.web_tests_dir(), 'SlowTests')
        ])

    def used_expectations_files(self):
        """Returns a list of paths to expectation files that are used."""
        if self._used_expectation_files is None:
            self._used_expectation_files = list(
                self.default_expectations_files())
            flag_specific = self._flag_specific_expectations_path()
            if flag_specific:
                self._used_expectation_files.append(flag_specific)
            for path in self.get_option('additional_expectations', []):
                expanded_path = self._filesystem.expanduser(path)
                abs_path = self._filesystem.abspath(expanded_path)
                self._used_expectation_files.append(abs_path)
        return self._used_expectation_files

    def extra_expectations_files(self):
        """Returns a list of paths to test expectations not loaded by default.

        These paths are passed via --additional-expectations on some builders.
        """
        return [
            self._filesystem.join(self.web_tests_dir(), 'ASANExpectations'),
            self._filesystem.join(self.web_tests_dir(), 'LeakExpectations'),
            self._filesystem.join(self.web_tests_dir(), 'MSANExpectations'),
        ]

    @memoized
    def path_to_generic_test_expectations_file(self):
        return self._filesystem.join(self.web_tests_dir(), 'TestExpectations')

    @memoized
    def path_to_webdriver_expectations_file(self):
        return self._filesystem.join(self.web_tests_dir(),
                                     'WebDriverExpectations')

    def path_to_flag_specific_expectations_file(self, flag_specific):
        return self._filesystem.join(self.web_tests_dir(),
                                     self.FLAG_EXPECTATIONS_PREFIX,
                                     flag_specific)

    def repository_path(self):
        """Returns the repository path for the chromium code base."""
        return self._path_from_chromium_base('build')

    def default_configuration(self):
        return 'Release'

    def clobber_old_port_specific_results(self):
        pass

    def use_system_httpd(self):
        # We use the system httpd on linux-arm64 and BSD.
        return False

    # FIXME: This does not belong on the port object.
    @memoized
    def path_to_apache(self):
        """Returns the full path to the apache binary.

        This is needed only by ports that use the apache_http_server module.
        """
        raise NotImplementedError('Port.path_to_apache')

    def apache_server_root(self):
        """Returns the root that the apache binary is installed to.

        This is used for the ServerRoot directive.
        """
        executable = self.path_to_apache()
        return self._filesystem.dirname(self._filesystem.dirname(executable))

    def path_to_apache_config_file(self):
        """Returns the full path to the apache configuration file.

        If the WEBKIT_HTTP_SERVER_CONF_PATH environment variable is set, the
        path it specifies will be used instead.

        This is needed only by ports that use the apache_http_server module.
        """
        config_file_from_env = self.host.environ.get(
            'WEBKIT_HTTP_SERVER_CONF_PATH')
        if config_file_from_env:
            if not self._filesystem.exists(config_file_from_env):
                raise IOError(
                    '%s was not found on the system' % config_file_from_env)
            return config_file_from_env

        config_file_name = self._apache_config_file_name_for_platform()
        return self._filesystem.join(self.apache_config_directory(),
                                     config_file_name)

    def _apache_version(self):
        env = self.setup_environ_for_server()
        config = self._executive.run_command([self.path_to_apache(), '-v'],
                                             env=env)
        # Log version including patch level.
        _log.debug(
            'Found apache version %s',
            re.sub(
                r'(?:.|\n)*Server version: Apache/(\d+\.\d+(?:\.\d+)?)(?:.|\n)*',
                r'\1', config))
        return re.sub(r'(?:.|\n)*Server version: Apache/(\d+\.\d+)(?:.|\n)*',
                      r'\1', config)
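
    # Example (illustrative): given 'httpd -v' output containing
    # 'Server version: Apache/2.4.52 (Ubuntu)', the debug log reports the
    # full '2.4.52' while the method returns only the major.minor string
    # '2.4'.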

    def _apache_config_file_name_for_platform(self):
        # Keep the logic that uses the apache version even though we only
        # have a configuration file for 2.4 now, in case we add a newer
        # version in the future.
        return 'apache2-httpd-' + self._apache_version() + '-php7.conf'

    def _path_to_driver(self, target=None):
        """Returns the full path to the test driver."""
        return self._build_path(target, self.driver_name())

    def _path_to_image_diff(self):
        """Returns the full path to the image_diff binary, or None if it is
        not available.

        This is likely used only by diff_image().
        """
        return self._build_path('image_diff')

    def _absolute_baseline_path(self, platform_dir):
        """Return the absolute path to the top of the baseline tree for a
        given platform directory.
        """
        return self._filesystem.join(self.web_tests_dir(), 'platform',
                                     platform_dir)

    def _driver_class(self):
        """Returns the port's driver implementation."""
        return driver.Driver

    def output_contains_sanitizer_messages(self, output):
        if not output:
            return None
        if (b'AddressSanitizer' in output) or (b'MemorySanitizer' in output):
            return True
        return False

    def _get_crash_log(self, name, pid, stdout, stderr, newer_than):
        if self.output_contains_sanitizer_messages(stderr):
            # Running the symbolizer script can take a lot of memory, so we
            # need to serialize access to it across all the concurrently
            # running drivers.
            llvm_symbolizer_path = self._path_from_chromium_base(
                'third_party', 'llvm-build', 'Release+Asserts', 'bin',
                'llvm-symbolizer')
            if self._filesystem.exists(llvm_symbolizer_path):
                env = self.host.environ.copy()
                env['LLVM_SYMBOLIZER_PATH'] = llvm_symbolizer_path
            else:
                env = None
            sanitizer_filter_path = self._path_from_chromium_base(
                'tools', 'valgrind', 'asan', 'asan_symbolize.py')
            sanitizer_strip_path_prefix = 'Release/../../'
            if self._filesystem.exists(sanitizer_filter_path):
                stderr = self._executive.run_command(
                    ['flock', sys.executable, sanitizer_filter_path,
                     sanitizer_strip_path_prefix],
                    input=stderr,
                    decode_output=False,
                    env=env)

        name_str = name or '<unknown process name>'
        pid_str = str(pid or '<unknown>')

        # We require stdout and stderr to be bytestrings, not character
        # strings.
        if stdout:
            stdout_lines = stdout.decode('utf8', 'replace').splitlines()
        else:
            stdout_lines = [u'<empty>']
        if stderr:
            stderr_lines = stderr.decode('utf8', 'replace').splitlines()
        else:
            stderr_lines = [u'<empty>']

        return (stderr,
                ('crash log for %s (pid %s):\n%s\n%s\n' %
                 (name_str, pid_str, '\n'.join(
                     ('STDOUT: ' + l) for l in stdout_lines), '\n'.join(
                         ('STDERR: ' + l)
                         for l in stderr_lines))).encode('utf8', 'replace'),
                self._get_crash_site(stderr_lines))

    def _get_crash_site(self, stderr_lines):
        # [blah:blah:blah:FATAL:
        prefix_re = r'\[[\w:/.]*FATAL:'
        # crash_file.ext(line)
        site_re = r'(?P<site>[\w_]*\.[\w_]*\(\d*\))'
        # ] blah failed
        suffix_re = r'\]\s*(Check failed|Security DCHECK failed)'
        pattern = re.compile(prefix_re + site_re + suffix_re)
        for line in stderr_lines:
            match = pattern.search(line)
            if match:
                return match.group('site')
        return None
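
    # Example (illustrative): a stderr line such as
    #   [1234:5678:0102/030405.678901:FATAL:render_frame_impl.cc(123)] Check failed: ...
    # matches the pattern above and yields the crash site
    # 'render_frame_impl.cc(123)'.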

    def look_for_new_crash_logs(self, crashed_processes, start_time):
        pass

    def look_for_new_samples(self, unresponsive_processes, start_time):
        pass

    def sample_process(self, name, pid):
        pass

    def virtual_test_suites(self):
        if self._virtual_test_suites is None:
            path_to_virtual_test_suites = self._filesystem.join(
                self.web_tests_dir(), 'VirtualTestSuites')
            assert self._filesystem.exists(path_to_virtual_test_suites), \
                path_to_virtual_test_suites + ' not found'
            try:
                test_suite_json = json.loads(
                    self._filesystem.read_text_file(
                        path_to_virtual_test_suites))
                self._virtual_test_suites = []
                for json_config in test_suite_json:
                    vts = VirtualTestSuite(**json_config)
                    if any(vts.full_prefix == s.full_prefix
                           for s in self._virtual_test_suites):
                        raise ValueError(
                            '{} contains entries with the same prefix: {!r}. '
                            'Please combine them'.format(
                                path_to_virtual_test_suites, json_config))
                    self._virtual_test_suites.append(vts)
            except ValueError as error:
                raise ValueError('{} is not a valid JSON file: {}'.format(
                    path_to_virtual_test_suites, error))
        return self._virtual_test_suites

    def _all_virtual_tests(self, tests_by_dir):
        tests = []
        for suite in self.virtual_test_suites():
            if suite.bases:
                tests.extend(
                    map(lambda x: suite.full_prefix + x,
                        self.real_tests_from_dict(suite.bases, tests_by_dir)))
        return tests

    def _get_bases_for_suite_with_paths(self, suite, paths):
        """Returns the bases of the virtual suite that are referenced by paths.

        E.g. given a virtual test suite `foo` with the following bases:
            bar/baz
            bar/quu
            qux
        and given paths of [virtual/foo/bar], this method would return
        [bar/baz, bar/quu].
        Given paths of [virtual/foo/bar/baz/test.html], the return would be
        [bar/baz].
        """
        real_paths = [
            p.replace(suite.full_prefix, '', 1) for p in paths
            if p.startswith(suite.full_prefix)
        ]

        # Test for paths that are under the suite's bases, so that we don't
        # run a non-existent test.
        bases = set()
        for real_path in real_paths:
            for base in suite.bases:
                if real_path.startswith(base) or base.startswith(real_path):
                    bases.add(base)
        return list(bases)

    def _virtual_tests_for_suite_with_paths(self, suite, paths):
        if not suite.bases:
            return []

        bases = self._get_bases_for_suite_with_paths(suite, paths)
        if not bases:
            return []

        tests = []
        tests.extend(
            map(lambda x: suite.full_prefix + x, self.real_tests(bases)))

        wpt_bases = []
        for base in bases:
            if any(base.startswith(wpt_dir) for wpt_dir in self.WPT_DIRS):
                wpt_bases.append(base)
        if wpt_bases:
            tests.extend(
                self._wpt_test_urls_matching_paths(
                    wpt_bases, [suite.full_prefix] * len(wpt_bases)))
        return tests

    def _virtual_tests_matching_paths(self, paths):
        tests = []
        normalized_paths = [self.normalize_test_name(p) for p in paths]
        for suite in self.virtual_test_suites():
            virtual_paths = [
                p for p in normalized_paths if p.startswith(suite.full_prefix)
            ]
            if not virtual_paths:
                continue
            for test in self._virtual_tests_for_suite_with_paths(
                    suite, virtual_paths):
                if any(test.startswith(p) for p in normalized_paths):
                    tests.append(test)

        if any(self._path_has_wildcard(path) for path in paths):
            _log.warning('WARNING: Wildcards in paths are not supported for '
                         'virtual test suites.')
        return tests

    def _path_has_wildcard(self, path):
        return '*' in path

    def _wpt_test_urls_matching_paths(self, filter_paths, virtual_prefixes=[]):
        """Returns a list of paths that are tests to be run from the
        web-platform-test manifest files.

        filter_paths: A list of strings that are prefix matched against the
            list of tests in the WPT manifests. Only tests that match are
            returned.
        virtual_prefixes: A list of prefixes corresponding to paths in
            |filter_paths|. If present, each test path output should have its
            virtual prefix prepended to the resulting path to the test.
        """
        # Generate the manifest files if needed and then read them. Do this
        # once for this whole method as the file is large and
        # generation/loading is slow.
        wpts = [(wpt_path, self.wpt_manifest(wpt_path))
                for wpt_path in self.WPT_DIRS]

        tests = []
        # This walks through the set of paths where we should look for tests.
        # For each path, a map can be provided that we replace 'path' with in
        # the result.
        for filter_path, virtual_prefix in zip_longest(filter_paths,
                                                       virtual_prefixes):
            # This is to make sure "external[\\/]?" can also match to
            # external/wpt.
            # TODO(robertma): Remove this special case when external/wpt is
            # moved to wpt.
            if filter_path.rstrip('\\/').endswith('external'):
                filter_path = self._filesystem.join(filter_path, 'wpt')
            # '/' is used throughout this function instead of filesystem.sep
            # as the WPT manifest always uses '/' for paths (it is not OS
            # dependent).
            if self._filesystem.sep != '/':
                filter_path = filter_path.replace(self._filesystem.sep, '/')
            # Drop empty path components.
            filter_path = filter_path.replace('//', '/')

            # We now have in |filter_path| a path to an actual test directory
            # or file on disk, in unix format, relative to the root of the
            # web_tests directory.

            for wpt_path, wpt_manifest in wpts:
                # If the |filter_path| is not inside a WPT dir, then we will
                # match no tests in the manifest.
                if not filter_path.startswith(wpt_path):
                    continue
                # Drop the WPT prefix (including the joining '/') from
                # |filter_path|.
                filter_path_from_wpt = filter_path[len(wpt_path) + 1:]
                # An empty filter matches everything.
                if filter_path_from_wpt:
                    # If the filter is to a specific test file that ends with
                    # .js, we match that against tests with any extension by
                    # dropping the extension from the filter.
                    #
                    # Else, when matching a directory, ensure the filter ends
                    # in '/' to only match the exact directory name and not
                    # directories with the filter as a prefix.
                    if wpt_manifest.is_test_file(filter_path_from_wpt):
                        filter_path_from_wpt = re.sub(r'\.js$', '.',
                                                      filter_path_from_wpt)
                    elif not wpt_manifest.is_test_url(filter_path_from_wpt):
                        filter_path_from_wpt = filter_path_from_wpt.rstrip(
                            '/') + '/'

                # We now have a path to an actual test directory or file on
                # disk, in unix format, relative to the WPT directory.
                #
                # Look for all tests in the manifest that are under the
                # relative |filter_path_from_wpt|.
                for test_path_from_wpt in wpt_manifest.all_urls():
                    assert not test_path_from_wpt.startswith('/')
                    assert not test_path_from_wpt.endswith('/')
                    # Drop empty path components.
                    test_path_from_wpt = test_path_from_wpt.replace('//', '/')
                    if test_path_from_wpt.startswith(filter_path_from_wpt):
                        # The result is a test path from the root web test
                        # directory. If a |virtual_prefix| was given, we
                        # prepend that to the result.
                        prefix = virtual_prefix if virtual_prefix else ''
                        tests.append(prefix + wpt_path + '/' +
                                     test_path_from_wpt)
        return tests

    def _lookup_virtual_suite(self, test_name):
        if not test_name.startswith('virtual/'):
            return None
        for suite in self.virtual_test_suites():
            if test_name.startswith(suite.full_prefix):
                return suite
        return None

    def lookup_virtual_test_base(self, test_name):
        suite = self._lookup_virtual_suite(test_name)
        if not suite:
            return None
        assert test_name.startswith(suite.full_prefix)
        maybe_base = self.normalize_test_name(
            test_name[len(suite.full_prefix):])
        for base in suite.bases:
            normalized_base = self.normalize_test_name(base)
            if normalized_base.startswith(maybe_base) or maybe_base.startswith(
                    normalized_base):
                return maybe_base
        return None
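
    # Example (illustrative): assuming a suite with full_prefix
    # 'virtual/foo/' and a base directory of 'fast/css', then
    # lookup_virtual_test_base('virtual/foo/fast/css/test.html')
    # -> 'fast/css/test.html', while a virtual name that matches no base
    # returns None.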

    def _lookup_virtual_test_args(self, test_name):
        normalized_test_name = self.normalize_test_name(test_name)
        for suite in self.virtual_test_suites():
            if normalized_test_name.startswith(suite.full_prefix):
                return suite.args
        return []

    def _build_path(self, *comps):
        """Returns a path from the build directory."""
        return self._build_path_with_target(self._options.target, *comps)

    def _build_path_with_target(self, target, *comps):
        target = target or self.get_option('target')
        return self._filesystem.join(
            self._path_from_chromium_base(),
            self.get_option('build_directory') or 'out', target, *comps)
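
    # Example (illustrative): with a target of 'Release' and no
    # build_directory option set, _build_path('content_shell') resolves to
    # <chromium base>/out/Release/content_shell.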

    def _check_driver_build_up_to_date(self, target):
        # FIXME: We should probably get rid of this check altogether as it has
        # outlived its usefulness in a GN-based world, but for the moment we
        # will just check things if they are using the standard Debug or
        # Release target directories.
        if target not in ('Debug', 'Release'):
            return True

        try:
            debug_path = self._path_to_driver('Debug')
            release_path = self._path_to_driver('Release')

            debug_mtime = self._filesystem.mtime(debug_path)
            release_mtime = self._filesystem.mtime(release_path)

            if (debug_mtime > release_mtime and target == 'Release'
                    or release_mtime > debug_mtime and target == 'Debug'):
                most_recent_binary = 'Release' if target == 'Debug' else 'Debug'
                _log.warning(
                    'You are running the %s binary. However the %s binary '
                    'appears to be more recent. Please pass --%s.', target,
                    most_recent_binary, most_recent_binary.lower())
                _log.warning('')
        # This will fail if we don't have both a debug and release binary.
        # That's fine because, in this case, we must already be running the
        # most up-to-date one.
        except OSError:
            pass
        return True

    def _get_font_files(self):
        """Returns the list of font files that should be used by the test."""
        # TODO(sergeyu): Currently FONT_FILES is valid only on Linux. Make it
        # usable on other platforms if necessary.
        result = []
        for (font_dirs, font_file, package) in FONT_FILES:
            exists = False
            for font_dir in font_dirs:
                font_path = self._filesystem.join(font_dir, font_file)
                if not self._filesystem.isabs(font_path):
                    font_path = self._build_path(font_path)
                if self._check_file_exists(font_path, '', more_logging=False):
                    result.append(font_path)
                    exists = True
                    break
            if not exists:
                message = 'You are missing %s under %s.' % (font_file,
                                                            font_dirs)
                if package:
                    message += (' Try installing %s. See build instructions.'
                                % package)
                _log.error(message)
                raise TestRunException(exit_codes.SYS_DEPS_EXIT_STATUS,
                                       message)
        return result

    @staticmethod
    def split_webdriver_test_name(test_name):
        """Splits a WebDriver test name into a filename and a subtest name and
        returns both of them. E.g.

        test.py>>foo.html -> (test.py, foo.html)
        test.py -> (test.py, None)
        """
        separator_index = test_name.find(Port.WEBDRIVER_SUBTEST_SEPARATOR)
        if separator_index == -1:
            return (test_name, None)

        webdriver_test_name = test_name[:separator_index]
        separator_len = len(Port.WEBDRIVER_SUBTEST_SEPARATOR)
        subtest_suffix = test_name[separator_index + separator_len:]
        return (webdriver_test_name, subtest_suffix)

    @staticmethod
    def add_webdriver_subtest_suffix(test_name, subtest_name):
        """Appends a subtest name to a WebDriver test name. E.g.

        (test.py, foo.html) -> test.py>>foo.html
        (test.py, None) -> test.py
        """
        if subtest_name:
            return test_name + Port.WEBDRIVER_SUBTEST_SEPARATOR + subtest_name
        return test_name

    @staticmethod
    def split_webdriver_subtest_pytest_name(test_name):
        """Splits a WebDriver test name in pytest format into a filename and a
        subtest name and returns both of them. E.g.

        test.py::foo.html -> (test.py, foo.html)
        test.py -> (test.py, None)
        """
        names_after_split = test_name.split(
            Port.WEBDRIVER_SUBTEST_PYTEST_SEPARATOR)
        assert len(names_after_split) <= 2, \
            "%s has a length greater than 2 after split by ::" % (test_name)
        if len(names_after_split) == 1:
            return (names_after_split[0], None)
        return (names_after_split[0], names_after_split[1])

    @staticmethod
    def add_webdriver_subtest_pytest_suffix(test_name, subtest_name):
        if subtest_name is None:
            return test_name
        return test_name + Port.WEBDRIVER_SUBTEST_PYTEST_SEPARATOR + subtest_name
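
    # Example (illustrative): this is the inverse of
    # split_webdriver_subtest_pytest_name(), e.g.
    # add_webdriver_subtest_pytest_suffix('test.py', 'foo.html')
    # -> 'test.py::foo.html', while a None subtest leaves the name unchanged.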


class VirtualTestSuite(object):
    def __init__(self, prefix=None, platforms=None, bases=None, args=None):
        assert VALID_FILE_NAME_REGEX.match(prefix), \
            "Virtual test suite prefix '{}' contains invalid characters".format(prefix)
        assert isinstance(platforms, list)
        assert isinstance(bases, list)
        assert args
        assert isinstance(args, list)
        self.full_prefix = 'virtual/' + prefix + '/'
        self.platforms = [x.lower() for x in platforms]
        self.bases = bases
        self.args = args

    def __repr__(self):
        return "VirtualTestSuite('%s', %s, %s, %s)" % (self.full_prefix,
                                                       self.platforms,
                                                       self.bases, self.args)
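
# Example (illustrative): a VirtualTestSuites JSON entry such as
#   {"prefix": "foo", "platforms": ["Linux", "Mac"],
#    "bases": ["fast/css"], "args": ["--enable-features=Foo"]}
# is expanded via VirtualTestSuite(**json_config) into a suite with
# full_prefix 'virtual/foo/', so its tests run as virtual/foo/fast/css/...
# with the extra command-line args. The args value here is a hypothetical
# flag for illustration only.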