Update web-platform-tests to revision 097043b336e46876e281ddec3bb014fe9c480128

This commit is contained in:
WPT Sync Bot 2019-08-03 10:25:42 +00:00
parent ecd32570c0
commit b68253eac0
405 changed files with 9164 additions and 3050 deletions

View file

@ -1,2 +1 @@
mozprocess==1.0.0
selenium==3.141.0

View file

@ -2,18 +2,18 @@ import subprocess
from .base import Browser, ExecutorBrowser, require_arg
from .base import get_timeout_multiplier # noqa: F401
from .chrome import executor_kwargs as chrome_executor_kwargs
from ..webdriver_server import ChromeDriverServer
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorselenium import (SeleniumTestharnessExecutor, # noqa: F401
SeleniumRefTestExecutor) # noqa: F401
from ..executors.executorwebdriver import (WebDriverTestharnessExecutor, # noqa: F401
WebDriverRefTestExecutor) # noqa: F401
from ..executors.executorchrome import ChromeDriverWdspecExecutor # noqa: F401
__wptrunner__ = {"product": "chrome_android",
"check_args": "check_args",
"browser": "ChromeAndroidBrowser",
"executor": {"testharness": "SeleniumTestharnessExecutor",
"reftest": "SeleniumRefTestExecutor",
"executor": {"testharness": "WebDriverTestharnessExecutor",
"reftest": "WebDriverRefTestExecutor",
"wdspec": "ChromeDriverWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
@ -25,42 +25,35 @@ _wptserve_ports = set()
def check_args(**kwargs):
    """Validate required command-line arguments for the chrome_android product.

    Delegates to ``require_arg``; both "package_name" and
    "webdriver_binary" must be present in *kwargs*.
    """
    require_arg(kwargs, "package_name")
    require_arg(kwargs, "webdriver_binary")
def browser_kwargs(test_type, run_info_data, config, **kwargs):
return {"binary": kwargs["binary"],
return {"package_name": kwargs["package_name"],
"webdriver_binary": kwargs["webdriver_binary"],
"webdriver_args": kwargs.get("webdriver_args")}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
from selenium.webdriver import DesiredCapabilities
# Use extend() to modify the global list in place.
# Use update() to modify the global set in place.
_wptserve_ports.update(set(
server_config['ports']['http'] + server_config['ports']['https'] +
server_config['ports']['ws'] + server_config['ports']['wss']
))
executor_kwargs = base_executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs)
executor_kwargs["close_after_done"] = True
capabilities = dict(DesiredCapabilities.CHROME.items())
capabilities["goog:chromeOptions"] = {}
# TODO(chrome): browser_channel should be properly supported.
package_name = "com.android.chrome" # stable channel
# Required to start on mobile
capabilities["goog:chromeOptions"]["androidPackage"] = package_name
executor_kwargs = chrome_executor_kwargs(test_type, server_config,
cache_manager, run_info_data,
**kwargs)
# Remove unsupported options on mobile.
del executor_kwargs["capabilities"]["goog:chromeOptions"]["prefs"]
del executor_kwargs["capabilities"]["goog:chromeOptions"]["useAutomationExtension"]
assert kwargs["package_name"], "missing --package-name"
executor_kwargs["capabilities"]["goog:chromeOptions"]["androidPackage"] = \
kwargs["package_name"]
for (kwarg, capability) in [("binary", "binary"), ("binary_args", "args")]:
if kwargs[kwarg] is not None:
capabilities["goog:chromeOptions"][capability] = kwargs[kwarg]
if test_type == "testharness":
capabilities["useAutomationExtension"] = False
capabilities["excludeSwitches"] = ["enable-automation"]
executor_kwargs["capabilities"] = capabilities
return executor_kwargs
@ -77,12 +70,10 @@ class ChromeAndroidBrowser(Browser):
``wptrunner.webdriver.ChromeDriverServer``.
"""
def __init__(self, logger, binary, webdriver_binary="chromedriver",
def __init__(self, logger, package_name, webdriver_binary="chromedriver",
webdriver_args=None):
"""Creates a new representation of Chrome. The `binary` argument gives
the browser binary to use for testing."""
Browser.__init__(self, logger)
self.binary = binary
self.package_name = package_name
self.server = ChromeDriverServer(self.logger,
binary=webdriver_binary,
args=webdriver_args)

View file

@ -179,8 +179,8 @@ def run_info_browser_version(binary):
def update_properties():
return (["debug", "webrender", "e10s", "os", "version", "processor", "bits"],
{"debug", "e10s", "webrender"})
return (["os", "debug", "webrender", "e10s", "sw-e10s", "processor"],
{"os": ["version"], "processor": ["bits"]})
class FirefoxBrowser(Browser):

View file

@ -1,7 +1,6 @@
import os
import moznetwork
from mozprocess import ProcessHandler
from mozprofile import FirefoxProfile
from mozrunner import FennecEmulatorRunner
@ -52,7 +51,8 @@ def browser_kwargs(test_type, run_info_data, config, **kwargs):
"timeout_multiplier": get_timeout_multiplier(test_type,
run_info_data,
**kwargs),
"leak_check": kwargs["leak_check"],
# desktop only
"leak_check": False,
"stylo_threads": kwargs["stylo_threads"],
"chaos_mode_flags": kwargs["chaos_mode_flags"],
"config": config,
@ -145,13 +145,7 @@ class FirefoxAndroidBrowser(FirefoxBrowser):
with open(os.path.join(font_dir, "Ahem.ttf"), "wb") as dest:
dest.write(src.read())
if self.leak_check and kwargs.get("check_leaks", True):
self.leak_report_file = os.path.join(self.profile.profile, "runtests_leaks.log")
if os.path.exists(self.leak_report_file):
os.remove(self.leak_report_file)
env["XPCOM_MEM_BLOAT_LOG"] = self.leak_report_file
else:
self.leak_report_file = None
self.leak_report_file = None
if self.ca_certificate_path is not None:
self.setup_ssl()
@ -168,9 +162,7 @@ class FirefoxAndroidBrowser(FirefoxBrowser):
symbols_path=self.symbols_path,
serial=self.device_serial,
# TODO - choose appropriate log dir
logdir=os.getcwd(),
process_class=ProcessHandler,
process_args={"processOutputLine": [self.on_output]})
logdir=os.getcwd())
self.logger.debug("Starting %s" % self.package_name)
# connect to a running emulator
@ -201,11 +193,11 @@ class FirefoxAndroidBrowser(FirefoxBrowser):
except Exception as e:
self.logger.warning("Failed to remove forwarded or reversed ports: %s" % e)
# We assume that stopping the runner prompts the
# browser to shut down. This allows the leak log to be written
# browser to shut down.
self.runner.stop()
self.logger.debug("stopped")
def check_crash(self, process, test):
if not os.environ.get("MINIDUMP_STACKWALK", "") and self.stackwalk_binary:
os.environ["MINIDUMP_STACKWALK"] = self.stackwalk_binary
return self.runner.check_for_crashes()
return bool(self.runner.check_for_crashes(test_name=test))

View file

@ -61,7 +61,7 @@ def env_options():
def update_properties():
return ["debug", "os", "version", "processor", "bits"], None
return ["debug", "os", "processor"], {"os": ["version"], "processor": ["bits"]}
class ServoBrowser(NullBrowser):

View file

@ -63,7 +63,7 @@ def env_options():
def update_properties():
return ["debug", "os", "version", "processor", "bits"], None
return ["debug", "os", "processor"], {"os": ["version"], "processor": ["bits"]}
def write_hosts_file(config):

View file

@ -86,6 +86,7 @@ class WebDriverTestharnessProtocolPart(TestharnessProtocolPart):
self.parent.base.execute_script(self.runner_script % format_map)
def close_old_windows(self):
self.webdriver.actions.release()
handles = [item for item in self.webdriver.handles if item != self.runner_handle]
for handle in handles:
try:

View file

@ -0,0 +1,131 @@
from math import log
from collections import defaultdict
class Node(object):
    """A node in a decision tree keyed on run_info properties."""

    def __init__(self, prop, value):
        self.prop = prop
        self.value = value
        self.parent = None
        self.children = set()
        # Populated for leaf nodes
        self.run_info = set()
        self.result_values = defaultdict(int)

    def add(self, node):
        """Attach *node* as a child and point it back at this node."""
        node.parent = self
        self.children.add(node)

    def __iter__(self):
        """Yield this node, then every descendant (pre-order)."""
        yield self
        for child in self.children:
            for descendant in child:
                yield descendant

    def __len__(self):
        """Number of nodes in the subtree rooted at this node."""
        total = 1
        for child in self.children:
            total += len(child)
        return total
def entropy(results):
    """Return the Shannon entropy (in bits) of the result values.

    This is basically a measure of the uniformity of the values in
    results, based on the Shannon entropy.

    :param results: mapping from run_info to an iterable of result values
        (iterating each entry yields the distinct values for that run).
    """
    result_counts = defaultdict(int)
    total = float(len(results))
    # .values() rather than .itervalues() so this also works on Python 3
    for values in results.values():
        # Not sure this is right, possibly want to treat multiple values as
        # distinct from multiple of the same value?
        for value in values:
            result_counts[value] += 1
    entropy_sum = 0
    for count in result_counts.values():
        prop = float(count) / total
        entropy_sum -= prop * log(prop, 2)
    return entropy_sum
def split_results(prop, results):
    """Split a dictionary of results into a dictionary of dictionaries where
    each sub-dictionary has a specific value of the given property.

    :param prop: the run_info property name to partition on
    :param results: mapping from run_info to result data
    :returns: mapping from each observed value of *prop* to the sub-dict of
        results whose run_info has that value
    """
    by_prop = defaultdict(dict)
    # .items() rather than .iteritems() so this also works on Python 3
    for run_info, value in results.items():
        by_prop[run_info[prop]][run_info] = value
    return by_prop
def build_tree(properties, dependent_props, results, tree=None):
    """Build a decision tree mapping properties to results

    :param properties: - A list of run_info properties to consider
                         in the tree
    :param dependent_props: - A dictionary mapping property name
                              to properties that should only be considered
                              after the properties in the key. For example
                              {"os": ["version"]} means that "version" won't
                              be used until after os.
    :param results: Dictionary mapping run_info to set of results
    :param tree: A Node object to use as the root of the (sub)tree
    :returns: the root Node of the constructed (sub)tree
    """
    if tree is None:
        tree = Node(None, None)

    prop_index = {prop: i for i, prop in enumerate(properties)}

    # Tally every result value across all runs.
    # .values()/.items() rather than itervalues/iteritems for Py2+Py3 compat
    all_results = defaultdict(int)
    for result_values in results.values():
        for result_value, count in result_values.items():
            all_results[result_value] += count

    # If there is only one result we are done
    if not properties or len(all_results) == 1:
        for value, count in all_results.items():
            tree.result_values[value] += count
        tree.run_info |= set(results.keys())
        return tree

    results_partitions = []
    remove_properties = set()
    for prop in properties:
        result_sets = split_results(prop, results)
        if len(result_sets) == 1:
            # If this property doesn't partition the space then just remove it
            # from the set to consider
            remove_properties.add(prop)
            continue
        new_entropy = 0.
        results_sets_entropy = []
        for prop_value, result_set in result_sets.items():
            results_sets_entropy.append((entropy(result_set), prop_value, result_set))
            # Weight each partition's entropy by the fraction of runs it holds
            new_entropy += (float(len(result_set)) / len(results)) * results_sets_entropy[-1][0]

        results_partitions.append((new_entropy,
                                   prop,
                                   results_sets_entropy))

    # In the case that no properties partition the space
    if not results_partitions:
        for value, count in all_results.items():
            tree.result_values[value] += count
        tree.run_info |= set(results.keys())
        return tree

    # Split by the property whose partitions have the LOWEST weighted entropy,
    # i.e. the highest information gain; ties are broken by the order in which
    # the properties were supplied.
    results_partitions.sort(key=lambda x: (x[0], prop_index[x[1]]))
    _, best_prop, sub_results = results_partitions[0]

    # Create a new set of properties that can be used
    new_props = properties[:prop_index[best_prop]] + properties[prop_index[best_prop] + 1:]
    new_props.extend(dependent_props.get(best_prop, []))
    if remove_properties:
        new_props = [item for item in new_props if item not in remove_properties]

    for _, prop_value, results_sets in sub_results:
        node = Node(best_prop, prop_value)
        tree.add(node)
        build_tree(new_props, dependent_props, results_sets, node)
    return tree

View file

@ -28,11 +28,15 @@ class ChromiumFormatter(base.BaseFormatter):
# the subtest messages for this test.
self.messages = defaultdict(str)
# List of tests that have failing subtests.
self.tests_with_subtest_fails = set()
def _append_test_message(self, test, subtest, status, message):
"""
Appends the message data for a test.
:param str test: the name of the test
:param str subtest: the name of the subtest with the message
:param str status: the subtest status
:param str message: the string to append to the message for this test
"""
if not message:
@ -107,15 +111,25 @@ class ChromiumFormatter(base.BaseFormatter):
else time.time())
def test_status(self, data):
test_name = data["test"]
if data["status"] != "PASS" and test_name not in self.tests_with_subtest_fails:
self.tests_with_subtest_fails.add(test_name)
if "message" in data:
self._append_test_message(data["test"], data["subtest"],
self._append_test_message(test_name, data["subtest"],
data["status"], data["message"])
def test_end(self, data):
actual_status = self._map_status_name(data["status"])
expected_status = (self._map_status_name(data["expected"])
if "expected" in data else "PASS")
test_name = data["test"]
actual_status = self._map_status_name(data["status"])
if actual_status == "PASS" and test_name in self.tests_with_subtest_fails:
# This test passed but it has failing subtests, so we flip the status
# to FAIL.
actual_status = "FAIL"
# Clean up the test list to avoid accumulating too many.
self.tests_with_subtest_fails.remove(test_name)
if "message" in data:
self._append_test_message(test_name, None, actual_status,
data["message"])

View file

@ -162,3 +162,52 @@ def test_subtest_messages(capfd):
t2_log = output_json["tests"]["t2"]["artifacts"]["log"]
assert t2_log == "[TIMEOUT] t2_message\n"
def test_subtest_failure(capfd):
    """A test whose harness status is PASS must be reported as FAIL when any
    of its subtests failed, and the formatter must clean up its state."""
    # Tests that a test fails if a subtest fails

    # Set up the handler.
    output = StringIO()
    logger = structuredlog.StructuredLogger("test_a")
    formatter = ChromiumFormatter()
    logger.add_handler(handlers.StreamHandler(output, formatter))

    # Run a test with some subtest failures.
    logger.suite_start(["t1"], run_info={}, time=123)
    logger.test_start("t1")
    logger.test_status("t1", status="FAIL", subtest="t1_a",
                       message="t1_a_message")
    logger.test_status("t1", status="PASS", subtest="t1_b",
                       message="t1_b_message")
    logger.test_status("t1", status="TIMEOUT", subtest="t1_c",
                       message="t1_c_message")

    # Make sure the test name was added to the set of tests with subtest fails
    assert "t1" in formatter.tests_with_subtest_fails

    # The test status is reported as a pass here because the harness was able to
    # run the test to completion.
    logger.test_end("t1", status="PASS", expected="PASS")
    logger.suite_end()

    # check nothing got output to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # check the actual output of the formatter
    output.seek(0)
    output_json = json.load(output)

    test_obj = output_json["tests"]["t1"]
    t1_log = test_obj["artifacts"]["log"]
    assert t1_log == "[FAIL] t1_a: t1_a_message\n" \
        "[PASS] t1_b: t1_b_message\n" \
        "[TIMEOUT] t1_c: t1_c_message\n"
    # The status of the test in the output is a failure because subtests failed,
    # despite the harness reporting that the test passed.
    assert test_obj["actual"] == "FAIL"
    # Also ensure that the formatter cleaned up its internal state
    assert "t1" not in formatter.tests_with_subtest_fails

View file

@ -1,9 +1,6 @@
from __future__ import print_function
import array
import os
import shutil
import tempfile
import uuid
from collections import defaultdict, namedtuple
from mozlog import structuredlog
@ -25,28 +22,56 @@ except ImportError:
import json
class RunInfo(object):
    """A wrapper around RunInfo dicts so that they can be hashed by identity"""

    def __init__(self, dict_value):
        self.data = dict_value
        # Sorted tuple-of-pairs form: equal dicts get equal hashes/comparisons
        self.canonical_repr = tuple(tuple(item) for item in sorted(dict_value.items()))

    def __getitem__(self, key):
        return self.data[key]

    def __setitem__(self, key, value):
        # Instances are used as dict keys, so they must stay immutable
        raise TypeError

    def __hash__(self):
        return hash(self.canonical_repr)

    def __eq__(self, other):
        return self.canonical_repr == other.canonical_repr

    def iteritems(self):
        """Yield (key, value) pairs from the wrapped dict."""
        # .items() rather than .iteritems() so this also works on Python 3;
        # the method itself stays a generator, matching the old interface.
        for key, value in self.data.items():
            yield key, value

    def items(self):
        return list(self.iteritems())
def update_expected(test_paths, serve_root, log_file_names,
rev_old=None, rev_new="HEAD", ignore_existing=False,
sync_root=None, property_order=None, boolean_properties=None,
stability=None):
update_properties, rev_old=None, rev_new="HEAD",
full_update=False, sync_root=None, disable_intermittent=None,
update_intermittent=False, remove_intermittent=False):
"""Update the metadata files for web-platform-tests based on
the results obtained in a previous run or runs
If stability is not None, assume log_file_names refers to logs from repeated
If disable_intermittent is not None, assume log_file_names refers to logs from repeated
test jobs, disable tests that don't behave as expected on all runs"""
do_delayed_imports(serve_root)
id_test_map = load_test_data(test_paths)
for metadata_path, updated_ini in update_from_logs(id_test_map,
*log_file_names,
ignore_existing=ignore_existing,
property_order=property_order,
boolean_properties=boolean_properties,
stability=stability):
update_properties,
disable_intermittent,
update_intermittent,
remove_intermittent,
full_update,
*log_file_names):
write_new_expected(metadata_path, updated_ini)
if stability:
if disable_intermittent:
for test in updated_ini.iterchildren():
for subtest in test.iterchildren():
if subtest.new_disabled:
@ -113,7 +138,7 @@ def unexpected_changes(manifests, change_data, files_changed):
# for each conditional:
# If all the new values match (or there aren't any) retain that conditional
# If any new values mismatch:
# If stability and any repeated values don't match, disable the test
# If disable_intermittent and any repeated values don't match, disable the test
# else mark the test as needing human attention
# Check if all the RHS values are the same; if so collapse the conditionals
@ -139,6 +164,9 @@ class InternedData(object):
# Reserve 0 as a sentinel
self._data = [None], {}
def clear(self):
    # Reset to the freshly-constructed state, dropping all interned values
    self.__init__()
def store(self, obj):
if self.type_conv is not None:
obj = self.type_conv(obj)
@ -160,6 +188,10 @@ class InternedData(object):
obj = self.rev_type_conv(obj)
return obj
def __iter__(self):
    # Start at 1 because index 0 is reserved as a sentinel
    for i in xrange(1, len(self._data[0])):
        yield self.get(i)
class RunInfoInterned(InternedData):
def type_conv(self, value):
@ -170,7 +202,7 @@ class RunInfoInterned(InternedData):
prop_intern = InternedData(4)
run_info_intern = RunInfoInterned()
run_info_intern = InternedData(8)
status_intern = InternedData(4)
@ -185,25 +217,27 @@ def load_test_data(test_paths):
return id_test_map
def update_from_logs(id_test_map, *log_filenames, **kwargs):
ignore_existing = kwargs.get("ignore_existing", False)
property_order = kwargs.get("property_order")
boolean_properties = kwargs.get("boolean_properties")
stability = kwargs.get("stability")
def update_from_logs(id_test_map, update_properties, disable_intermittent, update_intermittent,
remove_intermittent, full_update, *log_filenames):
updater = ExpectedUpdater(id_test_map,
ignore_existing=ignore_existing)
updater = ExpectedUpdater(id_test_map)
for i, log_filename in enumerate(log_filenames):
print("Processing log %d/%d" % (i + 1, len(log_filenames)))
with open(log_filename) as f:
updater.update_from_log(f)
for item in update_results(id_test_map, property_order, boolean_properties, stability):
for item in update_results(id_test_map, update_properties, full_update,
disable_intermittent, update_intermittent, remove_intermittent):
yield item
def update_results(id_test_map, property_order, boolean_properties, stability):
def update_results(id_test_map,
update_properties,
full_update,
disable_intermittent,
update_intermittent=False,
remove_intermittent=False):
test_file_items = set(id_test_map.itervalues())
default_expected_by_type = {}
@ -214,8 +248,9 @@ def update_results(id_test_map, property_order, boolean_properties, stability):
default_expected_by_type[(test_type, True)] = test_cls.subtest_result_cls.default_expected
for test_file in test_file_items:
updated_expected = test_file.update(property_order, boolean_properties, stability,
default_expected_by_type)
updated_expected = test_file.update(default_expected_by_type, update_properties,
full_update, disable_intermittent,
update_intermittent, remove_intermittent)
if updated_expected is not None and updated_expected.modified:
yield test_file.metadata_path, updated_expected
@ -229,36 +264,12 @@ def directory_manifests(metadata_path):
return rv
def write_changes(metadata_path, expected):
# First write the new manifest files to a temporary directory
temp_path = tempfile.mkdtemp(dir=os.path.split(metadata_path)[0])
write_new_expected(temp_path, expected)
# Copy all files in the root to the temporary location since
# these cannot be ini files
keep_files = [item for item in os.listdir(metadata_path) if
not os.path.isdir(os.path.join(metadata_path, item))]
for item in keep_files:
dest_dir = os.path.dirname(os.path.join(temp_path, item))
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
shutil.copyfile(os.path.join(metadata_path, item),
os.path.join(temp_path, item))
# Then move the old manifest files to a new location
temp_path_2 = metadata_path + str(uuid.uuid4())
os.rename(metadata_path, temp_path_2)
# Move the new files to the destination location and remove the old files
os.rename(temp_path, metadata_path)
shutil.rmtree(temp_path_2)
def write_new_expected(metadata_path, expected):
# Serialize the data back to a file
path = expected_path(metadata_path, expected.test_path)
if not expected.is_empty:
manifest_str = wptmanifest.serialize(expected.node, skip_empty_data=True)
manifest_str = wptmanifest.serialize(expected.node,
skip_empty_data=True)
assert manifest_str != ""
dir = os.path.split(path)[0]
if not os.path.exists(dir):
@ -281,9 +292,8 @@ def write_new_expected(metadata_path, expected):
class ExpectedUpdater(object):
def __init__(self, id_test_map, ignore_existing=False):
def __init__(self, id_test_map):
self.id_test_map = id_test_map
self.ignore_existing = ignore_existing
self.run_info = None
self.action_map = {"suite_start": self.suite_start,
"test_start": self.test_start,
@ -331,11 +341,11 @@ class ExpectedUpdater(object):
"subtest": subtest["name"],
"status": subtest["status"],
"expected": subtest.get("expected"),
"known_intermittent": subtest.get("known_intermittent")})
"known_intermittent": subtest.get("known_intermittent", [])})
action_map["test_end"]({"test": test["test"],
"status": test["status"],
"expected": test.get("expected"),
"known_intermittent": test.get("known_intermittent")})
"known_intermittent": test.get("known_intermittent", [])})
if "asserts" in test:
asserts = test["asserts"]
action_map["assertion_count"]({"test": test["test"],
@ -355,19 +365,16 @@ class ExpectedUpdater(object):
action_map[action](item_data)
def suite_start(self, data):
self.run_info = run_info_intern.store(data["run_info"])
self.run_info = run_info_intern.store(RunInfo(data["run_info"]))
def test_start(self, data):
test_id = intern(data["test"].encode("utf8"))
try:
test_data = self.id_test_map[test_id]
self.id_test_map[test_id]
except KeyError:
print("Test not found %s, skipping" % test_id)
return
if self.ignore_existing:
test_data.set_requires_update()
test_data.clear.add("expected")
self.tests_visited[test_id] = set()
def test_status(self, data):
@ -530,7 +537,7 @@ class PackedResultList(object):
class TestFileData(object):
__slots__ = ("url_base", "item_type", "test_path", "metadata_path", "tests",
"_requires_update", "clear", "data")
"_requires_update", "data")
def __init__(self, url_base, item_type, metadata_path, test_path, tests):
self.url_base = url_base
@ -539,37 +546,75 @@ class TestFileData(object):
self.metadata_path = metadata_path
self.tests = {intern(item.id.encode("utf8")) for item in tests}
self._requires_update = False
self.clear = set()
self.data = defaultdict(lambda: defaultdict(PackedResultList))
def set_requires_update(self):
    # Mark this test file as needing its expectation data regenerated
    self._requires_update = True
@property
def requires_update(self):
    # Read-only view of the flag set via set_requires_update()
    return self._requires_update
def set(self, test_id, subtest_id, prop, run_info, value):
    # Record one observed result datum; the property name is interned
    # (via prop_intern) to keep the packed result lists small
    self.data[test_id][subtest_id].append(prop_intern.store(prop),
                                          run_info,
                                          value)
def expected(self, property_order, boolean_properties):
def expected(self, update_properties):
expected_data = load_expected(self.url_base,
self.metadata_path,
self.test_path,
self.tests,
property_order,
boolean_properties)
update_properties)
if expected_data is None:
expected_data = create_expected(self.url_base,
self.test_path,
property_order,
boolean_properties)
update_properties)
return expected_data
def update(self, property_order, boolean_properties, stability,
default_expected_by_type):
if not self._requires_update:
def is_disabled(self, test):
    # This conservatively assumes that anything that was disabled remains disabled
    # we could probably do better by checking if it's in the full set of run infos
    # NOTE(review): has_key here looks like the manifest node API rather than
    # dict.has_key — confirm before any Python 3 port.
    return test.has_key("disabled")
def orphan_subtests(self, expected):
    """Return subtest nodes present in the expected manifest but missing
    from the observed result data, skipping disabled subtests."""
    # Return subtest nodes present in the expected file, but missing from the data
    rv = []
    for test_id, subtests in self.data.iteritems():
        test = expected.get_test(test_id.decode("utf8"))
        if not test:
            continue
        # Subtest ids seen in the logs; None is the test-level entry, not a subtest
        seen_subtests = set(item.decode("utf8") for item in subtests.iterkeys() if item is not None)
        missing_subtests = set(test.subtests.keys()) - seen_subtests
        for item in missing_subtests:
            expected_subtest = test.get_subtest(item)
            if not self.is_disabled(expected_subtest):
                rv.append(expected_subtest)
    return rv
def update(self, default_expected_by_type, update_properties,
full_update=False, disable_intermittent=None, update_intermittent=False,
remove_intermittent=False):
# If we are doing a full update, we may need to prune missing nodes
# even if the expectations didn't change
if not self.requires_update and not full_update:
return
expected = self.expected(property_order, boolean_properties)
expected = self.expected(update_properties)
if full_update:
orphans = self.orphan_subtests(expected)
if not self.requires_update and not orphans:
return
if orphans:
expected.modified = True
for item in orphans:
item.remove()
expected_by_test = {}
for test_id in self.tests:
@ -577,10 +622,9 @@ class TestFileData(object):
expected.append(manifestupdate.TestNode.create(test_id))
test_expected = expected.get_test(test_id)
expected_by_test[test_id] = test_expected
for prop in self.clear:
test_expected.clear(prop)
for test_id, test_data in self.data.iteritems():
test_id = test_id.decode("utf8")
for subtest_id, results_list in test_data.iteritems():
for prop, run_info, value in results_list:
# Special case directory metadata
@ -601,17 +645,28 @@ class TestFileData(object):
if subtest_id is None:
item_expected = test_expected
else:
if isinstance(subtest_id, str):
subtest_id = subtest_id.decode("utf8")
item_expected = test_expected.get_subtest(subtest_id)
if prop == "status":
item_expected.set_result(run_info, value)
elif prop == "asserts":
item_expected.set_asserts(run_info, value)
expected.coalesce_properties(stability=stability)
expected.update(full_update=full_update,
disable_intermittent=disable_intermittent,
update_intermittent=update_intermittent,
remove_intermittent=remove_intermittent)
for test in expected.iterchildren():
for subtest in test.iterchildren():
subtest.coalesce_properties(stability=stability)
test.coalesce_properties(stability=stability)
subtest.update(full_update=full_update,
disable_intermittent=disable_intermittent,
update_intermittent=update_intermittent,
remove_intermittent=remove_intermittent)
test.update(full_update=full_update,
disable_intermittent=disable_intermittent,
update_intermittent=update_intermittent,
remove_intermittent=remove_intermittent)
return expected
@ -619,29 +674,17 @@ class TestFileData(object):
Result = namedtuple("Result", ["status", "default_expected"])
def create_expected(url_base, test_path, property_order=None,
boolean_properties=None):
def create_expected(url_base, test_path, run_info_properties):
expected = manifestupdate.ExpectedManifest(None,
test_path,
url_base,
property_order=property_order,
boolean_properties=boolean_properties)
run_info_properties)
return expected
def load_expected(url_base, metadata_path, test_path, tests, property_order=None,
boolean_properties=None):
def load_expected(url_base, metadata_path, test_path, tests, run_info_properties):
expected_manifest = manifestupdate.get_manifest(metadata_path,
test_path,
url_base,
property_order=property_order,
boolean_properties=boolean_properties)
if expected_manifest is None:
return
# Remove expected data for tests that no longer exist
for test in expected_manifest.iterchildren():
if test.id not in tests:
test.remove()
run_info_properties)
return expected_manifest

View file

@ -73,6 +73,6 @@ def load_product_update(config, product):
data = module.__wptrunner__
update_properties = (getattr(module, data["update_properties"])()
if "update_properties" in data else (None, None))
if "update_properties" in data else {})
return update_properties

View file

@ -576,7 +576,8 @@ class TestRunnerManager(threading.Thread):
if test.disabled(result.name):
continue
expected = test.expected(result.name)
is_unexpected = expected != result.status
known_intermittent = test.known_intermittent(result.name)
is_unexpected = expected != result.status and result.status not in known_intermittent
if is_unexpected:
self.unexpected_count += 1
@ -587,6 +588,7 @@ class TestRunnerManager(threading.Thread):
result.status,
message=result.message,
expected=expected,
known_intermittent=known_intermittent,
stack=result.stack)
# We have a couple of status codes that are used internally, but not exposed to the
@ -598,13 +600,14 @@ class TestRunnerManager(threading.Thread):
status_subns = {"INTERNAL-ERROR": "ERROR",
"EXTERNAL-TIMEOUT": "TIMEOUT"}
expected = test.expected()
known_intermittent = test.known_intermittent()
status = status_subns.get(file_result.status, file_result.status)
if self.browser.check_crash(test.id):
status = "CRASH"
self.test_count += 1
is_unexpected = expected != status
is_unexpected = expected != status and status not in known_intermittent
if is_unexpected:
self.unexpected_count += 1
self.logger.debug("Unexpected count in this thread %i" % self.unexpected_count)
@ -623,6 +626,7 @@ class TestRunnerManager(threading.Thread):
status,
message=file_result.message,
expected=expected,
known_intermittent=known_intermittent,
extra=file_result.extra,
stack=file_result.stack)

View file

@ -0,0 +1,132 @@
import sys
import pytest
from .. import expectedtree, metadata
from collections import defaultdict
def dump_tree(tree):
    """Render a Node tree as an indented multi-line string for assertions."""
    lines = []

    def visit(node, depth=0):
        pad = " " * depth
        if node.prop:
            label = "%s:%s" % (node.prop, node.value)
        else:
            label = "root"
        if node.result_values:
            label += " result_values:%s" % (",".join(sorted(node.result_values)))
        lines.append("%s<%s>" % (pad, label))
        for child in sorted(node.children, key=lambda item: item.value):
            visit(child, depth + 2)

    visit(tree)
    return "\n".join(lines)
def results_object(results):
    """Turn (run_info_dict, status) pairs into the nested
    {RunInfo: {status: count}} mapping that build_tree consumes."""
    obj = defaultdict(lambda: defaultdict(int))
    for run_info, status in results:
        key = metadata.RunInfo(run_info)
        obj[key][status] += 1
    return obj
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_build_tree_0():
    """The tree should split on "debug" alone when it fully explains results."""
    # Pass if debug
    results = [({"os": "linux", "version": "18.04", "debug": True}, "FAIL"),
               ({"os": "linux", "version": "18.04", "debug": False}, "PASS"),
               ({"os": "linux", "version": "16.04", "debug": False}, "PASS"),
               ({"os": "mac", "version": "10.12", "debug": True}, "FAIL"),
               ({"os": "mac", "version": "10.12", "debug": False}, "PASS"),
               ({"os": "win", "version": "7", "debug": False}, "PASS"),
               ({"os": "win", "version": "10", "debug": False}, "PASS")]
    results_obj = results_object(results)
    tree = expectedtree.build_tree(["os", "version", "debug"], {}, results_obj)

    expected = """<root>
  <debug:False result_values:PASS>
  <debug:True result_values:FAIL>"""
    assert dump_tree(tree) == expected
@pytest.mark.xfail(sys.version[0] == "3",
                   reason="metadata doesn't support py3")
def test_build_tree_1():
    """"version" is dependent on "os", so it only appears under the os split."""
    # Pass if linux or windows 10
    results = [({"os": "linux", "version": "18.04", "debug": True}, "PASS"),
               ({"os": "linux", "version": "18.04", "debug": False}, "PASS"),
               ({"os": "linux", "version": "16.04", "debug": False}, "PASS"),
               ({"os": "mac", "version": "10.12", "debug": True}, "FAIL"),
               ({"os": "mac", "version": "10.12", "debug": False}, "FAIL"),
               ({"os": "win", "version": "7", "debug": False}, "FAIL"),
               ({"os": "win", "version": "10", "debug": False}, "PASS")]
    results_obj = results_object(results)
    tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results_obj)

    expected = """<root>
  <os:linux result_values:PASS>
  <os:mac result_values:FAIL>
  <os:win>
    <version:10 result_values:PASS>
    <version:7 result_values:FAIL>"""
    assert dump_tree(tree) == expected
# Use sys.version_info for the major-version check: sys.version is a free-form
# string, so indexing its first character is fragile.
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_build_tree_2():
    """A failure confined to one (os, debug, version) combination should yield a deep split."""
    # Fails in a specific configuration
    results = [({"os": "linux", "version": "18.04", "debug": True}, "PASS"),
               ({"os": "linux", "version": "18.04", "debug": False}, "FAIL"),
               ({"os": "linux", "version": "16.04", "debug": False}, "PASS"),
               ({"os": "linux", "version": "16.04", "debug": True}, "PASS"),
               ({"os": "mac", "version": "10.12", "debug": True}, "PASS"),
               ({"os": "mac", "version": "10.12", "debug": False}, "PASS"),
               ({"os": "win", "version": "7", "debug": False}, "PASS"),
               ({"os": "win", "version": "10", "debug": False}, "PASS")]
    results_obj = results_object(results)
    tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results_obj)
    expected = """<root>
  <os:linux>
    <debug:False>
      <version:16.04 result_values:PASS>
      <version:18.04 result_values:FAIL>
    <debug:True result_values:PASS>
  <os:mac result_values:PASS>
  <os:win result_values:PASS>"""
    assert dump_tree(tree) == expected
# Use sys.version_info for the major-version check: sys.version is a free-form
# string, so indexing its first character is fragile.
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_build_tree_3():
    """Properties not listed for splitting (``unused``) should not create branches,
    leaving mixed result values on the root."""
    results = [({"os": "linux", "version": "18.04", "debug": True, "unused": False}, "PASS"),
               ({"os": "linux", "version": "18.04", "debug": True, "unused": True}, "FAIL")]
    results_obj = results_object(results)
    tree = expectedtree.build_tree(["os", "debug"], {"os": ["version"]}, results_obj)
    expected = """<root result_values:FAIL,PASS>"""
    assert dump_tree(tree) == expected
# Use sys.version_info for the major-version check: sys.version is a free-form
# string, so indexing its first character is fragile.
@pytest.mark.xfail(sys.version_info[0] == 3,
                   reason="metadata doesn't support py3")
def test_build_tree_4():
    """Repeated runs with the same run_info should accumulate per-status counts."""
    # Check counts for multiple statuses
    results = [({"os": "linux", "version": "18.04", "debug": False}, "FAIL"),
               ({"os": "linux", "version": "18.04", "debug": False}, "PASS"),
               ({"os": "linux", "version": "18.04", "debug": False}, "PASS")]
    results_obj = results_object(results)
    tree = expectedtree.build_tree(["os", "version", "debug"], {}, results_obj)
    assert tree.result_values["PASS"] == 2
    assert tree.result_values["FAIL"] == 1

View file

@ -6,13 +6,10 @@ from .base import Step, StepRunner
class GetUpdatePropertyList(Step):
provides = ["property_order", "boolean_properties"]
provides = ["update_properties"]
def create(self, state):
property_order, boolean_properties = products.load_product_update(
state.config, state.product)
state.property_order = (property_order or []) + state.extra_properties
state.boolean_properties = boolean_properties
state.update_properties = products.load_product_update(state.config, state.product)
class UpdateExpected(Step):
@ -27,12 +24,13 @@ class UpdateExpected(Step):
metadata.update_expected(state.paths,
state.serve_root,
state.run_log,
update_properties=state.update_properties,
rev_old=None,
ignore_existing=state.ignore_existing,
full_update=state.full_update,
sync_root=sync_root,
property_order=state.property_order,
boolean_properties=state.boolean_properties,
stability=state.stability)
disable_intermittent=state.disable_intermittent,
update_intermittent=state.update_intermittent,
remove_intermittent=state.remove_intermittent)
class CreateMetadataPatch(Step):

View file

@ -85,12 +85,14 @@ class UpdateMetadata(Step):
kwargs = state.kwargs
with state.push(["local_tree", "sync_tree", "paths", "serve_root"]):
state.run_log = kwargs["run_log"]
state.ignore_existing = kwargs["ignore_existing"]
state.stability = kwargs["stability"]
state.disable_intermittent = kwargs["disable_intermittent"]
state.update_intermittent = kwargs["update_intermittent"]
state.remove_intermittent = kwargs["remove_intermittent"]
state.patch = kwargs["patch"]
state.suite_name = kwargs["suite_name"]
state.product = kwargs["product"]
state.config = kwargs["config"]
state.full_update = kwargs["full"]
state.extra_properties = kwargs["extra_property"]
runner = MetadataUpdateRunner(self.logger, state)
runner.run()

View file

@ -269,7 +269,7 @@ scheme host and port.""")
help="Defines an extra user preference (overrides those in prefs_root)")
gecko_group.add_argument("--leak-check", dest="leak_check", action="store_true", default=None,
help="Enable leak checking (enabled by default for debug builds, "
"silently ignored for opt)")
"silently ignored for opt, mobile)")
gecko_group.add_argument("--no-leak-check", dest="leak_check", action="store_false", default=None,
help="Disable leak checking")
gecko_group.add_argument("--stylo-threads", action="store", type=int, default=1,
@ -609,11 +609,15 @@ def create_parser_update(product_choices=None):
help="Don't create a VCS commit containing the changes.")
parser.add_argument("--sync", dest="sync", action="store_true", default=False,
help="Sync the tests with the latest from upstream (implies --patch)")
parser.add_argument("--ignore-existing", action="store_true",
help="When updating test results only consider results from the logfiles provided, not existing expectations.")
parser.add_argument("--stability", nargs="?", action="store", const="unstable", default=None,
parser.add_argument("--full", action="store_true", default=False,
help=("For all tests that are updated, remove any existing conditions and missing subtests"))
parser.add_argument("--disable-intermittent", nargs="?", action="store", const="unstable", default=None,
help=("Reason for disabling tests. When updating test results, disable tests that have "
"inconsistent results across many runs with the given reason."))
parser.add_argument("--update-intermittent", action="store_true", default=False,
help=("Update test metadata with expected intermittent statuses."))
parser.add_argument("--remove-intermittent", action="store_true", default=False,
help=("Remove obsolete intermittent statuses from expected statuses."))
parser.add_argument("--no-remove-obsolete", action="store_false", dest="remove_obsolete", default=True,
help=("Don't remove metadata files that no longer correspond to a test file"))
parser.add_argument("--no-store-state", action="store_false", dest="store_state",

View file

@ -1,16 +1,18 @@
import operator
from ..node import NodeVisitor, DataNode, ConditionalNode, KeyValueNode, ListNode, ValueNode
from ..node import NodeVisitor, DataNode, ConditionalNode, KeyValueNode, ListNode, ValueNode, BinaryExpressionNode, VariableNode
from ..parser import parse
class ConditionalValue(object):
def __init__(self, node, condition_func):
self.node = node
assert callable(condition_func)
self.condition_func = condition_func
if isinstance(node, ConditionalNode):
assert len(node.children) == 2
self.condition_node = self.node.children[0]
assert isinstance(node.children[1], (ValueNode, ListNode))
self.value_node = self.node.children[1]
else:
assert isinstance(node, (ValueNode, ListNode))
@ -59,6 +61,20 @@ class ConditionalValue(object):
self.node.parent.remove()
self.node.remove()
@property
def variables(self):
rv = set()
if self.condition_node is None:
return rv
stack = [self.condition_node]
while stack:
node = stack.pop()
if isinstance(node, VariableNode):
rv.add(node.data)
for child in reversed(node.children):
stack.append(child)
return rv
class Compiler(NodeVisitor):
def compile(self, tree, data_cls_getter=None, **kwargs):
@ -191,6 +207,7 @@ class Compiler(NodeVisitor):
return {"not": operator.not_}[node.data]
def visit_BinaryOperatorNode(self, node):
assert isinstance(node.parent, BinaryExpressionNode)
return {"and": operator.and_,
"or": operator.or_,
"==": operator.eq,
@ -216,6 +233,12 @@ class ManifestItem(object):
def __contains__(self, key):
return key in self._data
def __iter__(self):
yield self
for child in self.children:
for node in child:
yield node
@property
def is_empty(self):
if self._data:
@ -282,9 +305,12 @@ class ManifestItem(object):
else:
value_node = ValueNode(unicode(value))
if condition is not None:
conditional_node = ConditionalNode()
conditional_node.append(condition)
conditional_node.append(value_node)
if not isinstance(condition, ConditionalNode):
conditional_node = ConditionalNode()
conditional_node.append(condition)
conditional_node.append(value_node)
else:
conditional_node = condition
node.append(conditional_node)
cond_value = Compiler().compile_condition(conditional_node)
else:
@ -300,6 +326,21 @@ class ManifestItem(object):
else:
self._data[key].append(cond_value)
def clear(self, key):
"""Clear all the expected data for this node"""
if key in self._data:
for child in self.node.children:
if (isinstance(child, KeyValueNode) and
child.data == key):
child.remove()
del self._data[key]
break
def get_conditions(self, property_name):
if property_name in self._data:
return self._data[property_name]
return []
def _add_key_value(self, node, values):
"""Called during construction to set a key-value node"""
self._data[node.data] = values

View file

@ -68,9 +68,11 @@ class KeyValueNode(Node):
# Append that retains the invariant that conditional nodes
# come before unconditional nodes
other.parent = self
if isinstance(other, ValueNode):
if not isinstance(other, (ListNode, ValueNode, ConditionalNode)):
raise TypeError
if isinstance(other, (ListNode, ValueNode)):
if self.children:
assert not isinstance(self.children[-1], ValueNode)
assert not isinstance(self.children[-1], (ListNode, ValueNode))
self.children.append(other)
else:
if self.children and isinstance(self.children[-1], ValueNode):
@ -95,7 +97,17 @@ class AtomNode(ValueNode):
class ConditionalNode(Node):
pass
def append(self, other):
if not len(self.children):
if not isinstance(other, (BinaryExpressionNode, UnaryExpressionNode, VariableNode)):
raise TypeError
else:
if len(self.children) > 1:
raise ValueError
if not isinstance(other, (ListNode, ValueNode)):
raise TypeError
other.parent = self
self.children.append(other)
class UnaryExpressionNode(Node):

View file

@ -16,7 +16,7 @@ from __future__ import unicode_literals
from six import binary_type, text_type, BytesIO
from .node import (AtomNode, BinaryExpressionNode, BinaryOperatorNode,
from .node import (Node, AtomNode, BinaryExpressionNode, BinaryOperatorNode,
ConditionalNode, DataNode, IndexNode, KeyValueNode, ListNode,
NumberNode, StringNode, UnaryExpressionNode,
UnaryOperatorNode, ValueNode, VariableNode)
@ -339,7 +339,14 @@ class Tokenizer(object):
spaces = 0
rv += c
self.consume()
yield (token_types.string, decode(rv))
rv = decode(rv)
if rv.startswith("if "):
# Hack to avoid a problem where people write
# disabled: if foo
# and expect that to disable conditionally
raise ParseError(self.filename, self.line_number, "Strings starting 'if ' must be quoted "
"(expressions must start on a newline and be indented)")
yield (token_types.string, rv)
def comment_state(self):
while self.char() is not eol:
@ -698,13 +705,16 @@ class Treebuilder(object):
self.node = root
def append(self, node):
assert isinstance(node, Node)
self.node.append(node)
self.node = node
assert self.node is not None
return node
def pop(self):
node = self.node
self.node = self.node.parent
assert self.node is not None
return node

View file

@ -78,8 +78,10 @@ class ManifestSerializer(NodeVisitor):
data = unicode(node.data)
else:
data = node.data
if "#" in data or (isinstance(node.parent, ListNode) and
("," in data or "]" in data)):
if ("#" in data or
data.startswith("if ") or
(isinstance(node.parent, ListNode) and
("," in data or "]" in data))):
if "\"" in data:
quote = "'"
else:

View file

@ -90,7 +90,7 @@ key:
self.compare(
"""
key:
if x == 1: if b: value""",
if x == 1: 'if b: value'""",
["DataNode", None,
[["KeyValueNode", "key",
[["ConditionalNode", None,
@ -111,6 +111,10 @@ key:
with self.assertRaises(parser.ParseError):
self.parse("key: @true")
def test_if_1(self):
with self.assertRaises(parser.ParseError):
self.parse("key: if foo")
if __name__ == "__main__":
unittest.main()

View file

@ -230,3 +230,7 @@ class TokenizerTest(unittest.TestCase):
self.compare("""foo:
if a or b: [1, 2]
""")
def test_if_string_0(self):
self.compare("""foo: "if bar"
""")