Mirror of https://github.com/servo/servo.git (synced 2025-09-07 05:28:21 +01:00)
Update web-platform-tests to revision 60220357131c65146444da1f54624d5b54d0975d
commit 775b784f79 (parent c45192614c)
2144 changed files with 58115 additions and 29658 deletions
@@ -1,3 +1,3 @@
 mozprocess == 0.26
 selenium==3.12.0
-requests==2.18.4
+requests==2.19.1
@@ -33,26 +33,4 @@ deps =
     pep8-naming==0.4.1
 
 commands =
-    flake8
-
-[flake8]
-# flake8 config should be kept in sync across tools/tox.ini, tools/wpt/tox.ini, and tools/wptrunner/tox.ini
-select = E,W,F,N
-# E128: continuation line under-indented for visual indent
-# E129: visually indented line with same indent as next logical line
-# E221: multiple spaces before operator
-# E226: missing whitespace around arithmetic operator
-# E231: missing whitespace after ',', ';', or ':'
-# E251: unexpected spaces around keyword / parameter equals
-# E265: block comment should start with '# '
-# E302: expected 2 blank lines, found 0
-# E303: too many blank lines (3)
-# E305: expected 2 blank lines after end of function or class
-# E402: module level import not at top of file
-# E731: do not assign a lambda expression, use a def
-# E901: SyntaxError or IndentationError
-# W601: .has_key() is deprecated, use 'in'
-# N801: class names should use CapWords convention
-# N802: function name should be lowercase
-ignore = E128,E129,E221,E226,E231,E251,E265,E302,E303,E305,E402,E731,E901,W601,N801,N802
-max-line-length = 141
+    flake8 --append-config=../flake8.ini
@@ -33,7 +33,8 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
     from selenium.webdriver import DesiredCapabilities
 
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data,
+                                           **kwargs)
    executor_kwargs["close_after_done"] = True
    capabilities = dict(DesiredCapabilities.CHROME.items())
    capabilities.setdefault("chromeOptions", {})["prefs"] = {
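Editor's note (not part of the commit): the hunks above and below all repeat one mechanical change — `run_info_data` is now threaded through every product's `executor_kwargs` into the shared base helper. A minimal sketch of the new call shape, with a hypothetical stand-in for the real `base_executor_kwargs`:

    # Sketch only: base_executor_kwargs here is a stub, not wptrunner's real one.
    def base_executor_kwargs(test_type, server_config, cache_manager,
                             run_info_data, **kwargs):
        # The base helper now receives run_info_data, so executors can read
        # run-time facts (debug build, e10s, ...) without extra plumbing.
        return {"server_config": server_config,
                "timeout_multiplier": kwargs.get("timeout_multiplier", 1)}

    def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                        **kwargs):
        executor_kwargs = base_executor_kwargs(test_type, server_config,
                                               cache_manager, run_info_data,
                                               **kwargs)
        executor_kwargs["close_after_done"] = True
        return executor_kwargs

    print(executor_kwargs("testharness", {}, None, {"debug": False},
                          timeout_multiplier=2))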
@@ -42,8 +42,8 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
         server_config['ports']['ws'] + server_config['ports']['wss']
     ))
 
-    executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+    executor_kwargs = base_executor_kwargs(test_type, server_config, cache_manager, run_info_data,
+                                           **kwargs)
     executor_kwargs["close_after_done"] = True
     capabilities = dict(DesiredCapabilities.CHROME.items())
     capabilities["chromeOptions"] = {}
@@ -36,7 +36,7 @@ def browser_kwargs(test_type, run_info_data, **kwargs):
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
     executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
                                                                    run_info_data,
@@ -88,11 +88,13 @@ def browser_kwargs(test_type, run_info_data, **kwargs):
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data,
+                                           **kwargs)
     executor_kwargs["close_after_done"] = test_type != "reftest"
     executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
                                                                    run_info_data,
                                                                    **kwargs)
     executor_kwargs["e10s"] = run_info_data["e10s"]
+    capabilities = {}
     if test_type == "reftest":
         executor_kwargs["reftest_internal"] = kwargs["reftest_internal"]
@@ -111,6 +113,7 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
     capabilities["acceptInsecureCerts"] = True
     if capabilities:
         executor_kwargs["capabilities"] = capabilities
+    executor_kwargs["debug"] = run_info_data["debug"]
     return executor_kwargs
 
 
@@ -31,7 +31,7 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
     capabilities = {}
     capabilities["se:ieOptions"] = options
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
     executor_kwargs["capabilities"] = capabilities
     return executor_kwargs
@@ -33,7 +33,7 @@ def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
     from selenium.webdriver import DesiredCapabilities
 
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
     capabilities = dict(DesiredCapabilities.OPERA.items())
     capabilities.setdefault("operaOptions", {})["prefs"] = {
@@ -3,13 +3,15 @@ from ..webdriver_server import SafariDriverServer
 from ..executors import executor_kwargs as base_executor_kwargs
 from ..executors.executorselenium import (SeleniumTestharnessExecutor,  # noqa: F401
                                           SeleniumRefTestExecutor)  # noqa: F401
+from ..executors.executorsafari import SafariDriverWdspecExecutor  # noqa: F401
 
 
 __wptrunner__ = {"product": "safari",
                  "check_args": "check_args",
                  "browser": "SafariBrowser",
                  "executor": {"testharness": "SeleniumTestharnessExecutor",
-                              "reftest": "SeleniumRefTestExecutor"},
+                              "reftest": "SeleniumRefTestExecutor",
+                              "wdspec": "SafariDriverWdspecExecutor"},
                  "browser_kwargs": "browser_kwargs",
                  "executor_kwargs": "executor_kwargs",
                  "env_extras": "env_extras",
@@ -27,12 +29,10 @@ def browser_kwargs(test_type, run_info_data, **kwargs):
 
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
-    from selenium.webdriver import DesiredCapabilities
-
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
-    executor_kwargs["capabilities"] = dict(DesiredCapabilities.SAFARI.items())
+    executor_kwargs["capabilities"] = {}
     if kwargs["binary"] is not None:
         raise ValueError("Safari doesn't support setting executable location")
 
@@ -104,7 +104,7 @@ def browser_kwargs(test_type, run_info_data, **kwargs):
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
 
     executor_kwargs["capabilities"] = get_capabilities(**kwargs)
 
@@ -40,7 +40,7 @@ def browser_kwargs(test_type, run_info_data, **kwargs):
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     rv = base_executor_kwargs(test_type, server_config,
-                              cache_manager, **kwargs)
+                              cache_manager, run_info_data, **kwargs)
     rv["pause_after_test"] = kwargs["pause_after_test"]
     if test_type == "wdspec":
         rv["capabilities"] = {}
@@ -44,7 +44,7 @@ def browser_kwargs(test_type, run_info_data, **kwargs):
 
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data, **kwargs):
     rv = base_executor_kwargs(test_type, server_config,
-                              cache_manager, **kwargs)
+                              cache_manager, run_info_data, **kwargs)
     return rv
 
 
@@ -51,7 +51,7 @@ def capabilities_for_port(server_config, **kwargs):
 def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
                     **kwargs):
     executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
+                                           cache_manager, run_info_data, **kwargs)
     executor_kwargs["close_after_done"] = True
     executor_kwargs["capabilities"] = capabilities_for_port(server_config,
                                                             **kwargs)
@@ -17,7 +17,8 @@ here = os.path.split(__file__)[0]
 extra_timeout = 5  # seconds
 
 
-def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
+def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
+                    **kwargs):
     timeout_multiplier = kwargs["timeout_multiplier"]
     if timeout_multiplier is None:
         timeout_multiplier = 1
@@ -61,12 +62,12 @@ class TestharnessResultConverter(object):
                      2: "TIMEOUT",
                      3: "NOTRUN"}
 
-    def __call__(self, test, result):
+    def __call__(self, test, result, extra=None):
         """Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
         result_url, status, message, stack, subtest_results = result
         assert result_url == test.url, ("Got results from %s, expected %s" %
-                                        (result_url, test.url))
-        harness_result = test.result_cls(self.harness_codes[status], message)
+                                        (result_url, test.url))
+        harness_result = test.result_cls(self.harness_codes[status], message, extra=extra, stack=stack)
         return (harness_result,
                 [test.subtest_result_cls(st_name, self.test_codes[st_status], st_message, st_stack)
                  for st_name, st_status, st_message, st_stack in subtest_results])
@@ -76,8 +77,11 @@ testharness_result_converter = TestharnessResultConverter()
 
 
 def reftest_result_converter(self, test, result):
-    return (test.result_cls(result["status"], result["message"],
-                            extra=result.get("extra")), [])
+    return (test.result_cls(
+        result["status"],
+        result["message"],
+        extra=result.get("extra", {}),
+        stack=result.get("stack")), [])
 
 
 def pytest_result_converter(self, test, data):
@@ -20,7 +20,8 @@ from .base import (CallbackHandler,
                    WebDriverProtocol,
                    extra_timeout,
                    strip_server)
-from .protocol import (BaseProtocolPart,
+from .protocol import (AssertsProtocolPart,
+                       BaseProtocolPart,
                        TestharnessProtocolPart,
                        PrefsProtocolPart,
                        Protocol,
@@ -288,6 +289,50 @@ class MarionetteStorageProtocolPart(StorageProtocolPart):
         self.marionette.execute_script(script)
 
 
+class MarionetteAssertsProtocolPart(AssertsProtocolPart):
+    def setup(self):
+        self.assert_count = {"chrome": 0, "content": 0}
+        self.chrome_assert_count = 0
+        self.marionette = self.parent.marionette
+
+    def get(self):
+        script = """
+        debug = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2);
+        if (debug.isDebugBuild) {
+          return debug.assertionCount;
+        }
+        return 0;
+        """
+
+        def get_count(context, **kwargs):
+            try:
+                context_count = self.marionette.execute_script(script, **kwargs)
+                if context_count:
+                    self.parent.logger.info("Got %s assert count %s" % (context, context_count))
+                    test_count = context_count - self.assert_count[context]
+                    self.assert_count[context] = context_count
+                    return test_count
+            except errors.NoSuchWindowException:
+                # If the window was already closed
+                self.parent.logger.warning("Failed to get assertion count; window was closed")
+            except (errors.MarionetteException, socket.error):
+                # This usually happens if the process crashed
+                pass
+
+        counts = []
+        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
+            counts.append(get_count("chrome"))
+            if self.parent.e10s:
+                counts.append(get_count("content", sandbox="system"))
+
+        counts = [item for item in counts if item is not None]
+
+        if not counts:
+            return None
+
+        return sum(counts)
+
+
 class MarionetteSelectorProtocolPart(SelectorProtocolPart):
     def setup(self):
         self.marionette = self.parent.marionette
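Editor's note (not part of the commit): the contract implied by the part above is that `asserts.get()` returns the number of new non-fatal assertions since the last read, or `None` when no count could be obtained (window closed, process crashed). A self-contained sketch of how an executor consumes that contract:

    # Sketch only: FakeAssertsPart is a hypothetical stand-in for the
    # Marionette part above, used to show the get() return convention.
    class FakeAssertsPart(object):
        def __init__(self, counts):
            self._counts = iter(counts)

        def get(self):
            # Returns a per-test delta, or None when unavailable.
            return next(self._counts, None)

    asserts = FakeAssertsPart([0, 3, None])
    for _ in range(3):
        count = asserts.get()
        if count is not None:
            extra = {"assertion_count": count}  # what the executor attaches
            print(extra)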
@@ -303,6 +348,7 @@ class MarionetteClickProtocolPart(ClickProtocolPart):
     def element(self, element):
         return element.click()
 
+
 class MarionetteSendKeysProtocolPart(SendKeysProtocolPart):
     def setup(self):
         self.marionette = self.parent.marionette
@@ -310,6 +356,7 @@ class MarionetteSendKeysProtocolPart(SendKeysProtocolPart):
     def send_keys(self, element, keys):
         return element.send_keys(keys)
 
+
 class MarionetteTestDriverProtocolPart(TestDriverProtocolPart):
     def setup(self):
         self.marionette = self.parent.marionette
@@ -332,9 +379,10 @@ class MarionetteProtocol(Protocol):
                   MarionetteSelectorProtocolPart,
                   MarionetteClickProtocolPart,
                   MarionetteSendKeysProtocolPart,
-                  MarionetteTestDriverProtocolPart]
+                  MarionetteTestDriverProtocolPart,
+                  MarionetteAssertsProtocolPart]
 
-    def __init__(self, executor, browser, capabilities=None, timeout_multiplier=1):
+    def __init__(self, executor, browser, capabilities=None, timeout_multiplier=1, e10s=True):
         do_delayed_imports()
 
         super(MarionetteProtocol, self).__init__(executor, browser)
@@ -343,11 +391,12 @@ class MarionetteProtocol(Protocol):
         self.capabilities = capabilities
         self.timeout_multiplier = timeout_multiplier
         self.runner_handle = None
+        self.e10s = e10s
 
     def connect(self):
         self.logger.debug("Connecting to Marionette on port %i" % self.marionette_port)
         startup_timeout = marionette.Marionette.DEFAULT_STARTUP_TIMEOUT * self.timeout_multiplier
-        self.marionette = marionette.Marionette(host='localhost',
+        self.marionette = marionette.Marionette(host='127.0.0.1',
                                                 port=self.marionette_port,
                                                 socket_timeout=None,
                                                 startup_timeout=startup_timeout)
@@ -432,15 +481,18 @@ class ExecuteAsyncScriptRun(object):
             self.logger.error("Lost marionette connection before starting test")
             return Stop
 
-        executor = threading.Thread(target = self._run)
-        executor.start()
-
         if timeout is not None:
             wait_timeout = timeout + 2 * extra_timeout
         else:
             wait_timeout = None
 
-        self.result_flag.wait(wait_timeout)
+        timer = threading.Timer(wait_timeout, self._timeout)
+        timer.start()
+
+        self._run()
+
+        self.result_flag.wait()
+        timer.cancel()
 
         if self.result == (None, None):
             self.logger.debug("Timed out waiting for a result")
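Editor's note (not part of the commit): the hunk above inverts the threading model — the script now runs on the calling thread while a `threading.Timer` fires `_timeout()` to record an EXTERNAL-TIMEOUT. A runnable miniature of the pattern, with a deliberately slow "test":

    # Sketch only: demonstrates the Timer-based timeout used above.
    import threading

    result_flag = threading.Event()
    result = []

    def _timeout():
        result.append(("EXTERNAL-TIMEOUT", None))
        result_flag.set()

    timer = threading.Timer(0.1, _timeout)
    timer.start()
    # ... the (possibly slow) operation would run here, setting result_flag
    # itself on completion ...
    result_flag.wait(0.5)
    timer.cancel()
    print(result)  # [("EXTERNAL-TIMEOUT", None)] since nothing finished first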
@@ -476,23 +528,27 @@ class ExecuteAsyncScriptRun(object):
         finally:
             self.result_flag.set()
 
+    def _timeout(self):
+        self.result = False, ("EXTERNAL-TIMEOUT", None)
+        self.result_flag.set()
+
 
 class MarionetteTestharnessExecutor(TestharnessExecutor):
     supports_testdriver = True
 
     def __init__(self, browser, server_config, timeout_multiplier=1,
                  close_after_done=True, debug_info=None, capabilities=None,
-                 **kwargs):
+                 debug=False, **kwargs):
         """Marionette-based executor for testharness.js tests"""
         TestharnessExecutor.__init__(self, browser, server_config,
                                      timeout_multiplier=timeout_multiplier,
                                      debug_info=debug_info)
 
-        self.protocol = MarionetteProtocol(self, browser, capabilities, timeout_multiplier)
+        self.protocol = MarionetteProtocol(self, browser, capabilities, timeout_multiplier, kwargs["e10s"])
         self.script = open(os.path.join(here, "testharness_webdriver.js")).read()
         self.script_resume = open(os.path.join(here, "testharness_webdriver_resume.js")).read()
         self.close_after_done = close_after_done
         self.window_id = str(uuid.uuid4())
+        self.debug = debug
 
         self.original_pref_values = {}
 
@@ -517,10 +573,23 @@ class MarionetteTestharnessExecutor(TestharnessExecutor):
                                              self.protocol,
                                              self.test_url(test),
                                              timeout).run()
-        if success:
-            return self.convert_result(test, data)
+        # The format of data depends on whether the test ran to completion or not
+        # For asserts we only care about the fact that if it didn't complete, the
+        # status is in the first field.
+        status = None
+        if not success:
+            status = data[0]
 
-        return (test.result_cls(*data), [])
+        extra = None
+        if self.debug and (success or status not in ("CRASH", "INTERNAL-ERROR")):
+            assertion_count = self.protocol.asserts.get()
+            if assertion_count is not None:
+                extra = {"assertion_count": assertion_count}
+
+        if success:
+            return self.convert_result(test, data, extra=extra)
+
+        return (test.result_cls(extra=extra, *data), [])
 
     def do_testharness(self, protocol, url, timeout):
         protocol.base.execute_script("if (window.win) {window.win.close()}")
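Editor's note (not part of the commit): the `extra` dict built above ends up on the result object via the `Result` constructor changed later in this diff (`wpttest.py`). A tiny stand-in showing the flow:

    # Sketch only: Result mirrors the signature wpttest.Result gains in this
    # commit; it is a stand-in, not the real class.
    class Result(object):
        def __init__(self, status, message, expected=None, extra=None, stack=None):
            self.status = status
            self.message = message
            self.expected = expected
            self.extra = extra if extra is not None else {}
            self.stack = stack

    harness_result = Result("OK", None, extra={"assertion_count": 2})
    assert harness_result.extra["assertion_count"] == 2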
@@ -561,7 +630,7 @@ class MarionetteRefTestExecutor(RefTestExecutor):
                  screenshot_cache=None, close_after_done=True,
                  debug_info=None, reftest_internal=False,
                  reftest_screenshot="unexpected",
-                 group_metadata=None, capabilities=None, **kwargs):
+                 group_metadata=None, capabilities=None, debug=False, **kwargs):
         """Marionette-based executor for reftests"""
         RefTestExecutor.__init__(self,
                                  browser,
@@ -570,7 +639,7 @@ class MarionetteRefTestExecutor(RefTestExecutor):
                                  timeout_multiplier=timeout_multiplier,
                                  debug_info=debug_info)
         self.protocol = MarionetteProtocol(self, browser, capabilities,
-                                           timeout_multiplier)
+                                           timeout_multiplier, kwargs["e10s"])
         self.implementation = (InternalRefTestImplementation
                                if reftest_internal
                                else RefTestImplementation)(self)
@@ -581,6 +650,7 @@ class MarionetteRefTestExecutor(RefTestExecutor):
         self.has_window = False
         self.original_pref_values = {}
         self.group_metadata = group_metadata
+        self.debug = debug
 
         with open(os.path.join(here, "reftest.js")) as f:
             self.script = f.read()
@@ -621,6 +691,13 @@ class MarionetteRefTestExecutor(RefTestExecutor):
             self.has_window = True
 
         result = self.implementation.run_test(test)
+
+        if self.debug:
+            assertion_count = self.protocol.asserts.get()
+            if "extra" not in result:
+                result["extra"] = {}
+            result["extra"]["assertion_count"] = assertion_count
+
         return self.convert_result(test, result)
 
     def screenshot(self, test, viewport_size, dpi):
@@ -0,0 +1,10 @@
+from ..webdriver_server import SafariDriverServer
+from .base import WdspecExecutor, WebDriverProtocol
+
+
+class SafariDriverProtocol(WebDriverProtocol):
+    server_cls = SafariDriverServer
+
+
+class SafariDriverWdspecExecutor(WdspecExecutor):
+    protocol_cls = SafariDriverProtocol
@@ -290,3 +290,16 @@ class TestDriverProtocolPart(ProtocolPart):
         previous command succeeded.
         :param str message: Additional data to add to the message."""
         pass
+
+
+class AssertsProtocolPart(ProtocolPart):
+    """ProtocolPart that implements the functionality required to get a count of non-fatal
+    assertions triggered"""
+    __metaclass__ = ABCMeta
+
+    name = "asserts"
+
+    @abstractmethod
+    def get(self):
+        """Get a count of assertions since the last browser start"""
+        pass
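Editor's note (not part of the commit): only the Marionette implementation of this abstract part ships in this commit. A minimal sketch of what another product would have to provide (the `Null` fallback is hypothetical):

    # Sketch only: the required surface for a product-specific asserts part.
    from abc import ABCMeta, abstractmethod

    class AssertsProtocolPart(object):
        __metaclass__ = ABCMeta  # Python 2 style, matching the codebase
        name = "asserts"

        @abstractmethod
        def get(self):
            pass

    class NullAssertsProtocolPart(AssertsProtocolPart):
        """Hypothetical fallback for browsers with no assertion counter."""
        def get(self):
            return None  # None means "no count available"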
@@ -54,6 +54,8 @@ class WptreportFormatter(BaseFormatter):
     def test_status(self, data):
         subtest = self.create_subtest(data)
         subtest["status"] = data["status"]
+        if "expected" in data:
+            subtest["expected"] = data["expected"]
         if "message" in data:
             subtest["message"] = data["message"]
 
@@ -62,5 +64,15 @@ class WptreportFormatter(BaseFormatter):
         start_time = test.pop("start_time")
         test["duration"] = data["time"] - start_time
         test["status"] = data["status"]
+        if "expected" in data:
+            test["expected"] = data["expected"]
         if "message" in data:
             test["message"] = data["message"]
+
+    def assertion_count(self, data):
+        test = self.find_or_create_test(data)
+        test["asserts"] = {
+            "count": data["count"],
+            "min": data["min_expected"],
+            "max": data["max_expected"]
+        }
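Editor's note (not part of the commit): given an `assertion_count` log action, the formatter above leaves an entry like the following on the test object (field names from the code; the surrounding report structure is abbreviated and the test URL is made up):

    # Sketch only: shape of the per-test wptreport entry after this change.
    entry = {
        "test": "/dom/example.html",
        "status": "OK",
        "asserts": {"count": 2, "min": 0, "max": 4},
    }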
@@ -33,6 +33,14 @@ def bool_prop(name, node):
         return None
 
 
+def int_prop(name, node):
+    """Boolean property"""
+    try:
+        return int(node.get(name))
+    except KeyError:
+        return None
+
+
 def tags(node):
     """Set of tags that have been applied to the test"""
     try:
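Editor's note (not part of the commit): `int_prop` reads integer-valued keys such as the new `min-asserts`/`max-asserts` from expectation manifests. A manifest using them would look roughly like this (file name hypothetical; the key names come from this diff):

    [example.html]
      min-asserts: 1
      max-asserts: 3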
@@ -113,6 +121,14 @@ class ExpectedManifest(ManifestItem):
     def leaks(self):
         return bool_prop("leaks", self)
 
+    @property
+    def min_assertion_count(self):
+        return int_prop("min-asserts", self)
+
+    @property
+    def max_assertion_count(self):
+        return int_prop("max-asserts", self)
+
     @property
     def tags(self):
         return tags(self)
@@ -135,6 +151,14 @@ class DirectoryManifest(ManifestItem):
     def leaks(self):
         return bool_prop("leaks", self)
 
+    @property
+    def min_assertion_count(self):
+        return int_prop("min-asserts", self)
+
+    @property
+    def max_assertion_count(self):
+        return int_prop("max-asserts", self)
+
     @property
     def tags(self):
         return tags(self)
@@ -184,6 +208,14 @@ class TestNode(ManifestItem):
     def leaks(self):
         return bool_prop("leaks", self)
 
+    @property
+    def min_assertion_count(self):
+        return int_prop("min-asserts", self)
+
+    @property
+    def max_assertion_count(self):
+        return int_prop("max-asserts", self)
+
     @property
     def tags(self):
         return tags(self)
@@ -1,3 +1,4 @@
+import itertools
 import os
 import urlparse
 from collections import namedtuple, defaultdict
@@ -35,7 +36,7 @@ class ConditionError(Exception):
         self.cond = cond
 
 
-Result = namedtuple("Result", ["run_info", "status"])
+Value = namedtuple("Value", ["run_info", "value"])
 
 
 def data_cls_getter(output_node, visited_node):
@@ -113,12 +114,14 @@ class TestNode(ManifestItem):
         :param node: AST node associated with the test"""
 
         ManifestItem.__init__(self, node)
-        self.updated_expected = []
-        self.new_expected = []
-        self.new_disabled = False
         self.subtests = {}
         self.default_status = None
         self._from_file = True
+        self.new_disabled = False
+        self.update_properties = {
+            "expected": ExpectedUpdate(self),
+            "max-asserts": MaxAssertsUpdate(self),
+            "min-asserts": MinAssertsUpdate(self)
+        }
 
     @classmethod
     def create(cls, test_id):
@@ -128,7 +131,7 @@ class TestNode(ManifestItem):
         :param test_id: The id of the test"""
 
         url = test_id
-        name = url.split("/")[-1]
+        name = url.rsplit("/", 1)[1]
         node = DataNode(name)
         self = cls(node)
 
@@ -168,130 +171,37 @@ class TestNode(ManifestItem):
            to this run
         :param result: Status of the test in this run"""
 
-        if self.default_status is not None:
-            assert self.default_status == result.default_expected
-        else:
-            self.default_status = result.default_expected
+        self.update_properties["expected"].set(run_info, result)
 
-        # Add this result to the list of results satisfying
-        # any condition in the list of updated results it matches
-        for (cond, values) in self.updated_expected:
-            if cond(run_info):
-                values.append(Result(run_info, result.status))
-                if result.status != cond.value:
-                    self.root.modified = True
-                break
-        else:
-            # We didn't find a previous value for this
-            self.new_expected.append(Result(run_info, result.status))
-            self.root.modified = True
+    def set_asserts(self, run_info, count):
+        """Set the assert count of a test
 
-    def coalesce_expected(self, stability=None):
-        """Update the underlying manifest AST for this test based on all the
-        added results.
-
-        This will update existing conditionals if they got the same result in
-        all matching runs in the updated results, will delete existing conditionals
-        that get more than one different result in the updated run, and add new
-        conditionals for anything that doesn't match an existing conditional.
-
-        Conditionals not matched by any added result are not changed.
-
-        When `stability` is not None, disable any test that shows multiple
-        unexpected results for the same set of parameters.
-        """
-
-        try:
-            unconditional_status = self.get("expected")
-        except KeyError:
-            unconditional_status = self.default_status
-
-        for conditional_value, results in self.updated_expected:
-            if not results:
-                # The conditional didn't match anything in these runs so leave it alone
-                pass
-            elif all(results[0].status == result.status for result in results):
-                # All the new values for this conditional matched, so update the node
-                result = results[0]
-                if (result.status == unconditional_status and
-                    conditional_value.condition_node is not None):
-                    if "expected" in self:
-                        self.remove_value("expected", conditional_value)
-                else:
-                    conditional_value.value = result.status
-            elif conditional_value.condition_node is not None:
-                # Blow away the existing condition and rebuild from scratch
-                # This isn't sure to work if we have a conditional later that matches
-                # these values too, but we can hope, verify that we get the results
-                # we expect, and if not let a human sort it out
-                self.remove_value("expected", conditional_value)
-                self.new_expected.extend(results)
-            elif conditional_value.condition_node is None:
-                self.new_expected.extend(result for result in results
-                                         if result.status != unconditional_status)
-
-        # It is an invariant that nothing in new_expected matches an existing
-        # condition except for the default condition
-
-        if self.new_expected:
-            if all(self.new_expected[0].status == result.status
-                   for result in self.new_expected) and not self.updated_expected:
-                status = self.new_expected[0].status
-                if status != self.default_status:
-                    self.set("expected", status, condition=None)
-            else:
-                try:
-                    conditionals = group_conditionals(
-                        self.new_expected,
-                        property_order=self.root.property_order,
-                        boolean_properties=self.root.boolean_properties)
-                except ConditionError as e:
-                    if stability is not None:
-                        self.set("disabled", stability or "unstable", e.cond.children[0])
-                        self.new_disabled = True
-                    else:
-                        print "Conflicting test results for %s, cannot update" % self.root.test_path
-                    return
-                for conditional_node, status in conditionals:
-                    if status != unconditional_status:
-                        self.set("expected", status, condition=conditional_node.children[0])
-
-        if ("expected" in self._data and
-            len(self._data["expected"]) > 0 and
-            self._data["expected"][-1].condition_node is None and
-            self._data["expected"][-1].value == self.default_status):
-
-            self.remove_value("expected", self._data["expected"][-1])
-
-        if ("expected" in self._data and
-            len(self._data["expected"]) == 0):
-            for child in self.node.children:
-                if (isinstance(child, KeyValueNode) and
-                    child.data == "expected"):
-                    child.remove()
-                    break
+        """
+        self.update_properties["min-asserts"].set(run_info, count)
+        self.update_properties["max-asserts"].set(run_info, count)
 
     def _add_key_value(self, node, values):
         ManifestItem._add_key_value(self, node, values)
-        if node.data == "expected":
-            self.updated_expected = []
+        if node.data in self.update_properties:
+            new_updated = []
+            self.update_properties[node.data].updated = new_updated
             for value in values:
-                self.updated_expected.append((value, []))
+                new_updated.append((value, []))
 
-    def clear_expected(self):
+    def clear(self, key):
         """Clear all the expected data for this test and all of its subtests"""
 
-        self.updated_expected = []
-        if "expected" in self._data:
+        self.updated = []
+        if key in self._data:
             for child in self.node.children:
                 if (isinstance(child, KeyValueNode) and
-                        child.data == "expected"):
+                        child.data == key):
                     child.remove()
-                    del self._data["expected"]
+                    del self._data[key]
                     break
 
         for subtest in self.subtests.itervalues():
-            subtest.clear_expected()
+            subtest.clear(key)
 
     def append(self, node):
         child = ManifestItem.append(self, node)
@@ -311,6 +221,10 @@ class TestNode(ManifestItem):
         self.append(subtest)
         return subtest
 
+    def coalesce_properties(self, stability):
+        for prop_update in self.update_properties.itervalues():
+            prop_update.coalesce(stability)
+
 
 class SubtestNode(TestNode):
     def __init__(self, node):
@@ -330,21 +244,252 @@ class SubtestNode(TestNode):
         return True
 
 
+class PropertyUpdate(object):
+    property_name = None
+    cls_default_value = None
+    value_type = None
+
+    def __init__(self, node):
+        self.node = node
+        self.updated = []
+        self.new = []
+        self.default_value = self.cls_default_value
+
+    def set(self, run_info, in_value):
+        self.check_default(in_value)
+        value = self.get_value(in_value)
+
+        # Add this result to the list of results satisfying
+        # any condition in the list of updated results it matches
+        for (cond, values) in self.updated:
+            if cond(run_info):
+                values.append(Value(run_info, value))
+                if value != cond.value_as(self.value_type):
+                    self.node.root.modified = True
+                break
+        else:
+            # We didn't find a previous value for this
+            self.new.append(Value(run_info, value))
+            self.node.root.modified = True
+
+    def check_default(self, result):
+        return
+
+    def get_value(self, in_value):
+        return in_value
+
+    def coalesce(self, stability=None):
+        """Update the underlying manifest AST for this test based on all the
+        added results.
+
+        This will update existing conditionals if they got the same result in
+        all matching runs in the updated results, will delete existing conditionals
+        that get more than one different result in the updated run, and add new
+        conditionals for anything that doesn't match an existing conditional.
+
+        Conditionals not matched by any added result are not changed.
+
+        When `stability` is not None, disable any test that shows multiple
+        unexpected results for the same set of parameters.
+        """
+
+        try:
+            unconditional_value = self.node.get(self.property_name)
+            if self.value_type:
+                unconditional_value = self.value_type(unconditional_value)
+        except KeyError:
+            unconditional_value = self.default_value
+
+        for conditional_value, results in self.updated:
+            if not results:
+                # The conditional didn't match anything in these runs so leave it alone
+                pass
+            elif all(results[0].value == result.value for result in results):
+                # All the new values for this conditional matched, so update the node
+                result = results[0]
+                if (result.value == unconditional_value and
+                    conditional_value.condition_node is not None):
+                    if self.property_name in self.node:
+                        self.node.remove_value(self.property_name, conditional_value)
+                else:
+                    conditional_value.value = self.update_value(conditional_value.value_as(self.value_type),
+                                                                result.value)
+            elif conditional_value.condition_node is not None:
+                # Blow away the existing condition and rebuild from scratch
+                # This isn't sure to work if we have a conditional later that matches
+                # these values too, but we can hope, verify that we get the results
+                # we expect, and if not let a human sort it out
+                self.node.remove_value(self.property_name, conditional_value)
+                self.new.extend(results)
+            elif conditional_value.condition_node is None:
+                self.new.extend(result for result in results
+                                if result.value != unconditional_value)
+
+        # It is an invariant that nothing in new matches an existing
+        # condition except for the default condition
+        if self.new:
+            update_default, new_default_value = self.update_default()
+            if update_default:
+                if new_default_value != self.default_value:
+                    self.node.set(self.property_name, self.update_value(None, new_default_value), condition=None)
+            else:
+                self.add_new(unconditional_value, stability)
+
+        # Remove cases where the value matches the default
+        if (self.property_name in self.node._data and
+            len(self.node._data[self.property_name]) > 0 and
+            self.node._data[self.property_name][-1].condition_node is None and
+            self.node._data[self.property_name][-1].value_as(self.value_type) == self.default_value):
+
+            self.node.remove_value(self.property_name, self.node._data[self.property_name][-1])
+
+        # Remove empty properties
+        if (self.property_name in self.node._data and len(self.node._data[self.property_name]) == 0):
+            for child in self.node.children:
+                if (isinstance(child, KeyValueNode) and child.data == self.property_name):
+                    child.remove()
+                    break
+
+    def update_default(self):
+        """Get the updated default value for the property (i.e. the one chosen when no conditions match).
+
+        :returns: (update, new_default_value) where updated is a bool indicating whether the property
+                  should be updated, and new_default_value is the value to set if it should."""
+        raise NotImplementedError
+
+    def add_new(self, unconditional_value, stability=False):
+        """Add new conditional values for the property.
+
+        Subclasses need not implement this if they only ever update the default value."""
+        raise NotImplementedError
+
+    def update_value(self, old_value, new_value):
+        """Get a value to set on the property, given its previous value and the new value from logs.
+
+        By default this just returns the new value, but overriding is useful in cases
+        where we want the new value to be some function of both old and new e.g. max(old_value, new_value)"""
+        return new_value
+
+
+class ExpectedUpdate(PropertyUpdate):
+    property_name = "expected"
+
+    def check_default(self, result):
+        if self.default_value is not None:
+            assert self.default_value == result.default_expected
+        else:
+            self.default_value = result.default_expected
+
+    def get_value(self, in_value):
+        return in_value.status
+
+    def update_default(self):
+        update_default = all(self.new[0].value == result.value
+                             for result in self.new) and not self.updated
+        new_value = self.new[0].value
+        return update_default, new_value
+
+    def add_new(self, unconditional_value, stability=False):
+        try:
+            conditionals = group_conditionals(
+                self.new,
+                property_order=self.node.root.property_order,
+                boolean_properties=self.node.root.boolean_properties)
+        except ConditionError as e:
+            if stability is not None:
+                self.node.set("disabled", stability or "unstable", e.cond.children[0])
+                self.node.new_disabled = True
+            else:
+                print "Conflicting metadata values for %s, cannot update" % self.root.test_path
+            return
+        for conditional_node, value in conditionals:
+            if value != unconditional_value:
+                self.node.set(self.property_name, value, condition=conditional_node.children[0])
+
+
+class MaxAssertsUpdate(PropertyUpdate):
+    property_name = "max-asserts"
+    cls_default_value = 0
+    value_type = int
+
+    def update_value(self, old_value, new_value):
+        if old_value is not None:
+            old_value = self.value_type(old_value)
+        if old_value and old_value < new_value:
+            return new_value
+        if old_value is None:
+            return new_value
+        return old_value
+
+    def update_default(self):
+        """For asserts we always update the default value and never add new conditionals.
+        The value we set as the default is the maximum the current default or one more than the
+        number of asserts we saw in any configuration."""
+        # Current values
+        values = []
+        current_default = None
+        if self.property_name in self.node._data:
+            current_default = [item for item in
+                               self.node._data[self.property_name]
+                               if item.condition_node is None]
+        if current_default:
+            values.append(int(current_default[0].value))
+        values.extend(item.value + 1 for item in self.new)
+        values.extend(item.value + 1 for item in
+                      itertools.chain.from_iterable(results for _, results in self.updated))
+        new_value = max(values)
+        return True, new_value
+
+
+class MinAssertsUpdate(PropertyUpdate):
+    property_name = "min-asserts"
+    cls_default_value = 0
+    value_type = int
+
+    def update_value(self, old_value, new_value):
+        if old_value is not None:
+            old_value = self.value_type(old_value)
+        if old_value and new_value < old_value:
+            return 0
+        if old_value is None:
+            # If we are getting some asserts for the first time, set the minimum to 0
+            return 0
+        return old_value
+
+    def update_default(self):
+        """For asserts we always update the default value and never add new conditionals.
+        This is either set to the current value or one less than the number of asserts
+        we saw, whichever is lower."""
+        values = []
+        current_default = None
+        if self.property_name in self.node._data:
+            current_default = [item for item in
+                               self.node._data[self.property_name]
+                               if item.condition_node is None]
+        if current_default:
+            values.append(current_default[0].value_as(self.value_type))
+        values.extend(max(0, item.value - 1) for item in self.new)
+        values.extend(max(0, item.value - 1) for item in
+                      itertools.chain.from_iterable(results for _, results in self.updated))
+        new_value = min(values)
+        return True, new_value
+
+
 def group_conditionals(values, property_order=None, boolean_properties=None):
-    """Given a list of Result objects, return a list of
+    """Given a list of Value objects, return a list of
     (conditional_node, status) pairs representing the conditional
     expressions that are required to match each status
 
-    :param values: List of Results
+    :param values: List of Values
     :param property_order: List of properties to use in expectation metadata
                            from most to least significant.
     :param boolean_properties: Set of properties in property_order that should
                                be treated as boolean."""
 
     by_property = defaultdict(set)
-    for run_info, status in values:
+    for run_info, value in values:
         for prop_name, prop_value in run_info.iteritems():
-            by_property[(prop_name, prop_value)].add(status)
+            by_property[(prop_name, prop_value)].add(value)
 
     if property_order is None:
         property_order = ["debug", "os", "version", "processor", "bits"]
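Editor's worked example (not in the commit): how `MaxAssertsUpdate.update_default` picks a new default. With a current unconditional `max-asserts` of 2 and observed per-run counts of 1 and 4, the candidates are [2, 1+1, 4+1] and the new default is 5; `MinAssertsUpdate` does the mirror-image `min()` over `count - 1` values, floored at 0:

    # Sketch of the arithmetic in the two update_default methods above.
    max_candidates = [2] + [c + 1 for c in (1, 4)]
    assert max(max_candidates) == 5          # new max-asserts default

    min_candidates = [1] + [max(0, c - 1) for c in (1, 4)]
    assert min(min_candidates) == 0          # new min-asserts default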
@@ -372,21 +517,21 @@ def group_conditionals(values, property_order=None, boolean_properties=None):
 
     conditions = {}
 
-    for run_info, status in values:
+    for run_info, value in values:
         prop_set = tuple((prop, run_info[prop]) for prop in include_props)
         if prop_set in conditions:
-            if conditions[prop_set][1] != status:
+            if conditions[prop_set][1] != value:
                 # A prop_set contains contradictory results
-                raise ConditionError(make_expr(prop_set, status, boolean_properties))
+                raise ConditionError(make_expr(prop_set, value, boolean_properties))
             continue
 
-        expr = make_expr(prop_set, status, boolean_properties=boolean_properties)
-        conditions[prop_set] = (expr, status)
+        expr = make_expr(prop_set, value, boolean_properties=boolean_properties)
+        conditions[prop_set] = (expr, value)
 
     return conditions.values()
 
 
-def make_expr(prop_set, status, boolean_properties=None):
+def make_expr(prop_set, rhs, boolean_properties=None):
     """Create an AST that returns the value ``status`` given all the
     properties in prop_set match.
 
@@ -434,7 +579,11 @@ def make_expr(prop_set, rhs, boolean_properties=None):
         node = expressions[0]
 
     root.append(node)
-    root.append(StringNode(status))
+    if type(rhs) in number_types:
+        rhs_node = NumberNode(rhs)
+    else:
+        rhs_node = StringNode(rhs)
+    root.append(rhs_node)
 
     return root
 
@@ -17,6 +17,13 @@ manifestitem = None
 
 logger = structuredlog.StructuredLogger("web-platform-tests")
 
+try:
+    import ujson
+except ImportError:
+    pass
+else:
+    reader.json = ujson
+
 
 def load_test_manifests(serve_root, test_paths):
     do_delayed_imports(serve_root)
@@ -162,11 +169,12 @@ def update_from_logs(manifests, *log_filenames, **kwargs):
     for tree in manifest_expected.itervalues():
         for test in tree.iterchildren():
             for subtest in test.iterchildren():
-                subtest.coalesce_expected(stability=stability)
-            test.coalesce_expected(stability=stability)
+                subtest.coalesce_properties(stability=stability)
+            test.coalesce_properties(stability=stability)
 
     return expected_map
 
 
 def directory_manifests(metadata_path):
     rv = []
     for dirpath, dirname, filenames in os.walk(metadata_path):
@@ -175,6 +183,7 @@ def directory_manifests(metadata_path):
             rv.append(os.path.join(rel_path, "__dir__.ini"))
     return rv
 
+
 def write_changes(metadata_path, expected_map):
     # First write the new manifest files to a temporary directory
     temp_path = tempfile.mkdtemp(dir=os.path.split(metadata_path)[0])
@@ -228,11 +237,18 @@ class ExpectedUpdater(object):
         self.action_map = {"suite_start": self.suite_start,
                            "test_start": self.test_start,
                            "test_status": self.test_status,
-                           "test_end": self.test_end}
+                           "test_end": self.test_end,
+                           "assertion_count": self.assertion_count}
         self.tests_visited = {}
 
         self.test_cache = {}
 
+        self.types_by_path = {}
+        for manifest in self.test_manifests.iterkeys():
+            for test_type, path, _ in manifest:
+                if test_type in wpttest.manifest_test_cls:
+                    self.types_by_path[path] = wpttest.manifest_test_cls[test_type]
+
     def update_from_log(self, log_file):
         self.run_info = None
         log_reader = reader.read(log_file)
@@ -241,14 +257,6 @@ class ExpectedUpdater(object):
     def suite_start(self, data):
         self.run_info = data["run_info"]
 
-    def test_type(self, path):
-        for manifest in self.test_manifests.iterkeys():
-            tests = list(manifest.iterpath(path))
-            if len(tests):
-                assert all(test.item_type == tests[0].item_type for test in tests)
-                return tests[0].item_type
-        assert False
-
     def test_start(self, data):
         test_id = data["test"]
         try:
@@ -261,23 +269,24 @@ class ExpectedUpdater(object):
 
         if test_id not in self.tests_visited:
             if self.ignore_existing:
-                expected_node.clear_expected()
+                expected_node.clear("expected")
             self.tests_visited[test_id] = set()
 
     def test_status(self, data):
-        test = self.test_cache.get(data["test"])
+        test_id = data["test"]
+        test = self.test_cache.get(test_id)
         if test is None:
             return
-        test_cls = wpttest.manifest_test_cls[self.test_type(test.root.test_path)]
+        test_cls = self.types_by_path[test.root.test_path]
 
         subtest = test.get_subtest(data["subtest"])
 
-        self.tests_visited[test.id].add(data["subtest"])
+        self.tests_visited[test_id].add(data["subtest"])
 
         result = test_cls.subtest_result_cls(
             data["subtest"],
             data["status"],
-            data.get("message"))
+            None)
 
         subtest.set_result(self.run_info, result)
 
@@ -286,17 +295,25 @@ class ExpectedUpdater(object):
         test = self.test_cache.get(test_id)
         if test is None:
             return
-        test_cls = wpttest.manifest_test_cls[self.test_type(test.root.test_path)]
+        test_cls = self.types_by_path[test.root.test_path]
 
         if data["status"] == "SKIP":
             return
 
         result = test_cls.result_cls(
             data["status"],
-            data.get("message"))
+            None)
         test.set_result(self.run_info, result)
         del self.test_cache[test_id]
 
+    def assertion_count(self, data):
+        test_id = data["test"]
+        test = self.test_cache.get(test_id)
+        if test is None:
+            return
+
+        test.set_asserts(self.run_info, data["count"])
+
 
 def create_test_tree(metadata_path, test_manifest, property_order=None,
                      boolean_properties=None):
@@ -582,11 +582,20 @@ class TestRunnerManager(threading.Thread):
         if status == "CRASH":
             self.browser.log_crash(test.id)
 
+        if "assertion_count" in file_result.extra:
+            assertion_count = file_result.extra.pop("assertion_count")
+            if assertion_count > 0:
+                self.logger.assertion_count(test.id,
+                                            int(assertion_count),
+                                            test.min_assertion_count,
+                                            test.max_assertion_count)
+
         self.logger.test_end(test.id,
                              status,
                              message=file_result.message,
                              expected=expected,
-                             extra=file_result.extra)
+                             extra=file_result.extra,
+                             stack=file_result.stack)
 
         restart_before_next = (test.restart_after or
                                file_result.status in ("CRASH", "EXTERNAL-TIMEOUT", "INTERNAL-ERROR") or
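Editor's note (not part of the commit): the `logger.assertion_count(...)` call above emits a mozlog action whose payload looks roughly like the dict below; the exact log envelope is mozlog's and is not shown in this diff, but the `count`/`min_expected`/`max_expected` field names match what `WptreportFormatter.assertion_count` and `ExpectedUpdater.assertion_count` read:

    # Sketch only: assumed shape of the emitted assertion_count action.
    data = {"action": "assertion_count",
            "test": "/dom/example.html",   # made-up test id
            "count": 2,
            "min_expected": 0,
            "max_expected": 4}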
@@ -3,28 +3,37 @@ import sys
 from os.path import join, dirname
 from mozlog import structured
 
-sys.path.insert(0, join(dirname(__file__), "..", ".."))
+sys.path.insert(0, join(dirname(__file__), "..", "..", ".."))
 
 from wptrunner.testloader import EqualTimeChunker
+from manifest.sourcefile import SourceFile
 
 structured.set_default_logger(structured.structuredlog.StructuredLogger("TestChunker"))
 
 
+testharness_test = """<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>"""
+
+
 class MockTest(object):
     default_timeout = 10
 
-    def __init__(self, id, timeout=10):
+    def __init__(self, id, path, timeout=10, contents=testharness_test):
         self.id = id
+        self.url = "/" + path
         self.item_type = "testharness"
         self.timeout = timeout
+        self.source_file = SourceFile("/", path, "/", contents=contents)
 
 
 def make_mock_manifest(*items):
     rv = []
     for test_type, dir_path, num_tests in items:
         for i in range(num_tests):
+            filename = "/%i.html" % i
             rv.append((test_type,
-                       dir_path + "/%i.test" % i,
-                       set([MockTest(i)])))
+                       dir_path + filename,
+                       set([MockTest("%i.html" % i, dir_path + filename)])))
     return rv
 
 
@@ -0,0 +1,50 @@
+import os
+import sys
+from io import BytesIO
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
+
+from wptrunner import manifestexpected, wpttest
+from .test_chunker import make_mock_manifest
+
+dir_ini_0 = """\
+prefs: [a:b]
+"""
+
+dir_ini_1 = """\
+prefs: [@Reset, b:c]
+max-asserts: 2
+min-asserts: 1
+tags: [b, c]
+"""
+
+test_0 = """\
+[0.html]
+  prefs: [c:d]
+  max-asserts: 3
+  tags: [a, @Reset]
+"""
+
+
+def test_metadata_inherit():
+    tests = make_mock_manifest(("test", "a", 10), ("test", "a/b", 10),
+                               ("test", "c", 10))
+
+    inherit_metadata = [
+        manifestexpected.static.compile(
+            BytesIO(item),
+            {},
+            data_cls_getter=lambda x,y: manifestexpected.DirectoryManifest)
+        for item in [dir_ini_0, dir_ini_1]]
+    test_metadata = manifestexpected.static.compile(BytesIO(test_0),
+                                                    {},
+                                                    data_cls_getter=manifestexpected.data_cls_getter,
+                                                    test_path="a",
+                                                    url_base="")
+
+    test = tests[0][2].pop()
+    test_obj = wpttest.from_manifest(test, inherit_metadata, test_metadata.get_test(test.id))
+    assert test_obj.max_assertion_count == 3
+    assert test_obj.min_assertion_count == 1
+    assert test_obj.prefs == {"b": "c", "c": "d"}
+    assert test_obj.tags == {"a", "dir:a"}
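Editor's note (not part of the commit): this new unit test exercises the metadata-inheritance and assert-count plumbing end to end. Assuming a checkout with the wptrunner test dependencies installed, it can be invoked through pytest, for example:

    # Sketch only: pytest invocation; module selection by keyword.
    import pytest
    pytest.main(["-k", "test_metadata_inherit"])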
@@ -32,8 +32,20 @@ class ConditionalValue(object):
         return self.condition_func(run_info)
 
     def set_value(self, value):
+        if type(value) not in (str, unicode):
+            value = unicode(value)
         self.value = value
 
+    def value_as(self, type_func):
+        """Get value and convert to a given type.
+
+        This is unfortunate, but we don't currently have a good way to specify that
+        specific properties should have their data returned as specific types"""
+        value = self.value
+        if type_func is not None:
+            value = type_func(value)
+        return value
+
     def remove(self):
         if len(self.node.parent.children) == 1:
             self.node.parent.remove()
@@ -255,7 +267,7 @@ class ManifestItem(object):
             node = KeyValueNode(key)
             self.node.append(node)
 
-        value_node = ValueNode(value)
+        value_node = ValueNode(unicode(value))
         if condition is not None:
             conditional_node = ConditionalNode()
             conditional_node.append(condition)
|
@ -70,15 +70,19 @@ class ManifestSerializer(NodeVisitor):
|
|||
return ["".join(rv)]
|
||||
|
||||
def visit_ValueNode(self, node):
|
||||
if "#" in node.data or (isinstance(node.parent, ListNode) and
|
||||
("," in node.data or "]" in node.data)):
|
||||
if "\"" in node.data:
|
||||
if not isinstance(node.data, (str, unicode)):
|
||||
data = unicode(node.data)
|
||||
else:
|
||||
data = node.data
|
||||
if "#" in data or (isinstance(node.parent, ListNode) and
|
||||
("," in data or "]" in data)):
|
||||
if "\"" in data:
|
||||
quote = "'"
|
||||
else:
|
||||
quote = "\""
|
||||
else:
|
||||
quote = ""
|
||||
return [quote + escape(node.data, extras=quote) + quote]
|
||||
return [quote + escape(data, extras=quote) + quote]
|
||||
|
||||
def visit_AtomNode(self, node):
|
||||
return [atom_names[node.data]]
|
||||
|
|
|
@ -9,13 +9,14 @@ enabled_tests = set(["testharness", "reftest", "wdspec"])
|
|||
|
||||
|
||||
class Result(object):
|
||||
def __init__(self, status, message, expected=None, extra=None):
|
||||
def __init__(self, status, message, expected=None, extra=None, stack=None):
|
||||
if status not in self.statuses:
|
||||
raise ValueError("Unrecognised status %s" % status)
|
||||
self.status = status
|
||||
self.message = message
|
||||
self.expected = expected
|
||||
self.extra = extra
|
||||
self.extra = extra if extra is not None else {}
|
||||
self.stack = stack
|
||||
|
||||
def __repr__(self):
|
||||
return "<%s.%s %s>" % (self.__module__, self.__class__.__name__, self.status)
|
||||
|
@@ -165,15 +166,14 @@ class Test(object):
         return self._test_metadata
 
     def itermeta(self, subtest=None):
-        for metadata in self._inherit_metadata:
-            yield metadata
-
         if self._test_metadata is not None:
-            yield self._get_metadata()
             if subtest is not None:
                 subtest_meta = self._get_metadata(subtest)
                 if subtest_meta is not None:
                     yield subtest_meta
+            yield self._get_metadata()
+        for metadata in reversed(self._inherit_metadata):
+            yield metadata
 
     def disabled(self, subtest=None):
         for meta in self.itermeta(subtest):
@@ -198,16 +198,31 @@ class Test(object):
             return leaks
         return False
 
+    @property
+    def min_assertion_count(self):
+        for meta in self.itermeta(None):
+            count = meta.min_assertion_count
+            if count is not None:
+                return count
+        return 0
+
+    @property
+    def max_assertion_count(self):
+        for meta in self.itermeta(None):
+            count = meta.max_assertion_count
+            if count is not None:
+                return count
+        return 0
+
     @property
     def tags(self):
         tags = set()
         for meta in self.itermeta():
             meta_tags = meta.tags
-            tags |= meta_tags
+            if atom_reset in meta_tags:
+                tags = meta_tags.copy()
+                tags.remove(atom_reset)
+                break
+            else:
+                tags |= meta_tags
 
         tags.add("dir:%s" % self.id.lstrip("/").split("/")[0])
 
@@ -218,11 +233,10 @@ class Test(object):
         prefs = {}
         for meta in self.itermeta():
             meta_prefs = meta.prefs
-            if atom_reset in prefs:
-                prefs = meta_prefs.copy()
-            else:
-                prefs.update(meta_prefs)
+            prefs.update(meta_prefs)
+            if atom_reset in meta_prefs:
+                del prefs[atom_reset]
+                break
         return prefs
 
     def expected(self, subtest=None):
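Editor's worked example (not in the commit; values taken from the `test_metadata_inherit` expectations earlier in this diff): with `itermeta` now yielding the most specific metadata first, a `@Reset` marker stops inheritance from less specific levels.

    # Levels in iteration order: test prefs [c:d], then dir prefs
    # [@Reset, b:c], then an outer dir prefs [a:b] that should be ignored.
    prefs = {}
    for level, has_reset in (({"c": "d"}, False),
                             ({"b": "c"}, True),
                             ({"a": "b"}, False)):
        prefs.update(level)
        if has_reset:
            break  # @Reset: stop walking up the inheritance chain
    assert prefs == {"b": "c", "c": "d"}  # a:b is discarded by the reset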
@@ -251,12 +265,13 @@ class TestharnessTest(Test):
 
     def __init__(self, tests_root, url, inherit_metadata, test_metadata,
                  timeout=None, path=None, protocol="http", testdriver=False,
-                 jsshell=False):
+                 jsshell=False, scripts=None):
         Test.__init__(self, tests_root, url, inherit_metadata, test_metadata, timeout,
                       path, protocol)
 
         self.testdriver = testdriver
         self.jsshell = jsshell
+        self.scripts = scripts or []
 
     @classmethod
     def from_manifest(cls, manifest_item, inherit_metadata, test_metadata):
@@ -264,6 +279,8 @@ class TestharnessTest(Test):
         protocol = "https" if hasattr(manifest_item, "https") and manifest_item.https else "http"
         testdriver = manifest_item.testdriver if hasattr(manifest_item, "testdriver") else False
         jsshell = manifest_item.jsshell if hasattr(manifest_item, "jsshell") else False
+        script_metadata = manifest_item.source_file.script_metadata or []
+        scripts = [v for (k, v) in script_metadata if k == b"script"]
         return cls(manifest_item.source_file.tests_root,
                    manifest_item.url,
                    inherit_metadata,
@@ -272,7 +289,8 @@ class TestharnessTest(Test):
                    path=manifest_item.source_file.path,
                    protocol=protocol,
                    testdriver=testdriver,
-                   jsshell=jsshell)
+                   jsshell=jsshell,
+                   scripts=scripts)
 
     @property
     def id(self):