mirror of
https://github.com/servo/servo.git
synced 2025-09-02 02:58:22 +01:00
Update web-platform-tests to revision b7a8b84debb42268ea95a45bdad8f727d1facdf7
This commit is contained in:
parent
ba929208e4
commit
953dbda9a6
215 changed files with 6409 additions and 1644 deletions
|
@ -0,0 +1,97 @@
|
|||
import json
|
||||
import time
|
||||
|
||||
from collections import defaultdict
|
||||
from mozlog.formatters import base
|
||||
|
||||
|
||||
class ChromiumFormatter(base.BaseFormatter):
    """Formatter to produce results matching the Chromium JSON Test Results format.

    https://chromium.googlesource.com/chromium/src/+/master/docs/testing/json_test_results_format.md
    """

    def __init__(self):
        # Whether the run was interrupted, either by the test runner or user.
        self.interrupted = False

        # A map of test status to the number of tests that had that status.
        self.num_failures_by_status = defaultdict(int)

        # Start time, expressed as offset since UNIX epoch in seconds.
        self.start_timestamp_seconds = None

        # Trie of test results. Each directory in the test name is a node in
        # the trie and the leaf contains the dict of per-test data.
        self.tests = {}

    def _store_test_result(self, name, actual, expected):
        """
        Stores the result of a single test in |self.tests|

        :param str name: name of the test.
        :param str actual: actual status of the test.
        :param str expected: expected status of the test.
        """
        # The test name can contain a leading / which will produce an empty
        # string in the first position of the list returned by split. We use
        # filter(None) to remove such entries.
        name_parts = filter(None, name.split("/"))
        cur_dict = self.tests
        for name_part in name_parts:
            cur_dict = cur_dict.setdefault(name_part, {})
        cur_dict["actual"] = actual
        cur_dict["expected"] = expected

    def _map_status_name(self, status):
        """
        Maps a WPT status to a Chromium status.

        Chromium has five main statuses that we have to map to:
        CRASH: the test harness crashed
        FAIL: the test did not run as expected
        PASS: the test ran as expected
        SKIP: the test was not run
        TIMEOUT: the test did not finish in time and was aborted

        :param str status: the string status of a test from WPT
        :return: a corresponding string status for Chromium
        """
        if status == "OK":
            return "PASS"
        if status == "NOTRUN":
            return "SKIP"
        if status == "EXTERNAL-TIMEOUT":
            return "TIMEOUT"
        if status in ("ERROR", "CRASH"):
            # CRASH in WPT means a browser crash, which Chromium treats as a
            # test failure.
            return "FAIL"
        if status == "INTERNAL-ERROR":
            # CRASH in Chromium refers to an error in the test runner not the
            # browser.
            return "CRASH"
        # Any other status just gets returned as-is.
        return status

    def suite_start(self, data):
        # Record when the run began; fall back to the current time if the
        # log entry does not carry a timestamp.
        self.start_timestamp_seconds = data.get("time", time.time())

    def test_end(self, data):
        actual_status = self._map_status_name(data["status"])
        # A test with no explicit expectation is expected to pass.
        expected_status = self._map_status_name(data["expected"]) if "expected" in data else "PASS"
        self._store_test_result(data["test"], actual_status, expected_status)

        # Update the count of how many tests ran with each status.
        self.num_failures_by_status[actual_status] += 1

    def suite_end(self, data):
        """Returns the complete serialized Chromium JSON Test Results blob."""
        final_result = {
            # Fix: report the tracked interruption state instead of a
            # hard-coded False, so the field stays honest if the run is
            # ever flagged as interrupted.
            "interrupted": self.interrupted,
            # Required fields that are fixed for this formatter.
            "path_delimiter": "/",
            "version": 3,
            "seconds_since_epoch": self.start_timestamp_seconds,
            "num_failures_by_type": self.num_failures_by_status,
            "tests": self.tests
        }
        return json.dumps(final_result)
|
|
@ -0,0 +1,118 @@
|
|||
import json
|
||||
import sys
|
||||
from os.path import dirname, join
|
||||
from StringIO import StringIO
|
||||
|
||||
from mozlog import handlers, structuredlog
|
||||
|
||||
sys.path.insert(0, join(dirname(__file__), "..", ".."))
|
||||
from formatters import chromium
|
||||
|
||||
|
||||
def test_chromium_required_fields(capfd):
    # The formatter's output must contain the handful of top-level fields
    # that the Chromium JSON Test Results format requires.

    # Wire the formatter up to a logger writing into an in-memory buffer.
    buf = StringIO()
    log = structuredlog.StructuredLogger("test_a")
    log.add_handler(handlers.StreamHandler(buf, chromium.ChromiumFormatter()))

    # Emit a minimal suite containing one passing test.
    log.suite_start(["test-id-1"], run_info={}, time=123)
    log.test_start("test-id-1")
    log.test_end("test-id-1", status="PASS", expected="PASS")
    log.suite_end()

    # Nothing may leak to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # Parse what the formatter wrote to the buffer.
    buf.seek(0)
    result = json.load(buf)

    # Every required top-level field must be present.
    for field in ("interrupted", "path_delimiter", "version",
                  "num_failures_by_type", "tests"):
        assert field in result

    # The single test must carry both its actual and expected status.
    entry = result["tests"]["test-id-1"]
    assert "actual" in entry
    assert "expected" in entry
||||
def test_chromium_test_name_trie(capfd):
    # Ensure test names are broken into directories and stored in a trie with
    # test results at the leaves.

    # Wire the formatter up to a logger writing into an in-memory buffer.
    buf = StringIO()
    log = structuredlog.StructuredLogger("test_a")
    log.add_handler(handlers.StreamHandler(buf, chromium.ChromiumFormatter()))

    # Run two tests whose names share a common directory prefix.
    log.suite_start(["/foo/bar/test-id-1", "/foo/test-id-2"], run_info={}, time=123)
    for test_id, status, expected in (
            ("/foo/bar/test-id-1", "TIMEOUT", "FAIL"),
            ("/foo/test-id-2", "ERROR", "TIMEOUT")):
        log.test_start(test_id)
        log.test_end(test_id, status=status, expected=expected)
    log.suite_end()

    # Nothing may leak to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # Parse what the formatter wrote to the buffer.
    buf.seek(0)
    result = json.load(buf)

    # Directory components become trie nodes; results live at the leaves.
    leaf = result["tests"]["foo"]["bar"]["test-id-1"]
    assert leaf["actual"] == "TIMEOUT"
    assert leaf["expected"] == "FAIL"

    leaf = result["tests"]["foo"]["test-id-2"]
    # The ERROR status is mapped to FAIL for Chromium
    assert leaf["actual"] == "FAIL"
    assert leaf["expected"] == "TIMEOUT"
||||
def test_num_failures_by_type(capfd):
    # num_failures_by_type must tally how many tests finished with each status.

    # Wire the formatter up to a logger writing into an in-memory buffer.
    buf = StringIO()
    log = structuredlog.StructuredLogger("test_a")
    log.add_handler(handlers.StreamHandler(buf, chromium.ChromiumFormatter()))

    # Run some tests with different statuses: 3 passes, 1 timeout
    log.suite_start(["t1", "t2", "t3", "t4"], run_info={}, time=123)
    for test_id, status, expected in (("t1", "PASS", "PASS"),
                                      ("t2", "PASS", "PASS"),
                                      ("t3", "PASS", "FAIL"),
                                      ("t4", "TIMEOUT", "CRASH")):
        log.test_start(test_id)
        log.test_end(test_id, status=status, expected=expected)
    log.suite_end()

    # Nothing may leak to stdout/stderr
    # (note that mozlog outputs exceptions during handling to stderr!)
    captured = capfd.readouterr()
    assert captured.out == ""
    assert captured.err == ""

    # Parse what the formatter wrote to the buffer.
    buf.seek(0)
    tally = json.load(buf)["num_failures_by_type"]

    # We expect 3 passes and 1 timeout, nothing else.
    assert sorted(tally) == ["PASS", "TIMEOUT"]
    assert tally["PASS"] == 3
    assert tally["TIMEOUT"] == 1
|
@ -8,8 +8,7 @@ from datetime import timedelta
|
|||
|
||||
import config
|
||||
import wpttest
|
||||
from formatters import wptreport, wptscreenshot
|
||||
|
||||
from formatters import chromium, wptreport, wptscreenshot
|
||||
|
||||
def abs_path(path):
    """Expand a leading ~ in *path* and return its absolute form."""
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)
|
||||
|
@ -326,6 +325,7 @@ scheme host and port.""")
|
|||
help="List of URLs for tests to run, or paths including tests to run. "
|
||||
"(equivalent to --include)")
|
||||
|
||||
commandline.log_formatters["chromium"] = (chromium.ChromiumFormatter, "Chromium Layout Tests format")
|
||||
commandline.log_formatters["wptreport"] = (wptreport.WptreportFormatter, "wptreport format")
|
||||
commandline.log_formatters["wptscreenshot"] = (wptscreenshot.WptscreenshotFormatter, "wpt.fyi screenshots")
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue