Update web-platform-tests and CSS tests.

- Update CSS tests to revision e05bfd5e30ed662c2f8a353577003f8eed230180.
- Update web-platform-tests to revision a052787dd5c069a340031011196b73affbd68cd9.
This commit is contained in:
Ms2ger 2017-02-06 11:06:12 +01:00
parent fb4f421c8b
commit 296fa2512b
21852 changed files with 2080936 additions and 892894 deletions

View file

@ -13,9 +13,10 @@ from collections import defaultdict
from . import fnmatch
from ..localpaths import repo_root
from ..gitignore.gitignore import PathFilter
from manifest.sourcefile import SourceFile, meta_re
from six import binary_type, iteritems, itervalues
from six.moves import range

# Absolute path of the directory containing this module.
here = os.path.abspath(os.path.split(__file__)[0])
@ -39,13 +40,42 @@ def all_git_paths(repo_root):
for item in output.split("\n"):
yield item
def all_filesystem_paths(repo_root):
    """Yield the repo-relative path of every lintable file under repo_root.

    Paths rejected by the gitignore-style filter (including everything under
    .git/) are skipped, and rejected directories are pruned from the walk.
    """
    path_filter = PathFilter(repo_root, extras=[".git/*"])
    for base, subdirs, files in os.walk(repo_root):
        for fname in files:
            rel = os.path.relpath(os.path.join(base, fname), repo_root)
            if path_filter(rel):
                yield rel
        # Mutate subdirs in place so os.walk does not descend into
        # directories the filter rejects (trailing "/" marks a directory).
        subdirs[:] = [d for d in subdirs
                      if path_filter(os.path.relpath(os.path.join(base, d) + "/",
                                                     repo_root))]
def check_path_length(repo_root, path):
def all_paths(repo_root, ignore_local):
    """Yield paths to lint, either from git or from the filesystem.

    :param repo_root: the repository root
    :param ignore_local: if True, only consider git-tracked files
    """
    if ignore_local:
        source = all_git_paths(repo_root)
    else:
        source = all_filesystem_paths(repo_root)
    for path in source:
        yield path
def check_path_length(repo_root, path, css_mode):
    """Flag paths whose served length (including the leading "/") exceeds 150.

    :param repo_root: the repository root (unused)
    :param path: the path of the file within the repository
    :param css_mode: whether we're in CSS testsuite mode (unused)
    :returns: a list with one PATH LENGTH error, or an empty list
    """
    served_length = len(path) + 1  # +1 for the leading "/" when served
    if served_length <= 150:
        return []
    message = ("/%s longer than maximum path length (%d > 150)"
               % (path, served_length))
    return [("PATH LENGTH", message, path, None)]
def check_worker_collision(repo_root, path, css_mode):
    """Flag paths that collide with tests auto-generated from .js files.

    .any.js and .worker.js files implicitly generate .html counterparts, so
    a checked-in file with one of those generated names is an error.

    :param repo_root: the repository root (unused)
    :param path: the path of the file within the repository
    :param css_mode: whether we're in CSS testsuite mode (unused)
    :returns: a list with one WORKER COLLISION error, or an empty list
    """
    collisions = ((".any.html", ".any.js"),
                  (".any.worker.html", ".any.js"),
                  (".worker.html", ".worker.js"))
    for suffix, source in collisions:
        if not path.endswith(suffix):
            continue
        message = ("path ends with %s which collides with generated tests from %s files"
                   % (suffix, source))
        return [("WORKER COLLISION", message, path, None)]
    return []
def parse_whitelist(f):
"""
Parse the whitelist file given by `f`, and return the parsed structure.
@ -87,7 +117,7 @@ def filter_whitelist_errors(data, path, errors):
normpath = os.path.normcase(path)
for file_match, whitelist_errors in iteritems(data):
if fnmatch.fnmatchcase(path, file_match):
if fnmatch.fnmatchcase(normpath, file_match):
for i, (error_type, msg, path, line) in enumerate(errors):
if error_type in whitelist_errors:
allowed_lines = whitelist_errors[error_type]
@ -158,7 +188,7 @@ regexps = [item() for item in
ConsoleRegexp,
PrintRegexp]]
def check_regexp_line(repo_root, path, f):
def check_regexp_line(repo_root, path, f, css_mode):
errors = []
applicable_regexps = [regexp for regexp in regexps if regexp.applies(path)]
@ -170,11 +200,17 @@ def check_regexp_line(repo_root, path, f):
return errors
def check_parsed(repo_root, path, f):
def check_parsed(repo_root, path, f, css_mode):
source_file = SourceFile(repo_root, path, "/", contents=f.read())
errors = []
if css_mode or path.startswith("css/"):
if (source_file.type == "support" and
not source_file.name_is_non_test and
not source_file.name_is_reference):
return [("SUPPORT-WRONG-DIR", "Support file not in support directory", path, None)]
if source_file.name_is_non_test or source_file.name_is_manual:
return []
@ -184,6 +220,12 @@ def check_parsed(repo_root, path, f):
if source_file.root is None:
return [("PARSE-FAILED", "Unable to parse file", path, None)]
if source_file.type == "manual" and not source_file.name_is_manual:
return [("CONTENT-MANUAL", "Manual test whose filename doesn't end in '-manual'", path, None)]
if source_file.type == "visual" and not source_file.name_is_visual:
return [("CONTENT-VISUAL", "Visual test whose filename doesn't end in '-visual'", path, None)]
if len(source_file.timeout_nodes) > 1:
errors.append(("MULTIPLE-TIMEOUT", "More than one meta name='timeout'", path, None))
@ -283,7 +325,7 @@ class OpenModeCheck(ASTCheck):
ast_checkers = [item() for item in [OpenModeCheck]]
def check_python_ast(repo_root, path, f):
def check_python_ast(repo_root, path, f, css_mode):
if not path.endswith(".py"):
return []
@ -299,34 +341,70 @@ def check_python_ast(repo_root, path, f):
return errors
def check_path(repo_root, path):
# Matches comments that look like metadata but are malformed (e.g. "//META:").
# Raw bytes literal: "\s" is not a valid bytes escape, so the non-raw form
# relies on a deprecated fallback that newer Pythons warn about.
broken_metadata = re.compile(br"//\s*META:")
def check_script_metadata(repo_root, path, f, css_mode):
    """Lint the leading ``// META:`` comment block of a script test.

    Only applies to ``.worker.js`` and ``.any.js`` files; all other paths
    produce no errors.

    :param repo_root: the repository root (unused)
    :param path: the path of the file within the repository
    :param f: a file-like object yielding the file's lines as bytes
    :param css_mode: whether we're in CSS testsuite mode (unused)
    :returns: a list of (error type, message, path, 1-based line) tuples
    """
    if not path.endswith((".worker.js", ".any.js")):
        return []
    # done flips to True at the first line that is not a well-formed
    # metadata comment; from then on metadata comments are errors.
    done = False
    errors = []
    for idx, line in enumerate(f):
        # Lines must be bytes, since meta_re / broken_metadata are bytes patterns.
        assert isinstance(line, binary_type), line
        m = meta_re.match(line)
        if m:
            key, value = m.groups()
            if key == b"timeout":
                # "long" is the only accepted timeout value.
                if value != b"long":
                    errors.append(("UNKNOWN-TIMEOUT-METADATA", "Unexpected value for timeout metadata", path, idx + 1))
            elif key == b"script":
                # script includes are always allowed; value is not validated here.
                pass
            else:
                errors.append(("UNKNOWN-METADATA", "Unexpected kind of metadata", path, idx + 1))
        else:
            done = True
        if done:
            # NOTE: a well-formed META comment appearing after the block has
            # ended is handled by BOTH branches: the block above may report a
            # metadata error for it, and here it also gets STRAY-METADATA.
            if meta_re.match(line):
                errors.append(("STRAY-METADATA", "Metadata comments should start the file", path, idx + 1))
            elif meta_re.search(line):
                errors.append(("INDENTED-METADATA", "Metadata comments should start the line", path, idx + 1))
            elif broken_metadata.search(line):
                errors.append(("BROKEN-METADATA", "Metadata comment is not formatted correctly", path, idx + 1))
    return errors
def check_path(repo_root, path, css_mode):
    """
    Runs lints that check the file path.

    :param repo_root: the repository root
    :param path: the path of the file within the repository
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of errors found in ``path``
    """
    errors = []
    for path_fn in path_lints:
        # All path lints share the (repo_root, path, css_mode) signature.
        errors.extend(path_fn(repo_root, path, css_mode))
    return errors
def check_file_contents(repo_root, path, f):
def check_file_contents(repo_root, path, f, css_mode):
    """
    Runs lints that check the file contents.

    :param repo_root: the repository root
    :param path: the path of the file within the repository
    :param f: a file-like object with the file contents
    :param css_mode: whether we're in CSS testsuite mode
    :returns: a list of errors found in ``f``
    """
    errors = []
    for file_fn in file_lints:
        errors.extend(file_fn(repo_root, path, f, css_mode))
        # Rewind so the next lint (and the caller) sees the whole file.
        f.seek(0)
    return errors
@ -360,14 +438,18 @@ def parse_args():
help="List of paths to lint")
parser.add_argument("--json", action="store_true",
help="Output machine-readable JSON format")
parser.add_argument("--ignore-local", action="store_true",
help="Ignore locally added files in the working directory (requires git).")
parser.add_argument("--css-mode", action="store_true",
help="Run CSS testsuite specific lints")
return parser.parse_args()
def main(force_css_mode=False):
    """Parse command-line arguments and run the linter.

    :param force_css_mode: run CSS testsuite lints even without --css-mode
    :returns: the total number of lint errors found
    """
    args = parse_args()
    # Explicit paths win; otherwise walk git or the filesystem per --ignore-local.
    paths = args.paths if args.paths else all_paths(repo_root, args.ignore_local)
    return lint(repo_root, paths, args.json, force_css_mode or args.css_mode)
def lint(repo_root, paths, output_json):
def lint(repo_root, paths, output_json, css_mode):
error_count = defaultdict(int)
last = None
@ -408,12 +490,12 @@ def lint(repo_root, paths, output_json):
if any(fnmatch.fnmatch(path, file_match) for file_match in ignored_files):
continue
errors = check_path(repo_root, path)
errors = check_path(repo_root, path, css_mode)
last = process_errors(path, errors) or last
if not os.path.isdir(abs_path):
with open(abs_path, 'rb') as f:
errors = check_file_contents(repo_root, path, f)
errors = check_file_contents(repo_root, path, f, css_mode)
last = process_errors(path, errors) or last
if not output_json:
@ -422,8 +504,8 @@ def lint(repo_root, paths, output_json):
print(ERROR_MSG % (last[0], last[1], last[0], last[1]))
return sum(itervalues(error_count))
# Registries of lint checks: path lints see only the path; file lints also
# receive an open file object with the contents.
path_lints = [check_path_length, check_worker_collision]
file_lints = [check_regexp_line, check_parsed, check_python_ast, check_script_metadata]
if __name__ == "__main__":
    # Script entry point: main() returns the number of lint errors found.
    error_count = main()

View file

@ -26,7 +26,7 @@ INTERESTING_FILE_NAMES = {
def check_with_files(input_bytes):
return {
filename: (check_file_contents("", filename, six.BytesIO(input_bytes)), kind)
filename: (check_file_contents("", filename, six.BytesIO(input_bytes), False), kind)
for (filename, kind) in
(
(os.path.join("html", filename), kind)
@ -380,7 +380,7 @@ def fifth():
def test_open_mode():
for method in ["open", "file"]:
code = open_mode_code.format(method).encode("utf-8")
errors = check_file_contents("", "test.py", six.BytesIO(code))
errors = check_file_contents("", "test.py", six.BytesIO(code), False)
check_errors(errors)
message = ("File opened without providing an explicit mode (note: " +
@ -390,3 +390,68 @@ def test_open_mode():
("OPEN-NO-MODE", message, "test.py", 3),
("OPEN-NO-MODE", message, "test.py", 12),
]
@pytest.mark.parametrize(
    "filename,css_mode,expect_error",
    [
        ("foo/bar.html", False, False),
        ("foo/bar.html", True, True),
        ("css/bar.html", False, True),
        ("css/bar.html", True, True),
    ])
def test_css_support_file(filename, css_mode, expect_error):
    """An empty file outside a support directory is an error in CSS mode
    (or under css/), and fine otherwise."""
    errors = check_file_contents("", filename, six.BytesIO(b""), css_mode)
    check_errors(errors)
    if expect_error:
        expected = [("SUPPORT-WRONG-DIR",
                     "Support file not in support directory",
                     filename,
                     None)]
    else:
        expected = []
    assert errors == expected
@pytest.mark.parametrize("filename", [
    "foo.worker.js",
    "foo.any.js",
])
@pytest.mark.parametrize("input,error", [
    (b"""//META: timeout=long\n""", None),
    (b"""// META: timeout=long\n""", None),
    (b"""// META: timeout=long\n""", None),
    (b"""// META: script=foo.js\n""", None),
    (b"""\n// META: timeout=long\n""", (2, "STRAY-METADATA")),
    (b""" // META: timeout=long\n""", (1, "INDENTED-METADATA")),
    (b"""// META: timeout=long\n// META: timeout=long\n""", None),
    (b"""// META: timeout=long\n\n// META: timeout=long\n""", (3, "STRAY-METADATA")),
    (b"""// META: timeout=long\n// Start of the test\n// META: timeout=long\n""", (3, "STRAY-METADATA")),
    (b"""// META:\n""", (1, "BROKEN-METADATA")),
    (b"""// META: foobar\n""", (1, "BROKEN-METADATA")),
    (b"""// META: foo=bar\n""", (1, "UNKNOWN-METADATA")),
    (b"""// META: timeout=bar\n""", (1, "UNKNOWN-TIMEOUT-METADATA")),
])
def test_script_metadata(filename, input, error):
    """check_script_metadata accepts valid META comment blocks and reports
    stray / indented / broken / unknown metadata with the right line number.

    ``error`` is either None (no error expected) or a (line, kind) pair.
    """
    errors = check_file_contents("", filename, six.BytesIO(input), False)
    check_errors(errors)
    if error is not None:
        line, kind = error
        # Map each error kind to the message check_script_metadata emits.
        messages = {
            "STRAY-METADATA": "Metadata comments should start the file",
            "INDENTED-METADATA": "Metadata comments should start the line",
            "BROKEN-METADATA": "Metadata comment is not formatted correctly",
            "UNKNOWN-TIMEOUT-METADATA": "Unexpected value for timeout metadata",
            "UNKNOWN-METADATA": "Unexpected kind of metadata",
        }
        assert errors == [
            (kind,
             messages[kind],
             filename,
             line),
        ]
    else:
        assert errors == []

View file

@ -18,8 +18,36 @@ def _mock_lint(name):
def test_filter_whitelist_errors():
    """filter_whitelist_errors honours per-file and per-line whitelist entries."""
    # Smoke check: an empty whitelist with no errors filters to nothing.
    filtered = filter_whitelist_errors({}, '', [])
    assert filtered == []
    whitelist = {
        'svg/*': {
            'CONSOLE': {12},
            'INDENT TABS': {None}
        }
    }
    # parse_whitelist normalises the case/path of the match string so need to do the same
    whitelist = {os.path.normcase(p): e for p, e in whitelist.items()}
    # paths passed into filter_whitelist_errors are always Unix style
    filteredfile = 'svg/test.html'
    unfilteredfile = 'html/test.html'
    # Tests for passing no errors
    filtered = filter_whitelist_errors(whitelist, filteredfile, [])
    assert filtered == []
    filtered = filter_whitelist_errors(whitelist, unfilteredfile, [])
    assert filtered == []
    # Tests for filtering on file and line number
    filtered = filter_whitelist_errors(whitelist, filteredfile, [['CONSOLE', '', filteredfile, 12]])
    assert filtered == []
    filtered = filter_whitelist_errors(whitelist, unfilteredfile, [['CONSOLE', '', unfilteredfile, 12]])
    assert filtered == [['CONSOLE', '', unfilteredfile, 12]]
    filtered = filter_whitelist_errors(whitelist, filteredfile, [['CONSOLE', '', filteredfile, 11]])
    assert filtered == [['CONSOLE', '', filteredfile, 11]]
    # Tests for filtering on just file (line entry None matches any line)
    filtered = filter_whitelist_errors(whitelist, filteredfile, [['INDENT TABS', filteredfile, '', 12]])
    assert filtered == []
    filtered = filter_whitelist_errors(whitelist, filteredfile, [['INDENT TABS', filteredfile, '', 11]])
    assert filtered == []
    filtered = filter_whitelist_errors(whitelist, unfilteredfile, [['INDENT TABS', unfilteredfile, '', 11]])
    assert filtered == [['INDENT TABS', unfilteredfile, '', 11]]
def test_parse_whitelist():
@ -67,7 +95,7 @@ CONSOLE:streams/resources/test-utils.js: 12
def test_lint_no_files(capsys):
rv = lint(_dummy_repo, [], False)
rv = lint(_dummy_repo, [], False, False)
assert rv == 0
out, err = capsys.readouterr()
assert out == ""
@ -77,7 +105,7 @@ def test_lint_no_files(capsys):
def test_lint_ignored_file(capsys):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["broken_ignored.html"], False)
rv = lint(_dummy_repo, ["broken_ignored.html"], False, False)
assert rv == 0
assert not mocked_check_path.called
assert not mocked_check_file_contents.called
@ -91,7 +119,7 @@ def test_lint_not_existing_file(capsys):
with _mock_lint("check_file_contents") as mocked_check_file_contents:
# really long path-linted filename
name = "a" * 256 + ".html"
rv = lint(_dummy_repo, [name], False)
rv = lint(_dummy_repo, [name], False, False)
assert rv == 0
assert not mocked_check_path.called
assert not mocked_check_file_contents.called
@ -103,7 +131,7 @@ def test_lint_not_existing_file(capsys):
def test_lint_passing(capsys):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["okay.html"], False)
rv = lint(_dummy_repo, ["okay.html"], False, False)
assert rv == 0
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
@ -115,7 +143,7 @@ def test_lint_passing(capsys):
def test_lint_failing(capsys):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["broken.html"], False)
rv = lint(_dummy_repo, ["broken.html"], False, False)
assert rv == 1
assert mocked_check_path.call_count == 1
assert mocked_check_file_contents.call_count == 1
@ -128,7 +156,7 @@ def test_lint_failing(capsys):
def test_lint_passing_and_failing(capsys):
with _mock_lint("check_path") as mocked_check_path:
with _mock_lint("check_file_contents") as mocked_check_file_contents:
rv = lint(_dummy_repo, ["broken.html", "okay.html"], False)
rv = lint(_dummy_repo, ["broken.html", "okay.html"], False, False)
assert rv == 1
assert mocked_check_path.call_count == 2
assert mocked_check_file_contents.call_count == 2

View file

@ -11,7 +11,7 @@ def test_allowed_path_length():
for idx in range(5):
filename = basename + idx * "a"
errors = check_path("/foo/", filename)
errors = check_path("/foo/", filename, False)
check_errors(errors)
assert errors == []
@ -23,6 +23,19 @@ def test_forbidden_path_length():
filename = basename + idx * "a"
message = "/%s longer than maximum path length (%s > 150)" % (filename, 146 + idx)
errors = check_path("/foo/", filename)
errors = check_path("/foo/", filename, False)
check_errors(errors)
assert errors == [("PATH LENGTH", message, filename, None)]
@pytest.mark.parametrize("path_ending,generated", [(".worker.html", ".worker.js"),
                                                   (".any.worker.html", ".any.js"),
                                                   (".any.html", ".any.js")])
def test_forbidden_path_endings(path_ending, generated):
    """Paths that look like the output of test generation are rejected."""
    path = "/test/test" + path_ending
    expected_message = ("path ends with %s which collides with generated tests from %s files"
                        % (path_ending, generated))
    errors = check_path("/foo/", path, False)
    check_errors(errors)
    assert errors == [("WORKER COLLISION", expected_message, path, None)]