Update web-platform-tests to revision 85e8612e81c8b478c8cac7260436646e48d3f7ae

Author: WPT Sync Bot
Date: 2019-04-16 21:36:56 -04:00
Parent: a14b952fa3
Commit: 87dcce0f06
66 changed files with 697 additions and 266 deletions

View file

@@ -84,8 +84,7 @@ def get_paths(**kwargs):
revish = kwargs["revish"]
changed, _ = files_changed(revish)
-all_changed = set(os.path.relpath(item, wpt_root)
-for item in set(changed))
+all_changed = {os.path.relpath(item, wpt_root) for item in set(changed)}
return all_changed

View file

@@ -1,6 +1,6 @@
from tools.ci import jobs
-all_jobs = set([
+all_jobs = {
"build_css",
"lint",
"manifest_upload",
@@ -11,9 +11,9 @@ all_jobs = set([
"wpt_integration",
"wptrunner_infrastructure",
"wptrunner_unittest",
-])
+}
-default_jobs = set(["lint", "manifest_upload"])
+default_jobs = {"lint", "manifest_upload"}
def test_all():
@@ -25,19 +25,19 @@ def test_default():
def test_testharness():
-assert jobs.get_jobs(["resources/testharness.js"]) == default_jobs | set(["resources_unittest",
-"wptrunner_infrastructure"])
+assert jobs.get_jobs(["resources/testharness.js"]) == default_jobs | {"resources_unittest",
+"wptrunner_infrastructure"}
assert jobs.get_jobs(["resources/testharness.js"],
-includes=["resources_unittest"]) == set(["resources_unittest"])
+includes=["resources_unittest"]) == {"resources_unittest"}
assert jobs.get_jobs(["tools/wptserve/wptserve/config.py"],
-includes=["resources_unittest"]) == set(["resources_unittest"])
+includes=["resources_unittest"]) == {"resources_unittest"}
assert jobs.get_jobs(["foo/resources/testharness.js"],
includes=["resources_unittest"]) == set()
def test_stability():
assert jobs.get_jobs(["dom/historical.html"],
-includes=["stability"]) == set(["stability"])
+includes=["stability"]) == {"stability"}
assert jobs.get_jobs(["tools/pytest.ini"],
includes=["stability"]) == set()
assert jobs.get_jobs(["serve"],
@@ -55,15 +55,15 @@ def test_stability():
assert jobs.get_jobs(["css/build-css-testsuite.sh"],
includes=["stability"]) == set()
assert jobs.get_jobs(["css/CSS21/test-001.html"],
-includes=["stability"]) == set(["stability"])
+includes=["stability"]) == {"stability"}
assert jobs.get_jobs(["css/build-css-testsuite.sh",
"css/CSS21/test-001.html"],
-includes=["stability"]) == set(["stability"])
+includes=["stability"]) == {"stability"}
def test_tools_unittest():
assert jobs.get_jobs(["tools/ci/test/test_jobs.py"],
-includes=["tools_unittest"]) == set(["tools_unittest"])
+includes=["tools_unittest"]) == {"tools_unittest"}
assert jobs.get_jobs(["dom/tools/example.py"],
includes=["tools_unittest"]) == set()
assert jobs.get_jobs(["dom/historical.html"],
@@ -72,38 +72,38 @@ def test_tools_unittest():
def test_wptrunner_unittest():
assert jobs.get_jobs(["tools/wptrunner/wptrunner/wptrunner.py"],
-includes=["wptrunner_unittest"]) == set(["wptrunner_unittest"])
+includes=["wptrunner_unittest"]) == {"wptrunner_unittest"}
assert jobs.get_jobs(["tools/example.py"],
-includes=["wptrunner_unittest"]) == set(["wptrunner_unittest"])
+includes=["wptrunner_unittest"]) == {"wptrunner_unittest"}
def test_build_css():
assert jobs.get_jobs(["css/css-build-testsuites.sh"],
-includes=["build_css"]) == set(["build_css"])
+includes=["build_css"]) == {"build_css"}
assert jobs.get_jobs(["css/CSS21/test.html"],
-includes=["build_css"]) == set(["build_css"])
+includes=["build_css"]) == {"build_css"}
assert jobs.get_jobs(["html/css/CSS21/test.html"],
includes=["build_css"]) == set()
def test_update_built():
assert jobs.get_jobs(["2dcontext/foo.html"],
-includes=["update_built"]) == set(["update_built"])
+includes=["update_built"]) == {"update_built"}
assert jobs.get_jobs(["html/foo.html"],
-includes=["update_built"]) == set(["update_built"])
+includes=["update_built"]) == {"update_built"}
assert jobs.get_jobs(["offscreen-canvas/foo.html"],
-includes=["update_built"]) == set(["update_built"])
+includes=["update_built"]) == {"update_built"}
def test_wpt_integration():
assert jobs.get_jobs(["tools/wpt/wpt.py"],
-includes=["wpt_integration"]) == set(["wpt_integration"])
+includes=["wpt_integration"]) == {"wpt_integration"}
assert jobs.get_jobs(["tools/wptrunner/wptrunner/wptrunner.py"],
-includes=["wpt_integration"]) == set(["wpt_integration"])
+includes=["wpt_integration"]) == {"wpt_integration"}
def test_wpt_infrastructure():
assert jobs.get_jobs(["tools/hammer.html"],
-includes=["wptrunner_infrastructure"]) == set(["wptrunner_infrastructure"])
+includes=["wptrunner_infrastructure"]) == {"wptrunner_infrastructure"}
assert jobs.get_jobs(["infrastructure/assumptions/ahem.html"],
-includes=["wptrunner_infrastructure"]) == set(["wptrunner_infrastructure"])
+includes=["wptrunner_infrastructure"]) == {"wptrunner_infrastructure"}

View file

@@ -6,12 +6,12 @@ from tools.ci import run_tc
@pytest.mark.parametrize("msg,expected", [
-("Some initial line\n\ntc-jobs:foo,bar", set(["foo", "bar"])),
-("Some initial line\n\ntc-jobs:foo, bar", set(["foo", "bar"])),
-("tc-jobs:foo, bar \nbaz", set(["foo", "bar"])),
-("tc-jobs:all", set(["all"])),
+("Some initial line\n\ntc-jobs:foo,bar", {"foo", "bar"}),
+("Some initial line\n\ntc-jobs:foo, bar", {"foo", "bar"}),
+("tc-jobs:foo, bar \nbaz", {"foo", "bar"}),
+("tc-jobs:all", {"all"}),
("", set()),
-("tc-jobs:foo\ntc-jobs:bar", set(["foo"]))])
+("tc-jobs:foo\ntc-jobs:bar", {"foo"})])
@pytest.mark.parametrize("event", [
{"commits": [{"message": "<message>"}]},
{"pull_request": {"body": "<message>"}}

View file

@@ -373,46 +373,46 @@ class CRRegexp(Regexp):
description = "CR character in line separator"
class SetTimeoutRegexp(Regexp):
-pattern = b"setTimeout\s*\("
+pattern = br"setTimeout\s*\("
error = "SET TIMEOUT"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "setTimeout used; step_timeout should typically be used instead"
class W3CTestOrgRegexp(Regexp):
-pattern = b"w3c\-test\.org"
+pattern = br"w3c\-test\.org"
error = "W3C-TEST.ORG"
description = "External w3c-test.org domain used"
class WebPlatformTestRegexp(Regexp):
-pattern = b"web\-platform\.test"
+pattern = br"web\-platform\.test"
error = "WEB-PLATFORM.TEST"
description = "Internal web-platform.test domain used"
class Webidl2Regexp(Regexp):
-pattern = b"webidl2\.js"
+pattern = br"webidl2\.js"
error = "WEBIDL2.JS"
description = "Legacy webidl2.js script used"
class ConsoleRegexp(Regexp):
-pattern = b"console\.[a-zA-Z]+\s*\("
+pattern = br"console\.[a-zA-Z]+\s*\("
error = "CONSOLE"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "Console logging API used"
class GenerateTestsRegexp(Regexp):
-pattern = b"generate_tests\s*\("
+pattern = br"generate_tests\s*\("
error = "GENERATE_TESTS"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "generate_tests used"
class PrintRegexp(Regexp):
-pattern = b"print(?:\s|\s*\()"
+pattern = br"print(?:\s|\s*\()"
error = "PRINT STATEMENT"
file_extensions = [".py"]
description = "Print function used"
class LayoutTestsRegexp(Regexp):
-pattern = b"eventSender|testRunner|window\.internals"
+pattern = br"eventSender|testRunner|window\.internals"
error = "LAYOUTTESTS APIS"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "eventSender/testRunner/window.internals used; these are LayoutTests-specific APIs (WebKit/Blink)"
@@ -640,8 +640,8 @@ def check_python_ast(repo_root, path, f):
return errors
-broken_js_metadata = re.compile(b"//\s*META:")
-broken_python_metadata = re.compile(b"#\s*META:")
+broken_js_metadata = re.compile(br"//\s*META:")
+broken_python_metadata = re.compile(br"#\s*META:")
def check_global_metadata(value):

View file

@@ -26,13 +26,6 @@ class ManifestVersionMismatch(ManifestError):
pass
-def iterfilter(filters, iter):
-for f in filters:
-iter = f(iter)
-for item in iter:
-yield item
item_classes = {"testharness": TestharnessTest,
"reftest": RefTest,
"reftest_node": RefTestNode,
@@ -45,7 +38,7 @@ item_classes = {"testharness": TestharnessTest,
class TypeData(object):
-def __init__(self, manifest, type_cls, meta_filters):
+def __init__(self, manifest, type_cls):
"""Dict-like object containing the TestItems for each test type.
Loading an actual Item class for each test is unnecessarily
@@ -61,7 +54,6 @@ class TypeData(object):
self.json_data = {}
self.tests_root = None
self.data = {}
-self.meta_filters = meta_filters or []
def __getitem__(self, key):
if key not in self.data:
@@ -134,7 +126,7 @@ class TypeData(object):
if self.json_data is not None:
data = set()
path = from_os_path(key)
-for test in iterfilter(self.meta_filters, self.json_data.get(path, [])):
+for test in self.json_data.get(path, []):
manifest_item = self.type_cls.from_json(self.manifest, path, test)
data.add(manifest_item)
try:
@@ -153,7 +145,7 @@ class TypeData(object):
if key in self.data:
continue
data = set()
-for test in iterfilter(self.meta_filters, self.json_data.get(path, [])):
+for test in self.json_data.get(path, []):
manifest_item = self.type_cls.from_json(self.manifest, path, test)
data.add(manifest_item)
self.data[key] = data
@@ -185,17 +177,17 @@ class TypeData(object):
without actually constructing all the items"""
rv = set(iterkeys(self.data))
if self.json_data:
-rv |= set(to_os_path(item) for item in iterkeys(self.json_data))
+rv |= {to_os_path(item) for item in iterkeys(self.json_data)}
return rv
class ManifestData(dict):
-def __init__(self, manifest, meta_filters=None):
+def __init__(self, manifest):
"""Dictionary subclass containing a TypeData instance for each test type,
keyed by type name"""
self.initialized = False
for key, value in iteritems(item_classes):
-self[key] = TypeData(manifest, value, meta_filters=meta_filters)
+self[key] = TypeData(manifest, value)
self.initialized = True
self.json_obj = None
@@ -214,10 +206,10 @@ class ManifestData(dict):
class Manifest(object):
-def __init__(self, tests_root=None, url_base="/", meta_filters=None):
+def __init__(self, tests_root=None, url_base="/"):
assert url_base is not None
self._path_hash = {}
-self._data = ManifestData(self, meta_filters)
+self._data = ManifestData(self)
self._reftest_nodes_by_url = None
self.tests_root = tests_root
self.url_base = url_base
@@ -396,12 +388,12 @@ class Manifest(object):
return rv
@classmethod
-def from_json(cls, tests_root, obj, types=None, meta_filters=None):
+def from_json(cls, tests_root, obj, types=None):
version = obj.get("version")
if version != CURRENT_VERSION:
raise ManifestVersionMismatch
-self = cls(tests_root, url_base=obj.get("url_base", "/"), meta_filters=meta_filters)
+self = cls(tests_root, url_base=obj.get("url_base", "/"))
if not hasattr(obj, "items") and hasattr(obj, "paths"):
raise ManifestError
@@ -419,17 +411,17 @@ class Manifest(object):
return self
-def load(tests_root, manifest, types=None, meta_filters=None):
+def load(tests_root, manifest, types=None):
logger = get_logger()
logger.warning("Prefer load_and_update instead")
-return _load(logger, tests_root, manifest, types, meta_filters)
+return _load(logger, tests_root, manifest, types)
__load_cache = {}
-def _load(logger, tests_root, manifest, types=None, meta_filters=None, allow_cached=True):
+def _load(logger, tests_root, manifest, types=None, allow_cached=True):
# "manifest" is a path or file-like object.
manifest_path = (manifest if isinstance(manifest, string_types)
else manifest.name)
@@ -445,8 +437,7 @@ def _load(logger, tests_root, manifest, types=None, meta_filters=None, allow_cac
with open(manifest) as f:
rv = Manifest.from_json(tests_root,
fast_json.load(f),
-types=types,
-meta_filters=meta_filters)
+types=types)
except IOError:
return None
except ValueError:
@@ -455,8 +446,7 @@ def _load(logger, tests_root, manifest, types=None, meta_filters=None, allow_cac
else:
rv = Manifest.from_json(tests_root,
fast_json.load(manifest),
-types=types,
-meta_filters=meta_filters)
+types=types)
if allow_cached:
__load_cache[manifest_path] = rv
@@ -472,7 +462,6 @@ def load_and_update(tests_root,
cache_root=None,
working_copy=True,
types=None,
-meta_filters=None,
write_manifest=True,
allow_cached=True):
logger = get_logger()
@@ -484,7 +473,6 @@ def load_and_update(tests_root,
tests_root,
manifest_path,
types=types,
-meta_filters=meta_filters,
allow_cached=allow_cached)
except ManifestVersionMismatch:
logger.info("Manifest version changed, rebuilding")
@@ -493,7 +481,7 @@ def load_and_update(tests_root,
logger.info("Manifest url base did not match, rebuilding")
if manifest is None:
-manifest = Manifest(tests_root, url_base, meta_filters=meta_filters)
+manifest = Manifest(tests_root, url_base)
update = True
if update:

View file

@@ -17,8 +17,8 @@ from .item import Stub, ManualTest, WebDriverSpecTest, RefTestNode, TestharnessT
from .utils import ContextManagerBytesIO, cached_property
wd_pattern = "*.py"
-js_meta_re = re.compile(b"//\s*META:\s*(\w*)=(.*)$")
-python_meta_re = re.compile(b"#\s*META:\s*(\w*)=(.*)$")
+js_meta_re = re.compile(br"//\s*META:\s*(\w*)=(.*)$")
+python_meta_re = re.compile(br"#\s*META:\s*(\w*)=(.*)$")
reference_file_re = re.compile(r'(^|[\-_])(not)?ref[0-9]*([\-_]|$)')
@@ -146,11 +146,11 @@ class SourceFile(object):
"xhtml":_parse_xml,
"svg":_parse_xml}
-root_dir_non_test = set(["common"])
+root_dir_non_test = {"common"}
-dir_non_test = set(["resources",
-"support",
-"tools"])
+dir_non_test = {"resources",
+"support",
+"tools"}
dir_path_non_test = {("css21", "archive"),
("css", "CSS2", "archive"),

View file

@@ -293,39 +293,11 @@ def test_iterpath():
SourceFileWithTest("test3", "0"*40, item.TestharnessTest)]
m.update([(s, True) for s in sources])
-assert set(item.url for item in m.iterpath("test2")) == set(["/test2-1.html",
-"/test2-2.html"])
+assert {item.url for item in m.iterpath("test2")} == {"/test2-1.html",
+"/test2-2.html"}
assert set(m.iterpath("missing")) == set()
-def test_filter():
-m = manifest.Manifest()
-sources = [SourceFileWithTest("test1", "0"*40, item.RefTestNode, references=[("/test1-ref", "==")]),
-SourceFileWithTests("test2", "0"*40, item.TestharnessTest, [("test2-1.html",),
-("test2-2.html",)]),
-SourceFileWithTest("test3", "0"*40, item.TestharnessTest)]
-m.update([(s, True) for s in sources])
-json = m.to_json()
-def filter(it):
-for test in it:
-if test[0] in ["test2-2.html", "test3"]:
-yield test
-filtered_manifest = manifest.Manifest.from_json("/", json, types=["testharness"], meta_filters=[filter])
-actual = [
-(ty, path, [test.id for test in tests])
-for (ty, path, tests) in filtered_manifest
-]
-assert actual == [
-("testharness", "test2", ["/test2-2.html"]),
-("testharness", "test3", ["/test3"]),
-]
def test_reftest_node_by_url():
m = manifest.Manifest()

View file

@@ -484,7 +484,7 @@ class ClientHandshakeProcessor(ClientHandshakeBase):
if ch == '\n':
break
-m = re.match('HTTP/\\d+\.\\d+ (\\d\\d\\d) .*\r\n', status_line)
+m = re.match('HTTP/\\d+\\.\\d+ (\\d\\d\\d) .*\r\n', status_line)
if m is None:
raise ClientHandshakeError(
'Wrong status line format: %r' % status_line)

View file

@@ -369,7 +369,7 @@ def _alias_handlers(dispatcher, websock_handlers_map_file):
for line in fp:
if line[0] == '#' or line.isspace():
continue
-m = re.match('(\S+)\s+(\S+)', line)
+m = re.match(r'(\S+)\s+(\S+)', line)
if not m:
logging.warning('Wrong format in map file:' + line)
continue

View file

@@ -316,7 +316,7 @@ class WebSocketHandshake(object):
self._options.server_port,
self._options.use_tls))
-if self._options.version is 8:
+if self._options.version == 8:
fields.append(_sec_origin_header(self._options.origin))
else:
fields.append(_origin_header(self._options.origin))

View file

@@ -160,7 +160,7 @@ def all_tests(data):
for UA, results in data.iteritems():
for result in results["results"]:
id = test_id(result["test"])
-tests[id] |= set(subtest["name"] for subtest in result["subtests"])
+tests[id] |= {subtest["name"] for subtest in result["subtests"]}
return tests
@@ -179,7 +179,7 @@ def group_results(data):
def result():
return {
-"harness": dict((UA, (None, None)) for UA in UAs),
+"harness": {UA: (None, None) for UA in UAs},
"subtests": None # init this later
}
@@ -191,9 +191,9 @@ def group_results(data):
result = results_by_test[id]
if result["subtests"] is None:
-result["subtests"] = dict(
-(name, dict((UA, (None, None)) for UA in UAs)) for name in tests[id]
-)
+result["subtests"] = {
+name: {UA: (None, None) for UA in UAs} for name in tests[id]
+}
result["harness"][UA] = (test_data["status"], test_data["message"])
for subtest in test_data["subtests"]:

View file

@@ -722,7 +722,7 @@ def build_config(override_path=None, **kwargs):
return rv
def _make_subdomains_product(s, depth=2):
-return set(u".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1))))
+return {u".".join(x) for x in chain(*(product(s, repeat=i) for i in range(1, depth+1)))}
_subdomains = {u"www",
u"www1",

View file

@@ -304,7 +304,7 @@ class Firefox(Browser):
# This is used rather than an API call to avoid rate limits
tags = call("git", "ls-remote", "--tags", "--refs",
"https://github.com/mozilla/geckodriver.git")
-release_re = re.compile(".*refs/tags/v(\d+)\.(\d+)\.(\d+)")
+release_re = re.compile(r".*refs/tags/v(\d+)\.(\d+)\.(\d+)")
latest_release = 0
for item in tags.split("\n"):
m = release_re.match(item)

View file

@@ -121,7 +121,7 @@ def check_environ(product):
missing_hosts = set(expected_hosts)
if is_windows:
-hosts_path = "%s\System32\drivers\etc\hosts" % os.environ.get("SystemRoot", "C:\Windows")
+hosts_path = r"%s\System32\drivers\etc\hosts" % os.environ.get("SystemRoot", r"C:\Windows")
else:
hosts_path = "/etc/hosts"

View file

@@ -196,7 +196,7 @@ def affected_testfiles(files_changed, skip_dirs=None,
manifest_path=None, manifest_update=True):
"""Determine and return list of test files that reference changed files."""
if skip_dirs is None:
-skip_dirs = set(["conformance-checkers", "docs", "tools"])
+skip_dirs = {"conformance-checkers", "docs", "tools"}
affected_testfiles = set()
# Exclude files that are in the repo root, because
# they are not part of any test.
@@ -219,7 +219,7 @@ def affected_testfiles(files_changed, skip_dirs=None,
interfaces_changed = interfaces_files.intersection(nontests_changed)
nontests_changed = nontests_changed.intersection(support_files)
-tests_changed = set(item for item in files_changed if item in test_files)
+tests_changed = {item for item in files_changed if item in test_files}
nontest_changed_paths = set()
rewrites = {"/resources/webidl2/lib/webidl2.js": "/resources/WebIDLParser.js"}
@@ -301,7 +301,7 @@ def get_parser():
# TODO: Consolidate with `./wpt run --affected`:
# https://github.com/web-platform-tests/wpt/issues/14560
parser.add_argument("--ignore-rules", nargs="*", type=set,
-default=set(["resources/testharness*"]),
+default={"resources/testharness*"},
help="Rules for paths to exclude from lists of changes. Rules are paths "
"relative to the test root, with * before a separator or the end matching "
"anything other than a path separator and ** in that position matching "
@@ -355,7 +355,7 @@ def run_tests_affected(**kwargs):
manifest_path = os.path.join(kwargs["metadata_root"], "MANIFEST.json")
tests_changed, dependents = affected_testfiles(
changed,
-set(["conformance-checkers", "docs", "tools"]),
+{"conformance-checkers", "docs", "tools"},
manifest_path=manifest_path
)

View file

@@ -322,8 +322,8 @@ def test_files_changed_ignore():
def test_files_changed_ignore_rules():
from tools.wpt.testfiles import compile_ignore_rule
-assert compile_ignore_rule("foo*bar*/baz").pattern == "^foo\*bar[^/]*/baz$"
-assert compile_ignore_rule("foo**bar**/baz").pattern == "^foo\*\*bar.*/baz$"
+assert compile_ignore_rule("foo*bar*/baz").pattern == r"^foo\*bar[^/]*/baz$"
+assert compile_ignore_rule("foo**bar**/baz").pattern == r"^foo\*\*bar.*/baz$"
assert compile_ignore_rule("foobar/baz/*").pattern == "^foobar/baz/[^/]*$"
assert compile_ignore_rule("foobar/baz/**").pattern == "^foobar/baz/.*$"

View file

@@ -135,7 +135,7 @@ class MarionetteTestharnessProtocolPart(TestharnessProtocolPart):
"Loading initial page %s failed. Ensure that the "
"there are no other programs bound to this port and "
"that your firewall rules or network setup does not "
-"prevent access.\e%s" % (url, traceback.format_exc(e)))
+r"prevent access.\e%s" % (url, traceback.format_exc(e)))
raise
self.runner_handle = self.marionette.current_window_handle
format_map = {"title": threading.current_thread().name.replace("'", '"')}
@@ -212,7 +212,7 @@ class MarionetteTestharnessProtocolPart(TestharnessProtocolPart):
if test_window is None:
handles = self.marionette.window_handles
if len(handles) == 2:
-test_window = next(iter(set(handles) - set([parent])))
+test_window = next(iter(set(handles) - {parent}))
elif handles[0] == parent and len(handles) > 2:
# Hope the first one here is the test window
test_window = handles[1]

View file

@@ -128,7 +128,7 @@ class SeleniumTestharnessProtocolPart(TestharnessProtocolPart):
if test_window is None:
after = self.webdriver.window_handles
if len(after) == 2:
-test_window = next(iter(set(after) - set([parent])))
+test_window = next(iter(set(after) - {parent}))
elif after[0] == parent and len(after) > 2:
# Hope the first one here is the test window
test_window = after[1]
@@ -247,7 +247,7 @@ class SeleniumRun(object):
timeout = self.timeout
try:
-self.protocol.base.set_timeout((timeout + extra_timeout))
+self.protocol.base.set_timeout(timeout + extra_timeout)
except exceptions.ErrorInResponseException:
self.logger.error("Lost WebDriver connection")
return Stop

View file

@@ -121,7 +121,7 @@ class WebDriverTestharnessProtocolPart(TestharnessProtocolPart):
if test_window is None:
after = self.webdriver.handles
if len(after) == 2:
-test_window = next(iter(set(after) - set([parent])))
+test_window = next(iter(set(after) - {parent}))
elif after[0] == parent and len(after) > 2:
# Hope the first one here is the test window
test_window = after[1]
@@ -258,7 +258,7 @@ class WebDriverRun(object):
timeout = self.timeout
try:
-self.protocol.base.set_timeout((timeout + extra_timeout))
+self.protocol.base.set_timeout(timeout + extra_timeout)
except client.UnknownErrorException:
self.logger.error("Lost WebDriver connection")
return Stop

View file

@@ -353,7 +353,7 @@ class TestNode(ManifestItem):
@property
def is_empty(self):
-required_keys = set(["type"])
+required_keys = {"type"}
if set(self._data.keys()) != required_keys:
return False
return all(child.is_empty for child in self.children)

View file

@@ -182,7 +182,7 @@ class TestNode(ManifestItem):
@property
def is_empty(self):
-ignore_keys = set(["type"])
+ignore_keys = {"type"}
if set(self._data.keys()) - ignore_keys:
return False
return all(child.is_empty for child in self.children)
@@ -663,7 +663,7 @@ def group_conditionals(values, property_order=None, boolean_properties=None):
property_order = ["debug", "os", "version", "processor", "bits"]
if boolean_properties is None:
-boolean_properties = set(["debug"])
+boolean_properties = {"debug"}
else:
boolean_properties = set(boolean_properties)
@@ -676,7 +676,7 @@ def group_conditionals(values, property_order=None, boolean_properties=None):
if not by_property:
raise ConditionError
-properties = set(item[0] for item in by_property.iterkeys())
+properties = {item[0] for item in by_property.iterkeys()}
include_props = []
for prop in property_order:

View file

@@ -516,7 +516,7 @@ class PackedResultList(object):
else:
value = status_intern.get(value_idx)
-run_info = run_info_intern.get((packed & 0x00FF))
+run_info = run_info_intern.get(packed & 0x00FF)
return prop, run_info, value

View file

@@ -110,14 +110,13 @@ class TagFilter(object):
class ManifestLoader(object):
def __init__(self, test_paths, force_manifest_update=False, manifest_download=False,
-types=None, meta_filters=None):
+types=None):
do_delayed_imports()
self.test_paths = test_paths
self.force_manifest_update = force_manifest_update
self.manifest_download = manifest_download
self.types = types
self.logger = structured.get_default_logger()
-self.meta_filters = meta_filters
if self.logger is None:
self.logger = structured.structuredlog.StructuredLogger("ManifestLoader")
@@ -137,7 +136,7 @@ class ManifestLoader(object):
download_from_github(manifest_path, tests_path)
return manifest.load_and_update(tests_path, manifest_path, url_base,
cache_root=cache_root, update=self.force_manifest_update,
-meta_filters=self.meta_filters, types=self.types)
+types=self.types)
def iterfilter(filters, iter):

View file

@@ -62,7 +62,7 @@ def make_mock_manifest(*items):
filename = dir_path + "/%i.html" % i
tests.append((test_type,
filename,
-set([TestharnessTest("/foo.bar", filename, "/", filename)])))
+{TestharnessTest("/foo.bar", filename, "/", filename)}))
return rv

View file

@@ -478,7 +478,7 @@ class Tokenizer(object):
elif c == "U":
return self.decode_escape(6)
elif c in ["a", "b", "f", "n", "r", "t", "v"]:
-return eval("'\%s'" % c)
+return eval(r"'\%s'" % c)
elif c is eol:
raise ParseError(self.filename, self.line_number, "EOL in escape")
else:

View file

@@ -3,7 +3,7 @@ from .parser import atoms, precedence
atom_names = {v:"@%s" % k for (k,v) in atoms.items()}
-named_escapes = set(["\a", "\b", "\f", "\n", "\r", "\t", "\v"])
+named_escapes = {"\a", "\b", "\f", "\n", "\r", "\t", "\v"}
def escape(string, extras=""):
# Assumes input bytes are either UTF8 bytes or unicode.

View file

@@ -142,4 +142,4 @@ key_1: other_value
self.assertTrue(manifest.has_key("key_1"))
self.assertFalse(manifest.has_key("key_2"))
-self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
+self.assertEquals(set(manifest.iterkeys()), {"key", "key_1"})

View file

@@ -146,7 +146,7 @@ class TokenizerTest(unittest.TestCase):
""")
def test_18(self):
-self.compare("""key: \]
+self.compare(r"""key: \]
""", """key: ]
""")

View file

@@ -87,8 +87,8 @@ key_1: other_value
self.assertTrue(manifest.has_key("key_1"))
self.assertFalse(manifest.has_key("key_2"))
-self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
-self.assertEquals(set(manifest.itervalues()), set(["value_1", "other_value"]))
+self.assertEquals(set(manifest.iterkeys()), {"key", "key_1"})
+self.assertEquals(set(manifest.itervalues()), {"value_1", "other_value"})
def test_is_empty_1(self):
data = """

View file

@@ -27,7 +27,7 @@ class TokenizerTest(unittest.TestCase):
(token_types.paren, "]")])
def test_heading_1(self):
-self.compare(b"""[Heading [text\]]""",
+self.compare(br"""[Heading [text\]]""",
[(token_types.paren, "["),
(token_types.string, "Heading [text]"),
(token_types.paren, "]")])
@@ -39,7 +39,7 @@ class TokenizerTest(unittest.TestCase):
(token_types.paren, "]")])
def test_heading_3(self):
-self.compare(b"""[Heading [\]text]""",
+self.compare(br"""[Heading [\]text]""",
[(token_types.paren, "["),
(token_types.string, "Heading []text"),
(token_types.paren, "]")])
@@ -49,7 +49,7 @@ class TokenizerTest(unittest.TestCase):
self.tokenize(b"[Heading")
def test_heading_5(self):
-self.compare(b"""[Heading [\]text] #comment""",
+self.compare(br"""[Heading [\]text] #comment""",
[(token_types.paren, "["),
(token_types.string, "Heading []text"),
(token_types.paren, "]")])

View file

@@ -6,7 +6,7 @@ from collections import defaultdict
from .wptmanifest.parser import atoms
atom_reset = atoms["Reset"]
-enabled_tests = set(["testharness", "reftest", "wdspec"])
+enabled_tests = {"testharness", "reftest", "wdspec"}
class Result(object):
@@ -39,28 +39,28 @@ class SubtestResult(object):
class TestharnessResult(Result):
default_expected = "OK"
-statuses = set(["OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"])
+statuses = {"OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"}
class TestharnessSubtestResult(SubtestResult):
default_expected = "PASS"
-statuses = set(["PASS", "FAIL", "TIMEOUT", "NOTRUN"])
+statuses = {"PASS", "FAIL", "TIMEOUT", "NOTRUN"}
class ReftestResult(Result):
default_expected = "PASS"
-statuses = set(["PASS", "FAIL", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT",
-"CRASH"])
+statuses = {"PASS", "FAIL", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT",
+"CRASH"}
class WdspecResult(Result):
default_expected = "OK"
-statuses = set(["OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"])
+statuses = {"OK", "ERROR", "INTERNAL-ERROR", "TIMEOUT", "EXTERNAL-TIMEOUT", "CRASH"}
class WdspecSubtestResult(SubtestResult):
default_expected = "PASS"
-statuses = set(["PASS", "FAIL", "ERROR"])
+statuses = {"PASS", "FAIL", "ERROR"}
def get_run_info(metadata_root, product, **kwargs):

View file

@@ -75,7 +75,7 @@ class TestUsingServer(unittest.TestCase):
req.add_data(body)
if auth is not None:
-req.add_header("Authorization", b"Basic %s" % base64.b64encode((b"%s:%s" % auth)))
+req.add_header("Authorization", b"Basic %s" % base64.b64encode(b"%s:%s" % auth))
return urlopen(req)

View file

@@ -113,10 +113,10 @@ class Response(object):
time or interval from now when the cookie expires
"""
-days = dict((i+1, name) for i, name in enumerate(["jan", "feb", "mar",
-"apr", "may", "jun",
-"jul", "aug", "sep",
-"oct", "nov", "dec"]))
+days = {i+1: name for i, name in enumerate(["jan", "feb", "mar",
+"apr", "may", "jun",
+"jul", "aug", "sep",
+"oct", "nov", "dec"])}
if value is None:
value = ''
max_age = 0

View file

@@ -102,7 +102,7 @@ class Router(object):
self.register(*route)
def register(self, methods, path, handler):
-"""Register a handler for a set of paths.
+r"""Register a handler for a set of paths.
:param methods: Set of methods this should match. "*" is a
special value indicating that all methods should