Update web-platform-tests to revision 14cfa4d648cc1c853b4153268df672d21425f8c1

Josh Matthews 2017-10-30 09:31:22 -04:00
parent 1b73cf3352
commit 75736751d9
1213 changed files with 19434 additions and 12344 deletions

View file

@ -6,6 +6,8 @@ if [[ $(./wpt test-jobs --includes $JOB; echo $?) -eq 0 ]]; then
git submodule update --init --recursive 1>&2
export DISPLAY=:99.0
sh -e /etc/init.d/xvfb start 1>&2
# For uploading the manifest
export WPT_MANIFEST_FILE=$HOME/meta/MANIFEST-$(git rev-parse HEAD).json
else
export RUN_JOB=0
fi

View file

@ -9,7 +9,8 @@ from ConfigParser import SafeConfigParser
import requests
wpt_root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
here = os.path.dirname(__file__)
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
sys.path.insert(0, wpt_root)
from tools.wpt import testfiles
@ -26,6 +27,8 @@ wptrunner = None
def setup_logging():
"""Set up basic debug logger."""
global logger
logger = logging.getLogger(here)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(logging.BASIC_FORMAT, None)
handler.setFormatter(formatter)
@ -241,6 +244,25 @@ def post_results(results, pr_number, iterations, product, url, status):
requests.post(url, json=payload)
def get_changed_files(manifest_path, rev, ignore_changes, skip_tests):
if not rev:
branch_point = testfiles.branch_point()
revish = "%s..HEAD" % branch_point
else:
revish = rev
files_changed, files_ignored = testfiles.files_changed(revish, ignore_changes)
if files_ignored:
logger.info("Ignoring %s changed files:\n%s" %
(len(files_ignored), "".join(" * %s\n" % item for item in files_ignored)))
tests_changed, files_affected = testfiles.affected_testfiles(files_changed, skip_tests,
manifest_path=manifest_path)
return tests_changed, files_affected
def main():
"""Perform check_stability functionality and return exit code."""
@ -280,8 +302,6 @@ def run(venv, wpt_args, **kwargs):
except OSError:
pass
logger = logging.getLogger(os.path.splitext(__file__)[0])
setup_logging()
browser_name = wpt_args.product.split(":")[0]
@ -304,29 +324,25 @@ def run(venv, wpt_args, **kwargs):
head_sha1 = get_sha1()
logger.info("Testing web-platform-tests at revision %s" % head_sha1)
if not kwargs["rev"]:
branch_point = testfiles.branch_point()
revish = "%s..HEAD" % branch_point
else:
revish = kwargs["rev"]
files_changed, files_ignored = testfiles.files_changed(revish, ignore_changes)
if files_ignored:
logger.info("Ignoring %s changed files:\n%s" % (len(files_ignored),
"".join(" * %s\n" % item for item in files_ignored)))
tests_changed, files_affected = testfiles.affected_testfiles(files_changed, skip_tests,
manifest_path=os.path.join(
wpt_args.metadata_root,
"MANIFEST.json"))
if not (tests_changed or files_affected):
logger.info("No tests changed")
return 0
wpt_kwargs = Kwargs(vars(wpt_args))
wpt_kwargs["test_list"] = list(tests_changed | files_affected)
if not wpt_kwargs["test_list"]:
manifest_path = os.path.join(wpt_kwargs["metadata_root"], "MANIFEST.json")
tests_changed, files_affected = get_changed_files(manifest_path, kwargs["rev"],
ignore_changes, skip_tests)
if not (tests_changed or files_affected):
logger.info("No tests changed")
return 0
if tests_changed:
logger.debug("Tests changed:\n%s" % "".join(" * %s\n" % item for item in tests_changed))
if files_affected:
logger.debug("Affected tests:\n%s" % "".join(" * %s\n" % item for item in files_affected))
wpt_kwargs["test_list"] = list(tests_changed | files_affected)
set_default_args(wpt_kwargs)
do_delayed_imports()
@ -340,12 +356,6 @@ def run(venv, wpt_args, **kwargs):
logger.info("Using binary %s" % wpt_kwargs["binary"])
if tests_changed:
logger.debug("Tests changed:\n%s" % "".join(" * %s\n" % item for item in tests_changed))
if files_affected:
logger.debug("Affected tests:\n%s" % "".join(" * %s\n" % item for item in files_affected))
with TravisFold("running_tests"):
logger.info("Starting tests")
@ -377,7 +387,7 @@ def run(venv, wpt_args, **kwargs):
if __name__ == "__main__":
try:
retcode = main()
except:
except Exception:
import traceback
traceback.print_exc()
sys.exit(1)
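
The check_stability changes above factor the changed-file computation into a reusable get_changed_files helper so the revision handling lives in one place. A rough usage sketch follows; the paths and arguments are hypothetical, and it assumes the repo root is on sys.path so the module is importable as tools.ci.check_stability:

# Rough sketch only (hypothetical paths and arguments).
from tools.ci.check_stability import get_changed_files, setup_logging

setup_logging()
tests_changed, files_affected = get_changed_files(
    "/path/to/wpt/MANIFEST.json",  # hypothetical manifest location
    rev=None,                      # None => diff the branch point against HEAD
    ignore_changes=set(),          # nothing explicitly ignored
    skip_tests=[])                 # no test directories skipped
print(sorted(tests_changed | files_affected))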

View file

@ -0,0 +1,12 @@
set -ex
SCRIPT_DIR=$(dirname $(readlink -f "$0"))
WPT_ROOT=$(readlink -f $SCRIPT_DIR/../..)
cd $WPT_ROOT
mkdir -p ~/meta
./wpt manifest -p ~/meta/MANIFEST.json
cp ~/meta/MANIFEST.json $WPT_MANIFEST_FILE
# Force overwrite of any existing file
gzip -f $WPT_MANIFEST_FILE
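
For reference, the final gzip -f step corresponds roughly to the following Python sketch; the input path is hypothetical, and unlike gzip -f this version leaves the uncompressed copy in place:

import gzip
import shutil

manifest_file = "/home/user/meta/MANIFEST-abcdef0.json"  # hypothetical $WPT_MANIFEST_FILE value
with open(manifest_file, "rb") as src, gzip.open(manifest_file + ".gz", "wb") as dst:
    shutil.copyfileobj(src, dst)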

View file

@ -21,6 +21,7 @@ job_path_map = {
"!.*/README",
"!css/[^/]*$"],
"lint": [".*"],
"manifest_upload": [".*"],
"resources_unittest": ["resources/"],
"tools_unittest": ["tools/"],
"wptrunner_unittest": ["tools/wptrunner/*"],
@ -99,6 +100,12 @@ def get_jobs(paths, **kwargs):
if not rules:
break
# Default jobs should run even if there were no changes
if not paths:
for job, path_re in iteritems(job_path_map):
if ".*" in path_re:
jobs.add(job)
return jobs
@ -116,4 +123,4 @@ def run(**kwargs):
for item in sorted(jobs):
print(item)
else:
return 0 if set(kwargs["includes"]) == jobs else 1
return 0 if set(kwargs["includes"]).issubset(jobs) else 1
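
Two behaviour changes land here: jobs whose only trigger pattern is ".*" (lint and the new manifest_upload) are treated as defaults that run even when no paths changed, and test-jobs now succeeds when the requested jobs are a subset of the selected ones rather than an exact match. An illustrative sketch of the default-job rule (a stand-in, not the real get_jobs):

# Stand-in sketch of the ".*" default-job rule added above.
job_path_map = {
    "lint": [".*"],
    "manifest_upload": [".*"],
    "resources_unittest": ["resources/"],
}

def default_jobs(path_map):
    # A job triggered by the catch-all pattern should run for an empty change set too.
    return set(job for job, patterns in path_map.items() if ".*" in patterns)

assert default_jobs(job_path_map) == {"lint", "manifest_upload"}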

View file

@ -1,12 +1,16 @@
from tools.ci import jobs
default_jobs = set(["lint", "manifest_upload"])
def test_testharness():
assert jobs.get_jobs(["resources/testharness.js"]) == set(["lint", "resources_unittest"])
assert jobs.get_jobs(["resources/testharness.js"]) == default_jobs | set(["resources_unittest"])
assert jobs.get_jobs(["resources/testharness.js"],
includes=["resources_unittest"]) == set(["resources_unittest"])
assert jobs.get_jobs(["foo/resources/testharness.js"],
includes=["resources_unittest"]) == set()
def test_stability():
assert jobs.get_jobs(["dom/historical.html"],
includes=["stability"]) == set(["stability"])
@ -32,8 +36,10 @@ def test_stability():
"css/CSS21/test-001.html"],
includes=["stability"]) == set(["stability"])
def test_lint():
assert jobs.get_jobs(["README.md"]) == set(["lint"])
def test_default():
assert jobs.get_jobs(["README.md"]) == default_jobs
def test_tools_unittest():
assert jobs.get_jobs(["tools/ci/test/test_jobs.py"],
@ -43,12 +49,14 @@ def test_tools_unittest():
assert jobs.get_jobs(["dom/historical.html"],
includes=["tools_unittest"]) == set()
def test_wptrunner_unittest():
assert jobs.get_jobs(["tools/wptrunner/wptrunner/wptrunner.py"],
includes=["wptrunner_unittest"]) == set(["wptrunner_unittest"])
assert jobs.get_jobs(["tools/example.py"],
includes=["wptrunner_unittest"]) == set()
def test_build_css():
assert jobs.get_jobs(["css/css-build-testsuites.sh"],
includes=["build_css"]) == set(["build_css"])

View file

@ -66,7 +66,7 @@ def fnmatch_translate(pat, path_name=False):
parts.append("$")
try:
return re.compile("".join(parts))
except:
except Exception:
raise

View file

@ -361,6 +361,13 @@ class LayoutTestsRegexp(Regexp):
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "eventSender/testRunner/window.internals used; these are LayoutTests-specific APIs (WebKit/Blink)"
class SpecialPowersRegexp(Regexp):
pattern = b"SpecialPowers"
error = "SPECIALPOWERS API"
file_extensions = [".html", ".htm", ".js", ".xht", ".xhtml", ".svg"]
description = "SpecialPowers used; this is gecko-specific and not supported in wpt"
regexps = [item() for item in
[TrailingWhitespaceRegexp,
TabsRegexp,
@ -371,7 +378,8 @@ regexps = [item() for item in
ConsoleRegexp,
GenerateTestsRegexp,
PrintRegexp,
LayoutTestsRegexp]]
LayoutTestsRegexp,
SpecialPowersRegexp]]
def check_regexp_line(repo_root, path, f, css_mode):
errors = []

View file

@ -1,3 +1,6 @@
{"manifest":
{"path": "update.py", "script": "run", "parser": "create_parser", "help": "Update the MANIFEST.json file",
"virtualenv": false},
"manifest-download":
{"path": "download.py", "script": "run", "parser": "create_parser", "help": "Download recent pregenerated MANIFEST.json file",
"virtualenv": false}}

View file

@ -0,0 +1,130 @@
import argparse
import gzip
import json
import io
import log
import os
from datetime import datetime, timedelta
import urllib2
from vcs import Git
here = os.path.dirname(__file__)
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
logger = log.get_logger()
def abs_path(path):
return os.path.abspath(os.path.expanduser(path))
def should_download(manifest_path, rebuild_time=timedelta(days=5)):
if not os.path.exists(manifest_path):
return True
mtime = datetime.fromtimestamp(os.path.getmtime(manifest_path))
if mtime < datetime.now() - rebuild_time:
return True
logger.info("Skipping manifest download because existing file is recent")
return False
def git_commits(repo_root):
git = Git.get_func(repo_root)
return [item for item in git("log", "--format=%H", "-n50").split("\n") if item]
def github_url(commits):
try:
resp = urllib2.urlopen("https://api.github.com/repos/w3c/web-platform-tests/releases")
except Exception:
return None
if resp.code != 200:
return None
try:
releases = json.load(resp.fp)
except ValueError:
logger.warning("Response was not valid JSON")
return None
fallback = None
for release in releases:
for commit in commits:
for item in release["assets"]:
if item["name"] == "MANIFEST-%s.json.gz" % commit:
return item["browser_download_url"]
elif item["name"] == "MANIFEST.json.gz" and not fallback:
fallback = item["browser_download_url"]
if fallback:
logger.info("Can't find a commit-specific manifest so just using the most recent one")
return fallback
def download_manifest(manifest_path, commits_func, url_func, force=False):
if not force and not should_download(manifest_path):
return False
commits = commits_func()
url = url_func(commits)
if not url:
logger.warning("No generated manifest found")
return False
logger.info("Downloading manifest from %s" % url)
try:
resp = urllib2.urlopen(url)
except Exception:
logger.warning("Downloading pregenerated manifest failed")
return False
if resp.code != 200:
logger.warning("Downloading pregenerated manifest failed; got HTTP status %d" %
resp.code)
return False
gzf = gzip.GzipFile(fileobj=io.BytesIO(resp.read()))
try:
decompressed = gzf.read()
except IOError:
logger.warning("Failed to decompress downloaded file")
return False
try:
with open(manifest_path, "w") as f:
f.write(decompressed)
except Exception:
logger.warning("Failed to write manifest")
return False
logger.info("Manifest downloaded")
return True
def create_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
"-p", "--path", type=abs_path, help="Path to manifest file.")
parser.add_argument(
"--tests-root", type=abs_path, default=wpt_root, help="Path to root of tests.")
parser.add_argument(
"--force", action="store_true",
help="Always download, even if the existing manifest is recent")
return parser
def download_from_github(path, tests_root, force=False):
return download_manifest(path, lambda: git_commits(tests_root), github_url,
force=force)
def run(**kwargs):
if kwargs["path"] is None:
path = os.path.join(kwargs["tests_root"], "MANIFEST.json")
else:
path = kwargs["path"]
success = download_from_github(path, kwargs["tests_root"], kwargs["force"])
return 0 if success else 1
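
download.py looks for a MANIFEST-<sha>.json.gz release asset matching one of the last 50 local commits, falls back to the generic MANIFEST.json.gz, and skips the download entirely if the local manifest is less than five days old. A rough usage sketch; the paths are hypothetical, and it assumes tools/ is on sys.path so the package imports as manifest.download, as testloader does further down:

from manifest.download import download_from_github

ok = download_from_github(path="/tmp/MANIFEST.json",   # hypothetical target path
                          tests_root="/path/to/wpt",   # hypothetical checkout root
                          force=False)                 # keep a sufficiently recent local copy
print("downloaded" if ok else "kept existing copy or download failed")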

View file

@ -7,13 +7,16 @@ import sys
import manifest
from . import vcs
from .log import get_logger
from .download import download_from_github
here = os.path.dirname(__file__)
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
logger = get_logger()
def update(tests_root, manifest, working_copy=False):
logger.info("Updating manifest")
tree = None
if not working_copy:
tree = vcs.Git.for_path(tests_root, manifest.url_base)
@ -29,7 +32,9 @@ def update_from_cli(**kwargs):
assert tests_root is not None
m = None
logger = get_logger()
if kwargs["download"]:
download_from_github(path, tests_root)
if not kwargs.get("rebuild", False):
try:
@ -37,8 +42,6 @@ def update_from_cli(**kwargs):
except manifest.ManifestVersionMismatch:
logger.info("Manifest version changed, rebuilding")
m = None
else:
logger.info("Updating manifest")
if m is None:
m = manifest.Manifest(kwargs["url_base"])
@ -69,6 +72,9 @@ def create_parser():
parser.add_argument(
"--url-base", action="store", default="/",
help="Base url to use as the mount point for tests in this manifest.")
parser.add_argument(
"--no-download", dest="download", action="store_false", default=True,
help="Never attempt to download the manifest.")
return parser

View file

@ -4,8 +4,9 @@
<title>Web tests</title>
<link rel='stylesheet' href='css/bootstrap.min.css'>
<link rel='stylesheet' href='css/bootstrap-theme.min.css'>
<link rel=stylesheet href=runner.css>
<script src=runner.js></script>
<link rel='stylesheet' href='runner.css'>
<script src='/common/get-host-info.sub.js'></script>
<script src='runner.js'></script>
<header class="navbar navbar-inverse navbar-fixed-top">

View file

@ -654,7 +654,8 @@ Results.prototype = {
};
function Runner(manifest_path) {
this.server = location.protocol + "//" + location.host;
this.server = get_host_info().HTTP_ORIGIN;
this.https_server = get_host_info().HTTPS_ORIGIN;
this.manifest = new Manifest(manifest_path);
this.path = null;
this.test_types = null;
@ -832,7 +833,10 @@ Runner.prototype = {
load: function(path) {
this.ensure_test_window();
this.test_window.location.href = this.server + path;
if (path.match(".https."))
this.test_window.location.href = this.https_server + path;
else
this.test_window.location.href = this.server + path;
},
progress: function() {
@ -844,8 +848,26 @@ Runner.prototype = {
this.num_tests = this.manifest_iterator.count();
}
return this.num_tests;
}
},
on_complete: function(tests, status) {
var harness_status_map = {0:"OK", 1:"ERROR", 2:"TIMEOUT", 3:"NOTRUN"};
var subtest_status_map = {0:"PASS", 1:"FAIL", 2:"TIMEOUT", 3:"NOTRUN"};
// this ugly hack is because IE really insists on holding on to the objects it creates in
// other windows, and on losing track of them when the window gets closed
var subtest_results = JSON.parse(JSON.stringify(
tests.map(function (test) {
return {name: test.name,
status: subtest_status_map[test.status],
message: test.message};
})
));
runner.on_result(harness_status_map[status.status],
status.message,
subtest_results);
}
};
@ -883,26 +905,12 @@ function setup() {
test_control.get_use_regex());
return;
}
window.addEventListener("message", function(e) {
if (e.data.type === "complete")
runner.on_complete(e.data.tests, e.data.status);
});
}
window.completion_callback = function(tests, status) {
var harness_status_map = {0:"OK", 1:"ERROR", 2:"TIMEOUT", 3:"NOTRUN"};
var subtest_status_map = {0:"PASS", 1:"FAIL", 2:"TIMEOUT", 3:"NOTRUN"};
// this ugly hack is because IE really insists on holding on to the objects it creates in
// other windows, and on losing track of them when the window gets closed
var subtest_results = JSON.parse(JSON.stringify(
tests.map(function (test) {
return {name: test.name,
status: subtest_status_map[test.status],
message: test.message};
})
));
runner.on_result(harness_status_map[status.status],
status.message,
subtest_results);
};
window.addEventListener("DOMContentLoaded", setup, false);
})();

View file

@ -279,7 +279,7 @@ def setup_logger(level):
import logging
global logger
logger = logging.getLogger("web-platform-tests")
logging.basicConfig(level=getattr(logging, level.upper()))
logger.setLevel(getattr(logging, level.upper()))
set_logger(logger)
@ -397,7 +397,7 @@ class ServerProc(object):
except socket.error:
print("Socket error on port %s" % port, file=sys.stderr)
raise
except:
except Exception:
print(traceback.format_exc(), file=sys.stderr)
raise
@ -408,7 +408,7 @@ class ServerProc(object):
self.stop.wait()
except KeyboardInterrupt:
pass
except:
except Exception:
print(traceback.format_exc(), file=sys.stderr)
raise

View file

@ -33,7 +33,7 @@ class Response(object):
if body:
try:
body = json.loads(body)
except:
except Exception:
raise error.UnknownErrorException("Failed to decode body as json:\n%s" % body)
return cls(status, body)

View file

@ -5,5 +5,6 @@
"help": "Get a list of files that have changed", "virtualenv": false},
"tests-affected": {"path": "testfiles.py", "script": "run_tests_affected", "parser": "get_parser_affected",
"help": "Get a list of tests affected by changes", "virtualenv": false},
"install": {"path": "install.py", "script": "run", "parser": "get_parser", "help": "Install browser components"}
"install": {"path": "install.py", "script": "run", "parser": "get_parser", "help": "Install browser components"},
"branch-point": {"path": "testfiles.py", "script": "display_branch_point", "parser": null, "help": "Print branch point from master", "virtualenv": false}
}

View file

@ -64,6 +64,7 @@ def args_general(kwargs):
kwargs.set_if_none("tests_root", wpt_root)
kwargs.set_if_none("metadata_root", wpt_root)
kwargs.set_if_none("manifest_update", True)
kwargs.set_if_none("manifest_download", True)
if kwargs["ssl_type"] in (None, "pregenerated"):
cert_root = os.path.join(wpt_root, "tools", "certs")
@ -109,8 +110,8 @@ def check_environ(product):
for line in f:
line = line.split("#", 1)[0].strip()
parts = line.split()
if len(parts) == 2:
host = parts[1]
hosts = parts[1:]
for host in hosts:
missing_hosts.discard(host)
if missing_hosts:
raise WptrunError("""Missing hosts file configuration. Expected entries like:
@ -415,5 +416,5 @@ if __name__ == "__main__":
from tools import localpaths
try:
main()
except:
except Exception:
pdb.post_mortem()
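
The hosts-file check above now accepts lines that map several hostnames to a single address (parts[1:] rather than requiring exactly two fields). A small sketch of the difference; the sample line and hostnames are hypothetical:

# Hypothetical /etc/hosts line with several wpt hostnames on one line.
line = "127.0.0.1  web-platform.test www.web-platform.test www1.web-platform.test  # wpt hosts"
parts = line.split("#", 1)[0].strip().split()
missing_hosts = {"web-platform.test", "www.web-platform.test", "www1.web-platform.test"}
for host in parts[1:]:      # the old code only looked at parts[1], so the extra names stayed "missing"
    missing_hosts.discard(host)
assert not missing_hosts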

View file

@ -27,9 +27,17 @@ def get_git_cmd(repo_path):
return git
def display_branch_point():
print(branch_point())
def branch_point():
git = get_git_cmd(wpt_root)
if os.environ.get("TRAVIS_PULL_REQUEST", "false") != "false":
if (os.environ.get("TRAVIS_PULL_REQUEST", "false") == "false" and
os.environ.get("TRAVIS_BRANCH") == "master"):
# For builds on the master branch just return the HEAD commit
return git("rev-parse", "HEAD")
elif os.environ.get("TRAVIS_PULL_REQUEST", "false") != "false":
# This is a PR, so the base branch is in TRAVIS_BRANCH
travis_branch = os.environ.get("TRAVIS_BRANCH")
assert travis_branch, "TRAVIS_BRANCH environment variable must be defined"

View file

@ -101,7 +101,6 @@ def test_tests_affected(capsys):
assert excinfo.value.code == 0
out, err = capsys.readouterr()
assert "html/browsers/offline/appcache/workers/appcache-worker.html" in out
assert err == ""
def test_serve():

View file

@ -10,11 +10,17 @@ logger = logging.getLogger(__name__)
class Kwargs(dict):
def set_if_none(self, name, value, err_fn=None, desc=None, extra_cond=None):
def set_if_none(self,
name, # type: str
value, # type: Any
err_fn=None, # type: (Kwargs, str) -> Any
desc=None, # type: str
extra_cond=None # type: (Kwargs) -> bool
):
if desc is None:
desc = name
if self[name] is None:
if name not in self or self[name] is None:
if extra_cond is not None and not extra_cond(self):
return
if callable(value):

View file

@ -1,6 +1,7 @@
import argparse
import os
import json
import logging
import os
import sys
from tools import localpaths
@ -89,6 +90,8 @@ def setup_virtualenv(path, props):
def main(prog=None, argv=None):
logging.basicConfig(level=logging.INFO)
if prog is None:
prog = sys.argv[0]
if argv is None:

View file

@ -14,12 +14,17 @@ from mozlog import structured
manifest = None
manifest_update = None
download_from_github = None
manifest_log = None
def do_delayed_imports():
# This relies on an already loaded module having set the sys.path correctly :(
global manifest, manifest_update
global manifest, manifest_update, download_from_github, manifest_log
from manifest import manifest
from manifest import update as manifest_update
from manifest.download import download_from_github
from manifest import log as manifest_log
class TestChunker(object):
def __init__(self, total_chunks, chunk_number):
@ -372,10 +377,11 @@ class TagFilter(object):
yield test
class ManifestLoader(object):
def __init__(self, test_paths, force_manifest_update=False):
def __init__(self, test_paths, force_manifest_update=False, manifest_download=False):
do_delayed_imports()
self.test_paths = test_paths
self.force_manifest_update = force_manifest_update
self.manifest_download = manifest_download
self.logger = structured.get_default_logger()
if self.logger is None:
self.logger = structured.structuredlog.StructuredLogger("ManifestLoader")
@ -391,13 +397,19 @@ class ManifestLoader(object):
return rv
def create_manifest(self, manifest_path, tests_path, url_base="/"):
self.update_manifest(manifest_path, tests_path, url_base, recreate=True)
self.update_manifest(manifest_path, tests_path, url_base, recreate=True,
download=self.manifest_download)
def update_manifest(self, manifest_path, tests_path, url_base="/",
recreate=False):
recreate=False, download=False):
self.logger.info("Updating test manifest %s" % manifest_path)
manifest_log.setup()
json_data = None
if download:
# TODO: make this not github-specific
download_from_github(manifest_path, tests_path)
if not recreate:
try:
with open(manifest_path) as f:
@ -422,7 +434,7 @@ class ManifestLoader(object):
manifest_path = os.path.join(metadata_path, "MANIFEST.json")
if (not os.path.exists(manifest_path) or
self.force_manifest_update):
self.update_manifest(manifest_path, tests_path, url_base)
self.update_manifest(manifest_path, tests_path, url_base, download=self.manifest_download)
manifest_file = manifest.load(tests_path, manifest_path)
if manifest_file.url_base != url_base:
self.logger.info("Updating url_base in manifest from %s to %s" % (manifest_file.url_base,
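
ManifestLoader gains a manifest_download flag; when set, update_manifest calls download_from_github before rebuilding, so a pregenerated manifest can seed the run. A rough construction sketch mirroring the get_loader change at the end of this commit; the test_paths layout is hypothetical and it assumes the wptrunner package is importable:

from wptrunner import testloader   # assumes wptrunner is on sys.path

test_paths = {"/": {"tests_path": "/path/to/wpt",            # hypothetical checkout
                    "metadata_path": "/path/to/wpt"}}         # hypothetical metadata root
loader = testloader.ManifestLoader(test_paths,
                                   force_manifest_update=True,
                                   manifest_download=True)    # try the pregenerated manifest first
manifests = loader.load()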

View file

@ -157,6 +157,7 @@ class BrowserManager(object):
self.started = False
self.init_timer = None
self.command_queue = command_queue
def update_settings(self, test):
browser_settings = self.browser.settings(test)

View file

@ -44,6 +44,8 @@ class State(object):
def load(cls, logger):
"""Load saved state from a file"""
try:
if not os.path.isfile(cls.filename):
return None
with open(cls.filename) as f:
try:
rv = pickle.load(f)

View file

@ -51,6 +51,8 @@ scheme host and port.""")
help="Regenerate the test manifest.")
parser.add_argument("--no-manifest-update", action="store_false", dest="manifest_update",
help="Prevent regeneration of the test manifest.")
parser.add_argument("--manifest-download", action="store_true", default=None,
help="Attempt to download a preexisting manifest when updating.")
parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
help="Multiplier relative to standard test timeout to use")

View file

@ -45,7 +45,8 @@ def get_loader(test_paths, product, ssl_env, debug=None, run_info_extras=None, *
run_info = wpttest.get_run_info(kwargs["run_info"], product, debug=debug,
extras=run_info_extras)
test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"]).load()
test_manifests = testloader.ManifestLoader(test_paths, force_manifest_update=kwargs["manifest_update"],
manifest_download=kwargs["manifest_download"]).load()
manifest_filters = []
meta_filters = []