Reorganize Servo's WPT Python scripts

This change moves all of Servo's WPT Python support scripts into a
single directory; previously they were scattered throughout the tree.
This should allow more code reuse and make it easier to understand how
everything fits together.

The changes:

- `tests/wpt/update` → `python/wpt/importer`
- `etc/ci/upstream-wpt-changes/wptupstreamer` → `python/wpt/exporter`
- `etc/ci/upstream-wpt-changes/test.py` → `python/wpt/test.py`
- `etc/ci/upstream-wpt-changes/tests` → `python/wpt/tests`
- `tests/wpt/servowpt.py` →
    - `python/wpt/update.py`
    - `python/wpt/run.py`
- `tests/wpt/manifestupdate.py` → `python/wpt/manifestupdate.py`

This change also removes:

- The ability to run the `update-wpt` and `test-wpt` commands without
  using `mach`. These didn't work very well, because it was difficult
  to get all of the wptrunner and mach dependencies installed outside
  of the Python virtualenv. It's simpler if they are always run through
  `mach` (see the sketch after this list).
- The old WPT change upstreaming script that was no longer used.
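
For context, here is a minimal, hypothetical sketch of how such a command can
be registered so that the `wptrunner` import only has to resolve inside
mach's virtualenv. The decorator API shown follows mozilla's
`mach.decorators`; Servo's vendored mach may register commands differently,
and the module and entry-point names are assumptions, not the code added by
this change.

```python
# Hypothetical sketch -- not the code added by this change.
from mach.decorators import Command, CommandArgument


@Command('test-wpt', category='testing',
         description='Run the web-platform-tests in Servo.')
@CommandArgument('params', nargs='...', help='Arguments forwarded to wptrunner.')
def test_wpt(command_context, params):
    # Deferring the import keeps wptrunner and its dependencies out of
    # module scope, so they only need to exist in mach's virtualenv.
    from wpt import run  # assumed module name after the move
    return run.main()    # assumed entry point
```
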
Martin Robinson 2023-04-16 11:33:02 +02:00
parent 9acb9cc5cf
commit e2cf3e8d1a
52 changed files with 237 additions and 888 deletions

@@ -9,7 +9,6 @@ In particular, this folder contains:
* `config.ini`: some configuration for the web-platform-tests.
* `include.ini`: the subset of web-platform-tests we currently run.
* `servowpt.py`: run the web-platform-tests in Servo.
* `web-platform-tests`: copy of the web-platform-tests.
* `metadata`: expected failures for the web-platform-tests we run.
* `mozilla`: web-platform-tests that cannot be upstreamed.
@@ -78,12 +77,6 @@ testharnessreport.js may have been installed incorrectly (see
[**Running the tests manually**](#running-the-tests-manually)
for more details).
Running the tests without mach
------------------------------
When avoiding `mach` for some reason, one can run `servowpt.py`
directly. However, this requires that all the dependencies for
`wptrunner` are available in the current python environment.
Running the tests manually
--------------------------

@@ -1,367 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# This allows using types that are defined later in the file.
from __future__ import annotations
import collections
import os
import sys
import mozlog
import mozlog.formatters.base
import mozlog.reader
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union
from six import itervalues
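# ANSI escape sequences for redrawing the interactive display; used as
# fallbacks when the optional `blessings` package is unavailable.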
DEFAULT_MOVE_UP_CODE = u"\x1b[A"
DEFAULT_CLEAR_EOL_CODE = u"\x1b[K"
@dataclass
class UnexpectedSubtestResult():
path: str
subtest: str
actual: str
expected: str
message: str
time: int
stack: Optional[str]
@dataclass
class UnexpectedResult():
path: str
actual: str
expected: str
message: str
time: int
stack: Optional[str]
unexpected_subtest_results: list[UnexpectedSubtestResult] = field(
default_factory=list)
issues: list[str] = field(default_factory=list)
flaky: bool = False
def __str__(self):
output = UnexpectedResult.to_lines(self)
if self.unexpected_subtest_results:
def make_subtests_failure(subtest_results):
# Test names sometimes contain control characters, which we want
# to be printed in their raw form, and not their interpreted form.
lines = []
for subtest in subtest_results[:-1]:
lines += UnexpectedResult.to_lines(
subtest, print_stack=False)
lines += UnexpectedResult.to_lines(subtest_results[-1])
return self.wrap_and_indent_lines(lines, " ").splitlines()
# Organize the failures by stack trace so we don't print the same stack trace
# more than once. They are really tall and we don't want to flood the screen
# with duplicate information.
results_by_stack = collections.defaultdict(list)
for subtest_result in self.unexpected_subtest_results:
results_by_stack[subtest_result.stack].append(subtest_result)
# Print stackless results first. They are all separate.
if None in results_by_stack:
output += make_subtests_failure(results_by_stack.pop(None))
for subtest_results in results_by_stack.values():
output += make_subtests_failure(subtest_results)
return UnexpectedResult.wrap_and_indent_lines(output, " ")
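    # Render a group of lines as an indented, box-drawn block: "▶" marks
    # the first line, "│" continuation lines, and "└" the last one.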
@staticmethod
def wrap_and_indent_lines(lines, indent):
if not lines:
return ""
output = indent + u"\u25B6 %s\n" % lines[0]
for line in lines[1:-1]:
output += indent + u"\u2502 %s\n" % line
if len(lines) > 1:
output += indent + u"\u2514 %s\n" % lines[-1]
return output
@staticmethod
    def to_lines(result: Union[UnexpectedSubtestResult, UnexpectedResult], print_stack=True):
first_line = result.actual
if result.expected != result.actual:
first_line += f" [expected {result.expected}]"
# Test names sometimes contain control characters, which we want
# to be printed in their raw form, and not their interpreted form.
first_line += f" {result.path.encode('unicode-escape').decode('utf-8')}"
if isinstance(result, UnexpectedResult) and result.issues:
first_line += f" ({', '.join([f'#{bug}' for bug in result.issues])})"
lines = [first_line]
if result.message:
for message_line in result.message.splitlines():
lines.append(f" \u2192 {message_line}")
if print_stack and result.stack:
lines.append("")
lines.extend(result.stack.splitlines())
return lines
class ServoHandler(mozlog.reader.LogHandler):
"""LogHandler designed to collect unexpected results for use by
script or by the ServoFormatter output formatter."""
def __init__(self):
self.reset_state()
def reset_state(self):
self.number_of_tests = 0
self.completed_tests = 0
self.need_to_erase_last_line = False
self.running_tests: Dict[str, str] = {}
self.test_output = collections.defaultdict(str)
self.subtest_failures = collections.defaultdict(list)
self.tests_with_failing_subtests = []
self.unexpected_results: List[UnexpectedResult] = []
self.expected = {
'OK': 0,
'PASS': 0,
'FAIL': 0,
'ERROR': 0,
'TIMEOUT': 0,
'SKIP': 0,
'CRASH': 0,
'PRECONDITION_FAILED': 0,
}
self.unexpected_tests = {
'OK': [],
'PASS': [],
'FAIL': [],
'ERROR': [],
'TIMEOUT': [],
'CRASH': [],
'PRECONDITION_FAILED': [],
}
def suite_start(self, data):
self.reset_state()
self.number_of_tests = sum(len(tests) for tests in itervalues(data["tests"]))
self.suite_start_time = data["time"]
def suite_end(self, _):
pass
def test_start(self, data):
self.running_tests[data['thread']] = data['test']
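    # A result is "expected" when the log entry carries no expected status
    # at all, or when its actual status is listed as a known intermittent.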
@staticmethod
def data_was_for_expected_result(data):
if "expected" not in data:
return True
return "known_intermittent" in data \
and data["status"] in data["known_intermittent"]
def test_end(self, data: dict) -> Optional[UnexpectedResult]:
self.completed_tests += 1
test_status = data["status"]
test_path = data["test"]
del self.running_tests[data['thread']]
had_expected_test_result = self.data_was_for_expected_result(data)
subtest_failures = self.subtest_failures.pop(test_path, [])
if had_expected_test_result and not subtest_failures:
self.expected[test_status] += 1
return None
# If the test crashed or timed out, we also include any process output,
# because there is a good chance that the test produced a stack trace
# or other error messages.
stack = data.get("stack", None)
if test_status in ("CRASH", "TIMEOUT"):
stack = f"\n{stack}" if stack else ""
stack = f"{self.test_output[test_path]}{stack}"
result = UnexpectedResult(
test_path,
test_status,
data.get("expected", test_status),
data.get("message", ""),
data["time"],
stack,
subtest_failures
)
if not had_expected_test_result:
self.unexpected_tests[result.actual].append(data)
if subtest_failures:
self.tests_with_failing_subtests.append(data)
self.unexpected_results.append(result)
return result
def test_status(self, data: dict):
if self.data_was_for_expected_result(data):
return
self.subtest_failures[data["test"]].append(UnexpectedSubtestResult(
data["test"],
data["subtest"],
data["status"],
data["expected"],
data.get("message", ""),
data["time"],
data.get('stack', None),
))
def process_output(self, data):
if data['thread'] not in self.running_tests:
return
test_name = self.running_tests[data['thread']]
self.test_output[test_name] += data['data'] + "\n"
def log(self, _):
pass
class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
"""Formatter designed to produce unexpected test results grouped
together in a readable format."""
def __init__(self):
ServoHandler.__init__(self)
self.current_display = ""
self.interactive = os.isatty(sys.stdout.fileno())
if self.interactive:
self.line_width = os.get_terminal_size().columns
self.move_up = DEFAULT_MOVE_UP_CODE
self.clear_eol = DEFAULT_CLEAR_EOL_CODE
try:
import blessings
self.terminal = blessings.Terminal()
self.move_up = self.terminal.move_up
self.clear_eol = self.terminal.clear_eol
except Exception as exception:
sys.stderr.write("GroupingFormatter: Could not get terminal "
"control characters: %s\n" % exception)
def text_to_erase_display(self):
if not self.interactive or not self.current_display:
return ""
return ((self.move_up + self.clear_eol)
* self.current_display.count('\n'))
def generate_output(self, text=None, new_display=None):
if not self.interactive:
return text
output = self.text_to_erase_display()
if text:
output += text
if new_display is not None:
self.current_display = new_display
return output + self.current_display
def test_counter(self):
if self.number_of_tests == 0:
return " [%i] " % self.completed_tests
else:
return " [%i/%i] " % (self.completed_tests, self.number_of_tests)
def build_status_line(self):
new_display = self.test_counter()
if self.running_tests:
indent = " " * len(new_display)
if self.interactive:
max_width = self.line_width - len(new_display)
else:
max_width = sys.maxsize
return new_display + ("\n%s" % indent).join(
val[:max_width] for val in self.running_tests.values()) + "\n"
else:
return new_display + "No tests running.\n"
def suite_start(self, data):
ServoHandler.suite_start(self, data)
if self.number_of_tests == 0:
return "Running tests in %s\n\n" % data[u'source']
else:
return "Running %i tests in %s\n\n" % (self.number_of_tests, data[u'source'])
def test_start(self, data):
ServoHandler.test_start(self, data)
if self.interactive:
return self.generate_output(new_display=self.build_status_line())
def test_end(self, data):
unexpected_result = ServoHandler.test_end(self, data)
if not unexpected_result:
if self.interactive:
return self.generate_output(new_display=self.build_status_line())
else:
return self.generate_output(text="%s%s\n" % (self.test_counter(), data["test"]))
# Surround test output by newlines so that it is easier to read.
output_for_unexpected_test = f"{unexpected_result}\n"
return self.generate_output(text=output_for_unexpected_test,
new_display=self.build_status_line())
def test_status(self, data):
ServoHandler.test_status(self, data)
def suite_end(self, data):
ServoHandler.suite_end(self, data)
if not self.interactive:
output = u"\n"
else:
output = ""
output += u"Ran %i tests finished in %.1f seconds.\n" % (
self.completed_tests, (data["time"] - self.suite_start_time) / 1000)
output += u" \u2022 %i ran as expected. %i tests skipped.\n" % (
sum(self.expected.values()), self.expected['SKIP'])
def text_for_unexpected_list(text, section):
tests = self.unexpected_tests[section]
if not tests:
return u""
return u" \u2022 %i tests %s\n" % (len(tests), text)
output += text_for_unexpected_list(u"crashed unexpectedly", 'CRASH')
output += text_for_unexpected_list(u"had errors unexpectedly", 'ERROR')
output += text_for_unexpected_list(u"failed unexpectedly", 'FAIL')
output += text_for_unexpected_list(u"precondition failed unexpectedly", 'PRECONDITION_FAILED')
output += text_for_unexpected_list(u"timed out unexpectedly", 'TIMEOUT')
output += text_for_unexpected_list(u"passed unexpectedly", 'PASS')
output += text_for_unexpected_list(u"unexpectedly okay", 'OK')
num_with_failing_subtests = len(self.tests_with_failing_subtests)
if num_with_failing_subtests:
output += (u" \u2022 %i tests had unexpected subtest results\n"
% num_with_failing_subtests)
output += "\n"
# Repeat failing test output, so that it is easier to find, since the
# non-interactive version prints all the test names.
if not self.interactive and self.unexpected_results:
output += u"Tests with unexpected results:\n"
output += "".join([str(result)
for result in self.unexpected_results])
return self.generate_output(text=output, new_display="")
def process_output(self, data):
ServoHandler.process_output(self, data)
def log(self, data):
ServoHandler.log(self, data)
# We are logging messages that begin with STDERR, because that is how exceptions
# in this formatter are indicated.
if data['message'].startswith('STDERR'):
return self.generate_output(text=data['message'] + "\n")
if data['level'] in ('CRITICAL', 'ERROR'):
return self.generate_output(text=data['message'] + "\n")

@@ -1,183 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import argparse
import imp
import os
import sys
import tempfile
from collections import defaultdict
from six import iterkeys, iteritems
from mozlog.structured import commandline
from wptrunner.wptcommandline import get_test_paths, set_from_config
manifest = None
servo_root = os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir)
def do_delayed_imports(wpt_dir):
global manifest
sys.path.insert(0, os.path.join(wpt_dir, "tools", "manifest"))
import manifest # noqa
def create_parser():
p = argparse.ArgumentParser()
p.add_argument("--check-clean", action="store_true",
help="Check that updating the manifest doesn't lead to any changes")
p.add_argument("--rebuild", action="store_true",
help="Rebuild the manifest from scratch")
commandline.add_logging_group(p)
return p
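# Used by `mach update-manifest`: load the vendored wpt tools, resolve the
# test paths from config.ini, then either check the manifest for staleness
# or rewrite it in place.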
def update(logger, wpt_dir, check_clean=True, rebuild=False):
localpaths = imp.load_source("localpaths", # noqa
os.path.join(wpt_dir, "web-platform-tests", "tools", "localpaths.py"))
kwargs = {"config": os.path.join(wpt_dir, "config.ini"),
"manifest_path": os.path.join(wpt_dir, "metadata"),
"tests_root": None,
"metadata_root": None}
set_from_config(kwargs)
config = kwargs["config"]
test_paths = get_test_paths(config)
do_delayed_imports(wpt_dir)
if check_clean:
return _check_clean(logger, test_paths)
return _update(logger, test_paths, rebuild)
def _update(logger, test_paths, rebuild):
for url_base, paths in iteritems(test_paths):
manifest_path = os.path.join(paths["metadata_path"], "MANIFEST.json")
cache_subdir = os.path.relpath(os.path.dirname(manifest_path),
os.path.dirname(__file__))
manifest.manifest.load_and_update(paths["tests_path"],
manifest_path,
url_base,
working_copy=True,
rebuild=rebuild,
cache_root=os.path.join(servo_root, ".wpt",
cache_subdir))
return 0
def _check_clean(logger, test_paths):
manifests_by_path = {}
rv = 0
for url_base, paths in iteritems(test_paths):
tests_path = paths["tests_path"]
manifest_path = os.path.join(paths["metadata_path"], "MANIFEST.json")
old_manifest = manifest.manifest.load_and_update(tests_path,
manifest_path,
url_base,
working_copy=False,
update=False,
write_manifest=False,)
# Even if no cache is specified, one will be used automatically by the
# VCS integration. Create a brand new cache every time to ensure that
# the VCS integration always thinks that any file modifications in the
# working directory are new and interesting.
cache_root = tempfile.mkdtemp()
new_manifest = manifest.manifest.load_and_update(tests_path,
manifest_path,
url_base,
working_copy=True,
update=True,
cache_root=cache_root,
write_manifest=False,
allow_cached=False)
manifests_by_path[manifest_path] = (old_manifest, new_manifest)
for manifest_path, (old_manifest, new_manifest) in iteritems(manifests_by_path):
if not diff_manifests(logger, manifest_path, old_manifest, new_manifest):
logger.error("Manifest %s is outdated, use |./mach update-manifest| to fix." % manifest_path)
rv = 1
return rv
def diff_manifests(logger, manifest_path, old_manifest, new_manifest):
"""Lint the differences between old and new versions of a
manifest. Differences are considered significant (and so produce
lint errors) if they produce a meaningful difference in the actual
tests run.
:param logger: mozlog logger to use for output
:param manifest_path: Path to the manifest being linted
:param old_manifest: Manifest object representing the initial manifest
:param new_manifest: Manifest object representing the updated manifest
"""
logger.info("Diffing old and new manifests %s" % manifest_path)
old_items, new_items = defaultdict(set), defaultdict(set)
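    # Build a comparable identity for each test: its id plus any script
    # metadata or reference links, so metadata-only changes still register
    # as differences.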
for manifest, items in [(old_manifest, old_items),
(new_manifest, new_items)]:
for test_type, path, tests in manifest:
for test in tests:
test_id = [test.id]
if hasattr(test, "script_metadata"):
if test.script_metadata is not None:
test_id.extend(tuple(item) for item in test.script_metadata)
if hasattr(test, "references"):
test_id.extend(tuple(item) for item in test.references)
test_id = tuple(test_id)
items[path].add((test_type, test_id))
old_paths = set(iterkeys(old_items))
new_paths = set(iterkeys(new_items))
added_paths = new_paths - old_paths
deleted_paths = old_paths - new_paths
common_paths = new_paths & old_paths
clean = True
for path in added_paths:
clean = False
log_error(logger, manifest_path, "%s in source but not in manifest." % path)
for path in deleted_paths:
clean = False
log_error(logger, manifest_path, "%s in manifest but removed from source." % path)
for path in common_paths:
old_tests = old_items[path]
new_tests = new_items[path]
added_tests = new_tests - old_tests
removed_tests = old_tests - new_tests
if added_tests or removed_tests:
clean = False
log_error(logger, manifest_path, "%s changed test types or metadata" % path)
if clean:
# Manifest currently has some list vs tuple inconsistencies that break
# a simple equality comparison.
old_paths = old_manifest.to_json()['items']
new_paths = new_manifest.to_json()['items']
if old_paths != new_paths:
logger.warning("Manifest %s contains correct tests but file hashes changed." % manifest_path) # noqa
clean = False
return clean
def log_error(logger, manifest_path, msg):
logger.lint_error(path=manifest_path,
message=msg,
lineno=0,
source="",
linter="wpt-manifest")

@@ -1,369 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import dataclasses
import grouping_formatter
import json
import os
import re
import sys
import urllib.error
import urllib.parse
import urllib.request
import mozlog
import mozlog.formatters
import multiprocessing
from typing import List, NamedTuple, Optional, Union
from grouping_formatter import UnexpectedResult, UnexpectedSubtestResult
SCRIPT_PATH = os.path.abspath(os.path.dirname(__file__))
SERVO_ROOT = os.path.abspath(os.path.join(SCRIPT_PATH, "..", ".."))
WPT_TOOLS_PATH = os.path.join(SCRIPT_PATH, "web-platform-tests", "tools")
CERTS_PATH = os.path.join(WPT_TOOLS_PATH, "certs")
sys.path.insert(0, WPT_TOOLS_PATH)
import localpaths # noqa: F401,E402
import update # noqa: F401,E402
TRACKER_API = "https://build.servo.org/intermittent-tracker"
TRACKER_API_ENV_VAR = "INTERMITTENT_TRACKER_API"
TRACKER_DASHBOARD_SECRET_ENV_VAR = "INTERMITTENT_TRACKER_DASHBOARD_SECRET"
def determine_build_type(kwargs: dict, target_dir: str):
if kwargs["release"]:
return "release"
elif kwargs["debug"]:
return "debug"
elif os.path.exists(os.path.join(target_dir, "debug")):
return "debug"
elif os.path.exists(os.path.join(target_dir, "release")):
return "release"
return "debug"
def set_if_none(args: dict, key: str, value):
if key not in args or args[key] is None:
args[key] = value
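# When --layout-2020 is in effect, point wptrunner at the alternate
# metadata directories and include manifest maintained for the new
# layout engine.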
def update_args_for_layout_2020(kwargs: dict):
if kwargs.pop("layout_2020"):
kwargs["test_paths"]["/"]["metadata_path"] = os.path.join(
SCRIPT_PATH, "metadata-layout-2020"
)
kwargs["test_paths"]["/_mozilla/"]["metadata_path"] = os.path.join(
SCRIPT_PATH, "mozilla", "meta-layout-2020"
)
kwargs["include_manifest"] = os.path.join(
SCRIPT_PATH, "include-layout-2020.ini"
)
def run_tests(**kwargs):
from wptrunner import wptrunner
from wptrunner import wptcommandline
# By default, Rayon selects the number of worker threads based on the
# available CPU count. This doesn't work very well when running tests on CI,
# since we run so many Servo processes in parallel. The result is a lot of
# extra timeouts. Instead, force Rayon to assume we are running on a 2 CPU
# environment.
os.environ["RAYON_RS_NUM_CPUS"] = "2"
os.environ["RUST_BACKTRACE"] = "1"
os.environ["HOST_FILE"] = os.path.join(SERVO_ROOT, "tests", "wpt", "hosts")
set_if_none(kwargs, "product", "servo")
set_if_none(kwargs, "config", os.path.join(SCRIPT_PATH, "config.ini"))
set_if_none(kwargs, "include_manifest", os.path.join(SCRIPT_PATH, "include.ini"))
set_if_none(kwargs, "manifest_update", False)
set_if_none(kwargs, "processes", multiprocessing.cpu_count())
set_if_none(kwargs, "ca_cert_path", os.path.join(CERTS_PATH, "cacert.pem"))
set_if_none(
kwargs, "host_key_path", os.path.join(CERTS_PATH, "web-platform.test.key")
)
set_if_none(
kwargs, "host_cert_path", os.path.join(CERTS_PATH, "web-platform.test.pem")
)
kwargs["user_stylesheets"].append(os.path.join(SERVO_ROOT, "resources", "ahem.css"))
if "CARGO_TARGET_DIR" in os.environ:
target_dir = os.path.join(os.environ["CARGO_TARGET_DIR"])
else:
target_dir = os.path.join(SERVO_ROOT, "target")
default_binary_path = os.path.join(
target_dir, determine_build_type(kwargs, target_dir), "servo"
)
if sys.platform == "win32":
target_dir += ".exe"
set_if_none(kwargs, "binary", default_binary_path)
set_if_none(kwargs, "webdriver_binary", default_binary_path)
if kwargs.pop("rr_chaos"):
kwargs["debugger"] = "rr"
kwargs["debugger_args"] = "record --chaos"
kwargs["repeat_until_unexpected"] = True
# TODO: Delete rr traces from green test runs?
prefs = kwargs.pop("prefs")
if prefs:
kwargs["binary_args"] = ["--pref=" + pref for pref in prefs]
if not kwargs.get("no_default_test_types"):
test_types = {
"servo": ["testharness", "reftest", "wdspec"],
"servodriver": ["testharness", "reftest"],
}
product = kwargs.get("product") or "servo"
kwargs["test_types"] = test_types[product]
filter_intermittents_output = kwargs.pop("filter_intermittents", None)
unexpected_raw_log_output_file = kwargs.pop("log_raw_unexpected", None)
raw_log_outputs = kwargs.get("log_raw", [])
wptcommandline.check_args(kwargs)
update_args_for_layout_2020(kwargs)
mozlog.commandline.log_formatters["servo"] = (
grouping_formatter.ServoFormatter,
"Servo's grouping output formatter",
)
use_mach_logging = False
if len(kwargs["test_list"]) == 1:
file_ext = os.path.splitext(kwargs["test_list"][0])[1].lower()
if file_ext in [".htm", ".html", ".js", ".xhtml", ".xht", ".py"]:
use_mach_logging = True
if use_mach_logging:
logger = wptrunner.setup_logging(kwargs, {"mach": sys.stdout})
else:
logger = wptrunner.setup_logging(kwargs, {"servo": sys.stdout})
handler = grouping_formatter.ServoHandler()
logger.add_handler(handler)
wptrunner.run_tests(**kwargs)
return_value = 0 if not handler.unexpected_results else 1
# Filter intermittents if that was specified on the command-line.
if handler.unexpected_results and filter_intermittents_output:
# Copy the list of unexpected results from the first run, so that we
# can access them after the tests are rerun (which changes
# `handler.unexpected_results`). After rerunning some tests will be
# marked as flaky but otherwise the contents of this original list
# won't change.
unexpected_results = list(handler.unexpected_results)
# This isn't strictly necessary since `handler.suite_start()` clears
# the state, but make sure that we are starting with a fresh handler.
handler.reset_state()
print(80 * "=")
print(f"Rerunning {len(unexpected_results)} tests "
"with unexpected results to detect flaky tests.")
unexpected_results_tests = [result.path for result in unexpected_results]
kwargs["test_list"] = unexpected_results_tests
kwargs["include"] = unexpected_results_tests
kwargs["pause_after_test"] = False
wptrunner.run_tests(**kwargs)
# Use the second run to mark tests from the first run as flaky, but
# discard the results otherwise.
# TODO: It might be a good idea to send the new results to the
# dashboard if they were also unexpected.
stable_tests = [result.path for result in handler.unexpected_results]
for result in unexpected_results:
result.flaky = result.path not in stable_tests
all_filtered = filter_intermittents(unexpected_results,
filter_intermittents_output)
return_value = 0 if all_filtered else 1
# Write the unexpected-only raw log if that was specified on the command-line.
if unexpected_raw_log_output_file:
if not raw_log_outputs:
print("'--log-raw-unexpected' not written without '--log-raw'.")
else:
write_unexpected_only_raw_log(
handler.unexpected_results,
raw_log_outputs[0].name,
unexpected_raw_log_output_file
)
return return_value
def update_tests(**kwargs):
from update import updatecommandline
set_if_none(kwargs, "product", "servo")
set_if_none(kwargs, "config", os.path.join(SCRIPT_PATH, "config.ini"))
kwargs["store_state"] = False
updatecommandline.check_args(kwargs)
update_args_for_layout_2020(kwargs)
logger = update.setup_logging(kwargs, {"mach": sys.stdout})
return_value = update.run_update(logger, **kwargs)
return 1 if return_value is update.exit_unclean else 0
class GithubContextInformation(NamedTuple):
build_url: Optional[str]
pull_url: Optional[str]
branch_name: Optional[str]
class TrackerDashboardFilter():
def __init__(self):
base_url = os.environ.get(TRACKER_API_ENV_VAR, TRACKER_API)
self.headers = {
"Content-Type": "application/json"
}
if TRACKER_DASHBOARD_SECRET_ENV_VAR in os.environ and os.environ[TRACKER_DASHBOARD_SECRET_ENV_VAR]:
self.url = f"{base_url}/dashboard/attempts"
secret = os.environ[TRACKER_DASHBOARD_SECRET_ENV_VAR]
self.headers["Authorization"] = f"Bearer {secret}"
else:
self.url = f"{base_url}/dashboard/query"
@staticmethod
def get_github_context_information() -> GithubContextInformation:
github_context = json.loads(os.environ.get("GITHUB_CONTEXT", "{}"))
if not github_context:
return GithubContextInformation(None, None, None)
repository = github_context['repository']
repo_url = f"https://github.com/{repository}"
run_id = github_context['run_id']
build_url = f"{repo_url}/actions/runs/{run_id}"
commit_title = github_context["event"]["head_commit"]["message"]
match = re.match(r"^Auto merge of #(\d+)", commit_title)
pr_url = f"{repo_url}/pull/{match.group(1)}" if match else None
return GithubContextInformation(
build_url,
pr_url,
github_context["ref_name"]
)
def make_data_from_result(
self,
result: Union[UnexpectedResult, UnexpectedSubtestResult],
) -> dict:
data = {
'path': result.path,
'subtest': None,
'expected': result.expected,
'actual': result.actual,
'time': result.time // 1000,
'message': result.message,
'stack': result.stack,
}
if isinstance(result, UnexpectedSubtestResult):
data["subtest"] = result.subtest
return data
def report_failures(self, unexpected_results: List[UnexpectedResult]):
attempts = []
for result in unexpected_results:
attempts.append(self.make_data_from_result(result))
for subtest_result in result.unexpected_subtest_results:
attempts.append(self.make_data_from_result(subtest_result))
context = self.get_github_context_information()
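        # POST the batch of attempts to the tracker; the response lists which
        # test paths are already known intermittents and the issues filed
        # for them.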
try:
request = urllib.request.Request(
url=self.url,
method='POST',
data=json.dumps({
'branch': context.branch_name,
'build_url': context.build_url,
'pull_url': context.pull_url,
'attempts': attempts
}).encode('utf-8'),
headers=self.headers)
known_intermittents = dict()
with urllib.request.urlopen(request) as response:
for test in json.load(response)["known"]:
known_intermittents[test["path"]] = \
[issue["number"] for issue in test["issues"]]
except urllib.error.HTTPError as e:
print(e)
print(e.readlines())
            raise e
for result in unexpected_results:
result.issues = known_intermittents.get(result.path, [])
def filter_intermittents(
unexpected_results: List[UnexpectedResult],
output_path: str
) -> bool:
print(f"Filtering {len(unexpected_results)} "
"unexpected results for known intermittents")
dashboard = TrackerDashboardFilter()
dashboard.report_failures(unexpected_results)
def add_result(output, text, results, filter_func) -> None:
filtered = [str(result) for result in filter(filter_func, results)]
if filtered:
output += [f"{text} ({len(results)}): ", *filtered]
def is_stable_and_unexpected(result):
return not result.flaky and not result.issues
output = []
add_result(output, "Flaky unexpected results", unexpected_results,
lambda result: result.flaky)
add_result(output, "Stable unexpected results that are known-intermittent",
unexpected_results, lambda result: not result.flaky and result.issues)
add_result(output, "Stable unexpected results",
unexpected_results, is_stable_and_unexpected)
print("\n".join(output))
with open(output_path, "w", encoding="utf-8") as file:
json.dump([dataclasses.asdict(result) for result in unexpected_results], file)
return not any([is_stable_and_unexpected(result) for result in unexpected_results])
def write_unexpected_only_raw_log(
unexpected_results: List[UnexpectedResult],
raw_log_file: str,
filtered_raw_log_file: str
):
tests = [result.path for result in unexpected_results]
print(f"Writing unexpected-only raw log to {filtered_raw_log_file}")
with open(filtered_raw_log_file, "w", encoding="utf-8") as output:
with open(raw_log_file) as input:
for line in input.readlines():
data = json.loads(line)
if data["action"] in ["suite_start", "suite_end"] or \
("test" in data and data["test"] in tests):
output.write(line)
def main():
from wptrunner import wptcommandline
parser = wptcommandline.create_parser()
kwargs = vars(parser.parse_args())
return run_tests(**kwargs)
if __name__ == "__main__":
    sys.exit(main())

@@ -1,41 +0,0 @@
#!/usr/bin/env python

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import os
import subprocess
import sys
from mozlog.structured import structuredlog
here = os.path.split(__file__)[0]
sys.path.insert(0, os.path.abspath(os.path.join(here, os.pardir, "web-platform-tests", "tools", "wptrunner")))
sys.path.insert(0, os.path.abspath(os.path.join(here, os.pardir, "web-platform-tests", "tools", "wptserve")))
sys.path.insert(0, os.path.abspath(os.path.join(here, os.pardir, "web-platform-tests", "tools")))
import localpaths
from wptrunner.update import setup_logging, WPTUpdate
from wptrunner.update.base import exit_unclean
from . import updatecommandline
from .update import UpdateRunner
def run_update(logger, **kwargs):
updater = WPTUpdate(logger, runner_cls=UpdateRunner, **kwargs)
return updater.run()
if __name__ == "__main__":
args = updatecommandline.parse_args()
logger = setup_logging(args, {"mach": sys.stdout})
assert structuredlog.get_default_logger() is not None
rv = run_update(logger, **args)
if rv is exit_unclean:
sys.exit(1)
else:
sys.exit(0)

@@ -1,179 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from __future__ import print_function
import json
import urllib.parse
requests = None
class GitHubError(Exception):
def __init__(self, status, data):
self.status = status
self.data = data
class GitHub(object):
url_base = "https://api.github.com"
def __init__(self, token):
# Defer the import of requests since it isn't installed by default
global requests
if requests is None:
import requests
self.headers = {"Accept": "application/vnd.github.v3+json"}
self.auth = (token, "x-oauth-basic")
def get(self, path):
return self._request("GET", path)
def post(self, path, data):
return self._request("POST", path, data=data)
def put(self, path, data):
return self._request("PUT", path, data=data)
def _request(self, method, path, data=None):
url = urllib.parse.urljoin(self.url_base, path)
kwargs = {"headers": self.headers,
"auth": self.auth}
if data is not None:
kwargs["data"] = json.dumps(data)
resp = requests.request(method, url, **kwargs)
if 200 <= resp.status_code < 300:
return resp.json()
else:
print(resp.status_code, resp.json())
raise GitHubError(resp.status_code, resp.json())
def repo(self, owner, name):
"""GitHubRepo for a particular repository.
:param owner: String repository owner
:param name: String repository name
"""
return GitHubRepo.from_name(self, owner, name)
class GitHubRepo(object):
def __init__(self, github, data):
"""Object respresenting a GitHub respoitory"""
self.gh = github
self.owner = data["owner"]
self.name = data["name"]
self.url = data["ssh_url"]
self._data = data
@classmethod
def from_name(cls, github, owner, name):
data = github.get("/repos/%s/%s" % (owner, name))
return cls(github, data)
@property
def url_base(self):
return "/repos/%s/" % (self._data["full_name"])
def create_pr(self, title, head, base, body):
"""Create a Pull Request in the repository
:param title: Pull Request title
:param head: ref to the HEAD of the PR branch.
:param base: ref to the base branch for the Pull Request
:param body: Description of the PR
"""
return PullRequest.create(self, title, head, base, body)
def load_pr(self, number):
"""Load an existing Pull Request by number.
:param number: Pull Request number
"""
return PullRequest.from_number(self, number)
def path(self, suffix):
return urllib.parse.urljoin(self.url_base, suffix)
class PullRequest(object):
def __init__(self, repo, data):
"""Object representing a Pull Request"""
self.repo = repo
self._data = data
self.number = data["number"]
self.title = data["title"]
self.base = data["base"]["ref"]
self.base = data["head"]["ref"]
self._issue = None
@classmethod
def from_number(cls, repo, number):
data = repo.gh.get(repo.path("pulls/%i" % number))
return cls(repo, data)
@classmethod
def create(cls, repo, title, head, base, body):
data = repo.gh.post(repo.path("pulls"),
{"title": title,
"head": head,
"base": base,
"body": body})
return cls(repo, data)
def path(self, suffix):
return urllib.parse.urljoin(self.repo.path("pulls/%i/" % self.number), suffix)
@property
def issue(self):
"""Issue related to the Pull Request"""
if self._issue is None:
self._issue = Issue.from_number(self.repo, self.number)
return self._issue
def merge(self, commit_message=None):
"""Merge the Pull Request into its base branch.
:param commit_message: Message to use for the merge commit. If None a default
message is used instead
"""
if commit_message is None:
commit_message = "Merge pull request #%i from %s" % (self.number, self.base)
self.repo.gh.put(self.path("merge"),
{"commit_message": commit_message})
class Issue(object):
def __init__(self, repo, data):
"""Object representing a GitHub Issue"""
self.repo = repo
self._data = data
self.number = data["number"]
@classmethod
def from_number(cls, repo, number):
data = repo.gh.get(repo.path("issues/%i" % number))
return cls(repo, data)
def path(self, suffix):
return urllib.parse.urljoin(self.repo.path("issues/%i/" % self.number), suffix)
def add_label(self, label):
"""Add a label to the issue.
:param label: The name of the label
"""
self.repo.gh.post(self.path("labels"), [label])
def add_comment(self, message):
"""Add a comment to the issue
:param message: The text of the comment
"""
self.repo.gh.post(self.path("comments"),
{"body": message})

@@ -1,209 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from distutils.spawn import find_executable
import re
import subprocess
import sys
import tempfile
from wptrunner import update as wptupdate
from wptrunner.update.tree import Commit, CommitMessage, get_unique_name
class HgTree(wptupdate.tree.HgTree):
def __init__(self, *args, **kwargs):
self.commit_cls = kwargs.pop("commit_cls", Commit)
wptupdate.tree.HgTree.__init__(self, *args, **kwargs)
# TODO: The extra methods for upstreaming patches from a
# hg checkout
class GitTree(wptupdate.tree.GitTree):
def __init__(self, *args, **kwargs):
"""Extension of the basic GitTree with extra methods for
        transferring patches"""
commit_cls = kwargs.pop("commit_cls", Commit)
wptupdate.tree.GitTree.__init__(self, *args, **kwargs)
self.commit_cls = commit_cls
def create_branch(self, name, ref=None):
"""Create a named branch,
:param name: String representing the branch name.
:param ref: None to use current HEAD or rev that the branch should point to"""
args = []
if ref is not None:
if hasattr(ref, "sha1"):
ref = ref.sha1
args.append(ref)
self.git("branch", name, *args)
def commits_by_message(self, message, path=None):
"""List of commits with messages containing a given string.
:param message: The string that must be contained in the message.
:param path: Path to a file or directory the commit touches
"""
args = ["--pretty=format:%H", "--reverse", "-z", "--grep=%s" % message]
if path is not None:
args.append("--")
args.append(path)
data = self.git("log", *args)
        return [self.commit_cls(self, sha1) for sha1 in data.split("\0") if sha1]
def log(self, base_commit=None, path=None):
"""List commits touching a certian path from a given base commit.
:base_param commit: Commit object for the base commit from which to log
:param path: Path that the commits must touch
"""
args = ["--pretty=format:%H", "--reverse", "-z", "--no-merges"]
if base_commit is not None:
args.append("%s.." % base_commit.sha1)
if path is not None:
args.append("--")
args.append(path)
data = self.git("log", *args)
return [self.commit_cls(self, sha1) for sha1 in data.split("\0") if sha1]
def import_patch(self, patch, strip_count):
"""Import a patch file into the tree and commit it
        :param patch: a Patch object containing the patch to import
        :param strip_count: number of leading path components to strip when applying
        """
with tempfile.NamedTemporaryFile() as f:
f.write(patch.diff)
f.flush()
f.seek(0)
self.git("apply", "--index", f.name, "-p", str(strip_count))
self.git("commit", "-m", patch.message.text, "--author=%s" % patch.full_author)
def rebase(self, ref, continue_rebase=False):
"""Rebase the current branch onto another commit.
:param ref: A Commit object for the commit to rebase onto
:param continue_rebase: Continue an in-progress rebase"""
if continue_rebase:
args = ["--continue"]
else:
if hasattr(ref, "sha1"):
ref = ref.sha1
args = [ref]
self.git("rebase", *args)
def push(self, remote, local_ref, remote_ref, force=False):
"""Push local changes to a remote.
:param remote: URL of the remote to push to
:param local_ref: Local branch to push
:param remote_ref: Name of the remote branch to push to
:param force: Do a force push
"""
args = []
if force:
args.append("-f")
args.extend([remote, "%s:%s" % (local_ref, remote_ref)])
self.git("push", *args)
def unique_branch_name(self, prefix):
"""Get an unused branch name in the local tree
:param prefix: Prefix to use at the start of the branch name"""
branches = [ref[len("refs/heads/"):] for sha1, ref in self.list_refs()
if ref.startswith("refs/heads/")]
return get_unique_name(branches, prefix)
class Patch(object):
def __init__(self, author, email, message, merge_message, diff):
self.author = author
self.email = email
self.merge_message = merge_message
if isinstance(message, CommitMessage):
self.message = message
else:
self.message = GeckoCommitMessage(message)
self.diff = diff
def __repr__(self):
return "<Patch (%s)>" % self.message.full_summary
@property
def full_author(self):
return "%s <%s>" % (self.author, self.email)
@property
def empty(self):
        return not bool(self.diff.strip())
class GeckoCommitMessage(CommitMessage):
"""Commit message following the Gecko conventions for identifying bug number
and reviewer"""
# c.f. http://hg.mozilla.org/hgcustom/version-control-tools/file/tip/hghooks/mozhghooks/commit-message.py
# which has the regexps that are actually enforced by the VCS hooks. These are
# slightly different because we need to parse out specific parts of the message rather
# than just enforce a general pattern.
    _bug_re = re.compile(r"^Bug (\d+)[^\w]*(?:Part \d+[^\w]*)?(.*?)\s*(?:r=(\w*))?$",
                         re.IGNORECASE)
    _merge_re = re.compile(r"^Auto merge of #(\d+) - [^:]+:[^,]+, r=(.+)$", re.IGNORECASE)
    _backout_re = re.compile(r"^(?:Back(?:ing|ed)\s+out)|Backout|(?:Revert|(?:ed|ing))",
                             re.IGNORECASE)
    _backout_sha1_re = re.compile(r"(?:\s|:)[0-9a-f]{12}")
def _parse_message(self):
CommitMessage._parse_message(self)
if self._backout_re.match(self.full_summary):
self.backouts = self._backout_re.findall(self.full_summary)
else:
self.backouts = []
m = self._merge_re.match(self.full_summary)
if m is not None:
self.bug, self.reviewer = m.groups()
self.summary = self.full_summary
else:
m = self._bug_re.match(self.full_summary)
if m is not None:
self.bug, self.summary, self.reviewer = m.groups()
else:
self.bug, self.summary, self.reviewer = None, self.full_summary, None
class GeckoCommit(Commit):
msg_cls = GeckoCommitMessage
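    # For a non-merge commit, find the merge commit that landed it (via the
    # git-when-merged extension) so the PR number in its "Auto merge of
    # #NNN" message can be recovered when rewriting patches.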
def __init__(self, tree, sha1, is_merge=False):
Commit.__init__(self, tree, sha1)
if not is_merge:
args = ["-c", sha1]
try:
merge_rev = self.git("when-merged", *args).strip()
except subprocess.CalledProcessError as exn:
if not find_executable('git-when-merged'):
print('Please add the `when-merged` git command to your PATH ' +
'(https://github.com/mhagger/git-when-merged/).')
sys.exit(1)
raise exn
self.merge = GeckoCommit(tree, merge_rev, True)
def export_patch(self, path=None):
"""Convert a commit in the tree to a Patch with the bug number and
reviewer stripped from the message"""
args = ["--binary", self.sha1]
if path is not None:
args.append("--")
args.append(path)
diff = self.git("show", *args)
merge_message = self.merge.message if self.merge else None
return Patch(self.author, self.email, self.message, merge_message, diff)

@@ -1,42 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import os
from wptrunner.update.base import Step, StepRunner
from wptrunner.update.update import LoadConfig, SyncFromUpstream, UpdateMetadata
from wptrunner.update.tree import NoVCSTree
from .tree import GitTree, HgTree, GeckoCommit
from .upstream import SyncToUpstream
class LoadTrees(Step):
"""Load gecko tree and sync tree containing web-platform-tests"""
provides = ["local_tree", "sync_tree"]
def create(self, state):
if os.path.exists(state.sync["path"]):
sync_tree = GitTree(root=state.sync["path"])
else:
sync_tree = None
if GitTree.is_type():
local_tree = GitTree(commit_cls=GeckoCommit)
elif HgTree.is_type():
local_tree = HgTree(commit_cls=GeckoCommit)
else:
local_tree = NoVCSTree()
state.update({"local_tree": local_tree,
"sync_tree": sync_tree})
class UpdateRunner(StepRunner):
"""Overall runner for updating web-platform-tests in Gecko."""
steps = [LoadConfig,
LoadTrees,
SyncToUpstream,
SyncFromUpstream,
UpdateMetadata]

@@ -1,44 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
def create_parser():
from wptrunner import wptcommandline
parser = wptcommandline.create_parser_update()
parser.add_argument("--upstream", dest="upstream", action="store_true", default=None,
help="Push local changes to upstream repository even when not syncing")
parser.add_argument("--no-upstream", dest="upstream", action="store_false", default=None,
help="Dont't push local changes to upstream repository when syncing")
parser.add_argument("--token-file", action="store", type=wptcommandline.abs_path,
help="Path to file containing github token")
parser.add_argument("--token", action="store", help="GitHub token to use")
parser.add_argument("--layout-2020", "--with-layout-2020", default=False, action="store_true",
help="Use expected results for the 2020 layout engine")
return parser
def check_args(kwargs):
from wptrunner import wptcommandline
wptcommandline.set_from_config(kwargs)
if hasattr(wptcommandline, 'check_paths'):
wptcommandline.check_paths(kwargs)
kwargs["upstream"] = kwargs["upstream"] if kwargs["upstream"] is not None else kwargs["sync"]
if kwargs["upstream"]:
if kwargs["rev"]:
raise ValueError("Setting --rev with --upstream isn't supported")
if kwargs["token"] is None:
if kwargs["token_file"] is None:
raise ValueError("Must supply either a token file or a token")
with open(kwargs["token_file"]) as f:
token = f.read().strip()
kwargs["token"] = token
del kwargs["token_file"]
return kwargs
def parse_args():
parser = create_parser()
kwargs = vars(parser.parse_args())
return check_args(kwargs)

@@ -1,389 +0,0 @@
from __future__ import print_function
import os
import re
import subprocess
import sys
import urllib.parse
from six.moves import input
from six import iteritems
from wptrunner.update.sync import UpdateCheckout
from wptrunner.update.tree import get_unique_name
from wptrunner.update.base import Step, StepRunner, exit_clean, exit_unclean
from .tree import Commit, GitTree, Patch
from .github import GitHub
def rewrite_patch(patch, strip_dir):
"""Take a Patch and rewrite the message to remove the bug number and reviewer, but add
    a link to the originating Servo pull request in the summary.
:param patch: the Patch to convert
"""
return Patch(patch.author, patch.email, rewrite_message(patch), None, patch.diff)
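# Prefer the bug (PR) number from the "Auto merge of #NNN" merge commit;
# the rewritten message links back to the Servo pull request and carries
# "[ci skip]" so upstream CI skips the already-reviewed change.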
def rewrite_message(patch):
if patch.merge_message and patch.merge_message.bug:
bug = patch.merge_message.bug
else:
bug = patch.message.bug
if bug is not None:
return "\n".join([patch.message.summary,
patch.message.body,
"",
"Upstreamed from https://github.com/servo/servo/pull/%s [ci skip]" %
bug])
return "\n".join([patch.message.full_summary, "%s\n[ci skip]\n" % patch.message.body])
class SyncToUpstream(Step):
"""Sync local changes to upstream"""
def create(self, state):
if not state.kwargs["upstream"]:
return
if not isinstance(state.local_tree, GitTree):
self.logger.error("Cannot sync with upstream from a non-Git checkout.")
return exit_clean
try:
import requests
except ImportError:
self.logger.error("Upstream sync requires the requests module to be installed")
return exit_clean
if not state.sync_tree:
os.makedirs(state.sync["path"])
state.sync_tree = GitTree(root=state.sync["path"])
kwargs = state.kwargs
with state.push(["local_tree", "sync_tree", "tests_path", "metadata_path",
"sync"]):
state.token = kwargs["token"]
runner = SyncToUpstreamRunner(self.logger, state)
runner.run()
class GetLastSyncData(Step):
"""Find the gecko commit at which we last performed a sync with upstream and the upstream
commit that was synced."""
provides = ["sync_data_path", "last_sync_commit", "old_upstream_rev"]
def create(self, state):
self.logger.info("Looking for last sync commit")
state.sync_data_path = os.path.join(state.metadata_path, "mozilla-sync")
items = {}
with open(state.sync_data_path) as f:
for line in f.readlines():
key, value = [item.strip() for item in line.split(":", 1)]
items[key] = value
state.last_sync_commit = Commit(state.local_tree, items["local"])
state.old_upstream_rev = items["upstream"]
if not state.local_tree.contains_commit(state.last_sync_commit):
self.logger.error("Could not find last sync commit %s" % last_sync_sha1)
return exit_clean
self.logger.info("Last sync to web-platform-tests happened in %s" % state.last_sync_commit.sha1)
class CheckoutBranch(Step):
"""Create a branch in the sync tree pointing at the last upstream sync commit
and check it out"""
provides = ["branch"]
def create(self, state):
self.logger.info("Updating sync tree from %s" % state.sync["remote_url"])
state.branch = state.sync_tree.unique_branch_name(
"outbound_update_%s" % state.old_upstream_rev)
state.sync_tree.update(state.sync["remote_url"],
state.sync["branch"],
state.branch)
state.sync_tree.checkout(state.old_upstream_rev, state.branch, force=True)
class GetBaseCommit(Step):
"""Find the latest upstream commit on the branch that we are syncing with"""
provides = ["base_commit"]
def create(self, state):
state.base_commit = state.sync_tree.get_remote_sha1(state.sync["remote_url"],
state.sync["branch"])
self.logger.debug("New base commit is %s" % state.base_commit.sha1)
class LoadCommits(Step):
"""Get a list of commits in the gecko tree that need to be upstreamed"""
provides = ["source_commits"]
def create(self, state):
state.source_commits = state.local_tree.log(state.last_sync_commit,
state.tests_path)
update_regexp = re.compile("Update web-platform-tests to revision [0-9a-f]{40}")
for i, commit in enumerate(state.source_commits[:]):
if update_regexp.match(commit.message.text):
# This is a previous update commit so ignore it
state.source_commits.remove(commit)
continue
if commit.message.backouts:
                # TODO: Add support for collapsing backouts
raise NotImplementedError("Need to get the Git->Hg commits for backouts and remove the backed out patch")
if not commit.message.bug and not (commit.merge and commit.merge.message.bug):
self.logger.error("Commit %i (%s) doesn't have an associated bug number." %
(i + 1, commit.sha1))
return exit_unclean
self.logger.debug("Source commits: %s" % state.source_commits)
class SelectCommits(Step):
"""Provide a UI to select which commits to upstream"""
def create(self, state):
if not state.source_commits:
return
while True:
commits = state.source_commits[:]
for i, commit in enumerate(commits):
print("%i:\t%s" % (i, commit.message.summary))
remove = input("Provide a space-separated list of any commits numbers to remove from the list to upstream:\n").strip()
remove_idx = set()
invalid = False
for item in remove.split(" "):
if not item:
continue
try:
item = int(item)
except:
invalid = True
break
if item < 0 or item >= len(commits):
invalid = True
break
remove_idx.add(item)
if invalid:
continue
            keep_commits = [(i, cmt) for i, cmt in enumerate(commits) if i not in remove_idx]
            # TODO: consider printing removed commits
print("Selected the following commits to keep:")
for i, commit in keep_commits:
print("%i:\t%s" % (i, commit.message.summary))
confirm = input("Keep the above commits? y/n\n").strip().lower()
if confirm == "y":
state.source_commits = [item[1] for item in keep_commits]
break
class MovePatches(Step):
"""Convert gecko commits into patches against upstream and commit these to the sync tree."""
provides = ["commits_loaded"]
def create(self, state):
state.commits_loaded = 0
strip_path = os.path.relpath(state.tests_path,
state.local_tree.root)
self.logger.debug("Stripping patch %s" % strip_path)
for commit in state.source_commits[state.commits_loaded:]:
i = state.commits_loaded + 1
self.logger.info("Moving commit %i: %s" % (i, commit.message.full_summary))
patch = commit.export_patch(state.tests_path)
stripped_patch = rewrite_patch(patch, strip_path)
strip_count = strip_path.count('/')
if strip_path[-1] != '/':
strip_count += 1
try:
state.sync_tree.import_patch(stripped_patch, 1 + strip_count)
except:
print(patch.diff)
raise
state.commits_loaded = i
class RebaseCommits(Step):
"""Rebase commits from the current branch on top of the upstream destination branch.
This step is particularly likely to fail if the rebase generates merge conflicts.
In that case the conflicts can be fixed up locally and the sync process restarted
with --continue.
"""
provides = ["rebased_commits"]
def create(self, state):
self.logger.info("Rebasing local commits")
continue_rebase = False
# Check if there's a rebase in progress
if (os.path.exists(os.path.join(state.sync_tree.root,
".git",
"rebase-merge")) or
os.path.exists(os.path.join(state.sync_tree.root,
".git",
"rebase-apply"))):
continue_rebase = True
try:
state.sync_tree.rebase(state.base_commit, continue_rebase=continue_rebase)
except subprocess.CalledProcessError:
self.logger.info("Rebase failed, fix merge and run %s again with --continue" % sys.argv[0])
raise
state.rebased_commits = state.sync_tree.log(state.base_commit)
self.logger.info("Rebase successful")
class CheckRebase(Step):
"""Check if there are any commits remaining after rebase"""
def create(self, state):
if not state.rebased_commits:
self.logger.info("Nothing to upstream, exiting")
return exit_clean
class MergeUpstream(Step):
"""Run steps to push local commits as seperate PRs and merge upstream."""
provides = ["merge_index", "gh_repo"]
def create(self, state):
gh = GitHub(state.token)
if "merge_index" not in state:
state.merge_index = 0
org, name = urllib.parse.urlsplit(state.sync["remote_url"]).path[1:].split("/")
if name.endswith(".git"):
name = name[:-4]
state.gh_repo = gh.repo(org, name)
for commit in state.rebased_commits[state.merge_index:]:
with state.push(["gh_repo", "sync_tree"]):
state.commit = commit
pr_merger = PRMergeRunner(self.logger, state)
rv = pr_merger.run()
if rv is not None:
return rv
state.merge_index += 1
class UpdateLastSyncData(Step):
"""Update the gecko commit at which we last performed a sync with upstream."""
provides = []
def create(self, state):
self.logger.info("Updating last sync commit")
data = {"local": state.local_tree.rev,
"upstream": state.sync_tree.rev}
with open(state.sync_data_path, "w") as f:
for key, value in iteritems(data):
f.write("%s: %s\n" % (key, value))
# This gets added to the patch later on
class MergeLocalBranch(Step):
"""Create a local branch pointing at the commit to upstream"""
provides = ["local_branch"]
def create(self, state):
branch_prefix = "sync_%s" % state.commit.sha1
local_branch = state.sync_tree.unique_branch_name(branch_prefix)
state.sync_tree.create_branch(local_branch, state.commit)
state.local_branch = local_branch
class MergeRemoteBranch(Step):
"""Get an unused remote branch name to use for the PR"""
provides = ["remote_branch"]
def create(self, state):
remote_branch = "sync_%s" % state.commit.sha1
branches = [ref[len("refs/heads/"):] for sha1, ref in
state.sync_tree.list_remote(state.gh_repo.url)
if ref.startswith("refs/heads")]
state.remote_branch = get_unique_name(branches, remote_branch)
class PushUpstream(Step):
"""Push local branch to remote"""
def create(self, state):
self.logger.info("Pushing commit upstream")
state.sync_tree.push(state.gh_repo.url,
state.local_branch,
state.remote_branch)
class CreatePR(Step):
"""Create a PR for the remote branch"""
provides = ["pr"]
def create(self, state):
self.logger.info("Creating a PR")
commit = state.commit
state.pr = state.gh_repo.create_pr(commit.message.full_summary,
state.remote_branch,
"master",
commit.message.body if commit.message.body else "")
class PRAddComment(Step):
"""Add an issue comment indicating that the code has been reviewed already"""
def create(self, state):
state.pr.issue.add_comment("Code reviewed upstream.")
state.pr.issue.add_label("servo-export")
class MergePR(Step):
"""Merge the PR"""
def create(self, state):
self.logger.info("Merging PR")
state.pr.merge()
class PRDeleteBranch(Step):
"""Delete the remote branch"""
def create(self, state):
self.logger.info("Deleting remote branch")
state.sync_tree.push(state.gh_repo.url, "", state.remote_branch)
class SyncToUpstreamRunner(StepRunner):
"""Runner for syncing local changes to upstream"""
steps = [GetLastSyncData,
UpdateCheckout,
CheckoutBranch,
GetBaseCommit,
LoadCommits,
SelectCommits,
MovePatches,
RebaseCommits,
CheckRebase,
MergeUpstream,
UpdateLastSyncData]
class PRMergeRunner(StepRunner):
"""(Sub)Runner for creating and merging a PR"""
steps = [
MergeLocalBranch,
MergeRemoteBranch,
PushUpstream,
CreatePR,
PRAddComment,
MergePR,
PRDeleteBranch,
]