Use ruff to enforce Python code formatting (#37117)

Requires servo/servo#37045 for deps and config.

Testing: No need for tests to test tests.
Fixes: servo/servo#37041
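
A note on usage (illustrative only): ruff's formatter can be run as a standalone check in CI or a helper script. The sketch below is not part of this change; it shows one way a wrapper could invoke "ruff format --check" over a repository's Python sources. The directory paths are placeholders, not Servo's actual layout.

# Minimal sketch, not taken from this PR: run ruff's formatter in check mode
# and propagate its exit code. "ruff format --check" exits non-zero when any
# file would be reformatted. The paths below are placeholders.
import subprocess
import sys


def check_formatting(paths: list[str]) -> int:
    completed = subprocess.run(["ruff", "format", "--check", *paths])
    return completed.returncode


if __name__ == "__main__":
    sys.exit(check_formatting(["python/", "tests/"]))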

---------

Signed-off-by: zefr0x <zer0-x.7ty50@aleeas.com>
zefr0x 2025-05-26 14:54:43 +03:00 committed by GitHub
parent 41ecfb53a1
commit c96de69e80
67 changed files with 3021 additions and 3085 deletions


@ -27,22 +27,36 @@ import wptrunner.wptcommandline # noqa: E402
def create_parser():
parser = wptrunner.wptcommandline.create_parser()
parser.add_argument('--rr-chaos', default=False, action="store_true",
help="Run under chaos mode in rr until a failure is captured")
parser.add_argument('--pref', default=[], action="append", dest="prefs",
help="Pass preferences to servo")
parser.add_argument('--log-servojson', action="append", type=mozlog.commandline.log_file,
help="Servo's JSON logger of unexpected results")
parser.add_argument('--always-succeed', default=False, action="store_true",
help="Always yield exit code of zero")
parser.add_argument('--no-default-test-types', default=False, action="store_true",
help="Run all of the test types provided by wptrunner or specified explicitly by --test-types")
parser.add_argument('--filter-intermittents', default=None, action="store",
help="Filter intermittents against known intermittents "
"and save the filtered output to the given file.")
parser.add_argument('--log-raw-unexpected', default=None, action="store",
help="Raw structured log messages for unexpected results."
" '--log-raw' Must also be passed in order to use this.")
parser.add_argument(
"--rr-chaos", default=False, action="store_true", help="Run under chaos mode in rr until a failure is captured"
)
parser.add_argument("--pref", default=[], action="append", dest="prefs", help="Pass preferences to servo")
parser.add_argument(
"--log-servojson",
action="append",
type=mozlog.commandline.log_file,
help="Servo's JSON logger of unexpected results",
)
parser.add_argument("--always-succeed", default=False, action="store_true", help="Always yield exit code of zero")
parser.add_argument(
"--no-default-test-types",
default=False,
action="store_true",
help="Run all of the test types provided by wptrunner or specified explicitly by --test-types",
)
parser.add_argument(
"--filter-intermittents",
default=None,
action="store",
help="Filter intermittents against known intermittents and save the filtered output to the given file.",
)
parser.add_argument(
"--log-raw-unexpected",
default=None,
action="store",
help="Raw structured log messages for unexpected results."
" '--log-raw' Must also be passed in order to use this.",
)
return parser


@ -21,20 +21,20 @@ from exporter import WPTSync
def main() -> int:
context = json.loads(os.environ['GITHUB_CONTEXT'])
context = json.loads(os.environ["GITHUB_CONTEXT"])
logging.getLogger().level = logging.INFO
success = WPTSync(
servo_repo='servo/servo',
wpt_repo='web-platform-tests/wpt',
downstream_wpt_repo='servo/wpt',
servo_path='./servo',
wpt_path='./wpt',
github_api_token=os.environ['WPT_SYNC_TOKEN'],
github_api_url='https://api.github.com/',
github_username='servo-wpt-sync',
github_email='ghbot+wpt-sync@servo.org',
github_name='Servo WPT Sync',
servo_repo="servo/servo",
wpt_repo="web-platform-tests/wpt",
downstream_wpt_repo="servo/wpt",
servo_path="./servo",
wpt_path="./wpt",
github_api_token=os.environ["WPT_SYNC_TOKEN"],
github_api_url="https://api.github.com/",
github_username="servo-wpt-sync",
github_email="ghbot+wpt-sync@servo.org",
github_name="Servo WPT Sync",
).run(context["event"])
return 0 if success else 1


@ -24,26 +24,28 @@ import subprocess
from typing import Callable, Optional
from .common import \
CLOSING_EXISTING_UPSTREAM_PR, \
NO_SYNC_SIGNAL, \
NO_UPSTREAMBLE_CHANGES_COMMENT, \
OPENED_NEW_UPSTREAM_PR, \
UPDATED_EXISTING_UPSTREAM_PR, \
UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR, \
UPSTREAMABLE_PATH, \
wpt_branch_name_from_servo_pr_number
from .common import (
CLOSING_EXISTING_UPSTREAM_PR,
NO_SYNC_SIGNAL,
NO_UPSTREAMBLE_CHANGES_COMMENT,
OPENED_NEW_UPSTREAM_PR,
UPDATED_EXISTING_UPSTREAM_PR,
UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR,
UPSTREAMABLE_PATH,
wpt_branch_name_from_servo_pr_number,
)
from .github import GithubRepository, PullRequest
from .step import \
AsyncValue, \
ChangePRStep, \
CommentStep, \
CreateOrUpdateBranchForPRStep, \
MergePRStep, \
OpenPRStep, \
RemoveBranchForPRStep, \
Step
from .step import (
AsyncValue,
ChangePRStep,
CommentStep,
CreateOrUpdateBranchForPRStep,
MergePRStep,
OpenPRStep,
RemoveBranchForPRStep,
Step,
)
class LocalGitRepo:
@ -57,8 +59,7 @@ class LocalGitRepo:
def run_without_encoding(self, *args, env: dict = {}):
command_line = [self.git_path] + list(args)
logging.info(" → Execution (cwd='%s'): %s",
self.path, " ".join(command_line))
logging.info(" → Execution (cwd='%s'): %s", self.path, " ".join(command_line))
env.setdefault("GIT_AUTHOR_EMAIL", self.sync.github_email)
env.setdefault("GIT_COMMITTER_EMAIL", self.sync.github_email)
@ -66,20 +67,15 @@ class LocalGitRepo:
env.setdefault("GIT_COMMITTER_NAME", self.sync.github_name)
try:
return subprocess.check_output(
command_line, cwd=self.path, env=env, stderr=subprocess.STDOUT
)
return subprocess.check_output(command_line, cwd=self.path, env=env, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exception:
logging.warning("Process execution failed with output:\n%s",
exception.output.decode("utf-8", errors="surrogateescape"))
logging.warning(
"Process execution failed with output:\n%s", exception.output.decode("utf-8", errors="surrogateescape")
)
raise exception
def run(self, *args, env: dict = {}):
return (
self
.run_without_encoding(*args, env=env)
.decode("utf-8", errors="surrogateescape")
)
return self.run_without_encoding(*args, env=env).decode("utf-8", errors="surrogateescape")
@dataclasses.dataclass()
@ -167,11 +163,7 @@ class WPTSync:
if action not in ["opened", "synchronize", "reopened", "edited", "closed"]:
return True
if (
action == "edited"
and "title" not in payload["changes"]
and "body" not in payload["changes"]
):
if action == "edited" and "title" not in payload["changes"] and "body" not in payload["changes"]:
return True
try:
@ -179,15 +171,11 @@ class WPTSync:
downstream_wpt_branch = self.downstream_wpt.get_branch(
wpt_branch_name_from_servo_pr_number(servo_pr.number)
)
upstream_pr = self.wpt.get_open_pull_request_for_branch(
self.github_username, downstream_wpt_branch
)
upstream_pr = self.wpt.get_open_pull_request_for_branch(self.github_username, downstream_wpt_branch)
if upstream_pr:
logging.info(
" → Detected existing upstream PR %s", upstream_pr)
logging.info(" → Detected existing upstream PR %s", upstream_pr)
run = SyncRun(self, servo_pr, AsyncValue(
upstream_pr), step_callback)
run = SyncRun(self, servo_pr, AsyncValue(upstream_pr), step_callback)
pull_data = payload["pull_request"]
if payload["action"] in ["opened", "synchronize", "reopened"]:
@ -210,50 +198,44 @@ class WPTSync:
num_commits = pull_data["commits"]
head_sha = pull_data["head"]["sha"]
is_upstreamable = (
len(
self.local_servo_repo.run(
"diff", head_sha, f"{head_sha}~{num_commits}", "--", UPSTREAMABLE_PATH
)
)
> 0
len(self.local_servo_repo.run("diff", head_sha, f"{head_sha}~{num_commits}", "--", UPSTREAMABLE_PATH)) > 0
)
logging.info(" → PR is upstreamable: '%s'", is_upstreamable)
title = pull_data['title']
body = pull_data['body']
title = pull_data["title"]
body = pull_data["body"]
if run.upstream_pr.has_value():
if is_upstreamable:
# In case this is adding new upstreamable changes to a PR that was closed
# due to a lack of upstreamable changes, force it to be reopened.
# Github refuses to reopen a PR that had a branch force pushed, so be sure
# to do this first.
run.add_step(ChangePRStep(
run.upstream_pr.value(), "opened", title, body))
run.add_step(ChangePRStep(run.upstream_pr.value(), "opened", title, body))
# Push the relevant changes to the upstream branch.
run.add_step(CreateOrUpdateBranchForPRStep(
pull_data, run.servo_pr))
run.add_step(CommentStep(
run.servo_pr, UPDATED_EXISTING_UPSTREAM_PR))
run.add_step(CreateOrUpdateBranchForPRStep(pull_data, run.servo_pr))
run.add_step(CommentStep(run.servo_pr, UPDATED_EXISTING_UPSTREAM_PR))
else:
# Close the upstream PR, since would contain no changes otherwise.
run.add_step(CommentStep(run.upstream_pr.value(),
NO_UPSTREAMBLE_CHANGES_COMMENT))
run.add_step(CommentStep(run.upstream_pr.value(), NO_UPSTREAMBLE_CHANGES_COMMENT))
run.add_step(ChangePRStep(run.upstream_pr.value(), "closed"))
run.add_step(RemoveBranchForPRStep(pull_data))
run.add_step(CommentStep(
run.servo_pr, CLOSING_EXISTING_UPSTREAM_PR))
run.add_step(CommentStep(run.servo_pr, CLOSING_EXISTING_UPSTREAM_PR))
elif is_upstreamable:
# Push the relevant changes to a new upstream branch.
branch = run.add_step(
CreateOrUpdateBranchForPRStep(pull_data, run.servo_pr))
branch = run.add_step(CreateOrUpdateBranchForPRStep(pull_data, run.servo_pr))
# Create a pull request against the upstream repository for the new branch.
assert branch
upstream_pr = run.add_step(OpenPRStep(
branch, self.wpt, title, body,
["servo-export", "do not merge yet"],
))
upstream_pr = run.add_step(
OpenPRStep(
branch,
self.wpt,
title,
body,
["servo-export", "do not merge yet"],
)
)
assert upstream_pr
run.upstream_pr = upstream_pr
@ -264,12 +246,8 @@ class WPTSync:
def handle_edited_pull_request(self, run: SyncRun, pull_data: dict):
logging.info("Changing upstream PR title")
if run.upstream_pr.has_value():
run.add_step(ChangePRStep(
run.upstream_pr.value(
), "open", pull_data["title"], pull_data["body"]
))
run.add_step(CommentStep(
run.servo_pr, UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR))
run.add_step(ChangePRStep(run.upstream_pr.value(), "open", pull_data["title"], pull_data["body"]))
run.add_step(CommentStep(run.servo_pr, UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR))
def handle_closed_pull_request(self, run: SyncRun, pull_data: dict):
logging.info("Processing closed PR")
@ -279,8 +257,7 @@ class WPTSync:
if pull_data["merged"]:
# Since the upstreamable changes have now been merged locally, merge the
# corresponding upstream PR.
run.add_step(MergePRStep(
run.upstream_pr.value(), ["do not merge yet"]))
run.add_step(MergePRStep(run.upstream_pr.value(), ["do not merge yet"]))
else:
# If a PR with upstreamable changes is closed without being merged, we
# don't want to merge the changes upstream either.


@ -12,17 +12,11 @@
UPSTREAMABLE_PATH = "tests/wpt/tests/"
NO_SYNC_SIGNAL = "[no-wpt-sync]"
OPENED_NEW_UPSTREAM_PR = (
"🤖 Opened new upstream WPT pull request ({upstream_pr}) "
"with upstreamable changes."
)
OPENED_NEW_UPSTREAM_PR = "🤖 Opened new upstream WPT pull request ({upstream_pr}) with upstreamable changes."
UPDATED_EXISTING_UPSTREAM_PR = (
"📝 Transplanted new upstreamable changes to existing "
"upstream WPT pull request ({upstream_pr})."
)
UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR = (
"✍ Updated existing upstream WPT pull request ({upstream_pr}) title and body."
"📝 Transplanted new upstreamable changes to existing upstream WPT pull request ({upstream_pr})."
)
UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR = "✍ Updated existing upstream WPT pull request ({upstream_pr}) title and body."
CLOSING_EXISTING_UPSTREAM_PR = (
"🤖 This change no longer contains upstreamable changes to WPT; closed existing "
"upstream pull request ({upstream_pr})."


@ -40,13 +40,9 @@ def authenticated(sync: WPTSync, method, url, json=None) -> requests.Response:
}
url = urllib.parse.urljoin(sync.github_api_url, url)
response = requests.request(
method, url, headers=headers, json=json, timeout=TIMEOUT
)
response = requests.request(method, url, headers=headers, json=json, timeout=TIMEOUT)
if int(response.status_code / 100) != 2:
raise ValueError(
f"Got unexpected {response.status_code} response: {response.text}"
)
raise ValueError(f"Got unexpected {response.status_code} response: {response.text}")
return response
@ -71,33 +67,27 @@ class GithubRepository:
def get_branch(self, name: str) -> GithubBranch:
return GithubBranch(self, name)
def get_open_pull_request_for_branch(
self,
github_username: str,
branch: GithubBranch
) -> Optional[PullRequest]:
def get_open_pull_request_for_branch(self, github_username: str, branch: GithubBranch) -> Optional[PullRequest]:
"""If this repository has an open pull request with the
given source head reference targeting the main branch,
return the first matching pull request, otherwise return None."""
params = "+".join([
"is:pr",
"state:open",
f"repo:{self.repo}",
f"author:{github_username}",
f"head:{branch.name}",
])
params = "+".join(
[
"is:pr",
"state:open",
f"repo:{self.repo}",
f"author:{github_username}",
f"head:{branch.name}",
]
)
response = authenticated(self.sync, "GET", f"search/issues?q={params}")
if int(response.status_code / 100) != 2:
return None
json = response.json()
if not isinstance(json, dict) or \
"total_count" not in json or \
"items" not in json:
raise ValueError(
f"Got unexpected response from GitHub search: {response.text}"
)
if not isinstance(json, dict) or "total_count" not in json or "items" not in json:
raise ValueError(f"Got unexpected response from GitHub search: {response.text}")
if json["total_count"] < 1:
return None
@ -152,9 +142,7 @@ class PullRequest:
return authenticated(self.context, *args, **kwargs)
def leave_comment(self, comment: str):
return self.api(
"POST", f"{self.base_issues_url}/comments", json={"body": comment}
)
return self.api("POST", f"{self.base_issues_url}/comments", json={"body": comment})
def change(
self,


@ -46,7 +46,7 @@ class Step:
return
T = TypeVar('T')
T = TypeVar("T")
class AsyncValue(Generic[T]):
@ -76,8 +76,7 @@ class CreateOrUpdateBranchForPRStep(Step):
def run(self, run: SyncRun):
try:
commits = self._get_upstreamable_commits_from_local_servo_repo(
run.sync)
commits = self._get_upstreamable_commits_from_local_servo_repo(run.sync)
branch_name = self._create_or_update_branch_for_pr(run, commits)
branch = run.sync.downstream_wpt.get_branch(branch_name)
@ -88,21 +87,15 @@ class CreateOrUpdateBranchForPRStep(Step):
logging.info(exception, exc_info=True)
run.steps = []
run.add_step(CommentStep(
self.pull_request, COULD_NOT_APPLY_CHANGES_DOWNSTREAM_COMMENT
))
run.add_step(CommentStep(self.pull_request, COULD_NOT_APPLY_CHANGES_DOWNSTREAM_COMMENT))
if run.upstream_pr.has_value():
run.add_step(CommentStep(
run.upstream_pr.value(), COULD_NOT_APPLY_CHANGES_UPSTREAM_COMMENT
))
run.add_step(CommentStep(run.upstream_pr.value(), COULD_NOT_APPLY_CHANGES_UPSTREAM_COMMENT))
def _get_upstreamable_commits_from_local_servo_repo(self, sync: WPTSync):
local_servo_repo = sync.local_servo_repo
number_of_commits = self.pull_data["commits"]
pr_head = self.pull_data["head"]["sha"]
commit_shas = local_servo_repo.run(
"log", "--pretty=%H", pr_head, f"-{number_of_commits}"
).splitlines()
commit_shas = local_servo_repo.run("log", "--pretty=%H", pr_head, f"-{number_of_commits}").splitlines()
filtered_commits = []
# We must iterate the commits in reverse to ensure we apply older changes first,
@ -128,12 +121,8 @@ class CreateOrUpdateBranchForPRStep(Step):
# commit to another repository.
filtered_commits += [
{
"author": local_servo_repo.run(
"show", "-s", "--pretty=%an <%ae>", sha
),
"message": local_servo_repo.run(
"show", "-s", "--pretty=%B", sha
),
"author": local_servo_repo.run("show", "-s", "--pretty=%an <%ae>", sha),
"message": local_servo_repo.run("show", "-s", "--pretty=%B", sha),
"diff": diff,
}
]
@ -146,23 +135,16 @@ class CreateOrUpdateBranchForPRStep(Step):
try:
with open(patch_path, "wb") as file:
file.write(commit["diff"])
run.sync.local_wpt_repo.run(
"apply", PATCH_FILE_NAME, "-p", str(strip_count)
)
run.sync.local_wpt_repo.run("apply", PATCH_FILE_NAME, "-p", str(strip_count))
finally:
# Ensure the patch file is not added with the other changes.
os.remove(patch_path)
run.sync.local_wpt_repo.run("add", "--all")
run.sync.local_wpt_repo.run(
"commit", "--message", commit["message"], "--author", commit["author"]
)
run.sync.local_wpt_repo.run("commit", "--message", commit["message"], "--author", commit["author"])
def _create_or_update_branch_for_pr(
self, run: SyncRun, commits: list[dict], pre_commit_callback=None
):
branch_name = wpt_branch_name_from_servo_pr_number(
self.pull_data["number"])
def _create_or_update_branch_for_pr(self, run: SyncRun, commits: list[dict], pre_commit_callback=None):
branch_name = wpt_branch_name_from_servo_pr_number(self.pull_data["number"])
try:
# Create a new branch with a unique name that is consistent between
# updates of the same PR.
@ -176,7 +158,6 @@ class CreateOrUpdateBranchForPRStep(Step):
# Push the branch upstream (forcing to overwrite any existing changes).
if not run.sync.suppress_force_push:
# In order to push to our downstream branch we need to ensure that
# the local repository isn't a shallow clone. Shallow clones are
# commonly created by GitHub actions.
@ -186,8 +167,7 @@ class CreateOrUpdateBranchForPRStep(Step):
token = run.sync.github_api_token
repo = run.sync.downstream_wpt_repo
remote_url = f"https://{user}:{token}@github.com/{repo}.git"
run.sync.local_wpt_repo.run(
"push", "-f", remote_url, branch_name)
run.sync.local_wpt_repo.run("push", "-f", remote_url, branch_name)
return branch_name
finally:
@ -201,8 +181,7 @@ class CreateOrUpdateBranchForPRStep(Step):
class RemoveBranchForPRStep(Step):
def __init__(self, pull_request):
Step.__init__(self, "RemoveBranchForPRStep")
self.branch_name = wpt_branch_name_from_servo_pr_number(
pull_request["number"])
self.branch_name = wpt_branch_name_from_servo_pr_number(pull_request["number"])
def run(self, run: SyncRun):
self.name += f":{run.sync.downstream_wpt.get_branch(self.branch_name)}"
@ -212,8 +191,7 @@ class RemoveBranchForPRStep(Step):
token = run.sync.github_api_token
repo = run.sync.downstream_wpt_repo
remote_url = f"https://{user}:{token}@github.com/{repo}.git"
run.sync.local_wpt_repo.run("push", remote_url, "--delete",
self.branch_name)
run.sync.local_wpt_repo.run("push", remote_url, "--delete", self.branch_name)
class ChangePRStep(Step):
@ -238,9 +216,7 @@ class ChangePRStep(Step):
body = self.body
if body:
body = run.prepare_body_text(body)
self.name += (
f':{textwrap.shorten(body, width=20, placeholder="...")}[{len(body)}]'
)
self.name += f":{textwrap.shorten(body, width=20, placeholder='...')}[{len(body)}]"
self.pull_request.change(state=self.state, title=self.title, body=body)
@ -261,12 +237,8 @@ class MergePRStep(Step):
logging.warning(exception, exc_info=True)
run.steps = []
run.add_step(CommentStep(
self.pull_request, COULD_NOT_MERGE_CHANGES_UPSTREAM_COMMENT
))
run.add_step(CommentStep(
run.servo_pr, COULD_NOT_MERGE_CHANGES_DOWNSTREAM_COMMENT
))
run.add_step(CommentStep(self.pull_request, COULD_NOT_MERGE_CHANGES_UPSTREAM_COMMENT))
run.add_step(CommentStep(run.servo_pr, COULD_NOT_MERGE_CHANGES_DOWNSTREAM_COMMENT))
self.pull_request.add_labels(["stale-servo-export"])


@ -16,12 +16,12 @@ from dataclasses import dataclass, field
from typing import Dict, List, Optional, Any
from six import itervalues
DEFAULT_MOVE_UP_CODE = u"\x1b[A"
DEFAULT_CLEAR_EOL_CODE = u"\x1b[K"
DEFAULT_MOVE_UP_CODE = "\x1b[A"
DEFAULT_CLEAR_EOL_CODE = "\x1b[K"
@dataclass
class UnexpectedSubtestResult():
class UnexpectedSubtestResult:
path: str
subtest: str
actual: str
@ -32,15 +32,14 @@ class UnexpectedSubtestResult():
@dataclass
class UnexpectedResult():
class UnexpectedResult:
path: str
actual: str
expected: str
message: str
time: int
stack: Optional[str]
unexpected_subtest_results: list[UnexpectedSubtestResult] = field(
default_factory=list)
unexpected_subtest_results: list[UnexpectedSubtestResult] = field(default_factory=list)
issues: list[str] = field(default_factory=list)
flaky: bool = False
@ -48,13 +47,13 @@ class UnexpectedResult():
output = UnexpectedResult.to_lines(self)
if self.unexpected_subtest_results:
def make_subtests_failure(subtest_results):
# Test names sometimes contain control characters, which we want
# to be printed in their raw form, and not their interpreted form.
lines = []
for subtest in subtest_results[:-1]:
lines += UnexpectedResult.to_lines(
subtest, print_stack=False)
lines += UnexpectedResult.to_lines(subtest, print_stack=False)
lines += UnexpectedResult.to_lines(subtest_results[-1])
return self.wrap_and_indent_lines(lines, " ").splitlines()
@ -78,11 +77,11 @@ class UnexpectedResult():
if not lines:
return ""
output = indent + u"\u25B6 %s\n" % lines[0]
output = indent + "\u25b6 %s\n" % lines[0]
for line in lines[1:-1]:
output += indent + u"\u2502 %s\n" % line
output += indent + "\u2502 %s\n" % line
if len(lines) > 1:
output += indent + u"\u2514 %s\n" % lines[-1]
output += indent + "\u2514 %s\n" % lines[-1]
return output
@staticmethod
@ -111,7 +110,8 @@ class UnexpectedResult():
class ServoHandler(mozlog.reader.LogHandler):
"""LogHandler designed to collect unexpected results for use by
script or by the ServoFormatter output formatter."""
script or by the ServoFormatter output formatter."""
def __init__(self):
self.reset_state()
@ -126,24 +126,24 @@ class ServoHandler(mozlog.reader.LogHandler):
self.unexpected_results: List[UnexpectedResult] = []
self.expected = {
'OK': 0,
'PASS': 0,
'FAIL': 0,
'ERROR': 0,
'TIMEOUT': 0,
'SKIP': 0,
'CRASH': 0,
'PRECONDITION_FAILED': 0,
"OK": 0,
"PASS": 0,
"FAIL": 0,
"ERROR": 0,
"TIMEOUT": 0,
"SKIP": 0,
"CRASH": 0,
"PRECONDITION_FAILED": 0,
}
self.unexpected_tests = {
'OK': [],
'PASS': [],
'FAIL': [],
'ERROR': [],
'TIMEOUT': [],
'CRASH': [],
'PRECONDITION_FAILED': [],
"OK": [],
"PASS": [],
"FAIL": [],
"ERROR": [],
"TIMEOUT": [],
"CRASH": [],
"PRECONDITION_FAILED": [],
}
def suite_start(self, data):
@ -155,20 +155,19 @@ class ServoHandler(mozlog.reader.LogHandler):
pass
def test_start(self, data):
self.running_tests[data['thread']] = data['test']
self.running_tests[data["thread"]] = data["test"]
@staticmethod
def data_was_for_expected_result(data):
if "expected" not in data:
return True
return "known_intermittent" in data \
and data["status"] in data["known_intermittent"]
return "known_intermittent" in data and data["status"] in data["known_intermittent"]
def test_end(self, data: dict) -> Optional[UnexpectedResult]:
self.completed_tests += 1
test_status = data["status"]
test_path = data["test"]
del self.running_tests[data['thread']]
del self.running_tests[data["thread"]]
had_expected_test_result = self.data_was_for_expected_result(data)
subtest_failures = self.subtest_failures.pop(test_path, [])
@ -191,7 +190,7 @@ class ServoHandler(mozlog.reader.LogHandler):
data.get("message", ""),
data["time"],
stack,
subtest_failures
subtest_failures,
)
if not had_expected_test_result:
@ -205,19 +204,21 @@ class ServoHandler(mozlog.reader.LogHandler):
def test_status(self, data: dict):
if self.data_was_for_expected_result(data):
return
self.subtest_failures[data["test"]].append(UnexpectedSubtestResult(
data["test"],
data["subtest"],
data["status"],
data["expected"],
data.get("message", ""),
data["time"],
data.get('stack', None),
))
self.subtest_failures[data["test"]].append(
UnexpectedSubtestResult(
data["test"],
data["subtest"],
data["status"],
data["expected"],
data.get("message", ""),
data["time"],
data.get("stack", None),
)
)
def process_output(self, data):
if 'test' in data:
self.test_output[data['test']] += data['data'] + "\n"
if "test" in data:
self.test_output[data["test"]] += data["data"] + "\n"
def log(self, _):
pass
@ -225,7 +226,8 @@ class ServoHandler(mozlog.reader.LogHandler):
class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
"""Formatter designed to produce unexpected test results grouped
together in a readable format."""
together in a readable format."""
def __init__(self):
ServoHandler.__init__(self)
self.current_display = ""
@ -239,18 +241,17 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
try:
import blessed
self.terminal = blessed.Terminal()
self.move_up = self.terminal.move_up
self.clear_eol = self.terminal.clear_eol
except Exception as exception:
sys.stderr.write("GroupingFormatter: Could not get terminal "
"control characters: %s\n" % exception)
sys.stderr.write("GroupingFormatter: Could not get terminal control characters: %s\n" % exception)
def text_to_erase_display(self):
if not self.interactive or not self.current_display:
return ""
return ((self.move_up + self.clear_eol)
* self.current_display.count('\n'))
return (self.move_up + self.clear_eol) * self.current_display.count("\n")
def generate_output(self, text=None, new_display=None):
if not self.interactive:
@ -278,17 +279,16 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
max_width = self.line_width - len(new_display)
else:
max_width = sys.maxsize
return new_display + ("\n%s" % indent).join(
val[:max_width] for val in self.running_tests.values()) + "\n"
return new_display + ("\n%s" % indent).join(val[:max_width] for val in self.running_tests.values()) + "\n"
else:
return new_display + "No tests running.\n"
def suite_start(self, data):
ServoHandler.suite_start(self, data)
if self.number_of_tests == 0:
return "Running tests in %s\n\n" % data[u'source']
return "Running tests in %s\n\n" % data["source"]
else:
return "Running %i tests in %s\n\n" % (self.number_of_tests, data[u'source'])
return "Running %i tests in %s\n\n" % (self.number_of_tests, data["source"])
def test_start(self, data):
ServoHandler.test_start(self, data)
@ -300,8 +300,7 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
if unexpected_result:
# Surround test output by newlines so that it is easier to read.
output_for_unexpected_test = f"{unexpected_result}\n"
return self.generate_output(text=output_for_unexpected_test,
new_display=self.build_status_line())
return self.generate_output(text=output_for_unexpected_test, new_display=self.build_status_line())
# Print reason that tests are skipped.
if data["status"] == "SKIP":
@ -321,12 +320,14 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
def suite_end(self, data):
ServoHandler.suite_end(self, data)
if not self.interactive:
output = u"\n"
output = "\n"
else:
output = ""
output += u"Ran %i tests finished in %.1f seconds.\n" % (
self.completed_tests, (data["time"] - self.suite_start_time) / 1000)
output += "Ran %i tests finished in %.1f seconds.\n" % (
self.completed_tests,
(data["time"] - self.suite_start_time) / 1000,
)
# Sum the number of expected test results from each category
expected_test_results = sum(self.expected.values())
@ -337,29 +338,27 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
def text_for_unexpected_list(text, section):
tests = self.unexpected_tests[section]
if not tests:
return u""
return u" \u2022 %i tests %s\n" % (len(tests), text)
return ""
return " \u2022 %i tests %s\n" % (len(tests), text)
output += text_for_unexpected_list(u"crashed unexpectedly", 'CRASH')
output += text_for_unexpected_list(u"had errors unexpectedly", 'ERROR')
output += text_for_unexpected_list(u"failed unexpectedly", 'FAIL')
output += text_for_unexpected_list(u"precondition failed unexpectedly", 'PRECONDITION_FAILED')
output += text_for_unexpected_list(u"timed out unexpectedly", 'TIMEOUT')
output += text_for_unexpected_list(u"passed unexpectedly", 'PASS')
output += text_for_unexpected_list(u"unexpectedly okay", 'OK')
output += text_for_unexpected_list("crashed unexpectedly", "CRASH")
output += text_for_unexpected_list("had errors unexpectedly", "ERROR")
output += text_for_unexpected_list("failed unexpectedly", "FAIL")
output += text_for_unexpected_list("precondition failed unexpectedly", "PRECONDITION_FAILED")
output += text_for_unexpected_list("timed out unexpectedly", "TIMEOUT")
output += text_for_unexpected_list("passed unexpectedly", "PASS")
output += text_for_unexpected_list("unexpectedly okay", "OK")
num_with_failing_subtests = len(self.tests_with_failing_subtests)
if num_with_failing_subtests:
output += (u" \u2022 %i tests had unexpected subtest results\n"
% num_with_failing_subtests)
output += " \u2022 %i tests had unexpected subtest results\n" % num_with_failing_subtests
output += "\n"
# Repeat failing test output, so that it is easier to find, since the
# non-interactive version prints all the test names.
if not self.interactive and self.unexpected_results:
output += u"Tests with unexpected results:\n"
output += "".join([str(result)
for result in self.unexpected_results])
output += "Tests with unexpected results:\n"
output += "".join([str(result) for result in self.unexpected_results])
return self.generate_output(text=output, new_display="")
@ -371,8 +370,8 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
# We are logging messages that begin with STDERR, because that is how exceptions
# in this formatter are indicated.
if data['message'].startswith('STDERR'):
return self.generate_output(text=data['message'] + "\n")
if data["message"].startswith("STDERR"):
return self.generate_output(text=data["message"] + "\n")
if data['level'] in ('CRITICAL', 'ERROR'):
return self.generate_output(text=data['message'] + "\n")
if data["level"] in ("CRITICAL", "ERROR"):
return self.generate_output(text=data["message"] + "\n")


@ -22,10 +22,10 @@ from wptrunner import wptlogging
def create_parser():
p = argparse.ArgumentParser()
p.add_argument("--check-clean", action="store_true",
help="Check that updating the manifest doesn't lead to any changes")
p.add_argument("--rebuild", action="store_true",
help="Rebuild the manifest from scratch")
p.add_argument(
"--check-clean", action="store_true", help="Check that updating the manifest doesn't lead to any changes"
)
p.add_argument("--rebuild", action="store_true", help="Rebuild the manifest from scratch")
commandline.add_logging_group(p)
return p
@ -34,11 +34,13 @@ def create_parser():
def update(check_clean=True, rebuild=False, logger=None, **kwargs):
if not logger:
logger = wptlogging.setup(kwargs, {"mach": sys.stdout})
kwargs = {"config": os.path.join(WPT_PATH, "config.ini"),
"product": "servo",
"manifest_path": os.path.join(WPT_PATH, "meta"),
"tests_root": None,
"metadata_root": None}
kwargs = {
"config": os.path.join(WPT_PATH, "config.ini"),
"product": "servo",
"manifest_path": os.path.join(WPT_PATH, "meta"),
"tests_root": None,
"metadata_root": None,
}
set_from_config(kwargs)
config = kwargs["config"]
@ -53,15 +55,15 @@ def update(check_clean=True, rebuild=False, logger=None, **kwargs):
def _update(logger, test_paths, rebuild):
for url_base, paths in iteritems(test_paths):
manifest_path = os.path.join(paths.metadata_path, "MANIFEST.json")
cache_subdir = os.path.relpath(os.path.dirname(manifest_path),
os.path.dirname(__file__))
wptmanifest.manifest.load_and_update(paths.tests_path,
manifest_path,
url_base,
working_copy=True,
rebuild=rebuild,
cache_root=os.path.join(SERVO_ROOT, ".wpt",
cache_subdir))
cache_subdir = os.path.relpath(os.path.dirname(manifest_path), os.path.dirname(__file__))
wptmanifest.manifest.load_and_update(
paths.tests_path,
manifest_path,
url_base,
working_copy=True,
rebuild=rebuild,
cache_root=os.path.join(SERVO_ROOT, ".wpt", cache_subdir),
)
return 0
@ -72,26 +74,25 @@ def _check_clean(logger, test_paths):
tests_path = paths.tests_path
manifest_path = os.path.join(paths.metadata_path, "MANIFEST.json")
old_manifest = wptmanifest.manifest.load_and_update(tests_path,
manifest_path,
url_base,
working_copy=False,
update=False,
write_manifest=False)
old_manifest = wptmanifest.manifest.load_and_update(
tests_path, manifest_path, url_base, working_copy=False, update=False, write_manifest=False
)
# Even if no cache is specified, one will be used automatically by the
# VCS integration. Create a brand new cache every time to ensure that
# the VCS integration always thinks that any file modifications in the
# working directory are new and interesting.
cache_root = tempfile.mkdtemp()
new_manifest = wptmanifest.manifest.load_and_update(tests_path,
manifest_path,
url_base,
working_copy=True,
update=True,
cache_root=cache_root,
write_manifest=False,
allow_cached=False)
new_manifest = wptmanifest.manifest.load_and_update(
tests_path,
manifest_path,
url_base,
working_copy=True,
update=True,
cache_root=cache_root,
write_manifest=False,
allow_cached=False,
)
manifests_by_path[manifest_path] = (old_manifest, new_manifest)
@ -116,8 +117,7 @@ def diff_manifests(logger, manifest_path, old_manifest, new_manifest):
"""
logger.info("Diffing old and new manifests %s" % manifest_path)
old_items, new_items = defaultdict(set), defaultdict(set)
for manifest, items in [(old_manifest, old_items),
(new_manifest, new_items)]:
for manifest, items in [(old_manifest, old_items), (new_manifest, new_items)]:
for test_type, path, tests in manifest:
for test in tests:
test_id = [test.id]
@ -158,8 +158,8 @@ def diff_manifests(logger, manifest_path, old_manifest, new_manifest):
if clean:
# Manifest currently has some list vs tuple inconsistencies that break
# a simple equality comparison.
old_paths = old_manifest.to_json()['items']
new_paths = new_manifest.to_json()['items']
old_paths = old_manifest.to_json()["items"]
new_paths = new_manifest.to_json()["items"]
if old_paths != new_paths:
logger.warning("Manifest %s contains correct tests but file hashes changed." % manifest_path) # noqa
clean = False
@ -168,8 +168,4 @@ def diff_manifests(logger, manifest_path, old_manifest, new_manifest):
def log_error(logger, manifest_path, msg):
logger.lint_error(path=manifest_path,
message=msg,
lineno=0,
source="",
linter="wpt-manifest")
logger.lint_error(path=manifest_path, message=msg, lineno=0, source="", linter="wpt-manifest")


@ -19,10 +19,7 @@ import mozlog
import mozlog.formatters
from . import SERVO_ROOT, WPT_PATH, WPT_TOOLS_PATH
from .grouping_formatter import (
ServoFormatter, ServoHandler,
UnexpectedResult, UnexpectedSubtestResult
)
from .grouping_formatter import ServoFormatter, ServoHandler, UnexpectedResult, UnexpectedSubtestResult
from wptrunner import wptcommandline
from wptrunner import wptrunner
@ -63,12 +60,8 @@ def run_tests(default_binary_path: str, **kwargs):
set_if_none(kwargs, "processes", multiprocessing.cpu_count())
set_if_none(kwargs, "ca_cert_path", os.path.join(CERTS_PATH, "cacert.pem"))
set_if_none(
kwargs, "host_key_path", os.path.join(CERTS_PATH, "web-platform.test.key")
)
set_if_none(
kwargs, "host_cert_path", os.path.join(CERTS_PATH, "web-platform.test.pem")
)
set_if_none(kwargs, "host_key_path", os.path.join(CERTS_PATH, "web-platform.test.key"))
set_if_none(kwargs, "host_cert_path", os.path.join(CERTS_PATH, "web-platform.test.pem"))
# Set `id_hash` as the default chunk, as this better distributes testing across different
# chunks and leads to more consistent timing on GitHub Actions.
set_if_none(kwargs, "chunk_type", "id_hash")
@ -139,8 +132,7 @@ def run_tests(default_binary_path: str, **kwargs):
handler.reset_state()
print(80 * "=")
print(f"Rerunning {len(unexpected_results)} tests "
"with unexpected results to detect flaky tests.")
print(f"Rerunning {len(unexpected_results)} tests with unexpected results to detect flaky tests.")
unexpected_results_tests = [result.path for result in unexpected_results]
kwargs["test_list"] = unexpected_results_tests
kwargs["include"] = unexpected_results_tests
@ -158,8 +150,7 @@ def run_tests(default_binary_path: str, **kwargs):
for result in unexpected_results:
result.flaky = result.path not in stable_tests
all_filtered = filter_intermittents(unexpected_results,
filter_intermittents_output)
all_filtered = filter_intermittents(unexpected_results, filter_intermittents_output)
return_value = 0 if all_filtered else 1
# Write the unexpected-only raw log if that was specified on the command-line.
@ -168,9 +159,7 @@ def run_tests(default_binary_path: str, **kwargs):
print("'--log-raw-unexpected' not written without '--log-raw'.")
else:
write_unexpected_only_raw_log(
handler.unexpected_results,
raw_log_outputs[0].name,
unexpected_raw_log_output_file
handler.unexpected_results, raw_log_outputs[0].name, unexpected_raw_log_output_file
)
return return_value
@ -182,12 +171,10 @@ class GithubContextInformation(NamedTuple):
branch_name: Optional[str]
class TrackerDashboardFilter():
class TrackerDashboardFilter:
def __init__(self):
base_url = os.environ.get(TRACKER_API_ENV_VAR, TRACKER_API)
self.headers = {
"Content-Type": "application/json"
}
self.headers = {"Content-Type": "application/json"}
if TRACKER_DASHBOARD_SECRET_ENV_VAR in os.environ and os.environ[TRACKER_DASHBOARD_SECRET_ENV_VAR]:
self.url = f"{base_url}/dashboard/attempts"
secret = os.environ[TRACKER_DASHBOARD_SECRET_ENV_VAR]
@ -201,10 +188,10 @@ class TrackerDashboardFilter():
if not github_context:
return GithubContextInformation(None, None, None)
repository = github_context['repository']
repository = github_context["repository"]
repo_url = f"https://github.com/{repository}"
run_id = github_context['run_id']
run_id = github_context["run_id"]
build_url = f"{repo_url}/actions/runs/{run_id}"
commit_title = "<no title>"
@ -214,32 +201,27 @@ class TrackerDashboardFilter():
commit_title = github_context["event"]["head_commit"]["message"]
pr_url = None
match = re.match(r"^Auto merge of #(\d+)", commit_title) or \
re.match(r"\(#(\d+)\)", commit_title)
match = re.match(r"^Auto merge of #(\d+)", commit_title) or re.match(r"\(#(\d+)\)", commit_title)
if match:
pr_url = f"{repo_url}/pull/{match.group(1)}" if match else None
return GithubContextInformation(
build_url,
pr_url,
github_context["ref_name"]
)
return GithubContextInformation(build_url, pr_url, github_context["ref_name"])
def make_data_from_result(
self,
result: Union[UnexpectedResult, UnexpectedSubtestResult],
) -> dict:
data = {
'path': result.path,
'subtest': None,
'expected': result.expected,
'actual': result.actual,
'time': result.time // 1000,
"path": result.path,
"subtest": None,
"expected": result.expected,
"actual": result.actual,
"time": result.time // 1000,
# Truncate the message, to avoid issues with lots of output causing "HTTP
# Error 413: Request Entity Too Large."
# See https://github.com/servo/servo/issues/31845.
'message': result.message[0:TRACKER_DASHBOARD_MAXIMUM_OUTPUT_LENGTH],
'stack': result.stack,
"message": result.message[0:TRACKER_DASHBOARD_MAXIMUM_OUTPUT_LENGTH],
"stack": result.stack,
}
if isinstance(result, UnexpectedSubtestResult):
data["subtest"] = result.subtest
@ -256,20 +238,22 @@ class TrackerDashboardFilter():
try:
request = urllib.request.Request(
url=self.url,
method='POST',
data=json.dumps({
'branch': context.branch_name,
'build_url': context.build_url,
'pull_url': context.pull_url,
'attempts': attempts
}).encode('utf-8'),
headers=self.headers)
method="POST",
data=json.dumps(
{
"branch": context.branch_name,
"build_url": context.build_url,
"pull_url": context.pull_url,
"attempts": attempts,
}
).encode("utf-8"),
headers=self.headers,
)
known_intermittents = dict()
with urllib.request.urlopen(request) as response:
for test in json.load(response)["known"]:
known_intermittents[test["path"]] = \
[issue["number"] for issue in test["issues"]]
known_intermittents[test["path"]] = [issue["number"] for issue in test["issues"]]
except urllib.error.HTTPError as e:
print(e)
@ -280,13 +264,9 @@ class TrackerDashboardFilter():
result.issues = known_intermittents.get(result.path, [])
def filter_intermittents(
unexpected_results: List[UnexpectedResult],
output_path: str
) -> bool:
def filter_intermittents(unexpected_results: List[UnexpectedResult], output_path: str) -> bool:
dashboard = TrackerDashboardFilter()
print(f"Filtering {len(unexpected_results)} "
f"unexpected results for known intermittents via <{dashboard.url}>")
print(f"Filtering {len(unexpected_results)} unexpected results for known intermittents via <{dashboard.url}>")
dashboard.report_failures(unexpected_results)
def add_result(output, text, results: List[UnexpectedResult], filter_func) -> None:
@ -298,12 +278,14 @@ def filter_intermittents(
return not result.flaky and not result.issues
output: List[str] = []
add_result(output, "Flaky unexpected results", unexpected_results,
lambda result: result.flaky)
add_result(output, "Stable unexpected results that are known-intermittent",
unexpected_results, lambda result: not result.flaky and result.issues)
add_result(output, "Stable unexpected results",
unexpected_results, is_stable_and_unexpected)
add_result(output, "Flaky unexpected results", unexpected_results, lambda result: result.flaky)
add_result(
output,
"Stable unexpected results that are known-intermittent",
unexpected_results,
lambda result: not result.flaky and result.issues,
)
add_result(output, "Stable unexpected results", unexpected_results, is_stable_and_unexpected)
print("\n".join(output))
with open(output_path, "w", encoding="utf-8") as file:
@ -313,9 +295,7 @@ def filter_intermittents(
def write_unexpected_only_raw_log(
unexpected_results: List[UnexpectedResult],
raw_log_file: str,
filtered_raw_log_file: str
unexpected_results: List[UnexpectedResult], raw_log_file: str, filtered_raw_log_file: str
):
tests = [result.path for result in unexpected_results]
print(f"Writing unexpected-only raw log to {filtered_raw_log_file}")
@ -324,6 +304,5 @@ def write_unexpected_only_raw_log(
with open(raw_log_file) as input:
for line in input.readlines():
data = json.loads(line)
if data["action"] in ["suite_start", "suite_end"] or \
("test" in data and data["test"] in tests):
if data["action"] in ["suite_start", "suite_end"] or ("test" in data and data["test"] in tests):
output.write(line)


@ -49,13 +49,13 @@ PORT = 9000
@dataclasses.dataclass
class MockPullRequest():
class MockPullRequest:
head: str
number: int
state: str = "open"
class MockGitHubAPIServer():
class MockGitHubAPIServer:
def __init__(self, port: int):
self.port = port
self.disable_logging()
@ -65,18 +65,19 @@ class MockGitHubAPIServer():
class NoLoggingHandler(WSGIRequestHandler):
def log_message(self, *args):
pass
if logging.getLogger().level == logging.DEBUG:
handler = WSGIRequestHandler
else:
handler = NoLoggingHandler
self.server = make_server('localhost', self.port, self.app, handler_class=handler)
self.server = make_server("localhost", self.port, self.app, handler_class=handler)
self.start_server_thread()
def disable_logging(self):
flask.cli.show_server_banner = lambda *args: None
logging.getLogger("werkzeug").disabled = True
logging.getLogger('werkzeug').setLevel(logging.CRITICAL)
logging.getLogger("werkzeug").setLevel(logging.CRITICAL)
def start(self):
self.thread.start()
@ -84,21 +85,21 @@ class MockGitHubAPIServer():
# Wait for the server to be started.
while True:
try:
response = requests.get(f'http://localhost:{self.port}/ping', timeout=1)
response = requests.get(f"http://localhost:{self.port}/ping", timeout=1)
assert response.status_code == 200
assert response.text == 'pong'
assert response.text == "pong"
break
except Exception:
time.sleep(0.1)
def reset_server_state_with_pull_requests(self, pulls: list[MockPullRequest]):
response = requests.get(
f'http://localhost:{self.port}/reset-mock-github',
f"http://localhost:{self.port}/reset-mock-github",
json=[dataclasses.asdict(pull_request) for pull_request in pulls],
timeout=1
timeout=1,
)
assert response.status_code == 200
assert response.text == '👍'
assert response.text == "👍"
def shutdown(self):
self.server.shutdown()
@ -111,26 +112,25 @@ class MockGitHubAPIServer():
@self.app.route("/ping")
def ping():
return ('pong', 200)
return ("pong", 200)
@self.app.route("/reset-mock-github")
def reset_server():
self.pulls = [
MockPullRequest(pull_request['head'],
pull_request['number'],
pull_request['state'])
for pull_request in flask.request.json]
return ('👍', 200)
MockPullRequest(pull_request["head"], pull_request["number"], pull_request["state"])
for pull_request in flask.request.json
]
return ("👍", 200)
@self.app.route("/repos/<org>/<repo>/pulls/<int:number>/merge", methods=['PUT'])
@self.app.route("/repos/<org>/<repo>/pulls/<int:number>/merge", methods=["PUT"])
def merge_pull_request(org, repo, number):
for pull_request in self.pulls:
if pull_request.number == number:
pull_request.state = 'closed'
return ('', 204)
return ('', 404)
pull_request.state = "closed"
return ("", 204)
return ("", 404)
@self.app.route("/search/issues", methods=['GET'])
@self.app.route("/search/issues", methods=["GET"])
def search():
params = {}
param_strings = flask.request.args.get("q", "").split(" ")
@ -145,38 +145,29 @@ class MockGitHubAPIServer():
for pull_request in self.pulls:
if pull_request.head.endswith(head_ref):
return json.dumps({
"total_count": 1,
"items": [{
"number": pull_request.number
}]
})
return json.dumps({"total_count": 1, "items": [{"number": pull_request.number}]})
return json.dumps({"total_count": 0, "items": []})
@self.app.route("/repos/<org>/<repo>/pulls", methods=['POST'])
@self.app.route("/repos/<org>/<repo>/pulls", methods=["POST"])
def create_pull_request(org, repo):
new_pr_number = len(self.pulls) + 1
self.pulls.append(MockPullRequest(
flask.request.json["head"],
new_pr_number,
"open"
))
self.pulls.append(MockPullRequest(flask.request.json["head"], new_pr_number, "open"))
return {"number": new_pr_number}
@self.app.route("/repos/<org>/<repo>/pulls/<int:number>", methods=['PATCH'])
@self.app.route("/repos/<org>/<repo>/pulls/<int:number>", methods=["PATCH"])
def update_pull_request(org, repo, number):
for pull_request in self.pulls:
if pull_request.number == number:
if 'state' in flask.request.json:
pull_request.state = flask.request.json['state']
return ('', 204)
return ('', 404)
if "state" in flask.request.json:
pull_request.state = flask.request.json["state"]
return ("", 204)
return ("", 404)
@self.app.route("/repos/<org>/<repo>/issues/<number>/labels", methods=['GET', 'POST'])
@self.app.route("/repos/<org>/<repo>/issues/<number>/labels/<label>", methods=['DELETE'])
@self.app.route("/repos/<org>/<repo>/issues/<issue>/comments", methods=['GET', 'POST'])
@self.app.route("/repos/<org>/<repo>/issues/<number>/labels", methods=["GET", "POST"])
@self.app.route("/repos/<org>/<repo>/issues/<number>/labels/<label>", methods=["DELETE"])
@self.app.route("/repos/<org>/<repo>/issues/<issue>/comments", methods=["GET", "POST"])
def other_requests(*args, **kwargs):
return ('', 204)
return ("", 204)
class TestCleanUpBodyText(unittest.TestCase):
@ -196,28 +187,22 @@ class TestCleanUpBodyText(unittest.TestCase):
)
self.assertEqual(
"Subject\n\nBody text #<!-- nolink -->1",
SyncRun.clean_up_body_text(
"Subject\n\nBody text #1\n---<!-- Thank you for contributing"
),
SyncRun.clean_up_body_text("Subject\n\nBody text #1\n---<!-- Thank you for contributing"),
)
self.assertEqual(
"Subject\n\nNo dashes",
SyncRun.clean_up_body_text(
"Subject\n\nNo dashes<!-- Thank you for contributing"
),
SyncRun.clean_up_body_text("Subject\n\nNo dashes<!-- Thank you for contributing"),
)
self.assertEqual(
"Subject\n\nNo --- comment",
SyncRun.clean_up_body_text(
"Subject\n\nNo --- comment\n---Other stuff that"
),
SyncRun.clean_up_body_text("Subject\n\nNo --- comment\n---Other stuff that"),
)
self.assertEqual(
"Subject\n\n#<!-- nolink -->3 servo#<!-- nolink -->3 servo/servo#3",
SyncRun.clean_up_body_text(
"Subject\n\n#3 servo#3 servo/servo#3",
),
"Only relative and bare issue reference links should be escaped."
"Only relative and bare issue reference links should be escaped.",
)
@ -236,9 +221,7 @@ class TestApplyCommitsToWPT(unittest.TestCase):
pull_request = SYNC.servo.get_pull_request(pr_number)
step = CreateOrUpdateBranchForPRStep({"number": pr_number}, pull_request)
def get_applied_commits(
num_commits: int, applied_commits: list[Tuple[str, str]]
):
def get_applied_commits(num_commits: int, applied_commits: list[Tuple[str, str]]):
assert SYNC is not None
repo = SYNC.local_wpt_repo
log = ["log", "--oneline", f"-{num_commits}"]
@ -252,17 +235,13 @@ class TestApplyCommitsToWPT(unittest.TestCase):
applied_commits: list[Any] = []
callback = partial(get_applied_commits, len(commits), applied_commits)
step._create_or_update_branch_for_pr(
SyncRun(SYNC, pull_request, None, None), commits, callback
)
step._create_or_update_branch_for_pr(SyncRun(SYNC, pull_request, None, None), commits, callback)
expected_commits = [(commit["author"], commit["message"]) for commit in commits]
self.assertListEqual(applied_commits, expected_commits)
def test_simple_commit(self):
self.run_test(
45, [["test author <test@author>", "test commit message", "18746.diff"]]
)
self.run_test(45, [["test author <test@author>", "test commit message", "18746.diff"]])
def test_two_commits(self):
self.run_test(
@ -299,9 +278,7 @@ class TestFullSyncRun(unittest.TestCase):
assert SYNC is not None
# Clean up any old files.
first_commit_hash = SYNC.local_servo_repo.run("rev-list", "HEAD").splitlines()[
-1
]
first_commit_hash = SYNC.local_servo_repo.run("rev-list", "HEAD").splitlines()[-1]
SYNC.local_servo_repo.run("reset", "--hard", first_commit_hash)
SYNC.local_servo_repo.run("clean", "-fxd")
@ -339,9 +316,7 @@ class TestFullSyncRun(unittest.TestCase):
SYNC.local_servo_repo.run("reset", "--hard", orig_sha)
return last_commit_sha
def run_test(
self, payload_file: str, diffs: list, existing_prs: list[MockPullRequest] = []
):
def run_test(self, payload_file: str, diffs: list, existing_prs: list[MockPullRequest] = []):
with open(os.path.join(TESTS_DIR, payload_file), encoding="utf-8") as file:
payload = json.loads(file.read())
@ -413,12 +388,8 @@ class TestFullSyncRun(unittest.TestCase):
)
def test_opened_new_mr_with_no_sync_signal(self):
self.assertListEqual(
self.run_test("opened-with-no-sync-signal.json", ["18746.diff"]), []
)
self.assertListEqual(
self.run_test("opened-with-no-sync-signal.json", ["non-wpt.diff"]), []
)
self.assertListEqual(self.run_test("opened-with-no-sync-signal.json", ["18746.diff"]), [])
self.assertListEqual(self.run_test("opened-with-no-sync-signal.json", ["non-wpt.diff"]), [])
def test_opened_upstreamable_pr_not_applying_cleanly_to_upstream(self):
self.assertListEqual(
@ -459,7 +430,7 @@ class TestFullSyncRun(unittest.TestCase):
"RemoveBranchForPRStep:servo/wpt/servo_export_18746",
"CommentStep:servo/servo#18746:🤖 This change no longer contains upstreamable changes "
"to WPT; closed existing upstream pull request (wpt/wpt#1).",
]
],
)
def test_opened_upstreamable_pr_with_non_utf8_file_contents(self):
@ -502,10 +473,7 @@ class TestFullSyncRun(unittest.TestCase):
["18746.diff"],
[MockPullRequest("servo:servo_export_18746", 10)],
),
[
"ChangePRStep:wpt/wpt#10:closed",
"RemoveBranchForPRStep:servo/wpt/servo_export_18746"
]
["ChangePRStep:wpt/wpt#10:closed", "RemoveBranchForPRStep:servo/wpt/servo_export_18746"],
)
def test_synchronize_move_new_changes_to_preexisting_upstream_pr(self):
@ -520,7 +488,7 @@ class TestFullSyncRun(unittest.TestCase):
"CreateOrUpdateBranchForPRStep:1:servo/wpt/servo_export_19612",
"CommentStep:servo/servo#19612:📝 Transplanted new upstreamable changes to existing "
"upstream WPT pull request (wpt/wpt#10).",
]
],
)
def test_synchronize_close_upstream_pr_after_new_changes_do_not_include_wpt(self):
@ -537,7 +505,7 @@ class TestFullSyncRun(unittest.TestCase):
"RemoveBranchForPRStep:servo/wpt/servo_export_19612",
"CommentStep:servo/servo#19612:🤖 This change no longer contains upstreamable changes to WPT; "
"closed existing upstream pull request (wpt/wpt#11).",
]
],
)
def test_synchronize_open_upstream_pr_after_new_changes_include_wpt(self):
@ -548,7 +516,7 @@ class TestFullSyncRun(unittest.TestCase):
"OpenPRStep:servo/wpt/servo_export_19612→wpt/wpt#1",
"CommentStep:servo/servo#19612:🤖 Opened new upstream WPT pull request "
"(wpt/wpt#1) with upstreamable changes.",
]
],
)
def test_synchronize_fail_to_update_preexisting_pr_after_new_changes_do_not_apply(
@ -567,20 +535,17 @@ class TestFullSyncRun(unittest.TestCase):
"latest upstream WPT. Servo's copy of the Web Platform Tests may be out of sync.",
"CommentStep:wpt/wpt#11:🛠 Changes from the source pull request (servo/servo#19612) can "
"no longer be cleanly applied. Waiting for a new version of these changes downstream.",
]
],
)
def test_edited_with_upstream_pr(self):
self.assertListEqual(
self.run_test(
"edited.json", ["wpt.diff"],
[MockPullRequest("servo:servo_export_19620", 10)]
),
self.run_test("edited.json", ["wpt.diff"], [MockPullRequest("servo:servo_export_19620", 10)]),
[
"ChangePRStep:wpt/wpt#10:open:A cool new title:Reference #<!--...[136]",
"CommentStep:servo/servo#19620:✍ Updated existing upstream WPT pull "
"request (wpt/wpt#10) title and body."
]
"request (wpt/wpt#10) title and body.",
],
)
def test_edited_with_no_upstream_pr(self):
@ -590,15 +555,13 @@ class TestFullSyncRun(unittest.TestCase):
self,
):
self.assertListEqual(
self.run_test(
"synchronize-multiple.json", ["18746.diff", "non-wpt.diff", "wpt.diff"]
),
self.run_test("synchronize-multiple.json", ["18746.diff", "non-wpt.diff", "wpt.diff"]),
[
"CreateOrUpdateBranchForPRStep:2:servo/wpt/servo_export_19612",
"OpenPRStep:servo/wpt/servo_export_19612→wpt/wpt#1",
"CommentStep:servo/servo#19612:"
"🤖 Opened new upstream WPT pull request (wpt/wpt#1) with upstreamable changes.",
]
],
)
def test_synchronize_with_non_upstreamable_changes(self):
@ -606,15 +569,8 @@ class TestFullSyncRun(unittest.TestCase):
def test_merge_upstream_pr_after_merge(self):
self.assertListEqual(
self.run_test(
"merged.json",
["18746.diff"],
[MockPullRequest("servo:servo_export_19620", 100)]
),
[
"MergePRStep:wpt/wpt#100",
"RemoveBranchForPRStep:servo/wpt/servo_export_19620"
]
self.run_test("merged.json", ["18746.diff"], [MockPullRequest("servo:servo_export_19620", 100)]),
["MergePRStep:wpt/wpt#100", "RemoveBranchForPRStep:servo/wpt/servo_export_19620"],
)
def test_pr_merged_no_upstream_pr(self):
@ -644,8 +600,7 @@ def setUpModule():
)
def setup_mock_repo(repo_name, local_repo, default_branch: str):
subprocess.check_output(
["cp", "-R", "-p", os.path.join(TESTS_DIR, repo_name), local_repo.path])
subprocess.check_output(["cp", "-R", "-p", os.path.join(TESTS_DIR, repo_name), local_repo.path])
local_repo.run("init", "-b", default_branch)
local_repo.run("add", ".")
local_repo.run("commit", "-a", "-m", "Initial commit")
@ -666,12 +621,16 @@ def run_tests():
verbosity = 1 if logging.getLogger().level >= logging.WARN else 2
def run_suite(test_case: Type[unittest.TestCase]):
return unittest.TextTestRunner(verbosity=verbosity).run(
unittest.TestLoader().loadTestsFromTestCase(test_case)
).wasSuccessful()
return (
unittest.TextTestRunner(verbosity=verbosity)
.run(unittest.TestLoader().loadTestsFromTestCase(test_case))
.wasSuccessful()
)
return all([
run_suite(TestApplyCommitsToWPT),
run_suite(TestCleanUpBodyText),
run_suite(TestFullSyncRun),
])
return all(
[
run_suite(TestApplyCommitsToWPT),
run_suite(TestCleanUpBodyText),
run_suite(TestFullSyncRun),
]
)


@ -5,6 +5,6 @@ index 10d52a0..92fb89d 100644
@@ -8,3 +8,4 @@
# except according to those terms.
print('this is a python file')
+print('this is a change')
print("this is a python file")
+print("this is a change")


@ -7,4 +7,4 @@
# option. This file may not be copied, modified, or distributed
# except according to those terms.
print('this is a python file')
print("this is a python file")


@ -15,11 +15,8 @@ from wptrunner import wptcommandline # noqa: F401
from . import WPT_PATH
from . import manifestupdate
TEST_ROOT = os.path.join(WPT_PATH, 'tests')
META_ROOTS = [
os.path.join(WPT_PATH, 'meta'),
os.path.join(WPT_PATH, 'meta-legacy')
]
TEST_ROOT = os.path.join(WPT_PATH, "tests")
META_ROOTS = [os.path.join(WPT_PATH, "meta"), os.path.join(WPT_PATH, "meta-legacy")]
def do_sync(**kwargs) -> int:
@ -28,8 +25,8 @@ def do_sync(**kwargs) -> int:
# Commits should always be authored by the GitHub Actions bot.
os.environ["GIT_AUTHOR_NAME"] = "Servo WPT Sync"
os.environ["GIT_AUTHOR_EMAIL"] = "ghbot+wpt-sync@servo.org"
os.environ["GIT_COMMITTER_NAME"] = os.environ['GIT_AUTHOR_NAME']
os.environ["GIT_COMMITTER_EMAIL"] = os.environ['GIT_AUTHOR_EMAIL']
os.environ["GIT_COMMITTER_NAME"] = os.environ["GIT_AUTHOR_NAME"]
os.environ["GIT_COMMITTER_EMAIL"] = os.environ["GIT_AUTHOR_EMAIL"]
print("Updating WPT from upstream...")
run_update(**kwargs)
@ -67,7 +64,7 @@ def remove_unused_metadata():
dir_path = os.path.join(base_dir, dir_name)
# Skip any known directories that are meta-metadata.
if dir_name == '.cache':
if dir_name == ".cache":
unused_dirs.append(dir_path)
continue
@ -78,12 +75,11 @@ def remove_unused_metadata():
for fname in files:
# Skip any known files that are meta-metadata.
if not fname.endswith(".ini") or fname == '__dir__.ini':
if not fname.endswith(".ini") or fname == "__dir__.ini":
continue
# Turn tests/wpt/meta/foo/bar.html.ini into tests/wpt/tests/foo/bar.html.
test_file = os.path.join(
TEST_ROOT, os.path.relpath(base_dir, meta_root), fname[:-4])
test_file = os.path.join(TEST_ROOT, os.path.relpath(base_dir, meta_root), fname[:-4])
if not os.path.exists(test_file):
unused_files.append(os.path.join(base_dir, fname))
@ -106,10 +102,10 @@ def update_tests(**kwargs) -> int:
kwargs["store_state"] = False
wptcommandline.set_from_config(kwargs)
if hasattr(wptcommandline, 'check_paths'):
if hasattr(wptcommandline, "check_paths"):
wptcommandline.check_paths(kwargs["test_paths"])
if kwargs.get('sync', False):
if kwargs.get("sync", False):
return do_sync(**kwargs)
return 0 if run_update(**kwargs) else 1