Remove more Taskcluster and Treeherder integration

Servo no longer uses Taskcluster and Treeherder, so this change removes
the scripts that reference those services, along with their supporting files.
Martin Robinson 2023-04-10 13:56:47 +02:00
parent d579bd91b8
commit bc3abf9953
25 changed files with 11 additions and 2174 deletions


@@ -1,169 +0,0 @@
#!/usr/bin/env python3

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.

import gzip
import json
import os
import re
import sys
import urllib.request
from html import escape as html_escape

TASKCLUSTER_ROOT_URL = "https://community-tc.services.mozilla.com"


def fetch(url):
    # Fetch a Taskcluster API endpoint, transparently decompressing gzip responses.
    url = TASKCLUSTER_ROOT_URL + "/api/" + url
    print("Fetching " + url)
    response = urllib.request.urlopen(url)
    assert response.getcode() == 200
    encoding = response.info().get("Content-Encoding")
    if not encoding:
        return response
    elif encoding == "gzip":
        return gzip.GzipFile(fileobj=response)
    else:
        raise ValueError("Unsupported Content-Encoding: %s" % encoding)


def fetch_json(url):
    with fetch(url) as response:
        return json.load(response)


def task(platform, chunk, key):
    # Taskcluster index path for one WPT chunk of a platform's run.
    return "index/v1/task/project.servo.%s_wpt_%s.%s" % (platform, chunk, key)


def failing_reftests(platform, key):
    # Work out how many chunks the run was split into from the first chunk's
    # task name, then parse the structured log of every chunk.
    chunk_1_task_id = fetch_json(task(platform, 1, key))["taskId"]
    name = fetch_json("queue/v1/task/" + chunk_1_task_id)["metadata"]["name"]
    match = re.search(r"WPT chunk (\d+) / (\d+)", name)
    assert match.group(1) == "1"
    total_chunks = int(match.group(2))

    for chunk in range(1, total_chunks + 1):
        with fetch(task(platform, chunk, key) + "/artifacts/public/test-wpt.log") as response:
            yield from parse(response)


def parse(file_like):
    # Yield (test URL, whether a PASS was expected, reftest screenshots) for
    # every unexpected result that carries screenshots in a structured wptrunner log.
    seen = set()
    for line in file_like:
        message = json.loads(line)
        status = message.get("status")
        if status not in {None, "OK", "PASS"}:
            screenshots = message.get("extra", {}).get("reftest_screenshots")
            if screenshots:
                url = message["test"]
                assert url.startswith("/")
                yield url[1:], message.get("expected") == "PASS", screenshots


def main(source, commit_sha=None):
    failures = Directory()

    if commit_sha:
        # Compare the Layout 2020 run against the legacy layout run for the
        # same commit, and only report tests that fail in Layout 2020 alone.
        title = "<h1>Layout 2020 regressions in commit <code>%s</code></h1>" % commit_sha
        failures_2013 = {url for url, _, _ in failing_reftests("linux_x64", source)}
        for url, _expected_pass, screenshots in failing_reftests("linux_x64_2020", source):
            if url not in failures_2013:
                failures.add(url, screenshots)
    else:
        # Report unexpected failures from a local structured log file.
        title = "Unexpected failures"
        with open(source, "rb") as file_obj:
            for url, expected_pass, screenshots in parse(file_obj):
                if expected_pass:
                    failures.add(url, screenshots)

    here = os.path.dirname(__file__)
    with open(os.path.join(here, "prism.js")) as f:
        prism_js = f.read()
    with open(os.path.join(here, "prism.css")) as f:
        prism_css = f.read()

    with open(os.path.join(here, "report.html"), "w", encoding="utf-8") as html:
        os.chdir(os.path.join(here, ".."))
        html.write("""
            <!doctype html>
            <meta charset=utf-8>
            <title>WPT reftests failures report</title>
            <link rel=stylesheet href=prism.css>
            <style>
                %s
                ul { padding-left: 1em }
                li { list-style: "" }
                li.expanded { list-style: "" }
                li:not(.expanded) > ul, li:not(.expanded) > div { display: none }
                li > div { display: grid; grid-gap: 1em; grid-template-columns: 1fr 1fr }
                li > div > p { grid-column: span 2 }
                li > div > img { grid-row: 2; width: 300px; box-shadow: 0 0 10px }
                li > div > img:hover { transform: scale(3); transform-origin: 0 0 }
                li > div > pre { grid-row: 3; font-size: 12px !important }
                pre code { white-space: pre-wrap !important }
            </style>
            %s
        """ % (prism_css, title))
        failures.write(html)
        html.write("""
            <script>
            for (let li of document.getElementsByTagName("li")) {
                li.addEventListener('click', event => {
                    li.classList.toggle("expanded")
                    event.stopPropagation()
                })
            }
            %s
            </script>
        """ % prism_js)


class Directory:
    # Tree of failing tests, keyed by path component, with per-directory counts.
    def __init__(self):
        self.count = 0
        self.contents = {}

    def add(self, path, screenshots):
        self.count += 1
        first, _, rest = path.partition("/")
        if rest:
            self.contents.setdefault(first, Directory()).add(rest, screenshots)
        else:
            assert path not in self.contents
            self.contents[path] = screenshots

    def write(self, html):
        html.write("<ul>\n")
        for k, v in self.contents.items():
            html.write("<li><code>%s</code>\n" % k)
            if isinstance(v, Directory):
                html.write("<strong>%s</strong>\n" % v.count)
                v.write(html)
            else:
                a, rel, b = v
                html.write("<div>\n<p><code>%s</code> %s <code>%s</code></p>\n"
                           % (a["url"], rel, b["url"]))
                for side in [a, b]:
                    html.write("<img src='data:image/png;base64,%s'>\n" % side["screenshot"])
                    url = side["url"]
                    prefix = "/_mozilla/"
                    if url.startswith(prefix):
                        filename = "mozilla/tests/" + url[len(prefix):]
                    elif url == "about:blank":
                        src = ""
                        filename = None
                    else:
                        filename = "web-platform-tests" + url
                    if filename:
                        with open(filename, encoding="utf-8") as f:
                            src = html_escape(f.read())
                    html.write("<pre><code class=language-html>%s</code></pre>\n" % src)
            html.write("</li>\n")
        html.write("</ul>\n")


if __name__ == "__main__":
    sys.exit(main(*sys.argv[1:]))
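For context, a minimal sketch of how main() above was driven, assuming it runs in the context of this module; the log file name, index key, and commit SHA below are illustrative placeholders, not values taken from this commit:

# Hypothetical invocations of the report generator above; argument values are
# placeholders for illustration only.

# 1. Local structured log: report reftests that were expected to PASS but failed.
main("test-wpt.log")

# 2. Taskcluster index key plus commit SHA: fetch every chunk's log for the
#    linux_x64 and linux_x64_2020 runs from the index and report reftests that
#    fail only under Layout 2020.
main("latest", commit_sha="0123abcd")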


@@ -1,141 +0,0 @@
/* PrismJS 1.19.0
https://prismjs.com/download.html#themes=prism&languages=markup+css+clike+javascript */
/**
* prism.js default theme for JavaScript, CSS and HTML
* Based on dabblet (http://dabblet.com)
* @author Lea Verou
*/
code[class*="language-"],
pre[class*="language-"] {
    color: black;
    background: none;
    text-shadow: 0 1px white;
    font-family: Consolas, Monaco, 'Andale Mono', 'Ubuntu Mono', monospace;
    font-size: 1em;
    text-align: left;
    white-space: pre;
    word-spacing: normal;
    word-break: normal;
    word-wrap: normal;
    line-height: 1.5;

    -moz-tab-size: 4;
    -o-tab-size: 4;
    tab-size: 4;

    -webkit-hyphens: none;
    -moz-hyphens: none;
    -ms-hyphens: none;
    hyphens: none;
}

pre[class*="language-"]::-moz-selection, pre[class*="language-"] ::-moz-selection,
code[class*="language-"]::-moz-selection, code[class*="language-"] ::-moz-selection {
    text-shadow: none;
    background: #b3d4fc;
}

pre[class*="language-"]::selection, pre[class*="language-"] ::selection,
code[class*="language-"]::selection, code[class*="language-"] ::selection {
    text-shadow: none;
    background: #b3d4fc;
}

@media print {
    code[class*="language-"],
    pre[class*="language-"] {
        text-shadow: none;
    }
}

/* Code blocks */
pre[class*="language-"] {
    padding: 1em;
    margin: .5em 0;
    overflow: auto;
}

:not(pre) > code[class*="language-"],
pre[class*="language-"] {
    background: #f5f2f0;
}

/* Inline code */
:not(pre) > code[class*="language-"] {
    padding: .1em;
    border-radius: .3em;
    white-space: normal;
}

.token.comment,
.token.prolog,
.token.doctype,
.token.cdata {
    color: slategray;
}

.token.punctuation {
    color: #999;
}

.token.namespace {
    opacity: .7;
}

.token.property,
.token.tag,
.token.boolean,
.token.number,
.token.constant,
.token.symbol,
.token.deleted {
    color: #905;
}

.token.selector,
.token.attr-name,
.token.string,
.token.char,
.token.builtin,
.token.inserted {
    color: #690;
}

.token.operator,
.token.entity,
.token.url,
.language-css .token.string,
.style .token.string {
    color: #9a6e3a;
    background: hsla(0, 0%, 100%, .5);
}

.token.atrule,
.token.attr-value,
.token.keyword {
    color: #07a;
}

.token.function,
.token.class-name {
    color: #DD4A68;
}

.token.regex,
.token.important,
.token.variable {
    color: #e90;
}

.token.important,
.token.bold {
    font-weight: bold;
}

.token.italic {
    font-style: italic;
}

.token.entity {
    cursor: help;
}

File diff suppressed because one or more lines are too long


@@ -1,99 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.

"""Simple script for downloading structured logs from treeherder.

For the moment this is specialised to work with web-platform-tests
logs; in due course it should move somewhere generic and get hooked
up to mach or similar"""

import argparse
import os
import urllib.parse

import requests

treeherder_base = "https://treeherder.mozilla.org/"

# Interpretation of the "job" list from
# https://github.com/mozilla/treeherder-service/blob/master/treeherder/webapp/api/utils.py#L18


def create_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("branch", action="store",
                        help="Branch on which jobs ran")
    parser.add_argument("commit",
                        action="store",
                        help="Commit hash for push")
    return parser


def download(url, prefix, dest, force_suffix=True):
    # Stream the log to disk, picking a file name that does not already exist.
    if dest is None:
        dest = "."

    if prefix and not force_suffix:
        name = os.path.join(dest, prefix + ".log")
    else:
        name = None

    counter = 0
    while not name or os.path.exists(name):
        counter += 1
        sep = "" if not prefix else "-"
        name = os.path.join(dest, prefix + sep + str(counter) + ".log")

    with open(name, "wb") as f:
        resp = requests.get(url, stream=True)
        for chunk in resp.iter_content(1024):
            f.write(chunk)


def get_blobber_url(branch, job):
    # Look up the "Job Info" artifact for the job and return the URL of its
    # wpt_raw.log entry, if any.
    job_id = job["id"]
    resp = requests.get(urllib.parse.urljoin(
        treeherder_base,
        "/api/project/%s/artifact/?job_id=%i&name=Job%%20Info" % (branch, job_id)))
    job_data = resp.json()

    if job_data:
        assert len(job_data) == 1
        job_data = job_data[0]
        try:
            details = job_data["blob"]["job_details"]
            for item in details:
                if item["value"] == "wpt_raw.log":
                    return item["url"]
        except (KeyError, TypeError):
            return None


def get_structured_logs(branch, commit, dest=None):
    # Find the result set (push) for the commit, then download the structured
    # log of every web-platform-tests job in it.
    resp = requests.get(urllib.parse.urljoin(
        treeherder_base,
        "/api/project/%s/resultset/?revision=%s" % (branch, commit)))
    revision_data = resp.json()
    result_set = revision_data["results"][0]["id"]

    resp = requests.get(urllib.parse.urljoin(
        treeherder_base,
        "/api/project/%s/jobs/?result_set_id=%s&count=2000&exclusion_profile=false" % (branch, result_set)))
    job_data = resp.json()

    for result in job_data["results"]:
        job_type_name = result["job_type_name"]
        if job_type_name.startswith("W3C Web Platform"):
            url = get_blobber_url(branch, result)
            if url:
                prefix = result["platform"]
                download(url, prefix, dest)


def main():
    parser = create_parser()
    args = parser.parse_args()
    get_structured_logs(args.branch, args.commit)


if __name__ == "__main__":
    main()
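For context, a minimal sketch of how the removed Treeherder fetcher could be driven, assuming its functions are importable; the module name fetchlogs, the branch, and the revision below are illustrative placeholders, not values from this commit:

# Hypothetical usage of the Treeherder log fetcher above; "fetchlogs" is an
# assumed module name, since the diff does not show the file's path.
import fetchlogs

# Downloads the wpt_raw.log artifact of every "W3C Web Platform" job in the
# push for this branch and revision, writing one <platform>-<n>.log file per
# job into the current directory.
fetchlogs.get_structured_logs("mozilla-central", "0123456789ab")

Equivalently, the script's argparse interface takes the same two values as positional arguments (branch, then commit).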