chore: Remove some stale files (#37401)

Stale files in the root directory are really annoying.

We could probably remove more stuff in etc/ci, but I was very
conservative.

Testing: Just removing old-as-earth files.

Signed-off-by: sagudev <16504129+sagudev@users.noreply.github.com>
Authored by sagudev on 2025-06-12 00:11:06 +02:00; committed by GitHub
parent 8e1cf31db3
commit 8a14dd318a
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
7 changed files with 0 additions and 824 deletions


@@ -1,121 +0,0 @@
---
Language: Cpp
BasedOnStyle: Mozilla
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlines: Right
AlignOperands: true
AlignTrailingComments: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: MultiLine
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
  AfterClass: false
  AfterControlStatement: false
  AfterEnum: false
  AfterFunction: false
  AfterNamespace: false
  AfterObjCDeclaration: false
  AfterStruct: false
  AfterUnion: false
  AfterExternBlock: false
  BeforeCatch: false
  BeforeElse: false
  IndentBraces: false
  SplitEmptyFunction: true
  SplitEmptyRecord: true
  SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeInheritanceComma: false
BreakInheritanceList: BeforeColon
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
  - foreach
  - Q_FOREACH
  - BOOST_FOREACH
IncludeBlocks: Preserve
IncludeCategories:
  - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
    Priority: 2
  - Regex: '^(<|"(gtest|gmock|isl|json)/)'
    Priority: 3
  - Regex: '.*'
    Priority: 1
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: false
IndentPPDirectives: None
IndentWidth: 2
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: true
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Auto
ObjCBlockIndentWidth: 2
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: true
SortIncludes: false
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
StatementMacros:
  - Q_UNUSED
  - QT_REQUIRE_VERSION
TabWidth: 8
UseTab: Never
...


@@ -1,8 +0,0 @@
# To trigger a clobber replace ALL of the textual description below,
# giving a pull request number and a one line description of why a clobber is
# required.
#
# Modifying this file will now automatically clobber the buildbot machines \o/
#
Pull 16722 - Added CLOBBER file
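
For illustration only (a hypothetical entry, not part of the deleted file): triggering a new clobber meant replacing the line above with a fresh pull request number and a one-line reason, e.g.:

Pull 23456 - Updated toolchain; a clean rebuild is required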


@@ -1,177 +0,0 @@
#!/usr/bin/env python3

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.

import argparse
import csv
from datetime import datetime, date
import json
from math import floor
import os
from urllib.request import urlopen, HTTPError

SCRIPT_PATH = os.path.split(__file__)[0]


def main():
    default_output_dir = os.path.join(SCRIPT_PATH, "output")
    default_cache_dir = os.path.join(SCRIPT_PATH, ".cache")

    parser = argparse.ArgumentParser(description="Download buildbot metadata")
    parser.add_argument(
        "--index-url",
        type=str,
        default="https://build.servo.org/json",
        help="the URL to get the JSON index data index from. Default: https://build.servo.org/json",
    )
    parser.add_argument(
        "--build-url",
        type=str,
        default="https://build.servo.org/json/builders/{}/builds/{}",
        help="the URL to get the JSON build data from. Default: https://build.servo.org/json/builders/{}/builds/{}",
    )
    parser.add_argument(
        "--cache-dir",
        type=str,
        default=default_cache_dir,
        help="the directory to cache JSON files in. Default: " + default_cache_dir,
    )
    parser.add_argument(
        "--cache-name",
        type=str,
        default="build-{}-{}.json",
        help="the filename to cache JSON data in. Default: build-{}-{}.json",
    )
    parser.add_argument(
        "--output-dir",
        type=str,
        default=default_output_dir,
        help="the directory to save the CSV data to. Default: " + default_output_dir,
    )
    parser.add_argument(
        "--output-name",
        type=str,
        default="builds-{}-{}.csv",
        help="the filename to save the CSV data to. Default: builds-{}-{}.csv",
    )
    parser.add_argument("--verbose", "-v", action="store_true", help="print every HTTP request")
    args = parser.parse_args()

    os.makedirs(args.cache_dir, exist_ok=True)
    os.makedirs(args.output_dir, exist_ok=True)

    # Get the index to find out the list of builder names
    # Note: this isn't cached
    if args.verbose:
        print("Downloading index {}.".format(args.index_url))
    with urlopen(args.index_url) as response:
        index = json.loads(response.read().decode("utf-8"))

    builds = []
    for builder in sorted(index["builders"]):
        # The most recent build is at offset -1
        # Fetch it to find out the build number
        # Note: this isn't cached
        recent_build_url = args.build_url.format(builder, -1)
        if args.verbose:
            print("Downloading recent build {}.".format(recent_build_url))
        with urlopen(recent_build_url) as response:
            recent_build = json.loads(response.read().decode("utf-8"))
        recent_build_number = recent_build["number"]

        # Download each build, and convert to CSV
        for build_number in range(0, recent_build_number):
            # Rather annoyingly, we can't just use the Python http cache,
            # because it doesn't cache 404 responses. So we roll our own.
            cache_json_name = args.cache_name.format(builder, build_number)
            cache_json = os.path.join(args.cache_dir, cache_json_name)
            if os.path.isfile(cache_json):
                with open(cache_json) as f:
                    build = json.load(f)
            else:
                # Get the build data
                build_url = args.build_url.format(builder, build_number)
                if args.verbose:
                    print("Downloading build {}.".format(build_url))
                try:
                    with urlopen(build_url) as response:
                        build = json.loads(response.read().decode("utf-8"))
                except HTTPError as e:
                    if e.code == 404:
                        build = {}
                    else:
                        raise
                # Don't cache current builds.
                if build.get("currentStep"):
                    continue
                with open(cache_json, "w+") as f:
                    json.dump(build, f)
            if "times" in build:
                builds.append(build)

    years = {}
    for build in builds:
        build_date = date.fromtimestamp(build["times"][0])
        years.setdefault(build_date.year, {}).setdefault(build_date.month, []).append(build)

    for year, months in years.items():
        for month, builds in months.items():
            output_name = args.output_name.format(year, month)
            output = os.path.join(args.output_dir, output_name)
            # Create the CSV file.
            if args.verbose:
                print("Creating file {}.".format(output))
            with open(output, "w+") as output_file:
                output_csv = csv.writer(output_file)
                # The CSV column names
                output_csv.writerow(
                    [
                        "builder",
                        "buildNumber",
                        "buildTimestamp",
                        "stepName",
                        "stepText",
                        "stepNumber",
                        "stepStart",
                        "stepFinish",
                    ]
                )
                for build in builds:
                    builder = build["builderName"]
                    build_number = build["number"]
                    build_timestamp = datetime.fromtimestamp(build["times"][0]).replace(microsecond=0)
                    # Write out the timing data for each step
                    for step in build["steps"]:
                        if step["isFinished"]:
                            step_name = step["name"]
                            step_text = " ".join(step["text"])
                            step_number = step["step_number"]
                            step_start = floor(step["times"][0])
                            step_finish = floor(step["times"][1])
                            output_csv.writerow(
                                [
                                    builder,
                                    build_number,
                                    build_timestamp,
                                    step_name,
                                    step_text,
                                    step_number,
                                    step_start,
                                    step_finish,
                                ]
                            )


if __name__ == "__main__":
    main()


@@ -1,51 +0,0 @@
#!/usr/bin/env python3

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.

import argparse
import boto3


def main():
    parser = argparse.ArgumentParser(
        description=(
            "Set the policy of the servo-perf bucket. Remember to set your S3 credentials https://github.com/boto/boto3"
        )
    )
    parser.parse_args()

    s3 = boto3.resource("s3")
    BUCKET = "servo-perf"
    POLICY = """{
      "Version":"2012-10-17",
      "Statement":[
        {
          "Effect":"Allow",
          "Principal":"*",
          "Action":[
            "s3:ListBucket",
            "s3:GetBucketLocation"
          ],
          "Resource":"arn:aws:s3:::servo-perf"
        },
        {
          "Effect":"Allow",
          "Principal":"*",
          "Action":[
            "s3:GetObject",
            "s3:GetObjectAcl"
          ],
          "Resource":"arn:aws:s3:::servo-perf/*"
        }
      ]
    }"""
    s3.BucketPolicy(BUCKET).put(Policy=POLICY)

    print("Done!")


if __name__ == "__main__":
    main()


@@ -1,312 +0,0 @@
#!/usr/bin/env python3

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.

import argparse
from functools import partial, reduce
import json
import operator
import os
import random
import string
from thclient import TreeherderClient, TreeherderResultSetCollection, TreeherderJobCollection
import time

from runner import format_result_summary


def geometric_mean(iterable):
    filtered = list(filter(lambda x: x > 0, iterable))
    return (reduce(operator.mul, filtered)) ** (1.0 / len(filtered))


def format_testcase_name(name):
    temp = name.replace("http://localhost:8000/page_load_test/", "")
    temp = temp.replace("http://localhost:8000/tp6/", "")
    temp = temp.split("/")[0]
    temp = temp[0:80]
    return temp


def format_perf_data(perf_json, engine="servo"):
    suites = []
    measurement = "domComplete"  # Change this to an array when we have more

    def get_time_from_nav_start(timings, measurement):
        return timings[measurement] - timings["navigationStart"]

    measurementFromNavStart = partial(get_time_from_nav_start, measurement=measurement)

    if engine == "gecko":
        name = "gecko.{}".format(measurement)
    else:
        name = measurement

    suite = {"name": name, "value": geometric_mean(map(measurementFromNavStart, perf_json)), "subtests": []}
    for testcase in perf_json:
        if measurementFromNavStart(testcase) < 0:
            value = -1
            # print('Error: test case has negative timing. Test timeout?')
        else:
            value = measurementFromNavStart(testcase)
        suite["subtests"].append({"name": format_testcase_name(testcase["testcase"]), "value": value})
    suites.append(suite)

    return {
        "performance_data": {
            # https://bugzilla.mozilla.org/show_bug.cgi?id=1271472
            "framework": {"name": "servo-perf"},
            "suites": suites,
        }
    }


def create_resultset_collection(dataset):
    print("[DEBUG] ResultSet Collection:")
    print(dataset)
    trsc = TreeherderResultSetCollection()

    for data in dataset:
        trs = trsc.get_resultset()
        trs.add_push_timestamp(data["push_timestamp"])
        trs.add_revision(data["revision"])
        trs.add_author(data["author"])
        # TODO: figure out where type is used
        # trs.add_type(data['type'])

        revisions = []
        for rev in data["revisions"]:
            tr = trs.get_revision()
            tr.add_revision(rev["revision"])
            tr.add_author(rev["author"])
            tr.add_comment(rev["comment"])
            tr.add_repository(rev["repository"])
            revisions.append(tr)
        trs.add_revisions(revisions)

        trsc.add(trs)

    return trsc


def create_job_collection(dataset):
    print("[DEBUG] Job Collection:")
    print(dataset)
    tjc = TreeherderJobCollection()

    for data in dataset:
        tj = tjc.get_job()
        tj.add_revision(data["revision"])
        tj.add_project(data["project"])
        tj.add_coalesced_guid(data["job"]["coalesced"])
        tj.add_job_guid(data["job"]["job_guid"])
        tj.add_job_name(data["job"]["name"])
        tj.add_job_symbol(data["job"]["job_symbol"])
        tj.add_group_name(data["job"]["group_name"])
        tj.add_group_symbol(data["job"]["group_symbol"])
        tj.add_description(data["job"]["desc"])
        tj.add_product_name(data["job"]["product_name"])
        tj.add_state(data["job"]["state"])
        tj.add_result(data["job"]["result"])
        tj.add_reason(data["job"]["reason"])
        tj.add_who(data["job"]["who"])
        tj.add_tier(data["job"]["tier"])
        tj.add_submit_timestamp(data["job"]["submit_timestamp"])
        tj.add_start_timestamp(data["job"]["start_timestamp"])
        tj.add_end_timestamp(data["job"]["end_timestamp"])
        tj.add_machine(data["job"]["machine"])

        tj.add_build_info(
            data["job"]["build_platform"]["os_name"],
            data["job"]["build_platform"]["platform"],
            data["job"]["build_platform"]["architecture"],
        )

        tj.add_machine_info(
            data["job"]["machine_platform"]["os_name"],
            data["job"]["machine_platform"]["platform"],
            data["job"]["machine_platform"]["architecture"],
        )

        tj.add_option_collection(data["job"]["option_collection"])

        for artifact_data in data["job"]["artifacts"]:
            tj.add_artifact(artifact_data["name"], artifact_data["type"], artifact_data["blob"])

        tjc.add(tj)

    return tjc


# TODO: refactor this big function to smaller chunks
def submit(perf_data, failures, revision, summary, engine):
    print("[DEBUG] failures:")
    print(list(map(lambda x: x["testcase"], failures)))

    author = "{} <{}>".format(revision["author"]["name"], revision["author"]["email"])

    dataset = [
        {
            # The top-most revision in the list of commits for a push.
            "revision": revision["commit"],
            "author": author,
            "push_timestamp": int(revision["author"]["timestamp"]),
            "type": "push",
            # a list of revisions associated with the resultset. There should
            # be at least one.
            "revisions": [
                {
                    "comment": revision["subject"],
                    "revision": revision["commit"],
                    "repository": "servo",
                    "author": author,
                }
            ],
        }
    ]

    trsc = create_resultset_collection(dataset)

    result = "success"
    # TODO: verify a failed test won't affect Perfherder visualization
    # if len(failures) > 0:
    #     result = "testfailed"

    hashlen = len(revision["commit"])
    job_guid = "".join(random.choice(string.ascii_letters + string.digits) for i in range(hashlen))

    if engine == "gecko":
        project = "servo"
        job_symbol = "PLG"
        group_symbol = "SPG"
        group_name = "Servo Perf on Gecko"
    else:
        project = "servo"
        job_symbol = "PL"
        group_symbol = "SP"
        group_name = "Servo Perf"

    dataset = [
        {
            "project": project,
            "revision": revision["commit"],
            "job": {
                "job_guid": job_guid,
                "product_name": project,
                "reason": "scheduler",
                # TODO: What is `who` for?
                "who": "Servo",
                "desc": "Servo Page Load Time Tests",
                "name": "Servo Page Load Time",
                # The symbol representing the job displayed in
                # treeherder.allizom.org
                "job_symbol": job_symbol,
                # The symbol representing the job group in
                # treeherder.allizom.org
                "group_symbol": group_symbol,
                "group_name": group_name,
                # TODO: get the real timing from the test runner
                "submit_timestamp": str(int(time.time())),
                "start_timestamp": str(int(time.time())),
                "end_timestamp": str(int(time.time())),
                "state": "completed",
                "result": result,  # "success" or "testfailed"
                "machine": "local-machine",
                # TODO: read platform from test result
                "build_platform": {"platform": "linux64", "os_name": "linux", "architecture": "x86_64"},
                "machine_platform": {"platform": "linux64", "os_name": "linux", "architecture": "x86_64"},
                "option_collection": {"opt": True},
                # jobs can belong to different tiers
                # setting the tier here will determine which tier the job
                # belongs to. However, if a job is set as Tier of 1, but
                # belongs to the Tier 2 profile on the server, it will still
                # be saved as Tier 2.
                "tier": 1,
                # the ``name`` of the log can be the default of "buildbot_text"
                # however, you can use a custom name. See below.
                # TODO: point this to the log when we have them uploaded to S3
                "log_references": [{"url": "TBD", "name": "test log"}],
                # The artifact can contain any kind of structured data
                # associated with a test.
                "artifacts": [
                    {
                        "type": "json",
                        "name": "performance_data",
                        # TODO: include the job_guid when the runner actually
                        # generates one
                        # 'job_guid': job_guid,
                        "blob": perf_data,
                    },
                    {
                        "type": "json",
                        "name": "Job Info",
                        # 'job_guid': job_guid,
                        "blob": {
                            "job_details": [{"content_type": "raw_html", "title": "Result Summary", "value": summary}]
                        },
                    },
                ],
                # List of job guids that were coalesced to this job
                "coalesced": [],
            },
        }
    ]

    tjc = create_job_collection(dataset)

    # TODO: extract this read credential code out of this function.
    cred = {"client_id": os.environ["TREEHERDER_CLIENT_ID"], "secret": os.environ["TREEHERDER_CLIENT_SECRET"]}

    client = TreeherderClient(
        server_url="https://treeherder.mozilla.org", client_id=cred["client_id"], secret=cred["secret"]
    )

    # data structure validation is automatically performed here, if validation
    # fails a TreeherderClientError is raised
    client.post_collection("servo", trsc)
    client.post_collection("servo", tjc)


def main():
    parser = argparse.ArgumentParser(
        description=(
            "Submit Servo performance data to Perfherder. "
            "Remember to set your Treeherder credential as environment"
            " variable 'TREEHERDER_CLIENT_ID' and "
            "'TREEHERDER_CLIENT_SECRET'"
        )
    )
    parser.add_argument("perf_json", help="the output json from runner")
    parser.add_argument("revision_json", help="the json containing the servo revision data")
    parser.add_argument(
        "--engine",
        type=str,
        default="servo",
        help=("The engine to run the tests on. Currently only servo and gecko are supported."),
    )
    args = parser.parse_args()

    with open(args.perf_json, "r") as f:
        result_json = json.load(f)
    with open(args.revision_json, "r") as f:
        revision = json.load(f)

    perf_data = format_perf_data(result_json, args.engine)
    failures = list(filter(lambda x: x["domComplete"] == -1, result_json))
    summary = format_result_summary(result_json).replace("\n", "<br/>")

    submit(perf_data, failures, revision, summary, args.engine)
    print("Done!")


if __name__ == "__main__":
    main()


@@ -1,29 +0,0 @@
#!/usr/bin/env python3

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.

import argparse
import boto3


def main():
    parser = argparse.ArgumentParser(
        description=(
            "Submit Servo performance data to S3. Remember to set your S3 credentials https://github.com/boto/boto3"
        )
    )
    parser.add_argument("perf_file", help="the output CSV file from runner")
    parser.add_argument("perf_key", help="the S3 key to upload to")
    args = parser.parse_args()

    s3 = boto3.client("s3")
    BUCKET = "servo-perf"
    s3.upload_file(args.perf_file, BUCKET, args.perf_key)

    print("Done!")


if __name__ == "__main__":
    main()


@@ -1,126 +0,0 @@
#!/usr/bin/env python3

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.

import submit_to_perfherder


def test_format_testcase_name():
    assert "about:blank" == submit_to_perfherder.format_testcase_name("about:blank")
    assert "163.com" == submit_to_perfherder.format_testcase_name(
        ("http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html")
    )
    assert (
        "12345678902234567890323456789042345678905234567890623456789072345678908234567890"
    ) == submit_to_perfherder.format_testcase_name(
        ("123456789022345678903234567890423456789052345678906234567890723456789082345678909234567890")
    )
    assert "news.ycombinator.com" == submit_to_perfherder.format_testcase_name(
        "http://localhost:8000/tp6/news.ycombinator.com/index.html"
    )


def test_format_perf_data():
    mock_result = [
        {
            "unloadEventStart": None,
            "domLoading": 1460444930000,
            "fetchStart": None,
            "responseStart": None,
            "loadEventEnd": None,
            "connectStart": None,
            "domainLookupStart": None,
            "redirectStart": None,
            "domContentLoadedEventEnd": 1460444930000,
            "requestStart": None,
            "secureConnectionStart": None,
            "connectEnd": None,
            "navigationStart": 1460444930000,
            "loadEventStart": None,
            "domInteractive": 1460444930000,
            "domContentLoadedEventStart": 1460444930000,
            "redirectEnd": None,
            "domainLookupEnd": None,
            "unloadEventEnd": None,
            "responseEnd": None,
            "testcase": "about:blank",
            "domComplete": 1460444931000,
        },
        {
            "unloadEventStart": None,
            "domLoading": 1460444934000,
            "fetchStart": None,
            "responseStart": None,
            "loadEventEnd": None,
            "connectStart": None,
            "domainLookupStart": None,
            "redirectStart": None,
            "domContentLoadedEventEnd": 1460444946000,
            "requestStart": None,
            "secureConnectionStart": None,
            "connectEnd": None,
            "navigationStart": 1460444934000,
            "loadEventStart": None,
            "domInteractive": 1460444946000,
            "domContentLoadedEventStart": 1460444946000,
            "redirectEnd": None,
            "domainLookupEnd": None,
            "unloadEventEnd": None,
            "responseEnd": None,
            "testcase": (
                "http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html"
            ),
            "domComplete": 1460444948000,
        },
    ]
    expected = {
        "performance_data": {
            "framework": {"name": "servo-perf"},
            "suites": [
                {
                    "name": "domComplete",
                    "value": 3741.657386773941,
                    "subtests": [
                        {"name": "about:blank", "value": 1000},
                        {"name": "163.com", "value": 14000},
                    ],
                }
            ],
        }
    }
    result = submit_to_perfherder.format_perf_data(mock_result)
    assert expected == result


def test_format_bad_perf_data():
    mock_result = [
        {"navigationStart": 1460444930000, "testcase": "about:blank", "domComplete": 0},
        {
            "navigationStart": 1460444934000,
            "testcase": (
                "http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html"
            ),
            "domComplete": 1460444948000,
        },
    ]
    expected = {
        "performance_data": {
            "framework": {"name": "servo-perf"},
            "suites": [
                {
                    "name": "domComplete",
                    "value": 14000.0,
                    "subtests": [
                        {"name": "about:blank", "value": -1},  # Timeout
                        {"name": "163.com", "value": 14000},
                    ],
                }
            ],
        }
    }
    result = submit_to_perfherder.format_perf_data(mock_result)
    assert expected == result