Use ruff to enforce python code formatting (#37117)

Requires servo/servo#37045 for deps and config; an illustrative sketch of the formatting gate follows the commit metadata below.

Testing: No need for tests to test tests.
Fixes: servo/servo#37041

---------

Signed-off-by: zefr0x <zer0-x.7ty50@aleeas.com>
Author: zefr0x
Date: 2025-05-26 14:54:43 +03:00 (committed by GitHub)
Parent: 41ecfb53a1
Commit: c96de69e80
GPG key ID: B5690EEEBB952194
67 changed files with 3021 additions and 3085 deletions
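
For context, ruff's formatter is designed to work as a simple pass/fail gate in CI or a pre-commit hook. The snippet below is a minimal illustrative sketch only, not part of this commit: it assumes a `ruff` binary on PATH, the wrapper function and checked paths are hypothetical, and the real dependency pins and configuration come from servo/servo#37045.

#!/usr/bin/env python3
# Illustrative sketch, not part of this commit; the actual deps and
# configuration for Servo are added in servo/servo#37045.
import subprocess
import sys


def check_formatting(paths: list[str]) -> int:
    # `ruff format --check` lists files that would be reformatted and
    # exits non-zero if any are found, which makes it usable as a gate.
    return subprocess.run(["ruff", "format", "--check", *paths]).returncode


if __name__ == "__main__":
    # Hypothetical paths; adjust to the tree being checked.
    sys.exit(check_formatting(["python/", "tests/"]))

Running `ruff format` without `--check` rewrites files in place, which is presumably what produced the mechanical quote and line-wrapping changes in the diff below.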

@@ -17,27 +17,31 @@ import os
def size(args):
size = os.path.getsize(args.binary)
print(size)
with open(args.bmf_output, 'w', encoding='utf-8') as f:
json.dump({
with open(args.bmf_output, "w", encoding="utf-8") as f:
json.dump(
{
args.variant: {
'file-size': {
'value': float(size),
"file-size": {
"value": float(size),
}
}
}, f, indent=4)
},
f,
indent=4,
)
def merge(args):
output: dict[str, object] = dict()
for input_file in args.inputs:
with open(input_file, 'r', encoding='utf-8') as f:
with open(input_file, "r", encoding="utf-8") as f:
data = json.load(f)
diff = set(data) & set(output)
if diff:
print("Duplicated keys:", diff)
output = data | output
with open(args.bmf_output, 'w', encoding='utf-8') as f:
with open(args.bmf_output, "w", encoding="utf-8") as f:
json.dump(output, f, indent=4)

@@ -24,7 +24,7 @@ TEST_CMD = [
"--log-raw=-",
# We run the content-security-policy test because it creates
# cross-origin iframes, which are good for stress-testing pipelines
"content-security-policy"
"content-security-policy",
]
# Note that there will probably be test failures caused
@@ -35,7 +35,7 @@ test_results = Popen(TEST_CMD, stdout=PIPE)
any_crashes = False
for line in test_results.stdout:
report = json.loads(line.decode('utf-8'))
report = json.loads(line.decode("utf-8"))
if report.get("action") == "process_output":
print("{} - {}".format(report.get("thread"), report.get("data")))
status = report.get("status")

@@ -12,35 +12,46 @@ import re
import subprocess
import sys
symbol_regex = re.compile(br"D \*UND\*\t(.*) (.*)$")
allowed_symbols = frozenset([
b'unshare',
b'malloc_usable_size',
b'__cxa_type_match',
b'signal',
b'tcgetattr',
b'tcsetattr',
b'__strncpy_chk2',
b'rand',
b'__read_chk',
b'fesetenv',
b'srand',
b'abs',
b'fegetenv',
b'sigemptyset',
b'AHardwareBuffer_allocate',
b'AHardwareBuffer_release',
b'getentropy',
])
symbol_regex = re.compile(rb"D \*UND\*\t(.*) (.*)$")
allowed_symbols = frozenset(
[
b"unshare",
b"malloc_usable_size",
b"__cxa_type_match",
b"signal",
b"tcgetattr",
b"tcsetattr",
b"__strncpy_chk2",
b"rand",
b"__read_chk",
b"fesetenv",
b"srand",
b"abs",
b"fegetenv",
b"sigemptyset",
b"AHardwareBuffer_allocate",
b"AHardwareBuffer_release",
b"getentropy",
]
)
actual_symbols = set()
objdump_output = subprocess.check_output([
objdump_output = subprocess.check_output(
[
os.path.join(
'android-toolchains', 'ndk', 'toolchains', 'arm-linux-androideabi-4.9',
'prebuilt', 'linux-x86_64', 'bin', 'arm-linux-androideabi-objdump'),
'-T',
'target/android/armv7-linux-androideabi/debug/libservoshell.so']
).split(b'\n')
"android-toolchains",
"ndk",
"toolchains",
"arm-linux-androideabi-4.9",
"prebuilt",
"linux-x86_64",
"bin",
"arm-linux-androideabi-objdump",
),
"-T",
"target/android/armv7-linux-androideabi/debug/libservoshell.so",
]
).split(b"\n")
for line in objdump_output:
m = symbol_regex.search(line)

@@ -16,43 +16,47 @@ SCRIPT_PATH = os.path.split(__file__)[0]
def main():
default_output_dir = os.path.join(SCRIPT_PATH, 'output')
default_cache_dir = os.path.join(SCRIPT_PATH, '.cache')
default_output_dir = os.path.join(SCRIPT_PATH, "output")
default_cache_dir = os.path.join(SCRIPT_PATH, ".cache")
parser = argparse.ArgumentParser(
description="Download buildbot metadata"
parser = argparse.ArgumentParser(description="Download buildbot metadata")
parser.add_argument(
"--index-url",
type=str,
default="https://build.servo.org/json",
help="the URL to get the JSON index data index from. Default: https://build.servo.org/json",
)
parser.add_argument("--index-url",
parser.add_argument(
"--build-url",
type=str,
default='https://build.servo.org/json',
help="the URL to get the JSON index data index from. "
"Default: https://build.servo.org/json")
parser.add_argument("--build-url",
type=str,
default='https://build.servo.org/json/builders/{}/builds/{}',
help="the URL to get the JSON build data from. "
"Default: https://build.servo.org/json/builders/{}/builds/{}")
parser.add_argument("--cache-dir",
default="https://build.servo.org/json/builders/{}/builds/{}",
help="the URL to get the JSON build data from. Default: https://build.servo.org/json/builders/{}/builds/{}",
)
parser.add_argument(
"--cache-dir",
type=str,
default=default_cache_dir,
help="the directory to cache JSON files in. Default: " + default_cache_dir)
parser.add_argument("--cache-name",
help="the directory to cache JSON files in. Default: " + default_cache_dir,
)
parser.add_argument(
"--cache-name",
type=str,
default='build-{}-{}.json',
help="the filename to cache JSON data in. "
"Default: build-{}-{}.json")
parser.add_argument("--output-dir",
default="build-{}-{}.json",
help="the filename to cache JSON data in. Default: build-{}-{}.json",
)
parser.add_argument(
"--output-dir",
type=str,
default=default_output_dir,
help="the directory to save the CSV data to. Default: " + default_output_dir)
parser.add_argument("--output-name",
help="the directory to save the CSV data to. Default: " + default_output_dir,
)
parser.add_argument(
"--output-name",
type=str,
default='builds-{}-{}.csv',
help="the filename to save the CSV data to. "
"Default: builds-{}-{}.csv")
parser.add_argument("--verbose", "-v",
action='store_true',
help="print every HTTP request")
default="builds-{}-{}.csv",
help="the filename to save the CSV data to. Default: builds-{}-{}.csv",
)
parser.add_argument("--verbose", "-v", action="store_true", help="print every HTTP request")
args = parser.parse_args()
os.makedirs(args.cache_dir, exist_ok=True)
@@ -63,7 +67,7 @@ def main():
if args.verbose:
print("Downloading index {}.".format(args.index_url))
with urlopen(args.index_url) as response:
index = json.loads(response.read().decode('utf-8'))
index = json.loads(response.read().decode("utf-8"))
builds = []
@@ -75,12 +79,11 @@ def main():
if args.verbose:
print("Downloading recent build {}.".format(recent_build_url))
with urlopen(recent_build_url) as response:
recent_build = json.loads(response.read().decode('utf-8'))
recent_build = json.loads(response.read().decode("utf-8"))
recent_build_number = recent_build["number"]
# Download each build, and convert to CSV
for build_number in range(0, recent_build_number):
# Rather annoyingly, we can't just use the Python http cache,
# because it doesn't cache 404 responses. So we roll our own.
cache_json_name = args.cache_name.format(builder, build_number)
@@ -96,7 +99,7 @@ def main():
print("Downloading build {}.".format(build_url))
try:
with urlopen(build_url) as response:
build = json.loads(response.read().decode('utf-8'))
build = json.loads(response.read().decode("utf-8"))
except HTTPError as e:
if e.code == 404:
build = {}
@@ -104,46 +107,46 @@ def main():
raise
# Don't cache current builds.
if build.get('currentStep'):
if build.get("currentStep"):
continue
with open(cache_json, 'w+') as f:
with open(cache_json, "w+") as f:
json.dump(build, f)
if 'times' in build:
if "times" in build:
builds.append(build)
years = {}
for build in builds:
build_date = date.fromtimestamp(build['times'][0])
build_date = date.fromtimestamp(build["times"][0])
years.setdefault(build_date.year, {}).setdefault(build_date.month, []).append(build)
for year, months in years.items():
for month, builds in months.items():
output_name = args.output_name.format(year, month)
output = os.path.join(args.output_dir, output_name)
# Create the CSV file.
if args.verbose:
print('Creating file {}.'.format(output))
with open(output, 'w+') as output_file:
print("Creating file {}.".format(output))
with open(output, "w+") as output_file:
output_csv = csv.writer(output_file)
# The CSV column names
output_csv.writerow([
'builder',
'buildNumber',
'buildTimestamp',
'stepName',
'stepText',
'stepNumber',
'stepStart',
'stepFinish'
])
output_csv.writerow(
[
"builder",
"buildNumber",
"buildTimestamp",
"stepName",
"stepText",
"stepNumber",
"stepStart",
"stepFinish",
]
)
for build in builds:
builder = build["builderName"]
build_number = build["number"]
build_timestamp = datetime.fromtimestamp(build["times"][0]).replace(microsecond=0)
@@ -152,11 +155,12 @@ def main():
for step in build["steps"]:
if step["isFinished"]:
step_name = step["name"]
step_text = ' '.join(step["text"])
step_text = " ".join(step["text"])
step_number = step["step_number"]
step_start = floor(step["times"][0])
step_finish = floor(step["times"][1])
output_csv.writerow([
output_csv.writerow(
[
builder,
build_number,
build_timestamp,
@@ -164,8 +168,9 @@ def main():
step_text,
step_number,
step_start,
step_finish
])
step_finish,
]
)
if __name__ == "__main__":

@@ -15,7 +15,7 @@ import sys
@contextmanager
def create_gecko_session():
try:
firefox_binary = os.environ['FIREFOX_BIN']
firefox_binary = os.environ["FIREFOX_BIN"]
except KeyError:
print("+=============================================================+")
print("| You must set the path to your firefox binary to FIREFOX_BIN |")
@@ -36,10 +36,7 @@ def generate_placeholder(testcase):
# use a placeholder with values = -1 to make Treeherder happy, and still be
# able to identify failed tests (successful tests have time >=0).
timings = {
"testcase": testcase,
"title": ""
}
timings = {"testcase": testcase, "title": ""}
timing_names = [
"navigationStart",
@@ -81,16 +78,9 @@ def run_gecko_test(testcase, url, date, timeout, is_async):
return generate_placeholder(testcase)
try:
timings = {
"testcase": testcase,
"title": driver.title.replace(",", "&#44;")
}
timings = {"testcase": testcase, "title": driver.title.replace(",", "&#44;")}
timings.update(json.loads(
driver.execute_script(
"return JSON.stringify(performance.timing)"
)
))
timings.update(json.loads(driver.execute_script("return JSON.stringify(performance.timing)")))
except Exception:
# We need to return a timing object no matter what happened.
# See the comment in generate_placeholder() for explanation
@@ -101,17 +91,14 @@ def run_gecko_test(testcase, url, date, timeout, is_async):
# TODO: the timeout is hardcoded
driver.implicitly_wait(5) # sec
driver.find_element_by_id("GECKO_TEST_DONE")
timings.update(json.loads(
driver.execute_script(
"return JSON.stringify(window.customTimers)"
)
))
timings.update(json.loads(driver.execute_script("return JSON.stringify(window.customTimers)")))
return [timings]
if __name__ == '__main__':
if __name__ == "__main__":
# Just for manual testing
from pprint import pprint
url = "http://localhost:8000/page_load_test/tp5n/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html"
pprint(run_gecko_test(url, 15))

@@ -23,14 +23,13 @@ SYSTEM = platform.system()
def load_manifest(filename):
with open(filename, 'r') as f:
with open(filename, "r") as f:
text = f.read()
return list(parse_manifest(text))
def parse_manifest(text):
lines = filter(lambda x: x != "" and not x.startswith("#"),
map(lambda x: x.strip(), text.splitlines()))
lines = filter(lambda x: x != "" and not x.startswith("#"), map(lambda x: x.strip(), text.splitlines()))
output = []
for line in lines:
if line.split(" ")[0] == "async":
@@ -46,21 +45,18 @@ def testcase_url(base, testcase):
# the server on port 80. To allow non-root users to run the test
# case, we take the URL to be relative to a base URL.
(scheme, netloc, path, query, fragment) = urlsplit(testcase)
relative_url = urlunsplit(('', '', '.' + path, query, fragment))
relative_url = urlunsplit(("", "", "." + path, query, fragment))
absolute_url = urljoin(base, relative_url)
return absolute_url
def execute_test(url, command, timeout):
try:
return subprocess.check_output(
command, stderr=subprocess.STDOUT, timeout=timeout
)
return subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
except subprocess.CalledProcessError as e:
print("Unexpected Fail:")
print(e)
print("You may want to re-run the test manually:\n{}"
.format(' '.join(command)))
print("You may want to re-run the test manually:\n{}".format(" ".join(command)))
except subprocess.TimeoutExpired:
print("Test FAILED due to timeout: {}".format(url))
return ""
@@ -74,22 +70,21 @@ def run_servo_test(testcase, url, date, timeout, is_async):
ua_script_path = "{}/user-agent-js".format(os.getcwd())
command = [
"../../../target/release/servo", url,
"../../../target/release/servo",
url,
"--userscripts=" + ua_script_path,
"--headless",
"-x", "-o", "output.png"
"-x",
"-o",
"output.png",
]
log = ""
try:
log = subprocess.check_output(
command, stderr=subprocess.STDOUT, timeout=timeout
)
log = subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
except subprocess.CalledProcessError as e:
print("Unexpected Fail:")
print(e)
print("You may want to re-run the test manually:\n{}".format(
' '.join(command)
))
print("You may want to re-run the test manually:\n{}".format(" ".join(command)))
except subprocess.TimeoutExpired:
print("Test FAILED due to timeout: {}".format(testcase))
return parse_log(log, testcase, url, date)
@@ -100,7 +95,7 @@ def parse_log(log, testcase, url, date):
block = []
copy = False
for line_bytes in log.splitlines():
line = line_bytes.decode('utf-8')
line = line_bytes.decode("utf-8")
if line.strip() == ("[PERF] perf block start"):
copy = True
@@ -119,10 +114,10 @@ def parse_log(log, testcase, url, date):
except ValueError:
print("[DEBUG] failed to parse the following line:")
print(line)
print('[DEBUG] log:')
print('-----')
print("[DEBUG] log:")
print("-----")
print(log)
print('-----')
print("-----")
return None
if key == "testcase" or key == "title":
@@ -133,10 +128,12 @@ def parse_log(log, testcase, url, date):
return timing
def valid_timing(timing, url=None):
if (timing is None
if (
timing is None
or testcase is None
or timing.get('title') == 'Error loading page'
or timing.get('testcase') != url):
or timing.get("title") == "Error loading page"
or timing.get("testcase") != url
):
return False
else:
return True
@@ -178,10 +175,10 @@ def parse_log(log, testcase, url, date):
# Set the testcase field to contain the original testcase name,
# rather than the url.
def set_testcase(timing, testcase=None, date=None):
timing['testcase'] = testcase
timing['system'] = SYSTEM
timing['machine'] = MACHINE
timing['date'] = date
timing["testcase"] = testcase
timing["system"] = SYSTEM
timing["machine"] = MACHINE
timing["date"] = date
return timing
valid_timing_for_case = partial(valid_timing, url=url)
@@ -190,10 +187,10 @@ def parse_log(log, testcase, url, date):
if len(timings) == 0:
print("Didn't find any perf data in the log, test timeout?")
print('[DEBUG] log:')
print('-----')
print("[DEBUG] log:")
print("-----")
print(log)
print('-----')
print("-----")
return [create_placeholder(testcase)]
else:
@@ -204,22 +201,25 @@ def filter_result_by_manifest(result_json, manifest, base):
filtered = []
for name, is_async in manifest:
url = testcase_url(base, name)
match = [tc for tc in result_json if tc['testcase'] == url]
match = [tc for tc in result_json if tc["testcase"] == url]
if len(match) == 0:
raise Exception(("Missing test result: {}. This will cause a "
raise Exception(
(
"Missing test result: {}. This will cause a "
"discontinuity in the treeherder graph, "
"so we won't submit this data.").format(name))
"so we won't submit this data."
).format(name)
)
filtered += match
return filtered
def take_result_median(result_json, expected_runs):
median_results = []
for k, g in itertools.groupby(result_json, lambda x: x['testcase']):
for k, g in itertools.groupby(result_json, lambda x: x["testcase"]):
group = list(g)
if len(group) != expected_runs:
print(("Warning: Not enough test data for {},"
" maybe some runs failed?").format(k))
print(("Warning: Not enough test data for {}, maybe some runs failed?").format(k))
median_result = {}
for k, _ in group[0].items():
@@ -227,8 +227,7 @@ def take_result_median(result_json, expected_runs):
median_result[k] = group[0][k]
else:
try:
median_result[k] = median([x[k] for x in group
if x[k] is not None])
median_result[k] = median([x[k] for x in group if x[k] is not None])
except StatisticsError:
median_result[k] = -1
median_results.append(median_result)
@@ -236,72 +235,65 @@ def take_result_median(result_json, expected_runs):
def save_result_json(results, filename, manifest, expected_runs, base):
results = filter_result_by_manifest(results, manifest, base)
results = take_result_median(results, expected_runs)
if len(results) == 0:
with open(filename, 'w') as f:
json.dump("No test result found in the log. All tests timeout?",
f, indent=2)
with open(filename, "w") as f:
json.dump("No test result found in the log. All tests timeout?", f, indent=2)
else:
with open(filename, 'w') as f:
with open(filename, "w") as f:
json.dump(results, f, indent=2)
print("Result saved to {}".format(filename))
def save_result_csv(results, filename, manifest, expected_runs, base):
fieldnames = [
'system',
'machine',
'date',
'testcase',
'title',
'connectEnd',
'connectStart',
'domComplete',
'domContentLoadedEventEnd',
'domContentLoadedEventStart',
'domInteractive',
'domLoading',
'domainLookupEnd',
'domainLookupStart',
'fetchStart',
'loadEventEnd',
'loadEventStart',
'navigationStart',
'redirectEnd',
'redirectStart',
'requestStart',
'responseEnd',
'responseStart',
'secureConnectionStart',
'unloadEventEnd',
'unloadEventStart',
"system",
"machine",
"date",
"testcase",
"title",
"connectEnd",
"connectStart",
"domComplete",
"domContentLoadedEventEnd",
"domContentLoadedEventStart",
"domInteractive",
"domLoading",
"domainLookupEnd",
"domainLookupStart",
"fetchStart",
"loadEventEnd",
"loadEventStart",
"navigationStart",
"redirectEnd",
"redirectStart",
"requestStart",
"responseEnd",
"responseStart",
"secureConnectionStart",
"unloadEventEnd",
"unloadEventStart",
]
successes = [r for r in results if r['domComplete'] != -1]
successes = [r for r in results if r["domComplete"] != -1]
with open(filename, 'w', encoding='utf-8') as csvfile:
with open(filename, "w", encoding="utf-8") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames)
writer.writeheader()
writer.writerows(successes)
def format_result_summary(results):
failures = list(filter(lambda x: x['domComplete'] == -1, results))
failures = list(filter(lambda x: x["domComplete"] == -1, results))
result_log = """
========================================
Total {total} tests; {suc} succeeded, {fail} failed.
Failure summary:
""".format(
total=len(results),
suc=len(list(filter(lambda x: x['domComplete'] != -1, results))),
fail=len(failures)
)
uniq_failures = list(set(map(lambda x: x['testcase'], failures)))
""".format(total=len(results), suc=len(list(filter(lambda x: x["domComplete"] != -1, results))), fail=len(failures))
uniq_failures = list(set(map(lambda x: x["testcase"], failures)))
for failure in uniq_failures:
result_log += " - {}\n".format(failure)
@@ -311,40 +303,40 @@ Failure summary:
def main():
parser = argparse.ArgumentParser(
description="Run page load test on servo"
)
parser.add_argument("tp5_manifest",
help="the test manifest in tp5 format")
parser.add_argument("output_file",
help="filename for the output json")
parser.add_argument("--base",
parser = argparse.ArgumentParser(description="Run page load test on servo")
parser.add_argument("tp5_manifest", help="the test manifest in tp5 format")
parser.add_argument("output_file", help="filename for the output json")
parser.add_argument(
"--base",
type=str,
default='http://localhost:8000/',
help="the base URL for tests. Default: http://localhost:8000/")
parser.add_argument("--runs",
type=int,
default=20,
help="number of runs for each test case. Defult: 20")
parser.add_argument("--timeout",
default="http://localhost:8000/",
help="the base URL for tests. Default: http://localhost:8000/",
)
parser.add_argument("--runs", type=int, default=20, help="number of runs for each test case. Defult: 20")
parser.add_argument(
"--timeout",
type=int,
default=300, # 5 min
help=("kill the test if not finished in time (sec)."
" Default: 5 min"))
parser.add_argument("--date",
help=("kill the test if not finished in time (sec). Default: 5 min"),
)
parser.add_argument(
"--date",
type=str,
default=None, # 5 min
help=("the date to use in the CSV file."))
parser.add_argument("--engine",
help=("the date to use in the CSV file."),
)
parser.add_argument(
"--engine",
type=str,
default='servo',
help=("The engine to run the tests on. Currently only"
" servo and gecko are supported."))
default="servo",
help=("The engine to run the tests on. Currently only servo and gecko are supported."),
)
args = parser.parse_args()
if args.engine == 'servo':
if args.engine == "servo":
run_test = run_servo_test
elif args.engine == 'gecko':
elif args.engine == "gecko":
import gecko_driver # Load this only when we need gecko test
run_test = gecko_driver.run_gecko_test
date = args.date or DATE
try:
@@ -354,9 +346,7 @@ def main():
for testcase, is_async in testcases:
url = testcase_url(args.base, testcase)
for run in range(args.runs):
print("Running test {}/{} on {}".format(run + 1,
args.runs,
url))
print("Running test {}/{} on {}".format(run + 1, args.runs, url))
# results will be a mixture of timings dict and testcase strings
# testcase string indicates a failed test
results += run_test(testcase, url, date, args.timeout, is_async)
@@ -364,7 +354,7 @@ def main():
# TODO: Record and analyze other performance.timing properties
print(format_result_summary(results))
if args.output_file.endswith('.csv'):
if args.output_file.endswith(".csv"):
save_result_csv(results, args.output_file, testcases, args.runs, args.base)
else:
save_result_json(results, args.output_file, testcases, args.runs, args.base)

@@ -10,13 +10,14 @@ import boto3
def main():
parser = argparse.ArgumentParser(
description=("Set the policy of the servo-perf bucket. "
"Remember to set your S3 credentials "
"https://github.com/boto/boto3"))
description=(
"Set the policy of the servo-perf bucket. Remember to set your S3 credentials https://github.com/boto/boto3"
)
)
parser.parse_args()
s3 = boto3.resource('s3')
BUCKET = 'servo-perf'
s3 = boto3.resource("s3")
BUCKET = "servo-perf"
POLICY = """{
"Version":"2012-10-17",
"Statement":[

@@ -11,8 +11,7 @@ import operator
import os
import random
import string
from thclient import (TreeherderClient, TreeherderResultSetCollection,
TreeherderJobCollection)
from thclient import TreeherderClient, TreeherderResultSetCollection, TreeherderJobCollection
import time
from runner import format_result_summary
@@ -24,33 +23,28 @@ def geometric_mean(iterable):
def format_testcase_name(name):
temp = name.replace('http://localhost:8000/page_load_test/', '')
temp = temp.replace('http://localhost:8000/tp6/', '')
temp = temp.split('/')[0]
temp = name.replace("http://localhost:8000/page_load_test/", "")
temp = temp.replace("http://localhost:8000/tp6/", "")
temp = temp.split("/")[0]
temp = temp[0:80]
return temp
def format_perf_data(perf_json, engine='servo'):
def format_perf_data(perf_json, engine="servo"):
suites = []
measurement = "domComplete" # Change this to an array when we have more
def get_time_from_nav_start(timings, measurement):
return timings[measurement] - timings['navigationStart']
return timings[measurement] - timings["navigationStart"]
measurementFromNavStart = partial(get_time_from_nav_start,
measurement=measurement)
measurementFromNavStart = partial(get_time_from_nav_start, measurement=measurement)
if (engine == 'gecko'):
name = 'gecko.{}'.format(measurement)
if engine == "gecko":
name = "gecko.{}".format(measurement)
else:
name = measurement
suite = {
"name": name,
"value": geometric_mean(map(measurementFromNavStart, perf_json)),
"subtests": []
}
suite = {"name": name, "value": geometric_mean(map(measurementFromNavStart, perf_json)), "subtests": []}
for testcase in perf_json:
if measurementFromNavStart(testcase) < 0:
value = -1
@@ -58,10 +52,7 @@ def format_perf_data(perf_json, engine='servo'):
else:
value = measurementFromNavStart(testcase)
suite["subtests"].append({
"name": format_testcase_name(testcase["testcase"]),
"value": value
})
suite["subtests"].append({"name": format_testcase_name(testcase["testcase"]), "value": value})
suites.append(suite)
@@ -69,7 +60,7 @@ def format_perf_data(perf_json, engine='servo'):
"performance_data": {
# https://bugzilla.mozilla.org/show_bug.cgi?id=1271472
"framework": {"name": "servo-perf"},
"suites": suites
"suites": suites,
}
}
@@ -82,20 +73,20 @@ def create_resultset_collection(dataset):
for data in dataset:
trs = trsc.get_resultset()
trs.add_push_timestamp(data['push_timestamp'])
trs.add_revision(data['revision'])
trs.add_author(data['author'])
trs.add_push_timestamp(data["push_timestamp"])
trs.add_revision(data["revision"])
trs.add_author(data["author"])
# TODO: figure out where type is used
# trs.add_type(data['type'])
revisions = []
for rev in data['revisions']:
for rev in data["revisions"]:
tr = trs.get_revision()
tr.add_revision(rev['revision'])
tr.add_author(rev['author'])
tr.add_comment(rev['comment'])
tr.add_repository(rev['repository'])
tr.add_revision(rev["revision"])
tr.add_author(rev["author"])
tr.add_comment(rev["comment"])
tr.add_repository(rev["repository"])
revisions.append(tr)
trs.add_revisions(revisions)
@@ -114,46 +105,42 @@ def create_job_collection(dataset):
for data in dataset:
tj = tjc.get_job()
tj.add_revision(data['revision'])
tj.add_project(data['project'])
tj.add_coalesced_guid(data['job']['coalesced'])
tj.add_job_guid(data['job']['job_guid'])
tj.add_job_name(data['job']['name'])
tj.add_job_symbol(data['job']['job_symbol'])
tj.add_group_name(data['job']['group_name'])
tj.add_group_symbol(data['job']['group_symbol'])
tj.add_description(data['job']['desc'])
tj.add_product_name(data['job']['product_name'])
tj.add_state(data['job']['state'])
tj.add_result(data['job']['result'])
tj.add_reason(data['job']['reason'])
tj.add_who(data['job']['who'])
tj.add_tier(data['job']['tier'])
tj.add_submit_timestamp(data['job']['submit_timestamp'])
tj.add_start_timestamp(data['job']['start_timestamp'])
tj.add_end_timestamp(data['job']['end_timestamp'])
tj.add_machine(data['job']['machine'])
tj.add_revision(data["revision"])
tj.add_project(data["project"])
tj.add_coalesced_guid(data["job"]["coalesced"])
tj.add_job_guid(data["job"]["job_guid"])
tj.add_job_name(data["job"]["name"])
tj.add_job_symbol(data["job"]["job_symbol"])
tj.add_group_name(data["job"]["group_name"])
tj.add_group_symbol(data["job"]["group_symbol"])
tj.add_description(data["job"]["desc"])
tj.add_product_name(data["job"]["product_name"])
tj.add_state(data["job"]["state"])
tj.add_result(data["job"]["result"])
tj.add_reason(data["job"]["reason"])
tj.add_who(data["job"]["who"])
tj.add_tier(data["job"]["tier"])
tj.add_submit_timestamp(data["job"]["submit_timestamp"])
tj.add_start_timestamp(data["job"]["start_timestamp"])
tj.add_end_timestamp(data["job"]["end_timestamp"])
tj.add_machine(data["job"]["machine"])
tj.add_build_info(
data['job']['build_platform']['os_name'],
data['job']['build_platform']['platform'],
data['job']['build_platform']['architecture']
data["job"]["build_platform"]["os_name"],
data["job"]["build_platform"]["platform"],
data["job"]["build_platform"]["architecture"],
)
tj.add_machine_info(
data['job']['machine_platform']['os_name'],
data['job']['machine_platform']['platform'],
data['job']['machine_platform']['architecture']
data["job"]["machine_platform"]["os_name"],
data["job"]["machine_platform"]["platform"],
data["job"]["machine_platform"]["architecture"],
)
tj.add_option_collection(data['job']['option_collection'])
tj.add_option_collection(data["job"]["option_collection"])
for artifact_data in data['job']['artifacts']:
tj.add_artifact(
artifact_data['name'],
artifact_data['type'],
artifact_data['blob']
)
for artifact_data in data["job"]["artifacts"]:
tj.add_artifact(artifact_data["name"], artifact_data["type"], artifact_data["blob"])
tjc.add(tj)
return tjc
@@ -161,30 +148,28 @@ def create_job_collection(dataset):
# TODO: refactor this big function to smaller chunks
def submit(perf_data, failures, revision, summary, engine):
print("[DEBUG] failures:")
print(list(map(lambda x: x['testcase'], failures)))
print(list(map(lambda x: x["testcase"], failures)))
author = "{} <{}>".format(revision['author']['name'],
revision['author']['email'])
author = "{} <{}>".format(revision["author"]["name"], revision["author"]["email"])
dataset = [
{
# The top-most revision in the list of commits for a push.
'revision': revision['commit'],
'author': author,
'push_timestamp': int(revision['author']['timestamp']),
'type': 'push',
"revision": revision["commit"],
"author": author,
"push_timestamp": int(revision["author"]["timestamp"]),
"type": "push",
# a list of revisions associated with the resultset. There should
# be at least one.
'revisions': [
"revisions": [
{
'comment': revision['subject'],
'revision': revision['commit'],
'repository': 'servo',
'author': author
"comment": revision["subject"],
"revision": revision["commit"],
"repository": "servo",
"author": author,
}
]
],
}
]
@@ -195,158 +180,129 @@ def submit(perf_data, failures, revision, summary, engine):
# if len(failures) > 0:
# result = "testfailed"
hashlen = len(revision['commit'])
job_guid = ''.join(
random.choice(string.ascii_letters + string.digits) for i in range(hashlen)
)
hashlen = len(revision["commit"])
job_guid = "".join(random.choice(string.ascii_letters + string.digits) for i in range(hashlen))
if (engine == "gecko"):
if engine == "gecko":
project = "servo"
job_symbol = 'PLG'
group_symbol = 'SPG'
group_name = 'Servo Perf on Gecko'
job_symbol = "PLG"
group_symbol = "SPG"
group_name = "Servo Perf on Gecko"
else:
project = "servo"
job_symbol = 'PL'
group_symbol = 'SP'
group_name = 'Servo Perf'
job_symbol = "PL"
group_symbol = "SP"
group_name = "Servo Perf"
dataset = [
{
'project': project,
'revision': revision['commit'],
'job': {
'job_guid': job_guid,
'product_name': project,
'reason': 'scheduler',
"project": project,
"revision": revision["commit"],
"job": {
"job_guid": job_guid,
"product_name": project,
"reason": "scheduler",
# TODO: What is `who` for?
'who': 'Servo',
'desc': 'Servo Page Load Time Tests',
'name': 'Servo Page Load Time',
"who": "Servo",
"desc": "Servo Page Load Time Tests",
"name": "Servo Page Load Time",
# The symbol representing the job displayed in
# treeherder.allizom.org
'job_symbol': job_symbol,
"job_symbol": job_symbol,
# The symbol representing the job group in
# treeherder.allizom.org
'group_symbol': group_symbol,
'group_name': group_name,
"group_symbol": group_symbol,
"group_name": group_name,
# TODO: get the real timing from the test runner
'submit_timestamp': str(int(time.time())),
'start_timestamp': str(int(time.time())),
'end_timestamp': str(int(time.time())),
'state': 'completed',
'result': result, # "success" or "testfailed"
'machine': 'local-machine',
"submit_timestamp": str(int(time.time())),
"start_timestamp": str(int(time.time())),
"end_timestamp": str(int(time.time())),
"state": "completed",
"result": result, # "success" or "testfailed"
"machine": "local-machine",
# TODO: read platform from test result
'build_platform': {
'platform': 'linux64',
'os_name': 'linux',
'architecture': 'x86_64'
},
'machine_platform': {
'platform': 'linux64',
'os_name': 'linux',
'architecture': 'x86_64'
},
'option_collection': {'opt': True},
"build_platform": {"platform": "linux64", "os_name": "linux", "architecture": "x86_64"},
"machine_platform": {"platform": "linux64", "os_name": "linux", "architecture": "x86_64"},
"option_collection": {"opt": True},
# jobs can belong to different tiers
# setting the tier here will determine which tier the job
# belongs to. However, if a job is set as Tier of 1, but
# belongs to the Tier 2 profile on the server, it will still
# be saved as Tier 2.
'tier': 1,
"tier": 1,
# the ``name`` of the log can be the default of "buildbot_text"
# however, you can use a custom name. See below.
# TODO: point this to the log when we have them uploaded to S3
'log_references': [
{
'url': 'TBD',
'name': 'test log'
}
],
"log_references": [{"url": "TBD", "name": "test log"}],
# The artifact can contain any kind of structured data
# associated with a test.
'artifacts': [
"artifacts": [
{
'type': 'json',
'name': 'performance_data',
"type": "json",
"name": "performance_data",
# TODO: include the job_guid when the runner actually
# generates one
# 'job_guid': job_guid,
'blob': perf_data
"blob": perf_data,
},
{
'type': 'json',
'name': 'Job Info',
"type": "json",
"name": "Job Info",
# 'job_guid': job_guid,
"blob": {
"job_details": [
{
"content_type": "raw_html",
"title": "Result Summary",
"value": summary
}
]
}
}
"job_details": [{"content_type": "raw_html", "title": "Result Summary", "value": summary}]
},
},
],
# List of job guids that were coalesced to this job
'coalesced': []
}
"coalesced": [],
},
}
]
tjc = create_job_collection(dataset)
# TODO: extract this read credential code out of this function.
cred = {
'client_id': os.environ['TREEHERDER_CLIENT_ID'],
'secret': os.environ['TREEHERDER_CLIENT_SECRET']
}
cred = {"client_id": os.environ["TREEHERDER_CLIENT_ID"], "secret": os.environ["TREEHERDER_CLIENT_SECRET"]}
client = TreeherderClient(server_url='https://treeherder.mozilla.org',
client_id=cred['client_id'],
secret=cred['secret'])
client = TreeherderClient(
server_url="https://treeherder.mozilla.org", client_id=cred["client_id"], secret=cred["secret"]
)
# data structure validation is automatically performed here, if validation
# fails a TreeherderClientError is raised
client.post_collection('servo', trsc)
client.post_collection('servo', tjc)
client.post_collection("servo", trsc)
client.post_collection("servo", tjc)
def main():
parser = argparse.ArgumentParser(
description=("Submit Servo performance data to Perfherder. "
description=(
"Submit Servo performance data to Perfherder. "
"Remember to set your Treeherder credential as environment"
" variable \'TREEHERDER_CLIENT_ID\' and "
"\'TREEHERDER_CLIENT_SECRET\'"))
parser.add_argument("perf_json",
help="the output json from runner")
parser.add_argument("revision_json",
help="the json containing the servo revision data")
parser.add_argument("--engine",
" variable 'TREEHERDER_CLIENT_ID' and "
"'TREEHERDER_CLIENT_SECRET'"
)
)
parser.add_argument("perf_json", help="the output json from runner")
parser.add_argument("revision_json", help="the json containing the servo revision data")
parser.add_argument(
"--engine",
type=str,
default='servo',
help=("The engine to run the tests on. Currently only"
" servo and gecko are supported."))
default="servo",
help=("The engine to run the tests on. Currently only servo and gecko are supported."),
)
args = parser.parse_args()
with open(args.perf_json, 'r') as f:
with open(args.perf_json, "r") as f:
result_json = json.load(f)
with open(args.revision_json, 'r') as f:
with open(args.revision_json, "r") as f:
revision = json.load(f)
perf_data = format_perf_data(result_json, args.engine)
failures = list(filter(lambda x: x['domComplete'] == -1, result_json))
summary = format_result_summary(result_json).replace('\n', '<br/>')
failures = list(filter(lambda x: x["domComplete"] == -1, result_json))
summary = format_result_summary(result_json).replace("\n", "<br/>")
submit(perf_data, failures, revision, summary, args.engine)
print("Done!")

@@ -10,17 +10,16 @@ import boto3
def main():
parser = argparse.ArgumentParser(
description=("Submit Servo performance data to S3. "
"Remember to set your S3 credentials "
"https://github.com/boto/boto3"))
parser.add_argument("perf_file",
help="the output CSV file from runner")
parser.add_argument("perf_key",
help="the S3 key to upload to")
description=(
"Submit Servo performance data to S3. Remember to set your S3 credentials https://github.com/boto/boto3"
)
)
parser.add_argument("perf_file", help="the output CSV file from runner")
parser.add_argument("perf_key", help="the S3 key to upload to")
args = parser.parse_args()
s3 = boto3.client('s3')
BUCKET = 'servo-perf'
s3 = boto3.client("s3")
BUCKET = "servo-perf"
s3.upload_file(args.perf_file, BUCKET, args.perf_key)
print("Done!")

@@ -16,16 +16,16 @@ args = parser.parse_args()
def load_data(filename):
with open(filename, 'r') as f:
with open(filename, "r") as f:
results = {}
totals = {}
counts = {}
records = json.load(f)
for record in records:
key = record.get('testcase')
value = record.get('domComplete') - record.get('domLoading')
totals[key] = totals.get('key', 0) + value
counts[key] = counts.get('key', 0) + 1
key = record.get("testcase")
value = record.get("domComplete") - record.get("domLoading")
totals[key] = totals.get("key", 0) + value
counts[key] = counts.get("key", 0) + 1
results[key] = round(totals[key] / counts[key])
return results
@@ -34,10 +34,10 @@ data1 = load_data(args.file1)
data2 = load_data(args.file2)
keys = set(data1.keys()).union(data2.keys())
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
END = '\033[0m'
BLUE = "\033[94m"
GREEN = "\033[92m"
WARNING = "\033[93m"
END = "\033[0m"
total1 = 0

@@ -10,7 +10,7 @@ import pytest
def test_log_parser():
mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
mock_log = b'''
mock_log = b"""
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
[PERF],navigationStart,1460358376
@@ -36,9 +36,10 @@ def test_log_parser():
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
"""
expected = [{
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 1460358376,
"unloadEventStart": None,
@@ -60,14 +61,15 @@ Shutting down the Constellation after generating an output file or exit flag spe
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None
}]
"loadEventEnd": None,
}
]
result = runner.parse_log(mock_log, mock_url)
assert (expected == list(result))
assert expected == list(result)
def test_log_parser_complex():
mock_log = b'''
mock_log = b"""
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/content.html
[PERF],navigationStart,1460358300
@@ -119,9 +121,10 @@ Some other js error logs here
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
"""
mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
expected = [{
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 1460358376,
"unloadEventStart": None,
@@ -143,14 +146,15 @@ Shutting down the Constellation after generating an output file or exit flag spe
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None
}]
"loadEventEnd": None,
}
]
result = runner.parse_log(mock_log, mock_url)
assert (expected == list(result))
assert expected == list(result)
def test_log_parser_empty():
mock_log = b'''
mock_log = b"""
[PERF] perf block start
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
@@ -158,10 +162,11 @@ def test_log_parser_empty():
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF] perf block end
'''
"""
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
expected = [{
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"title": "",
"navigationStart": 0,
@@ -184,17 +189,19 @@ def test_log_parser_empty():
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1
}]
"loadEventEnd": -1,
}
]
result = runner.parse_log(mock_log, mock_testcase)
assert (expected == list(result))
assert expected == list(result)
def test_log_parser_error():
mock_log = b'Nothing here! Test failed!'
mock_log = b"Nothing here! Test failed!"
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
expected = [{
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"title": "",
"navigationStart": 0,
@@ -217,16 +224,17 @@ def test_log_parser_error():
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1
}]
"loadEventEnd": -1,
}
]
result = runner.parse_log(mock_log, mock_testcase)
assert (expected == list(result))
assert expected == list(result)
def test_log_parser_bad_testcase_name():
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
# Notice the testcase is about:blank, servo crashed
mock_log = b'''
mock_log = b"""
[PERF] perf block start
[PERF],testcase,about:blank
[PERF],navigationStart,1460358376
@@ -252,9 +260,10 @@ def test_log_parser_bad_testcase_name():
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
"""
expected = [{
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"title": "",
"navigationStart": 0,
@@ -277,157 +286,170 @@ Shutting down the Constellation after generating an output file or exit flag spe
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1
}]
"loadEventEnd": -1,
}
]
result = runner.parse_log(mock_log, mock_testcase)
assert (expected == list(result))
assert expected == list(result)
def test_manifest_loader():
text = '''
text = """
http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html
http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html
http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
# Disabled! http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
'''
"""
expected = [
("http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html", False),
("http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html", False),
("http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html", False)
("http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html", False),
]
assert (expected == list(runner.parse_manifest(text)))
assert expected == list(runner.parse_manifest(text))
def test_manifest_loader_async():
text = '''
text = """
http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html
async http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html
'''
"""
expected = [
("http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html", False),
("http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html", True),
]
assert (expected == list(runner.parse_manifest(text)))
assert expected == list(runner.parse_manifest(text))
def test_filter_result_by_manifest():
input_json = [{
input_json = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
"domComplete": 1460358389000,
}, {
},
{
"testcase": "non-existing-html",
"domComplete": 1460358389000,
}, {
},
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389000,
}]
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389000,
}]
manifest = [
("http://localhost:8000/page_load_test/56.com/www.56.com/index.html", False)
},
]
assert (expected == runner.filter_result_by_manifest(input_json, manifest))
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389000,
}
]
manifest = [("http://localhost:8000/page_load_test/56.com/www.56.com/index.html", False)]
assert expected == runner.filter_result_by_manifest(input_json, manifest)
def test_filter_result_by_manifest_error():
input_json = [{
input_json = [
{
"testcase": "1.html",
"domComplete": 1460358389000,
}]
manifest = [
("1.html", False),
("2.html", False)
}
]
manifest = [("1.html", False), ("2.html", False)]
with pytest.raises(Exception) as execinfo:
runner.filter_result_by_manifest(input_json, manifest)
assert "Missing test result" in str(execinfo.value)
def test_take_result_median_odd():
input_json = [{
input_json = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001,
"domLoading": 1460358380002
}, {
"domLoading": 1460358380002,
},
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001
}, {
"domLoading": 1460358380001,
},
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389003,
"domLoading": 1460358380003
}]
"domLoading": 1460358380003,
},
]
expected = [{
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380002
}]
"domLoading": 1460358380002,
}
]
assert (expected == runner.take_result_median(input_json, len(input_json)))
assert expected == runner.take_result_median(input_json, len(input_json))
def test_take_result_median_even():
input_json = [{
input_json = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001,
"domLoading": 1460358380002
}, {
"domLoading": 1460358380002,
},
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001
}]
"domLoading": 1460358380001,
},
]
expected = [{
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001.5,
"domLoading": 1460358380001.5
}]
"domLoading": 1460358380001.5,
}
]
assert (expected == runner.take_result_median(input_json, len(input_json)))
assert expected == runner.take_result_median(input_json, len(input_json))
def test_take_result_median_error():
input_json = [{
input_json = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": None,
"domLoading": 1460358380002
}, {
"domLoading": 1460358380002,
},
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001
}]
"domLoading": 1460358380001,
},
]
expected = [{
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001.5
}]
"domLoading": 1460358380001.5,
}
]
assert (expected == runner.take_result_median(input_json, len(input_json)))
assert expected == runner.take_result_median(input_json, len(input_json))
def test_log_result():
results = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": -1
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": -1
}, {
"testcase": "http://localhost:8000/page_load_test/104.com/www.104.com/index.html",
"domComplete": 123456789
}]
results = [
{"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html", "domComplete": -1},
{"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html", "domComplete": -1},
{"testcase": "http://localhost:8000/page_load_test/104.com/www.104.com/index.html", "domComplete": 123456789},
]
expected = """
========================================
@@ -437,4 +459,4 @@ Failure summary:
- http://localhost:8000/page_load_test/56.com/www.56.com/index.html
========================================
"""
assert (expected == runner.format_result_summary(results))
assert expected == runner.format_result_summary(results)

@@ -8,18 +8,18 @@ import submit_to_perfherder
def test_format_testcase_name():
assert ('about:blank' == submit_to_perfherder.format_testcase_name(
'about:blank'))
assert ('163.com' == submit_to_perfherder.format_testcase_name((
'http://localhost:8000/page_load_test/163.com/p.mail.163.com/'
'mailinfo/shownewmsg_www_1222.htm.html')))
assert (('1234567890223456789032345678904234567890'
'5234567890623456789072345678908234567890')
== submit_to_perfherder.format_testcase_name((
'1234567890223456789032345678904234567890'
'52345678906234567890723456789082345678909234567890')))
assert ('news.ycombinator.com' == submit_to_perfherder.format_testcase_name(
'http://localhost:8000/tp6/news.ycombinator.com/index.html'))
assert "about:blank" == submit_to_perfherder.format_testcase_name("about:blank")
assert "163.com" == submit_to_perfherder.format_testcase_name(
("http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html")
)
assert (
"12345678902234567890323456789042345678905234567890623456789072345678908234567890"
) == submit_to_perfherder.format_testcase_name(
("123456789022345678903234567890423456789052345678906234567890723456789082345678909234567890")
)
assert "news.ycombinator.com" == submit_to_perfherder.format_testcase_name(
"http://localhost:8000/tp6/news.ycombinator.com/index.html"
)
def test_format_perf_data():
@@ -46,7 +46,7 @@ def test_format_perf_data():
"unloadEventEnd": None,
"responseEnd": None,
"testcase": "about:blank",
"domComplete": 1460444931000
"domComplete": 1460444931000,
},
{
"unloadEventStart": None,
@@ -69,11 +69,11 @@ def test_format_perf_data():
"domainLookupEnd": None,
"unloadEventEnd": None,
"responseEnd": None,
"testcase": ("http://localhost:8000/page_load_test/163.com/"
"p.mail.163.com/mailinfo/"
"shownewmsg_www_1222.htm.html"),
"domComplete": 1460444948000
}
"testcase": (
"http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html"
),
"domComplete": 1460444948000,
},
]
expected = {
@@ -84,33 +84,27 @@ def test_format_perf_data():
"name": "domComplete",
"value": 3741.657386773941,
"subtests": [
{"name": "about:blank",
"value": 1000},
{"name": "163.com",
"value": 14000},
]
{"name": "about:blank", "value": 1000},
{"name": "163.com", "value": 14000},
],
}
]
],
}
}
result = submit_to_perfherder.format_perf_data(mock_result)
assert (expected == result)
assert expected == result
def test_format_bad_perf_data():
mock_result = [
{
"navigationStart": 1460444930000,
"testcase": "about:blank",
"domComplete": 0
},
{"navigationStart": 1460444930000, "testcase": "about:blank", "domComplete": 0},
{
"navigationStart": 1460444934000,
"testcase": ("http://localhost:8000/page_load_test/163.com/"
"p.mail.163.com/mailinfo/"
"shownewmsg_www_1222.htm.html"),
"domComplete": 1460444948000
}
"testcase": (
"http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html"
),
"domComplete": 1460444948000,
},
]
expected = {
@@ -121,14 +115,12 @@ def test_format_bad_perf_data():
"name": "domComplete",
"value": 14000.0,
"subtests": [
{"name": "about:blank",
"value": -1}, # Timeout
{"name": "163.com",
"value": 14000},
]
{"name": "about:blank", "value": -1}, # Timeout
{"name": "163.com", "value": 14000},
],
}
]
],
}
}
result = submit_to_perfherder.format_perf_data(mock_result)
assert (expected == result)
assert expected == result

@@ -37,7 +37,7 @@ class Item:
def from_result(cls, result: dict, title: Optional[str] = None, print_stack=True):
expected = result["expected"]
actual = result["actual"]
title = title if title else f'`{result["path"]}`'
title = title if title else f"`{result['path']}`"
if expected != actual:
title = f"{actual} [expected {expected}] {title}"
else:
@@ -45,8 +45,7 @@ class Item:
issue_url = "http://github.com/servo/servo/issues/"
if "issues" in result and result["issues"]:
issues = ", ".join([f"[#{issue}]({issue_url}{issue})"
for issue in result["issues"]])
issues = ", ".join([f"[#{issue}]({issue_url}{issue})" for issue in result["issues"]])
title += f" ({issues})"
stack = result["stack"] if result["stack"] and print_stack else ""
@@ -59,8 +58,9 @@ class Item:
cls.from_result(
subtest_result,
f"subtest: `{subtest_result['subtest']}`"
+ (f" \n```\n{subtest_result['message']}\n```\n" if subtest_result['message'] else ""),
False)
+ (f" \n```\n{subtest_result['message']}\n```\n" if subtest_result["message"] else ""),
False,
)
for subtest_result in subtest_results
]
return cls(title, body, children)
@@ -68,10 +68,8 @@ class Item:
def to_string(self, bullet: str = "", indent: str = ""):
output = f"{indent}{bullet}{self.title}\n"
if self.body:
output += textwrap.indent(f"{self.body}\n",
" " * len(indent + bullet))
output += "\n".join([child.to_string("", indent + " ")
for child in self.children])
output += textwrap.indent(f"{self.body}\n", " " * len(indent + bullet))
output += "\n".join([child.to_string("", indent + " ") for child in self.children])
return output.rstrip().replace("`", "")
def to_html(self, level: int = 0) -> ElementTree.Element:
@@ -88,17 +86,13 @@ class Item:
if self.children:
# Some tests have dozens of failing tests, which overwhelm the
# output. Limit the output for subtests in GitHub comment output.
max_children = len(
self.children) if level < 2 else SUBTEST_RESULT_TRUNCATION
max_children = len(self.children) if level < 2 else SUBTEST_RESULT_TRUNCATION
if len(self.children) > max_children:
children = self.children[:max_children]
children.append(Item(
f"And {len(self.children) - max_children} more unexpected results...",
"", []))
children.append(Item(f"And {len(self.children) - max_children} more unexpected results...", "", []))
else:
children = self.children
container = ElementTree.SubElement(
result, "div" if not level else "ul")
container = ElementTree.SubElement(result, "div" if not level else "ul")
for child in children:
container.append(child.to_html(level + 1))
@@ -125,17 +119,16 @@ def get_results(filenames: list[str], tag: str = "") -> Optional[Item]:
return not is_flaky(result) and not result["issues"]
def add_children(children: List[Item], results: List[dict], filter_func, text):
filtered = [Item.from_result(result) for result in
filter(filter_func, results)]
filtered = [Item.from_result(result) for result in filter(filter_func, results)]
if filtered:
children.append(Item(f"{text} ({len(filtered)})", "", filtered))
children: List[Item] = []
add_children(children, unexpected, is_flaky, "Flaky unexpected result")
add_children(children, unexpected, is_stable_and_known,
"Stable unexpected results that are known to be intermittent")
add_children(children, unexpected, is_stable_and_unexpected,
"Stable unexpected results")
add_children(
children, unexpected, is_stable_and_known, "Stable unexpected results that are known to be intermittent"
)
add_children(children, unexpected, is_stable_and_unexpected, "Stable unexpected results")
run_url = get_github_run_url()
text = "Test results"
@@ -154,8 +147,8 @@ def get_github_run_url() -> Optional[str]:
return None
if "run_id" not in github_context:
return None
repository = github_context['repository']
run_id = github_context['run_id']
repository = github_context["repository"]
run_id = github_context["run_id"]
return f"[#{run_id}](https://github.com/{repository}/actions/runs/{run_id})"
@@ -197,14 +190,14 @@ def create_github_reports(body: str, tag: str = ""):
# This process is based on the documentation here:
# https://docs.github.com/en/rest/checks/runs?apiVersion=2022-11-28#create-a-check-runs
results = json.loads(os.environ.get("RESULTS", "{}"))
if all(r == 'success' for r in results):
conclusion = 'success'
if all(r == "success" for r in results):
conclusion = "success"
elif "failure" in results:
conclusion = 'failure'
conclusion = "failure"
elif "cancelled" in results:
conclusion = 'cancelled'
conclusion = "cancelled"
else:
conclusion = 'neutral'
conclusion = "neutral"
github_token = os.environ.get("GITHUB_TOKEN")
github_context = json.loads(os.environ.get("GITHUB_CONTEXT", "{}"))
@@ -214,34 +207,42 @@ def create_github_reports(body: str, tag: str = ""):
return None
repo = github_context["repository"]
data = {
'name': tag,
'head_sha': github_context["sha"],
'status': 'completed',
'started_at': datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
'conclusion': conclusion,
'completed_at': datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
'output': {
'title': f'Aggregated {tag} report',
'summary': body,
'images': [{'alt': 'WPT logo', 'image_url': 'https://avatars.githubusercontent.com/u/37226233'}]
"name": tag,
"head_sha": github_context["sha"],
"status": "completed",
"started_at": datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
"conclusion": conclusion,
"completed_at": datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
"output": {
"title": f"Aggregated {tag} report",
"summary": body,
"images": [{"alt": "WPT logo", "image_url": "https://avatars.githubusercontent.com/u/37226233"}],
},
'actions': [
]
"actions": [],
}
subprocess.Popen(["curl", "-L",
"-X", "POST",
"-H", "Accept: application/vnd.github+json",
"-H", f"Authorization: Bearer {github_token}",
"-H", "X-GitHub-Api-Version: 2022-11-28",
subprocess.Popen(
[
"curl",
"-L",
"-X",
"POST",
"-H",
"Accept: application/vnd.github+json",
"-H",
f"Authorization: Bearer {github_token}",
"-H",
"X-GitHub-Api-Version: 2022-11-28",
f"https://api.github.com/repos/{repo}/check-runs",
"-d", json.dumps(data)]).wait()
"-d",
json.dumps(data),
]
).wait()
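For comparison, the same check-run creation could be written with the `requests` library; this is an illustrative sketch only (the script deliberately shells out to curl, and `requests` being available is an assumption):

import requests  # assumption: not necessarily a dependency of this script

def post_check_run(repo: str, token: str, data: dict) -> int:
    # POST the same payload to the endpoint the curl invocation targets.
    response = requests.post(
        f"https://api.github.com/repos/{repo}/check-runs",
        json=data,
        headers={
            "Accept": "application/vnd.github+json",
            "Authorization": f"Bearer {token}",
            "X-GitHub-Api-Version": "2022-11-28",
        },
    )
    return response.status_code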
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--tag", default="wpt", action="store",
help="A string tag used to distinguish the results.")
parser.add_argument("--tag", default="wpt", action="store", help="A string tag used to distinguish the results.")
args, filenames = parser.parse_known_args()
results = get_results(filenames, args.tag)
if not results:
@ -251,14 +252,12 @@ def main():
print(results.to_string())
html_string = ElementTree.tostring(
results.to_html(), encoding="unicode")
html_string = ElementTree.tostring(results.to_html(), encoding="unicode")
create_github_reports(html_string, args.tag)
pr_number = get_pr_number()
if pr_number:
process = subprocess.Popen(
['gh', 'pr', 'comment', pr_number, '-F', '-'], stdin=subprocess.PIPE)
process = subprocess.Popen(["gh", "pr", "comment", pr_number, "-F", "-"], stdin=subprocess.PIPE)
print(process.communicate(input=html_string.encode("utf-8"))[0])
else:
print("Could not find PR number in environment. Not making GitHub comment.")


@ -35,6 +35,7 @@ def main(crate=None):
for dependency in graph.get(name, []):
filtered.setdefault(name, []).append(dependency)
traverse(dependency)
traverse(crate)
else:
filtered = graph


@ -42,12 +42,15 @@ import signal
import sys
from argparse import ArgumentParser
from subprocess import Popen, PIPE
try:
from termcolor import colored
except ImportError:
def colored(text, *args, **kwargs):
return text
fields = ["frame.time", "tcp.srcport", "tcp.payload"]
@ -57,10 +60,14 @@ def record_data(file, port):
# Create tshark command
cmd = [
"tshark",
"-T", "fields",
"-i", "lo",
"-d", f"tcp.port=={port},http",
"-w", file,
"-T",
"fields",
"-i",
"lo",
"-d",
f"tcp.port=={port},http",
"-w",
file,
] + [e for f in fields for e in ("-e", f)]
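# The trailing comprehension interleaves "-e" with each field name, producing
# ["-e", "frame.time", "-e", "tcp.srcport", "-e", "tcp.payload"].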
process = Popen(cmd, stdout=PIPE)
@ -84,8 +91,10 @@ def read_data(file):
# Create tshark command
cmd = [
"tshark",
"-T", "fields",
"-r", file,
"-T",
"fields",
"-r",
file,
] + [e for f in fields for e in ("-e", f)]
process = Popen(cmd, stdout=PIPE)
@ -182,7 +191,7 @@ def parse_message(msg, *, json_output=False):
time, sender, i, data = msg
from_servo = sender == "Servo"
colored_sender = colored(sender, 'black', 'on_yellow' if from_servo else 'on_magenta', attrs=['bold'])
colored_sender = colored(sender, "black", "on_yellow" if from_servo else "on_magenta", attrs=["bold"])
if not json_output:
print(f"\n{colored_sender} - {colored(i, 'blue')} - {colored(time, 'dark_grey')}")
@ -199,7 +208,7 @@ def parse_message(msg, *, json_output=False):
assert False, "Message is neither a request nor a response"
else:
if from_servo and "from" in content:
print(colored(f"Actor: {content['from']}", 'yellow'))
print(colored(f"Actor: {content['from']}", "yellow"))
print(json.dumps(content, sort_keys=True, indent=4))
except json.JSONDecodeError:
print(f"Warning: Couldn't decode json\n{data}")
@ -236,7 +245,7 @@ if __name__ == "__main__":
if args.range and len(args.range.split(":")) == 2:
min, max = args.range.split(":")
for msg in data[int(min):int(max) + 1]:
for msg in data[int(min) : int(max) + 1]:
# Filter the messages if specified
if not args.filter or args.filter.lower() in msg[3].lower():
parse_message(msg, json_output=args.json)
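The spaced slice above is the formatter applying PEP 8's rule that the slice colon acts like a binary operator: spaces appear once either bound is more than a simple name or literal. A minimal illustration:

items = list(range(10))
simple = items[1:4]  # simple bounds: no spaces around the colon
complex_bounds = items[int("1") : int("4") + 1]  # call expressions: spaced colon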


@ -21,14 +21,14 @@ def extract_memory_reports(lines):
report_lines = []
times = []
for line in lines:
if line.startswith('Begin memory reports'):
if line.startswith("Begin memory reports"):
in_report = True
report_lines += [[]]
times += [line.strip().split()[-1]]
elif line == 'End memory reports\n':
elif line == "End memory reports\n":
in_report = False
elif in_report:
if line.startswith('|'):
if line.startswith("|"):
report_lines[-1].append(line.strip())
return (report_lines, times)
@ -38,11 +38,11 @@ def parse_memory_report(lines):
parents = []
last_separator_index = None
for line in lines:
assert (line[0] == '|')
assert line[0] == "|"
line = line[1:]
if not line:
continue
separator_index = line.index('--')
separator_index = line.index("--")
if last_separator_index and separator_index <= last_separator_index:
while parents and parents[-1][1] >= separator_index:
parents.pop()
@ -50,13 +50,9 @@ def parse_memory_report(lines):
amount, unit, _, name = line.split()
dest_report = reports
for (parent, index) in parents:
dest_report = dest_report[parent]['children']
dest_report[name] = {
'amount': amount,
'unit': unit,
'children': {}
}
for parent, index in parents:
dest_report = dest_report[parent]["children"]
dest_report[name] = {"amount": amount, "unit": unit, "children": {}}
parents += [(name, separator_index)]
last_separator_index = separator_index
@ -68,24 +64,26 @@ def transform_report_for_test(report):
remaining = list(report.items())
while remaining:
(name, value) = remaining.pop()
transformed[name] = '%s %s' % (value['amount'], value['unit'])
remaining += map(lambda k_v: (name + '/' + k_v[0], k_v[1]), list(value['children'].items()))
transformed[name] = "%s %s" % (value["amount"], value["unit"])
remaining += map(lambda k_v: (name + "/" + k_v[0], k_v[1]), list(value["children"].items()))
return transformed
def test_extract_memory_reports():
input = ["Begin memory reports",
input = [
"Begin memory reports",
"|",
" 154.56 MiB -- explicit\n",
"| 107.88 MiB -- system-heap-unclassified\n",
"End memory reports\n"]
expected = ([['|', '| 107.88 MiB -- system-heap-unclassified']], ['reports'])
assert (extract_memory_reports(input) == expected)
"End memory reports\n",
]
expected = ([["|", "| 107.88 MiB -- system-heap-unclassified"]], ["reports"])
assert extract_memory_reports(input) == expected
return 0
def test():
input = '''|
input = """|
| 23.89 MiB -- explicit
| 21.35 MiB -- jemalloc-heap-unclassified
| 2.54 MiB -- url(https://servo.org/)
@ -97,33 +95,33 @@ def test():
| 0.27 MiB -- stylist
| 0.12 MiB -- dom-tree
|
| 25.18 MiB -- jemalloc-heap-active'''
| 25.18 MiB -- jemalloc-heap-active"""
expected = {
'explicit': '23.89 MiB',
'explicit/jemalloc-heap-unclassified': '21.35 MiB',
'explicit/url(https://servo.org/)': '2.54 MiB',
'explicit/url(https://servo.org/)/js': '2.16 MiB',
'explicit/url(https://servo.org/)/js/gc-heap': '1.00 MiB',
'explicit/url(https://servo.org/)/js/gc-heap/decommitted': '0.77 MiB',
'explicit/url(https://servo.org/)/js/non-heap': '1.00 MiB',
'explicit/url(https://servo.org/)/layout-thread': '0.27 MiB',
'explicit/url(https://servo.org/)/layout-thread/stylist': '0.27 MiB',
'explicit/url(https://servo.org/)/dom-tree': '0.12 MiB',
'jemalloc-heap-active': '25.18 MiB',
"explicit": "23.89 MiB",
"explicit/jemalloc-heap-unclassified": "21.35 MiB",
"explicit/url(https://servo.org/)": "2.54 MiB",
"explicit/url(https://servo.org/)/js": "2.16 MiB",
"explicit/url(https://servo.org/)/js/gc-heap": "1.00 MiB",
"explicit/url(https://servo.org/)/js/gc-heap/decommitted": "0.77 MiB",
"explicit/url(https://servo.org/)/js/non-heap": "1.00 MiB",
"explicit/url(https://servo.org/)/layout-thread": "0.27 MiB",
"explicit/url(https://servo.org/)/layout-thread/stylist": "0.27 MiB",
"explicit/url(https://servo.org/)/dom-tree": "0.12 MiB",
"jemalloc-heap-active": "25.18 MiB",
}
report = parse_memory_report(input.split('\n'))
report = parse_memory_report(input.split("\n"))
transformed = transform_report_for_test(report)
assert (sorted(transformed.keys()) == sorted(expected.keys()))
assert sorted(transformed.keys()) == sorted(expected.keys())
for k, v in transformed.items():
assert (v == expected[k])
assert v == expected[k]
test_extract_memory_reports()
return 0
def usage():
print('%s --test - run automated tests' % sys.argv[0])
print('%s file - extract all memory reports that are present in file' % sys.argv[0])
print("%s --test - run automated tests" % sys.argv[0])
print("%s file - extract all memory reports that are present in file" % sys.argv[0])
return 1
@ -131,19 +129,19 @@ if __name__ == "__main__":
if len(sys.argv) == 1:
sys.exit(usage())
if sys.argv[1] == '--test':
if sys.argv[1] == "--test":
sys.exit(test())
with open(sys.argv[1]) as f:
lines = f.readlines()
(reports, times) = extract_memory_reports(lines)
json_reports = []
for (report_lines, seconds) in zip(reports, times):
for report_lines, seconds in zip(reports, times):
report = parse_memory_report(report_lines)
json_reports += [{'seconds': seconds, 'report': report}]
json_reports += [{"seconds": seconds, "report": report}]
with tempfile.NamedTemporaryFile(delete=False) as output:
thisdir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(thisdir, 'memory_chart.html')) as template:
with open(os.path.join(thisdir, "memory_chart.html")) as template:
content = template.read()
output.write(content.replace('[/* json data */]', json.dumps(json_reports)))
webbrowser.open_new_tab('file://' + output.name)
output.write(content.replace("[/* json data */]", json.dumps(json_reports)))
webbrowser.open_new_tab("file://" + output.name)


@ -31,7 +31,7 @@ Example:
""")
sys.exit(0)
rust_source = open(sys.argv[1], 'r')
rust_source = open(sys.argv[1], "r")
lines = iter(rust_source)
for line in lines:
if line.lstrip().startswith("pub enum ProfilerCategory"):
@ -53,21 +53,21 @@ plist = ElementTree.ElementTree(ElementTree.fromstring(xml))
elems = iter(plist.findall("./dict/*"))
for elem in elems:
if elem.tag != 'key' or elem.text != '$objects':
if elem.tag != "key" or elem.text != "$objects":
continue
array = next(elems)
break
elems = iter(array.findall("./*"))
for elem in elems:
if elem.tag != 'string' or elem.text != 'kdebugIntervalRule':
if elem.tag != "string" or elem.text != "kdebugIntervalRule":
continue
dictionary = next(elems)
break
elems = iter(dictionary.findall("./*"))
for elem in elems:
if elem.tag != 'key' or elem.text != 'NS.objects':
if elem.tag != "key" or elem.text != "NS.objects":
continue
objects_array = next(elems)
break
@ -76,33 +76,33 @@ child_count = sum(1 for _ in iter(array.findall("./*")))
for code_pair in code_pairs:
number_index = child_count
integer = Element('integer')
integer = Element("integer")
integer.text = str(int(code_pair[0], 0))
array.append(integer)
child_count += 1
string_index = child_count
string = Element('string')
string = Element("string")
string.text = code_pair[1]
array.append(string)
child_count += 1
dictionary = Element('dict')
key = Element('key')
dictionary = Element("dict")
key = Element("key")
key.text = "CF$UID"
dictionary.append(key)
integer = Element('integer')
integer = Element("integer")
integer.text = str(number_index)
dictionary.append(integer)
objects_array.append(dictionary)
dictionary = Element('dict')
key = Element('key')
dictionary = Element("dict")
key = Element("key")
key.text = "CF$UID"
dictionary.append(key)
integer = Element('integer')
integer = Element("integer")
integer.text = str(string_index)
dictionary.append(integer)
objects_array.append(dictionary)
plist.write(sys.stdout, encoding='utf-8', xml_declaration=True)
plist.write(sys.stdout, encoding="utf-8", xml_declaration=True)


@ -53,17 +53,17 @@ stacks = {}
thread_data = defaultdict(list)
thread_order = {}
for sample in samples:
if sample['name']:
name = sample['name']
if sample["name"]:
name = sample["name"]
else:
name = "%s %d %d" % (sample['type'], sample['namespace'], sample['index'])
thread_data[name].append((sample['time'], sample['frames']))
name = "%s %d %d" % (sample["type"], sample["namespace"], sample["index"])
thread_data[name].append((sample["time"], sample["frames"]))
if name not in thread_order:
thread_order[name] = (sample['namespace'], sample['index'])
thread_order[name] = (sample["namespace"], sample["index"])
tid = 0
threads = []
for (name, raw_samples) in sorted(iter(thread_data.items()), key=lambda x: thread_order[x[0]]):
for name, raw_samples in sorted(iter(thread_data.items()), key=lambda x: thread_order[x[0]]):
string_table = StringTable()
tid += 1
@ -77,13 +77,13 @@ for (name, raw_samples) in sorted(iter(thread_data.items()), key=lambda x: threa
for sample in raw_samples:
prefix = None
for frame in sample[1]:
if not frame['name']:
if not frame["name"]:
continue
if frame['name'] not in frameMap:
frameMap[frame['name']] = len(frames)
frame_index = string_table.get(frame['name'])
if frame["name"] not in frameMap:
frameMap[frame["name"]] = len(frames)
frame_index = string_table.get(frame["name"])
frames.append([frame_index])
frame = frameMap[frame['name']]
frame = frameMap[frame["name"]]
stack_key = "%d,%d" % (frame, prefix) if prefix else str(frame)
if stack_key not in stackMap:
@ -93,61 +93,63 @@ for (name, raw_samples) in sorted(iter(thread_data.items()), key=lambda x: threa
prefix = stack
samples.append([stack, sample[0]])
threads.append({
'tid': tid,
'name': name,
'markers': {
'schema': {
'name': 0,
'time': 1,
'data': 2,
threads.append(
{
"tid": tid,
"name": name,
"markers": {
"schema": {
"name": 0,
"time": 1,
"data": 2,
},
'data': [],
"data": [],
},
'samples': {
'schema': {
'stack': 0,
'time': 1,
'responsiveness': 2,
'rss': 2,
'uss': 4,
'frameNumber': 5,
"samples": {
"schema": {
"stack": 0,
"time": 1,
"responsiveness": 2,
"rss": 2,
"uss": 4,
"frameNumber": 5,
},
'data': samples,
"data": samples,
},
'frameTable': {
'schema': {
'location': 0,
'implementation': 1,
'optimizations': 2,
'line': 3,
'category': 4,
"frameTable": {
"schema": {
"location": 0,
"implementation": 1,
"optimizations": 2,
"line": 3,
"category": 4,
},
'data': frames,
"data": frames,
},
'stackTable': {
'schema': {
'frame': 0,
'prefix': 1,
"stackTable": {
"schema": {
"frame": 0,
"prefix": 1,
},
'data': stacks,
"data": stacks,
},
'stringTable': string_table.contents(),
})
"stringTable": string_table.contents(),
}
)
output = {
'meta': {
'interval': rate,
'processType': 0,
'product': 'Servo',
'stackwalk': 1,
'startTime': startTime,
'version': 4,
'presymbolicated': True,
"meta": {
"interval": rate,
"processType": 0,
"product": "Servo",
"stackwalk": 1,
"startTime": startTime,
"version": 4,
"presymbolicated": True,
},
'libs': [],
'threads': threads,
"libs": [],
"threads": threads,
}
print(json.dumps(output))
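Each `schema` object maps a column name to a positional index within the rows of its sibling `data` array, so consumers decode rows positionally. An illustrative read of the samples table assembled above:

thread = output["threads"][0]
schema = thread["samples"]["schema"]  # e.g. {"stack": 0, "time": 1, ...}
for row in thread["samples"]["data"]:
    stack_index = row[schema["stack"]]  # index into the thread's stackTable
    timestamp = row[schema["time"]]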


@ -27,8 +27,10 @@ def main(avd_name, apk_path, *args):
"-no-window",
"-no-snapshot",
"-no-snapstorage",
"-gpu", "guest",
"-port", emulator_port,
"-gpu",
"guest",
"-port",
emulator_port,
]
with terminate_on_exit(emulator_args, stdout=sys.stderr) as emulator_process:
# This is hopefully enough time for the emulator to exit
@ -70,7 +72,6 @@ def main(avd_name, apk_path, *args):
"*:S", # Hide everything else
]
with terminate_on_exit(adb + ["logcat"] + logcat_args) as logcat:
# This step needs to happen after application start
forward_webdriver(adb, args)
@ -84,8 +85,7 @@ def tool_path(directory, bin_name):
if os.path.exists(path):
return path
path = os.path.join(os.path.dirname(__file__), "..", "android-toolchains", "sdk",
directory, bin_name)
path = os.path.join(os.path.dirname(__file__), "..", "android-toolchains", "sdk", directory, bin_name)
if os.path.exists(path):
return path
@ -207,8 +207,7 @@ def interrupt(_signum, _frame):
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Usage: %s avd_name apk_path [servo args...]" % sys.argv[0])
print("Example: %s servo-x86 target/i686-linux-android/release/servo.apk https://servo.org"
% sys.argv[0])
print("Example: %s servo-x86 target/i686-linux-android/release/servo.apk https://servo.org" % sys.argv[0])
sys.exit(1)
try:


@ -29,16 +29,16 @@ import getopt
def print_help():
print('\nPlease enter the command as shown below: \n')
print('python3 ./etc/servo_automation_screenshot.py -p <port>'
+ ' -i /path/to/folder/containing/files -r <resolution>'
+ ' -n num_of_files\n')
print("\nPlease enter the command as shown below: \n")
print(
"python3 ./etc/servo_automation_screenshot.py -p <port>"
+ " -i /path/to/folder/containing/files -r <resolution>"
+ " -n num_of_files\n"
)
def servo_ready_to_accept(url, payload, headers):
while (True):
while True:
try:
# Before sending an additional request, we wait for one second each time
time.sleep(1)
@ -48,45 +48,46 @@ def servo_ready_to_accept(url, payload, headers):
break
except Exception as e:
time.sleep(5)
print('Exception: ', e)
print("Exception: ", e)
return json_string
def ensure_screenshots_directory_exists():
if not os.path.exists('screenshots'):
os.makedirs('screenshots')
if not os.path.exists("screenshots"):
os.makedirs("screenshots")
def render_html_files(num_of_files, url, file_url, json_string, headers, cwd):
for x in range(num_of_files):
json_data = {}
json_data['url'] = 'file://{0}file{1}.html'.format(file_url, str(x))
print(json_data['url'])
json_data["url"] = "file://{0}file{1}.html".format(file_url, str(x))
print(json_data["url"])
json_data = json.dumps(json_data)
requests.post('{0}/{1}/url'.format(url, json_string['value']['sessionId']), data=json_data, headers=headers)
screenshot_request = requests.get('{0}/{1}/screenshot'.format(url, json_string['value']['sessionId']))
image_data_encoded = screenshot_request.json()['value']
requests.post("{0}/{1}/url".format(url, json_string["value"]["sessionId"]), data=json_data, headers=headers)
screenshot_request = requests.get("{0}/{1}/screenshot".format(url, json_string["value"]["sessionId"]))
image_data_encoded = screenshot_request.json()["value"]
with open("screenshots/output_image_{0}.png".format(str(x)), "wb") as image_file:
image_file.write(base64.decodebytes(image_data_encoded.encode('utf-8')))
image_file.write(base64.decodebytes(image_data_encoded.encode("utf-8")))
print("################################")
print("The screenshot is stored in the location: {0}/screenshots/"
" with filename: output_image_{1}.png".format(cwd, str(x)))
print(
"The screenshot is stored in the location: {0}/screenshots/ with filename: output_image_{1}.png".format(
cwd, str(x)
)
)
print("################################")
def main(argv):  # take inputs from the command line via option flags, e.g. -h, -p, etc.
# Local Variables
port = ''
resolution = ''
file_url = ''
num_of_files = ''
port = ""
resolution = ""
file_url = ""
num_of_files = ""
cwd = os.getcwd()
url = ''
url = ""
payload = "{}"
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
json_string = ''
headers = {"content-type": "application/json", "Accept-Charset": "UTF-8"}
json_string = ""
try:
# input options defined here.
opts, args = getopt.getopt(argv, "p:i:r:n:", ["port=", "ifile=", "resolution=", "num-files="])
@ -96,7 +97,7 @@ def main(argv): # take inputs from command line by considering the options para
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h': # -h means help. Displays how to input command line arguments
if opt == "-h": # -h means help. Displays how to input command line arguments
print_help()
sys.exit()
elif opt in ("-p", "--port"): # store the value provided with the option -p in port variable.
@ -108,7 +109,7 @@ def main(argv): # take inputs from command line by considering the options para
elif opt in ("-n", "--num-files"): # store the value provided with the option -n in num_of_files variable.
num_of_files = arg
url = 'http://localhost:{0}/session'.format(port)
url = "http://localhost:{0}/session".format(port)
num_of_files = int(num_of_files)
# Starting servo on specified port


@ -68,7 +68,7 @@ class TrustedNodeAddressPrinter:
def children(self):
node_type = gdb.lookup_type("struct script::dom::node::Node").pointer()
value = self.val.cast(node_type)
return [('Node', value)]
return [("Node", value)]
def to_string(self):
return self.val.address
@ -83,7 +83,7 @@ class NodeTypeIdPrinter:
u8_ptr_type = gdb.lookup_type("u8").pointer()
enum_0 = self.val.address.cast(u8_ptr_type).dereference()
enum_type = self.val.type.fields()[int(enum_0)].type
return str(enum_type).lstrip('struct ')
return str(enum_type).lstrip("struct ")
# Printer for std::Option<>
@ -113,8 +113,8 @@ class OptionPrinter:
value_type = option_type.fields()[1].type.fields()[1].type
v_size = value_type.sizeof
data_ptr = (ptr + t_size - v_size).cast(value_type.pointer()).dereference()
return [('Some', data_ptr)]
return [('None', None)]
return [("Some", data_ptr)]
return [("None", None)]
def to_string(self):
return None
@ -130,19 +130,19 @@ class TestPrinter:
type_map = [
('struct Au', AuPrinter),
('FlowFlags', BitFieldU8Printer),
('IntrinsicWidths', ChildPrinter),
('PlacementInfo', ChildPrinter),
('TrustedNodeAddress', TrustedNodeAddressPrinter),
('NodeTypeId', NodeTypeIdPrinter),
('Option', OptionPrinter),
("struct Au", AuPrinter),
("FlowFlags", BitFieldU8Printer),
("IntrinsicWidths", ChildPrinter),
("PlacementInfo", ChildPrinter),
("TrustedNodeAddress", TrustedNodeAddressPrinter),
("NodeTypeId", NodeTypeIdPrinter),
("Option", OptionPrinter),
]
def lookup_servo_type(val):
val_type = str(val.type)
for (type_name, printer) in type_map:
for type_name, printer in type_map:
if val_type == type_name or val_type.endswith("::" + type_name):
return printer(val)
return None
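A lookup function like `lookup_servo_type` is typically registered by appending it to `gdb.pretty_printers`; a minimal sketch, assuming this file is sourced from inside a GDB session:

import gdb  # only importable inside a GDB process

# Ask GDB to consult lookup_servo_type for every value it is about to print.
gdb.pretty_printers.append(lookup_servo_type)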


@ -12,13 +12,13 @@ Created on Mon Mar 26 20:08:25 2018
@author: Pranshu Sinha, Abhay Soni, Aayushi Agrawal
The script is intended to start servo on localhost:7002
"""
import subprocess
def start_servo(port, resolution):
# Use the below command if you are running this script on windows
# cmds = 'mach.bat run --webdriver ' + port + ' --window-size ' + resolution
cmds = './mach run --webdriver=' + port + ' --window-size ' + resolution
cmds = "./mach run --webdriver=" + port + " --window-size " + resolution
process = subprocess.Popen(cmds, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return process
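A hypothetical invocation; the port and resolution strings here are assumptions, passed through verbatim to `--webdriver` and `--window-size`:

process = start_servo("7002", "1024x768")  # hypothetical argument values
# ... drive Servo over the WebDriver endpoint on localhost:7002 ...
process.terminate()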


@ -21,7 +21,7 @@
import sys
import json
full_search = len(sys.argv) > 3 and sys.argv[3] == '--full'
full_search = len(sys.argv) > 3 and sys.argv[3] == "--full"
with open(sys.argv[1]) as f:
data = f.readlines()
@ -34,13 +34,9 @@ with open(sys.argv[1]) as f:
if "action" in entry and entry["action"] == "test_end":
thread = None
else:
if ("action" in entry
and entry["action"] == "test_start"
and entry["test"] == sys.argv[2]):
if "action" in entry and entry["action"] == "test_start" and entry["test"] == sys.argv[2]:
thread = entry["thread"]
print(json.dumps(entry))
elif (full_search
and "command" in entry
and sys.argv[2] in entry["command"]):
elif full_search and "command" in entry and sys.argv[2] in entry["command"]:
thread = entry["thread"]
print(json.dumps(entry))


@ -45,9 +45,7 @@ def process_log(data):
elif entry["action"] == "test_end":
test = tests[entry["test"]]
test["end"] = int(entry["time"])
test_results[entry["status"]] += [
(entry["test"], test["end"] - test["start"])
]
test_results[entry["status"]] += [(entry["test"], test["end"] - test["start"])]
return test_results
@ -73,24 +71,18 @@ print("%d tests timed out." % len(test_results["TIMEOUT"]))
longest_crash = sorted(test_results["CRASH"], key=lambda x: x[1], reverse=True)
print("Longest CRASH test took %dms (%s)" % (longest_crash[0][1], longest_crash[0][0]))
longest_ok = sorted(
test_results["PASS"] + test_results["OK"],
key=lambda x: x[1], reverse=True
)
csv_data = [['Test path', 'Milliseconds']]
with open('longest_ok.csv', 'w') as csv_file:
longest_ok = sorted(test_results["PASS"] + test_results["OK"], key=lambda x: x[1], reverse=True)
csv_data = [["Test path", "Milliseconds"]]
with open("longest_ok.csv", "w") as csv_file:
writer = csv.writer(csv_file)
writer.writerows(csv_data + longest_ok)
longest_fail = sorted(
test_results["ERROR"] + test_results["FAIL"],
key=lambda x: x[1], reverse=True
)
with open('longest_err.csv', 'w') as csv_file:
longest_fail = sorted(test_results["ERROR"] + test_results["FAIL"], key=lambda x: x[1], reverse=True)
with open("longest_err.csv", "w") as csv_file:
writer = csv.writer(csv_file)
writer.writerows(csv_data + longest_fail)
longest_timeout = sorted(test_results["TIMEOUT"], key=lambda x: x[1], reverse=True)
with open('timeouts.csv', 'w') as csv_file:
with open("timeouts.csv", "w") as csv_file:
writer = csv.writer(csv_file)
writer.writerows(csv_data + longest_timeout)


@ -20,8 +20,8 @@
import os
test_root = os.path.join('tests', 'wpt', 'tests')
meta_root = os.path.join('tests', 'wpt', 'meta')
test_root = os.path.join("tests", "wpt", "tests")
meta_root = os.path.join("tests", "wpt", "meta")
test_counts = {}
meta_counts = {}
@ -35,7 +35,7 @@ for base_dir, dir_names, files in os.walk(test_root):
continue
test_files = []
exts = ['.html', '.htm', '.xht', '.xhtml', '.window.js', '.worker.js', '.any.js']
exts = [".html", ".htm", ".xht", ".xhtml", ".window.js", ".worker.js", ".any.js"]
for f in files:
for ext in exts:
if f.endswith(ext):
@ -48,21 +48,21 @@ for base_dir, dir_names, files in os.walk(meta_root):
rel_base = os.path.relpath(base_dir, meta_root)
num_files = len(files)
if '__dir__.ini' in files:
if "__dir__.ini" in files:
num_files -= 1
meta_counts[rel_base] = num_files
final_counts = []
for (test_dir, test_count) in test_counts.items():
for test_dir, test_count in test_counts.items():
if not test_count:
continue
meta_count = meta_counts.get(test_dir, 0)
final_counts += [(test_dir, test_count, meta_count)]
print('Test counts')
print('dir: %% failed (num tests / num failures)')
print("Test counts")
print("dir: %% failed (num tests / num failures)")
s = sorted(final_counts, key=lambda x: x[2] / x[1])
for (test_dir, test_count, meta_count) in reversed(sorted(s, key=lambda x: x[2])):
for test_dir, test_count, meta_count in reversed(sorted(s, key=lambda x: x[2])):
if not meta_count:
continue
print('%s: %.2f%% (%d / %d)' % (test_dir, meta_count / test_count * 100, test_count, meta_count))
print("%s: %.2f%% (%d / %d)" % (test_dir, meta_count / test_count * 100, test_count, meta_count))


@ -22,61 +22,61 @@ SEARCH_PATHS = [
# Individual files providing mach commands.
MACH_MODULES = [
os.path.join('python', 'servo', 'bootstrap_commands.py'),
os.path.join('python', 'servo', 'build_commands.py'),
os.path.join('python', 'servo', 'testing_commands.py'),
os.path.join('python', 'servo', 'post_build_commands.py'),
os.path.join('python', 'servo', 'package_commands.py'),
os.path.join('python', 'servo', 'devenv_commands.py'),
os.path.join("python", "servo", "bootstrap_commands.py"),
os.path.join("python", "servo", "build_commands.py"),
os.path.join("python", "servo", "testing_commands.py"),
os.path.join("python", "servo", "post_build_commands.py"),
os.path.join("python", "servo", "package_commands.py"),
os.path.join("python", "servo", "devenv_commands.py"),
]
CATEGORIES = {
'bootstrap': {
'short': 'Bootstrap Commands',
'long': 'Bootstrap the build system',
'priority': 90,
"bootstrap": {
"short": "Bootstrap Commands",
"long": "Bootstrap the build system",
"priority": 90,
},
'build': {
'short': 'Build Commands',
'long': 'Interact with the build system',
'priority': 80,
"build": {
"short": "Build Commands",
"long": "Interact with the build system",
"priority": 80,
},
'post-build': {
'short': 'Post-build Commands',
'long': 'Common actions performed after completing a build.',
'priority': 70,
"post-build": {
"short": "Post-build Commands",
"long": "Common actions performed after completing a build.",
"priority": 70,
},
'testing': {
'short': 'Testing',
'long': 'Run tests.',
'priority': 60,
"testing": {
"short": "Testing",
"long": "Run tests.",
"priority": 60,
},
'devenv': {
'short': 'Development Environment',
'long': 'Set up and configure your development environment.',
'priority': 50,
"devenv": {
"short": "Development Environment",
"long": "Set up and configure your development environment.",
"priority": 50,
},
'build-dev': {
'short': 'Low-level Build System Interaction',
'long': 'Interact with specific parts of the build system.',
'priority': 20,
"build-dev": {
"short": "Low-level Build System Interaction",
"long": "Interact with specific parts of the build system.",
"priority": 20,
},
'package': {
'short': 'Package',
'long': 'Create objects to distribute',
'priority': 15,
"package": {
"short": "Package",
"long": "Create objects to distribute",
"priority": 15,
},
'misc': {
'short': 'Potpourri',
'long': 'Potent potables and assorted snacks.',
'priority': 10,
"misc": {
"short": "Potpourri",
"long": "Potent potables and assorted snacks.",
"priority": 10,
},
'disabled': {
'short': 'Disabled',
'long': 'The disabled commands are hidden by default. Use -v to display them. These commands are unavailable '
"disabled": {
"short": "Disabled",
"long": "The disabled commands are hidden by default. Use -v to display them. These commands are unavailable "
'for your current context, run "mach <command>" to see why.',
'priority': 0,
}
"priority": 0,
},
}
@ -92,17 +92,25 @@ def _process_exec(args, cwd):
def install_virtual_env_requirements(project_path: str, marker_path: str):
requirements_paths = [
os.path.join(project_path, "python", "requirements.txt"),
os.path.join(project_path, WPT_TOOLS_PATH, "requirements_tests.txt",),
os.path.join(project_path, WPT_RUNNER_PATH, "requirements.txt",),
os.path.join(
project_path,
WPT_TOOLS_PATH,
"requirements_tests.txt",
),
os.path.join(
project_path,
WPT_RUNNER_PATH,
"requirements.txt",
),
]
requirements_hasher = hashlib.sha256()
for path in requirements_paths:
with open(path, 'rb') as file:
with open(path, "rb") as file:
requirements_hasher.update(file.read())
try:
with open(marker_path, 'r') as marker_file:
with open(marker_path, "r") as marker_file:
marker_hash = marker_file.read()
except FileNotFoundError:
marker_hash = None
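# If the stored digest is missing or differs from the freshly computed one,
# the requirements presumably get reinstalled and the marker rewritten; that
# logic lives outside this hunk.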
@ -132,27 +140,28 @@ def _activate_virtualenv(topdir):
_process_exec(["uv", "venv"], cwd=topdir)
script_dir = "Scripts" if _is_windows() else "bin"
runpy.run_path(os.path.join(virtualenv_path, script_dir, 'activate_this.py'))
runpy.run_path(os.path.join(virtualenv_path, script_dir, "activate_this.py"))
install_virtual_env_requirements(topdir, marker_path)
# Turn off warnings about deprecated syntax in our indirect dependencies.
# TODO: Find a better approach for doing this.
import warnings
warnings.filterwarnings('ignore', category=SyntaxWarning, module=r'.*.venv')
warnings.filterwarnings("ignore", category=SyntaxWarning, module=r".*.venv")
def _ensure_case_insensitive_if_windows():
# The folder is called 'python'. By deliberately checking for it with the wrong case, we determine if the file
# system is case sensitive or not.
if _is_windows() and not os.path.exists('Python'):
print('Cannot run mach in a path on a case-sensitive file system on Windows.')
print('For more details, see https://github.com/pypa/virtualenv/issues/935')
if _is_windows() and not os.path.exists("Python"):
print("Cannot run mach in a path on a case-sensitive file system on Windows.")
print("For more details, see https://github.com/pypa/virtualenv/issues/935")
sys.exit(1)
def _is_windows():
return sys.platform == 'win32'
return sys.platform == "win32"
def bootstrap_command_only(topdir):
@ -168,9 +177,9 @@ def bootstrap_command_only(topdir):
import servo.util
try:
force = '-f' in sys.argv or '--force' in sys.argv
skip_platform = '--skip-platform' in sys.argv
skip_lints = '--skip-lints' in sys.argv
force = "-f" in sys.argv or "--force" in sys.argv
skip_platform = "--skip-platform" in sys.argv
skip_lints = "--skip-lints" in sys.argv
servo.platform.get().bootstrap(force, skip_platform, skip_lints)
except NotImplementedError as exception:
print(exception)
@ -186,9 +195,9 @@ def bootstrap(topdir):
# We don't support paths with spaces for now
# https://github.com/servo/servo/issues/9616
if ' ' in topdir and (not _is_windows()):
print('Cannot run mach in a path with spaces.')
print('Current path:', topdir)
if " " in topdir and (not _is_windows()):
print("Cannot run mach in a path with spaces.")
print("Current path:", topdir)
sys.exit(1)
_activate_virtualenv(topdir)
@ -196,7 +205,7 @@ def bootstrap(topdir):
def populate_context(context, key=None):
if key is None:
return
if key == 'topdir':
if key == "topdir":
return topdir
raise AttributeError(key)
@ -204,11 +213,12 @@ def bootstrap(topdir):
sys.path[0:0] = [WPT_PATH, WPT_RUNNER_PATH, WPT_SERVE_PATH]
import mach.main
mach = mach.main.Mach(os.getcwd())
mach.populate_context_handler = populate_context
for category, meta in CATEGORIES.items():
mach.define_category(category, meta['short'], meta['long'], meta['priority'])
mach.define_category(category, meta["short"], meta["long"], meta["priority"])
for path in MACH_MODULES:
# explicitly provide a module name


@ -36,18 +36,10 @@ from servo.util import delete, download_bytes
@CommandProvider
class MachCommands(CommandBase):
@Command('bootstrap',
description='Install required packages for building.',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Boostrap without confirmation')
@CommandArgument('--skip-platform',
action='store_true',
help='Skip platform bootstrapping.')
@CommandArgument('--skip-lints',
action='store_true',
help='Skip tool necessary for linting.')
@Command("bootstrap", description="Install required packages for building.", category="bootstrap")
@CommandArgument("--force", "-f", action="store_true", help="Boostrap without confirmation")
@CommandArgument("--skip-platform", action="store_true", help="Skip platform bootstrapping.")
@CommandArgument("--skip-lints", action="store_true", help="Skip tool necessary for linting.")
def bootstrap(self, force=False, skip_platform=False, skip_lints=False):
# Note: This entry point isn't actually invoked by ./mach bootstrap.
# ./mach bootstrap calls mach_bootstrap.bootstrap_command_only so that
@ -59,12 +51,12 @@ class MachCommands(CommandBase):
return 1
return 0
@Command('bootstrap-gstreamer',
description='Set up a local copy of the gstreamer libraries (linux only).',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Boostrap without confirmation')
@Command(
"bootstrap-gstreamer",
description="Set up a local copy of the gstreamer libraries (linux only).",
category="bootstrap",
)
@CommandArgument("--force", "-f", action="store_true", help="Boostrap without confirmation")
def bootstrap_gstreamer(self, force=False):
try:
servo.platform.get().bootstrap_gstreamer(force)
@ -73,15 +65,15 @@ class MachCommands(CommandBase):
return 1
return 0
@Command('update-hsts-preload',
description='Download the HSTS preload list',
category='bootstrap')
@Command("update-hsts-preload", description="Download the HSTS preload list", category="bootstrap")
def bootstrap_hsts_preload(self, force=False):
preload_filename = "hsts_preload.fstmap"
preload_path = path.join(self.context.topdir, "resources")
chromium_hsts_url = "https://chromium.googlesource.com/chromium/src" + \
"/+/main/net/http/transport_security_state_static.json?format=TEXT"
chromium_hsts_url = (
"https://chromium.googlesource.com/chromium/src"
+ "/+/main/net/http/transport_security_state_static.json?format=TEXT"
)
try:
content_base64 = download_bytes("Chromium HSTS preload list", chromium_hsts_url)
@ -93,7 +85,7 @@ class MachCommands(CommandBase):
# The chromium "json" has single line comments in it which, of course,
# are non-standard/non-valid json. Simply strip them out before parsing
content_json = re.sub(r'(^|\s+)//.*$', '', content_decoded, flags=re.MULTILINE)
content_json = re.sub(r"(^|\s+)//.*$", "", content_decoded, flags=re.MULTILINE)
try:
pins_and_static_preloads = json.loads(content_json)
with tempfile.NamedTemporaryFile(mode="w") as csv_file:
@ -107,13 +99,15 @@ class MachCommands(CommandBase):
print(f"Unable to parse chromium HSTS preload list, has the format changed? \n{e}")
sys.exit(1)
@Command('update-pub-domains',
description='Download the public domains list and update resources/public_domains.txt',
category='bootstrap')
@Command(
"update-pub-domains",
description="Download the public domains list and update resources/public_domains.txt",
category="bootstrap",
)
def bootstrap_pub_suffix(self, force=False):
list_url = "https://publicsuffix.org/list/public_suffix_list.dat"
dst_filename = path.join(self.context.topdir, "resources", "public_domains.txt")
not_implemented_case = re.compile(r'^[^*]+\*')
not_implemented_case = re.compile(r"^[^*]+\*")
try:
content = download_bytes("Public suffix list", list_url)
@ -130,29 +124,22 @@ class MachCommands(CommandBase):
print("Warning: the new list contains a case that servo can't handle: %s" % suffix)
fo.write(suffix.encode("idna") + "\n")
@Command('clean-nightlies',
description='Clean unused nightly builds of Rust and Cargo',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Actually remove stuff')
@CommandArgument('--keep',
default='1',
help='Keep up to this many most recent nightlies')
@Command("clean-nightlies", description="Clean unused nightly builds of Rust and Cargo", category="bootstrap")
@CommandArgument("--force", "-f", action="store_true", help="Actually remove stuff")
@CommandArgument("--keep", default="1", help="Keep up to this many most recent nightlies")
def clean_nightlies(self, force=False, keep=None):
print(f"Current Rust version for Servo: {self.rust_toolchain()}")
old_toolchains = []
keep = int(keep)
stdout = subprocess.check_output(['git', 'log', '--format=%H', 'rust-toolchain.toml'])
stdout = subprocess.check_output(["git", "log", "--format=%H", "rust-toolchain.toml"])
for i, commit_hash in enumerate(stdout.split(), 1):
if i > keep:
toolchain_config_text = subprocess.check_output(
['git', 'show', f'{commit_hash}:rust-toolchain.toml'])
toolchain = toml.loads(toolchain_config_text)['toolchain']['channel']
toolchain_config_text = subprocess.check_output(["git", "show", f"{commit_hash}:rust-toolchain.toml"])
toolchain = toml.loads(toolchain_config_text)["toolchain"]["channel"]
old_toolchains.append(toolchain)
removing_anything = False
stdout = subprocess.check_output(['rustup', 'toolchain', 'list'])
stdout = subprocess.check_output(["rustup", "toolchain", "list"])
for toolchain_with_host in stdout.split():
for old in old_toolchains:
if toolchain_with_host.startswith(old):
@ -165,21 +152,12 @@ class MachCommands(CommandBase):
if not removing_anything:
print("Nothing to remove.")
elif not force:
print("Nothing done. "
"Run `./mach clean-nightlies -f` to actually remove.")
print("Nothing done. Run `./mach clean-nightlies -f` to actually remove.")
@Command('clean-cargo-cache',
description='Clean unused Cargo packages',
category='bootstrap')
@CommandArgument('--force', '-f',
action='store_true',
help='Actually remove stuff')
@CommandArgument('--show-size', '-s',
action='store_true',
help='Show packages size')
@CommandArgument('--keep',
default='1',
help='Keep up to this many most recent dependencies')
@Command("clean-cargo-cache", description="Clean unused Cargo packages", category="bootstrap")
@CommandArgument("--force", "-f", action="store_true", help="Actually remove stuff")
@CommandArgument("--show-size", "-s", action="store_true", help="Show packages size")
@CommandArgument("--keep", default="1", help="Keep up to this many most recent dependencies")
def clean_cargo_cache(self, force=False, show_size=False, keep=None):
def get_size(path):
if os.path.isfile(path):
@ -193,10 +171,11 @@ class MachCommands(CommandBase):
removing_anything = False
packages = {
'crates': {},
'git': {},
"crates": {},
"git": {},
}
import toml
if os.environ.get("CARGO_HOME", ""):
cargo_dir = os.environ.get("CARGO_HOME")
else:
@ -210,7 +189,7 @@ class MachCommands(CommandBase):
for package in content.get("package", []):
source = package.get("source", "")
version = package["version"]
if source == u"registry+https://github.com/rust-lang/crates.io-index":
if source == "registry+https://github.com/rust-lang/crates.io-index":
crate_name = "{}-{}".format(package["name"], version)
if not packages["crates"].get(crate_name, False):
packages["crates"][package["name"]] = {
@ -248,7 +227,7 @@ class MachCommands(CommandBase):
git_db_dir = path.join(git_dir, "db")
git_checkout_dir = path.join(git_dir, "checkouts")
if os.path.isdir(git_db_dir):
git_db_list = list(filter(lambda f: not f.startswith('.'), os.listdir(git_db_dir)))
git_db_list = list(filter(lambda f: not f.startswith("."), os.listdir(git_db_dir)))
else:
git_db_list = []
if os.path.isdir(git_checkout_dir):
@ -265,7 +244,7 @@ class MachCommands(CommandBase):
}
if os.path.isdir(path.join(git_checkout_dir, d)):
with cd(path.join(git_checkout_dir, d)):
git_crate_hash = glob.glob('*')
git_crate_hash = glob.glob("*")
if not git_crate_hash or not os.path.isdir(path.join(git_db_dir, d)):
packages["git"][crate_name]["exist"].append(("del", d, ""))
continue
@ -299,8 +278,12 @@ class MachCommands(CommandBase):
exist_item = exist[2] if packages_type == "git" else exist
if exist_item not in current_crate:
crate_count += 1
if int(crate_count) >= int(keep) or not current_crate or \
exist[0] == "del" or exist[2] == "master":
if (
int(crate_count) >= int(keep)
or not current_crate
or exist[0] == "del"
or exist[2] == "master"
):
removing_anything = True
crate_paths = []
if packages_type == "git":
@ -317,7 +300,7 @@ class MachCommands(CommandBase):
else:
crate_paths.append(exist_path)
exist_checkout_list = glob.glob(path.join(exist_checkout_path, '*'))
exist_checkout_list = glob.glob(path.join(exist_checkout_path, "*"))
if len(exist_checkout_list) <= 1:
crate_paths.append(exist_checkout_path)
if os.path.isdir(exist_db_path):
@ -347,5 +330,4 @@ class MachCommands(CommandBase):
if not removing_anything:
print("Nothing to remove.")
elif not force:
print("\nNothing done. "
"Run `./mach clean-cargo-cache -f` to actually remove.")
print("\nNothing done. Run `./mach clean-cargo-cache -f` to actually remove.")


@ -37,8 +37,12 @@ from servo.command_base import BuildType, CommandBase, call, check_call
from servo.gstreamer import windows_dlls, windows_plugins, package_gstreamer_dylibs
from servo.platform.build_target import BuildTarget
SUPPORTED_ASAN_TARGETS = ["aarch64-apple-darwin", "aarch64-unknown-linux-gnu",
"x86_64-apple-darwin", "x86_64-unknown-linux-gnu"]
SUPPORTED_ASAN_TARGETS = [
"aarch64-apple-darwin",
"aarch64-unknown-linux-gnu",
"x86_64-apple-darwin",
"x86_64-unknown-linux-gnu",
]
def get_rustc_llvm_version() -> Optional[List[int]]:
@ -50,14 +54,14 @@ def get_rustc_llvm_version() -> Optional[List[int]]:
be valid in both rustup managed environment and on nix.
"""
try:
result = subprocess.run(['rustc', '--version', '--verbose'], encoding='utf-8', capture_output=True)
result = subprocess.run(["rustc", "--version", "--verbose"], encoding="utf-8", capture_output=True)
result.check_returncode()
for line in result.stdout.splitlines():
line_lowercase = line.lower()
if line_lowercase.startswith("llvm version:"):
llvm_version = line_lowercase.strip("llvm version:")
llvm_version = llvm_version.strip()
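# Note: str.strip("llvm version:") removes any of those characters from both
# ends rather than the literal prefix; it works here only because version
# strings contain no letters or colons. On Python 3.9+,
# line_lowercase.removeprefix("llvm version:") would express the intent exactly.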
version = llvm_version.split('.')
version = llvm_version.split(".")
print(f"Info: rustc is using LLVM version {'.'.join(version)}")
return version
else:
@ -69,24 +73,27 @@ def get_rustc_llvm_version() -> Optional[List[int]]:
@CommandProvider
class MachCommands(CommandBase):
@Command('build', description='Build Servo', category='build')
@CommandArgument('--jobs', '-j',
default=None,
help='Number of jobs to run in parallel')
@CommandArgument('--no-package',
action='store_true',
help='For Android, disable packaging into a .apk after building')
@CommandArgument('--verbose', '-v',
action='store_true',
help='Print verbose output')
@CommandArgument('--very-verbose', '-vv',
action='store_true',
help='Print very verbose output')
@CommandArgument('params', nargs='...',
help="Command-line arguments to be passed through to Cargo")
@Command("build", description="Build Servo", category="build")
@CommandArgument("--jobs", "-j", default=None, help="Number of jobs to run in parallel")
@CommandArgument(
"--no-package", action="store_true", help="For Android, disable packaging into a .apk after building"
)
@CommandArgument("--verbose", "-v", action="store_true", help="Print verbose output")
@CommandArgument("--very-verbose", "-vv", action="store_true", help="Print very verbose output")
@CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to Cargo")
@CommandBase.common_command_arguments(build_configuration=True, build_type=True, package_configuration=True)
def build(self, build_type: BuildType, jobs=None, params=None, no_package=False,
verbose=False, very_verbose=False, with_asan=False, flavor=None, **kwargs):
def build(
self,
build_type: BuildType,
jobs=None,
params=None,
no_package=False,
verbose=False,
very_verbose=False,
with_asan=False,
flavor=None,
**kwargs,
):
opts = params or []
if build_type.is_release():
@ -112,8 +119,10 @@ class MachCommands(CommandBase):
if with_asan:
if target_triple not in SUPPORTED_ASAN_TARGETS:
print("AddressSanitizer is currently not supported on this platform\n",
"See https://doc.rust-lang.org/beta/unstable-book/compiler-flags/sanitizer.html")
print(
"AddressSanitizer is currently not supported on this platform\n",
"See https://doc.rust-lang.org/beta/unstable-book/compiler-flags/sanitizer.html",
)
sys.exit(1)
# do not use crown (clashes with different rust version)
@ -157,12 +166,14 @@ class MachCommands(CommandBase):
build_start = time()
if host != target_triple and 'windows' in target_triple:
if os.environ.get('VisualStudioVersion') or os.environ.get('VCINSTALLDIR'):
print("Can't cross-compile for Windows inside of a Visual Studio shell.\n"
if host != target_triple and "windows" in target_triple:
if os.environ.get("VisualStudioVersion") or os.environ.get("VCINSTALLDIR"):
print(
"Can't cross-compile for Windows inside of a Visual Studio shell.\n"
"Please run `python mach build [arguments]` to bypass automatic "
"Visual Studio shell, and make sure the VisualStudioVersion and "
"VCINSTALLDIR environment variables are not set.")
"VCINSTALLDIR environment variables are not set."
)
sys.exit(1)
# Gather Cargo build timings (https://doc.rust-lang.org/cargo/reference/timings.html).
@ -173,8 +184,7 @@ class MachCommands(CommandBase):
for key in env:
print((key, env[key]))
status = self.run_cargo_build_like_command(
"rustc", opts, env=env, verbose=verbose, **kwargs)
status = self.run_cargo_build_like_command("rustc", opts, env=env, verbose=verbose, **kwargs)
if status == 0:
built_binary = self.get_binary_path(build_type, asan=with_asan)
@ -201,12 +211,11 @@ class MachCommands(CommandBase):
# like Instruments.app.
try:
import Cocoa
icon_path = path.join(self.get_top_dir(), "resources", "servo_1024.png")
icon = Cocoa.NSImage.alloc().initWithContentsOfFile_(icon_path)
if icon is not None:
Cocoa.NSWorkspace.sharedWorkspace().setIcon_forFile_options_(icon,
built_binary,
0)
Cocoa.NSWorkspace.sharedWorkspace().setIcon_forFile_options_(icon, built_binary, 0)
except ImportError:
pass
@ -220,23 +229,16 @@ class MachCommands(CommandBase):
return status
@Command('clean',
description='Clean the target/ and Python virtual environment directories',
category='build')
@CommandArgument('--manifest-path',
default=None,
help='Path to the manifest to the package to clean')
@CommandArgument('--verbose', '-v',
action='store_true',
help='Print verbose output')
@CommandArgument('params', nargs='...',
help="Command-line arguments to be passed through to Cargo")
@Command("clean", description="Clean the target/ and Python virtual environment directories", category="build")
@CommandArgument("--manifest-path", default=None, help="Path to the manifest to the package to clean")
@CommandArgument("--verbose", "-v", action="store_true", help="Print verbose output")
@CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to Cargo")
def clean(self, manifest_path=None, params=[], verbose=False):
self.ensure_bootstrapped()
virtualenv_path = path.join(self.get_top_dir(), '.venv')
virtualenv_path = path.join(self.get_top_dir(), ".venv")
if path.exists(virtualenv_path):
print('Removing virtualenv directory: %s' % virtualenv_path)
print("Removing virtualenv directory: %s" % virtualenv_path)
shutil.rmtree(virtualenv_path)
opts = ["--manifest-path", manifest_path or path.join(self.context.topdir, "Cargo.toml")]
@ -263,6 +265,7 @@ class MachCommands(CommandBase):
def send_notification(self, **kwargs):
try:
import dbus
bus = dbus.SessionBus()
notify_obj = bus.get_object("org.freedesktop.Notifications", "/org/freedesktop/Notifications")
method = notify_obj.get_dbus_method("Notify", "org.freedesktop.Notifications")
@ -274,17 +277,15 @@ class MachCommands(CommandBase):
kwargs.get("notification_subtitle"),
[], # actions
{"transient": True}, # hints
-1 # timeout
-1, # timeout
)
except Exception as exception:
print(f"[Warning] Could not generate notification: {exception}",
file=sys.stderr)
print(f"[Warning] Could not generate notification: {exception}", file=sys.stderr)
return True
if notify_command:
if call([notify_command, title, message]) != 0:
print("[Warning] Could not generate notification: "
f"Could not run '{notify_command}'.", file=sys.stderr)
print(f"[Warning] Could not generate notification: Could not run '{notify_command}'.", file=sys.stderr)
else:
try:
notifier = LinuxNotifier if sys.platform.startswith("linux") else None
@ -384,11 +385,12 @@ def package_msvc_dlls(servo_exe_dir: str, target: BuildTarget):
"x86_64": "x64",
"i686": "x86",
"aarch64": "arm64",
}[target.triple().split('-')[0]]
}[target.triple().split("-")[0]]
for msvc_redist_dir in servo.visual_studio.find_msvc_redist_dirs(vs_platform):
if copy_file(os.path.join(msvc_redist_dir, "msvcp140.dll")) and \
copy_file(os.path.join(msvc_redist_dir, "vcruntime140.dll")):
if copy_file(os.path.join(msvc_redist_dir, "msvcp140.dll")) and copy_file(
os.path.join(msvc_redist_dir, "vcruntime140.dll")
):
break
# Different SDKs install the file into different directory structures within the


@ -118,7 +118,7 @@ def find_dep_path_newest(package, bin_path):
deps_path = path.join(path.split(bin_path)[0], "build")
candidates = []
with cd(deps_path):
for c in glob(package + '-*'):
for c in glob(package + "-*"):
candidate_path = path.join(deps_path, c)
if path.exists(path.join(candidate_path, "output")):
candidates.append(candidate_path)
@ -152,24 +152,24 @@ def archive_deterministically(dir_to_archive, dest_archive, prepend_path=None):
file_list.append(os.path.join(root, name))
# Sort file entries with the fixed locale
with setlocale('C'):
with setlocale("C"):
file_list.sort(key=functools.cmp_to_key(locale.strcoll))
# Use a temporary file and atomic rename to avoid partially-formed
# packaging (in case of exceptional situations like running out of disk space).
# TODO do this in a temporary folder after #11983 is fixed
temp_file = '{}.temp~'.format(dest_archive)
with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0o644), 'wb') as out_file:
if dest_archive.endswith('.zip'):
with zipfile.ZipFile(temp_file, 'w', zipfile.ZIP_DEFLATED) as zip_file:
temp_file = "{}.temp~".format(dest_archive)
with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0o644), "wb") as out_file:
if dest_archive.endswith(".zip"):
with zipfile.ZipFile(temp_file, "w", zipfile.ZIP_DEFLATED) as zip_file:
for entry in file_list:
arcname = entry
if prepend_path is not None:
arcname = os.path.normpath(os.path.join(prepend_path, arcname))
zip_file.write(entry, arcname=arcname)
else:
with gzip.GzipFile(mode='wb', fileobj=out_file, mtime=0) as gzip_file:
with tarfile.open(fileobj=gzip_file, mode='w:') as tar_file:
with gzip.GzipFile(mode="wb", fileobj=out_file, mtime=0) as gzip_file:
with tarfile.open(fileobj=gzip_file, mode="w:") as tar_file:
for entry in file_list:
arcname = entry
if prepend_path is not None:
@ -180,35 +180,35 @@ def archive_deterministically(dir_to_archive, dest_archive, prepend_path=None):
def call(*args, **kwargs):
"""Wrap `subprocess.call`, printing the command if verbose=True."""
verbose = kwargs.pop('verbose', False)
verbose = kwargs.pop("verbose", False)
if verbose:
print(' '.join(args[0]))
print(" ".join(args[0]))
# we have to use shell=True in order to get PATH handling
# when looking for the binary on Windows
return subprocess.call(*args, shell=sys.platform == 'win32', **kwargs)
return subprocess.call(*args, shell=sys.platform == "win32", **kwargs)
def check_output(*args, **kwargs) -> bytes:
"""Wrap `subprocess.call`, printing the command if verbose=True."""
verbose = kwargs.pop('verbose', False)
verbose = kwargs.pop("verbose", False)
if verbose:
print(' '.join(args[0]))
print(" ".join(args[0]))
# we have to use shell=True in order to get PATH handling
# when looking for the binary on Windows
return subprocess.check_output(*args, shell=sys.platform == 'win32', **kwargs)
return subprocess.check_output(*args, shell=sys.platform == "win32", **kwargs)
def check_call(*args, **kwargs):
"""Wrap `subprocess.check_call`, printing the command if verbose=True.
Also fix any unicode-containing `env`, for subprocess """
verbose = kwargs.pop('verbose', False)
Also fix any unicode-containing `env` for subprocess."""
verbose = kwargs.pop("verbose", False)
if verbose:
print(' '.join(args[0]))
print(" ".join(args[0]))
# we have to use shell=True in order to get PATH handling
# when looking for the binary on Windows
proc = subprocess.Popen(*args, shell=sys.platform == 'win32', **kwargs)
proc = subprocess.Popen(*args, shell=sys.platform == "win32", **kwargs)
status = None
# Leave it to the subprocess to handle Ctrl+C. If it terminates as
# a result of Ctrl+C, proc.wait() will return a status code, and,
@ -221,19 +221,19 @@ def check_call(*args, **kwargs):
pass
if status:
raise subprocess.CalledProcessError(status, ' '.join(*args))
raise subprocess.CalledProcessError(status, " ".join(*args))
def is_windows():
return sys.platform == 'win32'
return sys.platform == "win32"
def is_macosx():
return sys.platform == 'darwin'
return sys.platform == "darwin"
def is_linux():
return sys.platform.startswith('linux')
return sys.platform.startswith("linux")
class BuildNotFound(Exception):
@ -262,14 +262,13 @@ class CommandBase(object):
# Contents of env vars are strings by default. This returns the
# boolean value of the specified environment variable, or the
# specified default if the var doesn't contain True or False
return {'True': True, 'False': False}.get(os.environ.get(var), default)
return {"True": True, "False": False}.get(os.environ.get(var), default)
def resolverelative(category, key):
# Allow ~
self.config[category][key] = path.expanduser(self.config[category][key])
# Resolve relative paths
self.config[category][key] = path.join(context.topdir,
self.config[category][key])
self.config[category][key] = path.join(context.topdir, self.config[category][key])
if not hasattr(self.context, "bootstrapped"):
self.context.bootstrapped = False
@ -286,8 +285,7 @@ class CommandBase(object):
self.config["tools"].setdefault("cache-dir", get_default_cache_dir(context.topdir))
resolverelative("tools", "cache-dir")
default_cargo_home = os.environ.get("CARGO_HOME",
path.join(context.topdir, ".cargo"))
default_cargo_home = os.environ.get("CARGO_HOME", path.join(context.topdir, ".cargo"))
self.config["tools"].setdefault("cargo-home-dir", default_cargo_home)
resolverelative("tools", "cargo-home-dir")
@ -323,7 +321,7 @@ class CommandBase(object):
return self._rust_toolchain
toolchain_file = path.join(self.context.topdir, "rust-toolchain.toml")
self._rust_toolchain = toml.load(toolchain_file)['toolchain']['channel']
self._rust_toolchain = toml.load(toolchain_file)["toolchain"]["channel"]
return self._rust_toolchain
def get_top_dir(self):
@ -337,14 +335,14 @@ class CommandBase(object):
binary_path = path.join(base_path, build_type.directory_name(), binary_name)
if not path.exists(binary_path):
raise BuildNotFound('No Servo binary found. Perhaps you forgot to run `./mach build`?')
raise BuildNotFound("No Servo binary found. Perhaps you forgot to run `./mach build`?")
return binary_path
def detach_volume(self, mounted_volume):
print("Detaching volume {}".format(mounted_volume))
try:
subprocess.check_call(['hdiutil', 'detach', mounted_volume])
subprocess.check_call(["hdiutil", "detach", mounted_volume])
except subprocess.CalledProcessError as e:
print("Could not detach volume {} : {}".format(mounted_volume, e.returncode))
sys.exit(1)
@ -356,7 +354,7 @@ class CommandBase(object):
def mount_dmg(self, dmg_path):
print("Mounting dmg {}".format(dmg_path))
try:
subprocess.check_call(['hdiutil', 'attach', dmg_path])
subprocess.check_call(["hdiutil", "attach", dmg_path])
except subprocess.CalledProcessError as e:
print("Could not mount Servo dmg : {}".format(e.returncode))
sys.exit(1)
@ -374,8 +372,9 @@ class CommandBase(object):
self.detach_volume(mounted_volume)
else:
if is_windows():
command = 'msiexec /a {} /qn TARGETDIR={}'.format(
os.path.join(nightlies_folder, destination_file), destination_folder)
command = "msiexec /a {} /qn TARGETDIR={}".format(
os.path.join(nightlies_folder, destination_file), destination_folder
)
if subprocess.call(command, stdout=PIPE, stderr=PIPE) != 0:
print("Could not extract the nightly executable from the msi package.")
sys.exit(1)
@ -394,8 +393,7 @@ class CommandBase(object):
if nightly_date is None:
return
if not nightly_date:
print(
"No nightly date has been provided although the --nightly or -n flag has been passed.")
print("No nightly date has been provided although the --nightly or -n flag has been passed.")
sys.exit(1)
# Will allow us to fetch the relevant builds from the nightly repository
os_prefix = "linux"
@ -406,55 +404,44 @@ class CommandBase(object):
nightly_date = nightly_date.strip()
# Fetch the filename to download from the build list
repository_index = NIGHTLY_REPOSITORY_URL + "?list-type=2&prefix=nightly"
req = urllib.request.Request(
"{}/{}/{}".format(repository_index, os_prefix, nightly_date))
req = urllib.request.Request("{}/{}/{}".format(repository_index, os_prefix, nightly_date))
try:
response = urllib.request.urlopen(req).read()
tree = XML(response)
namespaces = {'ns': tree.tag[1:tree.tag.index('}')]}
file_to_download = tree.find('ns:Contents', namespaces).find(
'ns:Key', namespaces).text
namespaces = {"ns": tree.tag[1 : tree.tag.index("}")]}
file_to_download = tree.find("ns:Contents", namespaces).find("ns:Key", namespaces).text
except urllib.error.URLError as e:
print("Could not fetch the available nightly versions from the repository : {}".format(
e.reason))
print("Could not fetch the available nightly versions from the repository : {}".format(e.reason))
sys.exit(1)
except AttributeError:
print("Could not fetch a nightly version for date {} and platform {}".format(
nightly_date, os_prefix))
print("Could not fetch a nightly version for date {} and platform {}".format(nightly_date, os_prefix))
sys.exit(1)
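# Editor's sketch (assumed example, not in the diff): ElementTree stores the
# namespace inside each tag as "{uri}local", so slicing between "{" and "}"
# recovers the URI that the S3 listing's find() lookups need. The URL below is
# a placeholder.
from xml.etree.ElementTree import XML

doc = XML('<Root xmlns="http://example.com/ns"><Contents><Key>a.tar.gz</Key></Contents></Root>')
ns = {"ns": doc.tag[1 : doc.tag.index("}")]}  # {"ns": "http://example.com/ns"}
assert doc.find("ns:Contents", ns).find("ns:Key", ns).text == "a.tar.gz"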
nightly_target_directory = path.join(self.context.topdir, "target")
# ':' is not a valid character in a file name on Windows;
# make sure the OS-specific separator is used
target_file_path = file_to_download.replace(':', '-').split('/')
destination_file = os.path.join(
nightly_target_directory, os.path.join(*target_file_path))
target_file_path = file_to_download.replace(":", "-").split("/")
destination_file = os.path.join(nightly_target_directory, os.path.join(*target_file_path))
# Once extracted, the nightly folder name is the tar name without the final extension
# (e.g. /foo/bar/baz.tar.gz extracts to /foo/bar/baz.tar)
destination_folder = os.path.splitext(destination_file)[0]
nightlies_folder = path.join(
nightly_target_directory, 'nightly', os_prefix)
nightlies_folder = path.join(nightly_target_directory, "nightly", os_prefix)
# Make sure the target directory exists
if not os.path.isdir(nightlies_folder):
print("The nightly folder for the target does not exist yet. Creating {}".format(
nightlies_folder))
print("The nightly folder for the target does not exist yet. Creating {}".format(nightlies_folder))
os.makedirs(nightlies_folder)
# Download the nightly version
if os.path.isfile(path.join(nightlies_folder, destination_file)):
print("The nightly file {} has already been downloaded.".format(
destination_file))
print("The nightly file {} has already been downloaded.".format(destination_file))
else:
print("The nightly {} does not exist yet, downloading it.".format(
destination_file))
download_file(destination_file, NIGHTLY_REPOSITORY_URL
+ file_to_download, destination_file)
print("The nightly {} does not exist yet, downloading it.".format(destination_file))
download_file(destination_file, NIGHTLY_REPOSITORY_URL + file_to_download, destination_file)
# Extract the downloaded nightly version
if os.path.isdir(destination_folder):
print("The nightly folder {} has already been extracted.".format(
destination_folder))
print("The nightly folder {} has already been extracted.".format(destination_folder))
else:
self.extract_nightly(nightlies_folder, destination_folder, destination_file)
@ -493,34 +480,34 @@ class CommandBase(object):
elif self.config["build"]["incremental"] is not None:
env["CARGO_INCREMENTAL"] = "0"
env['RUSTFLAGS'] = env.get('RUSTFLAGS', "")
env["RUSTFLAGS"] = env.get("RUSTFLAGS", "")
if self.config["build"]["rustflags"]:
env['RUSTFLAGS'] += " " + self.config["build"]["rustflags"]
env["RUSTFLAGS"] += " " + self.config["build"]["rustflags"]
if not (self.config["build"]["ccache"] == ""):
env['CCACHE'] = self.config["build"]["ccache"]
env["CCACHE"] = self.config["build"]["ccache"]
env["CARGO_TARGET_DIR"] = servo.util.get_target_dir()
# Work around https://github.com/servo/servo/issues/24446
# Argument-less str.split normalizes leading, trailing, and double spaces
env['RUSTFLAGS'] = " ".join(env['RUSTFLAGS'].split())
env["RUSTFLAGS"] = " ".join(env["RUSTFLAGS"].split())
# Suppress known false-positives during memory leak sanitizing.
env["LSAN_OPTIONS"] = f"{env.get('LSAN_OPTIONS', '')}:suppressions={ASAN_LEAK_SUPPRESSION_FILE}"
self.target.configure_build_environment(env, self.config, self.context.topdir)
if sys.platform == 'win32' and 'windows' not in self.target.triple():
if sys.platform == "win32" and "windows" not in self.target.triple():
# aws-lc-rs only supports the Ninja Generator when cross-compiling on windows hosts to non-windows.
env['TARGET_CMAKE_GENERATOR'] = "Ninja"
if shutil.which('ninja') is None:
env["TARGET_CMAKE_GENERATOR"] = "Ninja"
if shutil.which("ninja") is None:
print("Error: Cross-compiling servo on windows requires the Ninja tool to be installed and in PATH.")
print("Hint: Ninja-build is available on github at: https://github.com/ninja-build/ninja/releases")
exit(1)
# `tr` is also required by the CMake build rules of `aws-lc-rs`
if shutil.which('tr') is None:
if shutil.which("tr") is None:
print("Error: Cross-compiling servo on windows requires the `tr` tool, which was not found.")
print("Hint: Try running ./mach from `git bash` instead of powershell.")
exit(1)
@ -528,132 +515,146 @@ class CommandBase(object):
return env
@staticmethod
def common_command_arguments(build_configuration=False,
build_type=False,
binary_selection=False,
package_configuration=False
def common_command_arguments(
build_configuration=False, build_type=False, binary_selection=False, package_configuration=False
):
decorators = []
if build_type or binary_selection:
decorators += [
CommandArgumentGroup('Build Type'),
CommandArgument('--release', '-r', group="Build Type",
action='store_true',
help='Build in release mode'),
CommandArgument('--dev', '--debug', '-d', group="Build Type",
action='store_true',
help='Build in development mode'),
CommandArgument('--prod', '--production', group="Build Type",
action='store_true',
help='Build in release mode without debug assertions'),
CommandArgument('--profile', group="Build Type",
help='Build with custom Cargo profile'),
CommandArgument('--with-asan', action='store_true', help="Build with AddressSanitizer"),
CommandArgumentGroup("Build Type"),
CommandArgument(
"--release", "-r", group="Build Type", action="store_true", help="Build in release mode"
),
CommandArgument(
"--dev", "--debug", "-d", group="Build Type", action="store_true", help="Build in development mode"
),
CommandArgument(
"--prod",
"--production",
group="Build Type",
action="store_true",
help="Build in release mode without debug assertions",
),
CommandArgument("--profile", group="Build Type", help="Build with custom Cargo profile"),
CommandArgument("--with-asan", action="store_true", help="Build with AddressSanitizer"),
]
if build_configuration:
decorators += [
CommandArgumentGroup('Cross Compilation'),
CommandArgumentGroup("Cross Compilation"),
CommandArgument(
'--target', '-t',
"--target",
"-t",
group="Cross Compilation",
default=None,
help='Cross compile for given target platform',
help="Cross compile for given target platform",
),
CommandArgument(
'--android', default=None, action='store_true',
help='Build for Android. If --target is not specified, this '
f'will choose the default target architecture ({AndroidTarget.DEFAULT_TRIPLE}).',
"--android",
default=None,
action="store_true",
help="Build for Android. If --target is not specified, this "
f"will choose the default target architecture ({AndroidTarget.DEFAULT_TRIPLE}).",
),
CommandArgument(
'--ohos', default=None, action='store_true',
help='Build for OpenHarmony. If --target is not specified, this '
f'will choose a default target architecture ({OpenHarmonyTarget.DEFAULT_TRIPLE}).',
"--ohos",
default=None,
action="store_true",
help="Build for OpenHarmony. If --target is not specified, this "
f"will choose a default target architecture ({OpenHarmonyTarget.DEFAULT_TRIPLE}).",
),
CommandArgument('--win-arm64', action='store_true', help="Use arm64 Windows target"),
CommandArgumentGroup('Feature Selection'),
CommandArgument("--win-arm64", action="store_true", help="Use arm64 Windows target"),
CommandArgumentGroup("Feature Selection"),
CommandArgument(
'--features', default=None, group="Feature Selection", nargs='+',
help='Space-separated list of features to also build',
"--features",
default=None,
group="Feature Selection",
nargs="+",
help="Space-separated list of features to also build",
),
CommandArgument(
'--media-stack', default=None, group="Feature Selection",
choices=["gstreamer", "dummy"], help='Which media stack to use',
"--media-stack",
default=None,
group="Feature Selection",
choices=["gstreamer", "dummy"],
help="Which media stack to use",
),
CommandArgument(
'--debug-mozjs',
"--debug-mozjs",
default=False,
group="Feature Selection",
action='store_true',
help='Enable debug assertions in mozjs',
action="store_true",
help="Enable debug assertions in mozjs",
),
CommandArgument(
'--with-debug-assertions',
"--with-debug-assertions",
default=False,
group="Feature Selection",
action='store_true',
help='Enable debug assertions in release',
action="store_true",
help="Enable debug assertions in release",
),
CommandArgument(
'--with-frame-pointer',
default=None, group="Feature Selection",
action='store_true',
help='Build with frame pointer enabled, used by the background hang monitor.',
"--with-frame-pointer",
default=None,
group="Feature Selection",
action="store_true",
help="Build with frame pointer enabled, used by the background hang monitor.",
),
CommandArgument(
'--use-crown',
default=False,
action='store_true',
help="Enable Servo's `crown` linter tool"
)
"--use-crown", default=False, action="store_true", help="Enable Servo's `crown` linter tool"
),
]
if package_configuration:
decorators += [
CommandArgumentGroup('Packaging options'),
CommandArgumentGroup("Packaging options"),
CommandArgument(
'--flavor', default=None, group="Packaging options",
help='Product flavor to be used when packaging with Gradle/Hvigor (android/ohos).'
"--flavor",
default=None,
group="Packaging options",
help="Product flavor to be used when packaging with Gradle/Hvigor (android/ohos).",
),
]
if binary_selection:
decorators += [
CommandArgumentGroup('Binary selection'),
CommandArgument('--bin', default=None,
help='Launch with specific binary'),
CommandArgument('--nightly', '-n', default=None,
help='Specify a YYYY-MM-DD nightly build to run'),
CommandArgumentGroup("Binary selection"),
CommandArgument("--bin", default=None, help="Launch with specific binary"),
CommandArgument("--nightly", "-n", default=None, help="Specify a YYYY-MM-DD nightly build to run"),
]
def decorator_function(original_function):
def configuration_decorator(self, *args, **kwargs):
if build_type or binary_selection:
# If `build_type` already exists in kwargs we are doing a recursive dispatch.
if 'build_type' not in kwargs:
kwargs['build_type'] = self.configure_build_type(
kwargs['release'], kwargs['dev'], kwargs['prod'], kwargs['profile'],
if "build_type" not in kwargs:
kwargs["build_type"] = self.configure_build_type(
kwargs["release"],
kwargs["dev"],
kwargs["prod"],
kwargs["profile"],
)
kwargs.pop('release', None)
kwargs.pop('dev', None)
kwargs.pop('prod', None)
kwargs.pop('profile', None)
kwargs.pop("release", None)
kwargs.pop("dev", None)
kwargs.pop("prod", None)
kwargs.pop("profile", None)
if build_configuration:
self.configure_build_target(kwargs)
self.features = kwargs.get("features", None) or []
self.enable_media = self.is_media_enabled(kwargs['media_stack'])
self.enable_media = self.is_media_enabled(kwargs["media_stack"])
if binary_selection:
if 'servo_binary' not in kwargs:
kwargs['servo_binary'] = (kwargs.get('bin')
or self.get_nightly_binary_path(kwargs.get('nightly'))
or self.get_binary_path(kwargs.get('build_type'),
asan=kwargs.get('with_asan')))
kwargs.pop('bin')
kwargs.pop('nightly')
if "servo_binary" not in kwargs:
kwargs["servo_binary"] = (
kwargs.get("bin")
or self.get_nightly_binary_path(kwargs.get("nightly"))
or self.get_binary_path(kwargs.get("build_type"), asan=kwargs.get("with_asan"))
)
kwargs.pop("bin")
kwargs.pop("nightly")
if not build_type:
kwargs.pop('build_type')
kwargs.pop('with_asan')
kwargs.pop("build_type")
kwargs.pop("with_asan")
return original_function(self, *args, **kwargs)
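# Editor's sketch of the decorator-factory pattern used by
# common_command_arguments (names below are hypothetical, not Servo's real
# API): the factory collects argument decorators, then wraps the command so
# shared kwargs are consumed before the original function runs.
def with_shared_arguments(*argument_decorators):
    def decorator_function(original_function):
        def wrapper(self, *args, **kwargs):
            kwargs.pop("shared_option", None)  # consume a kwarg added by the decorators
            return original_function(self, *args, **kwargs)

        for decorate in reversed(argument_decorators):
            wrapper = decorate(wrapper)
        return wrapper

    return decorator_function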
@ -669,9 +670,9 @@ class CommandBase(object):
def allow_target_configuration(original_function):
def target_configuration_decorator(self, *args, **kwargs):
self.configure_build_target(kwargs, suppress_log=True)
kwargs.pop('target', False)
kwargs.pop('android', False)
kwargs.pop('ohos', False)
kwargs.pop("target", False)
kwargs.pop("android", False)
kwargs.pop("ohos", False)
return original_function(self, *args, **kwargs)
return target_configuration_decorator
@ -709,15 +710,15 @@ class CommandBase(object):
return BuildType.custom(profile)
def configure_build_target(self, kwargs: Dict[str, Any], suppress_log: bool = False):
if hasattr(self.context, 'target'):
if hasattr(self.context, "target"):
# This call is for a dispatched command and we've already configured
# the target, so just use it.
self.target = self.context.target
return
android = kwargs.get('android') or self.config["build"]["android"]
ohos = kwargs.get('ohos') or self.config["build"]["ohos"]
target_triple = kwargs.get('target')
android = kwargs.get("android") or self.config["build"]["android"]
ohos = kwargs.get("ohos") or self.config["build"]["ohos"]
target_triple = kwargs.get("target")
if android and ohos:
print("Cannot build both android and ohos targets simultaneously.")
@ -768,20 +769,23 @@ class CommandBase(object):
# Once we drop support for this platform (it's currently needed for wpt.fyi runners),
# we can remove this workaround and officially only support Ubuntu 22.04 and up.
platform = servo.platform.get()
if not self.target.is_cross_build() and platform.is_linux and \
not platform.is_gstreamer_installed(self.target):
if not self.target.is_cross_build() and platform.is_linux and not platform.is_gstreamer_installed(self.target):
return False
return media_stack != "dummy"
def run_cargo_build_like_command(
self, command: str, cargo_args: List[str],
env=None, verbose=False,
debug_mozjs=False, with_debug_assertions=False,
self,
command: str,
cargo_args: List[str],
env=None,
verbose=False,
debug_mozjs=False,
with_debug_assertions=False,
with_frame_pointer=False,
use_crown=False,
target_override: Optional[str] = None,
**_kwargs
**_kwargs,
):
env = env or self.build_env()
@ -790,8 +794,7 @@ class CommandBase(object):
platform = servo.platform.get()
if self.enable_media and not platform.is_gstreamer_installed(self.target):
raise FileNotFoundError(
"GStreamer libraries not found (>= version 1.18)."
"Please see installation instructions in README.md"
"GStreamer libraries not found (>= version 1.18).Please see installation instructions in README.md"
)
args = []
@ -806,21 +809,23 @@ class CommandBase(object):
args += ["--target", self.target.triple()]
if type(self.target) in [AndroidTarget, OpenHarmonyTarget]:
# Note: in practice `cargo rustc` should just be used unconditionally.
assert command != 'build', "For Android / OpenHarmony `cargo rustc` must be used instead of cargo build"
if command == 'rustc':
assert command != "build", "For Android / OpenHarmony `cargo rustc` must be used instead of cargo build"
if command == "rustc":
args += ["--lib", "--crate-type=cdylib"]
features = []
if use_crown:
if 'CARGO_BUILD_RUSTC' in env:
current_rustc = env['CARGO_BUILD_RUSTC']
if current_rustc != 'crown':
print('Error: `mach` was called with `--use-crown` while `CARGO_BUILD_RUSTC` was'
f'already set to `{current_rustc}` in the parent environment.\n'
'These options conflict, please specify only one of them.')
if "CARGO_BUILD_RUSTC" in env:
current_rustc = env["CARGO_BUILD_RUSTC"]
if current_rustc != "crown":
print(
"Error: `mach` was called with `--use-crown` while `CARGO_BUILD_RUSTC` was"
f"already set to `{current_rustc}` in the parent environment.\n"
"These options conflict, please specify only one of them."
)
sys.exit(1)
env['CARGO_BUILD_RUSTC'] = 'crown'
env["CARGO_BUILD_RUSTC"] = "crown"
# Modifying `RUSTC` or `CARGO_BUILD_RUSTC` to use a linter does not cause
# `cargo check` to rebuild. To work around this bug, use a `crown` feature
# to invalidate caches and force a rebuild / relint.
@ -835,7 +840,7 @@ class CommandBase(object):
features.append("debugmozjs")
if with_frame_pointer:
env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C force-frame-pointers=yes"
env["RUSTFLAGS"] = env.get("RUSTFLAGS", "") + " -C force-frame-pointers=yes"
features.append("profilemozjs")
if self.config["build"]["webgl-backtrace"]:
features.append("webgl-backtrace")
@ -844,11 +849,11 @@ class CommandBase(object):
args += ["--features", " ".join(features)]
if with_debug_assertions or self.config["build"]["debug-assertions"]:
env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C debug_assertions"
env["RUSTFLAGS"] = env.get("RUSTFLAGS", "") + " -C debug_assertions"
# mozjs gets its Python from `env['PYTHON3']`, which defaults to `python3`,
# but uv venv on Windows only provides a `python`, not `python3`.
env['PYTHON3'] = "python"
env["PYTHON3"] = "python"
return call(["cargo", command] + args + cargo_args, env=env, verbose=verbose)
@ -877,13 +882,9 @@ class CommandBase(object):
if not self.target.is_cross_build():
return
installed_targets = check_output(
["rustup", "target", "list", "--installed"],
cwd=self.context.topdir
).decode()
installed_targets = check_output(["rustup", "target", "list", "--installed"], cwd=self.context.topdir).decode()
if self.target.triple() not in installed_targets:
check_call(["rustup", "target", "add", self.target.triple()],
cwd=self.context.topdir)
check_call(["rustup", "target", "add", self.target.triple()], cwd=self.context.topdir)
def ensure_rustup_version(self):
try:
@ -891,16 +892,18 @@ class CommandBase(object):
["rustup" + servo.platform.get().executable_suffix(), "--version"],
# Silence "info: This is the version for the rustup toolchain manager,
# not the rustc compiler."
stderr=open(os.devnull, "wb")
stderr=open(os.devnull, "wb"),
)
except OSError as e:
if e.errno == NO_SUCH_FILE_OR_DIRECTORY:
print("It looks like rustup is not installed. See instructions at "
"https://github.com/servo/servo/#setting-up-your-environment")
print(
"It looks like rustup is not installed. See instructions at "
"https://github.com/servo/servo/#setting-up-your-environment"
)
print()
sys.exit(1)
raise
version = tuple(map(int, re.match(br"rustup (\d+)\.(\d+)\.(\d+)", version_line).groups()))
version = tuple(map(int, re.match(rb"rustup (\d+)\.(\d+)\.(\d+)", version_line).groups()))
version_needed = (1, 23, 0)
if version < version_needed:
print("rustup is at version %s.%s.%s, Servo requires %s.%s.%s or more recent." % (version + version_needed))
@ -910,25 +913,25 @@ class CommandBase(object):
def ensure_clobbered(self, target_dir=None):
if target_dir is None:
target_dir = util.get_target_dir()
auto = True if os.environ.get('AUTOCLOBBER', False) else False
src_clobber = os.path.join(self.context.topdir, 'CLOBBER')
target_clobber = os.path.join(target_dir, 'CLOBBER')
auto = True if os.environ.get("AUTOCLOBBER", False) else False
src_clobber = os.path.join(self.context.topdir, "CLOBBER")
target_clobber = os.path.join(target_dir, "CLOBBER")
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if not os.path.exists(target_clobber):
# Simply touch the file.
with open(target_clobber, 'a'):
with open(target_clobber, "a"):
pass
if auto:
if os.path.getmtime(src_clobber) > os.path.getmtime(target_clobber):
print('Automatically clobbering target directory: {}'.format(target_dir))
print("Automatically clobbering target directory: {}".format(target_dir))
try:
Registrar.dispatch("clean", context=self.context, verbose=True)
print('Successfully completed auto clobber.')
print("Successfully completed auto clobber.")
except subprocess.CalledProcessError as error:
sys.exit(error)
else:


@ -25,12 +25,10 @@ from servo.command_base import CommandBase, cd, call
@CommandProvider
class MachCommands(CommandBase):
@Command('check',
description='Run "cargo check"',
category='devenv')
@Command("check", description='Run "cargo check"', category="devenv")
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to cargo check")
"params", default=None, nargs="...", help="Command-line arguments to be passed through to cargo check"
)
@CommandBase.common_command_arguments(build_configuration=True, build_type=False)
def check(self, params, **kwargs):
if not params:
@ -40,45 +38,34 @@ class MachCommands(CommandBase):
self.ensure_clobbered()
status = self.run_cargo_build_like_command("check", params, **kwargs)
if status == 0:
print('Finished checking, binary NOT updated. Consider ./mach build before ./mach run')
print("Finished checking, binary NOT updated. Consider ./mach build before ./mach run")
return status
@Command('cargo-update',
description='Same as update-cargo',
category='devenv')
@Command("cargo-update", description="Same as update-cargo", category="devenv")
@CommandArgument(
'params', default=None, nargs='...',
help='Command-line arguments to be passed through to cargo update')
@CommandArgument(
'--package', '-p', default=None,
help='Updates selected package')
@CommandArgument(
'--all-packages', '-a', action='store_true',
help='Updates all packages')
@CommandArgument(
'--dry-run', '-d', action='store_true',
help='Show outdated packages.')
"params", default=None, nargs="...", help="Command-line arguments to be passed through to cargo update"
)
@CommandArgument("--package", "-p", default=None, help="Updates selected package")
@CommandArgument("--all-packages", "-a", action="store_true", help="Updates all packages")
@CommandArgument("--dry-run", "-d", action="store_true", help="Show outdated packages.")
def cargo_update(self, params=None, package=None, all_packages=None, dry_run=None):
self.update_cargo(params, package, all_packages, dry_run)
@Command('update-cargo',
description='Update Cargo dependencies',
category='devenv')
@Command("update-cargo", description="Update Cargo dependencies", category="devenv")
@CommandArgument(
'params', default=None, nargs='...',
help='Command-line arguments to be passed through to cargo update')
"params", default=None, nargs="...", help="Command-line arguments to be passed through to cargo update"
)
@CommandArgument("--package", "-p", default=None, help="Updates the selected package")
@CommandArgument(
'--package', '-p', default=None,
help='Updates the selected package')
@CommandArgument(
'--all-packages', '-a', action='store_true',
help='Updates all packages. NOTE! This is very likely to break your '
'working copy, making it impossible to build servo. Only do '
'this if you really know what you are doing.')
@CommandArgument(
'--dry-run', '-d', action='store_true',
help='Show outdated packages.')
"--all-packages",
"-a",
action="store_true",
help="Updates all packages. NOTE! This is very likely to break your "
"working copy, making it impossible to build servo. Only do "
"this if you really know what you are doing.",
)
@CommandArgument("--dry-run", "-d", action="store_true", help="Show outdated packages.")
def update_cargo(self, params=None, package=None, all_packages=None, dry_run=None):
if not params:
params = []
@ -97,12 +84,8 @@ class MachCommands(CommandBase):
with cd(self.context.topdir):
call(["cargo", "update"] + params, env=self.build_env())
@Command('rustc',
description='Run the Rust compiler',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to rustc")
@Command("rustc", description="Run the Rust compiler", category="devenv")
@CommandArgument("params", default=None, nargs="...", help="Command-line arguments to be passed through to rustc")
def rustc(self, params):
if params is None:
params = []
@ -110,12 +93,10 @@ class MachCommands(CommandBase):
self.ensure_bootstrapped()
return call(["rustc"] + params, env=self.build_env())
@Command('cargo-fix',
description='Run "cargo fix"',
category='devenv')
@Command("cargo-fix", description='Run "cargo fix"', category="devenv")
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to cargo-fix")
"params", default=None, nargs="...", help="Command-line arguments to be passed through to cargo-fix"
)
@CommandBase.common_command_arguments(build_configuration=True, build_type=False)
def cargo_fix(self, params, **kwargs):
if not params:
@ -125,12 +106,8 @@ class MachCommands(CommandBase):
self.ensure_clobbered()
return self.run_cargo_build_like_command("fix", params, **kwargs)
@Command('clippy',
description='Run "cargo clippy"',
category='devenv')
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to clippy")
@Command("clippy", description='Run "cargo clippy"', category="devenv")
@CommandArgument("params", default=None, nargs="...", help="Command-line arguments to be passed through to clippy")
@CommandBase.common_command_arguments(build_configuration=True, build_type=False)
def cargo_clippy(self, params, **kwargs):
if not params:
@ -139,48 +116,42 @@ class MachCommands(CommandBase):
self.ensure_bootstrapped()
self.ensure_clobbered()
env = self.build_env()
env['RUSTC'] = 'rustc'
env["RUSTC"] = "rustc"
return self.run_cargo_build_like_command("clippy", params, env=env, **kwargs)
@Command('grep',
description='`git grep` for selected directories.',
category='devenv')
@Command("grep", description="`git grep` for selected directories.", category="devenv")
@CommandArgument(
'params', default=None, nargs='...',
help="Command-line arguments to be passed through to `git grep`")
"params", default=None, nargs="...", help="Command-line arguments to be passed through to `git grep`"
)
def grep(self, params):
if not params:
params = []
# get all directories under tests/
tests_dirs = listdir('tests')
tests_dirs = listdir("tests")
# Directories to be excluded under tests/
excluded_tests_dirs = ['wpt', 'jquery']
excluded_tests_dirs = ["wpt", "jquery"]
tests_dirs = filter(lambda dir: dir not in excluded_tests_dirs, tests_dirs)
# Set of directories in project root
root_dirs = ['components', 'ports', 'python', 'etc', 'resources']
root_dirs = ["components", "ports", "python", "etc", "resources"]
# Generate absolute paths for directories in tests/ and project-root/
tests_dirs_abs = [path.join(self.context.topdir, 'tests', s) for s in tests_dirs]
tests_dirs_abs = [path.join(self.context.topdir, "tests", s) for s in tests_dirs]
root_dirs_abs = [path.join(self.context.topdir, s) for s in root_dirs]
# Absolute paths for all directories to be considered
grep_paths = root_dirs_abs + tests_dirs_abs
return call(
["git"] + ["grep"] + params + ['--'] + grep_paths + [':(exclude)*.min.js', ':(exclude)*.min.css'],
env=self.build_env())
["git"] + ["grep"] + params + ["--"] + grep_paths + [":(exclude)*.min.js", ":(exclude)*.min.css"],
env=self.build_env(),
)
@Command('fetch',
description='Fetch Rust, Cargo and Cargo dependencies',
category='devenv')
@Command("fetch", description="Fetch Rust, Cargo and Cargo dependencies", category="devenv")
def fetch(self):
self.ensure_bootstrapped()
return call(["cargo", "fetch"], env=self.build_env())
@Command('ndk-stack',
description='Invoke the ndk-stack tool with the expected symbol paths',
category='devenv')
@CommandArgument('--release', action='store_true', help="Use release build symbols")
@CommandArgument('--target', action='store', default="armv7-linux-androideabi",
help="Build target")
@CommandArgument('logfile', action='store', help="Path to logcat output with crash report")
@Command("ndk-stack", description="Invoke the ndk-stack tool with the expected symbol paths", category="devenv")
@CommandArgument("--release", action="store_true", help="Use release build symbols")
@CommandArgument("--target", action="store", default="armv7-linux-androideabi", help="Build target")
@CommandArgument("logfile", action="store", help="Path to logcat output with crash report")
def stack(self, release, target, logfile):
if not path.isfile(logfile):
print(logfile + " doesn't exist")
@ -190,21 +161,13 @@ class MachCommands(CommandBase):
ndk_stack = path.join(env["ANDROID_NDK"], "ndk-stack")
self.setup_configuration_for_android_target(target)
sym_path = path.join(
"target",
target,
"release" if release else "debug",
"apk",
"obj",
"local",
self.config["android"]["lib"])
"target", target, "release" if release else "debug", "apk", "obj", "local", self.config["android"]["lib"]
)
print(subprocess.check_output([ndk_stack, "-sym", sym_path, "-dump", logfile]))
@Command('ndk-gdb',
description='Invoke ndk-gdb tool with the expected symbol paths',
category='devenv')
@CommandArgument('--release', action='store_true', help="Use release build symbols")
@CommandArgument('--target', action='store', default="armv7-linux-androideabi",
help="Build target")
@Command("ndk-gdb", description="Invoke ndk-gdb tool with the expected symbol paths", category="devenv")
@CommandArgument("--release", action="store_true", help="Use release build symbols")
@CommandArgument("--target", action="store", default="armv7-linux-androideabi", help="Build target")
def ndk_gdb(self, release, target):
env = self.build_env()
ndk_gdb = path.join(env["ANDROID_NDK"], "ndk-gdb")
@ -218,7 +181,7 @@ class MachCommands(CommandBase):
"apk",
"obj",
"local",
self.config["android"]["lib"]
self.config["android"]["lib"],
),
path.join(
getcwd(),
@ -227,27 +190,38 @@ class MachCommands(CommandBase):
"release" if release else "debug",
"apk",
"libs",
self.config["android"]["lib"]
self.config["android"]["lib"],
),
]
env["NDK_PROJECT_PATH"] = path.join(getcwd(), "support", "android", "apk")
signal.signal(signal.SIGINT, signal.SIG_IGN)
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write('\n'.join([
f.write(
"\n".join(
[
"python",
"param = gdb.parameter('solib-search-path')",
"param += ':{}'".format(':'.join(sym_paths)),
"param += ':{}'".format(":".join(sym_paths)),
"gdb.execute('set solib-search-path ' + param)",
"end",
]))
]
)
)
p = subprocess.Popen([
p = subprocess.Popen(
[
ndk_gdb,
"--adb", adb_path,
"--project", "support/android/apk/servoapp/src/main/",
"--launch", "org.servo.servoshell.MainActivity",
"-x", f.name,
"--adb",
adb_path,
"--project",
"support/android/apk/servoapp/src/main/",
"--launch",
"org.servo.servoshell.MainActivity",
"-x",
f.name,
"--verbose",
], env=env)
],
env=env,
)
return p.wait()


@ -51,10 +51,21 @@ class DevtoolsTests(unittest.IsolatedAsyncioTestCase):
def test_sources_list(self):
self.start_web_server(test_dir=os.path.join(DevtoolsTests.script_path, "devtools_tests/sources"))
self.run_servoshell()
self.assert_sources_list(2, set([
tuple([f"{self.base_url}/classic.js", f"{self.base_url}/test.html", "https://servo.org/js/load-table.js"]),
self.assert_sources_list(
2,
set(
[
tuple(
[
f"{self.base_url}/classic.js",
f"{self.base_url}/test.html",
"https://servo.org/js/load-table.js",
]
),
tuple([f"{self.base_url}/worker.js"]),
]))
]
),
)
def test_sources_list_with_data_no_scripts(self):
self.run_servoshell(url="data:text/html,")
@ -70,7 +81,7 @@ class DevtoolsTests(unittest.IsolatedAsyncioTestCase):
def test_sources_list_with_data_external_classic_script(self):
self.start_web_server(test_dir=os.path.join(DevtoolsTests.script_path, "devtools_tests/sources"))
self.run_servoshell(url=f"data:text/html,<script src=\"{self.base_url}/classic.js\"></script>")
self.run_servoshell(url=f'data:text/html,<script src="{self.base_url}/classic.js"></script>')
self.assert_sources_list(1, set([tuple([f"{self.base_url}/classic.js"])]))
def test_sources_list_with_data_empty_inline_module_script(self):
@ -158,7 +169,9 @@ class DevtoolsTests(unittest.IsolatedAsyncioTestCase):
done.set_result(e)
client.add_event_listener(
watcher.actor_id, Events.Watcher.TARGET_AVAILABLE_FORM, on_target,
watcher.actor_id,
Events.Watcher.TARGET_AVAILABLE_FORM,
on_target,
)
watcher.watch_targets(WatcherActor.Targets.FRAME)
watcher.watch_targets(WatcherActor.Targets.WORKER)


@ -15,7 +15,7 @@ from typing import Set
# This file is called as a script from components/servo/build.rs, so
# we need to explicitly modify the search path here.
sys.path[0:0] = [os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))]
sys.path[0:0] = [os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))]
from servo.platform.build_target import BuildTarget # noqa: E402
GSTREAMER_BASE_LIBS = [
@ -158,18 +158,12 @@ def windows_dlls():
def windows_plugins():
libs = [
*GSTREAMER_PLUGIN_LIBS,
*GSTREAMER_WIN_PLUGIN_LIBS
]
libs = [*GSTREAMER_PLUGIN_LIBS, *GSTREAMER_WIN_PLUGIN_LIBS]
return [f"{lib}.dll" for lib in libs]
def macos_plugins():
plugins = [
*GSTREAMER_PLUGIN_LIBS,
*GSTREAMER_MAC_PLUGIN_LIBS
]
plugins = [*GSTREAMER_PLUGIN_LIBS, *GSTREAMER_MAC_PLUGIN_LIBS]
return [f"lib{plugin}.dylib" for plugin in plugins]
@ -178,22 +172,23 @@ def write_plugin_list(target):
plugins = []
if "apple-" in target:
plugins = macos_plugins()
elif '-windows-' in target:
elif "-windows-" in target:
plugins = windows_plugins()
print('''/* This is a generated file. Do not modify. */
print(
"""/* This is a generated file. Do not modify. */
pub(crate) static GSTREAMER_PLUGINS: &[&str] = &[
%s
];
''' % ',\n'.join(map(lambda x: '"' + x + '"', plugins)))
"""
% ",\n".join(map(lambda x: '"' + x + '"', plugins))
)
def is_macos_system_library(library_path: str) -> bool:
"""Returns true if if the given dependency line from otool refers to
a system library that should not be packaged."""
return (library_path.startswith("/System/Library")
or library_path.startswith("/usr/lib")
or ".asan." in library_path)
return library_path.startswith("/System/Library") or library_path.startswith("/usr/lib") or ".asan." in library_path
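# Editor's usage example (the paths below are assumptions, not from the diff):
# system frameworks and /usr/lib dylibs are left alone, while anything else is
# treated as packageable.
assert is_macos_system_library("/usr/lib/libSystem.B.dylib")
assert is_macos_system_library("/System/Library/Frameworks/Cocoa.framework/Cocoa")
assert not is_macos_system_library("/opt/homebrew/lib/libgstreamer-1.0.0.dylib")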
def rewrite_dependencies_to_be_relative(binary: str, dependency_lines: Set[str], relative_path: str):
@ -205,7 +200,7 @@ def rewrite_dependencies_to_be_relative(binary: str, dependency_lines: Set[str],
continue
new_path = os.path.join("@executable_path", relative_path, os.path.basename(dependency_line))
arguments = ['install_name_tool', '-change', dependency_line, new_path, binary]
arguments = ["install_name_tool", "-change", dependency_line, new_path, binary]
try:
subprocess.check_call(arguments)
except subprocess.CalledProcessError as exception:
@ -220,7 +215,7 @@ def make_rpath_path_absolute(dylib_path_from_otool: str, rpath: str):
# Not every dependency is in the same directory as the binary that references it. For
# instance, plugin dylibs can be found in "gstreamer-1.0".
path_relative_to_rpath = dylib_path_from_otool.replace('@rpath/', '')
path_relative_to_rpath = dylib_path_from_otool.replace("@rpath/", "")
for relative_directory in ["", "..", "gstreamer-1.0"]:
full_path = os.path.join(rpath, relative_directory, path_relative_to_rpath)
if os.path.exists(full_path):
@ -232,13 +227,13 @@ def make_rpath_path_absolute(dylib_path_from_otool: str, rpath: str):
def find_non_system_dependencies_with_otool(binary_path: str) -> Set[str]:
"""Given a binary path, find all dylib dependency lines that do not refer to
system libraries."""
process = subprocess.Popen(['/usr/bin/otool', '-L', binary_path], stdout=subprocess.PIPE)
process = subprocess.Popen(["/usr/bin/otool", "-L", binary_path], stdout=subprocess.PIPE)
output = set()
for line in map(lambda line: line.decode('utf8'), process.stdout):
for line in map(lambda line: line.decode("utf8"), process.stdout):
if not line.startswith("\t"):
continue
dependency = line.split(' ', 1)[0][1:]
dependency = line.split(" ", 1)[0][1:]
# No need to do any processing for system libraries. They should be
# present on all macOS systems.
@ -288,8 +283,7 @@ def package_gstreamer_dylibs(binary_path: str, library_target_directory: str, ta
# which are loaded dynamically at runtime and don't appear in `otool` output.
binary_dependencies = set(find_non_system_dependencies_with_otool(binary_path))
binary_dependencies.update(
[os.path.join(gstreamer_root_libs, "gstreamer-1.0", plugin)
for plugin in macos_plugins()]
[os.path.join(gstreamer_root_libs, "gstreamer-1.0", plugin) for plugin in macos_plugins()]
)
rewrite_dependencies_to_be_relative(binary_path, binary_dependencies, relative_path)


@ -15,12 +15,7 @@ import test
import logging
import random
test_summary = {
test.Status.KILLED: 0,
test.Status.SURVIVED: 0,
test.Status.SKIPPED: 0,
test.Status.UNEXPECTED: 0
}
test_summary = {test.Status.KILLED: 0, test.Status.SURVIVED: 0, test.Status.SKIPPED: 0, test.Status.UNEXPECTED: 0}
def get_folders_list(path):
@ -33,7 +28,7 @@ def get_folders_list(path):
def mutation_test_for(mutation_path):
test_mapping_file = join(mutation_path, 'test_mapping.json')
test_mapping_file = join(mutation_path, "test_mapping.json")
if isfile(test_mapping_file):
json_data = open(test_mapping_file).read()
test_mapping = json.loads(json_data)
@ -41,7 +36,7 @@ def mutation_test_for(mutation_path):
source_files = list(test_mapping.keys())
random.shuffle(source_files)
for src_file in source_files:
status = test.mutation_test(join(mutation_path, src_file.encode('utf-8')), test_mapping[src_file])
status = test.mutation_test(join(mutation_path, src_file), test_mapping[src_file])
test_summary[status] += 1
# Run mutation test in all folder in the path.
for folder in get_folders_list(mutation_path):


@ -39,7 +39,7 @@ class Strategy:
def mutate(self, file_name):
line_numbers = []
for line in fileinput.input(file_name):
if not is_comment(line) and re.search(self._replace_strategy['regex'], line):
if not is_comment(line) and re.search(self._replace_strategy["regex"], line):
line_numbers.append(fileinput.lineno())
if len(line_numbers) == 0:
return -1
@ -47,7 +47,7 @@ class Strategy:
mutation_line_number = line_numbers[random.randint(0, len(line_numbers) - 1)]
for line in fileinput.input(file_name, inplace=True):
if fileinput.lineno() == mutation_line_number:
line = re.sub(self._replace_strategy['regex'], self._replace_strategy['replaceString'], line)
line = re.sub(self._replace_strategy["regex"], self._replace_strategy["replaceString"], line)
print(line.rstrip())
return mutation_line_number
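# Editor's sketch (standalone example, not in the diff): with inplace=True,
# fileinput redirects stdout back into the file, so echoing every line while
# substituting on the chosen one rewrites the file in place, mirroring
# Strategy.mutate above.
import fileinput
import re

def replace_on_line(file_name, line_number, pattern, replacement):
    for line in fileinput.input(file_name, inplace=True):
        if fileinput.lineno() == line_number:
            line = re.sub(pattern, replacement, line)
        print(line.rstrip())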
@ -56,30 +56,21 @@ class AndOr(Strategy):
def __init__(self):
Strategy.__init__(self)
logical_and = r"(?<=\s)&&(?=\s)"
self._replace_strategy = {
'regex': logical_and,
'replaceString': '||'
}
self._replace_strategy = {"regex": logical_and, "replaceString": "||"}
class IfTrue(Strategy):
def __init__(self):
Strategy.__init__(self)
if_condition = r"(?<=if\s)\s*(?!let\s)(.*)(?=\s\{)"
self._replace_strategy = {
'regex': if_condition,
'replaceString': 'true'
}
self._replace_strategy = {"regex": if_condition, "replaceString": "true"}
class IfFalse(Strategy):
def __init__(self):
Strategy.__init__(self)
if_condition = r"(?<=if\s)\s*(?!let\s)(.*)(?=\s\{)"
self._replace_strategy = {
'regex': if_condition,
'replaceString': 'false'
}
self._replace_strategy = {"regex": if_condition, "replaceString": "false"}
class ModifyComparision(Strategy):
@ -87,10 +78,7 @@ class ModifyComparision(Strategy):
Strategy.__init__(self)
less_than_equals = r"(?<=\s)(\<)\=(?=\s)"
greater_than_equals = r"(?<=\s)(\<)\=(?=\s)"
self._replace_strategy = {
'regex': (less_than_equals + '|' + greater_than_equals),
'replaceString': r"\1"
}
self._replace_strategy = {"regex": (less_than_equals + "|" + greater_than_equals), "replaceString": r"\1"}
class MinusToPlus(Strategy):
@ -98,10 +86,7 @@ class MinusToPlus(Strategy):
Strategy.__init__(self)
arithmetic_minus = r"(?<=\s)\-(?=\s.+)"
minus_in_shorthand = r"(?<=\s)\-(?=\=)"
self._replace_strategy = {
'regex': (arithmetic_minus + '|' + minus_in_shorthand),
'replaceString': '+'
}
self._replace_strategy = {"regex": (arithmetic_minus + "|" + minus_in_shorthand), "replaceString": "+"}
class PlusToMinus(Strategy):
@ -109,20 +94,14 @@ class PlusToMinus(Strategy):
Strategy.__init__(self)
arithmetic_plus = r"(?<=[^\"]\s)\+(?=\s[^A-Z\'?\":\{]+)"
plus_in_shorthand = r"(?<=\s)\+(?=\=)"
self._replace_strategy = {
'regex': (arithmetic_plus + '|' + plus_in_shorthand),
'replaceString': '-'
}
self._replace_strategy = {"regex": (arithmetic_plus + "|" + plus_in_shorthand), "replaceString": "-"}
class AtomicString(Strategy):
def __init__(self):
Strategy.__init__(self)
string_literal = r"(?<=\").+(?=\")"
self._replace_strategy = {
'regex': string_literal,
'replaceString': ' '
}
self._replace_strategy = {"regex": string_literal, "replaceString": " "}
class DuplicateLine(Strategy):
@ -136,9 +115,20 @@ class DuplicateLine(Strategy):
plus_equals_statement = r".+?\s\+\=\s.*"
minus_equals_statement = r".+?\s\-\=\s.*"
self._replace_strategy = {
'regex': (append_statement + '|' + remove_statement + '|' + push_statement
+ '|' + pop_statement + '|' + plus_equals_statement + '|' + minus_equals_statement),
'replaceString': r"\g<0>\n\g<0>",
"regex": (
append_statement
+ "|"
+ remove_statement
+ "|"
+ push_statement
+ "|"
+ pop_statement
+ "|"
+ plus_equals_statement
+ "|"
+ minus_equals_statement
),
"replaceString": r"\g<0>\n\g<0>",
}
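# Editor's example (illustrative): `\g<0>` refers to the whole match, so the
# replacement emits the matched statement twice, separated by a newline.
import re

line = "    counter += 1;"
assert re.sub(r".+?\s\+\=\s.*", r"\g<0>\n\g<0>", line) == "    counter += 1;\n    counter += 1;"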
@ -161,14 +151,17 @@ class DeleteIfBlock(Strategy):
while line_to_mutate <= len(code_lines):
current_line = code_lines[line_to_mutate - 1]
next_line = code_lines[line_to_mutate]
if re.search(self.else_block, current_line) is not None \
or re.search(self.else_block, next_line) is not None:
if (
re.search(self.else_block, current_line) is not None
or re.search(self.else_block, next_line) is not None
):
if_blocks.pop(random_index)
if len(if_blocks) == 0:
return -1
else:
random_index, start_counter, end_counter, lines_to_delete, line_to_mutate = \
init_variables(if_blocks)
random_index, start_counter, end_counter, lines_to_delete, line_to_mutate = init_variables(
if_blocks
)
continue
lines_to_delete.append(line_to_mutate)
for ch in current_line:
@ -183,8 +176,17 @@ class DeleteIfBlock(Strategy):
def get_strategies():
return AndOr, IfTrue, IfFalse, ModifyComparision, PlusToMinus, MinusToPlus, \
AtomicString, DuplicateLine, DeleteIfBlock
return (
AndOr,
IfTrue,
IfFalse,
ModifyComparision,
PlusToMinus,
MinusToPlus,
AtomicString,
DuplicateLine,
DeleteIfBlock,
)
class Mutator:


@ -14,7 +14,8 @@ import logging
from mutator import Mutator, get_strategies
from enum import Enum
DEVNULL = open(os.devnull, 'wb')
DEVNULL = open(os.devnull, "wb")
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s", level=logging.DEBUG)
@ -28,7 +29,7 @@ class Status(Enum):
def mutation_test(file_name, tests):
status = Status.UNEXPECTED
local_changes_present = subprocess.call('git diff --quiet {0}'.format(file_name), shell=True)
local_changes_present = subprocess.call("git diff --quiet {0}".format(file_name), shell=True)
if local_changes_present == 1:
status = Status.SKIPPED
logging.warning("{0} has local changes, please commit/remove changes before running the test".format(file_name))
@ -46,24 +47,24 @@ def mutation_test(file_name, tests):
if subprocess.call(test_command, shell=True, stdout=DEVNULL):
logging.error("Compilation Failed: Unexpected error")
logging.error("Failed: while running `{0}`".format(test_command))
subprocess.call('git --no-pager diff {0}'.format(file_name), shell=True)
subprocess.call("git --no-pager diff {0}".format(file_name), shell=True)
status = Status.UNEXPECTED
else:
for test in tests:
test_command = "python mach test-wpt {0} --release".format(test.encode('utf-8'))
test_command = "python mach test-wpt {0} --release".format(test.encode("utf-8"))
logging.info("running `{0}` test for mutant {1}:{2}".format(test, file_name, mutated_line))
test_status = subprocess.call(test_command, shell=True, stdout=DEVNULL)
if test_status != 0:
logging.error("Failed: while running `{0}`".format(test_command))
logging.error("mutated file {0} diff".format(file_name))
subprocess.call('git --no-pager diff {0}'.format(file_name), shell=True)
subprocess.call("git --no-pager diff {0}".format(file_name), shell=True)
status = Status.SURVIVED
else:
logging.info("Success: Mutation killed by {0}".format(test.encode('utf-8')))
logging.info("Success: Mutation killed by {0}".format(test.encode("utf-8")))
status = Status.KILLED
break
logging.info("reverting mutant {0}:{1}\n".format(file_name, mutated_line))
subprocess.call('git checkout {0}'.format(file_name), shell=True)
subprocess.call("git checkout {0}".format(file_name), shell=True)
break
elif not len(strategies):
# All strategies are tried


@ -42,23 +42,25 @@ from servo.command_base import (
from servo.util import delete, get_target_dir
PACKAGES = {
'android': [
'android/aarch64-linux-android/release/servoapp.apk',
'android/aarch64-linux-android/release/servoview.aar',
"android": [
"android/aarch64-linux-android/release/servoapp.apk",
"android/aarch64-linux-android/release/servoview.aar",
],
'linux': [
'production/servo-tech-demo.tar.gz',
"linux": [
"production/servo-tech-demo.tar.gz",
],
'mac': [
'production/servo-tech-demo.dmg',
"mac": [
"production/servo-tech-demo.dmg",
],
'windows-msvc': [
r'production\msi\Servo.exe',
r'production\msi\Servo.zip',
"windows-msvc": [
r"production\msi\Servo.exe",
r"production\msi\Servo.zip",
],
'ohos': [
('openharmony/aarch64-unknown-linux-ohos/release/entry/build/'
'default/outputs/default/servoshell-default-signed.hap')
"ohos": [
(
"openharmony/aarch64-unknown-linux-ohos/release/entry/build/"
"default/outputs/default/servoshell-default-signed.hap"
)
],
}
@ -71,8 +73,7 @@ def packages_for_platform(platform):
def listfiles(directory):
return [f for f in os.listdir(directory)
if path.isfile(path.join(directory, f))]
return [f for f in os.listdir(directory) if path.isfile(path.join(directory, f))]
def copy_windows_dependencies(binary_path, destination):
@ -101,20 +102,10 @@ def check_call_with_randomized_backoff(args: List[str], retries: int) -> int:
@CommandProvider
class PackageCommands(CommandBase):
@Command('package',
description='Package Servo',
category='package')
@CommandArgument('--android',
default=None,
action='store_true',
help='Package Android')
@CommandArgument('--ohos',
default=None,
action='store_true',
help='Package OpenHarmony')
@CommandArgument('--target', '-t',
default=None,
help='Package for given target platform')
@Command("package", description="Package Servo", category="package")
@CommandArgument("--android", default=None, action="store_true", help="Package Android")
@CommandArgument("--ohos", default=None, action="store_true", help="Package OpenHarmony")
@CommandArgument("--target", "-t", default=None, help="Package for given target platform")
@CommandBase.common_command_arguments(build_configuration=False, build_type=True, package_configuration=True)
@CommandBase.allow_target_configuration
def package(self, build_type: BuildType, flavor=None, with_asan=False):
@ -146,11 +137,11 @@ class PackageCommands(CommandBase):
if flavor is not None:
flavor_name = flavor.title()
dir_to_resources = path.join(self.get_top_dir(), 'target', 'android', 'resources')
dir_to_resources = path.join(self.get_top_dir(), "target", "android", "resources")
if path.exists(dir_to_resources):
delete(dir_to_resources)
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copytree(path.join(dir_to_root, "resources"), dir_to_resources)
variant = ":assemble" + flavor_name + arch_string + build_type_string
apk_task_name = ":servoapp" + variant
@ -167,8 +158,7 @@ class PackageCommands(CommandBase):
# so copy the source files into the target/openharmony directory first.
ohos_app_dir = path.join(self.get_top_dir(), "support", "openharmony")
build_mode = build_type.directory_name()
ohos_target_dir = path.join(
self.get_top_dir(), "target", "openharmony", self.target.triple(), build_mode)
ohos_target_dir = path.join(self.get_top_dir(), "target", "openharmony", self.target.triple(), build_mode)
if path.exists(ohos_target_dir):
print("Cleaning up from previous packaging")
delete(ohos_target_dir)
@ -186,9 +176,14 @@ class PackageCommands(CommandBase):
if flavor is not None:
flavor_name = flavor
hvigor_command = ["--no-daemon", "assembleHap",
"-p", f"product={flavor_name}",
"-p", f"buildMode={build_mode}"]
hvigor_command = [
"--no-daemon",
"assembleHap",
"-p",
f"product={flavor_name}",
"-p",
f"buildMode={build_mode}",
]
# Detect if PATH already has hvigor, or else fall back to the npm installation
# provided via HVIGOR_PATH
if "HVIGOR_PATH" not in env:
@ -198,9 +193,11 @@ class PackageCommands(CommandBase):
print(f"Found `hvigorw` with version {str(version, 'utf-8').strip()} in system PATH")
hvigor_command[0:0] = ["hvigorw"]
except FileNotFoundError:
print("Unable to find `hvigor` tool. Please either modify PATH to include the"
print(
"Unable to find `hvigor` tool. Please either modify PATH to include the"
"path to hvigorw or set the HVIGOR_PATH environment variable to the npm"
"installation containing `node_modules` directory with hvigor modules.")
"installation containing `node_modules` directory with hvigor modules."
)
sys.exit(1)
except subprocess.CalledProcessError as e:
print(f"hvigor exited with the following error: {e}")
@ -227,21 +224,21 @@ class PackageCommands(CommandBase):
except subprocess.CalledProcessError as e:
print("Packaging OpenHarmony exited with return value %d" % e.returncode)
return e.returncode
elif 'darwin' in self.target.triple():
elif "darwin" in self.target.triple():
print("Creating Servo.app")
dir_to_dmg = path.join(target_dir, 'dmg')
dir_to_app = path.join(dir_to_dmg, 'Servo.app')
dir_to_resources = path.join(dir_to_app, 'Contents', 'Resources')
dir_to_dmg = path.join(target_dir, "dmg")
dir_to_app = path.join(dir_to_dmg, "Servo.app")
dir_to_resources = path.join(dir_to_app, "Contents", "Resources")
if path.exists(dir_to_dmg):
print("Cleaning up from previous packaging")
delete(dir_to_dmg)
print("Copying files")
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copy2(path.join(dir_to_root, 'Info.plist'), path.join(dir_to_app, 'Contents', 'Info.plist'))
shutil.copytree(path.join(dir_to_root, "resources"), dir_to_resources)
shutil.copy2(path.join(dir_to_root, "Info.plist"), path.join(dir_to_app, "Contents", "Info.plist"))
content_dir = path.join(dir_to_app, 'Contents', 'MacOS')
lib_dir = path.join(content_dir, 'lib')
content_dir = path.join(dir_to_app, "Contents", "MacOS")
lib_dir = path.join(content_dir, "lib")
os.makedirs(lib_dir)
shutil.copy2(binary_path, content_dir)
@ -250,19 +247,19 @@ class PackageCommands(CommandBase):
servo.gstreamer.package_gstreamer_dylibs(dmg_binary, lib_dir, self.target)
print("Adding version to Credits.rtf")
version_command = [binary_path, '--version']
p = subprocess.Popen(version_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
version_command = [binary_path, "--version"]
p = subprocess.Popen(
version_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
)
version, stderr = p.communicate()
if p.returncode != 0:
raise Exception("Error occurred when getting Servo version: " + stderr)
version = "Nightly version: " + version
import mako.template
template_path = path.join(dir_to_resources, 'Credits.rtf.mako')
credits_path = path.join(dir_to_resources, 'Credits.rtf')
template_path = path.join(dir_to_resources, "Credits.rtf.mako")
credits_path = path.join(dir_to_resources, "Credits.rtf")
with open(template_path) as template_file:
template = mako.template.Template(template_file.read())
with open(credits_path, "w") as credits_file:
@ -270,7 +267,7 @@ class PackageCommands(CommandBase):
delete(template_path)
print("Creating dmg")
os.symlink('/Applications', path.join(dir_to_dmg, 'Applications'))
os.symlink("/Applications", path.join(dir_to_dmg, "Applications"))
dmg_path = path.join(target_dir, "servo-tech-demo.dmg")
if path.exists(dmg_path):
@ -282,10 +279,9 @@ class PackageCommands(CommandBase):
# after a random wait.
try:
check_call_with_randomized_backoff(
['hdiutil', 'create', '-volname', 'Servo',
'-megabytes', '900', dmg_path,
'-srcfolder', dir_to_dmg],
retries=3)
["hdiutil", "create", "-volname", "Servo", "-megabytes", "900", dmg_path, "-srcfolder", dir_to_dmg],
retries=3,
)
except subprocess.CalledProcessError as e:
print("Packaging MacOS dmg exited with return value %d" % e.returncode)
return e.returncode
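# Editor's sketch of a retry-with-random-backoff helper; the real body of
# check_call_with_randomized_backoff is not shown in this hunk, so this shape
# is an assumption, not Servo's implementation.
import random
import subprocess
import time
from typing import List

def call_with_backoff(args: List[str], retries: int) -> int:
    for attempt in range(retries):
        try:
            subprocess.check_call(args)
            return 0
        except subprocess.CalledProcessError:
            if attempt == retries - 1:
                raise
            time.sleep(random.uniform(1, 5) * (attempt + 1))  # randomized, growing wait
    return 0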
@ -294,42 +290,42 @@ class PackageCommands(CommandBase):
delete(dir_to_dmg)
print("Packaged Servo into " + dmg_path)
elif 'windows' in self.target.triple():
dir_to_msi = path.join(target_dir, 'msi')
elif "windows" in self.target.triple():
dir_to_msi = path.join(target_dir, "msi")
if path.exists(dir_to_msi):
print("Cleaning up from previous packaging")
delete(dir_to_msi)
os.makedirs(dir_to_msi)
print("Copying files")
dir_to_temp = path.join(dir_to_msi, 'temp')
dir_to_resources = path.join(dir_to_temp, 'resources')
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
dir_to_temp = path.join(dir_to_msi, "temp")
dir_to_resources = path.join(dir_to_temp, "resources")
shutil.copytree(path.join(dir_to_root, "resources"), dir_to_resources)
shutil.copy(binary_path, dir_to_temp)
copy_windows_dependencies(target_dir, dir_to_temp)
# generate Servo.wxs
import mako.template
template_path = path.join(dir_to_root, "support", "windows", "Servo.wxs.mako")
template = mako.template.Template(open(template_path).read())
wxs_path = path.join(dir_to_msi, "Installer.wxs")
open(wxs_path, "w").write(template.render(
exe_path=target_dir,
dir_to_temp=dir_to_temp,
resources_path=dir_to_resources))
open(wxs_path, "w").write(
template.render(exe_path=target_dir, dir_to_temp=dir_to_temp, resources_path=dir_to_resources)
)
# run candle and light
print("Creating MSI")
try:
with cd(dir_to_msi):
subprocess.check_call(['candle', wxs_path])
subprocess.check_call(["candle", wxs_path])
except subprocess.CalledProcessError as e:
print("WiX candle exited with return value %d" % e.returncode)
return e.returncode
try:
wxsobj_path = "{}.wixobj".format(path.splitext(wxs_path)[0])
with cd(dir_to_msi):
subprocess.check_call(['light', wxsobj_path])
subprocess.check_call(["light", wxsobj_path])
except subprocess.CalledProcessError as e:
print("WiX light exited with return value %d" % e.returncode)
return e.returncode
@ -338,18 +334,18 @@ class PackageCommands(CommandBase):
# Generate bundle with Servo installer.
print("Creating bundle")
shutil.copy(path.join(dir_to_root, 'support', 'windows', 'Servo.wxs'), dir_to_msi)
bundle_wxs_path = path.join(dir_to_msi, 'Servo.wxs')
shutil.copy(path.join(dir_to_root, "support", "windows", "Servo.wxs"), dir_to_msi)
bundle_wxs_path = path.join(dir_to_msi, "Servo.wxs")
try:
with cd(dir_to_msi):
subprocess.check_call(['candle', bundle_wxs_path, '-ext', 'WixBalExtension'])
subprocess.check_call(["candle", bundle_wxs_path, "-ext", "WixBalExtension"])
except subprocess.CalledProcessError as e:
print("WiX candle exited with return value %d" % e.returncode)
return e.returncode
try:
wxsobj_path = "{}.wixobj".format(path.splitext(bundle_wxs_path)[0])
with cd(dir_to_msi):
subprocess.check_call(['light', wxsobj_path, '-ext', 'WixBalExtension'])
subprocess.check_call(["light", wxsobj_path, "-ext", "WixBalExtension"])
except subprocess.CalledProcessError as e:
print("WiX light exited with return value %d" % e.returncode)
return e.returncode
@ -357,51 +353,39 @@ class PackageCommands(CommandBase):
print("Creating ZIP")
zip_path = path.join(dir_to_msi, "Servo.zip")
archive_deterministically(dir_to_temp, zip_path, prepend_path='servo/')
archive_deterministically(dir_to_temp, zip_path, prepend_path="servo/")
print("Packaged Servo into " + zip_path)
print("Cleaning up")
delete(dir_to_temp)
delete(dir_to_installer)
else:
dir_to_temp = path.join(target_dir, 'packaging-temp')
dir_to_temp = path.join(target_dir, "packaging-temp")
if path.exists(dir_to_temp):
# TODO(aneeshusa): lock dir_to_temp to prevent simultaneous builds
print("Cleaning up from previous packaging")
delete(dir_to_temp)
print("Copying files")
dir_to_resources = path.join(dir_to_temp, 'resources')
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
dir_to_resources = path.join(dir_to_temp, "resources")
shutil.copytree(path.join(dir_to_root, "resources"), dir_to_resources)
shutil.copy(binary_path, dir_to_temp)
print("Creating tarball")
tar_path = path.join(target_dir, 'servo-tech-demo.tar.gz')
tar_path = path.join(target_dir, "servo-tech-demo.tar.gz")
archive_deterministically(dir_to_temp, tar_path, prepend_path='servo/')
archive_deterministically(dir_to_temp, tar_path, prepend_path="servo/")
print("Cleaning up")
delete(dir_to_temp)
print("Packaged Servo into " + tar_path)
@Command('install',
description='Install Servo (currently, Android and Windows only)',
category='package')
@CommandArgument('--android',
action='store_true',
help='Install on Android')
@CommandArgument('--ohos',
action='store_true',
help='Install on OpenHarmony')
@CommandArgument('--emulator',
action='store_true',
help='For Android, install to the only emulated device')
@CommandArgument('--usb',
action='store_true',
help='For Android, install to the only USB device')
@CommandArgument('--target', '-t',
default=None,
help='Install the given target platform')
@Command("install", description="Install Servo (currently, Android and Windows only)", category="package")
@CommandArgument("--android", action="store_true", help="Install on Android")
@CommandArgument("--ohos", action="store_true", help="Install on OpenHarmony")
@CommandArgument("--emulator", action="store_true", help="For Android, install to the only emulated device")
@CommandArgument("--usb", action="store_true", help="For Android, install to the only USB device")
@CommandArgument("--target", "-t", default=None, help="Install the given target platform")
@CommandBase.common_command_arguments(build_configuration=False, build_type=True, package_configuration=True)
@CommandBase.allow_target_configuration
def install(self, build_type: BuildType, emulator=False, usb=False, with_asan=False, flavor=None):
@ -410,9 +394,7 @@ class PackageCommands(CommandBase):
binary_path = self.get_binary_path(build_type, asan=with_asan)
except BuildNotFound:
print("Servo build not found. Building servo...")
result = Registrar.dispatch(
"build", context=self.context, build_type=build_type, flavor=flavor
)
result = Registrar.dispatch("build", context=self.context, build_type=build_type, flavor=flavor)
if result:
return result
try:
@ -437,33 +419,26 @@ class PackageCommands(CommandBase):
hdc_path = path.join(env["OHOS_SDK_NATIVE"], "../", "toolchains", "hdc")
exec_command = [hdc_path, "install", "-r", pkg_path]
elif is_windows():
pkg_path = path.join(path.dirname(binary_path), 'msi', 'Servo.msi')
pkg_path = path.join(path.dirname(binary_path), "msi", "Servo.msi")
exec_command = ["msiexec", "/i", pkg_path]
if not path.exists(pkg_path):
print("Servo package not found. Packaging servo...")
result = Registrar.dispatch(
"package", context=self.context, build_type=build_type, flavor=flavor
)
result = Registrar.dispatch("package", context=self.context, build_type=build_type, flavor=flavor)
if result != 0:
return result
print(" ".join(exec_command))
return subprocess.call(exec_command, env=env)
@Command('upload-nightly',
description='Upload Servo nightly to S3',
category='package')
@CommandArgument('platform',
choices=PACKAGES.keys(),
help='Package platform type to upload')
@CommandArgument('--secret-from-environment',
action='store_true',
help='Retrieve the appropriate secrets from the environment.')
@CommandArgument('--github-release-id',
default=None,
type=int,
help='The github release to upload the nightly builds.')
@Command("upload-nightly", description="Upload Servo nightly to S3", category="package")
@CommandArgument("platform", choices=PACKAGES.keys(), help="Package platform type to upload")
@CommandArgument(
"--secret-from-environment", action="store_true", help="Retrieve the appropriate secrets from the environment."
)
@CommandArgument(
"--github-release-id", default=None, type=int, help="The github release to upload the nightly builds."
)
def upload_nightly(self, platform, secret_from_environment, github_release_id):
import boto3
@ -471,69 +446,62 @@ class PackageCommands(CommandBase):
aws_access_key = None
aws_secret_access_key = None
if secret_from_environment:
secret = json.loads(os.environ['S3_UPLOAD_CREDENTIALS'])
secret = json.loads(os.environ["S3_UPLOAD_CREDENTIALS"])
aws_access_key = secret["aws_access_key_id"]
aws_secret_access_key = secret["aws_secret_access_key"]
return (aws_access_key, aws_secret_access_key)
def nightly_filename(package, timestamp):
return '{}-{}'.format(
timestamp.isoformat() + 'Z', # The `Z` denotes UTC
path.basename(package)
return "{}-{}".format(
timestamp.isoformat() + "Z", # The `Z` denotes UTC
path.basename(package),
)
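# Illustrative example: a timestamp of 2025-05-26T11:54:43 and a package named
# "servo-tech-demo.tar.gz" yield "2025-05-26T11:54:43Z-servo-tech-demo.tar.gz".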
def upload_to_github_release(platform, package, package_hash):
if not github_release_id:
return
extension = path.basename(package).partition('.')[2]
g = Github(os.environ['NIGHTLY_REPO_TOKEN'])
nightly_repo = g.get_repo(os.environ['NIGHTLY_REPO'])
extension = path.basename(package).partition(".")[2]
g = Github(os.environ["NIGHTLY_REPO_TOKEN"])
nightly_repo = g.get_repo(os.environ["NIGHTLY_REPO"])
release = nightly_repo.get_release(github_release_id)
package_hash_fileobj = io.BytesIO(package_hash.encode('utf-8'))
package_hash_fileobj = io.BytesIO(package_hash.encode("utf-8"))
asset_name = f'servo-latest.{extension}'
asset_name = f"servo-latest.{extension}"
release.upload_asset(package, name=asset_name)
release.upload_asset_from_memory(
package_hash_fileobj,
package_hash_fileobj.getbuffer().nbytes,
name=f'{asset_name}.sha256')
package_hash_fileobj, package_hash_fileobj.getbuffer().nbytes, name=f"{asset_name}.sha256"
)
def upload_to_s3(platform, package, package_hash, timestamp):
(aws_access_key, aws_secret_access_key) = get_s3_secret()
s3 = boto3.client(
's3',
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key
)
s3 = boto3.client("s3", aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_access_key)
cloudfront = boto3.client(
'cloudfront',
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key
"cloudfront", aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_access_key
)
BUCKET = 'servo-builds2'
DISTRIBUTION_ID = 'EJ8ZWSJKFCJS2'
BUCKET = "servo-builds2"
DISTRIBUTION_ID = "EJ8ZWSJKFCJS2"
nightly_dir = f'nightly/{platform}'
nightly_dir = f"nightly/{platform}"
filename = nightly_filename(package, timestamp)
package_upload_key = '{}/{}'.format(nightly_dir, filename)
extension = path.basename(package).partition('.')[2]
latest_upload_key = '{}/servo-latest.{}'.format(nightly_dir, extension)
package_upload_key = "{}/{}".format(nightly_dir, filename)
extension = path.basename(package).partition(".")[2]
latest_upload_key = "{}/servo-latest.{}".format(nightly_dir, extension)
package_hash_fileobj = io.BytesIO(package_hash.encode('utf-8'))
latest_hash_upload_key = f'{latest_upload_key}.sha256'
package_hash_fileobj = io.BytesIO(package_hash.encode("utf-8"))
latest_hash_upload_key = f"{latest_upload_key}.sha256"
s3.upload_file(package, BUCKET, package_upload_key)
copy_source = {
'Bucket': BUCKET,
'Key': package_upload_key,
"Bucket": BUCKET,
"Key": package_upload_key,
}
s3.copy(copy_source, BUCKET, latest_upload_key)
s3.upload_fileobj(
package_hash_fileobj, BUCKET, latest_hash_upload_key, ExtraArgs={'ContentType': 'text/plain'}
package_hash_fileobj, BUCKET, latest_hash_upload_key, ExtraArgs={"ContentType": "text/plain"}
)
# Invalidate previous "latest" nightly files from
@ -541,14 +509,9 @@ class PackageCommands(CommandBase):
cloudfront.create_invalidation(
DistributionId=DISTRIBUTION_ID,
InvalidationBatch={
'CallerReference': f'{latest_upload_key}-{timestamp}',
'Paths': {
'Quantity': 1,
'Items': [
f'/{latest_upload_key}*'
]
}
}
"CallerReference": f"{latest_upload_key}-{timestamp}",
"Paths": {"Quantity": 1, "Items": [f"/{latest_upload_key}*"]},
},
)
timestamp = datetime.utcnow().replace(microsecond=0)
@ -556,16 +519,13 @@ class PackageCommands(CommandBase):
if path.isdir(package):
continue
if not path.isfile(package):
print("Could not find package for {} at {}".format(
platform,
package
), file=sys.stderr)
print("Could not find package for {} at {}".format(platform, package), file=sys.stderr)
return 1
# Compute the hash
SHA_BUF_SIZE = 1048576 # read in 1 MiB chunks
sha256_digest = hashlib.sha256()
with open(package, 'rb') as package_file:
with open(package, "rb") as package_file:
while True:
data = package_file.read(SHA_BUF_SIZE)
if not data:

View file

@ -64,11 +64,14 @@ def get():
__platform__ = Windows(triple)
elif "linux-gnu" in triple:
from .linux import Linux
__platform__ = Linux(triple)
elif "apple-darwin" in triple:
from .macos import MacOS
__platform__ = MacOS(triple)
else:
from .base import Base
__platform__ = Base(triple)
return __platform__

View file

@ -33,9 +33,7 @@ class Base:
raise NotImplementedError("Bootstrap installation detection not yet available.")
def _platform_bootstrap_gstreamer(self, _target: BuildTarget, _force: bool) -> bool:
raise NotImplementedError(
"GStreamer bootstrap support is not yet available for your OS."
)
raise NotImplementedError("GStreamer bootstrap support is not yet available for your OS.")
def is_gstreamer_installed(self, target: BuildTarget) -> bool:
gstreamer_root = self.gstreamer_root(target)
@ -92,8 +90,7 @@ class Base:
if force or not shutil.which("cargo-deny"):
return False
# Tidy needs at least version 0.18.1 installed.
result = subprocess.run(["cargo-deny", "--version"],
encoding='utf-8', capture_output=True)
result = subprocess.run(["cargo-deny", "--version"], encoding="utf-8", capture_output=True)
(major, minor, micro) = result.stdout.strip().split(" ")[1].split(".", 2)
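# Illustrative: `cargo-deny --version` prints e.g. "cargo-deny 0.18.2", which parses
# to (0, 18, 2) and is compared element-wise against (0, 18, 1).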
return (int(major), int(minor), int(micro)) >= (0, 18, 1)

View file

@ -29,12 +29,12 @@ class BuildTarget(object):
self.target_triple = target_triple
@staticmethod
def from_triple(target_triple: Optional[str]) -> 'BuildTarget':
def from_triple(target_triple: Optional[str]) -> "BuildTarget":
host_triple = servo.platform.host_triple()
if target_triple:
if 'android' in target_triple:
if "android" in target_triple:
return AndroidTarget(target_triple)
elif 'ohos' in target_triple:
elif "ohos" in target_triple:
return OpenHarmonyTarget(target_triple)
elif target_triple != host_triple:
raise Exception(f"Unknown build target {target_triple}")
@ -129,16 +129,16 @@ class AndroidTarget(CrossBuildTarget):
android_toolchain_name = ndk_configuration["toolchain_name"]
android_lib = ndk_configuration["lib"]
android_api = android_platform.replace('android-', '')
android_api = android_platform.replace("android-", "")
# Check if the NDK version is 26
if not os.path.isfile(path.join(env["ANDROID_NDK_ROOT"], 'source.properties')):
if not os.path.isfile(path.join(env["ANDROID_NDK_ROOT"], "source.properties")):
print("ANDROID_NDK should have file `source.properties`.")
print("The environment variable ANDROID_NDK_ROOT may be set at a wrong path.")
sys.exit(1)
with open(path.join(env["ANDROID_NDK_ROOT"], 'source.properties'), encoding="utf8") as ndk_properties:
with open(path.join(env["ANDROID_NDK_ROOT"], "source.properties"), encoding="utf8") as ndk_properties:
lines = ndk_properties.readlines()
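# The second line of source.properties typically reads "Pkg.Revision = 26.2.11394342";
# only the major component is compared here.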
if lines[1].split(' = ')[1].split('.')[0] != '26':
if lines[1].split(" = ")[1].split(".")[0] != "26":
print("Servo currently only supports NDK r26c.")
sys.exit(1)
@ -149,7 +149,7 @@ class AndroidTarget(CrossBuildTarget):
if os_type not in ["linux", "darwin"]:
raise Exception("Android cross builds are only supported on Linux and macOS.")
llvm_prebuilt = path.join(env['ANDROID_NDK_ROOT'], "toolchains", "llvm", "prebuilt")
llvm_prebuilt = path.join(env["ANDROID_NDK_ROOT"], "toolchains", "llvm", "prebuilt")
cpu_type = platform.machine().lower()
host_suffix = "unknown"
@ -172,11 +172,11 @@ class AndroidTarget(CrossBuildTarget):
raise Exception("Can't determine LLVM prebuilt directory.")
host = os_type + "-" + host_suffix
host_cc = env.get('HOST_CC') or shutil.which("clang")
host_cxx = env.get('HOST_CXX') or shutil.which("clang++")
host_cc = env.get("HOST_CC") or shutil.which("clang")
host_cxx = env.get("HOST_CXX") or shutil.which("clang++")
llvm_toolchain = path.join(llvm_prebuilt, host)
env['PATH'] = (env['PATH'] + ':' + path.join(llvm_toolchain, "bin"))
env["PATH"] = env["PATH"] + ":" + path.join(llvm_toolchain, "bin")
def to_ndk_bin(prog):
return path.join(llvm_toolchain, "bin", prog)
@ -189,26 +189,26 @@ class AndroidTarget(CrossBuildTarget):
[to_ndk_bin(f"x86_64-linux-android{android_api}-clang"), "--print-libgcc-file-name"],
check=True,
capture_output=True,
encoding="utf8"
encoding="utf8",
).stdout
env['RUSTFLAGS'] = env.get('RUSTFLAGS', "")
env["RUSTFLAGS"] = env.get("RUSTFLAGS", "")
env["RUSTFLAGS"] += f"-C link-arg={libclangrt_filename}"
env["RUST_TARGET"] = self.triple()
env['HOST_CC'] = host_cc
env['HOST_CXX'] = host_cxx
env['HOST_CFLAGS'] = ''
env['HOST_CXXFLAGS'] = ''
env['TARGET_CC'] = to_ndk_bin("clang")
env['TARGET_CPP'] = to_ndk_bin("clang") + " -E"
env['TARGET_CXX'] = to_ndk_bin("clang++")
env["HOST_CC"] = host_cc
env["HOST_CXX"] = host_cxx
env["HOST_CFLAGS"] = ""
env["HOST_CXXFLAGS"] = ""
env["TARGET_CC"] = to_ndk_bin("clang")
env["TARGET_CPP"] = to_ndk_bin("clang") + " -E"
env["TARGET_CXX"] = to_ndk_bin("clang++")
env['TARGET_AR'] = to_ndk_bin("llvm-ar")
env['TARGET_RANLIB'] = to_ndk_bin("llvm-ranlib")
env['TARGET_OBJCOPY'] = to_ndk_bin("llvm-objcopy")
env['TARGET_YASM'] = to_ndk_bin("yasm")
env['TARGET_STRIP'] = to_ndk_bin("llvm-strip")
env['RUST_FONTCONFIG_DLOPEN'] = "on"
env["TARGET_AR"] = to_ndk_bin("llvm-ar")
env["TARGET_RANLIB"] = to_ndk_bin("llvm-ranlib")
env["TARGET_OBJCOPY"] = to_ndk_bin("llvm-objcopy")
env["TARGET_YASM"] = to_ndk_bin("yasm")
env["TARGET_STRIP"] = to_ndk_bin("llvm-strip")
env["RUST_FONTCONFIG_DLOPEN"] = "on"
env["LIBCLANG_PATH"] = path.join(llvm_toolchain, "lib")
env["CLANG_PATH"] = to_ndk_bin("clang")
@ -224,11 +224,11 @@ class AndroidTarget(CrossBuildTarget):
#
# Also worth remembering: autoconf uses C for its configuration,
# even for C++ builds, so the C flags need to line up with the C++ flags.
env['TARGET_CFLAGS'] = "--target=" + android_toolchain_name
env['TARGET_CXXFLAGS'] = "--target=" + android_toolchain_name
env["TARGET_CFLAGS"] = "--target=" + android_toolchain_name
env["TARGET_CXXFLAGS"] = "--target=" + android_toolchain_name
# These two variables are needed for the mozjs compilation.
env['ANDROID_API_LEVEL'] = android_api
env["ANDROID_API_LEVEL"] = android_api
env["ANDROID_NDK_HOME"] = env["ANDROID_NDK_ROOT"]
# The two variables set below are passed by our custom
@ -236,15 +236,16 @@ class AndroidTarget(CrossBuildTarget):
env["ANDROID_ABI"] = android_lib
env["ANDROID_PLATFORM"] = android_platform
env["NDK_CMAKE_TOOLCHAIN_FILE"] = path.join(
env['ANDROID_NDK_ROOT'], "build", "cmake", "android.toolchain.cmake")
env["ANDROID_NDK_ROOT"], "build", "cmake", "android.toolchain.cmake"
)
env["CMAKE_TOOLCHAIN_FILE"] = path.join(topdir, "support", "android", "toolchain.cmake")
# Set output dir for gradle aar files
env["AAR_OUT_DIR"] = path.join(topdir, "target", "android", "aar")
if not os.path.exists(env['AAR_OUT_DIR']):
os.makedirs(env['AAR_OUT_DIR'])
if not os.path.exists(env["AAR_OUT_DIR"]):
os.makedirs(env["AAR_OUT_DIR"])
env['TARGET_PKG_CONFIG_SYSROOT_DIR'] = path.join(llvm_toolchain, 'sysroot')
env["TARGET_PKG_CONFIG_SYSROOT_DIR"] = path.join(llvm_toolchain, "sysroot")
def binary_name(self) -> str:
return "libservoshell.so"
@ -273,8 +274,10 @@ class OpenHarmonyTarget(CrossBuildTarget):
env["OHOS_SDK_NATIVE"] = config["ohos"]["ndk"]
if "OHOS_SDK_NATIVE" not in env:
print("Please set the OHOS_SDK_NATIVE environment variable to the location of the `native` directory "
"in the OpenHarmony SDK.")
print(
"Please set the OHOS_SDK_NATIVE environment variable to the location of the `native` directory "
"in the OpenHarmony SDK."
)
sys.exit(1)
ndk_root = pathlib.Path(env["OHOS_SDK_NATIVE"])
@ -288,9 +291,9 @@ class OpenHarmonyTarget(CrossBuildTarget):
try:
with open(package_info) as meta_file:
meta = json.load(meta_file)
ohos_api_version = int(meta['apiVersion'])
ohos_sdk_version = parse_version(meta['version'])
if ohos_sdk_version < parse_version('5.0') or ohos_api_version < 12:
ohos_api_version = int(meta["apiVersion"])
ohos_sdk_version = parse_version(meta["version"])
if ohos_sdk_version < parse_version("5.0") or ohos_api_version < 12:
raise RuntimeError("Building servo for OpenHarmony requires SDK version 5.0 (API-12) or newer.")
print(f"Info: The OpenHarmony SDK {ohos_sdk_version} is targeting API-level {ohos_api_version}")
except (OSError, json.JSONDecodeError) as e:
@ -318,72 +321,79 @@ class OpenHarmonyTarget(CrossBuildTarget):
# Instead, we ensure that all the necessary flags for the c-compiler are set
# via environment variables such as `TARGET_CFLAGS`.
def to_sdk_llvm_bin(prog: str):
if sys.platform == 'win32':
prog = prog + '.exe'
if sys.platform == "win32":
prog = prog + ".exe"
llvm_prog = llvm_bin.joinpath(prog)
if not llvm_prog.is_file():
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), llvm_prog)
return llvm_bin.joinpath(prog).as_posix()
# CC and CXX should already be set to appropriate host compilers by `build_env()`
env['HOST_CC'] = env['CC']
env['HOST_CXX'] = env['CXX']
env['TARGET_AR'] = to_sdk_llvm_bin("llvm-ar")
env['TARGET_RANLIB'] = to_sdk_llvm_bin("llvm-ranlib")
env['TARGET_READELF'] = to_sdk_llvm_bin("llvm-readelf")
env['TARGET_OBJCOPY'] = to_sdk_llvm_bin("llvm-objcopy")
env['TARGET_STRIP'] = to_sdk_llvm_bin("llvm-strip")
env["HOST_CC"] = env["CC"]
env["HOST_CXX"] = env["CXX"]
env["TARGET_AR"] = to_sdk_llvm_bin("llvm-ar")
env["TARGET_RANLIB"] = to_sdk_llvm_bin("llvm-ranlib")
env["TARGET_READELF"] = to_sdk_llvm_bin("llvm-readelf")
env["TARGET_OBJCOPY"] = to_sdk_llvm_bin("llvm-objcopy")
env["TARGET_STRIP"] = to_sdk_llvm_bin("llvm-strip")
target_triple = self.triple()
rust_target_triple = str(target_triple).replace('-', '_')
rust_target_triple = str(target_triple).replace("-", "_")
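# e.g. "aarch64-unknown-linux-ohos" becomes "aarch64_unknown_linux_ohos", the form
# used in the per-target variables such as CC_<triple> set below.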
ndk_clang = to_sdk_llvm_bin("clang")
ndk_clangxx = to_sdk_llvm_bin("clang++")
env[f'CC_{rust_target_triple}'] = ndk_clang
env[f'CXX_{rust_target_triple}'] = ndk_clangxx
env[f"CC_{rust_target_triple}"] = ndk_clang
env[f"CXX_{rust_target_triple}"] = ndk_clangxx
# The clang target name is different from the LLVM target name
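# e.g. "aarch64-unknown-linux-ohos" becomes "aarch64-linux-ohos"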
clang_target_triple = str(target_triple).replace('-unknown-', '-')
clang_target_triple_underscore = clang_target_triple.replace('-', '_')
env[f'CC_{clang_target_triple_underscore}'] = ndk_clang
env[f'CXX_{clang_target_triple_underscore}'] = ndk_clangxx
clang_target_triple = str(target_triple).replace("-unknown-", "-")
clang_target_triple_underscore = clang_target_triple.replace("-", "_")
env[f"CC_{clang_target_triple_underscore}"] = ndk_clang
env[f"CXX_{clang_target_triple_underscore}"] = ndk_clangxx
# rustc linker
env[f'CARGO_TARGET_{rust_target_triple.upper()}_LINKER'] = ndk_clang
env[f"CARGO_TARGET_{rust_target_triple.upper()}_LINKER"] = ndk_clang
# We could also use a cross-compile wrapper
env["RUSTFLAGS"] += f' -Clink-arg=--target={clang_target_triple}'
env["RUSTFLAGS"] += f' -Clink-arg=--sysroot={ohos_sysroot_posix}'
env["RUSTFLAGS"] += f" -Clink-arg=--target={clang_target_triple}"
env["RUSTFLAGS"] += f" -Clink-arg=--sysroot={ohos_sysroot_posix}"
env['HOST_CFLAGS'] = ''
env['HOST_CXXFLAGS'] = ''
ohos_cflags = ['-D__MUSL__', f' --target={clang_target_triple}', f' --sysroot={ohos_sysroot_posix}',
"-Wno-error=unused-command-line-argument"]
if clang_target_triple.startswith('armv7-'):
ohos_cflags.extend(['-march=armv7-a', '-mfloat-abi=softfp', '-mtune=generic-armv7-a', '-mthumb'])
env["HOST_CFLAGS"] = ""
env["HOST_CXXFLAGS"] = ""
ohos_cflags = [
"-D__MUSL__",
f" --target={clang_target_triple}",
f" --sysroot={ohos_sysroot_posix}",
"-Wno-error=unused-command-line-argument",
]
if clang_target_triple.startswith("armv7-"):
ohos_cflags.extend(["-march=armv7-a", "-mfloat-abi=softfp", "-mtune=generic-armv7-a", "-mthumb"])
ohos_cflags_str = " ".join(ohos_cflags)
env['TARGET_CFLAGS'] = ohos_cflags_str
env['TARGET_CPPFLAGS'] = '-D__MUSL__'
env['TARGET_CXXFLAGS'] = ohos_cflags_str
env["TARGET_CFLAGS"] = ohos_cflags_str
env["TARGET_CPPFLAGS"] = "-D__MUSL__"
env["TARGET_CXXFLAGS"] = ohos_cflags_str
# CMake related flags
env['CMAKE'] = ndk_root.joinpath("build-tools", "cmake", "bin", "cmake").as_posix()
env["CMAKE"] = ndk_root.joinpath("build-tools", "cmake", "bin", "cmake").as_posix()
cmake_toolchain_file = ndk_root.joinpath("build", "cmake", "ohos.toolchain.cmake")
if cmake_toolchain_file.is_file():
env[f'CMAKE_TOOLCHAIN_FILE_{rust_target_triple}'] = cmake_toolchain_file.as_posix()
env[f"CMAKE_TOOLCHAIN_FILE_{rust_target_triple}"] = cmake_toolchain_file.as_posix()
else:
print(
f"Warning: Failed to find the OpenHarmony CMake Toolchain file - Expected it at {cmake_toolchain_file}")
env[f'CMAKE_C_COMPILER_{rust_target_triple}'] = ndk_clang
env[f'CMAKE_CXX_COMPILER_{rust_target_triple}'] = ndk_clangxx
f"Warning: Failed to find the OpenHarmony CMake Toolchain file - Expected it at {cmake_toolchain_file}"
)
env[f"CMAKE_C_COMPILER_{rust_target_triple}"] = ndk_clang
env[f"CMAKE_CXX_COMPILER_{rust_target_triple}"] = ndk_clangxx
# pkg-config
pkg_config_path = '{}:{}'.format(ohos_sysroot.joinpath("usr", "lib", "pkgconfig").as_posix(),
ohos_sysroot.joinpath("usr", "share", "pkgconfig").as_posix())
env[f'PKG_CONFIG_SYSROOT_DIR_{rust_target_triple}'] = ohos_sysroot_posix
env[f'PKG_CONFIG_PATH_{rust_target_triple}'] = pkg_config_path
pkg_config_path = "{}:{}".format(
ohos_sysroot.joinpath("usr", "lib", "pkgconfig").as_posix(),
ohos_sysroot.joinpath("usr", "share", "pkgconfig").as_posix(),
)
env[f"PKG_CONFIG_SYSROOT_DIR_{rust_target_triple}"] = ohos_sysroot_posix
env[f"PKG_CONFIG_PATH_{rust_target_triple}"] = pkg_config_path
# bindgen / libclang-sys
env["LIBCLANG_PATH"] = path.join(llvm_toolchain, "lib")
env["CLANG_PATH"] = ndk_clangxx
env[f'CXXSTDLIB_{clang_target_triple_underscore}'] = "c++"
bindgen_extra_clangs_args_var = f'BINDGEN_EXTRA_CLANG_ARGS_{rust_target_triple}'
env[f"CXXSTDLIB_{clang_target_triple_underscore}"] = "c++"
bindgen_extra_clangs_args_var = f"BINDGEN_EXTRA_CLANG_ARGS_{rust_target_triple}"
bindgen_extra_clangs_args = env.get(bindgen_extra_clangs_args_var, "")
bindgen_extra_clangs_args = bindgen_extra_clangs_args + " " + ohos_cflags_str
env[bindgen_extra_clangs_args_var] = bindgen_extra_clangs_args
@ -404,8 +414,5 @@ class OpenHarmonyTarget(CrossBuildTarget):
return path.join(base_path, build_type_directory, build_output_path, hap_name)
def abi_string(self) -> str:
abi_map = {
"aarch64-unknown-linux-ohos": "arm64-v8a",
"x86_64-unknown-linux-ohos": "x86_64"
}
abi_map = {"aarch64-unknown-linux-ohos": "arm64-v8a", "x86_64-unknown-linux-ohos": "x86_64"}
return abi_map[self.triple()]

View file

@ -26,22 +26,48 @@ from .build_target import BuildTarget
# 3. copy(`sudo apt install ${APT_PKGS.join(" ")}`)
# 4. paste into https://github.com/servo/book/edit/main/src/hacking/setting-up-your-environment.md
APT_PKGS = [
'build-essential', 'ccache', 'clang', 'cmake', 'curl', 'g++', 'git',
'gperf', 'libdbus-1-dev', 'libfreetype6-dev', 'libgl1-mesa-dri',
'libgles2-mesa-dev', 'libglib2.0-dev',
'gstreamer1.0-plugins-good', 'libgstreamer-plugins-good1.0-dev',
'gstreamer1.0-plugins-bad', 'libgstreamer-plugins-bad1.0-dev',
'gstreamer1.0-plugins-ugly',
"gstreamer1.0-plugins-base", 'libgstreamer-plugins-base1.0-dev',
'gstreamer1.0-libav',
'libgstrtspserver-1.0-dev',
'gstreamer1.0-tools',
'libges-1.0-dev',
'libharfbuzz-dev', 'liblzma-dev', 'libudev-dev', 'libunwind-dev',
'libvulkan1', 'libx11-dev', 'libxcb-render0-dev', 'libxcb-shape0-dev',
'libxcb-xfixes0-dev', 'libxmu-dev', 'libxmu6', 'libegl1-mesa-dev',
'llvm-dev', 'm4', 'xorg-dev', 'libxkbcommon0', "libxkbcommon-x11-0",
'tshark',
"build-essential",
"ccache",
"clang",
"cmake",
"curl",
"g++",
"git",
"gperf",
"libdbus-1-dev",
"libfreetype6-dev",
"libgl1-mesa-dri",
"libgles2-mesa-dev",
"libglib2.0-dev",
"gstreamer1.0-plugins-good",
"libgstreamer-plugins-good1.0-dev",
"gstreamer1.0-plugins-bad",
"libgstreamer-plugins-bad1.0-dev",
"gstreamer1.0-plugins-ugly",
"gstreamer1.0-plugins-base",
"libgstreamer-plugins-base1.0-dev",
"gstreamer1.0-libav",
"libgstrtspserver-1.0-dev",
"gstreamer1.0-tools",
"libges-1.0-dev",
"libharfbuzz-dev",
"liblzma-dev",
"libudev-dev",
"libunwind-dev",
"libvulkan1",
"libx11-dev",
"libxcb-render0-dev",
"libxcb-shape0-dev",
"libxcb-xfixes0-dev",
"libxmu-dev",
"libxmu6",
"libegl1-mesa-dev",
"llvm-dev",
"m4",
"xorg-dev",
"libxkbcommon0",
"libxkbcommon-x11-0",
"tshark",
]
# https://packages.fedoraproject.org
@ -49,37 +75,92 @@ APT_PKGS = [
# 2. paste in the whole DNF_PKGS = [...]
# 3. copy(`sudo dnf install ${DNF_PKGS.join(" ")}`)
# 4. paste into https://github.com/servo/book/edit/main/src/hacking/setting-up-your-environment.md
DNF_PKGS = ['libtool', 'gcc-c++', 'libXi-devel', 'freetype-devel',
'libunwind-devel', 'mesa-libGL-devel', 'mesa-libEGL-devel',
'glib2-devel', 'libX11-devel', 'libXrandr-devel', 'gperf',
'fontconfig-devel', 'cabextract', 'ttmkfdir', 'expat-devel',
'rpm-build', 'cmake', 'libXcursor-devel', 'libXmu-devel',
'dbus-devel', 'ncurses-devel', 'harfbuzz-devel', 'ccache',
'clang', 'clang-libs', 'llvm', 'python3-devel',
'gstreamer1-devel', 'gstreamer1-plugins-base-devel',
'gstreamer1-plugins-good', 'gstreamer1-plugins-bad-free-devel',
'gstreamer1-plugins-ugly-free', 'libjpeg-turbo-devel',
'zlib-ng', 'libjpeg-turbo', 'vulkan-loader', 'libxkbcommon',
'libxkbcommon-x11', 'wireshark-cli']
DNF_PKGS = [
"libtool",
"gcc-c++",
"libXi-devel",
"freetype-devel",
"libunwind-devel",
"mesa-libGL-devel",
"mesa-libEGL-devel",
"glib2-devel",
"libX11-devel",
"libXrandr-devel",
"gperf",
"fontconfig-devel",
"cabextract",
"ttmkfdir",
"expat-devel",
"rpm-build",
"cmake",
"libXcursor-devel",
"libXmu-devel",
"dbus-devel",
"ncurses-devel",
"harfbuzz-devel",
"ccache",
"clang",
"clang-libs",
"llvm",
"python3-devel",
"gstreamer1-devel",
"gstreamer1-plugins-base-devel",
"gstreamer1-plugins-good",
"gstreamer1-plugins-bad-free-devel",
"gstreamer1-plugins-ugly-free",
"libjpeg-turbo-devel",
"zlib-ng",
"libjpeg-turbo",
"vulkan-loader",
"libxkbcommon",
"libxkbcommon-x11",
"wireshark-cli",
]
# https://voidlinux.org/packages/
# 1. open devtools
# 2. paste in the whole XBPS_PKGS = [...]
# 3. copy(`sudo xbps-install ${XBPS_PKGS.join(" ")}`)
# 4. paste into https://github.com/servo/book/edit/main/src/hacking/setting-up-your-environment.md
XBPS_PKGS = ['libtool', 'gcc', 'libXi-devel', 'freetype-devel',
'libunwind-devel', 'MesaLib-devel', 'glib-devel', 'pkg-config',
'libX11-devel', 'libXrandr-devel', 'gperf', 'bzip2-devel',
'fontconfig-devel', 'cabextract', 'expat-devel', 'cmake',
'cmake', 'libXcursor-devel', 'libXmu-devel', 'dbus-devel',
'ncurses-devel', 'harfbuzz-devel', 'ccache', 'glu-devel',
'clang', 'gstreamer1-devel', 'gst-plugins-base1-devel',
'gst-plugins-good1', 'gst-plugins-bad1-devel',
'gst-plugins-ugly1', 'vulkan-loader', 'libxkbcommon',
'libxkbcommon-x11']
XBPS_PKGS = [
"libtool",
"gcc",
"libXi-devel",
"freetype-devel",
"libunwind-devel",
"MesaLib-devel",
"glib-devel",
"pkg-config",
"libX11-devel",
"libXrandr-devel",
"gperf",
"bzip2-devel",
"fontconfig-devel",
"cabextract",
"expat-devel",
"cmake",
"cmake",
"libXcursor-devel",
"libXmu-devel",
"dbus-devel",
"ncurses-devel",
"harfbuzz-devel",
"ccache",
"glu-devel",
"clang",
"gstreamer1-devel",
"gst-plugins-base1-devel",
"gst-plugins-good1",
"gst-plugins-bad1-devel",
"gst-plugins-ugly1",
"vulkan-loader",
"libxkbcommon",
"libxkbcommon-x11",
]
GSTREAMER_URL = \
GSTREAMER_URL = (
"https://github.com/servo/servo-build-deps/releases/download/linux/gstreamer-1.16-x86_64-linux-gnu.20190515.tar.gz"
)
class Linux(Base):
@ -93,67 +174,68 @@ class Linux(Base):
distrib = distro.name()
version = distro.version()
if distrib in ['LinuxMint', 'Linux Mint', 'KDE neon', 'Pop!_OS', 'TUXEDO OS']:
if '.' in version:
major, _ = version.split('.', 1)
if distrib in ["LinuxMint", "Linux Mint", "KDE neon", "Pop!_OS", "TUXEDO OS"]:
if "." in version:
major, _ = version.split(".", 1)
else:
major = version
distrib = 'Ubuntu'
if major == '22':
version = '22.04'
elif major == '21':
version = '21.04'
elif major == '20':
version = '20.04'
elif major == '19':
version = '18.04'
elif major == '18':
version = '16.04'
distrib = "Ubuntu"
if major == "22":
version = "22.04"
elif major == "21":
version = "21.04"
elif major == "20":
version = "20.04"
elif major == "19":
version = "18.04"
elif major == "18":
version = "16.04"
if distrib.lower() == 'elementary':
distrib = 'Ubuntu'
if version == '5.0':
version = '18.04'
elif version[0:3] == '0.4':
version = '16.04'
if distrib.lower() == "elementary":
distrib = "Ubuntu"
if version == "5.0":
version = "18.04"
elif version[0:3] == "0.4":
version = "16.04"
return (distrib, version)
def _platform_bootstrap(self, force: bool) -> bool:
if self.distro.lower() == 'nixos':
print('NixOS does not need bootstrap, it will automatically enter a nix-shell')
print('Just run ./mach build')
print('')
print('You will need to run a nix-shell if you are trying '
'to run any of the built binaries')
print('To enter the nix-shell manually use:')
print(' $ nix-shell')
if self.distro.lower() == "nixos":
print("NixOS does not need bootstrap, it will automatically enter a nix-shell")
print("Just run ./mach build")
print("")
print("You will need to run a nix-shell if you are trying to run any of the built binaries")
print("To enter the nix-shell manually use:")
print(" $ nix-shell")
return False
if self.distro.lower() == 'ubuntu' and self.version > '22.04':
if self.distro.lower() == "ubuntu" and self.version > "22.04":
print(f"WARNING: unsupported version of {self.distro}: {self.version}")
# FIXME: Better version checking for these distributions.
if self.distro.lower() not in [
'arch linux',
'arch',
'artix',
'endeavouros',
'centos linux',
'centos',
'debian gnu/linux',
'raspbian gnu/linux',
'fedora linux',
'fedora',
'nixos',
'ubuntu',
'void',
'fedora linux asahi remix'
"arch linux",
"arch",
"artix",
"endeavouros",
"centos linux",
"centos",
"debian gnu/linux",
"raspbian gnu/linux",
"fedora linux",
"fedora",
"nixos",
"ubuntu",
"void",
"fedora linux asahi remix",
]:
print(f"mach bootstrap does not support {self.distro}."
print(
f"mach bootstrap does not support {self.distro}."
" You may be able to install dependencies manually."
" See https://github.com/servo/servo/wiki/Building.")
" See https://github.com/servo/servo/wiki/Building."
)
input("Press Enter to continue...")
return False
@ -163,41 +245,39 @@ class Linux(Base):
def install_non_gstreamer_dependencies(self, force: bool) -> bool:
install = False
pkgs = []
if self.distro in ['Ubuntu', 'Debian GNU/Linux', 'Raspbian GNU/Linux']:
command = ['apt-get', 'install', "-m"]
if self.distro in ["Ubuntu", "Debian GNU/Linux", "Raspbian GNU/Linux"]:
command = ["apt-get", "install", "-m"]
pkgs = APT_PKGS
# Skip 'clang' if 'clang' binary already exists.
result = subprocess.run(['which', 'clang'], capture_output=True)
result = subprocess.run(["which", "clang"], capture_output=True)
if result and result.returncode == 0:
pkgs.remove('clang')
pkgs.remove("clang")
# Try to filter out unknown packages from the list. This is important for Debian
# as it does not ship all of the packages we want.
installable = subprocess.check_output(['apt-cache', '--generate', 'pkgnames'])
installable = subprocess.check_output(["apt-cache", "--generate", "pkgnames"])
if installable:
installable = installable.decode("ascii").splitlines()
pkgs = list(filter(lambda pkg: pkg in installable, pkgs))
if subprocess.call(['dpkg', '-s'] + pkgs, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) != 0:
if subprocess.call(["dpkg", "-s"] + pkgs, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) != 0:
install = True
elif self.distro in ['CentOS', 'CentOS Linux', 'Fedora', 'Fedora Linux', 'Fedora Linux Asahi Remix']:
command = ['dnf', 'install']
installed_pkgs: [str] = (
subprocess.check_output(['rpm', '--query', '--all', '--queryformat', '%{NAME}\n'],
encoding='utf-8')
.split('\n'))
elif self.distro in ["CentOS", "CentOS Linux", "Fedora", "Fedora Linux", "Fedora Linux Asahi Remix"]:
command = ["dnf", "install"]
installed_pkgs: [str] = subprocess.check_output(
["rpm", "--query", "--all", "--queryformat", "%{NAME}\n"], encoding="utf-8"
).split("\n")
pkgs = DNF_PKGS
for pkg in pkgs:
if pkg not in installed_pkgs:
install = True
break
elif self.distro == 'void':
installed_pkgs = str(subprocess.check_output(['xbps-query', '-l']))
elif self.distro == "void":
installed_pkgs = str(subprocess.check_output(["xbps-query", "-l"]))
pkgs = XBPS_PKGS
for pkg in pkgs:
command = ['xbps-install', '-A']
command = ["xbps-install", "-A"]
if "ii {}-".format(pkg) not in installed_pkgs:
install = force = True
break
@ -207,22 +287,24 @@ class Linux(Base):
def check_sudo():
if os.geteuid() != 0:
if shutil.which('sudo') is None:
if shutil.which("sudo") is None:
return False
return True
def run_as_root(command, force=False):
if os.geteuid() != 0:
command.insert(0, 'sudo')
command.insert(0, "sudo")
if force:
command.append('-y')
command.append("-y")
return subprocess.call(command)
print("Installing missing dependencies...")
if not check_sudo():
print("'sudo' command not found."
print(
"'sudo' command not found."
" You may be able to install dependencies manually."
" See https://github.com/servo/servo/wiki/Building.")
" See https://github.com/servo/servo/wiki/Building."
)
input("Press Enter to continue...")
return False
@ -236,4 +318,5 @@ class Linux(Base):
def _platform_bootstrap_gstreamer(self, _target: BuildTarget, _force: bool) -> bool:
raise EnvironmentError(
"Bootstrapping GStreamer on Linux is not supported. "
+ "Please install it using your distribution package manager.")
+ "Please install it using your distribution package manager."
)

View file

@ -42,9 +42,7 @@ class MacOS(Base):
installed_something = False
try:
brewfile = os.path.join(util.SERVO_ROOT, "support", "macos", "Brewfile")
output = subprocess.check_output(
['brew', 'bundle', 'install', "--file", brewfile]
).decode("utf-8")
output = subprocess.check_output(["brew", "bundle", "install", "--file", brewfile]).decode("utf-8")
print(output)
installed_something = "Installing" in output
except subprocess.CalledProcessError as e:
@ -60,14 +58,10 @@ class MacOS(Base):
with tempfile.TemporaryDirectory() as temp_dir:
libs_pkg = os.path.join(temp_dir, GSTREAMER_URL.rsplit("/", maxsplit=1)[-1])
devel_pkg = os.path.join(
temp_dir, GSTREAMER_DEVEL_URL.rsplit("/", maxsplit=1)[-1]
)
devel_pkg = os.path.join(temp_dir, GSTREAMER_DEVEL_URL.rsplit("/", maxsplit=1)[-1])
util.download_file("GStreamer libraries", GSTREAMER_URL, libs_pkg)
util.download_file(
"GStreamer development support", GSTREAMER_DEVEL_URL, devel_pkg
)
util.download_file("GStreamer development support", GSTREAMER_DEVEL_URL, devel_pkg)
print("Installing GStreamer packages...")
subprocess.check_call(
@ -75,8 +69,7 @@ class MacOS(Base):
"sudo",
"sh",
"-c",
f"installer -pkg '{libs_pkg}' -target / &&"
f"installer -pkg '{devel_pkg}' -target /",
f"installer -pkg '{libs_pkg}' -target / &&installer -pkg '{devel_pkg}' -target /",
]
)

View file

@ -71,10 +71,18 @@ class Windows(Base):
cmd_exe_args += ",'-f'"
print(cmd_exe_args)
subprocess.check_output([
"powershell", "Start-Process", "-Wait", "-verb", "runAs",
"cmd.exe", "-ArgumentList", f"@({cmd_exe_args})"
]).decode("utf-8")
subprocess.check_output(
[
"powershell",
"Start-Process",
"-Wait",
"-verb",
"runAs",
"cmd.exe",
"-ArgumentList",
f"@({cmd_exe_args})",
]
).decode("utf-8")
except subprocess.CalledProcessError as e:
print("Could not run chocolatey. Follow manual build setup instructions.")
raise e
@ -87,8 +95,7 @@ class Windows(Base):
"""A bootstrap method that is called without explicitly invoking `./mach bootstrap`
but that is executed in the process of other `./mach` commands. This should be
as fast as possible."""
to_install = [package for package in DEPENDENCIES if
not os.path.isdir(get_dependency_dir(package))]
to_install = [package for package in DEPENDENCIES if not os.path.isdir(get_dependency_dir(package))]
if not to_install:
return False
@ -116,9 +123,7 @@ class Windows(Base):
gst_arch_name = gst_arch_names[build_target_triple.split("-")[0]]
# The bootstrapped version of GStreamer always takes precedence over the installed version.
prepackaged_root = os.path.join(
DEPENDENCIES_DIR, "gstreamer", "1.0", f"msvc_{gst_arch_name}"
)
prepackaged_root = os.path.join(DEPENDENCIES_DIR, "gstreamer", "1.0", f"msvc_{gst_arch_name}")
if os.path.exists(os.path.join(prepackaged_root, "bin", "ffi-7.dll")):
return prepackaged_root
@ -143,20 +148,15 @@ class Windows(Base):
return False
if "x86_64" not in self.triple:
print("Bootstrapping gstreamer not supported on "
"non-x86-64 Windows. Please install manually")
print("Bootstrapping gstreamer not supported on non-x86-64 Windows. Please install manually")
return False
with tempfile.TemporaryDirectory() as temp_dir:
libs_msi = os.path.join(temp_dir, GSTREAMER_URL.rsplit("/", maxsplit=1)[-1])
devel_msi = os.path.join(
temp_dir, GSTREAMER_DEVEL_URL.rsplit("/", maxsplit=1)[-1]
)
devel_msi = os.path.join(temp_dir, GSTREAMER_DEVEL_URL.rsplit("/", maxsplit=1)[-1])
util.download_file("GStreamer libraries", GSTREAMER_URL, libs_msi)
util.download_file(
"GStreamer development support", GSTREAMER_DEVEL_URL, devel_msi
)
util.download_file("GStreamer development support", GSTREAMER_DEVEL_URL, devel_msi)
print(f"Installing GStreamer packages to {DEPENDENCIES_DIR}...")
os.makedirs(DEPENDENCIES_DIR, exist_ok=True)
@ -164,15 +164,24 @@ class Windows(Base):
for installer in [libs_msi, devel_msi]:
arguments = [
"/a",
f'"{installer}"'
f'TARGETDIR="{DEPENDENCIES_DIR}"', # Install destination
f'"{installer}"TARGETDIR="{DEPENDENCIES_DIR}"', # Install destination
"/qn", # Quiet mode
]
quoted_arguments = ",".join((f"'{arg}'" for arg in arguments))
subprocess.check_call([
"powershell", "exit (Start-Process", "-PassThru", "-Wait", "-verb", "runAs",
"msiexec.exe", "-ArgumentList", f"@({quoted_arguments})", ").ExitCode"
])
subprocess.check_call(
[
"powershell",
"exit (Start-Process",
"-PassThru",
"-Wait",
"-verb",
"runAs",
"msiexec.exe",
"-ArgumentList",
f"@({quoted_arguments})",
").ExitCode",
]
)
assert self.is_gstreamer_installed(target)
return True

View file

@ -51,39 +51,50 @@ def shell_quote(arg):
@CommandProvider
class PostBuildCommands(CommandBase):
@Command('run',
description='Run Servo',
category='post-build')
@CommandArgument('--android', action='store_true', default=None,
help='Run on an Android device through `adb shell`')
@CommandArgument('--emulator',
action='store_true',
help='For Android, run in the only emulated device')
@CommandArgument('--usb',
action='store_true',
help='For Android, run in the only USB device')
@CommandArgument('--debugger', action='store_true',
help='Enable the debugger. Not specifying a '
'--debugger-cmd option will result in the default '
'debugger being used. The following arguments '
'have no effect without this.')
@CommandArgument('--debugger-cmd', default=None, type=str,
help='Name of debugger to use.')
@CommandArgument('--headless', '-z', action='store_true',
help='Launch in headless mode')
@CommandArgument('--software', '-s', action='store_true',
help='Launch with software rendering')
@Command("run", description="Run Servo", category="post-build")
@CommandArgument(
'params', nargs='...',
help="Command-line arguments to be passed through to Servo")
"--android", action="store_true", default=None, help="Run on an Android device through `adb shell`"
)
@CommandArgument("--emulator", action="store_true", help="For Android, run in the only emulated device")
@CommandArgument("--usb", action="store_true", help="For Android, run in the only USB device")
@CommandArgument(
"--debugger",
action="store_true",
help="Enable the debugger. Not specifying a "
"--debugger-cmd option will result in the default "
"debugger being used. The following arguments "
"have no effect without this.",
)
@CommandArgument("--debugger-cmd", default=None, type=str, help="Name of debugger to use.")
@CommandArgument("--headless", "-z", action="store_true", help="Launch in headless mode")
@CommandArgument("--software", "-s", action="store_true", help="Launch with software rendering")
@CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to Servo")
@CommandBase.common_command_arguments(binary_selection=True)
@CommandBase.allow_target_configuration
def run(self, servo_binary: str, params, debugger=False, debugger_cmd=None,
headless=False, software=False, emulator=False, usb=False):
def run(
self,
servo_binary: str,
params,
debugger=False,
debugger_cmd=None,
headless=False,
software=False,
emulator=False,
usb=False,
):
return self._run(servo_binary, params, debugger, debugger_cmd, headless, software, emulator, usb)
def _run(self, servo_binary: str, params, debugger=False, debugger_cmd=None,
headless=False, software=False, emulator=False, usb=False):
def _run(
self,
servo_binary: str,
params,
debugger=False,
debugger_cmd=None,
headless=False,
software=False,
emulator=False,
usb=False,
):
env = self.build_env()
env["RUST_BACKTRACE"] = "1"
if software:
@ -91,7 +102,7 @@ class PostBuildCommands(CommandBase):
print("Software rendering is only supported on Linux at the moment.")
return
env['LIBGL_ALWAYS_SOFTWARE'] = "1"
env["LIBGL_ALWAYS_SOFTWARE"] = "1"
os.environ.update(env)
# Make --debugger-cmd imply --debugger
@ -119,7 +130,7 @@ class PostBuildCommands(CommandBase):
"sleep 0.5",
f"echo Servo PID: $(pidof {ANDROID_APP_NAME})",
f"logcat --pid=$(pidof {ANDROID_APP_NAME})",
"exit"
"exit",
]
args = [self.android_adb_path(env)]
if emulator and usb:
@ -136,7 +147,7 @@ class PostBuildCommands(CommandBase):
args = [servo_binary]
if headless:
args.append('-z')
args.append("-z")
# Borrowed and modified from:
# http://hg.mozilla.org/mozilla-central/file/c9cfa9b91dea/python/mozbuild/mozbuild/mach_commands.py#l883
@ -144,8 +155,7 @@ class PostBuildCommands(CommandBase):
if not debugger_cmd:
# No debugger name was provided. Look for the default ones on
# current OS.
debugger_cmd = mozdebug.get_default_debugger_name(
mozdebug.DebuggerSearch.KeepLooking)
debugger_cmd = mozdebug.get_default_debugger_name(mozdebug.DebuggerSearch.KeepLooking)
debugger_info = mozdebug.get_debugger_info(debugger_cmd)
if not debugger_info:
@ -153,17 +163,17 @@ class PostBuildCommands(CommandBase):
return 1
command = debugger_info.path
if debugger_cmd == 'gdb' or debugger_cmd == 'lldb':
rust_command = 'rust-' + debugger_cmd
if debugger_cmd == "gdb" or debugger_cmd == "lldb":
rust_command = "rust-" + debugger_cmd
try:
subprocess.check_call([rust_command, '--version'], env=env, stdout=open(os.devnull, 'w'))
subprocess.check_call([rust_command, "--version"], env=env, stdout=open(os.devnull, "w"))
except (OSError, subprocess.CalledProcessError):
pass
else:
command = rust_command
# Prepend the debugger args.
args = ([command] + debugger_info.args + args + params)
args = [command] + debugger_info.args + args + params
else:
args = args + params
@ -177,36 +187,27 @@ class PostBuildCommands(CommandBase):
return exception.returncode
except OSError as exception:
if exception.errno == 2:
print("Servo Binary can't be found! Run './mach build'"
" and try again!")
print("Servo Binary can't be found! Run './mach build' and try again!")
else:
raise exception
@Command('android-emulator',
description='Run the Android emulator',
category='post-build')
@CommandArgument(
'args', nargs='...',
help="Command-line arguments to be passed through to the emulator")
@Command("android-emulator", description="Run the Android emulator", category="post-build")
@CommandArgument("args", nargs="...", help="Command-line arguments to be passed through to the emulator")
def android_emulator(self, args=None):
if not args:
print("AVDs created by `./mach bootstrap-android` are servo-arm and servo-x86.")
emulator = self.android_emulator_path(self.build_env())
return subprocess.call([emulator] + args)
@Command('rr-record',
description='Run Servo whilst recording execution with rr',
category='post-build')
@CommandArgument(
'params', nargs='...',
help="Command-line arguments to be passed through to Servo")
@Command("rr-record", description="Run Servo whilst recording execution with rr", category="post-build")
@CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to Servo")
@CommandBase.common_command_arguments(binary_selection=True)
def rr_record(self, servo_binary: str, params=[]):
env = self.build_env()
env["RUST_BACKTRACE"] = "1"
servo_cmd = [servo_binary] + params
rr_cmd = ['rr', '--fatal-errors', 'record']
rr_cmd = ["rr", "--fatal-errors", "record"]
try:
check_call(rr_cmd + servo_cmd)
except OSError as e:
@ -215,24 +216,22 @@ class PostBuildCommands(CommandBase):
else:
raise e
@Command('rr-replay',
description='Replay the most recent execution of Servo that was recorded with rr',
category='post-build')
@Command(
"rr-replay",
description="Replay the most recent execution of Servo that was recorded with rr",
category="post-build",
)
def rr_replay(self):
try:
check_call(['rr', '--fatal-errors', 'replay'])
check_call(["rr", "--fatal-errors", "replay"])
except OSError as e:
if e.errno == 2:
print("rr binary can't be found!")
else:
raise e
@Command('doc',
description='Generate documentation',
category='post-build')
@CommandArgument(
'params', nargs='...',
help="Command-line arguments to be passed through to cargo doc")
@Command("doc", description="Generate documentation", category="post-build")
@CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to cargo doc")
@CommandBase.common_command_arguments(build_configuration=True, build_type=False)
def doc(self, params: List[str], **kwargs):
self.ensure_bootstrapped()

View file

@ -46,10 +46,14 @@ SERVO_TESTS_PATH = os.path.join("tests", "wpt", "mozilla", "tests")
# Servo depends on several `rustfmt` options that are unstable. These are still
# supported by stable `rustfmt` if they are passed as these command-line arguments.
UNSTABLE_RUSTFMT_ARGUMENTS = [
"--config", "unstable_features=true",
"--config", "binop_separator=Back",
"--config", "imports_granularity=Module",
"--config", "group_imports=StdExternalCrate",
"--config",
"unstable_features=true",
"--config",
"binop_separator=Back",
"--config",
"imports_granularity=Module",
"--config",
"group_imports=StdExternalCrate",
]
# Listing these globs manually is a work-around for very slow `taplo` invocation
@ -72,9 +76,21 @@ def format_toml_files_with_taplo(check_only: bool = True) -> int:
return 1
if check_only:
return call([taplo, "fmt", "--check", *TOML_GLOBS], env={'RUST_LOG': 'error'})
return call([taplo, "fmt", "--check", *TOML_GLOBS], env={"RUST_LOG": "error"})
else:
return call([taplo, "fmt", *TOML_GLOBS], env={'RUST_LOG': 'error'})
return call([taplo, "fmt", *TOML_GLOBS], env={"RUST_LOG": "error"})
def format_python_files_with_ruff(check_only: bool = True) -> int:
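# With check_only, `ruff format --check` only reports files that would be
# reformatted and exits non-zero; otherwise files are rewritten in place.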
ruff = shutil.which("ruff")
if ruff is None:
print("Could not find `ruff`. Run `./mach bootstrap`")
return 1
if check_only:
return call([ruff, "format", "--check", "--quiet"])
else:
return call([ruff, "format", "--quiet"])
def format_with_rustfmt(check_only: bool = True) -> int:
@ -83,8 +99,17 @@ def format_with_rustfmt(check_only: bool = True) -> int:
if result != 0:
return result
return call(["cargo", "fmt", "--manifest-path", "support/crown/Cargo.toml",
"--", *UNSTABLE_RUSTFMT_ARGUMENTS, *maybe_check_only])
return call(
[
"cargo",
"fmt",
"--manifest-path",
"support/crown/Cargo.toml",
"--",
*UNSTABLE_RUSTFMT_ARGUMENTS,
*maybe_check_only,
]
)
@CommandProvider
@ -97,15 +122,10 @@ class MachCommands(CommandBase):
if not hasattr(self.context, "built_tests"):
self.context.built_tests = False
@Command('test-perf',
description='Run the page load performance test',
category='testing')
@CommandArgument('--base', default=None,
help="the base URL for testcases")
@CommandArgument('--date', default=None,
help="the datestamp for the data")
@CommandArgument('--submit', '-a', default=False, action="store_true",
help="submit the data to perfherder")
@Command("test-perf", description="Run the page load performance test", category="testing")
@CommandArgument("--base", default=None, help="the base URL for testcases")
@CommandArgument("--date", default=None, help="the datestamp for the data")
@CommandArgument("--submit", "-a", default=False, action="store_true", help="submit the data to perfherder")
def test_perf(self, base=None, date=None, submit=False):
env = self.build_env()
cmd = ["bash", "test_perf.sh"]
@ -115,20 +135,15 @@ class MachCommands(CommandBase):
cmd += ["--date", date]
if submit:
cmd += ["--submit"]
return call(cmd,
env=env,
cwd=path.join("etc", "ci", "performance"))
return call(cmd, env=env, cwd=path.join("etc", "ci", "performance"))
@Command('test-unit',
description='Run unit tests',
category='testing')
@CommandArgument('test_name', nargs=argparse.REMAINDER,
help="Only run tests that match this pattern or file path")
@CommandArgument('--package', '-p', default=None, help="Specific package to test")
@CommandArgument('--bench', default=False, action="store_true",
help="Run in bench mode")
@CommandArgument('--nocapture', default=False, action="store_true",
help="Run tests with nocapture (show test stdout)")
@Command("test-unit", description="Run unit tests", category="testing")
@CommandArgument("test_name", nargs=argparse.REMAINDER, help="Only run tests that match this pattern or file path")
@CommandArgument("--package", "-p", default=None, help="Specific package to test")
@CommandArgument("--bench", default=False, action="store_true", help="Run in bench mode")
@CommandArgument(
"--nocapture", default=False, action="store_true", help="Run tests with nocapture ( show test stdout )"
)
@CommandBase.common_command_arguments(build_configuration=True, build_type=True)
def test_unit(self, build_type: BuildType, test_name=None, package=None, bench=False, nocapture=False, **kwargs):
if test_name is None:
@ -183,7 +198,7 @@ class MachCommands(CommandBase):
"stylo_config",
]
if not packages:
packages = set(os.listdir(path.join(self.context.topdir, "tests", "unit"))) - set(['.DS_Store'])
packages = set(os.listdir(path.join(self.context.topdir, "tests", "unit"))) - set([".DS_Store"])
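# ".DS_Store" is Finder metadata that macOS may drop into the checkout.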
packages |= set(self_contained_tests)
in_crate_packages = []
@ -194,7 +209,7 @@ class MachCommands(CommandBase):
except KeyError:
pass
packages.discard('stylo')
packages.discard("stylo")
# Return if there is nothing to do.
if len(packages) == 0 and len(in_crate_packages) == 0:
@ -223,59 +238,56 @@ class MachCommands(CommandBase):
result = call(["cargo", "bench" if bench else "test"], cwd="support/crown")
if result != 0:
return result
return self.run_cargo_build_like_command(
"bench" if bench else "test",
args,
env=env,
**kwargs)
return self.run_cargo_build_like_command("bench" if bench else "test", args, env=env, **kwargs)
@Command('test-content',
description='Run the content tests',
category='testing')
@Command("test-content", description="Run the content tests", category="testing")
def test_content(self):
print("Content tests have been replaced by web-platform-tests under "
"tests/wpt/mozilla/.")
print("Content tests have been replaced by web-platform-tests under tests/wpt/mozilla/.")
return 0
@Command('test-tidy',
description='Run the source code tidiness check',
category='testing')
@CommandArgument('--all', default=False, action="store_true", dest="all_files",
help="Check all files, and run the WPT lint in tidy, "
"even if unchanged")
@CommandArgument('--no-progress', default=False, action="store_true",
help="Don't show progress for tidy")
@Command("test-tidy", description="Run the source code tidiness check", category="testing")
@CommandArgument(
"--all",
default=False,
action="store_true",
dest="all_files",
help="Check all files, and run the WPT lint in tidy, even if unchanged",
)
@CommandArgument("--no-progress", default=False, action="store_true", help="Don't show progress for tidy")
def test_tidy(self, all_files, no_progress):
tidy_failed = tidy.scan(not all_files, not no_progress)
print("\r ➤ Checking formatting of Rust files...")
rustfmt_failed = format_with_rustfmt(check_only=True)
if rustfmt_failed:
print("Run `./mach fmt` to fix the formatting")
print("\r ➤ Checking formatting of python files...")
ruff_format_failed = format_python_files_with_ruff()
print("\r ➤ Checking formatting of toml files...")
taplo_failed = format_toml_files_with_taplo()
tidy_failed = tidy_failed or rustfmt_failed or taplo_failed
format_failed = rustfmt_failed or ruff_format_failed or taplo_failed
tidy_failed = format_failed or tidy_failed
print()
if tidy_failed:
print("\r ❌ test-tidy reported errors.")
else:
print("\r ✅ test-tidy reported no errors.")
if format_failed:
print("Run `./mach fmt` to fix the formatting")
return tidy_failed
@Command('test-scripts',
description='Run tests for all build and support scripts.',
category='testing')
@CommandArgument('--verbose', '-v', default=False, action="store_true",
help="Enable verbose output")
@CommandArgument('--very-verbose', '-vv', default=False, action="store_true",
help="Enable very verbose output")
@CommandArgument('--all', '-a', default=False, action="store_true",
help="Run all script tests, even the slow ones.")
@CommandArgument('tests', default=None, nargs="...",
help="Specific WebIDL tests to run, relative to the tests directory")
@Command("test-scripts", description="Run tests for all build and support scripts.", category="testing")
@CommandArgument("--verbose", "-v", default=False, action="store_true", help="Enable verbose output")
@CommandArgument("--very-verbose", "-vv", default=False, action="store_true", help="Enable very verbose output")
@CommandArgument(
"--all", "-a", default=False, action="store_true", help="Run all script tests, even the slow ones."
)
@CommandArgument(
"tests", default=None, nargs="...", help="Specific WebIDL tests to run, relative to the tests directory"
)
def test_scripts(self, verbose, very_verbose, all, tests):
if very_verbose:
logging.getLogger().level = logging.DEBUG
@ -290,6 +302,7 @@ class MachCommands(CommandBase):
passed = tidy.run_tests() and passed
import python.servo.try_parser as try_parser
print("Running try_parser tests...")
passed = try_parser.run_tests() and passed
@ -302,7 +315,9 @@ class MachCommands(CommandBase):
try:
result = subprocess.run(
["etc/devtools_parser.py", "--json", "--use", "etc/devtools_parser_test.pcap"],
check=True, capture_output=True)
check=True,
capture_output=True,
)
expected = open("etc/devtools_parser_test.json", "rb").read()
actual = result.stdout
assert actual == expected, f"Incorrect output!\nExpected: {repr(expected)}\nActual: {repr(actual)}"
@ -323,41 +338,42 @@ class MachCommands(CommandBase):
sys.path.insert(0, test_file_dir)
run_file = path.abspath(path.join(test_file_dir, "runtests.py"))
run_globals = {"__file__": run_file}
exec(compile(open(run_file).read(), run_file, 'exec'), run_globals)
exec(compile(open(run_file).read(), run_file, "exec"), run_globals)
passed = run_globals["run_tests"](tests, verbose or very_verbose) and passed
return 0 if passed else 1
@Command('test-devtools',
description='Run tests for devtools.',
category='testing')
@Command("test-devtools", description="Run tests for devtools.", category="testing")
def test_devtools(self):
print("Running devtools tests...")
passed = servo.devtools_tests.run_tests(SCRIPT_PATH)
return 0 if passed else 1
@Command('test-wpt-failure',
description='Run the test harness that verifies that test failures are reported correctly',
category='testing',
parser=wpt.create_parser)
@Command(
"test-wpt-failure",
description="Run the tests harness that verifies that the test failures are reported correctly",
category="testing",
parser=wpt.create_parser,
)
@CommandBase.common_command_arguments(build_configuration=False, build_type=True)
def test_wpt_failure(self, build_type: BuildType, **kwargs):
kwargs["pause_after_test"] = False
kwargs["include"] = ["infrastructure/failing-test.html"]
return not self._test_wpt(build_type=build_type, **kwargs)
@Command('test-wpt',
description='Run the regular web platform test suite',
category='testing',
parser=wpt.create_parser)
@Command(
"test-wpt", description="Run the regular web platform test suite", category="testing", parser=wpt.create_parser
)
@CommandBase.common_command_arguments(binary_selection=True)
def test_wpt(self, servo_binary: str, **kwargs):
return self._test_wpt(servo_binary, **kwargs)
@Command('test-wpt-android',
description='Run the web platform test suite in an Android emulator',
category='testing',
parser=wpt.create_parser)
@Command(
"test-wpt-android",
description="Run the web platform test suite in an Android emulator",
category="testing",
parser=wpt.create_parser,
)
@CommandBase.common_command_arguments(build_configuration=False, build_type=True)
def test_wpt_android(self, build_type: BuildType, binary_args=None, **kwargs):
kwargs.update(
@ -374,27 +390,30 @@ class MachCommands(CommandBase):
return_value = wpt.run.run_tests(servo_binary, **kwargs)
return return_value if not kwargs["always_succeed"] else 0
@Command('update-manifest',
description='Run test-wpt --manifest-update SKIP_TESTS to regenerate MANIFEST.json',
category='testing',
parser=wpt.manifestupdate.create_parser)
@Command(
"update-manifest",
description="Run test-wpt --manifest-update SKIP_TESTS to regenerate MANIFEST.json",
category="testing",
parser=wpt.manifestupdate.create_parser,
)
def update_manifest(self, **kwargs):
return wpt.manifestupdate.update(check_clean=False)
@Command('fmt',
description='Format Rust and TOML files',
category='testing')
@Command("fmt", description="Format Rust, Python, and TOML files", category="testing")
def format_code(self):
result = format_python_files_with_ruff(check_only=False)
if result != 0:
return result
result = format_toml_files_with_taplo(check_only=False)
if result != 0:
return result
return format_with_rustfmt(check_only=False)
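# Roughly equivalent manual invocations for the three formatters chained above
# (illustrative; the exact flags and targets come from the repo configuration):
#
#     ruff format .   # Python
#     taplo fmt       # TOML
#     cargo fmt       # Rust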
@Command('update-wpt',
description='Update the web platform tests',
category='testing',
parser=wpt.update.create_parser)
@Command(
"update-wpt", description="Update the web platform tests", category="testing", parser=wpt.update.create_parser
)
def update_wpt(self, **kwargs):
patch = kwargs.get("patch", False)
if not patch and kwargs["sync"]:
@ -402,9 +421,7 @@ class MachCommands(CommandBase):
return 1
return wpt.update.update_tests(**kwargs)
@Command('test-android-startup',
description='Extremely minimal testing of Servo for Android',
category='testing')
@Command("test-android-startup", description="Extremely minimal testing of Servo for Android", category="testing")
@CommandBase.common_command_arguments(build_configuration=False, build_type=True)
def test_android_startup(self, build_type: BuildType):
html = """
@ -441,50 +458,49 @@ class MachCommands(CommandBase):
py = path.join(self.context.topdir, "etc", "run_in_headless_android_emulator.py")
return [py, avd, apk]
@Command('test-jquery', description='Run the jQuery test suite', category='testing')
@Command("test-jquery", description="Run the jQuery test suite", category="testing")
@CommandBase.common_command_arguments(binary_selection=True)
def test_jquery(self, servo_binary: str):
return self.jquery_test_runner("test", servo_binary)
@Command('test-dromaeo', description='Run the Dromaeo test suite', category='testing')
@CommandArgument('tests', default=["recommended"], nargs="...", help="Specific tests to run")
@CommandArgument('--bmf-output', default=None, help="Specify BMF JSON output file")
@Command("test-dromaeo", description="Run the Dromaeo test suite", category="testing")
@CommandArgument("tests", default=["recommended"], nargs="...", help="Specific tests to run")
@CommandArgument("--bmf-output", default=None, help="Specify BMF JSON output file")
@CommandBase.common_command_arguments(binary_selection=True)
def test_dromaeo(self, tests, servo_binary: str, bmf_output: str | None = None):
return self.dromaeo_test_runner(tests, servo_binary, bmf_output)
@Command('test-speedometer', description="Run servo's speedometer", category='testing')
@CommandArgument('--bmf-output', default=None, help="Specify BMF JSON output file")
@Command("test-speedometer", description="Run servo's speedometer", category="testing")
@CommandArgument("--bmf-output", default=None, help="Specify BMF JSON output file")
@CommandBase.common_command_arguments(binary_selection=True)
def test_speedometer(self, servo_binary: str, bmf_output: str | None = None):
return self.speedometer_runner(servo_binary, bmf_output)
@Command('update-jquery',
description='Update the jQuery test suite expected results',
category='testing')
@Command("update-jquery", description="Update the jQuery test suite expected results", category="testing")
@CommandBase.common_command_arguments(binary_selection=True)
def update_jquery(self, servo_binary: str):
return self.jquery_test_runner("update", servo_binary)
@Command('compare_dromaeo',
description='Compare outputs of two runs of ./mach test-dromaeo command',
category='testing')
@CommandArgument('params', default=None, nargs="...",
help=" filepaths of output files of two runs of dromaeo test ")
@Command(
"compare_dromaeo", description="Compare outputs of two runs of ./mach test-dromaeo command", category="testing"
)
@CommandArgument(
"params", default=None, nargs="...", help=" filepaths of output files of two runs of dromaeo test "
)
def compare_dromaeo(self, params):
prev_op_filename = params[0]
cur_op_filename = params[1]
result = {'Test': [], 'Prev_Time': [], 'Cur_Time': [], 'Difference(%)': []}
with open(prev_op_filename, 'r') as prev_op, open(cur_op_filename, 'r') as cur_op:
result = {"Test": [], "Prev_Time": [], "Cur_Time": [], "Difference(%)": []}
with open(prev_op_filename, "r") as prev_op, open(cur_op_filename, "r") as cur_op:
l1 = prev_op.readline()
l2 = cur_op.readline()
while ((l1.find('[dromaeo] Saving...') and l2.find('[dromaeo] Saving...'))):
while l1.find("[dromaeo] Saving...") and l2.find("[dromaeo] Saving..."):
l1 = prev_op.readline()
l2 = cur_op.readline()
reach = 3
while (reach > 0):
while reach > 0:
l1 = prev_op.readline()
l2 = cur_op.readline()
reach -= 1
@ -494,33 +510,62 @@ class MachCommands(CommandBase):
l2 = cur_op.readline()
if not l1:
break
result['Test'].append(str(l1).split('|')[0].strip())
result['Prev_Time'].append(float(str(l1).split('|')[1].strip()))
result['Cur_Time'].append(float(str(l2).split('|')[1].strip()))
a = float(str(l1).split('|')[1].strip())
b = float(str(l2).split('|')[1].strip())
result['Difference(%)'].append(((b - a) / a) * 100)
result["Test"].append(str(l1).split("|")[0].strip())
result["Prev_Time"].append(float(str(l1).split("|")[1].strip()))
result["Cur_Time"].append(float(str(l2).split("|")[1].strip()))
a = float(str(l1).split("|")[1].strip())
b = float(str(l2).split("|")[1].strip())
result["Difference(%)"].append(((b - a) / a) * 100)
width_col1 = max([len(x) for x in result['Test']])
width_col2 = max([len(str(x)) for x in result['Prev_Time']])
width_col3 = max([len(str(x)) for x in result['Cur_Time']])
width_col4 = max([len(str(x)) for x in result['Difference(%)']])
width_col1 = max([len(x) for x in result["Test"]])
width_col2 = max([len(str(x)) for x in result["Prev_Time"]])
width_col3 = max([len(str(x)) for x in result["Cur_Time"]])
width_col4 = max([len(str(x)) for x in result["Difference(%)"]])
for p, q, r, s in zip(['Test'], ['First Run'], ['Second Run'], ['Difference(%)']):
print("\033[1m" + "{}|{}|{}|{}".format(p.ljust(width_col1), q.ljust(width_col2), r.ljust(width_col3),
s.ljust(width_col4)) + "\033[0m" + "\n" + "--------------------------------------------------"
+ "-------------------------------------------------------------------------")
for p, q, r, s in zip(["Test"], ["First Run"], ["Second Run"], ["Difference(%)"]):
print(
"\033[1m"
+ "{}|{}|{}|{}".format(
p.ljust(width_col1), q.ljust(width_col2), r.ljust(width_col3), s.ljust(width_col4)
)
+ "\033[0m"
+ "\n"
+ "--------------------------------------------------"
+ "-------------------------------------------------------------------------"
)
for a1, b1, c1, d1 in zip(result['Test'], result['Prev_Time'], result['Cur_Time'], result['Difference(%)']):
for a1, b1, c1, d1 in zip(result["Test"], result["Prev_Time"], result["Cur_Time"], result["Difference(%)"]):
if d1 > 0:
print("\033[91m" + "{}|{}|{}|{}".format(a1.ljust(width_col1),
str(b1).ljust(width_col2), str(c1).ljust(width_col3), str(d1).ljust(width_col4)) + "\033[0m")
print(
"\033[91m"
+ "{}|{}|{}|{}".format(
a1.ljust(width_col1),
str(b1).ljust(width_col2),
str(c1).ljust(width_col3),
str(d1).ljust(width_col4),
)
+ "\033[0m"
)
elif d1 < 0:
print("\033[92m" + "{}|{}|{}|{}".format(a1.ljust(width_col1),
str(b1).ljust(width_col2), str(c1).ljust(width_col3), str(d1).ljust(width_col4)) + "\033[0m")
print(
"\033[92m"
+ "{}|{}|{}|{}".format(
a1.ljust(width_col1),
str(b1).ljust(width_col2),
str(c1).ljust(width_col3),
str(d1).ljust(width_col4),
)
+ "\033[0m"
)
else:
print("{}|{}|{}|{}".format(a1.ljust(width_col1), str(b1).ljust(width_col2),
str(c1).ljust(width_col3), str(d1).ljust(width_col4)))
print(
"{}|{}|{}|{}".format(
a1.ljust(width_col1),
str(b1).ljust(width_col2),
str(c1).ljust(width_col3),
str(d1).ljust(width_col4),
)
)
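# A minimal sketch of the Difference(%) column above (timings are hypothetical):
# a positive value is a regression (printed red), a negative one an improvement
# (printed green).
prev, cur = 200.0, 250.0
assert ((cur - prev) / prev) * 100 == 25.0  # 25% slower than the previous run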
def jquery_test_runner(self, cmd, binary: str):
base_dir = path.abspath(path.join("tests", "jquery"))
@ -529,12 +574,10 @@ class MachCommands(CommandBase):
# Clone the jQuery repository if it doesn't exist
if not os.path.isdir(jquery_dir):
check_call(
["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/servo/jquery", jquery_dir])
check_call(["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/servo/jquery", jquery_dir])
# Run pull in case the jQuery repo was updated since last test run
check_call(
["git", "-C", jquery_dir, "pull"])
check_call(["git", "-C", jquery_dir, "pull"])
# Check that a release servo build exists
bin_path = path.abspath(binary)
@ -553,29 +596,34 @@ class MachCommands(CommandBase):
# Clone the Dromaeo repository if it doesn't exist
if not os.path.isdir(dromaeo_dir):
check_call(
["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/notriddle/dromaeo", dromaeo_dir])
["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/notriddle/dromaeo", dromaeo_dir]
)
# Run pull in case the Dromaeo repo was updated since last test run
check_call(
["git", "-C", dromaeo_dir, "pull"])
check_call(["git", "-C", dromaeo_dir, "pull"])
# Compile test suite
check_call(
["make", "-C", dromaeo_dir, "web"])
check_call(["make", "-C", dromaeo_dir, "web"])
# Check that a release servo build exists
bin_path = path.abspath(binary)
return check_call(
[run_file, "|".join(tests), bin_path, base_dir, bmf_output])
return check_call([run_file, "|".join(tests), bin_path, base_dir, bmf_output])
def speedometer_runner(self, binary: str, bmf_output: str | None):
speedometer = json.loads(subprocess.check_output([
speedometer = json.loads(
subprocess.check_output(
[
binary,
"https://servospeedometer.netlify.app?headless=1",
"--pref", "dom_allow_scripts_to_close_windows",
"--pref",
"dom_allow_scripts_to_close_windows",
"--window-size=1100x900",
"--headless"], timeout=120).decode())
"--headless",
],
timeout=120,
).decode()
)
print(f"Score: {speedometer['Score']['mean']} ± {speedometer['Score']['delta']}")
@ -583,53 +631,53 @@ class MachCommands(CommandBase):
output = dict()
def parse_speedometer_result(result):
if result['unit'] == "ms":
if result["unit"] == "ms":
output[f"Speedometer/{result['name']}"] = {
'latency': { # speedometer has ms we need to convert to ns
'value': float(result['mean']) * 1000.0,
'lower_value': float(result['min']) * 1000.0,
'upper_value': float(result['max']) * 1000.0,
"latency": { # speedometer has ms we need to convert to ns
"value": float(result["mean"]) * 1000.0,
"lower_value": float(result["min"]) * 1000.0,
"upper_value": float(result["max"]) * 1000.0,
}
}
elif result['unit'] == "score":
elif result["unit"] == "score":
output[f"Speedometer/{result['name']}"] = {
'score': {
'value': float(result['mean']),
'lower_value': float(result['min']),
'upper_value': float(result['max']),
"score": {
"value": float(result["mean"]),
"lower_value": float(result["min"]),
"upper_value": float(result["max"]),
}
}
else:
raise "Unknown unit!"
for child in result['children']:
for child in result["children"]:
parse_speedometer_result(child)
for v in speedometer.values():
parse_speedometer_result(v)
with open(bmf_output, 'w', encoding='utf-8') as f:
with open(bmf_output, "w", encoding="utf-8") as f:
json.dump(output, f, indent=4)
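# Illustrative only (benchmark names and numbers are made up): each parsed
# result becomes one BMF entry keyed by its measure kind, shaped like this:
example_bmf = {
    "Speedometer/Score": {"score": {"value": 100.0, "lower_value": 95.0, "upper_value": 105.0}},
    "Speedometer/TodoMVC": {"latency": {"value": 12000.0, "lower_value": 11000.0, "upper_value": 13000.0}},
}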
@Command('update-net-cookies',
description='Update the net unit tests with cookie tests from http-state',
category='testing')
@Command(
"update-net-cookies",
description="Update the net unit tests with cookie tests from http-state",
category="testing",
)
def update_net_cookies(self):
cache_dir = path.join(self.config["tools"]["cache-dir"], "tests")
run_file = path.abspath(path.join(PROJECT_TOPLEVEL_PATH,
"components", "net", "tests",
"cookie_http_state_utils.py"))
run_file = path.abspath(
path.join(PROJECT_TOPLEVEL_PATH, "components", "net", "tests", "cookie_http_state_utils.py")
)
run_globals = {"__file__": run_file}
exec(compile(open(run_file).read(), run_file, 'exec'), run_globals)
exec(compile(open(run_file).read(), run_file, "exec"), run_globals)
return run_globals["update_test_file"](cache_dir)
@Command('update-webgl',
description='Update the WebGL conformance suite tests from Khronos repo',
category='testing')
@CommandArgument('--version', default='2.0.0',
help='WebGL conformance suite version')
@Command(
"update-webgl", description="Update the WebGL conformance suite tests from Khronos repo", category="testing"
)
@CommandArgument("--version", default="2.0.0", help="WebGL conformance suite version")
def update_webgl(self, version=None):
base_dir = path.abspath(path.join(PROJECT_TOPLEVEL_PATH,
"tests", "wpt", "mozilla", "tests", "webgl"))
base_dir = path.abspath(path.join(PROJECT_TOPLEVEL_PATH, "tests", "wpt", "mozilla", "tests", "webgl"))
run_file = path.join(base_dir, "tools", "import-conformance-tests.py")
dest_folder = path.join(base_dir, "conformance-%s" % version)
patches_dir = path.join(base_dir, "tools")
@ -638,18 +686,12 @@ class MachCommands(CommandBase):
shutil.rmtree(dest_folder)
run_globals = {"__file__": run_file}
exec(compile(open(run_file).read(), run_file, 'exec'), run_globals)
exec(compile(open(run_file).read(), run_file, "exec"), run_globals)
return run_globals["update_conformance"](version, dest_folder, None, patches_dir)
@Command('update-webgpu',
description='Update the WebGPU conformance test suite',
category='testing')
@CommandArgument(
'--repo', '-r', default="https://github.com/gpuweb/cts",
help='Repo to vendor cts from')
@CommandArgument(
'--checkout', '-c', default="main",
help='Branch or commit of repo')
@Command("update-webgpu", description="Update the WebGPU conformance test suite", category="testing")
@CommandArgument("--repo", "-r", default="https://github.com/gpuweb/cts", help="Repo to vendor cts from")
@CommandArgument("--checkout", "-c", default="main", help="Branch or commit of repo")
def cts(self, repo="https://github.com/gpuweb/cts", checkout="main"):
tdir = path.join(self.context.topdir, "tests/wpt/webgpu/tests")
clone_dir = path.join(tdir, "cts_clone")
@ -672,52 +714,52 @@ class MachCommands(CommandBase):
delete(path.join(clone_dir, "out-wpt", "cts-chunked2sec.https.html"))
cts_html = path.join(clone_dir, "out-wpt", "cts.https.html")
# patch
with open(cts_html, 'r') as file:
with open(cts_html, "r") as file:
filedata = file.read()
# files are mounted differently
filedata = filedata.replace('src=/webgpu/common/runtime/wpt.js', 'src=../webgpu/common/runtime/wpt.js')
filedata = filedata.replace("src=/webgpu/common/runtime/wpt.js", "src=../webgpu/common/runtime/wpt.js")
# Mark all webgpu tests as long to increase their timeouts. This is needed due to wgpu's slowness.
# TODO: replace this with more fine grained solution: https://github.com/servo/servo/issues/30999
filedata = filedata.replace('<meta charset=utf-8>',
'<meta charset=utf-8>\n<meta name="timeout" content="long">')
filedata = filedata.replace(
"<meta charset=utf-8>", '<meta charset=utf-8>\n<meta name="timeout" content="long">'
)
# Write the file out again
with open(cts_html, 'w') as file:
with open(cts_html, "w") as file:
file.write(filedata)
logger = path.join(clone_dir, "out-wpt", "common/internal/logging/test_case_recorder.js")
with open(logger, 'r') as file:
with open(logger, "r") as file:
filedata = file.read()
filedata.replace("info(ex) {", "info(ex) {return;")
with open(logger, 'w') as file:
with open(logger, "w") as file:
file.write(filedata)
# copy
delete(path.join(tdir, "webgpu"))
shutil.copytree(path.join(clone_dir, "out-wpt"), path.join(tdir, "webgpu"))
# update commit
commit = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=clone_dir).decode()
with open(path.join(tdir, "checkout_commit.txt"), 'w') as file:
with open(path.join(tdir, "checkout_commit.txt"), "w") as file:
file.write(commit)
# clean up
delete(clone_dir)
print("Updating manifest.")
return self.context.commands.dispatch("update-manifest", self.context)
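# After the patch above, the head of the vendored cts.https.html carries the
# long-timeout marker (illustrative excerpt):
#
#     <meta charset=utf-8>
#     <meta name="timeout" content="long">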
@Command('smoketest',
description='Load a simple page in Servo and ensure that it closes properly',
category='testing')
@CommandArgument('params', nargs='...',
help="Command-line arguments to be passed through to Servo")
@Command(
"smoketest", description="Load a simple page in Servo and ensure that it closes properly", category="testing"
)
@CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to Servo")
@CommandBase.common_command_arguments(binary_selection=True)
def smoketest(self, servo_binary: str, params, **kwargs):
# We pass `-f` here so that any thread panic will cause Servo to exit,
# preventing a panic from hanging execution. This means that these kinds
# of panics won't cause timeouts on CI.
return PostBuildCommands(self.context)._run(servo_binary,
params + ['-f', 'tests/html/close-on-load.html'])
return PostBuildCommands(self.context)._run(servo_binary, params + ["-f", "tests/html/close-on-load.html"])
@Command('try', description='Runs try jobs by force pushing to try branch', category='testing')
@CommandArgument('--remote', '-r', default="origin", help='A git remote to run the try job on')
@CommandArgument('try_strings', default=["full"], nargs='...',
help="A list of try strings specifying what kind of job to run.")
@Command("try", description="Runs try jobs by force pushing to try branch", category="testing")
@CommandArgument("--remote", "-r", default="origin", help="A git remote to run the try job on")
@CommandArgument(
"try_strings", default=["full"], nargs="...", help="A list of try strings specifying what kind of job to run."
)
def try_command(self, remote: str, try_strings: list[str]):
if subprocess.check_output(["git", "diff", "--cached", "--name-only"]).strip():
print("Cannot run `try` with staged and uncommited changes. ")
@ -755,7 +797,7 @@ class MachCommands(CommandBase):
# tool and get the real URL.
actions_url = remote_url.replace(".git", "/actions")
if not actions_url.startswith("https"):
actions_url = actions_url.replace(':', '/')
actions_url = actions_url.replace(":", "/")
actions_url = actions_url.replace("git@", "")
actions_url = f"https://{actions_url}"
print(f"Actions available at: {actions_url}")
@ -770,25 +812,27 @@ class MachCommands(CommandBase):
def create_parser_create():
import argparse
p = argparse.ArgumentParser()
p.add_argument("--no-editor", action="store_true",
help="Don't try to open the test in an editor")
p.add_argument("--no-editor", action="store_true", help="Don't try to open the test in an editor")
p.add_argument("-e", "--editor", action="store", help="Editor to use")
p.add_argument("--no-run", action="store_true",
help="Don't try to update the wpt manifest or open the test in a browser")
p.add_argument('--release', action="store_true",
help="Run with a release build of servo")
p.add_argument("--long-timeout", action="store_true",
help="Test should be given a long timeout (typically 60s rather than 10s,"
"but varies depending on environment)")
p.add_argument("--overwrite", action="store_true",
help="Allow overwriting an existing test file")
p.add_argument("-r", "--reftest", action="store_true",
help="Create a reftest rather than a testharness (js) test"),
p.add_argument(
"--no-run", action="store_true", help="Don't try to update the wpt manifest or open the test in a browser"
)
p.add_argument("--release", action="store_true", help="Run with a release build of servo")
p.add_argument(
"--long-timeout",
action="store_true",
help="Test should be given a long timeout (typically 60s rather than 10s,but varies depending on environment)",
)
p.add_argument("--overwrite", action="store_true", help="Allow overwriting an existing test file")
p.add_argument(
    "-r", "--reftest", action="store_true", help="Create a reftest rather than a testharness (js) test"
)
p.add_argument("-ref", "--reference", dest="ref", help="Path to the reference file")
p.add_argument("--mismatch", action="store_true",
help="Create a mismatch reftest")
p.add_argument("--wait", action="store_true",
help="Create a reftest that waits until takeScreenshot() is called")
p.add_argument("--mismatch", action="store_true", help="Create a mismatch reftest")
p.add_argument("--wait", action="store_true", help="Create a reftest that waits until takeScreenshot() is called")
p.add_argument("path", action="store", help="Path to the test file")
return p

View file

@ -44,7 +44,7 @@ class JobConfig(object):
number_of_wpt_chunks: int = 20
# These are the fields that must match in between two JobConfigs for them to be able to be
# merged. If you modify any of the fields above, make sure to update this line as well.
merge_compatibility_fields: ClassVar[List[str]] = ['workflow', 'profile', 'wpt_args', 'build_args']
merge_compatibility_fields: ClassVar[List[str]] = ["workflow", "profile", "wpt_args", "build_args"]
def merge(self, other: JobConfig) -> bool:
"""Try to merge another job with this job. Returns True if merging is successful
@ -101,11 +101,14 @@ def handle_preset(s: str) -> Optional[JobConfig]:
elif any(word in s for word in ["ohos", "openharmony"]):
return JobConfig("OpenHarmony", Workflow.OHOS)
elif any(word in s for word in ["webgpu"]):
return JobConfig("WebGPU CTS", Workflow.LINUX,
return JobConfig(
"WebGPU CTS",
Workflow.LINUX,
wpt=True,  # reftests are made for new layout
wpt_args="_webgpu",  # run only webgpu cts
profile="production",  # WebGPU works too slowly with debug assertions
unit_tests=False) # production profile does not work with unit-tests
unit_tests=False,  # production profile does not work with unit-tests
)
elif any(word in s for word in ["lint", "tidy"]):
return JobConfig("Lint", Workflow.LINT)
else:
@ -199,115 +202,130 @@ if __name__ == "__main__":
class TestParser(unittest.TestCase):
def test_string(self):
self.assertDictEqual(json.loads(Config("linux-unit-tests fail-fast").to_json()),
{'fail_fast': True,
'matrix': [{
'bencher': False,
'name': 'Linux (Unit Tests)',
'number_of_wpt_chunks': 20,
'profile': 'release',
'unit_tests': True,
'build_libservo': False,
'workflow': 'linux',
'wpt': False,
'wpt_args': '',
'build_args': ''
}]
})
self.assertDictEqual(
json.loads(Config("linux-unit-tests fail-fast").to_json()),
{
"fail_fast": True,
"matrix": [
{
"bencher": False,
"name": "Linux (Unit Tests)",
"number_of_wpt_chunks": 20,
"profile": "release",
"unit_tests": True,
"build_libservo": False,
"workflow": "linux",
"wpt": False,
"wpt_args": "",
"build_args": "",
}
],
},
)
def test_empty(self):
self.assertDictEqual(json.loads(Config("").to_json()),
{"fail_fast": False, "matrix": [
self.assertDictEqual(
json.loads(Config("").to_json()),
{
"fail_fast": False,
"matrix": [
{
"name": "Linux (Unit Tests, WPT, Bencher)",
'number_of_wpt_chunks': 20,
"number_of_wpt_chunks": 20,
"workflow": "linux",
"wpt": True,
"profile": "release",
"unit_tests": True,
'build_libservo': False,
'bencher': True,
"build_libservo": False,
"bencher": True,
"wpt_args": "",
'build_args': ''
"build_args": "",
},
{
"name": "MacOS (Unit Tests)",
'number_of_wpt_chunks': 20,
"number_of_wpt_chunks": 20,
"workflow": "macos",
"wpt": False,
"profile": "release",
"unit_tests": True,
'build_libservo': False,
'bencher': False,
"build_libservo": False,
"bencher": False,
"wpt_args": "",
'build_args': ''
"build_args": "",
},
{
"name": "Windows (Unit Tests)",
'number_of_wpt_chunks': 20,
"number_of_wpt_chunks": 20,
"workflow": "windows",
"wpt": False,
"profile": "release",
"unit_tests": True,
'build_libservo': False,
'bencher': False,
"build_libservo": False,
"bencher": False,
"wpt_args": "",
'build_args': ''
"build_args": "",
},
{
"name": "Android",
'number_of_wpt_chunks': 20,
"number_of_wpt_chunks": 20,
"workflow": "android",
"wpt": False,
"profile": "release",
"unit_tests": False,
'build_libservo': False,
'bencher': False,
"build_libservo": False,
"bencher": False,
"wpt_args": "",
'build_args': ''
"build_args": "",
},
{
"name": "OpenHarmony",
'number_of_wpt_chunks': 20,
"number_of_wpt_chunks": 20,
"workflow": "ohos",
"wpt": False,
"profile": "release",
"unit_tests": False,
'build_libservo': False,
'bencher': False,
"build_libservo": False,
"bencher": False,
"wpt_args": "",
'build_args': ''
"build_args": "",
},
{
"name": "Lint",
'number_of_wpt_chunks': 20,
"number_of_wpt_chunks": 20,
"workflow": "lint",
"wpt": False,
"profile": "release",
"unit_tests": False,
'build_libservo': False,
'bencher': False,
"build_libservo": False,
"bencher": False,
"wpt_args": "",
'build_args': ''
}
]})
"build_args": "",
},
],
},
)
def test_job_merging(self):
self.assertDictEqual(json.loads(Config("linux-wpt").to_json()),
{'fail_fast': False,
'matrix': [{
'bencher': False,
'name': 'Linux (WPT)',
'number_of_wpt_chunks': 20,
'profile': 'release',
'unit_tests': False,
'build_libservo': False,
'workflow': 'linux',
'wpt': True,
'wpt_args': '',
'build_args': ''
}]
})
self.assertDictEqual(
json.loads(Config("linux-wpt").to_json()),
{
"fail_fast": False,
"matrix": [
{
"bencher": False,
"name": "Linux (WPT)",
"number_of_wpt_chunks": 20,
"profile": "release",
"unit_tests": False,
"build_libservo": False,
"workflow": "linux",
"wpt": True,
"wpt_args": "",
"build_args": "",
}
],
},
)
a = JobConfig("Linux (Unit Tests)", Workflow.LINUX, unit_tests=True)
b = JobConfig("Linux", Workflow.LINUX, unit_tests=False)
@ -319,8 +337,7 @@ class TestParser(unittest.TestCase):
b = handle_preset("linux-wpt")
b = handle_modifier(b, "linux-wpt")
self.assertTrue(a.merge(b), "Should merge jobs that have different unit test configurations.")
self.assertEqual(a, JobConfig("Linux (Unit Tests, WPT)", Workflow.LINUX,
unit_tests=True, wpt=True))
self.assertEqual(a, JobConfig("Linux (Unit Tests, WPT)", Workflow.LINUX, unit_tests=True, wpt=True))
a = JobConfig("Linux (Unit Tests)", Workflow.LINUX, unit_tests=True)
b = JobConfig("Mac", Workflow.MACOS, unit_tests=True)
@ -343,12 +360,10 @@ class TestParser(unittest.TestCase):
self.assertEqual(a, JobConfig("Linux (Unit Tests)", Workflow.LINUX, unit_tests=True))
def test_full(self):
self.assertDictEqual(json.loads(Config("full").to_json()),
json.loads(Config("").to_json()))
self.assertDictEqual(json.loads(Config("full").to_json()), json.loads(Config("").to_json()))
def test_wpt_alias(self):
self.assertDictEqual(json.loads(Config("wpt").to_json()),
json.loads(Config("linux-wpt").to_json()))
self.assertDictEqual(json.loads(Config("wpt").to_json()), json.loads(Config("linux-wpt").to_json()))
def run_tests():

View file

@ -49,12 +49,12 @@ def download(description: str, url: str, writer: BufferedIOBase, start_byte: int
try:
req = urllib.request.Request(url)
if start_byte:
req = urllib.request.Request(url, headers={'Range': 'bytes={}-'.format(start_byte)})
req = urllib.request.Request(url, headers={"Range": "bytes={}-".format(start_byte)})
resp = urllib.request.urlopen(req)
fsize = None
if resp.info().get('Content-Length'):
fsize = int(resp.info().get('Content-Length').strip()) + start_byte
if resp.info().get("Content-Length"):
fsize = int(resp.info().get("Content-Length").strip()) + start_byte
recved = start_byte
chunk_size = 64 * 1024
@ -72,7 +72,7 @@ def download(description: str, url: str, writer: BufferedIOBase, start_byte: int
progress_line = "\rDownloading %s: %5.1f%%" % (description, pct)
now = time.time()
duration = now - previous_progress_line_time
if progress_line != previous_progress_line and duration > .1:
if progress_line != previous_progress_line and duration > 0.1:
print(progress_line, end="")
previous_progress_line = progress_line
previous_progress_line_time = now
@ -85,8 +85,10 @@ def download(description: str, url: str, writer: BufferedIOBase, start_byte: int
except urllib.error.HTTPError as e:
print("Download failed ({}): {} - {}".format(e.code, e.reason, url))
if e.code == 403:
print("No Rust compiler binary available for this platform. "
"Please see https://github.com/servo/servo/#prerequisites")
print(
"No Rust compiler binary available for this platform. "
"Please see https://github.com/servo/servo/#prerequisites"
)
sys.exit(1)
except urllib.error.URLError as e:
print("Error downloading {}: {}. The failing URL was: {}".format(description, e.reason, url))
@ -109,10 +111,10 @@ def download_file(description: str, url: str, destination_path: str):
tmp_path = destination_path + ".part"
try:
start_byte = os.path.getsize(tmp_path)
with open(tmp_path, 'ab') as fd:
with open(tmp_path, "ab") as fd:
download(description, url, fd, start_byte=start_byte)
except os.error:
with open(tmp_path, 'wb') as fd:
with open(tmp_path, "wb") as fd:
download(description, url, fd)
os.rename(tmp_path, destination_path)
@ -129,7 +131,7 @@ class ZipFileWithUnixPermissions(zipfile.ZipFile):
extracted = self._extract_member(member, path, pwd)
mode = os.stat(extracted).st_mode
mode |= (member.external_attr >> 16)
mode |= member.external_attr >> 16
os.chmod(extracted, mode)
return extracted
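# The shift above works because zipfile stores the Unix st_mode in the high
# 16 bits of external_attr; a member archived with mode 0o755 round-trips:
external_attr = 0o755 << 16
assert (external_attr >> 16) & 0o777 == 0o755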

View file

@ -38,7 +38,7 @@ def find_vswhere():
for path in [PROGRAM_FILES, PROGRAM_FILES_X86]:
if not path:
continue
vswhere = os.path.join(path, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe')
vswhere = os.path.join(path, "Microsoft Visual Studio", "Installer", "vswhere.exe")
if os.path.exists(vswhere):
return vswhere
return None
@ -52,24 +52,30 @@ def find_compatible_msvc_with_vswhere() -> Generator[VisualStudioInstallation, N
if not vswhere:
return
output = subprocess.check_output([
output = subprocess.check_output(
[
vswhere,
'-format', 'json',
'-products', '*',
'-requires', 'Microsoft.VisualStudio.Component.VC.Tools.x86.x64',
'-requires', 'Microsoft.VisualStudio.Component.Windows10SDK',
'-utf8'
]).decode(errors='ignore')
"-format",
"json",
"-products",
"*",
"-requires",
"Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
"-requires",
"Microsoft.VisualStudio.Component.Windows10SDK",
"-utf8",
]
).decode(errors="ignore")
for install in json.loads(output):
installed_version = f"{install['installationVersion'].split('.')[0]}.0"
if installed_version not in COMPATIBLE_MSVC_VERSIONS.values():
continue
installation_path = install['installationPath']
installation_path = install["installationPath"]
yield VisualStudioInstallation(
version_number=installed_version,
installation_path=installation_path,
vc_install_path=os.path.join(installation_path, "VC")
vc_install_path=os.path.join(installation_path, "VC"),
)
@ -77,20 +83,20 @@ def find_compatible_msvc_with_path() -> Generator[VisualStudioInstallation, None
for program_files in [PROGRAM_FILES, PROGRAM_FILES_X86]:
if not program_files:
continue
for (version, version_number) in COMPATIBLE_MSVC_VERSIONS.items():
for version, version_number in COMPATIBLE_MSVC_VERSIONS.items():
for edition in ["Enterprise", "Professional", "Community", "BuildTools"]:
installation_path = os.path.join(program_files, "Microsoft Visual Studio", version, edition)
if os.path.exists(installation_path):
yield VisualStudioInstallation(
version_number=version_number,
installation_path=installation_path,
vc_install_path=os.path.join(installation_path, "VC")
vc_install_path=os.path.join(installation_path, "VC"),
)
def find_compatible_msvc_with_environment_variables() -> Optional[VisualStudioInstallation]:
installation_path = os.environ.get('VSINSTALLDIR')
version_number = os.environ.get('VisualStudioVersion')
installation_path = os.environ.get("VSINSTALLDIR")
version_number = os.environ.get("VisualStudioVersion")
if not installation_path or not version_number:
return None
vc_install_path = os.environ.get("VCINSTALLDIR", os.path.join(installation_path, "VC"))
@ -116,8 +122,10 @@ def find_msvc_installations() -> List[VisualStudioInstallation]:
if installation:
return [installation]
raise Exception("Can't find a Visual Studio installation. "
"Please set the VSINSTALLDIR and VisualStudioVersion environment variables")
raise Exception(
"Can't find a Visual Studio installation. "
"Please set the VSINSTALLDIR and VisualStudioVersion environment variables"
)
def find_msvc_redist_dirs(vs_platform: str) -> Generator[str, None, None]:
@ -160,7 +168,7 @@ def find_windows_sdk_installation_path() -> str:
# This is based on the advice from
# https://stackoverflow.com/questions/35119223/how-to-programmatically-detect-and-locate-the-windows-10-sdk
key_path = r'SOFTWARE\Wow6432Node\Microsoft\Microsoft SDKs\Windows\v10.0'
key_path = r"SOFTWARE\Wow6432Node\Microsoft\Microsoft SDKs\Windows\v10.0"
try:
with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, key_path) as key:
return str(winreg.QueryValueEx(key, "InstallationFolder")[0])

View file

@ -15,7 +15,7 @@ import unittest
from . import tidy
BASE_PATH = 'python/tidy/tests/'
BASE_PATH = "python/tidy/tests/"
def test_file_path(name):
@ -32,179 +32,170 @@ class CheckTidiness(unittest.TestCase):
next(errors)
def test_tidy_config(self):
errors = tidy.check_config_file(os.path.join(BASE_PATH, 'servo-tidy.toml'), print_text=False)
errors = tidy.check_config_file(os.path.join(BASE_PATH, "servo-tidy.toml"), print_text=False)
self.assertEqual("invalid config key 'key-outside'", next(errors)[2])
self.assertEqual("invalid config key 'wrong-key'", next(errors)[2])
self.assertEqual('invalid config table [wrong]', next(errors)[2])
self.assertEqual("invalid config table [wrong]", next(errors)[2])
self.assertEqual("ignored file './fake/file.html' doesn't exist", next(errors)[2])
self.assertEqual("ignored directory './fake/dir' doesn't exist", next(errors)[2])
self.assertNoMoreErrors(errors)
def test_directory_checks(self):
dirs = {
os.path.join(BASE_PATH, "dir_check/webidl_plus"): ['webidl', 'test'],
os.path.join(BASE_PATH, "dir_check/only_webidl"): ['webidl']
os.path.join(BASE_PATH, "dir_check/webidl_plus"): ["webidl", "test"],
os.path.join(BASE_PATH, "dir_check/only_webidl"): ["webidl"],
}
errors = tidy.check_directory_files(dirs, print_text=False)
error_dir = os.path.join(BASE_PATH, "dir_check/webidl_plus")
self.assertEqual("Unexpected extension found for test.rs. We only expect files with webidl, "
+ f"test extensions in {error_dir}", next(errors)[2])
self.assertEqual("Unexpected extension found for test2.rs. We only expect files with webidl, "
+ f"test extensions in {error_dir}", next(errors)[2])
self.assertEqual(
"Unexpected extension found for test.rs. We only expect files with webidl, "
+ f"test extensions in {error_dir}",
next(errors)[2],
)
self.assertEqual(
"Unexpected extension found for test2.rs. We only expect files with webidl, "
+ f"test extensions in {error_dir}",
next(errors)[2],
)
self.assertNoMoreErrors(errors)
def test_spaces_correctness(self):
errors = tidy.collect_errors_for_files(iterFile('wrong_space.rs'), [], [tidy.check_by_line], print_text=False)
self.assertEqual('trailing whitespace', next(errors)[2])
self.assertEqual('no newline at EOF', next(errors)[2])
self.assertEqual('tab on line', next(errors)[2])
self.assertEqual('CR on line', next(errors)[2])
self.assertEqual('no newline at EOF', next(errors)[2])
errors = tidy.collect_errors_for_files(iterFile("wrong_space.rs"), [], [tidy.check_by_line], print_text=False)
self.assertEqual("trailing whitespace", next(errors)[2])
self.assertEqual("no newline at EOF", next(errors)[2])
self.assertEqual("tab on line", next(errors)[2])
self.assertEqual("CR on line", next(errors)[2])
self.assertEqual("no newline at EOF", next(errors)[2])
self.assertNoMoreErrors(errors)
def test_empty_file(self):
errors = tidy.collect_errors_for_files(iterFile('empty_file.rs'), [], [tidy.check_by_line], print_text=False)
self.assertEqual('file is empty', next(errors)[2])
errors = tidy.collect_errors_for_files(iterFile("empty_file.rs"), [], [tidy.check_by_line], print_text=False)
self.assertEqual("file is empty", next(errors)[2])
self.assertNoMoreErrors(errors)
def test_long_line(self):
errors = tidy.collect_errors_for_files(iterFile('long_line.rs'), [], [tidy.check_by_line], print_text=False)
self.assertEqual('Line is longer than 120 characters', next(errors)[2])
errors = tidy.collect_errors_for_files(iterFile("long_line.rs"), [], [tidy.check_by_line], print_text=False)
self.assertEqual("Line is longer than 120 characters", next(errors)[2])
self.assertNoMoreErrors(errors)
def test_whatwg_link(self):
errors = tidy.collect_errors_for_files(iterFile('whatwg_link.rs'), [], [tidy.check_by_line], print_text=False)
self.assertEqual('link to WHATWG may break in the future, use this format instead: https://html.spec.whatwg.org/multipage/#dom-context-2d-putimagedata', next(errors)[2])
self.assertEqual('links to WHATWG single-page url, change to multi page: https://html.spec.whatwg.org/multipage/#typographic-conventions', next(errors)[2])
errors = tidy.collect_errors_for_files(iterFile("whatwg_link.rs"), [], [tidy.check_by_line], print_text=False)
self.assertEqual(
"link to WHATWG may break in the future, use this format instead: https://html.spec.whatwg.org/multipage/#dom-context-2d-putimagedata",
next(errors)[2],
)
self.assertEqual(
"links to WHATWG single-page url, change to multi page: https://html.spec.whatwg.org/multipage/#typographic-conventions",
next(errors)[2],
)
self.assertNoMoreErrors(errors)
def test_license(self):
errors = tidy.collect_errors_for_files(
iterFile('incorrect_license.rs'),
[],
[tidy.check_license],
print_text=False
iterFile("incorrect_license.rs"), [], [tidy.check_license], print_text=False
)
self.assertEqual('incorrect license', next(errors)[2])
self.assertEqual("incorrect license", next(errors)[2])
self.assertNoMoreErrors(errors)
def test_shebang_license(self):
errors = tidy.collect_errors_for_files(
iterFile('shebang_license.py'),
[],
[tidy.check_license],
print_text=False
iterFile("shebang_license.py"), [], [tidy.check_license], print_text=False
)
self.assertEqual('missing blank line after shebang', next(errors)[2])
self.assertEqual("missing blank line after shebang", next(errors)[2])
self.assertNoMoreErrors(errors)
def test_shell(self):
errors = tidy.collect_errors_for_files(iterFile('shell_tidy.sh'), [], [tidy.check_shell], print_text=False)
errors = tidy.collect_errors_for_files(iterFile("shell_tidy.sh"), [], [tidy.check_shell], print_text=False)
self.assertEqual('script does not have shebang "#!/usr/bin/env bash"', next(errors)[2])
self.assertEqual('script is missing options "set -o errexit", "set -o pipefail"', next(errors)[2])
self.assertEqual('script should not use backticks for command substitution', next(errors)[2])
self.assertEqual('variable substitutions should use the full \"${VAR}\" form', next(errors)[2])
self.assertEqual('script should use `[[` instead of `[` for conditional testing', next(errors)[2])
self.assertEqual('script should use `[[` instead of `[` for conditional testing', next(errors)[2])
self.assertEqual("script should not use backticks for command substitution", next(errors)[2])
self.assertEqual('variable substitutions should use the full "${VAR}" form', next(errors)[2])
self.assertEqual("script should use `[[` instead of `[` for conditional testing", next(errors)[2])
self.assertEqual("script should use `[[` instead of `[` for conditional testing", next(errors)[2])
self.assertNoMoreErrors(errors)
def test_apache2_incomplete(self):
errors = tidy.collect_errors_for_files(
iterFile('apache2_license.rs'),
[],
[tidy.check_license],
print_text=False
iterFile("apache2_license.rs"), [], [tidy.check_license], print_text=False
)
self.assertEqual('incorrect license', next(errors)[2])
self.assertEqual("incorrect license", next(errors)[2])
def test_rust(self):
errors = tidy.collect_errors_for_files(
iterFile('rust_tidy.rs'),
[],
[tidy.check_rust],
print_text=False
)
self.assertTrue('mod declaration is not in alphabetical order' in next(errors)[2])
self.assertEqual('mod declaration spans multiple lines', next(errors)[2])
self.assertTrue('derivable traits list is not in alphabetical order' in next(errors)[2])
self.assertEqual('found an empty line following a {', next(errors)[2])
self.assertEqual('use &[T] instead of &Vec<T>', next(errors)[2])
self.assertEqual('use &str instead of &String', next(errors)[2])
self.assertEqual('use &T instead of &Root<T>', next(errors)[2])
self.assertEqual('use &T instead of &DomRoot<T>', next(errors)[2])
self.assertEqual('encountered function signature with -> ()', next(errors)[2])
self.assertEqual('operators should go at the end of the first line', next(errors)[2])
self.assertEqual('unwrap() or panic!() found in code which should not panic.', next(errors)[2])
self.assertEqual('unwrap() or panic!() found in code which should not panic.', next(errors)[2])
errors = tidy.collect_errors_for_files(iterFile("rust_tidy.rs"), [], [tidy.check_rust], print_text=False)
self.assertTrue("mod declaration is not in alphabetical order" in next(errors)[2])
self.assertEqual("mod declaration spans multiple lines", next(errors)[2])
self.assertTrue("derivable traits list is not in alphabetical order" in next(errors)[2])
self.assertEqual("found an empty line following a {", next(errors)[2])
self.assertEqual("use &[T] instead of &Vec<T>", next(errors)[2])
self.assertEqual("use &str instead of &String", next(errors)[2])
self.assertEqual("use &T instead of &Root<T>", next(errors)[2])
self.assertEqual("use &T instead of &DomRoot<T>", next(errors)[2])
self.assertEqual("encountered function signature with -> ()", next(errors)[2])
self.assertEqual("operators should go at the end of the first line", next(errors)[2])
self.assertEqual("unwrap() or panic!() found in code which should not panic.", next(errors)[2])
self.assertEqual("unwrap() or panic!() found in code which should not panic.", next(errors)[2])
self.assertNoMoreErrors(errors)
feature_errors = tidy.collect_errors_for_files(iterFile('lib.rs'), [], [tidy.check_rust], print_text=False)
feature_errors = tidy.collect_errors_for_files(iterFile("lib.rs"), [], [tidy.check_rust], print_text=False)
self.assertTrue('feature attribute is not in alphabetical order' in next(feature_errors)[2])
self.assertTrue('feature attribute is not in alphabetical order' in next(feature_errors)[2])
self.assertTrue('feature attribute is not in alphabetical order' in next(feature_errors)[2])
self.assertTrue('feature attribute is not in alphabetical order' in next(feature_errors)[2])
self.assertTrue("feature attribute is not in alphabetical order" in next(feature_errors)[2])
self.assertTrue("feature attribute is not in alphabetical order" in next(feature_errors)[2])
self.assertTrue("feature attribute is not in alphabetical order" in next(feature_errors)[2])
self.assertTrue("feature attribute is not in alphabetical order" in next(feature_errors)[2])
self.assertNoMoreErrors(feature_errors)
ban_errors = tidy.collect_errors_for_files(iterFile('ban.rs'), [], [tidy.check_rust], print_text=False)
self.assertEqual('Banned type Cell<JSVal> detected. Use MutDom<JSVal> instead', next(ban_errors)[2])
ban_errors = tidy.collect_errors_for_files(iterFile("ban.rs"), [], [tidy.check_rust], print_text=False)
self.assertEqual("Banned type Cell<JSVal> detected. Use MutDom<JSVal> instead", next(ban_errors)[2])
self.assertNoMoreErrors(ban_errors)
ban_errors = tidy.collect_errors_for_files(iterFile(
'ban-domrefcell.rs'),
[],
[tidy.check_rust],
print_text=False
ban_errors = tidy.collect_errors_for_files(
iterFile("ban-domrefcell.rs"), [], [tidy.check_rust], print_text=False
)
self.assertEqual('Banned type DomRefCell<Dom<T>> detected. Use MutDom<T> instead', next(ban_errors)[2])
self.assertEqual("Banned type DomRefCell<Dom<T>> detected. Use MutDom<T> instead", next(ban_errors)[2])
self.assertNoMoreErrors(ban_errors)
def test_spec_link(self):
tidy.SPEC_BASE_PATH = BASE_PATH
errors = tidy.collect_errors_for_files(iterFile('speclink.rs'), [], [tidy.check_spec], print_text=False)
self.assertEqual('method declared in webidl is missing a comment with a specification link', next(errors)[2])
self.assertEqual('method declared in webidl is missing a comment with a specification link', next(errors)[2])
errors = tidy.collect_errors_for_files(iterFile("speclink.rs"), [], [tidy.check_spec], print_text=False)
self.assertEqual("method declared in webidl is missing a comment with a specification link", next(errors)[2])
self.assertEqual("method declared in webidl is missing a comment with a specification link", next(errors)[2])
self.assertNoMoreErrors(errors)
def test_webidl(self):
errors = tidy.collect_errors_for_files(iterFile('spec.webidl'), [tidy.check_webidl_spec], [], print_text=False)
self.assertEqual('No specification link found.', next(errors)[2])
errors = tidy.collect_errors_for_files(iterFile("spec.webidl"), [tidy.check_webidl_spec], [], print_text=False)
self.assertEqual("No specification link found.", next(errors)[2])
self.assertNoMoreErrors(errors)
def test_toml(self):
errors = tidy.collect_errors_for_files(iterFile('Cargo.toml'), [], [tidy.check_toml], print_text=False)
self.assertEqual('found asterisk instead of minimum version number', next(errors)[2])
self.assertEqual('.toml file should contain a valid license.', next(errors)[2])
errors = tidy.collect_errors_for_files(iterFile("Cargo.toml"), [], [tidy.check_toml], print_text=False)
self.assertEqual("found asterisk instead of minimum version number", next(errors)[2])
self.assertEqual(".toml file should contain a valid license.", next(errors)[2])
self.assertNoMoreErrors(errors)
def test_modeline(self):
errors = tidy.collect_errors_for_files(iterFile('modeline.txt'), [], [tidy.check_modeline], print_text=False)
self.assertEqual('vi modeline present', next(errors)[2])
self.assertEqual('vi modeline present', next(errors)[2])
self.assertEqual('vi modeline present', next(errors)[2])
self.assertEqual('emacs file variables present', next(errors)[2])
self.assertEqual('emacs file variables present', next(errors)[2])
errors = tidy.collect_errors_for_files(iterFile("modeline.txt"), [], [tidy.check_modeline], print_text=False)
self.assertEqual("vi modeline present", next(errors)[2])
self.assertEqual("vi modeline present", next(errors)[2])
self.assertEqual("vi modeline present", next(errors)[2])
self.assertEqual("emacs file variables present", next(errors)[2])
self.assertEqual("emacs file variables present", next(errors)[2])
self.assertNoMoreErrors(errors)
def test_file_list(self):
file_path = os.path.join(BASE_PATH, 'test_ignored')
file_path = os.path.join(BASE_PATH, "test_ignored")
file_list = tidy.FileList(file_path, only_changed_files=False, exclude_dirs=[], progress=False)
lst = list(file_list)
self.assertEqual(
[
os.path.join(file_path, 'whee', 'test.rs'),
os.path.join(file_path, 'whee', 'foo', 'bar.rs')
],
lst
[os.path.join(file_path, "whee", "test.rs"), os.path.join(file_path, "whee", "foo", "bar.rs")], lst
)
file_list = tidy.FileList(
file_path, only_changed_files=False, exclude_dirs=[os.path.join(file_path, "whee", "foo")], progress=False
)
file_list = tidy.FileList(file_path, only_changed_files=False,
exclude_dirs=[os.path.join(file_path, 'whee', 'foo')],
progress=False)
lst = list(file_list)
self.assertEqual([os.path.join(file_path, 'whee', 'test.rs')], lst)
self.assertEqual([os.path.join(file_path, "whee", "test.rs")], lst)
def test_multiline_string(self):
errors = tidy.collect_errors_for_files(iterFile('multiline_string.rs'), [], [tidy.check_rust], print_text=False)
errors = tidy.collect_errors_for_files(iterFile("multiline_string.rs"), [], [tidy.check_rust], print_text=False)
self.assertNoMoreErrors(errors)
def test_raw_url_in_rustdoc(self):
@ -212,34 +203,19 @@ class CheckTidiness(unittest.TestCase):
self.assertEqual(tidy.ERROR_RAW_URL_IN_RUSTDOC, next(errors)[1])
self.assertNoMoreErrors(errors)
errors = tidy.check_for_raw_urls_in_rustdoc(
"file.rs", 3,
b"/// https://google.com"
)
errors = tidy.check_for_raw_urls_in_rustdoc("file.rs", 3, b"/// https://google.com")
assert_has_a_single_rustdoc_error(errors)
errors = tidy.check_for_raw_urls_in_rustdoc(
"file.rs", 3,
b"//! (https://google.com)"
)
errors = tidy.check_for_raw_urls_in_rustdoc("file.rs", 3, b"//! (https://google.com)")
assert_has_a_single_rustdoc_error(errors)
errors = tidy.check_for_raw_urls_in_rustdoc(
"file.rs", 3,
b"/// <https://google.com>"
)
errors = tidy.check_for_raw_urls_in_rustdoc("file.rs", 3, b"/// <https://google.com>")
self.assertNoMoreErrors(errors)
errors = tidy.check_for_raw_urls_in_rustdoc(
"file.rs", 3,
b"/// [hi]: https://google.com"
)
errors = tidy.check_for_raw_urls_in_rustdoc("file.rs", 3, b"/// [hi]: https://google.com")
self.assertNoMoreErrors(errors)
errors = tidy.check_for_raw_urls_in_rustdoc(
"file.rs", 3,
b"/// [hi](https://google.com)"
)
errors = tidy.check_for_raw_urls_in_rustdoc("file.rs", 3, b"/// [hi](https://google.com)")
self.assertNoMoreErrors(errors)

View file

@ -1,5 +1,6 @@
from servo_tidy.tidy import LintRunner
class Lint(LintRunner):
def run(self):
yield None

View file

@ -1,5 +1,6 @@
from servo_tidy.tidy import LintRunner
class Linter(LintRunner):
def run(self):
pass

View file

@ -1,5 +1,6 @@
from servo_tidy.tidy import LintRunner
class Lint(LintRunner):
def some_method(self):
pass

View file

@ -1,6 +1,7 @@
from servo_tidy.tidy import LintRunner
class Lint(LintRunner):
def run(self):
for _ in [None]:
yield ('path', 0, 'foobar')
yield ("path", 0, "foobar")

View file

@ -31,8 +31,8 @@ WPT_PATH = os.path.join(".", "tests", "wpt")
CONFIG_FILE_PATH = os.path.join(".", "servo-tidy.toml")
WPT_CONFIG_INI_PATH = os.path.join(WPT_PATH, "config.ini")
# regex source https://stackoverflow.com/questions/6883049/
URL_REGEX = re.compile(br'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+')
UTF8_URL_REGEX = re.compile(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+')
URL_REGEX = re.compile(rb"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+")
UTF8_URL_REGEX = re.compile(r"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+")
CARGO_LOCK_FILE = os.path.join(TOPDIR, "Cargo.lock")
CARGO_DENY_CONFIG_FILE = os.path.join(TOPDIR, "deny.toml")
@ -57,15 +57,25 @@ config = {
],
"packages": [],
},
"check_ext": {}
"check_ext": {},
}
COMMENTS = [b"// ", b"# ", b" *", b"/* "]
# File patterns to include in the non-WPT tidy check.
FILE_PATTERNS_TO_CHECK = ["*.rs", "*.rc", "*.cpp", "*.c",
"*.h", "*.py", "*.sh",
"*.toml", "*.webidl", "*.json", "*.html"]
FILE_PATTERNS_TO_CHECK = [
"*.rs",
"*.rc",
"*.cpp",
"*.c",
"*.h",
"*.py",
"*.sh",
"*.toml",
"*.webidl",
"*.json",
"*.html",
]
# File patterns that are ignored for all tidy and lint checks.
FILE_PATTERNS_TO_IGNORE = ["*.#*", "*.pyc", "fake-ld.sh", "*.ogv", "*.webm"]
@ -106,8 +116,7 @@ WEBIDL_STANDARDS = [
b"//notifications.spec.whatwg.org",
b"//testutils.spec.whatwg.org/",
# Not a URL
b"// This interface is entirely internal to Servo, and should not be"
+ b" accessible to\n// web pages."
b"// This interface is entirely internal to Servo, and should not be" + b" accessible to\n// web pages.",
]
@ -121,9 +130,9 @@ def is_iter_empty(iterator):
def normilize_paths(paths):
if isinstance(paths, str):
return os.path.join(*paths.split('/'))
return os.path.join(*paths.split("/"))
else:
return [os.path.join(*path.split('/')) for path in paths]
return [os.path.join(*path.split("/")) for path in paths]
# A simple wrapper for iterators to show progress
@ -133,7 +142,7 @@ def progress_wrapper(iterator):
total_files, progress = len(list_of_stuff), 0
for idx, thing in enumerate(list_of_stuff):
progress = int(float(idx + 1) / total_files * 100)
sys.stdout.write('\r Progress: %s%% (%d/%d)' % (progress, idx + 1, total_files))
sys.stdout.write("\r Progress: %s%% (%d/%d)" % (progress, idx + 1, total_files))
sys.stdout.flush()
yield thing
@ -170,8 +179,8 @@ class FileList(object):
if not file_list:
return
for f in file_list:
if not any(os.path.join('.', os.path.dirname(f)).startswith(path) for path in self.excluded):
yield os.path.join('.', f)
if not any(os.path.join(".", os.path.dirname(f)).startswith(path) for path in self.excluded):
yield os.path.join(".", f)
def _filter_excluded(self):
for root, dirs, files in os.walk(self.directory, topdown=True):
@ -197,8 +206,12 @@ def filter_file(file_name):
def filter_files(start_dir, only_changed_files, progress):
file_iter = FileList(start_dir, only_changed_files=only_changed_files,
exclude_dirs=config["ignore"]["directories"], progress=progress)
file_iter = FileList(
start_dir,
only_changed_files=only_changed_files,
exclude_dirs=config["ignore"]["directories"],
progress=progress,
)
for file_name in iter(file_iter):
base_name = os.path.basename(file_name)
@ -213,8 +226,8 @@ def uncomment(line):
for c in COMMENTS:
if line.startswith(c):
if line.endswith(b"*/"):
return line[len(c):(len(line) - 3)].strip()
return line[len(c):].strip()
return line[len(c) : (len(line) - 3)].strip()
return line[len(c) :].strip()
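# For example (illustrative inputs), the comment marker is stripped, along
# with a trailing "*/" when present:
#
#     uncomment(b"// TODO fix")  # -> b"TODO fix"
#     uncomment(b"/* note */")   # -> b"note"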
def is_apache_licensed(header):
@ -226,8 +239,7 @@ def is_apache_licensed(header):
def check_license(file_name, lines):
if any(file_name.endswith(ext) for ext in (".toml", ".lock", ".json", ".html")) or \
config["skip-check-licenses"]:
if any(file_name.endswith(ext) for ext in (".toml", ".lock", ".json", ".html")) or config["skip-check-licenses"]:
return
if lines[0].startswith(b"#!") and lines[1].strip():
@ -238,7 +250,7 @@ def check_license(file_name, lines):
license_block = []
for line in lines:
line = line.rstrip(b'\n')
line = line.rstrip(b"\n")
if not line.strip():
blank_lines += 1
if blank_lines >= max_blank_lines:
@ -257,20 +269,19 @@ def check_license(file_name, lines):
def check_modeline(file_name, lines):
for idx, line in enumerate(lines[:5]):
if re.search(b'^.*[ \t](vi:|vim:|ex:)[ \t]', line):
if re.search(b"^.*[ \t](vi:|vim:|ex:)[ \t]", line):
yield (idx + 1, "vi modeline present")
elif re.search(br'-\*-.*-\*-', line, re.IGNORECASE):
elif re.search(rb"-\*-.*-\*-", line, re.IGNORECASE):
yield (idx + 1, "emacs file variables present")
def check_length(file_name, idx, line):
if any(file_name.endswith(ext) for ext in (".lock", ".json", ".html", ".toml")) or \
config["skip-check-length"]:
if any(file_name.endswith(ext) for ext in (".lock", ".json", ".html", ".toml")) or config["skip-check-length"]:
return
# Prefer shorter lines when shell scripting.
max_length = 80 if file_name.endswith(".sh") else 120
if len(line.rstrip(b'\n')) > max_length and not is_unsplittable(file_name, line):
if len(line.rstrip(b"\n")) > max_length and not is_unsplittable(file_name, line):
yield (idx + 1, "Line is longer than %d characters" % max_length)
@ -279,23 +290,18 @@ def contains_url(line):
def is_unsplittable(file_name, line):
return (
contains_url(line)
or file_name.endswith(".rs")
and line.startswith(b"use ")
and b"{" not in line
)
return contains_url(line) or file_name.endswith(".rs") and line.startswith(b"use ") and b"{" not in line
def check_whatwg_specific_url(idx, line):
match = re.search(br"https://html\.spec\.whatwg\.org/multipage/[\w-]+\.html#([\w\'\:-]+)", line)
match = re.search(rb"https://html\.spec\.whatwg\.org/multipage/[\w-]+\.html#([\w\'\:-]+)", line)
if match is not None:
preferred_link = "https://html.spec.whatwg.org/multipage/#{}".format(match.group(1).decode("utf-8"))
yield (idx + 1, "link to WHATWG may break in the future, use this format instead: {}".format(preferred_link))
def check_whatwg_single_page_url(idx, line):
match = re.search(br"https://html\.spec\.whatwg\.org/#([\w\'\:-]+)", line)
match = re.search(rb"https://html\.spec\.whatwg\.org/#([\w\'\:-]+)", line)
if match is not None:
preferred_link = "https://html.spec.whatwg.org/multipage/#{}".format(match.group(1).decode("utf-8"))
yield (idx + 1, "links to WHATWG single-page url, change to multi page: {}".format(preferred_link))
@ -335,10 +341,11 @@ def check_for_raw_urls_in_rustdoc(file_name: str, idx: int, line: bytes):
# [link text]: https://example.com
match = URL_REGEX.search(line)
if match and (
not line[match.start() - 1:].startswith(b"<")
and not line[match.start() - 1:].startswith(b"[")
and not line[match.start() - 2:].startswith(b"](")
and not line[match.start() - 3:].startswith(b"]: ")):
not line[match.start() - 1 :].startswith(b"<")
and not line[match.start() - 1 :].startswith(b"[")
and not line[match.start() - 2 :].startswith(b"](")
and not line[match.start() - 3 :].startswith(b"]: ")
):
yield (idx + 1, ERROR_RAW_URL_IN_RUSTDOC)
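# Quick reference for the context checks above, mirroring the tidy unit tests
# (URLs are illustrative):
#
#     b"/// https://google.com"        -> flagged as a raw URL
#     b"/// <https://google.com>"      -> allowed (autolink)
#     b"/// [hi](https://google.com)"  -> allowed (inline link)
#     b"/// [hi]: https://google.com"  -> allowed (link reference)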
@ -369,12 +376,11 @@ def check_ruff_lints():
)
def run_cargo_deny_lints():
print("\r ➤ Running `cargo-deny` checks...")
result = subprocess.run(["cargo-deny", "--format=json", "--all-features", "check"],
encoding='utf-8',
capture_output=True)
result = subprocess.run(
["cargo-deny", "--format=json", "--all-features", "check"], encoding="utf-8", capture_output=True
)
assert result.stderr is not None, "cargo deny should return error information via stderr when failing"
errors = []
@ -397,11 +403,7 @@ def run_cargo_deny_lints():
if error_code == "rejected":
crate = CargoDenyKrate(error_fields["graphs"][0])
license_name = error_fields["notes"][0]
errors.append((
CARGO_LOCK_FILE,
1,
f"Rust dependency {crate}: Rejected license \"{license_name}\""
))
errors.append((CARGO_LOCK_FILE, 1, f'Rust dependency {crate}: Rejected license "{license_name}"'))
# This detects if a crate has been marked as banned in the configuration file.
elif error_code == "banned":
crate = CargoDenyKrate(error_fields["graphs"][0])
@ -431,7 +433,7 @@ def check_toml(file_name, lines):
if line_without_comment.find("*") != -1:
yield (idx + 1, "found asterisk instead of minimum version number")
for license_line in licenses_toml:
ok_licensed |= (license_line in line)
ok_licensed |= license_line in line
if "license.workspace" in line:
ok_licensed = True
if not ok_licensed:
@ -448,7 +450,7 @@ def check_shell(file_name, lines):
did_shebang_check = False
if not lines:
yield (0, 'script is an empty file')
yield (0, "script is an empty file")
return
if lines[0].rstrip() != shebang.encode("utf-8"):
@ -477,23 +479,25 @@ def check_shell(file_name, lines):
if " [ " in stripped or stripped.startswith("[ "):
yield (idx + 1, "script should use `[[` instead of `[` for conditional testing")
for dollar in re.finditer(r'\$', stripped):
for dollar in re.finditer(r"\$", stripped):
next_idx = dollar.end()
if next_idx < len(stripped):
next_char = stripped[next_idx]
if not (next_char == '{' or next_char == '('):
yield (idx + 1, "variable substitutions should use the full \"${VAR}\" form")
if not (next_char == "{" or next_char == "("):
yield (idx + 1, 'variable substitutions should use the full "${VAR}" form')
def check_rust(file_name, lines):
if not file_name.endswith(".rs") or \
file_name.endswith(".mako.rs") or \
file_name.endswith(os.path.join("style", "build.rs")) or \
file_name.endswith(os.path.join("unit", "style", "stylesheets.rs")):
if (
not file_name.endswith(".rs")
or file_name.endswith(".mako.rs")
or file_name.endswith(os.path.join("style", "build.rs"))
or file_name.endswith(os.path.join("unit", "style", "stylesheets.rs"))
):
return
comment_depth = 0
merged_lines = ''
merged_lines = ""
import_block = False
whitespace = False
@ -507,8 +511,7 @@ def check_rust(file_name, lines):
os.path.join("*", "ports", "servoshell", "embedder.rs"),
os.path.join("*", "rust_tidy.rs"), # This is for the tests.
]
is_panic_not_allowed_rs_file = any([
glob.fnmatch.fnmatch(file_name, path) for path in PANIC_NOT_ALLOWED_PATHS])
is_panic_not_allowed_rs_file = any([glob.fnmatch.fnmatch(file_name, path) for path in PANIC_NOT_ALLOWED_PATHS])
prev_open_brace = False
multi_line_string = False
@ -531,11 +534,11 @@ def check_rust(file_name, lines):
is_comment = re.search(r"^//|^/\*|^\*", line)
# Simple heuristic to avoid the common case of no comments.
if '/' in line:
comment_depth += line.count('/*')
comment_depth -= line.count('*/')
if "/" in line:
comment_depth += line.count("/*")
comment_depth -= line.count("*/")
if line.endswith('\\'):
if line.endswith("\\"):
merged_lines += line[:-1]
continue
if comment_depth:
@ -543,11 +546,10 @@ def check_rust(file_name, lines):
continue
if merged_lines:
line = merged_lines + line
merged_lines = ''
merged_lines = ""
if multi_line_string:
line, count = re.subn(
r'^(\\.|[^"\\])*?"', '', line, count=1)
line, count = re.subn(r'^(\\.|[^"\\])*?"', "", line, count=1)
if count == 1:
multi_line_string = False
else:
@ -565,9 +567,7 @@ def check_rust(file_name, lines):
# get rid of strings and chars (because of cases like regex expressions), but keep attributes
if not is_attribute and not is_comment:
line = re.sub(r'"(\\.|[^\\"])*?"', '""', line)
line = re.sub(
r"'(\\.|[^\\']|(\\x[0-9a-fA-F]{2})|(\\u{[0-9a-fA-F]{1,6}}))'",
"''", line)
line = re.sub(r"'(\\.|[^\\']|(\\x[0-9a-fA-F]{2})|(\\u{[0-9a-fA-F]{1,6}}))'", "''", line)
# If, after parsing all single-line strings, we still have
# an odd number of double quotes, this line starts a
# multiline string
@ -576,15 +576,16 @@ def check_rust(file_name, lines):
multi_line_string = True
# get rid of comments
line = re.sub(r'//.*?$|/\*.*?$|^\*.*?$', '//', line)
line = re.sub(r"//.*?$|/\*.*?$|^\*.*?$", "//", line)
# get rid of attributes that do not contain =
line = re.sub(r'^#[A-Za-z0-9\(\)\[\]_]*?$', '#[]', line)
line = re.sub(r"^#[A-Za-z0-9\(\)\[\]_]*?$", "#[]", line)
# flag this line if it matches one of the following regular expressions
# tuple format: (pattern, format_message, filter_function(match, line))
def no_filter(match, line):
return True
regex_rules = [
# There should not be any extra pointer dereferencing
(r": &Vec<", "use &[T] instead of &Vec<T>", no_filter),
@ -618,17 +619,23 @@ def check_rust(file_name, lines):
match = re.search(r"#!\[feature\((.*)\)\]", line)
if match:
features = list(map(lambda w: w.strip(), match.group(1).split(',')))
features = list(map(lambda w: w.strip(), match.group(1).split(",")))
sorted_features = sorted(features)
if sorted_features != features and check_alphabetical_order:
yield (idx + 1, decl_message.format("feature attribute")
yield (
idx + 1,
decl_message.format("feature attribute")
+ decl_expected.format(tuple(sorted_features))
+ decl_found.format(tuple(features)))
+ decl_found.format(tuple(features)),
)
if prev_feature_name > sorted_features[0] and check_alphabetical_order:
yield (idx + 1, decl_message.format("feature attribute")
yield (
idx + 1,
decl_message.format("feature attribute")
+ decl_expected.format(prev_feature_name + " after " + sorted_features[0])
+ decl_found.format(prev_feature_name + " before " + sorted_features[0]))
+ decl_found.format(prev_feature_name + " before " + sorted_features[0]),
)
prev_feature_name = sorted_features[0]
else:
@ -652,9 +659,12 @@ def check_rust(file_name, lines):
if match == -1 and not line.endswith(";"):
yield (idx + 1, "mod declaration spans multiple lines")
if prev_mod[indent] and mod < prev_mod[indent] and check_alphabetical_order:
yield (idx + 1, decl_message.format("mod declaration")
yield (
idx + 1,
decl_message.format("mod declaration")
+ decl_expected.format(prev_mod[indent])
+ decl_found.format(mod))
+ decl_found.format(mod),
)
prev_mod[indent] = mod
else:
# we now erase previous entries
@ -665,21 +675,24 @@ def check_rust(file_name, lines):
# match the derivable traits filtering out macro expansions
match = re.search(r"#\[derive\(([a-zA-Z, ]*)", line)
if match:
derives = list(map(lambda w: w.strip(), match.group(1).split(',')))
derives = list(map(lambda w: w.strip(), match.group(1).split(",")))
# sort, compare and report
sorted_derives = sorted(derives)
if sorted_derives != derives and check_alphabetical_order:
yield (idx + 1, decl_message.format("derivable traits list")
yield (
idx + 1,
decl_message.format("derivable traits list")
+ decl_expected.format(", ".join(sorted_derives))
+ decl_found.format(", ".join(derives)))
+ decl_found.format(", ".join(derives)),
)
# Avoid flagging <Item=Foo> constructs
def is_associated_type(match, line):
if match.group(1) != '=':
if match.group(1) != "=":
return False
open_angle = line[0:match.end()].rfind('<')
close_angle = line[open_angle:].find('>') if open_angle != -1 else -1
open_angle = line[0 : match.end()].rfind("<")
close_angle = line[open_angle:].find(">") if open_angle != -1 else -1
generic_open = open_angle != -1 and open_angle < match.start()
generic_close = close_angle != -1 and close_angle + open_angle >= match.end()
return generic_open and generic_close
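For illustration, the exemption accepts an "=" only when it sits inside a matched "<...>" pair. A sketch with an assumed stand-in pattern (the real rule lives in regex_rules above; is_associated_type is the nested helper just defined):

import re

pattern = re.compile(r"\w(=)\w")  # assumed stand-in for the real regex rule
line = "fn f<T: Iterator<Item=Foo>>() {}"
match = pattern.search(line)
assert is_associated_type(match, line)  # <Item=Foo> is exempt from the lint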
@ -731,6 +744,7 @@ def check_that_manifests_exist():
def check_that_manifests_are_clean():
from wptrunner import wptlogging
print("\r ➤ Checking WPT manifests for cleanliness...")
output_stream = io.StringIO("")
logger = wptlogging.setup({}, {"mach": output_stream})
@ -822,8 +836,8 @@ def check_spec(file_name, lines):
yield (idx + 1, "method declared in webidl is missing a comment with a specification link")
break
if in_impl:
brace_count += line.count('{')
brace_count -= line.count('}')
brace_count += line.count("{")
brace_count -= line.count("}")
if brace_count < 1:
break
@ -870,7 +884,7 @@ def check_config_file(config_file, print_text=True):
# Print invalid listed ignored directories
if current_table == "ignore" and invalid_dirs:
for d in invalid_dirs:
if line.strip().strip('\'",') == d:
if line.strip().strip("'\",") == d:
yield config_file, idx + 1, "ignored directory '%s' doesn't exist" % d
invalid_dirs.remove(d)
break
@ -878,7 +892,7 @@ def check_config_file(config_file, print_text=True):
# Print invalid listed ignored files
if current_table == "ignore" and invalid_files:
for f in invalid_files:
if line.strip().strip('\'",') == f:
if line.strip().strip("'\",") == f:
yield config_file, idx + 1, "ignored file '%s' doesn't exist" % f
invalid_files.remove(f)
break
@ -890,10 +904,14 @@ def check_config_file(config_file, print_text=True):
key = line.split("=")[0].strip()
# Check for invalid keys inside [configs] and [ignore] table
if (current_table == "configs" and key not in config
or current_table == "ignore" and key not in config["ignore"]
if (
current_table == "configs"
and key not in config
or current_table == "ignore"
and key not in config["ignore"]
# Any key outside of tables
or current_table == ""):
or current_table == ""
):
yield config_file, idx + 1, "invalid config key '%s'" % key
# Parse config file
@ -914,7 +932,7 @@ def parse_config(config_file):
dirs_to_check = config_file.get("check_ext", {})
# Fix the paths (OS-dependent)
for path, exts in dirs_to_check.items():
config['check_ext'][normilize_paths(path)] = exts
config["check_ext"][normilize_paths(path)] = exts
# Add list of blocked packages
config["blocked-packages"] = config_file.get("blocked-packages", {})
@ -933,13 +951,9 @@ def check_directory_files(directories, print_text=True):
files = sorted(os.listdir(directory))
for filename in files:
if not any(filename.endswith(ext) for ext in file_extensions):
details = {
"name": os.path.basename(filename),
"ext": ", ".join(file_extensions),
"dir_name": directory
}
message = '''Unexpected extension found for {name}. \
We only expect files with {ext} extensions in {dir_name}'''.format(**details)
details = {"name": os.path.basename(filename), "ext": ", ".join(file_extensions), "dir_name": directory}
message = """Unexpected extension found for {name}. \
We only expect files with {ext} extensions in {dir_name}""".format(**details)
yield (filename, 1, message)
@ -972,12 +986,19 @@ def scan(only_changed_files=False, progress=False):
# check config file for errors
config_errors = check_config_file(CONFIG_FILE_PATH)
# check directories contain expected files
directory_errors = check_directory_files(config['check_ext'])
directory_errors = check_directory_files(config["check_ext"])
# standard checks
files_to_check = filter_files('.', only_changed_files, progress)
files_to_check = filter_files(".", only_changed_files, progress)
checking_functions = (check_webidl_spec,)
line_checking_functions = (check_license, check_by_line, check_toml, check_shell,
check_rust, check_spec, check_modeline)
line_checking_functions = (
check_license,
check_by_line,
check_toml,
check_shell,
check_rust,
check_spec,
check_modeline,
)
file_errors = collect_errors_for_files(files_to_check, checking_functions, line_checking_functions)
python_errors = check_ruff_lints()
@ -985,26 +1006,27 @@ def scan(only_changed_files=False, progress=False):
wpt_errors = run_wpt_lints(only_changed_files)
# chain all the iterators
errors = itertools.chain(config_errors, directory_errors, file_errors,
python_errors, wpt_errors, cargo_lock_errors)
errors = itertools.chain(config_errors, directory_errors, file_errors, python_errors, wpt_errors, cargo_lock_errors)
colorama.init()
error = None
for error in errors:
print("\r | "
print(
"\r | "
+ f"{colorama.Fore.BLUE}{error[0]}{colorama.Style.RESET_ALL}:"
+ f"{colorama.Fore.YELLOW}{error[1]}{colorama.Style.RESET_ALL}: "
+ f"{colorama.Fore.RED}{error[2]}{colorama.Style.RESET_ALL}")
+ f"{colorama.Fore.RED}{error[2]}{colorama.Style.RESET_ALL}"
)
return int(error is not None)
class CargoDenyKrate:
def __init__(self, data: Dict[Any, Any]):
crate = data['Krate']
self.name = crate['name']
self.version = crate['version']
self.parents = [CargoDenyKrate(parent) for parent in data.get('parents', [])]
crate = data["Krate"]
self.name = crate["name"]
self.version = crate["version"]
self.parents = [CargoDenyKrate(parent) for parent in data.get("parents", [])]
def __str__(self):
return f"{self.name}@{self.version}"

View file

@ -27,22 +27,36 @@ import wptrunner.wptcommandline # noqa: E402
def create_parser():
parser = wptrunner.wptcommandline.create_parser()
parser.add_argument('--rr-chaos', default=False, action="store_true",
help="Run under chaos mode in rr until a failure is captured")
parser.add_argument('--pref', default=[], action="append", dest="prefs",
help="Pass preferences to servo")
parser.add_argument('--log-servojson', action="append", type=mozlog.commandline.log_file,
help="Servo's JSON logger of unexpected results")
parser.add_argument('--always-succeed', default=False, action="store_true",
help="Always yield exit code of zero")
parser.add_argument('--no-default-test-types', default=False, action="store_true",
help="Run all of the test types provided by wptrunner or specified explicitly by --test-types")
parser.add_argument('--filter-intermittents', default=None, action="store",
help="Filter intermittents against known intermittents "
"and save the filtered output to the given file.")
parser.add_argument('--log-raw-unexpected', default=None, action="store",
parser.add_argument(
"--rr-chaos", default=False, action="store_true", help="Run under chaos mode in rr until a failure is captured"
)
parser.add_argument("--pref", default=[], action="append", dest="prefs", help="Pass preferences to servo")
parser.add_argument(
"--log-servojson",
action="append",
type=mozlog.commandline.log_file,
help="Servo's JSON logger of unexpected results",
)
parser.add_argument("--always-succeed", default=False, action="store_true", help="Always yield exit code of zero")
parser.add_argument(
"--no-default-test-types",
default=False,
action="store_true",
help="Run all of the test types provided by wptrunner or specified explicitly by --test-types",
)
parser.add_argument(
"--filter-intermittents",
default=None,
action="store",
help="Filter intermittents against known intermittents and save the filtered output to the given file.",
)
parser.add_argument(
"--log-raw-unexpected",
default=None,
action="store",
help="Raw structured log messages for unexpected results."
" '--log-raw' Must also be passed in order to use this.")
" '--log-raw' Must also be passed in order to use this.",
)
return parser
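For illustration, "--pref" collects repeated flags into args.prefs through action="append" and dest="prefs". A standalone sketch with the same argument definition (preference names hypothetical):

import argparse

p = argparse.ArgumentParser()
p.add_argument("--pref", default=[], action="append", dest="prefs", help="Pass preferences to servo")
args = p.parse_args(["--pref", "dom_webgpu_enabled", "--pref", "js_baseline_enabled"])
assert args.prefs == ["dom_webgpu_enabled", "js_baseline_enabled"]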

View file

@ -21,20 +21,20 @@ from exporter import WPTSync
def main() -> int:
context = json.loads(os.environ['GITHUB_CONTEXT'])
context = json.loads(os.environ["GITHUB_CONTEXT"])
logging.getLogger().level = logging.INFO
success = WPTSync(
servo_repo='servo/servo',
wpt_repo='web-platform-tests/wpt',
downstream_wpt_repo='servo/wpt',
servo_path='./servo',
wpt_path='./wpt',
github_api_token=os.environ['WPT_SYNC_TOKEN'],
github_api_url='https://api.github.com/',
github_username='servo-wpt-sync',
github_email='ghbot+wpt-sync@servo.org',
github_name='Servo WPT Sync',
servo_repo="servo/servo",
wpt_repo="web-platform-tests/wpt",
downstream_wpt_repo="servo/wpt",
servo_path="./servo",
wpt_path="./wpt",
github_api_token=os.environ["WPT_SYNC_TOKEN"],
github_api_url="https://api.github.com/",
github_username="servo-wpt-sync",
github_email="ghbot+wpt-sync@servo.org",
github_name="Servo WPT Sync",
).run(context["event"])
return 0 if success else 1

View file

@ -24,26 +24,28 @@ import subprocess
from typing import Callable, Optional
from .common import \
CLOSING_EXISTING_UPSTREAM_PR, \
NO_SYNC_SIGNAL, \
NO_UPSTREAMBLE_CHANGES_COMMENT, \
OPENED_NEW_UPSTREAM_PR, \
UPDATED_EXISTING_UPSTREAM_PR, \
UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR, \
UPSTREAMABLE_PATH, \
wpt_branch_name_from_servo_pr_number
from .common import (
CLOSING_EXISTING_UPSTREAM_PR,
NO_SYNC_SIGNAL,
NO_UPSTREAMBLE_CHANGES_COMMENT,
OPENED_NEW_UPSTREAM_PR,
UPDATED_EXISTING_UPSTREAM_PR,
UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR,
UPSTREAMABLE_PATH,
wpt_branch_name_from_servo_pr_number,
)
from .github import GithubRepository, PullRequest
from .step import \
AsyncValue, \
ChangePRStep, \
CommentStep, \
CreateOrUpdateBranchForPRStep, \
MergePRStep, \
OpenPRStep, \
RemoveBranchForPRStep, \
Step
from .step import (
AsyncValue,
ChangePRStep,
CommentStep,
CreateOrUpdateBranchForPRStep,
MergePRStep,
OpenPRStep,
RemoveBranchForPRStep,
Step,
)
class LocalGitRepo:
@ -57,8 +59,7 @@ class LocalGitRepo:
def run_without_encoding(self, *args, env: dict = {}):
command_line = [self.git_path] + list(args)
logging.info(" → Execution (cwd='%s'): %s",
self.path, " ".join(command_line))
logging.info(" → Execution (cwd='%s'): %s", self.path, " ".join(command_line))
env.setdefault("GIT_AUTHOR_EMAIL", self.sync.github_email)
env.setdefault("GIT_COMMITTER_EMAIL", self.sync.github_email)
@ -66,20 +67,15 @@ class LocalGitRepo:
env.setdefault("GIT_COMMITTER_NAME", self.sync.github_name)
try:
return subprocess.check_output(
command_line, cwd=self.path, env=env, stderr=subprocess.STDOUT
)
return subprocess.check_output(command_line, cwd=self.path, env=env, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as exception:
logging.warning("Process execution failed with output:\n%s",
exception.output.decode("utf-8", errors="surrogateescape"))
logging.warning(
"Process execution failed with output:\n%s", exception.output.decode("utf-8", errors="surrogateescape")
)
raise exception
def run(self, *args, env: dict = {}):
return (
self
.run_without_encoding(*args, env=env)
.decode("utf-8", errors="surrogateescape")
)
return self.run_without_encoding(*args, env=env).decode("utf-8", errors="surrogateescape")
@dataclasses.dataclass()
@ -167,11 +163,7 @@ class WPTSync:
if action not in ["opened", "synchronize", "reopened", "edited", "closed"]:
return True
if (
action == "edited"
and "title" not in payload["changes"]
and "body" not in payload["changes"]
):
if action == "edited" and "title" not in payload["changes"] and "body" not in payload["changes"]:
return True
try:
@ -179,15 +171,11 @@ class WPTSync:
downstream_wpt_branch = self.downstream_wpt.get_branch(
wpt_branch_name_from_servo_pr_number(servo_pr.number)
)
upstream_pr = self.wpt.get_open_pull_request_for_branch(
self.github_username, downstream_wpt_branch
)
upstream_pr = self.wpt.get_open_pull_request_for_branch(self.github_username, downstream_wpt_branch)
if upstream_pr:
logging.info(
" → Detected existing upstream PR %s", upstream_pr)
logging.info(" → Detected existing upstream PR %s", upstream_pr)
run = SyncRun(self, servo_pr, AsyncValue(
upstream_pr), step_callback)
run = SyncRun(self, servo_pr, AsyncValue(upstream_pr), step_callback)
pull_data = payload["pull_request"]
if payload["action"] in ["opened", "synchronize", "reopened"]:
@ -210,50 +198,44 @@ class WPTSync:
num_commits = pull_data["commits"]
head_sha = pull_data["head"]["sha"]
is_upstreamable = (
len(
self.local_servo_repo.run(
"diff", head_sha, f"{head_sha}~{num_commits}", "--", UPSTREAMABLE_PATH
)
)
> 0
len(self.local_servo_repo.run("diff", head_sha, f"{head_sha}~{num_commits}", "--", UPSTREAMABLE_PATH)) > 0
)
logging.info(" → PR is upstreamable: '%s'", is_upstreamable)
title = pull_data['title']
body = pull_data['body']
title = pull_data["title"]
body = pull_data["body"]
if run.upstream_pr.has_value():
if is_upstreamable:
# In case this is adding new upstreamable changes to a PR that was closed
# due to a lack of upstreamable changes, force it to be reopened.
# Github refuses to reopen a PR that had a branch force pushed, so be sure
# to do this first.
run.add_step(ChangePRStep(
run.upstream_pr.value(), "opened", title, body))
run.add_step(ChangePRStep(run.upstream_pr.value(), "opened", title, body))
# Push the relevant changes to the upstream branch.
run.add_step(CreateOrUpdateBranchForPRStep(
pull_data, run.servo_pr))
run.add_step(CommentStep(
run.servo_pr, UPDATED_EXISTING_UPSTREAM_PR))
run.add_step(CreateOrUpdateBranchForPRStep(pull_data, run.servo_pr))
run.add_step(CommentStep(run.servo_pr, UPDATED_EXISTING_UPSTREAM_PR))
else:
# Close the upstream PR, since it would contain no changes otherwise.
run.add_step(CommentStep(run.upstream_pr.value(),
NO_UPSTREAMBLE_CHANGES_COMMENT))
run.add_step(CommentStep(run.upstream_pr.value(), NO_UPSTREAMBLE_CHANGES_COMMENT))
run.add_step(ChangePRStep(run.upstream_pr.value(), "closed"))
run.add_step(RemoveBranchForPRStep(pull_data))
run.add_step(CommentStep(
run.servo_pr, CLOSING_EXISTING_UPSTREAM_PR))
run.add_step(CommentStep(run.servo_pr, CLOSING_EXISTING_UPSTREAM_PR))
elif is_upstreamable:
# Push the relevant changes to a new upstream branch.
branch = run.add_step(
CreateOrUpdateBranchForPRStep(pull_data, run.servo_pr))
branch = run.add_step(CreateOrUpdateBranchForPRStep(pull_data, run.servo_pr))
# Create a pull request against the upstream repository for the new branch.
assert branch
upstream_pr = run.add_step(OpenPRStep(
branch, self.wpt, title, body,
upstream_pr = run.add_step(
OpenPRStep(
branch,
self.wpt,
title,
body,
["servo-export", "do not merge yet"],
))
)
)
assert upstream_pr
run.upstream_pr = upstream_pr
@ -264,12 +246,8 @@ class WPTSync:
def handle_edited_pull_request(self, run: SyncRun, pull_data: dict):
logging.info("Changing upstream PR title")
if run.upstream_pr.has_value():
run.add_step(ChangePRStep(
run.upstream_pr.value(
), "open", pull_data["title"], pull_data["body"]
))
run.add_step(CommentStep(
run.servo_pr, UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR))
run.add_step(ChangePRStep(run.upstream_pr.value(), "open", pull_data["title"], pull_data["body"]))
run.add_step(CommentStep(run.servo_pr, UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR))
def handle_closed_pull_request(self, run: SyncRun, pull_data: dict):
logging.info("Processing closed PR")
@ -279,8 +257,7 @@ class WPTSync:
if pull_data["merged"]:
# Since the upstreamable changes have now been merged locally, merge the
# corresponding upstream PR.
run.add_step(MergePRStep(
run.upstream_pr.value(), ["do not merge yet"]))
run.add_step(MergePRStep(run.upstream_pr.value(), ["do not merge yet"]))
else:
# If a PR with upstreamable changes is closed without being merged, we
# don't want to merge the changes upstream either.

View file

@ -12,17 +12,11 @@
UPSTREAMABLE_PATH = "tests/wpt/tests/"
NO_SYNC_SIGNAL = "[no-wpt-sync]"
OPENED_NEW_UPSTREAM_PR = (
"🤖 Opened new upstream WPT pull request ({upstream_pr}) "
"with upstreamable changes."
)
OPENED_NEW_UPSTREAM_PR = "🤖 Opened new upstream WPT pull request ({upstream_pr}) with upstreamable changes."
UPDATED_EXISTING_UPSTREAM_PR = (
"📝 Transplanted new upstreamable changes to existing "
"upstream WPT pull request ({upstream_pr})."
)
UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR = (
"✍ Updated existing upstream WPT pull request ({upstream_pr}) title and body."
"📝 Transplanted new upstreamable changes to existing upstream WPT pull request ({upstream_pr})."
)
UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR = "✍ Updated existing upstream WPT pull request ({upstream_pr}) title and body."
CLOSING_EXISTING_UPSTREAM_PR = (
"🤖 This change no longer contains upstreamable changes to WPT; closed existing "
"upstream pull request ({upstream_pr})."

View file

@ -40,13 +40,9 @@ def authenticated(sync: WPTSync, method, url, json=None) -> requests.Response:
}
url = urllib.parse.urljoin(sync.github_api_url, url)
response = requests.request(
method, url, headers=headers, json=json, timeout=TIMEOUT
)
response = requests.request(method, url, headers=headers, json=json, timeout=TIMEOUT)
if int(response.status_code / 100) != 2:
raise ValueError(
f"Got unexpected {response.status_code} response: {response.text}"
)
raise ValueError(f"Got unexpected {response.status_code} response: {response.text}")
return response
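Aside: int(response.status_code / 100) != 2 is the "not a 2xx" test; for non-negative status codes it matches floor division, as a quick check shows:

for status_code in (200, 204, 301, 404):
    assert (int(status_code / 100) != 2) == (status_code // 100 != 2)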
@ -71,33 +67,27 @@ class GithubRepository:
def get_branch(self, name: str) -> GithubBranch:
return GithubBranch(self, name)
def get_open_pull_request_for_branch(
self,
github_username: str,
branch: GithubBranch
) -> Optional[PullRequest]:
def get_open_pull_request_for_branch(self, github_username: str, branch: GithubBranch) -> Optional[PullRequest]:
"""If this repository has an open pull request with the
given source head reference targeting the main branch,
return the first matching pull request, otherwise return None."""
params = "+".join([
params = "+".join(
[
"is:pr",
"state:open",
f"repo:{self.repo}",
f"author:{github_username}",
f"head:{branch.name}",
])
]
)
response = authenticated(self.sync, "GET", f"search/issues?q={params}")
if int(response.status_code / 100) != 2:
return None
json = response.json()
if not isinstance(json, dict) or \
"total_count" not in json or \
"items" not in json:
raise ValueError(
f"Got unexpected response from GitHub search: {response.text}"
)
if not isinstance(json, dict) or "total_count" not in json or "items" not in json:
raise ValueError(f"Got unexpected response from GitHub search: {response.text}")
if json["total_count"] < 1:
return None
@ -152,9 +142,7 @@ class PullRequest:
return authenticated(self.context, *args, **kwargs)
def leave_comment(self, comment: str):
return self.api(
"POST", f"{self.base_issues_url}/comments", json={"body": comment}
)
return self.api("POST", f"{self.base_issues_url}/comments", json={"body": comment})
def change(
self,

View file

@ -46,7 +46,7 @@ class Step:
return
T = TypeVar('T')
T = TypeVar("T")
class AsyncValue(Generic[T]):
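The body of AsyncValue is elided by this hunk, but the call sites above (AsyncValue(upstream_pr), has_value(), value()) use it as a write-once holder. A usage sketch under that assumption:

holder: AsyncValue[str] = AsyncValue("ready")
if holder.has_value():
    print(holder.value())  # ready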
@ -76,8 +76,7 @@ class CreateOrUpdateBranchForPRStep(Step):
def run(self, run: SyncRun):
try:
commits = self._get_upstreamable_commits_from_local_servo_repo(
run.sync)
commits = self._get_upstreamable_commits_from_local_servo_repo(run.sync)
branch_name = self._create_or_update_branch_for_pr(run, commits)
branch = run.sync.downstream_wpt.get_branch(branch_name)
@ -88,21 +87,15 @@ class CreateOrUpdateBranchForPRStep(Step):
logging.info(exception, exc_info=True)
run.steps = []
run.add_step(CommentStep(
self.pull_request, COULD_NOT_APPLY_CHANGES_DOWNSTREAM_COMMENT
))
run.add_step(CommentStep(self.pull_request, COULD_NOT_APPLY_CHANGES_DOWNSTREAM_COMMENT))
if run.upstream_pr.has_value():
run.add_step(CommentStep(
run.upstream_pr.value(), COULD_NOT_APPLY_CHANGES_UPSTREAM_COMMENT
))
run.add_step(CommentStep(run.upstream_pr.value(), COULD_NOT_APPLY_CHANGES_UPSTREAM_COMMENT))
def _get_upstreamable_commits_from_local_servo_repo(self, sync: WPTSync):
local_servo_repo = sync.local_servo_repo
number_of_commits = self.pull_data["commits"]
pr_head = self.pull_data["head"]["sha"]
commit_shas = local_servo_repo.run(
"log", "--pretty=%H", pr_head, f"-{number_of_commits}"
).splitlines()
commit_shas = local_servo_repo.run("log", "--pretty=%H", pr_head, f"-{number_of_commits}").splitlines()
filtered_commits = []
# We must iterate the commits in reverse to ensure we apply older changes first,
@ -128,12 +121,8 @@ class CreateOrUpdateBranchForPRStep(Step):
# commit to another repository.
filtered_commits += [
{
"author": local_servo_repo.run(
"show", "-s", "--pretty=%an <%ae>", sha
),
"message": local_servo_repo.run(
"show", "-s", "--pretty=%B", sha
),
"author": local_servo_repo.run("show", "-s", "--pretty=%an <%ae>", sha),
"message": local_servo_repo.run("show", "-s", "--pretty=%B", sha),
"diff": diff,
}
]
@ -146,23 +135,16 @@ class CreateOrUpdateBranchForPRStep(Step):
try:
with open(patch_path, "wb") as file:
file.write(commit["diff"])
run.sync.local_wpt_repo.run(
"apply", PATCH_FILE_NAME, "-p", str(strip_count)
)
run.sync.local_wpt_repo.run("apply", PATCH_FILE_NAME, "-p", str(strip_count))
finally:
# Ensure the patch file is not added with the other changes.
os.remove(patch_path)
run.sync.local_wpt_repo.run("add", "--all")
run.sync.local_wpt_repo.run(
"commit", "--message", commit["message"], "--author", commit["author"]
)
run.sync.local_wpt_repo.run("commit", "--message", commit["message"], "--author", commit["author"])
def _create_or_update_branch_for_pr(
self, run: SyncRun, commits: list[dict], pre_commit_callback=None
):
branch_name = wpt_branch_name_from_servo_pr_number(
self.pull_data["number"])
def _create_or_update_branch_for_pr(self, run: SyncRun, commits: list[dict], pre_commit_callback=None):
branch_name = wpt_branch_name_from_servo_pr_number(self.pull_data["number"])
try:
# Create a new branch with a unique name that is consistent between
# updates of the same PR.
@ -176,7 +158,6 @@ class CreateOrUpdateBranchForPRStep(Step):
# Push the branch upstream (forcing to overwrite any existing changes).
if not run.sync.suppress_force_push:
# In order to push to our downstream branch we need to ensure that
# the local repository isn't a shallow clone. Shallow clones are
# commonly created by GitHub actions.
@ -186,8 +167,7 @@ class CreateOrUpdateBranchForPRStep(Step):
token = run.sync.github_api_token
repo = run.sync.downstream_wpt_repo
remote_url = f"https://{user}:{token}@github.com/{repo}.git"
run.sync.local_wpt_repo.run(
"push", "-f", remote_url, branch_name)
run.sync.local_wpt_repo.run("push", "-f", remote_url, branch_name)
return branch_name
finally:
@ -201,8 +181,7 @@ class CreateOrUpdateBranchForPRStep(Step):
class RemoveBranchForPRStep(Step):
def __init__(self, pull_request):
Step.__init__(self, "RemoveBranchForPRStep")
self.branch_name = wpt_branch_name_from_servo_pr_number(
pull_request["number"])
self.branch_name = wpt_branch_name_from_servo_pr_number(pull_request["number"])
def run(self, run: SyncRun):
self.name += f":{run.sync.downstream_wpt.get_branch(self.branch_name)}"
@ -212,8 +191,7 @@ class RemoveBranchForPRStep(Step):
token = run.sync.github_api_token
repo = run.sync.downstream_wpt_repo
remote_url = f"https://{user}:{token}@github.com/{repo}.git"
run.sync.local_wpt_repo.run("push", remote_url, "--delete",
self.branch_name)
run.sync.local_wpt_repo.run("push", remote_url, "--delete", self.branch_name)
class ChangePRStep(Step):
@ -238,9 +216,7 @@ class ChangePRStep(Step):
body = self.body
if body:
body = run.prepare_body_text(body)
self.name += (
f':{textwrap.shorten(body, width=20, placeholder="...")}[{len(body)}]'
)
self.name += f":{textwrap.shorten(body, width=20, placeholder='...')}[{len(body)}]"
self.pull_request.change(state=self.state, title=self.title, body=body)
@ -261,12 +237,8 @@ class MergePRStep(Step):
logging.warning(exception, exc_info=True)
run.steps = []
run.add_step(CommentStep(
self.pull_request, COULD_NOT_MERGE_CHANGES_UPSTREAM_COMMENT
))
run.add_step(CommentStep(
run.servo_pr, COULD_NOT_MERGE_CHANGES_DOWNSTREAM_COMMENT
))
run.add_step(CommentStep(self.pull_request, COULD_NOT_MERGE_CHANGES_UPSTREAM_COMMENT))
run.add_step(CommentStep(run.servo_pr, COULD_NOT_MERGE_CHANGES_DOWNSTREAM_COMMENT))
self.pull_request.add_labels(["stale-servo-export"])

View file

@ -16,12 +16,12 @@ from dataclasses import dataclass, field
from typing import Dict, List, Optional, Any
from six import itervalues
DEFAULT_MOVE_UP_CODE = u"\x1b[A"
DEFAULT_CLEAR_EOL_CODE = u"\x1b[K"
DEFAULT_MOVE_UP_CODE = "\x1b[A"
DEFAULT_CLEAR_EOL_CODE = "\x1b[K"
@dataclass
class UnexpectedSubtestResult():
class UnexpectedSubtestResult:
path: str
subtest: str
actual: str
@ -32,15 +32,14 @@ class UnexpectedSubtestResult():
@dataclass
class UnexpectedResult():
class UnexpectedResult:
path: str
actual: str
expected: str
message: str
time: int
stack: Optional[str]
unexpected_subtest_results: list[UnexpectedSubtestResult] = field(
default_factory=list)
unexpected_subtest_results: list[UnexpectedSubtestResult] = field(default_factory=list)
issues: list[str] = field(default_factory=list)
flaky: bool = False
@ -48,13 +47,13 @@ class UnexpectedResult():
output = UnexpectedResult.to_lines(self)
if self.unexpected_subtest_results:
def make_subtests_failure(subtest_results):
# Test names sometimes contain control characters, which we want
# to be printed in their raw form, and not their interpreted form.
lines = []
for subtest in subtest_results[:-1]:
lines += UnexpectedResult.to_lines(
subtest, print_stack=False)
lines += UnexpectedResult.to_lines(subtest, print_stack=False)
lines += UnexpectedResult.to_lines(subtest_results[-1])
return self.wrap_and_indent_lines(lines, " ").splitlines()
@ -78,11 +77,11 @@ class UnexpectedResult():
if not lines:
return ""
output = indent + u"\u25B6 %s\n" % lines[0]
output = indent + "\u25b6 %s\n" % lines[0]
for line in lines[1:-1]:
output += indent + u"\u2502 %s\n" % line
output += indent + "\u2502 %s\n" % line
if len(lines) > 1:
output += indent + u"\u2514 %s\n" % lines[-1]
output += indent + "\u2514 %s\n" % lines[-1]
return output
@staticmethod
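Standalone, the loop above renders one result block like the following (test names illustrative):

indent = "  "
lines = ["TIMEOUT /foo/bar.html", "expected PASS", "assertion failed"]
output = indent + "\u25b6 %s\n" % lines[0]
for line in lines[1:-1]:
    output += indent + "\u2502 %s\n" % line
if len(lines) > 1:
    output += indent + "\u2514 %s\n" % lines[-1]
print(output, end="")
#   ▶ TIMEOUT /foo/bar.html
#   │ expected PASS
#   └ assertion failed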
@ -112,6 +111,7 @@ class UnexpectedResult():
class ServoHandler(mozlog.reader.LogHandler):
"""LogHandler designed to collect unexpected results for use by
script or by the ServoFormatter output formatter."""
def __init__(self):
self.reset_state()
@ -126,24 +126,24 @@ class ServoHandler(mozlog.reader.LogHandler):
self.unexpected_results: List[UnexpectedResult] = []
self.expected = {
'OK': 0,
'PASS': 0,
'FAIL': 0,
'ERROR': 0,
'TIMEOUT': 0,
'SKIP': 0,
'CRASH': 0,
'PRECONDITION_FAILED': 0,
"OK": 0,
"PASS": 0,
"FAIL": 0,
"ERROR": 0,
"TIMEOUT": 0,
"SKIP": 0,
"CRASH": 0,
"PRECONDITION_FAILED": 0,
}
self.unexpected_tests = {
'OK': [],
'PASS': [],
'FAIL': [],
'ERROR': [],
'TIMEOUT': [],
'CRASH': [],
'PRECONDITION_FAILED': [],
"OK": [],
"PASS": [],
"FAIL": [],
"ERROR": [],
"TIMEOUT": [],
"CRASH": [],
"PRECONDITION_FAILED": [],
}
def suite_start(self, data):
@ -155,20 +155,19 @@ class ServoHandler(mozlog.reader.LogHandler):
pass
def test_start(self, data):
self.running_tests[data['thread']] = data['test']
self.running_tests[data["thread"]] = data["test"]
@staticmethod
def data_was_for_expected_result(data):
if "expected" not in data:
return True
return "known_intermittent" in data \
and data["status"] in data["known_intermittent"]
return "known_intermittent" in data and data["status"] in data["known_intermittent"]
def test_end(self, data: dict) -> Optional[UnexpectedResult]:
self.completed_tests += 1
test_status = data["status"]
test_path = data["test"]
del self.running_tests[data['thread']]
del self.running_tests[data["thread"]]
had_expected_test_result = self.data_was_for_expected_result(data)
subtest_failures = self.subtest_failures.pop(test_path, [])
@ -191,7 +190,7 @@ class ServoHandler(mozlog.reader.LogHandler):
data.get("message", ""),
data["time"],
stack,
subtest_failures
subtest_failures,
)
if not had_expected_test_result:
@ -205,19 +204,21 @@ class ServoHandler(mozlog.reader.LogHandler):
def test_status(self, data: dict):
if self.data_was_for_expected_result(data):
return
self.subtest_failures[data["test"]].append(UnexpectedSubtestResult(
self.subtest_failures[data["test"]].append(
UnexpectedSubtestResult(
data["test"],
data["subtest"],
data["status"],
data["expected"],
data.get("message", ""),
data["time"],
data.get('stack', None),
))
data.get("stack", None),
)
)
def process_output(self, data):
if 'test' in data:
self.test_output[data['test']] += data['data'] + "\n"
if "test" in data:
self.test_output[data["test"]] += data["data"] + "\n"
def log(self, _):
pass
@ -226,6 +227,7 @@ class ServoHandler(mozlog.reader.LogHandler):
class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
"""Formatter designed to produce unexpected test results grouped
together in a readable format."""
def __init__(self):
ServoHandler.__init__(self)
self.current_display = ""
@ -239,18 +241,17 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
try:
import blessed
self.terminal = blessed.Terminal()
self.move_up = self.terminal.move_up
self.clear_eol = self.terminal.clear_eol
except Exception as exception:
sys.stderr.write("GroupingFormatter: Could not get terminal "
"control characters: %s\n" % exception)
sys.stderr.write("GroupingFormatter: Could not get terminal control characters: %s\n" % exception)
def text_to_erase_display(self):
if not self.interactive or not self.current_display:
return ""
return ((self.move_up + self.clear_eol)
* self.current_display.count('\n'))
return (self.move_up + self.clear_eol) * self.current_display.count("\n")
def generate_output(self, text=None, new_display=None):
if not self.interactive:
@ -278,17 +279,16 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
max_width = self.line_width - len(new_display)
else:
max_width = sys.maxsize
return new_display + ("\n%s" % indent).join(
val[:max_width] for val in self.running_tests.values()) + "\n"
return new_display + ("\n%s" % indent).join(val[:max_width] for val in self.running_tests.values()) + "\n"
else:
return new_display + "No tests running.\n"
def suite_start(self, data):
ServoHandler.suite_start(self, data)
if self.number_of_tests == 0:
return "Running tests in %s\n\n" % data[u'source']
return "Running tests in %s\n\n" % data["source"]
else:
return "Running %i tests in %s\n\n" % (self.number_of_tests, data[u'source'])
return "Running %i tests in %s\n\n" % (self.number_of_tests, data["source"])
def test_start(self, data):
ServoHandler.test_start(self, data)
@ -300,8 +300,7 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
if unexpected_result:
# Surround test output by newlines so that it is easier to read.
output_for_unexpected_test = f"{unexpected_result}\n"
return self.generate_output(text=output_for_unexpected_test,
new_display=self.build_status_line())
return self.generate_output(text=output_for_unexpected_test, new_display=self.build_status_line())
# Print the reason that tests are skipped.
if data["status"] == "SKIP":
@ -321,12 +320,14 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
def suite_end(self, data):
ServoHandler.suite_end(self, data)
if not self.interactive:
output = u"\n"
output = "\n"
else:
output = ""
output += u"Ran %i tests finished in %.1f seconds.\n" % (
self.completed_tests, (data["time"] - self.suite_start_time) / 1000)
output += "Ran %i tests finished in %.1f seconds.\n" % (
self.completed_tests,
(data["time"] - self.suite_start_time) / 1000,
)
# Sum the number of expected test results from each category
expected_test_results = sum(self.expected.values())
@ -337,29 +338,27 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
def text_for_unexpected_list(text, section):
tests = self.unexpected_tests[section]
if not tests:
return u""
return u" \u2022 %i tests %s\n" % (len(tests), text)
return ""
return " \u2022 %i tests %s\n" % (len(tests), text)
output += text_for_unexpected_list(u"crashed unexpectedly", 'CRASH')
output += text_for_unexpected_list(u"had errors unexpectedly", 'ERROR')
output += text_for_unexpected_list(u"failed unexpectedly", 'FAIL')
output += text_for_unexpected_list(u"precondition failed unexpectedly", 'PRECONDITION_FAILED')
output += text_for_unexpected_list(u"timed out unexpectedly", 'TIMEOUT')
output += text_for_unexpected_list(u"passed unexpectedly", 'PASS')
output += text_for_unexpected_list(u"unexpectedly okay", 'OK')
output += text_for_unexpected_list("crashed unexpectedly", "CRASH")
output += text_for_unexpected_list("had errors unexpectedly", "ERROR")
output += text_for_unexpected_list("failed unexpectedly", "FAIL")
output += text_for_unexpected_list("precondition failed unexpectedly", "PRECONDITION_FAILED")
output += text_for_unexpected_list("timed out unexpectedly", "TIMEOUT")
output += text_for_unexpected_list("passed unexpectedly", "PASS")
output += text_for_unexpected_list("unexpectedly okay", "OK")
num_with_failing_subtests = len(self.tests_with_failing_subtests)
if num_with_failing_subtests:
output += (u" \u2022 %i tests had unexpected subtest results\n"
% num_with_failing_subtests)
output += " \u2022 %i tests had unexpected subtest results\n" % num_with_failing_subtests
output += "\n"
# Repeat failing test output, so that it is easier to find, since the
# non-interactive version prints all the test names.
if not self.interactive and self.unexpected_results:
output += u"Tests with unexpected results:\n"
output += "".join([str(result)
for result in self.unexpected_results])
output += "Tests with unexpected results:\n"
output += "".join([str(result) for result in self.unexpected_results])
return self.generate_output(text=output, new_display="")
@ -371,8 +370,8 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
# We are logging messages that begin with STDERR, because that is how exceptions
# in this formatter are indicated.
if data['message'].startswith('STDERR'):
return self.generate_output(text=data['message'] + "\n")
if data["message"].startswith("STDERR"):
return self.generate_output(text=data["message"] + "\n")
if data['level'] in ('CRITICAL', 'ERROR'):
return self.generate_output(text=data['message'] + "\n")
if data["level"] in ("CRITICAL", "ERROR"):
return self.generate_output(text=data["message"] + "\n")

View file

@ -22,10 +22,10 @@ from wptrunner import wptlogging
def create_parser():
p = argparse.ArgumentParser()
p.add_argument("--check-clean", action="store_true",
help="Check that updating the manifest doesn't lead to any changes")
p.add_argument("--rebuild", action="store_true",
help="Rebuild the manifest from scratch")
p.add_argument(
"--check-clean", action="store_true", help="Check that updating the manifest doesn't lead to any changes"
)
p.add_argument("--rebuild", action="store_true", help="Rebuild the manifest from scratch")
commandline.add_logging_group(p)
return p
@ -34,11 +34,13 @@ def create_parser():
def update(check_clean=True, rebuild=False, logger=None, **kwargs):
if not logger:
logger = wptlogging.setup(kwargs, {"mach": sys.stdout})
kwargs = {"config": os.path.join(WPT_PATH, "config.ini"),
kwargs = {
"config": os.path.join(WPT_PATH, "config.ini"),
"product": "servo",
"manifest_path": os.path.join(WPT_PATH, "meta"),
"tests_root": None,
"metadata_root": None}
"metadata_root": None,
}
set_from_config(kwargs)
config = kwargs["config"]
@ -53,15 +55,15 @@ def update(check_clean=True, rebuild=False, logger=None, **kwargs):
def _update(logger, test_paths, rebuild):
for url_base, paths in iteritems(test_paths):
manifest_path = os.path.join(paths.metadata_path, "MANIFEST.json")
cache_subdir = os.path.relpath(os.path.dirname(manifest_path),
os.path.dirname(__file__))
wptmanifest.manifest.load_and_update(paths.tests_path,
cache_subdir = os.path.relpath(os.path.dirname(manifest_path), os.path.dirname(__file__))
wptmanifest.manifest.load_and_update(
paths.tests_path,
manifest_path,
url_base,
working_copy=True,
rebuild=rebuild,
cache_root=os.path.join(SERVO_ROOT, ".wpt",
cache_subdir))
cache_root=os.path.join(SERVO_ROOT, ".wpt", cache_subdir),
)
return 0
@ -72,26 +74,25 @@ def _check_clean(logger, test_paths):
tests_path = paths.tests_path
manifest_path = os.path.join(paths.metadata_path, "MANIFEST.json")
old_manifest = wptmanifest.manifest.load_and_update(tests_path,
manifest_path,
url_base,
working_copy=False,
update=False,
write_manifest=False)
old_manifest = wptmanifest.manifest.load_and_update(
tests_path, manifest_path, url_base, working_copy=False, update=False, write_manifest=False
)
# Even if no cache is specified, one will be used automatically by the
# VCS integration. Create a brand new cache every time to ensure that
# the VCS integration always thinks that any file modifications in the
# working directory are new and interesting.
cache_root = tempfile.mkdtemp()
new_manifest = wptmanifest.manifest.load_and_update(tests_path,
new_manifest = wptmanifest.manifest.load_and_update(
tests_path,
manifest_path,
url_base,
working_copy=True,
update=True,
cache_root=cache_root,
write_manifest=False,
allow_cached=False)
allow_cached=False,
)
manifests_by_path[manifest_path] = (old_manifest, new_manifest)
@ -116,8 +117,7 @@ def diff_manifests(logger, manifest_path, old_manifest, new_manifest):
"""
logger.info("Diffing old and new manifests %s" % manifest_path)
old_items, new_items = defaultdict(set), defaultdict(set)
for manifest, items in [(old_manifest, old_items),
(new_manifest, new_items)]:
for manifest, items in [(old_manifest, old_items), (new_manifest, new_items)]:
for test_type, path, tests in manifest:
for test in tests:
test_id = [test.id]
@ -158,8 +158,8 @@ def diff_manifests(logger, manifest_path, old_manifest, new_manifest):
if clean:
# Manifest currently has some list vs tuple inconsistencies that break
# a simple equality comparison.
old_paths = old_manifest.to_json()['items']
new_paths = new_manifest.to_json()['items']
old_paths = old_manifest.to_json()["items"]
new_paths = new_manifest.to_json()["items"]
if old_paths != new_paths:
logger.warning("Manifest %s contains correct tests but file hashes changed." % manifest_path) # noqa
clean = False
@ -168,8 +168,4 @@ def diff_manifests(logger, manifest_path, old_manifest, new_manifest):
def log_error(logger, manifest_path, msg):
logger.lint_error(path=manifest_path,
message=msg,
lineno=0,
source="",
linter="wpt-manifest")
logger.lint_error(path=manifest_path, message=msg, lineno=0, source="", linter="wpt-manifest")

View file

@ -19,10 +19,7 @@ import mozlog
import mozlog.formatters
from . import SERVO_ROOT, WPT_PATH, WPT_TOOLS_PATH
from .grouping_formatter import (
ServoFormatter, ServoHandler,
UnexpectedResult, UnexpectedSubtestResult
)
from .grouping_formatter import ServoFormatter, ServoHandler, UnexpectedResult, UnexpectedSubtestResult
from wptrunner import wptcommandline
from wptrunner import wptrunner
@ -63,12 +60,8 @@ def run_tests(default_binary_path: str, **kwargs):
set_if_none(kwargs, "processes", multiprocessing.cpu_count())
set_if_none(kwargs, "ca_cert_path", os.path.join(CERTS_PATH, "cacert.pem"))
set_if_none(
kwargs, "host_key_path", os.path.join(CERTS_PATH, "web-platform.test.key")
)
set_if_none(
kwargs, "host_cert_path", os.path.join(CERTS_PATH, "web-platform.test.pem")
)
set_if_none(kwargs, "host_key_path", os.path.join(CERTS_PATH, "web-platform.test.key"))
set_if_none(kwargs, "host_cert_path", os.path.join(CERTS_PATH, "web-platform.test.pem"))
# Set `id_hash` as the default chunk, as this better distributes testing across different
# chunks and leads to more consistent timing on GitHub Actions.
set_if_none(kwargs, "chunk_type", "id_hash")
@ -139,8 +132,7 @@ def run_tests(default_binary_path: str, **kwargs):
handler.reset_state()
print(80 * "=")
print(f"Rerunning {len(unexpected_results)} tests "
"with unexpected results to detect flaky tests.")
print(f"Rerunning {len(unexpected_results)} tests with unexpected results to detect flaky tests.")
unexpected_results_tests = [result.path for result in unexpected_results]
kwargs["test_list"] = unexpected_results_tests
kwargs["include"] = unexpected_results_tests
@ -158,8 +150,7 @@ def run_tests(default_binary_path: str, **kwargs):
for result in unexpected_results:
result.flaky = result.path not in stable_tests
all_filtered = filter_intermittents(unexpected_results,
filter_intermittents_output)
all_filtered = filter_intermittents(unexpected_results, filter_intermittents_output)
return_value = 0 if all_filtered else 1
# Write the unexpected-only raw log if that was specified on the command-line.
@ -168,9 +159,7 @@ def run_tests(default_binary_path: str, **kwargs):
print("'--log-raw-unexpected' not written without '--log-raw'.")
else:
write_unexpected_only_raw_log(
handler.unexpected_results,
raw_log_outputs[0].name,
unexpected_raw_log_output_file
handler.unexpected_results, raw_log_outputs[0].name, unexpected_raw_log_output_file
)
return return_value
@ -182,12 +171,10 @@ class GithubContextInformation(NamedTuple):
branch_name: Optional[str]
class TrackerDashboardFilter():
class TrackerDashboardFilter:
def __init__(self):
base_url = os.environ.get(TRACKER_API_ENV_VAR, TRACKER_API)
self.headers = {
"Content-Type": "application/json"
}
self.headers = {"Content-Type": "application/json"}
if TRACKER_DASHBOARD_SECRET_ENV_VAR in os.environ and os.environ[TRACKER_DASHBOARD_SECRET_ENV_VAR]:
self.url = f"{base_url}/dashboard/attempts"
secret = os.environ[TRACKER_DASHBOARD_SECRET_ENV_VAR]
@ -201,10 +188,10 @@ class TrackerDashboardFilter():
if not github_context:
return GithubContextInformation(None, None, None)
repository = github_context['repository']
repository = github_context["repository"]
repo_url = f"https://github.com/{repository}"
run_id = github_context['run_id']
run_id = github_context["run_id"]
build_url = f"{repo_url}/actions/runs/{run_id}"
commit_title = "<no title>"
@ -214,32 +201,27 @@ class TrackerDashboardFilter():
commit_title = github_context["event"]["head_commit"]["message"]
pr_url = None
match = re.match(r"^Auto merge of #(\d+)", commit_title) or \
re.match(r"\(#(\d+)\)", commit_title)
match = re.match(r"^Auto merge of #(\d+)", commit_title) or re.match(r"\(#(\d+)\)", commit_title)
if match:
pr_url = f"{repo_url}/pull/{match.group(1)}" if match else None
return GithubContextInformation(
build_url,
pr_url,
github_context["ref_name"]
)
return GithubContextInformation(build_url, pr_url, github_context["ref_name"])
def make_data_from_result(
self,
result: Union[UnexpectedResult, UnexpectedSubtestResult],
) -> dict:
data = {
'path': result.path,
'subtest': None,
'expected': result.expected,
'actual': result.actual,
'time': result.time // 1000,
"path": result.path,
"subtest": None,
"expected": result.expected,
"actual": result.actual,
"time": result.time // 1000,
# Truncate the message, to avoid issues with lots of output causing "HTTP
# Error 413: Request Entity Too Large."
# See https://github.com/servo/servo/issues/31845.
'message': result.message[0:TRACKER_DASHBOARD_MAXIMUM_OUTPUT_LENGTH],
'stack': result.stack,
"message": result.message[0:TRACKER_DASHBOARD_MAXIMUM_OUTPUT_LENGTH],
"stack": result.stack,
}
if isinstance(result, UnexpectedSubtestResult):
data["subtest"] = result.subtest
@ -256,20 +238,22 @@ class TrackerDashboardFilter():
try:
request = urllib.request.Request(
url=self.url,
method='POST',
data=json.dumps({
'branch': context.branch_name,
'build_url': context.build_url,
'pull_url': context.pull_url,
'attempts': attempts
}).encode('utf-8'),
headers=self.headers)
method="POST",
data=json.dumps(
{
"branch": context.branch_name,
"build_url": context.build_url,
"pull_url": context.pull_url,
"attempts": attempts,
}
).encode("utf-8"),
headers=self.headers,
)
known_intermittents = dict()
with urllib.request.urlopen(request) as response:
for test in json.load(response)["known"]:
known_intermittents[test["path"]] = \
[issue["number"] for issue in test["issues"]]
known_intermittents[test["path"]] = [issue["number"] for issue in test["issues"]]
except urllib.error.HTTPError as e:
print(e)
@ -280,13 +264,9 @@ class TrackerDashboardFilter():
result.issues = known_intermittents.get(result.path, [])
def filter_intermittents(
unexpected_results: List[UnexpectedResult],
output_path: str
) -> bool:
def filter_intermittents(unexpected_results: List[UnexpectedResult], output_path: str) -> bool:
dashboard = TrackerDashboardFilter()
print(f"Filtering {len(unexpected_results)} "
f"unexpected results for known intermittents via <{dashboard.url}>")
print(f"Filtering {len(unexpected_results)} unexpected results for known intermittents via <{dashboard.url}>")
dashboard.report_failures(unexpected_results)
def add_result(output, text, results: List[UnexpectedResult], filter_func) -> None:
@ -298,12 +278,14 @@ def filter_intermittents(
return not result.flaky and not result.issues
output: List[str] = []
add_result(output, "Flaky unexpected results", unexpected_results,
lambda result: result.flaky)
add_result(output, "Stable unexpected results that are known-intermittent",
unexpected_results, lambda result: not result.flaky and result.issues)
add_result(output, "Stable unexpected results",
unexpected_results, is_stable_and_unexpected)
add_result(output, "Flaky unexpected results", unexpected_results, lambda result: result.flaky)
add_result(
output,
"Stable unexpected results that are known-intermittent",
unexpected_results,
lambda result: not result.flaky and result.issues,
)
add_result(output, "Stable unexpected results", unexpected_results, is_stable_and_unexpected)
print("\n".join(output))
with open(output_path, "w", encoding="utf-8") as file:
@ -313,9 +295,7 @@ def filter_intermittents(
def write_unexpected_only_raw_log(
unexpected_results: List[UnexpectedResult],
raw_log_file: str,
filtered_raw_log_file: str
unexpected_results: List[UnexpectedResult], raw_log_file: str, filtered_raw_log_file: str
):
tests = [result.path for result in unexpected_results]
print(f"Writing unexpected-only raw log to {filtered_raw_log_file}")
@ -324,6 +304,5 @@ def write_unexpected_only_raw_log(
with open(raw_log_file) as input:
for line in input.readlines():
data = json.loads(line)
if data["action"] in ["suite_start", "suite_end"] or \
("test" in data and data["test"] in tests):
if data["action"] in ["suite_start", "suite_end"] or ("test" in data and data["test"] in tests):
output.write(line)
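Each raw-log line is one JSON record; an illustrative record that the filter above keeps (field values hypothetical):

import json

tests = ["/css/flexbox/overflow.html"]  # paths of the unexpected results
record = json.loads('{"action": "test_end", "test": "/css/flexbox/overflow.html", "status": "FAIL"}')
assert record["action"] in ["suite_start", "suite_end"] or ("test" in record and record["test"] in tests)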

View file

@ -49,13 +49,13 @@ PORT = 9000
@dataclasses.dataclass
class MockPullRequest():
class MockPullRequest:
head: str
number: int
state: str = "open"
class MockGitHubAPIServer():
class MockGitHubAPIServer:
def __init__(self, port: int):
self.port = port
self.disable_logging()
@ -65,18 +65,19 @@ class MockGitHubAPIServer():
class NoLoggingHandler(WSGIRequestHandler):
def log_message(self, *args):
pass
if logging.getLogger().level == logging.DEBUG:
handler = WSGIRequestHandler
else:
handler = NoLoggingHandler
self.server = make_server('localhost', self.port, self.app, handler_class=handler)
self.server = make_server("localhost", self.port, self.app, handler_class=handler)
self.start_server_thread()
def disable_logging(self):
flask.cli.show_server_banner = lambda *args: None
logging.getLogger("werkzeug").disabled = True
logging.getLogger('werkzeug').setLevel(logging.CRITICAL)
logging.getLogger("werkzeug").setLevel(logging.CRITICAL)
def start(self):
self.thread.start()
@ -84,21 +85,21 @@ class MockGitHubAPIServer():
# Wait for the server to be started.
while True:
try:
response = requests.get(f'http://localhost:{self.port}/ping', timeout=1)
response = requests.get(f"http://localhost:{self.port}/ping", timeout=1)
assert response.status_code == 200
assert response.text == 'pong'
assert response.text == "pong"
break
except Exception:
time.sleep(0.1)
def reset_server_state_with_pull_requests(self, pulls: list[MockPullRequest]):
response = requests.get(
f'http://localhost:{self.port}/reset-mock-github',
f"http://localhost:{self.port}/reset-mock-github",
json=[dataclasses.asdict(pull_request) for pull_request in pulls],
timeout=1
timeout=1,
)
assert response.status_code == 200
assert response.text == '👍'
assert response.text == "👍"
def shutdown(self):
self.server.shutdown()
@ -111,26 +112,25 @@ class MockGitHubAPIServer():
@self.app.route("/ping")
def ping():
return ('pong', 200)
return ("pong", 200)
@self.app.route("/reset-mock-github")
def reset_server():
self.pulls = [
MockPullRequest(pull_request['head'],
pull_request['number'],
pull_request['state'])
for pull_request in flask.request.json]
return ('👍', 200)
MockPullRequest(pull_request["head"], pull_request["number"], pull_request["state"])
for pull_request in flask.request.json
]
return ("👍", 200)
@self.app.route("/repos/<org>/<repo>/pulls/<int:number>/merge", methods=['PUT'])
@self.app.route("/repos/<org>/<repo>/pulls/<int:number>/merge", methods=["PUT"])
def merge_pull_request(org, repo, number):
for pull_request in self.pulls:
if pull_request.number == number:
pull_request.state = 'closed'
return ('', 204)
return ('', 404)
pull_request.state = "closed"
return ("", 204)
return ("", 404)
@self.app.route("/search/issues", methods=['GET'])
@self.app.route("/search/issues", methods=["GET"])
def search():
params = {}
param_strings = flask.request.args.get("q", "").split(" ")
@ -145,38 +145,29 @@ class MockGitHubAPIServer():
for pull_request in self.pulls:
if pull_request.head.endswith(head_ref):
return json.dumps({
"total_count": 1,
"items": [{
"number": pull_request.number
}]
})
return json.dumps({"total_count": 1, "items": [{"number": pull_request.number}]})
return json.dumps({"total_count": 0, "items": []})
@self.app.route("/repos/<org>/<repo>/pulls", methods=['POST'])
@self.app.route("/repos/<org>/<repo>/pulls", methods=["POST"])
def create_pull_request(org, repo):
new_pr_number = len(self.pulls) + 1
self.pulls.append(MockPullRequest(
flask.request.json["head"],
new_pr_number,
"open"
))
self.pulls.append(MockPullRequest(flask.request.json["head"], new_pr_number, "open"))
return {"number": new_pr_number}
@self.app.route("/repos/<org>/<repo>/pulls/<int:number>", methods=['PATCH'])
@self.app.route("/repos/<org>/<repo>/pulls/<int:number>", methods=["PATCH"])
def update_pull_request(org, repo, number):
for pull_request in self.pulls:
if pull_request.number == number:
if 'state' in flask.request.json:
pull_request.state = flask.request.json['state']
return ('', 204)
return ('', 404)
if "state" in flask.request.json:
pull_request.state = flask.request.json["state"]
return ("", 204)
return ("", 404)
@self.app.route("/repos/<org>/<repo>/issues/<number>/labels", methods=['GET', 'POST'])
@self.app.route("/repos/<org>/<repo>/issues/<number>/labels/<label>", methods=['DELETE'])
@self.app.route("/repos/<org>/<repo>/issues/<issue>/comments", methods=['GET', 'POST'])
@self.app.route("/repos/<org>/<repo>/issues/<number>/labels", methods=["GET", "POST"])
@self.app.route("/repos/<org>/<repo>/issues/<number>/labels/<label>", methods=["DELETE"])
@self.app.route("/repos/<org>/<repo>/issues/<issue>/comments", methods=["GET", "POST"])
def other_requests(*args, **kwargs):
return ('', 204)
return ("", 204)
class TestCleanUpBodyText(unittest.TestCase):
@ -196,28 +187,22 @@ class TestCleanUpBodyText(unittest.TestCase):
)
self.assertEqual(
"Subject\n\nBody text #<!-- nolink -->1",
SyncRun.clean_up_body_text(
"Subject\n\nBody text #1\n---<!-- Thank you for contributing"
),
SyncRun.clean_up_body_text("Subject\n\nBody text #1\n---<!-- Thank you for contributing"),
)
self.assertEqual(
"Subject\n\nNo dashes",
SyncRun.clean_up_body_text(
"Subject\n\nNo dashes<!-- Thank you for contributing"
),
SyncRun.clean_up_body_text("Subject\n\nNo dashes<!-- Thank you for contributing"),
)
self.assertEqual(
"Subject\n\nNo --- comment",
SyncRun.clean_up_body_text(
"Subject\n\nNo --- comment\n---Other stuff that"
),
SyncRun.clean_up_body_text("Subject\n\nNo --- comment\n---Other stuff that"),
)
self.assertEqual(
"Subject\n\n#<!-- nolink -->3 servo#<!-- nolink -->3 servo/servo#3",
SyncRun.clean_up_body_text(
"Subject\n\n#3 servo#3 servo/servo#3",
),
"Only relative and bare issue reference links should be escaped."
"Only relative and bare issue reference links should be escaped.",
)
@@ -236,9 +221,7 @@ class TestApplyCommitsToWPT(unittest.TestCase):
pull_request = SYNC.servo.get_pull_request(pr_number)
step = CreateOrUpdateBranchForPRStep({"number": pr_number}, pull_request)
def get_applied_commits(
num_commits: int, applied_commits: list[Tuple[str, str]]
):
def get_applied_commits(num_commits: int, applied_commits: list[Tuple[str, str]]):
assert SYNC is not None
repo = SYNC.local_wpt_repo
log = ["log", "--oneline", f"-{num_commits}"]
@@ -252,17 +235,13 @@ class TestApplyCommitsToWPT(unittest.TestCase):
applied_commits: list[Any] = []
callback = partial(get_applied_commits, len(commits), applied_commits)
step._create_or_update_branch_for_pr(
SyncRun(SYNC, pull_request, None, None), commits, callback
)
step._create_or_update_branch_for_pr(SyncRun(SYNC, pull_request, None, None), commits, callback)
expected_commits = [(commit["author"], commit["message"]) for commit in commits]
self.assertListEqual(applied_commits, expected_commits)
def test_simple_commit(self):
self.run_test(
45, [["test author <test@author>", "test commit message", "18746.diff"]]
)
self.run_test(45, [["test author <test@author>", "test commit message", "18746.diff"]])
def test_two_commits(self):
self.run_test(
@@ -299,9 +278,7 @@ class TestFullSyncRun(unittest.TestCase):
assert SYNC is not None
# Clean up any old files.
first_commit_hash = SYNC.local_servo_repo.run("rev-list", "HEAD").splitlines()[
-1
]
first_commit_hash = SYNC.local_servo_repo.run("rev-list", "HEAD").splitlines()[-1]
SYNC.local_servo_repo.run("reset", "--hard", first_commit_hash)
SYNC.local_servo_repo.run("clean", "-fxd")
@@ -339,9 +316,7 @@ class TestFullSyncRun(unittest.TestCase):
SYNC.local_servo_repo.run("reset", "--hard", orig_sha)
return last_commit_sha
def run_test(
self, payload_file: str, diffs: list, existing_prs: list[MockPullRequest] = []
):
def run_test(self, payload_file: str, diffs: list, existing_prs: list[MockPullRequest] = []):
with open(os.path.join(TESTS_DIR, payload_file), encoding="utf-8") as file:
payload = json.loads(file.read())
@@ -413,12 +388,8 @@ class TestFullSyncRun(unittest.TestCase):
)
def test_opened_new_mr_with_no_sync_signal(self):
self.assertListEqual(
self.run_test("opened-with-no-sync-signal.json", ["18746.diff"]), []
)
self.assertListEqual(
self.run_test("opened-with-no-sync-signal.json", ["non-wpt.diff"]), []
)
self.assertListEqual(self.run_test("opened-with-no-sync-signal.json", ["18746.diff"]), [])
self.assertListEqual(self.run_test("opened-with-no-sync-signal.json", ["non-wpt.diff"]), [])
def test_opened_upstreamable_pr_not_applying_cleanly_to_upstream(self):
self.assertListEqual(
@@ -459,7 +430,7 @@ class TestFullSyncRun(unittest.TestCase):
"RemoveBranchForPRStep:servo/wpt/servo_export_18746",
"CommentStep:servo/servo#18746:🤖 This change no longer contains upstreamable changes "
"to WPT; closed existing upstream pull request (wpt/wpt#1).",
]
],
)
def test_opened_upstreamable_pr_with_non_utf8_file_contents(self):
@@ -502,10 +473,7 @@ class TestFullSyncRun(unittest.TestCase):
["18746.diff"],
[MockPullRequest("servo:servo_export_18746", 10)],
),
[
"ChangePRStep:wpt/wpt#10:closed",
"RemoveBranchForPRStep:servo/wpt/servo_export_18746"
]
["ChangePRStep:wpt/wpt#10:closed", "RemoveBranchForPRStep:servo/wpt/servo_export_18746"],
)
def test_synchronize_move_new_changes_to_preexisting_upstream_pr(self):
@@ -520,7 +488,7 @@ class TestFullSyncRun(unittest.TestCase):
"CreateOrUpdateBranchForPRStep:1:servo/wpt/servo_export_19612",
"CommentStep:servo/servo#19612:📝 Transplanted new upstreamable changes to existing "
"upstream WPT pull request (wpt/wpt#10).",
]
],
)
def test_synchronize_close_upstream_pr_after_new_changes_do_not_include_wpt(self):
@@ -537,7 +505,7 @@ class TestFullSyncRun(unittest.TestCase):
"RemoveBranchForPRStep:servo/wpt/servo_export_19612",
"CommentStep:servo/servo#19612:🤖 This change no longer contains upstreamable changes to WPT; "
"closed existing upstream pull request (wpt/wpt#11).",
]
],
)
def test_synchronize_open_upstream_pr_after_new_changes_include_wpt(self):
@@ -548,7 +516,7 @@ class TestFullSyncRun(unittest.TestCase):
"OpenPRStep:servo/wpt/servo_export_19612→wpt/wpt#1",
"CommentStep:servo/servo#19612:🤖 Opened new upstream WPT pull request "
"(wpt/wpt#1) with upstreamable changes.",
]
],
)
def test_synchronize_fail_to_update_preexisting_pr_after_new_changes_do_not_apply(
@@ -567,20 +535,17 @@ class TestFullSyncRun(unittest.TestCase):
"latest upstream WPT. Servo's copy of the Web Platform Tests may be out of sync.",
"CommentStep:wpt/wpt#11:🛠 Changes from the source pull request (servo/servo#19612) can "
"no longer be cleanly applied. Waiting for a new version of these changes downstream.",
]
],
)
def test_edited_with_upstream_pr(self):
self.assertListEqual(
self.run_test(
"edited.json", ["wpt.diff"],
[MockPullRequest("servo:servo_export_19620", 10)]
),
self.run_test("edited.json", ["wpt.diff"], [MockPullRequest("servo:servo_export_19620", 10)]),
[
"ChangePRStep:wpt/wpt#10:open:A cool new title:Reference #<!--...[136]",
"CommentStep:servo/servo#19620:✍ Updated existing upstream WPT pull "
"request (wpt/wpt#10) title and body."
]
"request (wpt/wpt#10) title and body.",
],
)
def test_edited_with_no_upstream_pr(self):
@@ -590,15 +555,13 @@ class TestFullSyncRun(unittest.TestCase):
self,
):
self.assertListEqual(
self.run_test(
"synchronize-multiple.json", ["18746.diff", "non-wpt.diff", "wpt.diff"]
),
self.run_test("synchronize-multiple.json", ["18746.diff", "non-wpt.diff", "wpt.diff"]),
[
"CreateOrUpdateBranchForPRStep:2:servo/wpt/servo_export_19612",
"OpenPRStep:servo/wpt/servo_export_19612→wpt/wpt#1",
"CommentStep:servo/servo#19612:"
"🤖 Opened new upstream WPT pull request (wpt/wpt#1) with upstreamable changes.",
]
],
)
def test_synchronize_with_non_upstreamable_changes(self):
@@ -606,15 +569,8 @@ class TestFullSyncRun(unittest.TestCase):
def test_merge_upstream_pr_after_merge(self):
self.assertListEqual(
self.run_test(
"merged.json",
["18746.diff"],
[MockPullRequest("servo:servo_export_19620", 100)]
),
[
"MergePRStep:wpt/wpt#100",
"RemoveBranchForPRStep:servo/wpt/servo_export_19620"
]
self.run_test("merged.json", ["18746.diff"], [MockPullRequest("servo:servo_export_19620", 100)]),
["MergePRStep:wpt/wpt#100", "RemoveBranchForPRStep:servo/wpt/servo_export_19620"],
)
def test_pr_merged_no_upstream_pr(self):
@@ -644,8 +600,7 @@ def setUpModule():
)
def setup_mock_repo(repo_name, local_repo, default_branch: str):
subprocess.check_output(
["cp", "-R", "-p", os.path.join(TESTS_DIR, repo_name), local_repo.path])
subprocess.check_output(["cp", "-R", "-p", os.path.join(TESTS_DIR, repo_name), local_repo.path])
local_repo.run("init", "-b", default_branch)
local_repo.run("add", ".")
local_repo.run("commit", "-a", "-m", "Initial commit")
@@ -666,12 +621,16 @@ def run_tests():
verbosity = 1 if logging.getLogger().level >= logging.WARN else 2
def run_suite(test_case: Type[unittest.TestCase]):
return unittest.TextTestRunner(verbosity=verbosity).run(
unittest.TestLoader().loadTestsFromTestCase(test_case)
).wasSuccessful()
return (
unittest.TextTestRunner(verbosity=verbosity)
.run(unittest.TestLoader().loadTestsFromTestCase(test_case))
.wasSuccessful()
)
return all([
return all(
[
run_suite(TestApplyCommitsToWPT),
run_suite(TestCleanUpBodyText),
run_suite(TestFullSyncRun),
])
]
)


@@ -5,6 +5,6 @@ index 10d52a0..92fb89d 100644
@@ -8,3 +8,4 @@
# except according to those terms.
print('this is a python file')
+print('this is a change')
print("this is a python file")
+print("this is a change")


@@ -7,4 +7,4 @@
# option. This file may not be copied, modified, or distributed
# except according to those terms.
print('this is a python file')
print("this is a python file")


@@ -15,11 +15,8 @@ from wptrunner import wptcommandline  # noqa: F401
from . import WPT_PATH
from . import manifestupdate
TEST_ROOT = os.path.join(WPT_PATH, 'tests')
META_ROOTS = [
os.path.join(WPT_PATH, 'meta'),
os.path.join(WPT_PATH, 'meta-legacy')
]
TEST_ROOT = os.path.join(WPT_PATH, "tests")
META_ROOTS = [os.path.join(WPT_PATH, "meta"), os.path.join(WPT_PATH, "meta-legacy")]
def do_sync(**kwargs) -> int:
@@ -28,8 +25,8 @@ def do_sync(**kwargs) -> int:
# Commits should always be authored by the GitHub Actions bot.
os.environ["GIT_AUTHOR_NAME"] = "Servo WPT Sync"
os.environ["GIT_AUTHOR_EMAIL"] = "ghbot+wpt-sync@servo.org"
os.environ["GIT_COMMITTER_NAME"] = os.environ['GIT_AUTHOR_NAME']
os.environ["GIT_COMMITTER_EMAIL"] = os.environ['GIT_AUTHOR_EMAIL']
os.environ["GIT_COMMITTER_NAME"] = os.environ["GIT_AUTHOR_NAME"]
os.environ["GIT_COMMITTER_EMAIL"] = os.environ["GIT_AUTHOR_EMAIL"]
print("Updating WPT from upstream...")
run_update(**kwargs)
@@ -67,7 +64,7 @@ def remove_unused_metadata():
dir_path = os.path.join(base_dir, dir_name)
# Skip any known directories that are meta-metadata.
if dir_name == '.cache':
if dir_name == ".cache":
unused_dirs.append(dir_path)
continue
@@ -78,12 +75,11 @@ def remove_unused_metadata():
for fname in files:
# Skip any known files that are meta-metadata.
if not fname.endswith(".ini") or fname == '__dir__.ini':
if not fname.endswith(".ini") or fname == "__dir__.ini":
continue
# Turn tests/wpt/meta/foo/bar.html.ini into tests/wpt/tests/foo/bar.html.
test_file = os.path.join(
TEST_ROOT, os.path.relpath(base_dir, meta_root), fname[:-4])
test_file = os.path.join(TEST_ROOT, os.path.relpath(base_dir, meta_root), fname[:-4])
if not os.path.exists(test_file):
unused_files.append(os.path.join(base_dir, fname))
@@ -106,10 +102,10 @@ def update_tests(**kwargs) -> int:
kwargs["store_state"] = False
wptcommandline.set_from_config(kwargs)
if hasattr(wptcommandline, 'check_paths'):
if hasattr(wptcommandline, "check_paths"):
wptcommandline.check_paths(kwargs["test_paths"])
if kwargs.get('sync', False):
if kwargs.get("sync", False):
return do_sync(**kwargs)
return 0 if run_update(**kwargs) else 1
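All of the hunks above are mechanical rewrites produced by ruff's formatter: single-quoted strings become double-quoted, over-long calls are collapsed onto one line or split with one argument per line and a trailing comma, and wrapped literals are re-flowed. As a minimal sketch of gating CI on this formatting — assuming ruff is installed, and noting that the "." path and the exit message below are illustrative rather than taken from this commit:

import subprocess
import sys

# `ruff format --check` exits non-zero when any file would be reformatted;
# dropping `--check` rewrites the offending files in place instead.
result = subprocess.run(["ruff", "format", "--check", "."])
if result.returncode != 0:
    sys.exit("Formatting drift detected; run `ruff format` locally.")

Running `ruff format .` applies the same kind of quote and line-wrapping normalization shown in these diffs.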