Use ruff to enforce python code formatting (#37117)

Requires servo/servo#37045 for deps and config.

Testing: No need for tests to test tests.

Fixes: servo/servo#37041

---------

Signed-off-by: zefr0x <zer0-x.7ty50@aleeas.com>
parent 41ecfb53a1
commit c96de69e80
67 changed files with 3021 additions and 3085 deletions
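The ruff configuration itself ships separately in servo/servo#37045 and is not part of this diff, so the exact settings are not shown here. As a hedged sketch only, a pyproject.toml stanza of roughly the following shape would reproduce the style visible in the hunks below (double quotes, trailing commas in multi-line literals, and a line length well above the 88-column default); the concrete values are assumptions, not Servo's actual config:

    # Hypothetical sketch: the real configuration lands in servo/servo#37045.
    [tool.ruff]
    line-length = 120  # assumption: reformatted lines below clearly exceed 88 columns

    [tool.ruff.format]
    quote-style = "double"  # matches the pervasive single-to-double quote churn in this diff

With such a config in place, formatting is applied locally with `ruff format .` and enforced in CI with `ruff format --check .`, which exits non-zero when any file would be reformatted.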
@@ -17,27 +17,31 @@ import os
 def size(args):
     size = os.path.getsize(args.binary)
     print(size)
-    with open(args.bmf_output, 'w', encoding='utf-8') as f:
-        json.dump({
-            args.variant: {
-                'file-size': {
-                    'value': float(size),
-                }
-            }
-        }, f, indent=4)
+    with open(args.bmf_output, "w", encoding="utf-8") as f:
+        json.dump(
+            {
+                args.variant: {
+                    "file-size": {
+                        "value": float(size),
+                    }
+                }
+            },
+            f,
+            indent=4,
+        )


 def merge(args):
     output: dict[str, object] = dict()
     for input_file in args.inputs:
-        with open(input_file, 'r', encoding='utf-8') as f:
+        with open(input_file, "r", encoding="utf-8") as f:
             data = json.load(f)
         diff = set(data) & set(output)
         if diff:
             print("Duplicated keys:", diff)
         output = data | output

-    with open(args.bmf_output, 'w', encoding='utf-8') as f:
+    with open(args.bmf_output, "w", encoding="utf-8") as f:
         json.dump(output, f, indent=4)

@@ -24,7 +24,7 @@ TEST_CMD = [
     "--log-raw=-",
     # We run the content-security-policy test because it creates
     # cross-origin iframes, which are good for stress-testing pipelines
-    "content-security-policy"
+    "content-security-policy",
 ]

 # Note that there will probably be test failures caused
@@ -35,7 +35,7 @@ test_results = Popen(TEST_CMD, stdout=PIPE)
 any_crashes = False

 for line in test_results.stdout:
-    report = json.loads(line.decode('utf-8'))
+    report = json.loads(line.decode("utf-8"))
     if report.get("action") == "process_output":
         print("{} - {}".format(report.get("thread"), report.get("data")))
     status = report.get("status")
@@ -12,35 +12,46 @@ import re
 import subprocess
 import sys

-symbol_regex = re.compile(br"D \*UND\*\t(.*) (.*)$")
-allowed_symbols = frozenset([
-    b'unshare',
-    b'malloc_usable_size',
-    b'__cxa_type_match',
-    b'signal',
-    b'tcgetattr',
-    b'tcsetattr',
-    b'__strncpy_chk2',
-    b'rand',
-    b'__read_chk',
-    b'fesetenv',
-    b'srand',
-    b'abs',
-    b'fegetenv',
-    b'sigemptyset',
-    b'AHardwareBuffer_allocate',
-    b'AHardwareBuffer_release',
-    b'getentropy',
-])
+symbol_regex = re.compile(rb"D \*UND\*\t(.*) (.*)$")
+allowed_symbols = frozenset(
+    [
+        b"unshare",
+        b"malloc_usable_size",
+        b"__cxa_type_match",
+        b"signal",
+        b"tcgetattr",
+        b"tcsetattr",
+        b"__strncpy_chk2",
+        b"rand",
+        b"__read_chk",
+        b"fesetenv",
+        b"srand",
+        b"abs",
+        b"fegetenv",
+        b"sigemptyset",
+        b"AHardwareBuffer_allocate",
+        b"AHardwareBuffer_release",
+        b"getentropy",
+    ]
+)
 actual_symbols = set()

-objdump_output = subprocess.check_output([
-    os.path.join(
-        'android-toolchains', 'ndk', 'toolchains', 'arm-linux-androideabi-4.9',
-        'prebuilt', 'linux-x86_64', 'bin', 'arm-linux-androideabi-objdump'),
-    '-T',
-    'target/android/armv7-linux-androideabi/debug/libservoshell.so']
-).split(b'\n')
+objdump_output = subprocess.check_output(
+    [
+        os.path.join(
+            "android-toolchains",
+            "ndk",
+            "toolchains",
+            "arm-linux-androideabi-4.9",
+            "prebuilt",
+            "linux-x86_64",
+            "bin",
+            "arm-linux-androideabi-objdump",
+        ),
+        "-T",
+        "target/android/armv7-linux-androideabi/debug/libservoshell.so",
+    ]
+).split(b"\n")

 for line in objdump_output:
     m = symbol_regex.search(line)
@@ -16,43 +16,47 @@ SCRIPT_PATH = os.path.split(__file__)[0]


 def main():
-    default_output_dir = os.path.join(SCRIPT_PATH, 'output')
-    default_cache_dir = os.path.join(SCRIPT_PATH, '.cache')
+    default_output_dir = os.path.join(SCRIPT_PATH, "output")
+    default_cache_dir = os.path.join(SCRIPT_PATH, ".cache")

-    parser = argparse.ArgumentParser(
-        description="Download buildbot metadata"
-    )
-    parser.add_argument("--index-url",
-                        type=str,
-                        default='https://build.servo.org/json',
-                        help="the URL to get the JSON index data index from. "
-                        "Default: https://build.servo.org/json")
-    parser.add_argument("--build-url",
-                        type=str,
-                        default='https://build.servo.org/json/builders/{}/builds/{}',
-                        help="the URL to get the JSON build data from. "
-                        "Default: https://build.servo.org/json/builders/{}/builds/{}")
-    parser.add_argument("--cache-dir",
-                        type=str,
-                        default=default_cache_dir,
-                        help="the directory to cache JSON files in. Default: " + default_cache_dir)
-    parser.add_argument("--cache-name",
-                        type=str,
-                        default='build-{}-{}.json',
-                        help="the filename to cache JSON data in. "
-                        "Default: build-{}-{}.json")
-    parser.add_argument("--output-dir",
-                        type=str,
-                        default=default_output_dir,
-                        help="the directory to save the CSV data to. Default: " + default_output_dir)
-    parser.add_argument("--output-name",
-                        type=str,
-                        default='builds-{}-{}.csv',
-                        help="the filename to save the CSV data to. "
-                        "Default: builds-{}-{}.csv")
-    parser.add_argument("--verbose", "-v",
-                        action='store_true',
-                        help="print every HTTP request")
+    parser = argparse.ArgumentParser(description="Download buildbot metadata")
+    parser.add_argument(
+        "--index-url",
+        type=str,
+        default="https://build.servo.org/json",
+        help="the URL to get the JSON index data index from. Default: https://build.servo.org/json",
+    )
+    parser.add_argument(
+        "--build-url",
+        type=str,
+        default="https://build.servo.org/json/builders/{}/builds/{}",
+        help="the URL to get the JSON build data from. Default: https://build.servo.org/json/builders/{}/builds/{}",
+    )
+    parser.add_argument(
+        "--cache-dir",
+        type=str,
+        default=default_cache_dir,
+        help="the directory to cache JSON files in. Default: " + default_cache_dir,
+    )
+    parser.add_argument(
+        "--cache-name",
+        type=str,
+        default="build-{}-{}.json",
+        help="the filename to cache JSON data in. Default: build-{}-{}.json",
+    )
+    parser.add_argument(
+        "--output-dir",
+        type=str,
+        default=default_output_dir,
+        help="the directory to save the CSV data to. Default: " + default_output_dir,
+    )
+    parser.add_argument(
+        "--output-name",
+        type=str,
+        default="builds-{}-{}.csv",
+        help="the filename to save the CSV data to. Default: builds-{}-{}.csv",
+    )
+    parser.add_argument("--verbose", "-v", action="store_true", help="print every HTTP request")
     args = parser.parse_args()

     os.makedirs(args.cache_dir, exist_ok=True)
@@ -63,7 +67,7 @@ def main():
     if args.verbose:
         print("Downloading index {}.".format(args.index_url))
     with urlopen(args.index_url) as response:
-        index = json.loads(response.read().decode('utf-8'))
+        index = json.loads(response.read().decode("utf-8"))

     builds = []

@@ -75,12 +79,11 @@ def main():
         if args.verbose:
             print("Downloading recent build {}.".format(recent_build_url))
         with urlopen(recent_build_url) as response:
-            recent_build = json.loads(response.read().decode('utf-8'))
+            recent_build = json.loads(response.read().decode("utf-8"))
         recent_build_number = recent_build["number"]

         # Download each build, and convert to CSV
         for build_number in range(0, recent_build_number):
-
             # Rather annoyingly, we can't just use the Python http cache,
             # because it doesn't cache 404 responses. So we roll our own.
             cache_json_name = args.cache_name.format(builder, build_number)
@@ -96,7 +99,7 @@ def main():
                 print("Downloading build {}.".format(build_url))
             try:
                 with urlopen(build_url) as response:
-                    build = json.loads(response.read().decode('utf-8'))
+                    build = json.loads(response.read().decode("utf-8"))
             except HTTPError as e:
                 if e.code == 404:
                     build = {}
@@ -104,46 +107,46 @@ def main():
                 raise

             # Don't cache current builds.
-            if build.get('currentStep'):
+            if build.get("currentStep"):
                 continue

-            with open(cache_json, 'w+') as f:
+            with open(cache_json, "w+") as f:
                 json.dump(build, f)

-            if 'times' in build:
+            if "times" in build:
                 builds.append(build)

     years = {}
     for build in builds:
-        build_date = date.fromtimestamp(build['times'][0])
+        build_date = date.fromtimestamp(build["times"][0])
         years.setdefault(build_date.year, {}).setdefault(build_date.month, []).append(build)

     for year, months in years.items():
         for month, builds in months.items():

             output_name = args.output_name.format(year, month)
             output = os.path.join(args.output_dir, output_name)

             # Create the CSV file.
             if args.verbose:
-                print('Creating file {}.'.format(output))
-            with open(output, 'w+') as output_file:
+                print("Creating file {}.".format(output))
+            with open(output, "w+") as output_file:
                 output_csv = csv.writer(output_file)

                 # The CSV column names
-                output_csv.writerow([
-                    'builder',
-                    'buildNumber',
-                    'buildTimestamp',
-                    'stepName',
-                    'stepText',
-                    'stepNumber',
-                    'stepStart',
-                    'stepFinish'
-                ])
+                output_csv.writerow(
+                    [
+                        "builder",
+                        "buildNumber",
+                        "buildTimestamp",
+                        "stepName",
+                        "stepText",
+                        "stepNumber",
+                        "stepStart",
+                        "stepFinish",
+                    ]
+                )

                 for build in builds:

                     builder = build["builderName"]
                     build_number = build["number"]
                     build_timestamp = datetime.fromtimestamp(build["times"][0]).replace(microsecond=0)
@@ -152,11 +155,12 @@ def main():
                     for step in build["steps"]:
                         if step["isFinished"]:
                             step_name = step["name"]
-                            step_text = ' '.join(step["text"])
+                            step_text = " ".join(step["text"])
                             step_number = step["step_number"]
                             step_start = floor(step["times"][0])
                             step_finish = floor(step["times"][1])
-                            output_csv.writerow([
+                            output_csv.writerow(
+                                [
                                     builder,
                                     build_number,
                                     build_timestamp,
@@ -164,8 +168,9 @@ def main():
                                     step_text,
                                     step_number,
                                     step_start,
-                                step_finish
-                            ])
+                                    step_finish,
+                                ]
+                            )


 if __name__ == "__main__":
@@ -15,7 +15,7 @@ import sys
 @contextmanager
 def create_gecko_session():
     try:
-        firefox_binary = os.environ['FIREFOX_BIN']
+        firefox_binary = os.environ["FIREFOX_BIN"]
     except KeyError:
         print("+=============================================================+")
         print("| You must set the path to your firefox binary to FIREFOX_BIN |")
@@ -36,10 +36,7 @@ def generate_placeholder(testcase):
     # use a placeholder with values = -1 to make Treeherder happy, and still be
     # able to identify failed tests (successful tests have time >=0).

-    timings = {
-        "testcase": testcase,
-        "title": ""
-    }
+    timings = {"testcase": testcase, "title": ""}

     timing_names = [
         "navigationStart",
@@ -81,16 +78,9 @@ def run_gecko_test(testcase, url, date, timeout, is_async):
         return generate_placeholder(testcase)

     try:
-        timings = {
-            "testcase": testcase,
-            "title": driver.title.replace(",", ",")
-        }
+        timings = {"testcase": testcase, "title": driver.title.replace(",", ",")}

-        timings.update(json.loads(
-            driver.execute_script(
-                "return JSON.stringify(performance.timing)"
-            )
-        ))
+        timings.update(json.loads(driver.execute_script("return JSON.stringify(performance.timing)")))
     except Exception:
         # We need to return a timing object no matter what happened.
         # See the comment in generate_placeholder() for explanation
@@ -101,17 +91,14 @@ def run_gecko_test(testcase, url, date, timeout, is_async):
         # TODO: the timeout is hardcoded
         driver.implicitly_wait(5)  # sec
         driver.find_element_by_id("GECKO_TEST_DONE")
-        timings.update(json.loads(
-            driver.execute_script(
-                "return JSON.stringify(window.customTimers)"
-            )
-        ))
+        timings.update(json.loads(driver.execute_script("return JSON.stringify(window.customTimers)")))

     return [timings]


-if __name__ == '__main__':
+if __name__ == "__main__":
     # Just for manual testing
     from pprint import pprint

     url = "http://localhost:8000/page_load_test/tp5n/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html"
     pprint(run_gecko_test(url, 15))
@@ -23,14 +23,13 @@ SYSTEM = platform.system()


 def load_manifest(filename):
-    with open(filename, 'r') as f:
+    with open(filename, "r") as f:
         text = f.read()
     return list(parse_manifest(text))


 def parse_manifest(text):
-    lines = filter(lambda x: x != "" and not x.startswith("#"),
-                   map(lambda x: x.strip(), text.splitlines()))
+    lines = filter(lambda x: x != "" and not x.startswith("#"), map(lambda x: x.strip(), text.splitlines()))
     output = []
     for line in lines:
         if line.split(" ")[0] == "async":
@@ -46,21 +45,18 @@ def testcase_url(base, testcase):
     # the server on port 80. To allow non-root users to run the test
     # case, we take the URL to be relative to a base URL.
     (scheme, netloc, path, query, fragment) = urlsplit(testcase)
-    relative_url = urlunsplit(('', '', '.' + path, query, fragment))
+    relative_url = urlunsplit(("", "", "." + path, query, fragment))
     absolute_url = urljoin(base, relative_url)
     return absolute_url


 def execute_test(url, command, timeout):
     try:
-        return subprocess.check_output(
-            command, stderr=subprocess.STDOUT, timeout=timeout
-        )
+        return subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
     except subprocess.CalledProcessError as e:
         print("Unexpected Fail:")
         print(e)
-        print("You may want to re-run the test manually:\n{}"
-              .format(' '.join(command)))
+        print("You may want to re-run the test manually:\n{}".format(" ".join(command)))
     except subprocess.TimeoutExpired:
         print("Test FAILED due to timeout: {}".format(url))
         return ""
@@ -74,22 +70,21 @@ def run_servo_test(testcase, url, date, timeout, is_async):

     ua_script_path = "{}/user-agent-js".format(os.getcwd())
     command = [
-        "../../../target/release/servo", url,
+        "../../../target/release/servo",
+        url,
         "--userscripts=" + ua_script_path,
         "--headless",
-        "-x", "-o", "output.png"
+        "-x",
+        "-o",
+        "output.png",
     ]
     log = ""
     try:
-        log = subprocess.check_output(
-            command, stderr=subprocess.STDOUT, timeout=timeout
-        )
+        log = subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
     except subprocess.CalledProcessError as e:
         print("Unexpected Fail:")
         print(e)
-        print("You may want to re-run the test manually:\n{}".format(
-            ' '.join(command)
-        ))
+        print("You may want to re-run the test manually:\n{}".format(" ".join(command)))
     except subprocess.TimeoutExpired:
         print("Test FAILED due to timeout: {}".format(testcase))
     return parse_log(log, testcase, url, date)
@@ -100,7 +95,7 @@ def parse_log(log, testcase, url, date):
     block = []
     copy = False
     for line_bytes in log.splitlines():
-        line = line_bytes.decode('utf-8')
+        line = line_bytes.decode("utf-8")

         if line.strip() == ("[PERF] perf block start"):
             copy = True
@@ -119,10 +114,10 @@ def parse_log(log, testcase, url, date):
             except ValueError:
                 print("[DEBUG] failed to parse the following line:")
                 print(line)
-                print('[DEBUG] log:')
-                print('-----')
+                print("[DEBUG] log:")
+                print("-----")
                 print(log)
-                print('-----')
+                print("-----")
                 return None

             if key == "testcase" or key == "title":
@@ -133,10 +128,12 @@ def parse_log(log, testcase, url, date):
         return timing

     def valid_timing(timing, url=None):
-        if (timing is None
+        if (
+            timing is None
             or testcase is None
-            or timing.get('title') == 'Error loading page'
-            or timing.get('testcase') != url):
+            or timing.get("title") == "Error loading page"
+            or timing.get("testcase") != url
+        ):
             return False
         else:
             return True
@@ -178,10 +175,10 @@ def parse_log(log, testcase, url, date):
     # Set the testcase field to contain the original testcase name,
     # rather than the url.
     def set_testcase(timing, testcase=None, date=None):
-        timing['testcase'] = testcase
-        timing['system'] = SYSTEM
-        timing['machine'] = MACHINE
-        timing['date'] = date
+        timing["testcase"] = testcase
+        timing["system"] = SYSTEM
+        timing["machine"] = MACHINE
+        timing["date"] = date
         return timing

     valid_timing_for_case = partial(valid_timing, url=url)
@@ -190,10 +187,10 @@ def parse_log(log, testcase, url, date):

     if len(timings) == 0:
         print("Didn't find any perf data in the log, test timeout?")
-        print('[DEBUG] log:')
-        print('-----')
+        print("[DEBUG] log:")
+        print("-----")
         print(log)
-        print('-----')
+        print("-----")

         return [create_placeholder(testcase)]
     else:
@@ -204,22 +201,25 @@ def filter_result_by_manifest(result_json, manifest, base):
     filtered = []
     for name, is_async in manifest:
         url = testcase_url(base, name)
-        match = [tc for tc in result_json if tc['testcase'] == url]
+        match = [tc for tc in result_json if tc["testcase"] == url]
         if len(match) == 0:
-            raise Exception(("Missing test result: {}. This will cause a "
-                             "discontinuity in the treeherder graph, "
-                             "so we won't submit this data.").format(name))
+            raise Exception(
+                (
+                    "Missing test result: {}. This will cause a "
+                    "discontinuity in the treeherder graph, "
+                    "so we won't submit this data."
+                ).format(name)
+            )
         filtered += match
     return filtered


 def take_result_median(result_json, expected_runs):
     median_results = []
-    for k, g in itertools.groupby(result_json, lambda x: x['testcase']):
+    for k, g in itertools.groupby(result_json, lambda x: x["testcase"]):
         group = list(g)
         if len(group) != expected_runs:
-            print(("Warning: Not enough test data for {},"
-                   " maybe some runs failed?").format(k))
+            print(("Warning: Not enough test data for {}, maybe some runs failed?").format(k))

         median_result = {}
         for k, _ in group[0].items():
@@ -227,8 +227,7 @@ def take_result_median(result_json, expected_runs):
                 median_result[k] = group[0][k]
             else:
                 try:
-                    median_result[k] = median([x[k] for x in group
-                                               if x[k] is not None])
+                    median_result[k] = median([x[k] for x in group if x[k] is not None])
                 except StatisticsError:
                     median_result[k] = -1
         median_results.append(median_result)
@@ -236,72 +235,65 @@ def take_result_median(result_json, expected_runs):


 def save_result_json(results, filename, manifest, expected_runs, base):

     results = filter_result_by_manifest(results, manifest, base)
     results = take_result_median(results, expected_runs)

     if len(results) == 0:
-        with open(filename, 'w') as f:
-            json.dump("No test result found in the log. All tests timeout?",
-                      f, indent=2)
+        with open(filename, "w") as f:
+            json.dump("No test result found in the log. All tests timeout?", f, indent=2)
     else:
-        with open(filename, 'w') as f:
+        with open(filename, "w") as f:
             json.dump(results, f, indent=2)
     print("Result saved to {}".format(filename))


 def save_result_csv(results, filename, manifest, expected_runs, base):

     fieldnames = [
-        'system',
-        'machine',
-        'date',
-        'testcase',
-        'title',
-        'connectEnd',
-        'connectStart',
-        'domComplete',
-        'domContentLoadedEventEnd',
-        'domContentLoadedEventStart',
-        'domInteractive',
-        'domLoading',
-        'domainLookupEnd',
-        'domainLookupStart',
-        'fetchStart',
-        'loadEventEnd',
-        'loadEventStart',
-        'navigationStart',
-        'redirectEnd',
-        'redirectStart',
-        'requestStart',
-        'responseEnd',
-        'responseStart',
-        'secureConnectionStart',
-        'unloadEventEnd',
-        'unloadEventStart',
+        "system",
+        "machine",
+        "date",
+        "testcase",
+        "title",
+        "connectEnd",
+        "connectStart",
+        "domComplete",
+        "domContentLoadedEventEnd",
+        "domContentLoadedEventStart",
+        "domInteractive",
+        "domLoading",
+        "domainLookupEnd",
+        "domainLookupStart",
+        "fetchStart",
+        "loadEventEnd",
+        "loadEventStart",
+        "navigationStart",
+        "redirectEnd",
+        "redirectStart",
+        "requestStart",
+        "responseEnd",
+        "responseStart",
+        "secureConnectionStart",
+        "unloadEventEnd",
+        "unloadEventStart",
     ]

-    successes = [r for r in results if r['domComplete'] != -1]
+    successes = [r for r in results if r["domComplete"] != -1]

-    with open(filename, 'w', encoding='utf-8') as csvfile:
+    with open(filename, "w", encoding="utf-8") as csvfile:
         writer = csv.DictWriter(csvfile, fieldnames)
         writer.writeheader()
         writer.writerows(successes)


 def format_result_summary(results):
-    failures = list(filter(lambda x: x['domComplete'] == -1, results))
+    failures = list(filter(lambda x: x["domComplete"] == -1, results))
     result_log = """
 ========================================
 Total {total} tests; {suc} succeeded, {fail} failed.

 Failure summary:
-""".format(
-        total=len(results),
-        suc=len(list(filter(lambda x: x['domComplete'] != -1, results))),
-        fail=len(failures)
-    )
-    uniq_failures = list(set(map(lambda x: x['testcase'], failures)))
+""".format(total=len(results), suc=len(list(filter(lambda x: x["domComplete"] != -1, results))), fail=len(failures))
+    uniq_failures = list(set(map(lambda x: x["testcase"], failures)))
     for failure in uniq_failures:
         result_log += "  - {}\n".format(failure)

@@ -311,40 +303,40 @@ Failure summary:


 def main():
-    parser = argparse.ArgumentParser(
-        description="Run page load test on servo"
-    )
-    parser.add_argument("tp5_manifest",
-                        help="the test manifest in tp5 format")
-    parser.add_argument("output_file",
-                        help="filename for the output json")
-    parser.add_argument("--base",
-                        type=str,
-                        default='http://localhost:8000/',
-                        help="the base URL for tests. Default: http://localhost:8000/")
-    parser.add_argument("--runs",
-                        type=int,
-                        default=20,
-                        help="number of runs for each test case. Defult: 20")
-    parser.add_argument("--timeout",
-                        type=int,
-                        default=300,  # 5 min
-                        help=("kill the test if not finished in time (sec)."
-                              " Default: 5 min"))
-    parser.add_argument("--date",
-                        type=str,
-                        default=None,  # 5 min
-                        help=("the date to use in the CSV file."))
-    parser.add_argument("--engine",
-                        type=str,
-                        default='servo',
-                        help=("The engine to run the tests on. Currently only"
-                              " servo and gecko are supported."))
+    parser = argparse.ArgumentParser(description="Run page load test on servo")
+    parser.add_argument("tp5_manifest", help="the test manifest in tp5 format")
+    parser.add_argument("output_file", help="filename for the output json")
+    parser.add_argument(
+        "--base",
+        type=str,
+        default="http://localhost:8000/",
+        help="the base URL for tests. Default: http://localhost:8000/",
+    )
+    parser.add_argument("--runs", type=int, default=20, help="number of runs for each test case. Defult: 20")
+    parser.add_argument(
+        "--timeout",
+        type=int,
+        default=300,  # 5 min
+        help=("kill the test if not finished in time (sec). Default: 5 min"),
+    )
+    parser.add_argument(
+        "--date",
+        type=str,
+        default=None,  # 5 min
+        help=("the date to use in the CSV file."),
+    )
+    parser.add_argument(
+        "--engine",
+        type=str,
+        default="servo",
+        help=("The engine to run the tests on. Currently only servo and gecko are supported."),
+    )
     args = parser.parse_args()
-    if args.engine == 'servo':
+    if args.engine == "servo":
         run_test = run_servo_test
-    elif args.engine == 'gecko':
+    elif args.engine == "gecko":
         import gecko_driver  # Load this only when we need gecko test

         run_test = gecko_driver.run_gecko_test
     date = args.date or DATE
     try:
@@ -354,9 +346,7 @@ def main():
         for testcase, is_async in testcases:
             url = testcase_url(args.base, testcase)
             for run in range(args.runs):
-                print("Running test {}/{} on {}".format(run + 1,
-                                                        args.runs,
-                                                        url))
+                print("Running test {}/{} on {}".format(run + 1, args.runs, url))
                 # results will be a mixure of timings dict and testcase strings
                 # testcase string indicates a failed test
                 results += run_test(testcase, url, date, args.timeout, is_async)
@@ -364,7 +354,7 @@ def main():
     # TODO: Record and analyze other performance.timing properties

     print(format_result_summary(results))
-    if args.output_file.endswith('.csv'):
+    if args.output_file.endswith(".csv"):
         save_result_csv(results, args.output_file, testcases, args.runs, args.base)
     else:
         save_result_json(results, args.output_file, testcases, args.runs, args.base)
@@ -10,13 +10,14 @@ import boto3

 def main():
     parser = argparse.ArgumentParser(
-        description=("Set the policy of the servo-perf bucket. "
-                     "Remember to set your S3 credentials "
-                     "https://github.com/boto/boto3"))
+        description=(
+            "Set the policy of the servo-perf bucket. Remember to set your S3 credentials https://github.com/boto/boto3"
+        )
+    )
     parser.parse_args()

-    s3 = boto3.resource('s3')
-    BUCKET = 'servo-perf'
+    s3 = boto3.resource("s3")
+    BUCKET = "servo-perf"
     POLICY = """{
 "Version":"2012-10-17",
 "Statement":[
|
@ -11,8 +11,7 @@ import operator
|
||||||
import os
|
import os
|
||||||
import random
|
import random
|
||||||
import string
|
import string
|
||||||
from thclient import (TreeherderClient, TreeherderResultSetCollection,
|
from thclient import TreeherderClient, TreeherderResultSetCollection, TreeherderJobCollection
|
||||||
TreeherderJobCollection)
|
|
||||||
import time
|
import time
|
||||||
|
|
||||||
from runner import format_result_summary
|
from runner import format_result_summary
|
||||||
|
@@ -24,33 +23,28 @@ def geometric_mean(iterable):


 def format_testcase_name(name):
-    temp = name.replace('http://localhost:8000/page_load_test/', '')
-    temp = temp.replace('http://localhost:8000/tp6/', '')
-    temp = temp.split('/')[0]
+    temp = name.replace("http://localhost:8000/page_load_test/", "")
+    temp = temp.replace("http://localhost:8000/tp6/", "")
+    temp = temp.split("/")[0]
     temp = temp[0:80]
     return temp


-def format_perf_data(perf_json, engine='servo'):
+def format_perf_data(perf_json, engine="servo"):
     suites = []
     measurement = "domComplete"  # Change this to an array when we have more

     def get_time_from_nav_start(timings, measurement):
-        return timings[measurement] - timings['navigationStart']
+        return timings[measurement] - timings["navigationStart"]

-    measurementFromNavStart = partial(get_time_from_nav_start,
-                                      measurement=measurement)
+    measurementFromNavStart = partial(get_time_from_nav_start, measurement=measurement)

-    if (engine == 'gecko'):
-        name = 'gecko.{}'.format(measurement)
+    if engine == "gecko":
+        name = "gecko.{}".format(measurement)
     else:
         name = measurement

-    suite = {
-        "name": name,
-        "value": geometric_mean(map(measurementFromNavStart, perf_json)),
-        "subtests": []
-    }
+    suite = {"name": name, "value": geometric_mean(map(measurementFromNavStart, perf_json)), "subtests": []}
     for testcase in perf_json:
         if measurementFromNavStart(testcase) < 0:
             value = -1
@@ -58,10 +52,7 @@ def format_perf_data(perf_json, engine='servo'):
         else:
             value = measurementFromNavStart(testcase)

-        suite["subtests"].append({
-            "name": format_testcase_name(testcase["testcase"]),
-            "value": value
-        })
+        suite["subtests"].append({"name": format_testcase_name(testcase["testcase"]), "value": value})

     suites.append(suite)

@@ -69,7 +60,7 @@ def format_perf_data(perf_json, engine='servo'):
         "performance_data": {
             # https://bugzilla.mozilla.org/show_bug.cgi?id=1271472
             "framework": {"name": "servo-perf"},
-            "suites": suites
+            "suites": suites,
         }
     }

@@ -82,20 +73,20 @@ def create_resultset_collection(dataset):
     for data in dataset:
         trs = trsc.get_resultset()

-        trs.add_push_timestamp(data['push_timestamp'])
-        trs.add_revision(data['revision'])
-        trs.add_author(data['author'])
+        trs.add_push_timestamp(data["push_timestamp"])
+        trs.add_revision(data["revision"])
+        trs.add_author(data["author"])
         # TODO: figure out where type is used
         # trs.add_type(data['type'])

         revisions = []
-        for rev in data['revisions']:
+        for rev in data["revisions"]:
             tr = trs.get_revision()

-            tr.add_revision(rev['revision'])
-            tr.add_author(rev['author'])
-            tr.add_comment(rev['comment'])
-            tr.add_repository(rev['repository'])
+            tr.add_revision(rev["revision"])
+            tr.add_author(rev["author"])
+            tr.add_comment(rev["comment"])
+            tr.add_repository(rev["repository"])

             revisions.append(tr)
         trs.add_revisions(revisions)
@@ -114,46 +105,42 @@ def create_job_collection(dataset):
     for data in dataset:
         tj = tjc.get_job()

-        tj.add_revision(data['revision'])
-        tj.add_project(data['project'])
-        tj.add_coalesced_guid(data['job']['coalesced'])
-        tj.add_job_guid(data['job']['job_guid'])
-        tj.add_job_name(data['job']['name'])
-        tj.add_job_symbol(data['job']['job_symbol'])
-        tj.add_group_name(data['job']['group_name'])
-        tj.add_group_symbol(data['job']['group_symbol'])
-        tj.add_description(data['job']['desc'])
-        tj.add_product_name(data['job']['product_name'])
-        tj.add_state(data['job']['state'])
-        tj.add_result(data['job']['result'])
-        tj.add_reason(data['job']['reason'])
-        tj.add_who(data['job']['who'])
-        tj.add_tier(data['job']['tier'])
-        tj.add_submit_timestamp(data['job']['submit_timestamp'])
-        tj.add_start_timestamp(data['job']['start_timestamp'])
-        tj.add_end_timestamp(data['job']['end_timestamp'])
-        tj.add_machine(data['job']['machine'])
+        tj.add_revision(data["revision"])
+        tj.add_project(data["project"])
+        tj.add_coalesced_guid(data["job"]["coalesced"])
+        tj.add_job_guid(data["job"]["job_guid"])
+        tj.add_job_name(data["job"]["name"])
+        tj.add_job_symbol(data["job"]["job_symbol"])
+        tj.add_group_name(data["job"]["group_name"])
+        tj.add_group_symbol(data["job"]["group_symbol"])
+        tj.add_description(data["job"]["desc"])
+        tj.add_product_name(data["job"]["product_name"])
+        tj.add_state(data["job"]["state"])
+        tj.add_result(data["job"]["result"])
+        tj.add_reason(data["job"]["reason"])
+        tj.add_who(data["job"]["who"])
+        tj.add_tier(data["job"]["tier"])
+        tj.add_submit_timestamp(data["job"]["submit_timestamp"])
+        tj.add_start_timestamp(data["job"]["start_timestamp"])
+        tj.add_end_timestamp(data["job"]["end_timestamp"])
+        tj.add_machine(data["job"]["machine"])

         tj.add_build_info(
-            data['job']['build_platform']['os_name'],
-            data['job']['build_platform']['platform'],
-            data['job']['build_platform']['architecture']
+            data["job"]["build_platform"]["os_name"],
+            data["job"]["build_platform"]["platform"],
+            data["job"]["build_platform"]["architecture"],
         )

         tj.add_machine_info(
-            data['job']['machine_platform']['os_name'],
-            data['job']['machine_platform']['platform'],
-            data['job']['machine_platform']['architecture']
+            data["job"]["machine_platform"]["os_name"],
+            data["job"]["machine_platform"]["platform"],
+            data["job"]["machine_platform"]["architecture"],
         )

-        tj.add_option_collection(data['job']['option_collection'])
+        tj.add_option_collection(data["job"]["option_collection"])

-        for artifact_data in data['job']['artifacts']:
-            tj.add_artifact(
-                artifact_data['name'],
-                artifact_data['type'],
-                artifact_data['blob']
-            )
+        for artifact_data in data["job"]["artifacts"]:
+            tj.add_artifact(artifact_data["name"], artifact_data["type"], artifact_data["blob"])
         tjc.add(tj)

     return tjc
@@ -161,30 +148,28 @@ def create_job_collection(dataset):

 # TODO: refactor this big function to smaller chunks
 def submit(perf_data, failures, revision, summary, engine):

     print("[DEBUG] failures:")
-    print(list(map(lambda x: x['testcase'], failures)))
+    print(list(map(lambda x: x["testcase"], failures)))

-    author = "{} <{}>".format(revision['author']['name'],
-                              revision['author']['email'])
+    author = "{} <{}>".format(revision["author"]["name"], revision["author"]["email"])

     dataset = [
         {
             # The top-most revision in the list of commits for a push.
-            'revision': revision['commit'],
-            'author': author,
-            'push_timestamp': int(revision['author']['timestamp']),
-            'type': 'push',
+            "revision": revision["commit"],
+            "author": author,
+            "push_timestamp": int(revision["author"]["timestamp"]),
+            "type": "push",
             # a list of revisions associated with the resultset. There should
             # be at least one.
-            'revisions': [
+            "revisions": [
                 {
-                    'comment': revision['subject'],
-                    'revision': revision['commit'],
-                    'repository': 'servo',
-                    'author': author
+                    "comment": revision["subject"],
+                    "revision": revision["commit"],
+                    "repository": "servo",
+                    "author": author,
                 }
-            ]
+            ],
         }
     ]

@@ -195,158 +180,129 @@ def submit(perf_data, failures, revision, summary, engine):
     # if len(failures) > 0:
     #     result = "testfailed"

-    hashlen = len(revision['commit'])
-    job_guid = ''.join(
-        random.choice(string.ascii_letters + string.digits) for i in range(hashlen)
-    )
+    hashlen = len(revision["commit"])
+    job_guid = "".join(random.choice(string.ascii_letters + string.digits) for i in range(hashlen))

-    if (engine == "gecko"):
+    if engine == "gecko":
         project = "servo"
-        job_symbol = 'PLG'
-        group_symbol = 'SPG'
-        group_name = 'Servo Perf on Gecko'
+        job_symbol = "PLG"
+        group_symbol = "SPG"
+        group_name = "Servo Perf on Gecko"
     else:
         project = "servo"
-        job_symbol = 'PL'
-        group_symbol = 'SP'
-        group_name = 'Servo Perf'
+        job_symbol = "PL"
+        group_symbol = "SP"
+        group_name = "Servo Perf"

     dataset = [
         {
-            'project': project,
-            'revision': revision['commit'],
-            'job': {
-                'job_guid': job_guid,
-                'product_name': project,
-                'reason': 'scheduler',
+            "project": project,
+            "revision": revision["commit"],
+            "job": {
+                "job_guid": job_guid,
+                "product_name": project,
+                "reason": "scheduler",
                 # TODO: What is `who` for?
-                'who': 'Servo',
-                'desc': 'Servo Page Load Time Tests',
-                'name': 'Servo Page Load Time',
+                "who": "Servo",
+                "desc": "Servo Page Load Time Tests",
+                "name": "Servo Page Load Time",
                 # The symbol representing the job displayed in
                 # treeherder.allizom.org
-                'job_symbol': job_symbol,
+                "job_symbol": job_symbol,

                 # The symbol representing the job group in
                 # treeherder.allizom.org
-                'group_symbol': group_symbol,
-                'group_name': group_name,
+                "group_symbol": group_symbol,
+                "group_name": group_name,

                 # TODO: get the real timing from the test runner
-                'submit_timestamp': str(int(time.time())),
-                'start_timestamp': str(int(time.time())),
-                'end_timestamp': str(int(time.time())),
-
-                'state': 'completed',
-                'result': result,  # "success" or "testfailed"
-
-                'machine': 'local-machine',
+                "submit_timestamp": str(int(time.time())),
+                "start_timestamp": str(int(time.time())),
+                "end_timestamp": str(int(time.time())),
+                "state": "completed",
+                "result": result,  # "success" or "testfailed"
+                "machine": "local-machine",
                 # TODO: read platform from test result
-                'build_platform': {
-                    'platform': 'linux64',
-                    'os_name': 'linux',
-                    'architecture': 'x86_64'
-                },
-                'machine_platform': {
-                    'platform': 'linux64',
-                    'os_name': 'linux',
-                    'architecture': 'x86_64'
-                },
-
-                'option_collection': {'opt': True},
-
+                "build_platform": {"platform": "linux64", "os_name": "linux", "architecture": "x86_64"},
+                "machine_platform": {"platform": "linux64", "os_name": "linux", "architecture": "x86_64"},
+                "option_collection": {"opt": True},
                 # jobs can belong to different tiers
                 # setting the tier here will determine which tier the job
                 # belongs to. However, if a job is set as Tier of 1, but
                 # belongs to the Tier 2 profile on the server, it will still
                 # be saved as Tier 2.
-                'tier': 1,
+                "tier": 1,

                 # the ``name`` of the log can be the default of "buildbot_text"
                 # however, you can use a custom name. See below.
                 # TODO: point this to the log when we have them uploaded to S3
-                'log_references': [
-                    {
-                        'url': 'TBD',
-                        'name': 'test log'
-                    }
-                ],
+                "log_references": [{"url": "TBD", "name": "test log"}],
                 # The artifact can contain any kind of structured data
                 # associated with a test.
-                'artifacts': [
+                "artifacts": [
                     {
-                        'type': 'json',
-                        'name': 'performance_data',
+                        "type": "json",
+                        "name": "performance_data",
                         # TODO: include the job_guid when the runner actually
                         # generates one
                         # 'job_guid': job_guid,
-                        'blob': perf_data
+                        "blob": perf_data,
                     },
                     {
-                        'type': 'json',
-                        'name': 'Job Info',
+                        "type": "json",
+                        "name": "Job Info",
                         # 'job_guid': job_guid,
                         "blob": {
-                            "job_details": [
-                                {
-                                    "content_type": "raw_html",
-                                    "title": "Result Summary",
-                                    "value": summary
-                                }
-                            ]
-                        }
-                    }
+                            "job_details": [{"content_type": "raw_html", "title": "Result Summary", "value": summary}]
+                        },
+                    },
                 ],
                 # List of job guids that were coalesced to this job
-                'coalesced': []
-            }
+                "coalesced": [],
+            },
         }
     ]

     tjc = create_job_collection(dataset)

     # TODO: extract this read credential code out of this function.
-    cred = {
-        'client_id': os.environ['TREEHERDER_CLIENT_ID'],
-        'secret': os.environ['TREEHERDER_CLIENT_SECRET']
-    }
+    cred = {"client_id": os.environ["TREEHERDER_CLIENT_ID"], "secret": os.environ["TREEHERDER_CLIENT_SECRET"]}

-    client = TreeherderClient(server_url='https://treeherder.mozilla.org',
-                              client_id=cred['client_id'],
-                              secret=cred['secret'])
+    client = TreeherderClient(
+        server_url="https://treeherder.mozilla.org", client_id=cred["client_id"], secret=cred["secret"]
+    )

     # data structure validation is automatically performed here, if validation
     # fails a TreeherderClientError is raised
-    client.post_collection('servo', trsc)
-    client.post_collection('servo', tjc)
+    client.post_collection("servo", trsc)
+    client.post_collection("servo", tjc)


 def main():
     parser = argparse.ArgumentParser(
-        description=("Submit Servo performance data to Perfherder. "
-                     "Remember to set your Treeherder credential as environment"
-                     " variable \'TREEHERDER_CLIENT_ID\' and "
-                     "\'TREEHERDER_CLIENT_SECRET\'"))
-    parser.add_argument("perf_json",
-                        help="the output json from runner")
-    parser.add_argument("revision_json",
-                        help="the json containing the servo revision data")
-    parser.add_argument("--engine",
-                        type=str,
-                        default='servo',
-                        help=("The engine to run the tests on. Currently only"
-                              " servo and gecko are supported."))
+        description=(
+            "Submit Servo performance data to Perfherder. "
+            "Remember to set your Treeherder credential as environment"
+            " variable 'TREEHERDER_CLIENT_ID' and "
+            "'TREEHERDER_CLIENT_SECRET'"
+        )
+    )
+    parser.add_argument("perf_json", help="the output json from runner")
+    parser.add_argument("revision_json", help="the json containing the servo revision data")
+    parser.add_argument(
+        "--engine",
+        type=str,
+        default="servo",
+        help=("The engine to run the tests on. Currently only servo and gecko are supported."),
+    )
     args = parser.parse_args()

-    with open(args.perf_json, 'r') as f:
+    with open(args.perf_json, "r") as f:
         result_json = json.load(f)

-    with open(args.revision_json, 'r') as f:
+    with open(args.revision_json, "r") as f:
         revision = json.load(f)

     perf_data = format_perf_data(result_json, args.engine)
-    failures = list(filter(lambda x: x['domComplete'] == -1, result_json))
-    summary = format_result_summary(result_json).replace('\n', '<br/>')
+    failures = list(filter(lambda x: x["domComplete"] == -1, result_json))
+    summary = format_result_summary(result_json).replace("\n", "<br/>")

     submit(perf_data, failures, revision, summary, args.engine)
     print("Done!")
@@ -10,17 +10,16 @@ import boto3

 def main():
     parser = argparse.ArgumentParser(
-        description=("Submit Servo performance data to S3. "
-                     "Remember to set your S3 credentials "
-                     "https://github.com/boto/boto3"))
-    parser.add_argument("perf_file",
-                        help="the output CSV file from runner")
-    parser.add_argument("perf_key",
-                        help="the S3 key to upload to")
+        description=(
+            "Submit Servo performance data to S3. Remember to set your S3 credentials https://github.com/boto/boto3"
+        )
+    )
+    parser.add_argument("perf_file", help="the output CSV file from runner")
+    parser.add_argument("perf_key", help="the S3 key to upload to")
     args = parser.parse_args()

-    s3 = boto3.client('s3')
-    BUCKET = 'servo-perf'
+    s3 = boto3.client("s3")
+    BUCKET = "servo-perf"
     s3.upload_file(args.perf_file, BUCKET, args.perf_key)

     print("Done!")
@@ -16,16 +16,16 @@ args = parser.parse_args()


 def load_data(filename):
-    with open(filename, 'r') as f:
+    with open(filename, "r") as f:
         results = {}
         totals = {}
         counts = {}
         records = json.load(f)
         for record in records:
-            key = record.get('testcase')
-            value = record.get('domComplete') - record.get('domLoading')
-            totals[key] = totals.get('key', 0) + value
-            counts[key] = counts.get('key', 0) + 1
+            key = record.get("testcase")
+            value = record.get("domComplete") - record.get("domLoading")
+            totals[key] = totals.get("key", 0) + value
+            counts[key] = counts.get("key", 0) + 1
             results[key] = round(totals[key] / counts[key])
         return results

@ -34,10 +34,10 @@ data1 = load_data(args.file1)
|
||||||
data2 = load_data(args.file2)
|
data2 = load_data(args.file2)
|
||||||
keys = set(data1.keys()).union(data2.keys())
|
keys = set(data1.keys()).union(data2.keys())
|
||||||
|
|
||||||
BLUE = '\033[94m'
|
BLUE = "\033[94m"
|
||||||
GREEN = '\033[92m'
|
GREEN = "\033[92m"
|
||||||
WARNING = '\033[93m'
|
WARNING = "\033[93m"
|
||||||
END = '\033[0m'
|
END = "\033[0m"
|
||||||
|
|
||||||
|
|
||||||
total1 = 0
|
total1 = 0
|
||||||
|
|
|
@ -10,7 +10,7 @@ import pytest


def test_log_parser():
    mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
    mock_log = b'''
    mock_log = b"""
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
[PERF],navigationStart,1460358376
@ -36,9 +36,10 @@ def test_log_parser():
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
"""

    expected = [{
    expected = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "navigationStart": 1460358376,
            "unloadEventStart": None,
@ -60,14 +61,15 @@ Shutting down the Constellation after generating an output file or exit flag spe
            "domContentLoadedEventEnd": 1460358388000,
            "domComplete": 1460358389000,
            "loadEventStart": None,
            "loadEventEnd": None
            "loadEventEnd": None,
    }]
        }
    ]
    result = runner.parse_log(mock_log, mock_url)
    assert (expected == list(result))
    assert expected == list(result)


def test_log_parser_complex():
    mock_log = b'''
    mock_log = b"""
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/content.html
[PERF],navigationStart,1460358300
@ -119,9 +121,10 @@ Some other js error logs here
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
"""
    mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
    expected = [{
    expected = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "navigationStart": 1460358376,
            "unloadEventStart": None,
@ -143,14 +146,15 @@ Shutting down the Constellation after generating an output file or exit flag spe
            "domContentLoadedEventEnd": 1460358388000,
            "domComplete": 1460358389000,
            "loadEventStart": None,
            "loadEventEnd": None
            "loadEventEnd": None,
    }]
        }
    ]
    result = runner.parse_log(mock_log, mock_url)
    assert (expected == list(result))
    assert expected == list(result)


def test_log_parser_empty():
    mock_log = b'''
    mock_log = b"""
[PERF] perf block start
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
@ -158,10 +162,11 @@ def test_log_parser_empty():
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF] perf block end
'''
"""
    mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"

    expected = [{
    expected = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "title": "",
            "navigationStart": 0,
@ -184,17 +189,19 @@ def test_log_parser_empty():
            "domContentLoadedEventEnd": -1,
            "domComplete": -1,
            "loadEventStart": -1,
            "loadEventEnd": -1
            "loadEventEnd": -1,
    }]
        }
    ]
    result = runner.parse_log(mock_log, mock_testcase)
    assert (expected == list(result))
    assert expected == list(result)


def test_log_parser_error():
    mock_log = b'Nothing here! Test failed!'
    mock_log = b"Nothing here! Test failed!"
    mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"

    expected = [{
    expected = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "title": "",
            "navigationStart": 0,
@ -217,16 +224,17 @@ def test_log_parser_error():
            "domContentLoadedEventEnd": -1,
            "domComplete": -1,
            "loadEventStart": -1,
            "loadEventEnd": -1
            "loadEventEnd": -1,
    }]
        }
    ]
    result = runner.parse_log(mock_log, mock_testcase)
    assert (expected == list(result))
    assert expected == list(result)


def test_log_parser_bad_testcase_name():
    mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
    # Notice the testcase is about:blank, servo crashed
    mock_log = b'''
    mock_log = b"""
[PERF] perf block start
[PERF],testcase,about:blank
[PERF],navigationStart,1460358376
@ -252,9 +260,10 @@ def test_log_parser_bad_testcase_name():
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
"""

    expected = [{
    expected = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "title": "",
            "navigationStart": 0,
@ -277,157 +286,170 @@ Shutting down the Constellation after generating an output file or exit flag spe
            "domContentLoadedEventEnd": -1,
            "domComplete": -1,
            "loadEventStart": -1,
            "loadEventEnd": -1
            "loadEventEnd": -1,
    }]
        }
    ]
    result = runner.parse_log(mock_log, mock_testcase)
    assert (expected == list(result))
    assert expected == list(result)


def test_manifest_loader():
    text = '''
    text = """
http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html
http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html

http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
# Disabled! http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
'''
"""
    expected = [
        ("http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html", False),
        ("http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html", False),
        ("http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html", False)
        ("http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html", False),
    ]
    assert (expected == list(runner.parse_manifest(text)))
    assert expected == list(runner.parse_manifest(text))


def test_manifest_loader_async():
    text = '''
    text = """
http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html
async http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html
'''
"""
    expected = [
        ("http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html", False),
        ("http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html", True),
    ]
    assert (expected == list(runner.parse_manifest(text)))
    assert expected == list(runner.parse_manifest(text))


def test_filter_result_by_manifest():
    input_json = [{
    input_json = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
            "domComplete": 1460358389000,
    }, {
        },
        {
            "testcase": "non-existing-html",
            "domComplete": 1460358389000,
    }, {
        },
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": 1460358389000,
    }]
        },

    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": 1460358389000,
    }]

    manifest = [
        ("http://localhost:8000/page_load_test/56.com/www.56.com/index.html", False)
    ]
    ]

    assert (expected == runner.filter_result_by_manifest(input_json, manifest))
    expected = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": 1460358389000,
        }
    ]

    manifest = [("http://localhost:8000/page_load_test/56.com/www.56.com/index.html", False)]

    assert expected == runner.filter_result_by_manifest(input_json, manifest)


def test_filter_result_by_manifest_error():
    input_json = [{
    input_json = [
        {
            "testcase": "1.html",
            "domComplete": 1460358389000,
    }]
        }
    ]

    manifest = [
        ("1.html", False),
        ("2.html", False)
    ]
    manifest = [("1.html", False), ("2.html", False)]

    with pytest.raises(Exception) as execinfo:
        runner.filter_result_by_manifest(input_json, manifest)
    assert "Missing test result" in str(execinfo.value)


def test_take_result_median_odd():
    input_json = [{
    input_json = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": 1460358389001,
            "domLoading": 1460358380002
            "domLoading": 1460358380002,
    }, {
        },
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": 1460358389002,
            "domLoading": 1460358380001
            "domLoading": 1460358380001,
    }, {
        },
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": 1460358389003,
            "domLoading": 1460358380003
            "domLoading": 1460358380003,
    }]
        },
    ]

    expected = [{
    expected = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": 1460358389002,
            "domLoading": 1460358380002
            "domLoading": 1460358380002,
    }]
        }
    ]

    assert (expected == runner.take_result_median(input_json, len(input_json)))
    assert expected == runner.take_result_median(input_json, len(input_json))


def test_take_result_median_even():
    input_json = [{
    input_json = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": 1460358389001,
            "domLoading": 1460358380002
            "domLoading": 1460358380002,
    }, {
        },
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": 1460358389002,
            "domLoading": 1460358380001
            "domLoading": 1460358380001,
    }]
        },
    ]

    expected = [{
    expected = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": 1460358389001.5,
            "domLoading": 1460358380001.5
            "domLoading": 1460358380001.5,
    }]
        }
    ]

    assert (expected == runner.take_result_median(input_json, len(input_json)))
    assert expected == runner.take_result_median(input_json, len(input_json))


def test_take_result_median_error():
    input_json = [{
    input_json = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": None,
            "domLoading": 1460358380002
            "domLoading": 1460358380002,
    }, {
        },
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": 1460358389002,
            "domLoading": 1460358380001
            "domLoading": 1460358380001,
    }]
        },
    ]

    expected = [{
    expected = [
        {
            "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
            "domComplete": 1460358389002,
            "domLoading": 1460358380001.5
            "domLoading": 1460358380001.5,
    }]
        }
    ]

    assert (expected == runner.take_result_median(input_json, len(input_json)))
    assert expected == runner.take_result_median(input_json, len(input_json))


def test_log_result():
    results = [{
    results = [
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": -1
    }, {
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "domComplete": -1
    }, {
        "testcase": "http://localhost:8000/page_load_test/104.com/www.104.com/index.html",
        "domComplete": 123456789
    }]
        {"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html", "domComplete": -1},
        {"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html", "domComplete": -1},
        {"testcase": "http://localhost:8000/page_load_test/104.com/www.104.com/index.html", "domComplete": 123456789},
    ]

    expected = """
========================================
@ -437,4 +459,4 @@ Failure summary:
 - http://localhost:8000/page_load_test/56.com/www.56.com/index.html
========================================
"""
    assert (expected == runner.format_result_summary(results))
    assert expected == runner.format_result_summary(results)

@ -8,18 +8,18 @@ import submit_to_perfherder


def test_format_testcase_name():
    assert ('about:blank' == submit_to_perfherder.format_testcase_name(
    assert "about:blank" == submit_to_perfherder.format_testcase_name("about:blank")
        'about:blank'))
    assert "163.com" == submit_to_perfherder.format_testcase_name(
    assert ('163.com' == submit_to_perfherder.format_testcase_name((
        ("http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html")
        'http://localhost:8000/page_load_test/163.com/p.mail.163.com/'
    )
        'mailinfo/shownewmsg_www_1222.htm.html')))
    assert (
    assert (('1234567890223456789032345678904234567890'
        "12345678902234567890323456789042345678905234567890623456789072345678908234567890"
             '5234567890623456789072345678908234567890')
    ) == submit_to_perfherder.format_testcase_name(
            == submit_to_perfherder.format_testcase_name((
        ("123456789022345678903234567890423456789052345678906234567890723456789082345678909234567890")
                '1234567890223456789032345678904234567890'
    )
                '52345678906234567890723456789082345678909234567890')))
    assert "news.ycombinator.com" == submit_to_perfherder.format_testcase_name(
    assert ('news.ycombinator.com' == submit_to_perfherder.format_testcase_name(
        "http://localhost:8000/tp6/news.ycombinator.com/index.html"
        'http://localhost:8000/tp6/news.ycombinator.com/index.html'))
    )


def test_format_perf_data():

@ -46,7 +46,7 @@ def test_format_perf_data():
            "unloadEventEnd": None,
            "responseEnd": None,
            "testcase": "about:blank",
            "domComplete": 1460444931000
            "domComplete": 1460444931000,
        },
        {
            "unloadEventStart": None,
@ -69,11 +69,11 @@ def test_format_perf_data():
            "domainLookupEnd": None,
            "unloadEventEnd": None,
            "responseEnd": None,
            "testcase": ("http://localhost:8000/page_load_test/163.com/"
            "testcase": (
                         "p.mail.163.com/mailinfo/"
                "http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html"
                         "shownewmsg_www_1222.htm.html"),
            ),
            "domComplete": 1460444948000
            "domComplete": 1460444948000,
        }
        },
    ]

    expected = {

@ -84,33 +84,27 @@ def test_format_perf_data():
                    "name": "domComplete",
                    "value": 3741.657386773941,
                    "subtests": [
                        {"name": "about:blank",
                        {"name": "about:blank", "value": 1000},
                         "value": 1000},
                        {"name": "163.com", "value": 14000},
                        {"name": "163.com",
                    ],
                         "value": 14000},
                    ]
                }
            ]
            ],
        }
    }
    result = submit_to_perfherder.format_perf_data(mock_result)
    assert (expected == result)
    assert expected == result


def test_format_bad_perf_data():
    mock_result = [
        {
        {"navigationStart": 1460444930000, "testcase": "about:blank", "domComplete": 0},
            "navigationStart": 1460444930000,
            "testcase": "about:blank",
            "domComplete": 0
        },
        {
            "navigationStart": 1460444934000,
            "testcase": ("http://localhost:8000/page_load_test/163.com/"
            "testcase": (
                         "p.mail.163.com/mailinfo/"
                "http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html"
                         "shownewmsg_www_1222.htm.html"),
            ),
            "domComplete": 1460444948000
            "domComplete": 1460444948000,
        }
        },
    ]

    expected = {

@ -121,14 +115,12 @@ def test_format_bad_perf_data():
                    "name": "domComplete",
                    "value": 14000.0,
                    "subtests": [
                        {"name": "about:blank",
                        {"name": "about:blank", "value": -1},  # Timeout
                         "value": -1},  # Timeout
                        {"name": "163.com", "value": 14000},
                        {"name": "163.com",
                    ],
                         "value": 14000},
                    ]
                }
            ]
            ],
        }
    }
    result = submit_to_perfherder.format_perf_data(mock_result)
    assert (expected == result)
    assert expected == result

@ -37,7 +37,7 @@ class Item:
    def from_result(cls, result: dict, title: Optional[str] = None, print_stack=True):
        expected = result["expected"]
        actual = result["actual"]
        title = title if title else f'`{result["path"]}`'
        title = title if title else f"`{result['path']}`"
        if expected != actual:
            title = f"{actual} [expected {expected}] {title}"
        else:
@ -45,8 +45,7 @@ class Item:

        issue_url = "http://github.com/servo/servo/issues/"
        if "issues" in result and result["issues"]:
            issues = ", ".join([f"[#{issue}]({issue_url}{issue})"
            issues = ", ".join([f"[#{issue}]({issue_url}{issue})" for issue in result["issues"]])
                                for issue in result["issues"]])
            title += f" ({issues})"

        stack = result["stack"] if result["stack"] and print_stack else ""
@ -59,8 +58,9 @@ class Item:
            cls.from_result(
                subtest_result,
                f"subtest: `{subtest_result['subtest']}`"
                + (f" \n```\n{subtest_result['message']}\n```\n" if subtest_result['message'] else ""),
                + (f" \n```\n{subtest_result['message']}\n```\n" if subtest_result["message"] else ""),
                False)
                False,
            )
            for subtest_result in subtest_results
        ]
        return cls(title, body, children)

@ -68,10 +68,8 @@ class Item:
|
||||||
def to_string(self, bullet: str = "", indent: str = ""):
|
def to_string(self, bullet: str = "", indent: str = ""):
|
||||||
output = f"{indent}{bullet}{self.title}\n"
|
output = f"{indent}{bullet}{self.title}\n"
|
||||||
if self.body:
|
if self.body:
|
||||||
output += textwrap.indent(f"{self.body}\n",
|
output += textwrap.indent(f"{self.body}\n", " " * len(indent + bullet))
|
||||||
" " * len(indent + bullet))
|
output += "\n".join([child.to_string("• ", indent + " ") for child in self.children])
|
||||||
output += "\n".join([child.to_string("• ", indent + " ")
|
|
||||||
for child in self.children])
|
|
||||||
return output.rstrip().replace("`", "")
|
return output.rstrip().replace("`", "")
|
||||||
|
|
||||||
def to_html(self, level: int = 0) -> ElementTree.Element:
|
def to_html(self, level: int = 0) -> ElementTree.Element:
|
||||||
|
@ -88,17 +86,13 @@ class Item:
|
||||||
if self.children:
|
if self.children:
|
||||||
# Some tests have dozens of failing tests, which overwhelm the
|
# Some tests have dozens of failing tests, which overwhelm the
|
||||||
# output. Limit the output for subtests in GitHub comment output.
|
# output. Limit the output for subtests in GitHub comment output.
|
||||||
max_children = len(
|
max_children = len(self.children) if level < 2 else SUBTEST_RESULT_TRUNCATION
|
||||||
self.children) if level < 2 else SUBTEST_RESULT_TRUNCATION
|
|
||||||
if len(self.children) > max_children:
|
if len(self.children) > max_children:
|
||||||
children = self.children[:max_children]
|
children = self.children[:max_children]
|
||||||
children.append(Item(
|
children.append(Item(f"And {len(self.children) - max_children} more unexpected results...", "", []))
|
||||||
f"And {len(self.children) - max_children} more unexpected results...",
|
|
||||||
"", []))
|
|
||||||
else:
|
else:
|
||||||
children = self.children
|
children = self.children
|
||||||
container = ElementTree.SubElement(
|
container = ElementTree.SubElement(result, "div" if not level else "ul")
|
||||||
result, "div" if not level else "ul")
|
|
||||||
for child in children:
|
for child in children:
|
||||||
container.append(child.to_html(level + 1))
|
container.append(child.to_html(level + 1))
|
||||||
|
|
||||||
|
@ -125,17 +119,16 @@ def get_results(filenames: list[str], tag: str = "") -> Optional[Item]:
        return not is_flaky(result) and not result["issues"]

    def add_children(children: List[Item], results: List[dict], filter_func, text):
        filtered = [Item.from_result(result) for result in
        filtered = [Item.from_result(result) for result in filter(filter_func, results)]
                    filter(filter_func, results)]
        if filtered:
            children.append(Item(f"{text} ({len(filtered)})", "", filtered))

    children: List[Item] = []
    add_children(children, unexpected, is_flaky, "Flaky unexpected result")
    add_children(children, unexpected, is_stable_and_known,
    add_children(
                 "Stable unexpected results that are known to be intermittent")
        children, unexpected, is_stable_and_known, "Stable unexpected results that are known to be intermittent"
    add_children(children, unexpected, is_stable_and_unexpected,
    )
                 "Stable unexpected results")
    add_children(children, unexpected, is_stable_and_unexpected, "Stable unexpected results")

    run_url = get_github_run_url()
    text = "Test results"

@ -154,8 +147,8 @@ def get_github_run_url() -> Optional[str]:
        return None
    if "run_id" not in github_context:
        return None
    repository = github_context['repository']
    repository = github_context["repository"]
    run_id = github_context['run_id']
    run_id = github_context["run_id"]
    return f"[#{run_id}](https://github.com/{repository}/actions/runs/{run_id})"


@ -197,14 +190,14 @@ def create_github_reports(body: str, tag: str = ""):
    # This process is based on the documentation here:
    # https://docs.github.com/en/rest/checks/runs?apiVersion=2022-11-28#create-a-check-runs
    results = json.loads(os.environ.get("RESULTS", "{}"))
    if all(r == 'success' for r in results):
    if all(r == "success" for r in results):
        conclusion = 'success'
        conclusion = "success"
    elif "failure" in results:
        conclusion = 'failure'
        conclusion = "failure"
    elif "cancelled" in results:
        conclusion = 'cancelled'
        conclusion = "cancelled"
    else:
        conclusion = 'neutral'
        conclusion = "neutral"

    github_token = os.environ.get("GITHUB_TOKEN")
    github_context = json.loads(os.environ.get("GITHUB_CONTEXT", "{}"))

@ -214,34 +207,42 @@ def create_github_reports(body: str, tag: str = ""):
        return None
    repo = github_context["repository"]
    data = {
        'name': tag,
        "name": tag,
        'head_sha': github_context["sha"],
        "head_sha": github_context["sha"],
        'status': 'completed',
        "status": "completed",
        'started_at': datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
        "started_at": datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
        'conclusion': conclusion,
        "conclusion": conclusion,
        'completed_at': datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
        "completed_at": datetime.utcnow().replace(microsecond=0).isoformat() + "Z",
        'output': {
        "output": {
            'title': f'Aggregated {tag} report',
            "title": f"Aggregated {tag} report",
            'summary': body,
            "summary": body,
            'images': [{'alt': 'WPT logo', 'image_url': 'https://avatars.githubusercontent.com/u/37226233'}]
            "images": [{"alt": "WPT logo", "image_url": "https://avatars.githubusercontent.com/u/37226233"}],
        },
        'actions': [
        "actions": [],
        ]
    }

    subprocess.Popen(["curl", "-L",
    subprocess.Popen(
                      "-X", "POST",
        [
                      "-H", "Accept: application/vnd.github+json",
            "curl",
                      "-H", f"Authorization: Bearer {github_token}",
            "-L",
                      "-H", "X-GitHub-Api-Version: 2022-11-28",
            "-X",
            "POST",
            "-H",
            "Accept: application/vnd.github+json",
            "-H",
            f"Authorization: Bearer {github_token}",
            "-H",
            "X-GitHub-Api-Version: 2022-11-28",
                      f"https://api.github.com/repos/{repo}/check-runs",
            f"https://api.github.com/repos/{repo}/check-runs",
                      "-d", json.dumps(data)]).wait()
            "-d",
            json.dumps(data),
        ]
    ).wait()


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tag", default="wpt", action="store",
    parser.add_argument("--tag", default="wpt", action="store", help="A string tag used to distinguish the results.")
                        help="A string tag used to distinguish the results.")
    args, filenames = parser.parse_known_args()
    results = get_results(filenames, args.tag)
    if not results:

@ -251,14 +252,12 @@ def main():

    print(results.to_string())

    html_string = ElementTree.tostring(
    html_string = ElementTree.tostring(results.to_html(), encoding="unicode")
        results.to_html(), encoding="unicode")
    create_github_reports(html_string, args.tag)

    pr_number = get_pr_number()
    if pr_number:
        process = subprocess.Popen(
        process = subprocess.Popen(["gh", "pr", "comment", pr_number, "-F", "-"], stdin=subprocess.PIPE)
            ['gh', 'pr', 'comment', pr_number, '-F', '-'], stdin=subprocess.PIPE)
        print(process.communicate(input=html_string.encode("utf-8"))[0])
    else:
        print("Could not find PR number in environment. Not making GitHub comment.")

@ -35,6 +35,7 @@ def main(crate=None):
            for dependency in graph.get(name, []):
                filtered.setdefault(name, []).append(dependency)
                traverse(dependency)

        traverse(crate)
    else:
        filtered = graph

@ -42,12 +42,15 @@ import signal
import sys
from argparse import ArgumentParser
from subprocess import Popen, PIPE

try:
    from termcolor import colored
except ImportError:

    def colored(text, *args, **kwargs):
        return text


fields = ["frame.time", "tcp.srcport", "tcp.payload"]


@ -57,10 +60,14 @@ def record_data(file, port):
    # Create tshark command
    cmd = [
        "tshark",
        "-T", "fields",
        "-T",
        "-i", "lo",
        "fields",
        "-d", f"tcp.port=={port},http",
        "-i",
        "-w", file,
        "lo",
        "-d",
        f"tcp.port=={port},http",
        "-w",
        file,
    ] + [e for f in fields for e in ("-e", f)]
    process = Popen(cmd, stdout=PIPE)

@ -84,8 +91,10 @@ def read_data(file):
    # Create tshark command
    cmd = [
        "tshark",
        "-T", "fields",
        "-T",
        "-r", file,
        "fields",
        "-r",
        file,
    ] + [e for f in fields for e in ("-e", f)]
    process = Popen(cmd, stdout=PIPE)

@ -182,7 +191,7 @@ def parse_message(msg, *, json_output=False):
    time, sender, i, data = msg
    from_servo = sender == "Servo"

    colored_sender = colored(sender, 'black', 'on_yellow' if from_servo else 'on_magenta', attrs=['bold'])
    colored_sender = colored(sender, "black", "on_yellow" if from_servo else "on_magenta", attrs=["bold"])
    if not json_output:
        print(f"\n{colored_sender} - {colored(i, 'blue')} - {colored(time, 'dark_grey')}")

@ -199,7 +208,7 @@ def parse_message(msg, *, json_output=False):
            assert False, "Message is neither a request nor a response"
        else:
            if from_servo and "from" in content:
                print(colored(f"Actor: {content['from']}", 'yellow'))
                print(colored(f"Actor: {content['from']}", "yellow"))
            print(json.dumps(content, sort_keys=True, indent=4))
    except json.JSONDecodeError:
        print(f"Warning: Couldn't decode json\n{data}")

@ -236,7 +245,7 @@ if __name__ == "__main__":
    if args.range and len(args.range.split(":")) == 2:
        min, max = args.range.split(":")

        for msg in data[int(min):int(max) + 1]:
        for msg in data[int(min) : int(max) + 1]:
            # Filter the messages if specified
            if not args.filter or args.filter.lower() in msg[3].lower():
                parse_message(msg, json_output=args.json)

@ -21,14 +21,14 @@ def extract_memory_reports(lines):
    report_lines = []
    times = []
    for line in lines:
        if line.startswith('Begin memory reports'):
        if line.startswith("Begin memory reports"):
            in_report = True
            report_lines += [[]]
            times += [line.strip().split()[-1]]
        elif line == 'End memory reports\n':
        elif line == "End memory reports\n":
            in_report = False
        elif in_report:
            if line.startswith('|'):
            if line.startswith("|"):
                report_lines[-1].append(line.strip())
    return (report_lines, times)

@ -38,11 +38,11 @@ def parse_memory_report(lines):
    parents = []
    last_separator_index = None
    for line in lines:
        assert (line[0] == '|')
        assert line[0] == "|"
        line = line[1:]
        if not line:
            continue
        separator_index = line.index('--')
        separator_index = line.index("--")
        if last_separator_index and separator_index <= last_separator_index:
            while parents and parents[-1][1] >= separator_index:
                parents.pop()

|
||||||
amount, unit, _, name = line.split()
|
amount, unit, _, name = line.split()
|
||||||
|
|
||||||
dest_report = reports
|
dest_report = reports
|
||||||
for (parent, index) in parents:
|
for parent, index in parents:
|
||||||
dest_report = dest_report[parent]['children']
|
dest_report = dest_report[parent]["children"]
|
||||||
dest_report[name] = {
|
dest_report[name] = {"amount": amount, "unit": unit, "children": {}}
|
||||||
'amount': amount,
|
|
||||||
'unit': unit,
|
|
||||||
'children': {}
|
|
||||||
}
|
|
||||||
|
|
||||||
parents += [(name, separator_index)]
|
parents += [(name, separator_index)]
|
||||||
last_separator_index = separator_index
|
last_separator_index = separator_index
|
||||||
|
@ -68,24 +64,26 @@ def transform_report_for_test(report):
|
||||||
remaining = list(report.items())
|
remaining = list(report.items())
|
||||||
while remaining:
|
while remaining:
|
||||||
(name, value) = remaining.pop()
|
(name, value) = remaining.pop()
|
||||||
transformed[name] = '%s %s' % (value['amount'], value['unit'])
|
transformed[name] = "%s %s" % (value["amount"], value["unit"])
|
||||||
remaining += map(lambda k_v: (name + '/' + k_v[0], k_v[1]), list(value['children'].items()))
|
remaining += map(lambda k_v: (name + "/" + k_v[0], k_v[1]), list(value["children"].items()))
|
||||||
return transformed
|
return transformed
|
||||||
|
|
||||||
|
|
||||||
def test_extract_memory_reports():
|
def test_extract_memory_reports():
|
||||||
input = ["Begin memory reports",
|
input = [
|
||||||
|
"Begin memory reports",
|
||||||
"|",
|
"|",
|
||||||
" 154.56 MiB -- explicit\n",
|
" 154.56 MiB -- explicit\n",
|
||||||
"| 107.88 MiB -- system-heap-unclassified\n",
|
"| 107.88 MiB -- system-heap-unclassified\n",
|
||||||
"End memory reports\n"]
|
"End memory reports\n",
|
||||||
expected = ([['|', '| 107.88 MiB -- system-heap-unclassified']], ['reports'])
|
]
|
||||||
assert (extract_memory_reports(input) == expected)
|
expected = ([["|", "| 107.88 MiB -- system-heap-unclassified"]], ["reports"])
|
||||||
|
assert extract_memory_reports(input) == expected
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
def test():
|
def test():
|
||||||
input = '''|
|
input = """|
|
||||||
| 23.89 MiB -- explicit
|
| 23.89 MiB -- explicit
|
||||||
| 21.35 MiB -- jemalloc-heap-unclassified
|
| 21.35 MiB -- jemalloc-heap-unclassified
|
||||||
| 2.54 MiB -- url(https://servo.org/)
|
| 2.54 MiB -- url(https://servo.org/)
|
||||||
|
@ -97,33 +95,33 @@ def test():
|
||||||
| 0.27 MiB -- stylist
|
| 0.27 MiB -- stylist
|
||||||
| 0.12 MiB -- dom-tree
|
| 0.12 MiB -- dom-tree
|
||||||
|
|
|
|
||||||
| 25.18 MiB -- jemalloc-heap-active'''
|
| 25.18 MiB -- jemalloc-heap-active"""
|
||||||
|
|
||||||
expected = {
|
expected = {
|
||||||
'explicit': '23.89 MiB',
|
"explicit": "23.89 MiB",
|
||||||
'explicit/jemalloc-heap-unclassified': '21.35 MiB',
|
"explicit/jemalloc-heap-unclassified": "21.35 MiB",
|
||||||
'explicit/url(https://servo.org/)': '2.54 MiB',
|
"explicit/url(https://servo.org/)": "2.54 MiB",
|
||||||
'explicit/url(https://servo.org/)/js': '2.16 MiB',
|
"explicit/url(https://servo.org/)/js": "2.16 MiB",
|
||||||
'explicit/url(https://servo.org/)/js/gc-heap': '1.00 MiB',
|
"explicit/url(https://servo.org/)/js/gc-heap": "1.00 MiB",
|
||||||
'explicit/url(https://servo.org/)/js/gc-heap/decommitted': '0.77 MiB',
|
"explicit/url(https://servo.org/)/js/gc-heap/decommitted": "0.77 MiB",
|
||||||
'explicit/url(https://servo.org/)/js/non-heap': '1.00 MiB',
|
"explicit/url(https://servo.org/)/js/non-heap": "1.00 MiB",
|
||||||
'explicit/url(https://servo.org/)/layout-thread': '0.27 MiB',
|
"explicit/url(https://servo.org/)/layout-thread": "0.27 MiB",
|
||||||
'explicit/url(https://servo.org/)/layout-thread/stylist': '0.27 MiB',
|
"explicit/url(https://servo.org/)/layout-thread/stylist": "0.27 MiB",
|
||||||
'explicit/url(https://servo.org/)/dom-tree': '0.12 MiB',
|
"explicit/url(https://servo.org/)/dom-tree": "0.12 MiB",
|
||||||
'jemalloc-heap-active': '25.18 MiB',
|
"jemalloc-heap-active": "25.18 MiB",
|
||||||
}
|
}
|
||||||
report = parse_memory_report(input.split('\n'))
|
report = parse_memory_report(input.split("\n"))
|
||||||
transformed = transform_report_for_test(report)
|
transformed = transform_report_for_test(report)
|
||||||
assert (sorted(transformed.keys()) == sorted(expected.keys()))
|
assert sorted(transformed.keys()) == sorted(expected.keys())
|
||||||
for k, v in transformed.items():
|
for k, v in transformed.items():
|
||||||
assert (v == expected[k])
|
assert v == expected[k]
|
||||||
test_extract_memory_reports()
|
test_extract_memory_reports()
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
def usage():
|
def usage():
|
||||||
print('%s --test - run automated tests' % sys.argv[0])
|
print("%s --test - run automated tests" % sys.argv[0])
|
||||||
print('%s file - extract all memory reports that are present in file' % sys.argv[0])
|
print("%s file - extract all memory reports that are present in file" % sys.argv[0])
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
|
|
||||||
|
@ -131,19 +129,19 @@ if __name__ == "__main__":
    if len(sys.argv) == 1:
        sys.exit(usage())

    if sys.argv[1] == '--test':
    if sys.argv[1] == "--test":
        sys.exit(test())

    with open(sys.argv[1]) as f:
        lines = f.readlines()
    (reports, times) = extract_memory_reports(lines)
    json_reports = []
    for (report_lines, seconds) in zip(reports, times):
    for report_lines, seconds in zip(reports, times):
        report = parse_memory_report(report_lines)
        json_reports += [{'seconds': seconds, 'report': report}]
        json_reports += [{"seconds": seconds, "report": report}]
    with tempfile.NamedTemporaryFile(delete=False) as output:
        thisdir = os.path.dirname(os.path.abspath(__file__))
        with open(os.path.join(thisdir, 'memory_chart.html')) as template:
        with open(os.path.join(thisdir, "memory_chart.html")) as template:
            content = template.read()
            output.write(content.replace('[/* json data */]', json.dumps(json_reports)))
            output.write(content.replace("[/* json data */]", json.dumps(json_reports)))
        webbrowser.open_new_tab('file://' + output.name)
        webbrowser.open_new_tab("file://" + output.name)

@ -31,7 +31,7 @@ Example:
""")
    sys.exit(0)

rust_source = open(sys.argv[1], 'r')
rust_source = open(sys.argv[1], "r")
lines = iter(rust_source)
for line in lines:
    if line.lstrip().startswith("pub enum ProfilerCategory"):

|
||||||
|
|
||||||
elems = iter(plist.findall("./dict/*"))
|
elems = iter(plist.findall("./dict/*"))
|
||||||
for elem in elems:
|
for elem in elems:
|
||||||
if elem.tag != 'key' or elem.text != '$objects':
|
if elem.tag != "key" or elem.text != "$objects":
|
||||||
continue
|
continue
|
||||||
array = elems.next()
|
array = elems.next()
|
||||||
break
|
break
|
||||||
|
|
||||||
elems = iter(array.findall("./*"))
|
elems = iter(array.findall("./*"))
|
||||||
for elem in elems:
|
for elem in elems:
|
||||||
if elem.tag != 'string' or elem.text != 'kdebugIntervalRule':
|
if elem.tag != "string" or elem.text != "kdebugIntervalRule":
|
||||||
continue
|
continue
|
||||||
dictionary = elems.next()
|
dictionary = elems.next()
|
||||||
break
|
break
|
||||||
|
|
||||||
elems = iter(dictionary.findall("./*"))
|
elems = iter(dictionary.findall("./*"))
|
||||||
for elem in elems:
|
for elem in elems:
|
||||||
if elem.tag != 'key' or elem.text != 'NS.objects':
|
if elem.tag != "key" or elem.text != "NS.objects":
|
||||||
continue
|
continue
|
||||||
objects_array = elems.next()
|
objects_array = elems.next()
|
||||||
break
|
break
|
||||||
|
@ -76,33 +76,33 @@ child_count = sum(1 for _ in iter(array.findall("./*")))
|
||||||
|
|
||||||
for code_pair in code_pairs:
|
for code_pair in code_pairs:
|
||||||
number_index = child_count
|
number_index = child_count
|
||||||
integer = Element('integer')
|
integer = Element("integer")
|
||||||
integer.text = str(int(code_pair[0], 0))
|
integer.text = str(int(code_pair[0], 0))
|
||||||
array.append(integer)
|
array.append(integer)
|
||||||
child_count += 1
|
child_count += 1
|
||||||
|
|
||||||
string_index = child_count
|
string_index = child_count
|
||||||
string = Element('string')
|
string = Element("string")
|
||||||
string.text = code_pair[1]
|
string.text = code_pair[1]
|
||||||
array.append(string)
|
array.append(string)
|
||||||
child_count += 1
|
child_count += 1
|
||||||
|
|
||||||
dictionary = Element('dict')
|
dictionary = Element("dict")
|
||||||
key = Element('key')
|
key = Element("key")
|
||||||
key.text = "CF$UID"
|
key.text = "CF$UID"
|
||||||
dictionary.append(key)
|
dictionary.append(key)
|
||||||
integer = Element('integer')
|
integer = Element("integer")
|
||||||
integer.text = str(number_index)
|
integer.text = str(number_index)
|
||||||
dictionary.append(integer)
|
dictionary.append(integer)
|
||||||
objects_array.append(dictionary)
|
objects_array.append(dictionary)
|
||||||
|
|
||||||
dictionary = Element('dict')
|
dictionary = Element("dict")
|
||||||
key = Element('key')
|
key = Element("key")
|
||||||
key.text = "CF$UID"
|
key.text = "CF$UID"
|
||||||
dictionary.append(key)
|
dictionary.append(key)
|
||||||
integer = Element('integer')
|
integer = Element("integer")
|
||||||
integer.text = str(string_index)
|
integer.text = str(string_index)
|
||||||
dictionary.append(integer)
|
dictionary.append(integer)
|
||||||
objects_array.append(dictionary)
|
objects_array.append(dictionary)
|
||||||
|
|
||||||
plist.write(sys.stdout, encoding='utf-8', xml_declaration=True)
|
plist.write(sys.stdout, encoding="utf-8", xml_declaration=True)
|
||||||
|
|
|
@ -53,17 +53,17 @@ stacks = {}
thread_data = defaultdict(list)
thread_order = {}
for sample in samples:
    if sample['name']:
    if sample["name"]:
        name = sample['name']
        name = sample["name"]
    else:
        name = "%s %d %d" % (sample['type'], sample['namespace'], sample['index'])
        name = "%s %d %d" % (sample["type"], sample["namespace"], sample["index"])
    thread_data[name].append((sample['time'], sample['frames']))
    thread_data[name].append((sample["time"], sample["frames"]))
    if name not in thread_order:
        thread_order[name] = (sample['namespace'], sample['index'])
        thread_order[name] = (sample["namespace"], sample["index"])

tid = 0
threads = []
for (name, raw_samples) in sorted(iter(thread_data.items()), key=lambda x: thread_order[x[0]]):
for name, raw_samples in sorted(iter(thread_data.items()), key=lambda x: thread_order[x[0]]):
    string_table = StringTable()
    tid += 1

|
||||||
for sample in raw_samples:
|
for sample in raw_samples:
|
||||||
prefix = None
|
prefix = None
|
||||||
for frame in sample[1]:
|
for frame in sample[1]:
|
||||||
if not frame['name']:
|
if not frame["name"]:
|
||||||
continue
|
continue
|
||||||
if frame['name'] not in frameMap:
|
if frame["name"] not in frameMap:
|
||||||
frameMap[frame['name']] = len(frames)
|
frameMap[frame["name"]] = len(frames)
|
||||||
frame_index = string_table.get(frame['name'])
|
frame_index = string_table.get(frame["name"])
|
||||||
frames.append([frame_index])
|
frames.append([frame_index])
|
||||||
frame = frameMap[frame['name']]
|
frame = frameMap[frame["name"]]
|
||||||
|
|
||||||
stack_key = "%d,%d" % (frame, prefix) if prefix else str(frame)
|
stack_key = "%d,%d" % (frame, prefix) if prefix else str(frame)
|
||||||
if stack_key not in stackMap:
|
if stack_key not in stackMap:
|
||||||
|
@ -93,61 +93,63 @@ for (name, raw_samples) in sorted(iter(thread_data.items()), key=lambda x: threa
            prefix = stack
        samples.append([stack, sample[0]])

    threads.append({
    threads.append(
        'tid': tid,
        {
        'name': name,
            "tid": tid,
        'markers': {
            "name": name,
            'schema': {
            "markers": {
                'name': 0,
                "schema": {
                'time': 1,
                    "name": 0,
                'data': 2,
                    "time": 1,
                    "data": 2,
            },
                },
            'data': [],
                "data": [],
        },
            },
        'samples': {
            "samples": {
            'schema': {
                "schema": {
                'stack': 0,
                    "stack": 0,
                'time': 1,
                    "time": 1,
                'responsiveness': 2,
                    "responsiveness": 2,
                'rss': 2,
                    "rss": 2,
                'uss': 4,
                    "uss": 4,
                'frameNumber': 5,
                    "frameNumber": 5,
            },
                },
            'data': samples,
                "data": samples,
        },
            },
        'frameTable': {
            "frameTable": {
            'schema': {
                "schema": {
                'location': 0,
                    "location": 0,
                'implementation': 1,
                    "implementation": 1,
                'optimizations': 2,
                    "optimizations": 2,
                'line': 3,
                    "line": 3,
                'category': 4,
                    "category": 4,
            },
                },
            'data': frames,
                "data": frames,
        },
            },
        'stackTable': {
            "stackTable": {
            'schema': {
                "schema": {
                'frame': 0,
                    "frame": 0,
                'prefix': 1,
                    "prefix": 1,
            },
                },
            'data': stacks,
                "data": stacks,
        },
            },
        'stringTable': string_table.contents(),
            "stringTable": string_table.contents(),
    })
        }
    )


output = {
    'meta': {
    "meta": {
        'interval': rate,
        "interval": rate,
        'processType': 0,
        "processType": 0,
        'product': 'Servo',
        "product": "Servo",
        'stackwalk': 1,
        "stackwalk": 1,
        'startTime': startTime,
        "startTime": startTime,
        'version': 4,
        "version": 4,
        'presymbolicated': True,
        "presymbolicated": True,
    },
    'libs': [],
    "libs": [],
    'threads': threads,
    "threads": threads,
}

print(json.dumps(output))

@@ -27,8 +27,10 @@ def main(avd_name, apk_path, *args):
         "-no-window",
         "-no-snapshot",
         "-no-snapstorage",
-        "-gpu", "guest",
-        "-port", emulator_port,
+        "-gpu",
+        "guest",
+        "-port",
+        emulator_port,
     ]
     with terminate_on_exit(emulator_args, stdout=sys.stderr) as emulator_process:
         # This is hopefully enough time for the emulator to exit

@@ -70,7 +72,6 @@ def main(avd_name, apk_path, *args):
         "*:S",  # Hide everything else
     ]
     with terminate_on_exit(adb + ["logcat"] + logcat_args) as logcat:
-
         # This step needs to happen after application start
         forward_webdriver(adb, args)

@@ -84,8 +85,7 @@ def tool_path(directory, bin_name):
     if os.path.exists(path):
         return path

-    path = os.path.join(os.path.dirname(__file__), "..", "android-toolchains", "sdk",
-                        directory, bin_name)
+    path = os.path.join(os.path.dirname(__file__), "..", "android-toolchains", "sdk", directory, bin_name)
     if os.path.exists(path):
         return path

@@ -207,8 +207,7 @@ def interrupt(_signum, _frame):
 if __name__ == "__main__":
     if len(sys.argv) < 3:
         print("Usage: %s avd_name apk_path [servo args...]" % sys.argv[0])
-        print("Example: %s servo-x86 target/i686-linux-android/release/servo.apk https://servo.org"
-              % sys.argv[0])
+        print("Example: %s servo-x86 target/i686-linux-android/release/servo.apk https://servo.org" % sys.argv[0])
         sys.exit(1)

     try:

@@ -29,16 +29,16 @@ import getopt


 def print_help():
-
-    print('\nPlease enter the command as shown below: \n')
-    print('python3 ./etc/servo_automation_screenshot.py -p <port>'
-          + ' -i /path/to/folder/containing/files -r <resolution>'
-          + ' -n num_of_files\n')
+    print("\nPlease enter the command as shown below: \n")
+    print(
+        "python3 ./etc/servo_automation_screenshot.py -p <port>"
+        + " -i /path/to/folder/containing/files -r <resolution>"
+        + " -n num_of_files\n"
+    )


 def servo_ready_to_accept(url, payload, headers):
-    while (True):
-
+    while True:
         try:
             # Before sending an additional request, we wait for one second each time
             time.sleep(1)

@@ -48,45 +48,46 @@ def servo_ready_to_accept(url, payload, headers):
             break
         except Exception as e:
             time.sleep(5)
-            print('Exception: ', e)
+            print("Exception: ", e)
     return json_string


 def ensure_screenshots_directory_exists():
-    if not os.path.exists('screenshots'):
-        os.makedirs('screenshots')
+    if not os.path.exists("screenshots"):
+        os.makedirs("screenshots")


 def render_html_files(num_of_files, url, file_url, json_string, headers, cwd):
     for x in range(num_of_files):
-
         json_data = {}
-        json_data['url'] = 'file://{0}file{1}.html'.format(file_url, str(x))
-        print(json_data['url'])
+        json_data["url"] = "file://{0}file{1}.html".format(file_url, str(x))
+        print(json_data["url"])
         json_data = json.dumps(json_data)
-        requests.post('{0}/{1}/url'.format(url, json_string['value']['sessionId']), data=json_data, headers=headers)
-        screenshot_request = requests.get('{0}/{1}/screenshot'.format(url, json_string['value']['sessionId']))
-        image_data_encoded = screenshot_request.json()['value']
+        requests.post("{0}/{1}/url".format(url, json_string["value"]["sessionId"]), data=json_data, headers=headers)
+        screenshot_request = requests.get("{0}/{1}/screenshot".format(url, json_string["value"]["sessionId"]))
+        image_data_encoded = screenshot_request.json()["value"]
         with open("screenshots/output_image_{0}.png".format(str(x)), "wb") as image_file:
-            image_file.write(base64.decodebytes(image_data_encoded.encode('utf-8')))
+            image_file.write(base64.decodebytes(image_data_encoded.encode("utf-8")))
         print("################################")
-        print("The screenshot is stored in the location: {0}/screenshots/"
-              " with filename: output_image_{1}.png".format(cwd, str(x)))
+        print(
+            "The screenshot is stored in the location: {0}/screenshots/ with filename: output_image_{1}.png".format(
+                cwd, str(x)
+            )
+        )
         print("################################")


 def main(argv):  # take inputs from command line by considering the options parameter i.e -h, -p etc.
-
     # Local Variables
-    port = ''
-    resolution = ''
-    file_url = ''
-    num_of_files = ''
+    port = ""
+    resolution = ""
+    file_url = ""
+    num_of_files = ""
     cwd = os.getcwd()
-    url = ''
+    url = ""
     payload = "{}"
-    headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
-    json_string = ''
+    headers = {"content-type": "application/json", "Accept-Charset": "UTF-8"}
+    json_string = ""
     try:
         # input options defined here.
         opts, args = getopt.getopt(argv, "p:i:r:n:", ["port=", "ifile=", "resolution=", "num-files="])

@@ -96,7 +97,7 @@ def main(argv):  # take inputs from command line by considering the options parameter i.e -h, -p etc.
         print_help()
         sys.exit(2)
     for opt, arg in opts:
-        if opt == '-h': # -h means help. Displays how to input command line arguments
+        if opt == "-h":  # -h means help. Displays how to input command line arguments
             print_help()
             sys.exit()
         elif opt in ("-p", "--port"):  # store the value provided with the option -p in port variable.

@@ -108,7 +109,7 @@ def main(argv):  # take inputs from command line by considering the options parameter i.e -h, -p etc.
         elif opt in ("-n", "--num-files"):  # store the value provided with the option -n in num_of_files variable.
             num_of_files = arg

-    url = 'http://localhost:{0}/session'.format(port)
+    url = "http://localhost:{0}/session".format(port)
     num_of_files = int(num_of_files)

    # Starting servo on specified port

@@ -68,7 +68,7 @@ class TrustedNodeAddressPrinter:
     def children(self):
         node_type = gdb.lookup_type("struct script::dom::node::Node").pointer()
         value = self.val.cast(node_type)
-        return [('Node', value)]
+        return [("Node", value)]

     def to_string(self):
         return self.val.address

@@ -83,7 +83,7 @@ class NodeTypeIdPrinter:
         u8_ptr_type = gdb.lookup_type("u8").pointer()
         enum_0 = self.val.address.cast(u8_ptr_type).dereference()
         enum_type = self.val.type.fields()[int(enum_0)].type
-        return str(enum_type).lstrip('struct ')
+        return str(enum_type).lstrip("struct ")


 # Printer for std::Option<>

@@ -113,8 +113,8 @@ class OptionPrinter:
             value_type = option_type.fields()[1].type.fields()[1].type
             v_size = value_type.sizeof
             data_ptr = (ptr + t_size - v_size).cast(value_type.pointer()).dereference()
-            return [('Some', data_ptr)]
-        return [('None', None)]
+            return [("Some", data_ptr)]
+        return [("None", None)]

     def to_string(self):
         return None

@@ -130,19 +130,19 @@ class TestPrinter:


 type_map = [
-    ('struct Au', AuPrinter),
-    ('FlowFlags', BitFieldU8Printer),
-    ('IntrinsicWidths', ChildPrinter),
-    ('PlacementInfo', ChildPrinter),
-    ('TrustedNodeAddress', TrustedNodeAddressPrinter),
-    ('NodeTypeId', NodeTypeIdPrinter),
-    ('Option', OptionPrinter),
+    ("struct Au", AuPrinter),
+    ("FlowFlags", BitFieldU8Printer),
+    ("IntrinsicWidths", ChildPrinter),
+    ("PlacementInfo", ChildPrinter),
+    ("TrustedNodeAddress", TrustedNodeAddressPrinter),
+    ("NodeTypeId", NodeTypeIdPrinter),
+    ("Option", OptionPrinter),
 ]


 def lookup_servo_type(val):
     val_type = str(val.type)
-    for (type_name, printer) in type_map:
+    for type_name, printer in type_map:
         if val_type == type_name or val_type.endswith("::" + type_name):
             return printer(val)
     return None

@@ -12,13 +12,13 @@ Created on Mon Mar 26 20:08:25 2018
 @author: Pranshu Sinha, Abhay Soni, Aayushi Agrawal
 The script is intended to start servo on localhost:7002
 """

 import subprocess


 def start_servo(port, resolution):
-
     # Use the below command if you are running this script on windows
     # cmds = 'mach.bat run --webdriver ' + port + ' --window-size ' + resolution
-    cmds = './mach run --webdriver=' + port + ' --window-size ' + resolution
+    cmds = "./mach run --webdriver=" + port + " --window-size " + resolution
     process = subprocess.Popen(cmds, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     return process

@@ -21,7 +21,7 @@
 import sys
 import json

-full_search = len(sys.argv) > 3 and sys.argv[3] == '--full'
+full_search = len(sys.argv) > 3 and sys.argv[3] == "--full"

 with open(sys.argv[1]) as f:
     data = f.readlines()

@@ -34,13 +34,9 @@ with open(sys.argv[1]) as f:
             if "action" in entry and entry["action"] == "test_end":
                 thread = None
     else:
-        if ("action" in entry
-                and entry["action"] == "test_start"
-                and entry["test"] == sys.argv[2]):
+        if "action" in entry and entry["action"] == "test_start" and entry["test"] == sys.argv[2]:
             thread = entry["thread"]
             print(json.dumps(entry))
-        elif (full_search
-              and "command" in entry
-              and sys.argv[2] in entry["command"]):
+        elif full_search and "command" in entry and sys.argv[2] in entry["command"]:
             thread = entry["thread"]
             print(json.dumps(entry))

@@ -45,9 +45,7 @@ def process_log(data):
         elif entry["action"] == "test_end":
             test = tests[entry["test"]]
             test["end"] = int(entry["time"])
-            test_results[entry["status"]] += [
-                (entry["test"], test["end"] - test["start"])
-            ]
+            test_results[entry["status"]] += [(entry["test"], test["end"] - test["start"])]

     return test_results

@@ -73,24 +71,18 @@ print("%d tests timed out." % len(test_results["TIMEOUT"]))
 longest_crash = sorted(test_results["CRASH"], key=lambda x: x[1], reverse=True)
 print("Longest CRASH test took %dms (%s)" % (longest_crash[0][1], longest_crash[0][0]))

-longest_ok = sorted(
-    test_results["PASS"] + test_results["OK"],
-    key=lambda x: x[1], reverse=True
-)
-csv_data = [['Test path', 'Milliseconds']]
-with open('longest_ok.csv', 'w') as csv_file:
+longest_ok = sorted(test_results["PASS"] + test_results["OK"], key=lambda x: x[1], reverse=True)
+csv_data = [["Test path", "Milliseconds"]]
+with open("longest_ok.csv", "w") as csv_file:
     writer = csv.writer(csv_file)
     writer.writerows(csv_data + longest_ok)

-longest_fail = sorted(
-    test_results["ERROR"] + test_results["FAIL"],
-    key=lambda x: x[1], reverse=True
-)
-with open('longest_err.csv', 'w') as csv_file:
+longest_fail = sorted(test_results["ERROR"] + test_results["FAIL"], key=lambda x: x[1], reverse=True)
+with open("longest_err.csv", "w") as csv_file:
     writer = csv.writer(csv_file)
     writer.writerows(csv_data + longest_fail)

 longest_timeout = sorted(test_results["TIMEOUT"], key=lambda x: x[1], reverse=True)
-with open('timeouts.csv', 'w') as csv_file:
+with open("timeouts.csv", "w") as csv_file:
     writer = csv.writer(csv_file)
     writer.writerows(csv_data + longest_timeout)

@@ -20,8 +20,8 @@

 import os

-test_root = os.path.join('tests', 'wpt', 'tests')
-meta_root = os.path.join('tests', 'wpt', 'meta')
+test_root = os.path.join("tests", "wpt", "tests")
+meta_root = os.path.join("tests", "wpt", "meta")

 test_counts = {}
 meta_counts = {}

@@ -35,7 +35,7 @@ for base_dir, dir_names, files in os.walk(test_root):
         continue

     test_files = []
-    exts = ['.html', '.htm', '.xht', '.xhtml', '.window.js', '.worker.js', '.any.js']
+    exts = [".html", ".htm", ".xht", ".xhtml", ".window.js", ".worker.js", ".any.js"]
     for f in files:
         for ext in exts:
             if f.endswith(ext):

@@ -48,21 +48,21 @@ for base_dir, dir_names, files in os.walk(meta_root):

     rel_base = os.path.relpath(base_dir, meta_root)
     num_files = len(files)
-    if '__dir__.ini' in files:
+    if "__dir__.ini" in files:
         num_files -= 1
     meta_counts[rel_base] = num_files

 final_counts = []
-for (test_dir, test_count) in test_counts.items():
+for test_dir, test_count in test_counts.items():
     if not test_count:
         continue
     meta_count = meta_counts.get(test_dir, 0)
     final_counts += [(test_dir, test_count, meta_count)]

-print('Test counts')
-print('dir: %% failed (num tests / num failures)')
+print("Test counts")
+print("dir: %% failed (num tests / num failures)")
 s = sorted(final_counts, key=lambda x: x[2] / x[1])
-for (test_dir, test_count, meta_count) in reversed(sorted(s, key=lambda x: x[2])):
+for test_dir, test_count, meta_count in reversed(sorted(s, key=lambda x: x[2])):
     if not meta_count:
         continue
-    print('%s: %.2f%% (%d / %d)' % (test_dir, meta_count / test_count * 100, test_count, meta_count))
+    print("%s: %.2f%% (%d / %d)" % (test_dir, meta_count / test_count * 100, test_count, meta_count))

@@ -22,61 +22,61 @@ SEARCH_PATHS = [

 # Individual files providing mach commands.
 MACH_MODULES = [
-    os.path.join('python', 'servo', 'bootstrap_commands.py'),
-    os.path.join('python', 'servo', 'build_commands.py'),
-    os.path.join('python', 'servo', 'testing_commands.py'),
-    os.path.join('python', 'servo', 'post_build_commands.py'),
-    os.path.join('python', 'servo', 'package_commands.py'),
-    os.path.join('python', 'servo', 'devenv_commands.py'),
+    os.path.join("python", "servo", "bootstrap_commands.py"),
+    os.path.join("python", "servo", "build_commands.py"),
+    os.path.join("python", "servo", "testing_commands.py"),
+    os.path.join("python", "servo", "post_build_commands.py"),
+    os.path.join("python", "servo", "package_commands.py"),
+    os.path.join("python", "servo", "devenv_commands.py"),
 ]

 CATEGORIES = {
-    'bootstrap': {
-        'short': 'Bootstrap Commands',
-        'long': 'Bootstrap the build system',
-        'priority': 90,
+    "bootstrap": {
+        "short": "Bootstrap Commands",
+        "long": "Bootstrap the build system",
+        "priority": 90,
     },
-    'build': {
-        'short': 'Build Commands',
-        'long': 'Interact with the build system',
-        'priority': 80,
+    "build": {
+        "short": "Build Commands",
+        "long": "Interact with the build system",
+        "priority": 80,
     },
-    'post-build': {
-        'short': 'Post-build Commands',
-        'long': 'Common actions performed after completing a build.',
-        'priority': 70,
+    "post-build": {
+        "short": "Post-build Commands",
+        "long": "Common actions performed after completing a build.",
+        "priority": 70,
     },
-    'testing': {
-        'short': 'Testing',
-        'long': 'Run tests.',
-        'priority': 60,
+    "testing": {
+        "short": "Testing",
+        "long": "Run tests.",
+        "priority": 60,
     },
-    'devenv': {
-        'short': 'Development Environment',
-        'long': 'Set up and configure your development environment.',
-        'priority': 50,
+    "devenv": {
+        "short": "Development Environment",
+        "long": "Set up and configure your development environment.",
+        "priority": 50,
     },
-    'build-dev': {
-        'short': 'Low-level Build System Interaction',
-        'long': 'Interact with specific parts of the build system.',
-        'priority': 20,
+    "build-dev": {
+        "short": "Low-level Build System Interaction",
+        "long": "Interact with specific parts of the build system.",
+        "priority": 20,
     },
-    'package': {
-        'short': 'Package',
-        'long': 'Create objects to distribute',
-        'priority': 15,
+    "package": {
+        "short": "Package",
+        "long": "Create objects to distribute",
+        "priority": 15,
     },
-    'misc': {
-        'short': 'Potpourri',
-        'long': 'Potent potables and assorted snacks.',
-        'priority': 10,
+    "misc": {
+        "short": "Potpourri",
+        "long": "Potent potables and assorted snacks.",
+        "priority": 10,
     },
-    'disabled': {
-        'short': 'Disabled',
-        'long': 'The disabled commands are hidden by default. Use -v to display them. These commands are unavailable '
-                'for your current context, run "mach <command>" to see why.',
-        'priority': 0,
-    }
+    "disabled": {
+        "short": "Disabled",
+        "long": "The disabled commands are hidden by default. Use -v to display them. These commands are unavailable "
+        'for your current context, run "mach <command>" to see why.',
+        "priority": 0,
+    },
 }

@@ -92,17 +92,25 @@ def _process_exec(args, cwd):
 def install_virtual_env_requirements(project_path: str, marker_path: str):
     requirements_paths = [
         os.path.join(project_path, "python", "requirements.txt"),
-        os.path.join(project_path, WPT_TOOLS_PATH, "requirements_tests.txt",),
-        os.path.join(project_path, WPT_RUNNER_PATH, "requirements.txt",),
+        os.path.join(
+            project_path,
+            WPT_TOOLS_PATH,
+            "requirements_tests.txt",
+        ),
+        os.path.join(
+            project_path,
+            WPT_RUNNER_PATH,
+            "requirements.txt",
+        ),
     ]

     requirements_hasher = hashlib.sha256()
     for path in requirements_paths:
-        with open(path, 'rb') as file:
+        with open(path, "rb") as file:
             requirements_hasher.update(file.read())

     try:
-        with open(marker_path, 'r') as marker_file:
+        with open(marker_path, "r") as marker_file:
             marker_hash = marker_file.read()
     except FileNotFoundError:
         marker_hash = None

@@ -132,27 +140,28 @@ def _activate_virtualenv(topdir):
         _process_exec(["uv", "venv"], cwd=topdir)

     script_dir = "Scripts" if _is_windows() else "bin"
-    runpy.run_path(os.path.join(virtualenv_path, script_dir, 'activate_this.py'))
+    runpy.run_path(os.path.join(virtualenv_path, script_dir, "activate_this.py"))

     install_virtual_env_requirements(topdir, marker_path)

     # Turn off warnings about deprecated syntax in our indirect dependencies.
     # TODO: Find a better approach for doing this.
     import warnings
-    warnings.filterwarnings('ignore', category=SyntaxWarning, module=r'.*.venv')
+
+    warnings.filterwarnings("ignore", category=SyntaxWarning, module=r".*.venv")


 def _ensure_case_insensitive_if_windows():
     # The folder is called 'python'. By deliberately checking for it with the wrong case, we determine if the file
     # system is case sensitive or not.
-    if _is_windows() and not os.path.exists('Python'):
-        print('Cannot run mach in a path on a case-sensitive file system on Windows.')
-        print('For more details, see https://github.com/pypa/virtualenv/issues/935')
+    if _is_windows() and not os.path.exists("Python"):
+        print("Cannot run mach in a path on a case-sensitive file system on Windows.")
+        print("For more details, see https://github.com/pypa/virtualenv/issues/935")
         sys.exit(1)


 def _is_windows():
-    return sys.platform == 'win32'
+    return sys.platform == "win32"


 def bootstrap_command_only(topdir):

@@ -168,9 +177,9 @@ def bootstrap_command_only(topdir):
     import servo.util

     try:
-        force = '-f' in sys.argv or '--force' in sys.argv
-        skip_platform = '--skip-platform' in sys.argv
-        skip_lints = '--skip-lints' in sys.argv
+        force = "-f" in sys.argv or "--force" in sys.argv
+        skip_platform = "--skip-platform" in sys.argv
+        skip_lints = "--skip-lints" in sys.argv
         servo.platform.get().bootstrap(force, skip_platform, skip_lints)
     except NotImplementedError as exception:
         print(exception)

@@ -186,9 +195,9 @@ def bootstrap(topdir):

     # We don't support paths with spaces for now
     # https://github.com/servo/servo/issues/9616
-    if ' ' in topdir and (not _is_windows()):
-        print('Cannot run mach in a path with spaces.')
-        print('Current path:', topdir)
+    if " " in topdir and (not _is_windows()):
+        print("Cannot run mach in a path with spaces.")
+        print("Current path:", topdir)
         sys.exit(1)

     _activate_virtualenv(topdir)

@@ -196,7 +205,7 @@ def bootstrap(topdir):
     def populate_context(context, key=None):
         if key is None:
             return
-        if key == 'topdir':
+        if key == "topdir":
             return topdir
         raise AttributeError(key)

@@ -204,11 +213,12 @@ def bootstrap(topdir):
     sys.path[0:0] = [WPT_PATH, WPT_RUNNER_PATH, WPT_SERVE_PATH]

     import mach.main
+
     mach = mach.main.Mach(os.getcwd())
     mach.populate_context_handler = populate_context

     for category, meta in CATEGORIES.items():
-        mach.define_category(category, meta['short'], meta['long'], meta['priority'])
+        mach.define_category(category, meta["short"], meta["long"], meta["priority"])

     for path in MACH_MODULES:
         # explicitly provide a module name

@@ -36,18 +36,10 @@ from servo.util import delete, download_bytes

 @CommandProvider
 class MachCommands(CommandBase):
-    @Command('bootstrap',
-             description='Install required packages for building.',
-             category='bootstrap')
-    @CommandArgument('--force', '-f',
-                     action='store_true',
-                     help='Boostrap without confirmation')
-    @CommandArgument('--skip-platform',
-                     action='store_true',
-                     help='Skip platform bootstrapping.')
-    @CommandArgument('--skip-lints',
-                     action='store_true',
-                     help='Skip tool necessary for linting.')
+    @Command("bootstrap", description="Install required packages for building.", category="bootstrap")
+    @CommandArgument("--force", "-f", action="store_true", help="Boostrap without confirmation")
+    @CommandArgument("--skip-platform", action="store_true", help="Skip platform bootstrapping.")
+    @CommandArgument("--skip-lints", action="store_true", help="Skip tool necessary for linting.")
     def bootstrap(self, force=False, skip_platform=False, skip_lints=False):
         # Note: This entry point isn't actually invoked by ./mach bootstrap.
         # ./mach bootstrap calls mach_bootstrap.bootstrap_command_only so that

@@ -59,12 +51,12 @@ class MachCommands(CommandBase):
             return 1
         return 0

-    @Command('bootstrap-gstreamer',
-             description='Set up a local copy of the gstreamer libraries (linux only).',
-             category='bootstrap')
-    @CommandArgument('--force', '-f',
-                     action='store_true',
-                     help='Boostrap without confirmation')
+    @Command(
+        "bootstrap-gstreamer",
+        description="Set up a local copy of the gstreamer libraries (linux only).",
+        category="bootstrap",
+    )
+    @CommandArgument("--force", "-f", action="store_true", help="Boostrap without confirmation")
     def bootstrap_gstreamer(self, force=False):
         try:
             servo.platform.get().bootstrap_gstreamer(force)

@@ -73,15 +65,15 @@ class MachCommands(CommandBase):
             return 1
         return 0

-    @Command('update-hsts-preload',
-             description='Download the HSTS preload list',
-             category='bootstrap')
+    @Command("update-hsts-preload", description="Download the HSTS preload list", category="bootstrap")
     def bootstrap_hsts_preload(self, force=False):
         preload_filename = "hsts_preload.fstmap"
         preload_path = path.join(self.context.topdir, "resources")

-        chromium_hsts_url = "https://chromium.googlesource.com/chromium/src" + \
-            "/+/main/net/http/transport_security_state_static.json?format=TEXT"
+        chromium_hsts_url = (
+            "https://chromium.googlesource.com/chromium/src"
+            + "/+/main/net/http/transport_security_state_static.json?format=TEXT"
+        )

         try:
             content_base64 = download_bytes("Chromium HSTS preload list", chromium_hsts_url)

@@ -93,7 +85,7 @@ class MachCommands(CommandBase):

         # The chromium "json" has single line comments in it which, of course,
         # are non-standard/non-valid json. Simply strip them out before parsing
-        content_json = re.sub(r'(^|\s+)//.*$', '', content_decoded, flags=re.MULTILINE)
+        content_json = re.sub(r"(^|\s+)//.*$", "", content_decoded, flags=re.MULTILINE)
         try:
             pins_and_static_preloads = json.loads(content_json)
             with tempfile.NamedTemporaryFile(mode="w") as csv_file:

@@ -107,13 +99,15 @@ class MachCommands(CommandBase):
             print(f"Unable to parse chromium HSTS preload list, has the format changed? \n{e}")
             sys.exit(1)

-    @Command('update-pub-domains',
-             description='Download the public domains list and update resources/public_domains.txt',
-             category='bootstrap')
+    @Command(
+        "update-pub-domains",
+        description="Download the public domains list and update resources/public_domains.txt",
+        category="bootstrap",
+    )
     def bootstrap_pub_suffix(self, force=False):
         list_url = "https://publicsuffix.org/list/public_suffix_list.dat"
         dst_filename = path.join(self.context.topdir, "resources", "public_domains.txt")
-        not_implemented_case = re.compile(r'^[^*]+\*')
+        not_implemented_case = re.compile(r"^[^*]+\*")

         try:
             content = download_bytes("Public suffix list", list_url)

@@ -130,29 +124,22 @@ class MachCommands(CommandBase):
                     print("Warning: the new list contains a case that servo can't handle: %s" % suffix)
                 fo.write(suffix.encode("idna") + "\n")

-    @Command('clean-nightlies',
-             description='Clean unused nightly builds of Rust and Cargo',
-             category='bootstrap')
-    @CommandArgument('--force', '-f',
-                     action='store_true',
-                     help='Actually remove stuff')
-    @CommandArgument('--keep',
-                     default='1',
-                     help='Keep up to this many most recent nightlies')
+    @Command("clean-nightlies", description="Clean unused nightly builds of Rust and Cargo", category="bootstrap")
+    @CommandArgument("--force", "-f", action="store_true", help="Actually remove stuff")
+    @CommandArgument("--keep", default="1", help="Keep up to this many most recent nightlies")
     def clean_nightlies(self, force=False, keep=None):
         print(f"Current Rust version for Servo: {self.rust_toolchain()}")
         old_toolchains = []
         keep = int(keep)
-        stdout = subprocess.check_output(['git', 'log', '--format=%H', 'rust-toolchain.toml'])
+        stdout = subprocess.check_output(["git", "log", "--format=%H", "rust-toolchain.toml"])
         for i, commit_hash in enumerate(stdout.split(), 1):
             if i > keep:
-                toolchain_config_text = subprocess.check_output(
-                    ['git', 'show', f'{commit_hash}:rust-toolchain.toml'])
-                toolchain = toml.loads(toolchain_config_text)['toolchain']['channel']
+                toolchain_config_text = subprocess.check_output(["git", "show", f"{commit_hash}:rust-toolchain.toml"])
+                toolchain = toml.loads(toolchain_config_text)["toolchain"]["channel"]
                 old_toolchains.append(toolchain)

         removing_anything = False
-        stdout = subprocess.check_output(['rustup', 'toolchain', 'list'])
+        stdout = subprocess.check_output(["rustup", "toolchain", "list"])
         for toolchain_with_host in stdout.split():
             for old in old_toolchains:
                 if toolchain_with_host.startswith(old):

@@ -165,21 +152,12 @@ class MachCommands(CommandBase):
         if not removing_anything:
             print("Nothing to remove.")
         elif not force:
-            print("Nothing done. "
-                  "Run `./mach clean-nightlies -f` to actually remove.")
+            print("Nothing done. Run `./mach clean-nightlies -f` to actually remove.")

-    @Command('clean-cargo-cache',
-             description='Clean unused Cargo packages',
-             category='bootstrap')
-    @CommandArgument('--force', '-f',
-                     action='store_true',
-                     help='Actually remove stuff')
-    @CommandArgument('--show-size', '-s',
-                     action='store_true',
-                     help='Show packages size')
-    @CommandArgument('--keep',
-                     default='1',
-                     help='Keep up to this many most recent dependencies')
+    @Command("clean-cargo-cache", description="Clean unused Cargo packages", category="bootstrap")
+    @CommandArgument("--force", "-f", action="store_true", help="Actually remove stuff")
+    @CommandArgument("--show-size", "-s", action="store_true", help="Show packages size")
+    @CommandArgument("--keep", default="1", help="Keep up to this many most recent dependencies")
     def clean_cargo_cache(self, force=False, show_size=False, keep=None):
         def get_size(path):
             if os.path.isfile(path):

@@ -193,10 +171,11 @@ class MachCommands(CommandBase):

         removing_anything = False
         packages = {
-            'crates': {},
-            'git': {},
+            "crates": {},
+            "git": {},
         }
         import toml
+
         if os.environ.get("CARGO_HOME", ""):
             cargo_dir = os.environ.get("CARGO_HOME")
         else:

@@ -210,7 +189,7 @@ class MachCommands(CommandBase):
             for package in content.get("package", []):
                 source = package.get("source", "")
                 version = package["version"]
-                if source == u"registry+https://github.com/rust-lang/crates.io-index":
+                if source == "registry+https://github.com/rust-lang/crates.io-index":
                     crate_name = "{}-{}".format(package["name"], version)
                     if not packages["crates"].get(crate_name, False):
                         packages["crates"][package["name"]] = {

@@ -248,7 +227,7 @@ class MachCommands(CommandBase):
         git_db_dir = path.join(git_dir, "db")
         git_checkout_dir = path.join(git_dir, "checkouts")
         if os.path.isdir(git_db_dir):
-            git_db_list = list(filter(lambda f: not f.startswith('.'), os.listdir(git_db_dir)))
+            git_db_list = list(filter(lambda f: not f.startswith("."), os.listdir(git_db_dir)))
         else:
             git_db_list = []
         if os.path.isdir(git_checkout_dir):

@@ -265,7 +244,7 @@ class MachCommands(CommandBase):
                 }
                 if os.path.isdir(path.join(git_checkout_dir, d)):
                     with cd(path.join(git_checkout_dir, d)):
-                        git_crate_hash = glob.glob('*')
+                        git_crate_hash = glob.glob("*")
                     if not git_crate_hash or not os.path.isdir(path.join(git_db_dir, d)):
                         packages["git"][crate_name]["exist"].append(("del", d, ""))
                         continue

@@ -299,8 +278,12 @@ class MachCommands(CommandBase):
                 exist_item = exist[2] if packages_type == "git" else exist
                 if exist_item not in current_crate:
                     crate_count += 1
-                    if int(crate_count) >= int(keep) or not current_crate or \
-                            exist[0] == "del" or exist[2] == "master":
+                    if (
+                        int(crate_count) >= int(keep)
+                        or not current_crate
+                        or exist[0] == "del"
+                        or exist[2] == "master"
+                    ):
                         removing_anything = True
                         crate_paths = []
                         if packages_type == "git":

@@ -317,7 +300,7 @@ class MachCommands(CommandBase):
                         else:
                             crate_paths.append(exist_path)

-                            exist_checkout_list = glob.glob(path.join(exist_checkout_path, '*'))
+                            exist_checkout_list = glob.glob(path.join(exist_checkout_path, "*"))
                             if len(exist_checkout_list) <= 1:
                                 crate_paths.append(exist_checkout_path)
                                 if os.path.isdir(exist_db_path):

@@ -347,5 +330,4 @@ class MachCommands(CommandBase):
         if not removing_anything:
             print("Nothing to remove.")
         elif not force:
-            print("\nNothing done. "
-                  "Run `./mach clean-cargo-cache -f` to actually remove.")
+            print("\nNothing done. Run `./mach clean-cargo-cache -f` to actually remove.")

@@ -37,8 +37,12 @@ from servo.command_base import BuildType, CommandBase, call, check_call
 from servo.gstreamer import windows_dlls, windows_plugins, package_gstreamer_dylibs
 from servo.platform.build_target import BuildTarget

-SUPPORTED_ASAN_TARGETS = ["aarch64-apple-darwin", "aarch64-unknown-linux-gnu",
-                          "x86_64-apple-darwin", "x86_64-unknown-linux-gnu"]
+SUPPORTED_ASAN_TARGETS = [
+    "aarch64-apple-darwin",
+    "aarch64-unknown-linux-gnu",
+    "x86_64-apple-darwin",
+    "x86_64-unknown-linux-gnu",
+]


 def get_rustc_llvm_version() -> Optional[List[int]]:

@@ -50,14 +54,14 @@ def get_rustc_llvm_version() -> Optional[List[int]]:
     be valid in both rustup managed environment and on nix.
     """
     try:
-        result = subprocess.run(['rustc', '--version', '--verbose'], encoding='utf-8', capture_output=True)
+        result = subprocess.run(["rustc", "--version", "--verbose"], encoding="utf-8", capture_output=True)
         result.check_returncode()
         for line in result.stdout.splitlines():
             line_lowercase = line.lower()
             if line_lowercase.startswith("llvm version:"):
                 llvm_version = line_lowercase.strip("llvm version:")
                 llvm_version = llvm_version.strip()
-                version = llvm_version.split('.')
+                version = llvm_version.split(".")
                 print(f"Info: rustc is using LLVM version {'.'.join(version)}")
                 return version
             else:

@@ -69,24 +73,27 @@ def get_rustc_llvm_version() -> Optional[List[int]]:

 @CommandProvider
 class MachCommands(CommandBase):
-    @Command('build', description='Build Servo', category='build')
-    @CommandArgument('--jobs', '-j',
-                     default=None,
-                     help='Number of jobs to run in parallel')
-    @CommandArgument('--no-package',
-                     action='store_true',
-                     help='For Android, disable packaging into a .apk after building')
-    @CommandArgument('--verbose', '-v',
-                     action='store_true',
-                     help='Print verbose output')
-    @CommandArgument('--very-verbose', '-vv',
-                     action='store_true',
-                     help='Print very verbose output')
-    @CommandArgument('params', nargs='...',
-                     help="Command-line arguments to be passed through to Cargo")
+    @Command("build", description="Build Servo", category="build")
+    @CommandArgument("--jobs", "-j", default=None, help="Number of jobs to run in parallel")
+    @CommandArgument(
+        "--no-package", action="store_true", help="For Android, disable packaging into a .apk after building"
+    )
+    @CommandArgument("--verbose", "-v", action="store_true", help="Print verbose output")
+    @CommandArgument("--very-verbose", "-vv", action="store_true", help="Print very verbose output")
+    @CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to Cargo")
     @CommandBase.common_command_arguments(build_configuration=True, build_type=True, package_configuration=True)
-    def build(self, build_type: BuildType, jobs=None, params=None, no_package=False,
-              verbose=False, very_verbose=False, with_asan=False, flavor=None, **kwargs):
+    def build(
+        self,
+        build_type: BuildType,
+        jobs=None,
+        params=None,
+        no_package=False,
+        verbose=False,
+        very_verbose=False,
+        with_asan=False,
+        flavor=None,
+        **kwargs,
+    ):
         opts = params or []

         if build_type.is_release():

@@ -112,8 +119,10 @@ class MachCommands(CommandBase):

         if with_asan:
             if target_triple not in SUPPORTED_ASAN_TARGETS:
-                print("AddressSanitizer is currently not supported on this platform\n",
-                      "See https://doc.rust-lang.org/beta/unstable-book/compiler-flags/sanitizer.html")
+                print(
+                    "AddressSanitizer is currently not supported on this platform\n",
+                    "See https://doc.rust-lang.org/beta/unstable-book/compiler-flags/sanitizer.html",
+                )
                 sys.exit(1)

             # do not use crown (clashes with different rust version)

@@ -157,12 +166,14 @@ class MachCommands(CommandBase):

         build_start = time()

-        if host != target_triple and 'windows' in target_triple:
-            if os.environ.get('VisualStudioVersion') or os.environ.get('VCINSTALLDIR'):
-                print("Can't cross-compile for Windows inside of a Visual Studio shell.\n"
-                      "Please run `python mach build [arguments]` to bypass automatic "
-                      "Visual Studio shell, and make sure the VisualStudioVersion and "
-                      "VCINSTALLDIR environment variables are not set.")
+        if host != target_triple and "windows" in target_triple:
+            if os.environ.get("VisualStudioVersion") or os.environ.get("VCINSTALLDIR"):
+                print(
+                    "Can't cross-compile for Windows inside of a Visual Studio shell.\n"
+                    "Please run `python mach build [arguments]` to bypass automatic "
+                    "Visual Studio shell, and make sure the VisualStudioVersion and "
+                    "VCINSTALLDIR environment variables are not set."
+                )
                 sys.exit(1)

         # Gather Cargo build timings (https://doc.rust-lang.org/cargo/reference/timings.html).

@@ -173,8 +184,7 @@ class MachCommands(CommandBase):
             for key in env:
                 print((key, env[key]))

-        status = self.run_cargo_build_like_command(
-            "rustc", opts, env=env, verbose=verbose, **kwargs)
+        status = self.run_cargo_build_like_command("rustc", opts, env=env, verbose=verbose, **kwargs)

         if status == 0:
             built_binary = self.get_binary_path(build_type, asan=with_asan)

@@ -201,12 +211,11 @@ class MachCommands(CommandBase):
             # like Instruments.app.
             try:
                 import Cocoa
+
                 icon_path = path.join(self.get_top_dir(), "resources", "servo_1024.png")
                 icon = Cocoa.NSImage.alloc().initWithContentsOfFile_(icon_path)
                 if icon is not None:
-                    Cocoa.NSWorkspace.sharedWorkspace().setIcon_forFile_options_(icon,
-                                                                                 built_binary,
-                                                                                 0)
+                    Cocoa.NSWorkspace.sharedWorkspace().setIcon_forFile_options_(icon, built_binary, 0)
             except ImportError:
                 pass

@@ -220,23 +229,16 @@ class MachCommands(CommandBase):

         return status

-    @Command('clean',
-             description='Clean the target/ and Python virtual environment directories',
-             category='build')
-    @CommandArgument('--manifest-path',
-                     default=None,
-                     help='Path to the manifest to the package to clean')
-    @CommandArgument('--verbose', '-v',
-                     action='store_true',
-                     help='Print verbose output')
-    @CommandArgument('params', nargs='...',
-                     help="Command-line arguments to be passed through to Cargo")
+    @Command("clean", description="Clean the target/ and Python virtual environment directories", category="build")
+    @CommandArgument("--manifest-path", default=None, help="Path to the manifest to the package to clean")
+    @CommandArgument("--verbose", "-v", action="store_true", help="Print verbose output")
+    @CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to Cargo")
     def clean(self, manifest_path=None, params=[], verbose=False):
         self.ensure_bootstrapped()

-        virtualenv_path = path.join(self.get_top_dir(), '.venv')
+        virtualenv_path = path.join(self.get_top_dir(), ".venv")
         if path.exists(virtualenv_path):
-            print('Removing virtualenv directory: %s' % virtualenv_path)
+            print("Removing virtualenv directory: %s" % virtualenv_path)
             shutil.rmtree(virtualenv_path)

         opts = ["--manifest-path", manifest_path or path.join(self.context.topdir, "Cargo.toml")]

@@ -263,6 +265,7 @@ class MachCommands(CommandBase):
     def send_notification(self, **kwargs):
         try:
             import dbus
+
             bus = dbus.SessionBus()
             notify_obj = bus.get_object("org.freedesktop.Notifications", "/org/freedesktop/Notifications")
             method = notify_obj.get_dbus_method("Notify", "org.freedesktop.Notifications")

@@ -274,17 +277,15 @@ class MachCommands(CommandBase):
                 kwargs.get("notification_subtitle"),
                 [],  # actions
                 {"transient": True},  # hints
-                -1  # timeout
+                -1,  # timeout
             )
         except Exception as exception:
-            print(f"[Warning] Could not generate notification: {exception}",
-                  file=sys.stderr)
+            print(f"[Warning] Could not generate notification: {exception}", file=sys.stderr)
             return True

         if notify_command:
             if call([notify_command, title, message]) != 0:
-                print("[Warning] Could not generate notification: "
-                      f"Could not run '{notify_command}'.", file=sys.stderr)
+                print(f"[Warning] Could not generate notification: Could not run '{notify_command}'.", file=sys.stderr)
         else:
             try:
                 notifier = LinuxNotifier if sys.platform.startswith("linux") else None

@@ -384,11 +385,12 @@ def package_msvc_dlls(servo_exe_dir: str, target: BuildTarget):
         "x86_64": "x64",
         "i686": "x86",
         "aarch64": "arm64",
-    }[target.triple().split('-')[0]]
+    }[target.triple().split("-")[0]]

     for msvc_redist_dir in servo.visual_studio.find_msvc_redist_dirs(vs_platform):
-        if copy_file(os.path.join(msvc_redist_dir, "msvcp140.dll")) and \
-                copy_file(os.path.join(msvc_redist_dir, "vcruntime140.dll")):
+        if copy_file(os.path.join(msvc_redist_dir, "msvcp140.dll")) and copy_file(
+            os.path.join(msvc_redist_dir, "vcruntime140.dll")
+        ):
             break

     # Different SDKs install the file into different directory structures within the

@@ -118,7 +118,7 @@ def find_dep_path_newest(package, bin_path):
     deps_path = path.join(path.split(bin_path)[0], "build")
     candidates = []
     with cd(deps_path):
-        for c in glob(package + '-*'):
+        for c in glob(package + "-*"):
             candidate_path = path.join(deps_path, c)
             if path.exists(path.join(candidate_path, "output")):
                 candidates.append(candidate_path)

@@ -152,24 +152,24 @@ def archive_deterministically(dir_to_archive, dest_archive, prepend_path=None):
             file_list.append(os.path.join(root, name))

     # Sort file entries with the fixed locale
-    with setlocale('C'):
+    with setlocale("C"):
         file_list.sort(key=functools.cmp_to_key(locale.strcoll))

     # Use a temporary file and atomic rename to avoid partially-formed
     # packaging (in case of exceptional situations like running out of disk space).
     # TODO do this in a temporary folder after #11983 is fixed
-    temp_file = '{}.temp~'.format(dest_archive)
-    with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0o644), 'wb') as out_file:
-        if dest_archive.endswith('.zip'):
-            with zipfile.ZipFile(temp_file, 'w', zipfile.ZIP_DEFLATED) as zip_file:
+    temp_file = "{}.temp~".format(dest_archive)
+    with os.fdopen(os.open(temp_file, os.O_WRONLY | os.O_CREAT, 0o644), "wb") as out_file:
+        if dest_archive.endswith(".zip"):
+            with zipfile.ZipFile(temp_file, "w", zipfile.ZIP_DEFLATED) as zip_file:
                 for entry in file_list:
                     arcname = entry
                     if prepend_path is not None:
                         arcname = os.path.normpath(os.path.join(prepend_path, arcname))
                     zip_file.write(entry, arcname=arcname)
         else:
-            with gzip.GzipFile(mode='wb', fileobj=out_file, mtime=0) as gzip_file:
-                with tarfile.open(fileobj=gzip_file, mode='w:') as tar_file:
+            with gzip.GzipFile(mode="wb", fileobj=out_file, mtime=0) as gzip_file:
+                with tarfile.open(fileobj=gzip_file, mode="w:") as tar_file:
                     for entry in file_list:
                         arcname = entry
                         if prepend_path is not None:

@@ -180,35 +180,35 @@ def archive_deterministically(dir_to_archive, dest_archive, prepend_path=None):

 def call(*args, **kwargs):
     """Wrap `subprocess.call`, printing the command if verbose=True."""
-    verbose = kwargs.pop('verbose', False)
+    verbose = kwargs.pop("verbose", False)
     if verbose:
-        print(' '.join(args[0]))
+        print(" ".join(args[0]))
     # we have to use shell=True in order to get PATH handling
     # when looking for the binary on Windows
-    return subprocess.call(*args, shell=sys.platform == 'win32', **kwargs)
+    return subprocess.call(*args, shell=sys.platform == "win32", **kwargs)


 def check_output(*args, **kwargs) -> bytes:
     """Wrap `subprocess.call`, printing the command if verbose=True."""
-    verbose = kwargs.pop('verbose', False)
+    verbose = kwargs.pop("verbose", False)
     if verbose:
-        print(' '.join(args[0]))
+        print(" ".join(args[0]))
     # we have to use shell=True in order to get PATH handling
     # when looking for the binary on Windows
-    return subprocess.check_output(*args, shell=sys.platform == 'win32', **kwargs)
+    return subprocess.check_output(*args, shell=sys.platform == "win32", **kwargs)


 def check_call(*args, **kwargs):
     """Wrap `subprocess.check_call`, printing the command if verbose=True.

-    Also fix any unicode-containing `env`, for subprocess """
-    verbose = kwargs.pop('verbose', False)
+    Also fix any unicode-containing `env`, for subprocess"""
+    verbose = kwargs.pop("verbose", False)

     if verbose:
-        print(' '.join(args[0]))
+        print(" ".join(args[0]))
     # we have to use shell=True in order to get PATH handling
     # when looking for the binary on Windows
-    proc = subprocess.Popen(*args, shell=sys.platform == 'win32', **kwargs)
+    proc = subprocess.Popen(*args, shell=sys.platform == "win32", **kwargs)
     status = None
     # Leave it to the subprocess to handle Ctrl+C. If it terminates as
     # a result of Ctrl+C, proc.wait() will return a status code, and,

@@ -221,19 +221,19 @@ def check_call(*args, **kwargs):
             pass

     if status:
-        raise subprocess.CalledProcessError(status, ' '.join(*args))
+        raise subprocess.CalledProcessError(status, " ".join(*args))


 def is_windows():
-    return sys.platform == 'win32'
+    return sys.platform == "win32"


 def is_macosx():
-    return sys.platform == 'darwin'
+    return sys.platform == "darwin"


 def is_linux():
-    return sys.platform.startswith('linux')
+    return sys.platform.startswith("linux")


 class BuildNotFound(Exception):

@@ -262,14 +262,13 @@ class CommandBase(object):
         # Contents of env vars are strings by default. This returns the
         # boolean value of the specified environment variable, or the
         # speciried default if the var doesn't contain True or False
-        return {'True': True, 'False': False}.get(os.environ.get(var), default)
+        return {"True": True, "False": False}.get(os.environ.get(var), default)

         def resolverelative(category, key):
             # Allow ~
             self.config[category][key] = path.expanduser(self.config[category][key])
             # Resolve relative paths
-            self.config[category][key] = path.join(context.topdir,
-                                                   self.config[category][key])
+            self.config[category][key] = path.join(context.topdir, self.config[category][key])

         if not hasattr(self.context, "bootstrapped"):
             self.context.bootstrapped = False

@@ -286,8 +285,7 @@ class CommandBase(object):
         self.config["tools"].setdefault("cache-dir", get_default_cache_dir(context.topdir))
         resolverelative("tools", "cache-dir")

-        default_cargo_home = os.environ.get("CARGO_HOME",
-                                            path.join(context.topdir, ".cargo"))
+        default_cargo_home = os.environ.get("CARGO_HOME", path.join(context.topdir, ".cargo"))
         self.config["tools"].setdefault("cargo-home-dir", default_cargo_home)
         resolverelative("tools", "cargo-home-dir")

@@ -323,7 +321,7 @@ class CommandBase(object):
             return self._rust_toolchain

         toolchain_file = path.join(self.context.topdir, "rust-toolchain.toml")
-        self._rust_toolchain = toml.load(toolchain_file)['toolchain']['channel']
+        self._rust_toolchain = toml.load(toolchain_file)["toolchain"]["channel"]
         return self._rust_toolchain

     def get_top_dir(self):

@@ -337,14 +335,14 @@ class CommandBase(object):
         binary_path = path.join(base_path, build_type.directory_name(), binary_name)

         if not path.exists(binary_path):
-            raise BuildNotFound('No Servo binary found. Perhaps you forgot to run `./mach build`?')
+            raise BuildNotFound("No Servo binary found. Perhaps you forgot to run `./mach build`?")

         return binary_path

     def detach_volume(self, mounted_volume):
         print("Detaching volume {}".format(mounted_volume))
         try:
-            subprocess.check_call(['hdiutil', 'detach', mounted_volume])
+            subprocess.check_call(["hdiutil", "detach", mounted_volume])
         except subprocess.CalledProcessError as e:
             print("Could not detach volume {} : {}".format(mounted_volume, e.returncode))
             sys.exit(1)

@@ -356,7 +354,7 @@ class CommandBase(object):
     def mount_dmg(self, dmg_path):
         print("Mounting dmg {}".format(dmg_path))
         try:
-            subprocess.check_call(['hdiutil', 'attach', dmg_path])
+            subprocess.check_call(["hdiutil", "attach", dmg_path])
         except subprocess.CalledProcessError as e:
             print("Could not mount Servo dmg : {}".format(e.returncode))
             sys.exit(1)

@@ -374,8 +372,9 @@ class CommandBase(object):
             self.detach_volume(mounted_volume)
         else:
             if is_windows():
-                command = 'msiexec /a {} /qn TARGETDIR={}'.format(
-                    os.path.join(nightlies_folder, destination_file), destination_folder)
+                command = "msiexec /a {} /qn TARGETDIR={}".format(
+                    os.path.join(nightlies_folder, destination_file), destination_folder
+                )
                 if subprocess.call(command, stdout=PIPE, stderr=PIPE) != 0:
                     print("Could not extract the nightly executable from the msi package.")
                     sys.exit(1)

@@ -394,8 +393,7 @@ class CommandBase(object):
         if nightly_date is None:
             return
         if not nightly_date:
-            print(
-                "No nightly date has been provided although the --nightly or -n flag has been passed.")
+            print("No nightly date has been provided although the --nightly or -n flag has been passed.")
             sys.exit(1)
         # Will alow us to fetch the relevant builds from the nightly repository
         os_prefix = "linux"

@@ -406,55 +404,44 @@ class CommandBase(object):
         nightly_date = nightly_date.strip()
         # Fetch the filename to download from the build list
         repository_index = NIGHTLY_REPOSITORY_URL + "?list-type=2&prefix=nightly"
-        req = urllib.request.Request(
-            "{}/{}/{}".format(repository_index, os_prefix, nightly_date))
+        req = urllib.request.Request("{}/{}/{}".format(repository_index, os_prefix, nightly_date))
         try:
             response = urllib.request.urlopen(req).read()
             tree = XML(response)
-            namespaces = {'ns': tree.tag[1:tree.tag.index('}')]}
-            file_to_download = tree.find('ns:Contents', namespaces).find(
-                'ns:Key', namespaces).text
+            namespaces = {"ns": tree.tag[1 : tree.tag.index("}")]}
+            file_to_download = tree.find("ns:Contents", namespaces).find("ns:Key", namespaces).text
         except urllib.error.URLError as e:
-            print("Could not fetch the available nightly versions from the repository : {}".format(
-                e.reason))
+            print("Could not fetch the available nightly versions from the repository : {}".format(e.reason))
             sys.exit(1)
         except AttributeError:
-            print("Could not fetch a nightly version for date {} and platform {}".format(
-                nightly_date, os_prefix))
+            print("Could not fetch a nightly version for date {} and platform {}".format(nightly_date, os_prefix))
             sys.exit(1)

         nightly_target_directory = path.join(self.context.topdir, "target")
         # ':' is not an authorized character for a file name on Windows
         # make sure the OS specific separator is used
-        target_file_path = file_to_download.replace(':', '-').split('/')
-        destination_file = os.path.join(
-            nightly_target_directory, os.path.join(*target_file_path))
+        target_file_path = file_to_download.replace(":", "-").split("/")
+        destination_file = os.path.join(nightly_target_directory, os.path.join(*target_file_path))
         # Once extracted, the nightly folder name is the tar name without the extension
         # (eg /foo/bar/baz.tar.gz extracts to /foo/bar/baz)
         destination_folder = os.path.splitext(destination_file)[0]
-        nightlies_folder = path.join(
-            nightly_target_directory, 'nightly', os_prefix)
+        nightlies_folder = path.join(nightly_target_directory, "nightly", os_prefix)

         # Make sure the target directory exists
         if not os.path.isdir(nightlies_folder):
-            print("The nightly folder for the target does not exist yet. Creating {}".format(
-                nightlies_folder))
+            print("The nightly folder for the target does not exist yet. Creating {}".format(nightlies_folder))
             os.makedirs(nightlies_folder)

         # Download the nightly version
         if os.path.isfile(path.join(nightlies_folder, destination_file)):
-            print("The nightly file {} has already been downloaded.".format(
-                destination_file))
+            print("The nightly file {} has already been downloaded.".format(destination_file))
         else:
-            print("The nightly {} does not exist yet, downloading it.".format(
-                destination_file))
-            download_file(destination_file, NIGHTLY_REPOSITORY_URL
-                          + file_to_download, destination_file)
+            print("The nightly {} does not exist yet, downloading it.".format(destination_file))
+            download_file(destination_file, NIGHTLY_REPOSITORY_URL + file_to_download, destination_file)

         # Extract the downloaded nightly version
         if os.path.isdir(destination_folder):
-            print("The nightly folder {} has already been extracted.".format(
-                destination_folder))
+            print("The nightly folder {} has already been extracted.".format(destination_folder))
         else:
             self.extract_nightly(nightlies_folder, destination_folder, destination_file)

@@ -493,34 +480,34 @@ class CommandBase(object):
         elif self.config["build"]["incremental"] is not None:
             env["CARGO_INCREMENTAL"] = "0"

-        env['RUSTFLAGS'] = env.get('RUSTFLAGS', "")
+        env["RUSTFLAGS"] = env.get("RUSTFLAGS", "")

         if self.config["build"]["rustflags"]:
-            env['RUSTFLAGS'] += " " + self.config["build"]["rustflags"]
+            env["RUSTFLAGS"] += " " + self.config["build"]["rustflags"]

         if not (self.config["build"]["ccache"] == ""):
-            env['CCACHE'] = self.config["build"]["ccache"]
+            env["CCACHE"] = self.config["build"]["ccache"]

         env["CARGO_TARGET_DIR"] = servo.util.get_target_dir()

         # Work around https://github.com/servo/servo/issues/24446
         # Argument-less str.split normalizes leading, trailing, and double spaces
-        env['RUSTFLAGS'] = " ".join(env['RUSTFLAGS'].split())
+        env["RUSTFLAGS"] = " ".join(env["RUSTFLAGS"].split())

         # Suppress known false-positives during memory leak sanitizing.
         env["LSAN_OPTIONS"] = f"{env.get('LSAN_OPTIONS', '')}:suppressions={ASAN_LEAK_SUPPRESSION_FILE}"

         self.target.configure_build_environment(env, self.config, self.context.topdir)

-        if sys.platform == 'win32' and 'windows' not in self.target.triple():
+        if sys.platform == "win32" and "windows" not in self.target.triple():
             # aws-lc-rs only supports the Ninja Generator when cross-compiling on windows hosts to non-windows.
-            env['TARGET_CMAKE_GENERATOR'] = "Ninja"
-            if shutil.which('ninja') is None:
+            env["TARGET_CMAKE_GENERATOR"] = "Ninja"
+            if shutil.which("ninja") is None:
                 print("Error: Cross-compiling servo on windows requires the Ninja tool to be installed and in PATH.")
                 print("Hint: Ninja-build is available on github at: https://github.com/ninja-build/ninja/releases")
                 exit(1)
             # `tr` is also required by the CMake build rules of `aws-lc-rs`
-            if shutil.which('tr') is None:
+            if shutil.which("tr") is None:
                 print("Error: Cross-compiling servo on windows requires the `tr` tool, which was not found.")
                 print("Hint: Try running ./mach from `git bash` instead of powershell.")
                 exit(1)

@@ -528,132 +515,146 @@ class CommandBase(object):
         return env

     @staticmethod
-    def common_command_arguments(build_configuration=False,
-                                 build_type=False,
-                                 binary_selection=False,
-                                 package_configuration=False
+    def common_command_arguments(
+        build_configuration=False, build_type=False, binary_selection=False, package_configuration=False
     ):
         decorators = []
         if build_type or binary_selection:
             decorators += [
-                CommandArgumentGroup('Build Type'),
-                CommandArgument('--release', '-r', group="Build Type",
-                                action='store_true',
-                                help='Build in release mode'),
-                CommandArgument('--dev', '--debug', '-d', group="Build Type",
-                                action='store_true',
-                                help='Build in development mode'),
-                CommandArgument('--prod', '--production', group="Build Type",
-                                action='store_true',
-                                help='Build in release mode without debug assertions'),
-                CommandArgument('--profile', group="Build Type",
-                                help='Build with custom Cargo profile'),
-                CommandArgument('--with-asan', action='store_true', help="Build with AddressSanitizer"),
+                CommandArgumentGroup("Build Type"),
+                CommandArgument(
+                    "--release", "-r", group="Build Type", action="store_true", help="Build in release mode"
+                ),
+                CommandArgument(
+                    "--dev", "--debug", "-d", group="Build Type", action="store_true", help="Build in development mode"
+                ),
+                CommandArgument(
+                    "--prod",
+                    "--production",
+                    group="Build Type",
+                    action="store_true",
+                    help="Build in release mode without debug assertions",
+                ),
+                CommandArgument("--profile", group="Build Type", help="Build with custom Cargo profile"),
+                CommandArgument("--with-asan", action="store_true", help="Build with AddressSanitizer"),
             ]

         if build_configuration:
             decorators += [
-                CommandArgumentGroup('Cross Compilation'),
+                CommandArgumentGroup("Cross Compilation"),
                 CommandArgument(
-                    '--target', '-t',
+                    "--target",
+                    "-t",
                     group="Cross Compilation",
                     default=None,
-                    help='Cross compile for given target platform',
+                    help="Cross compile for given target platform",
                 ),
                 CommandArgument(
-                    '--android', default=None, action='store_true',
-                    help='Build for Android. If --target is not specified, this '
-                    f'will choose the default target architecture ({AndroidTarget.DEFAULT_TRIPLE}).',
+                    "--android",
+                    default=None,
+                    action="store_true",
+                    help="Build for Android. If --target is not specified, this "
+                    f"will choose the default target architecture ({AndroidTarget.DEFAULT_TRIPLE}).",
                 ),
                 CommandArgument(
-                    '--ohos', default=None, action='store_true',
-                    help='Build for OpenHarmony. If --target is not specified, this '
-                    f'will choose a default target architecture ({OpenHarmonyTarget.DEFAULT_TRIPLE}).',
+                    "--ohos",
+                    default=None,
+                    action="store_true",
+                    help="Build for OpenHarmony. If --target is not specified, this "
+                    f"will choose a default target architecture ({OpenHarmonyTarget.DEFAULT_TRIPLE}).",
                 ),
-                CommandArgument('--win-arm64', action='store_true', help="Use arm64 Windows target"),
-                CommandArgumentGroup('Feature Selection'),
+                CommandArgument("--win-arm64", action="store_true", help="Use arm64 Windows target"),
+                CommandArgumentGroup("Feature Selection"),
                 CommandArgument(
-                    '--features', default=None, group="Feature Selection", nargs='+',
-                    help='Space-separated list of features to also build',
+                    "--features",
+                    default=None,
+                    group="Feature Selection",
+                    nargs="+",
+                    help="Space-separated list of features to also build",
                 ),
                 CommandArgument(
-                    '--media-stack', default=None, group="Feature Selection",
-                    choices=["gstreamer", "dummy"], help='Which media stack to use',
+                    "--media-stack",
+                    default=None,
+                    group="Feature Selection",
+                    choices=["gstreamer", "dummy"],
+                    help="Which media stack to use",
                 ),
                 CommandArgument(
-                    '--debug-mozjs',
+                    "--debug-mozjs",
                     default=False,
                     group="Feature Selection",
-                    action='store_true',
-                    help='Enable debug assertions in mozjs',
+                    action="store_true",
+                    help="Enable debug assertions in mozjs",
                 ),
                 CommandArgument(
-                    '--with-debug-assertions',
+                    "--with-debug-assertions",
                     default=False,
                     group="Feature Selection",
-                    action='store_true',
-                    help='Enable debug assertions in release',
+                    action="store_true",
+                    help="Enable debug assertions in release",
                 ),
                 CommandArgument(
-                    '--with-frame-pointer',
-                    default=None, group="Feature Selection",
-                    action='store_true',
-                    help='Build with frame pointer enabled, used by the background hang monitor.',
+                    "--with-frame-pointer",
+                    default=None,
+                    group="Feature Selection",
+                    action="store_true",
+                    help="Build with frame pointer enabled, used by the background hang monitor.",
                 ),
                 CommandArgument(
-                    '--use-crown',
-                    default=False,
-                    action='store_true',
-                    help="Enable Servo's `crown` linter tool"
-                )
+                    "--use-crown", default=False, action="store_true", help="Enable Servo's `crown` linter tool"
+                ),
             ]
         if package_configuration:
             decorators += [
-                CommandArgumentGroup('Packaging options'),
+                CommandArgumentGroup("Packaging options"),
                 CommandArgument(
-                    '--flavor', default=None, group="Packaging options",
-                    help='Product flavor to be used when packaging with Gradle/Hvigor (android/ohos).'
+                    "--flavor",
+                    default=None,
+                    group="Packaging options",
+                    help="Product flavor to be used when packaging with Gradle/Hvigor (android/ohos).",
                 ),
             ]

         if binary_selection:
             decorators += [
-                CommandArgumentGroup('Binary selection'),
-                CommandArgument('--bin', default=None,
-                                help='Launch with specific binary'),
-                CommandArgument('--nightly', '-n', default=None,
-                                help='Specify a YYYY-MM-DD nightly build to run'),
+                CommandArgumentGroup("Binary selection"),
+                CommandArgument("--bin", default=None, help="Launch with specific binary"),
+                CommandArgument("--nightly", "-n", default=None, help="Specify a YYYY-MM-DD nightly build to run"),
             ]

         def decorator_function(original_function):
             def configuration_decorator(self, *args, **kwargs):
                 if build_type or binary_selection:
                     # If `build_type` already exists in kwargs we are doing a recursive dispatch.
-                    if 'build_type' not in kwargs:
-                        kwargs['build_type'] = self.configure_build_type(
-                            kwargs['release'], kwargs['dev'], kwargs['prod'], kwargs['profile'],
+                    if "build_type" not in kwargs:
+                        kwargs["build_type"] = self.configure_build_type(
+                            kwargs["release"],
+                            kwargs["dev"],
+                            kwargs["prod"],
+                            kwargs["profile"],
                         )
-                    kwargs.pop('release', None)
-                    kwargs.pop('dev', None)
-                    kwargs.pop('prod', None)
-                    kwargs.pop('profile', None)
+                    kwargs.pop("release", None)
+                    kwargs.pop("dev", None)
+                    kwargs.pop("prod", None)
+                    kwargs.pop("profile", None)

                 if build_configuration:
                     self.configure_build_target(kwargs)
                     self.features = kwargs.get("features", None) or []
-                    self.enable_media = self.is_media_enabled(kwargs['media_stack'])
+                    self.enable_media = self.is_media_enabled(kwargs["media_stack"])

                 if binary_selection:
-                    if 'servo_binary' not in kwargs:
-                        kwargs['servo_binary'] = (kwargs.get('bin')
-                                                  or self.get_nightly_binary_path(kwargs.get('nightly'))
-                                                  or self.get_binary_path(kwargs.get('build_type'),
-                                                                          asan=kwargs.get('with_asan')))
-                    kwargs.pop('bin')
-                    kwargs.pop('nightly')
+                    if "servo_binary" not in kwargs:
+                        kwargs["servo_binary"] = (
+                            kwargs.get("bin")
+                            or self.get_nightly_binary_path(kwargs.get("nightly"))
+                            or self.get_binary_path(kwargs.get("build_type"), asan=kwargs.get("with_asan"))
+                        )
+                    kwargs.pop("bin")
+                    kwargs.pop("nightly")
                     if not build_type:
-                        kwargs.pop('build_type')
-                        kwargs.pop('with_asan')
+                        kwargs.pop("build_type")
+                        kwargs.pop("with_asan")

                 return original_function(self, *args, **kwargs)

@@ -669,9 +670,9 @@ class CommandBase(object):
     def allow_target_configuration(original_function):
         def target_configuration_decorator(self, *args, **kwargs):
             self.configure_build_target(kwargs, suppress_log=True)
-            kwargs.pop('target', False)
-            kwargs.pop('android', False)
-            kwargs.pop('ohos', False)
+            kwargs.pop("target", False)
+            kwargs.pop("android", False)
+            kwargs.pop("ohos", False)
             return original_function(self, *args, **kwargs)

         return target_configuration_decorator

@@ -709,15 +710,15 @@ class CommandBase(object):
         return BuildType.custom(profile)

     def configure_build_target(self, kwargs: Dict[str, Any], suppress_log: bool = False):
-        if hasattr(self.context, 'target'):
+        if hasattr(self.context, "target"):
             # This call is for a dispatched command and we've already configured
             # the target, so just use it.
             self.target = self.context.target
             return

-        android = kwargs.get('android') or self.config["build"]["android"]
-        ohos = kwargs.get('ohos') or self.config["build"]["ohos"]
-        target_triple = kwargs.get('target')
+        android = kwargs.get("android") or self.config["build"]["android"]
+        ohos = kwargs.get("ohos") or self.config["build"]["ohos"]
+        target_triple = kwargs.get("target")

         if android and ohos:
             print("Cannot build both android and ohos targets simultaneously.")

@@ -768,20 +769,23 @@ class CommandBase(object):
         # Once we drop support for this platform (it's currently needed for wpt.fyi runners),
         # we can remove this workaround and officially only support Ubuntu 22.04 and up.
         platform = servo.platform.get()
-        if not self.target.is_cross_build() and platform.is_linux and \
-                not platform.is_gstreamer_installed(self.target):
+        if not self.target.is_cross_build() and platform.is_linux and not platform.is_gstreamer_installed(self.target):
             return False

         return media_stack != "dummy"

     def run_cargo_build_like_command(
-        self, command: str, cargo_args: List[str],
-        env=None, verbose=False,
-        debug_mozjs=False, with_debug_assertions=False,
+        self,
+        command: str,
+        cargo_args: List[str],
+        env=None,
+        verbose=False,
+        debug_mozjs=False,
+        with_debug_assertions=False,
         with_frame_pointer=False,
         use_crown=False,
         target_override: Optional[str] = None,
-        **_kwargs
+        **_kwargs,
     ):
         env = env or self.build_env()

@@ -790,8 +794,7 @@ class CommandBase(object):
         platform = servo.platform.get()
         if self.enable_media and not platform.is_gstreamer_installed(self.target):
             raise FileNotFoundError(
-                "GStreamer libraries not found (>= version 1.18)."
-                "Please see installation instructions in README.md"
+                "GStreamer libraries not found (>= version 1.18).Please see installation instructions in README.md"
             )

         args = []

@@ -806,21 +809,23 @@ class CommandBase(object):
             args += ["--target", self.target.triple()]
             if type(self.target) in [AndroidTarget, OpenHarmonyTarget]:
                 # Note: in practice `cargo rustc` should just be used unconditionally.
-                assert command != 'build', "For Android / OpenHarmony `cargo rustc` must be used instead of cargo build"
-                if command == 'rustc':
+                assert command != "build", "For Android / OpenHarmony `cargo rustc` must be used instead of cargo build"
+                if command == "rustc":
                     args += ["--lib", "--crate-type=cdylib"]

         features = []

         if use_crown:
-            if 'CARGO_BUILD_RUSTC' in env:
-                current_rustc = env['CARGO_BUILD_RUSTC']
-                if current_rustc != 'crown':
-                    print('Error: `mach` was called with `--use-crown` while `CARGO_BUILD_RUSTC` was'
-                          f'already set to `{current_rustc}` in the parent environment.\n'
-                          'These options conflict, please specify only one of them.')
+            if "CARGO_BUILD_RUSTC" in env:
+                current_rustc = env["CARGO_BUILD_RUSTC"]
+                if current_rustc != "crown":
+                    print(
+                        "Error: `mach` was called with `--use-crown` while `CARGO_BUILD_RUSTC` was"
+                        f"already set to `{current_rustc}` in the parent environment.\n"
+                        "These options conflict, please specify only one of them."
+                    )
                     sys.exit(1)
-            env['CARGO_BUILD_RUSTC'] = 'crown'
+            env["CARGO_BUILD_RUSTC"] = "crown"
             # Modyfing `RUSTC` or `CARGO_BUILD_RUSTC` to use a linter does not cause
             # `cargo check` to rebuild. To work around this bug use a `crown` feature
             # to invalidate caches and force a rebuild / relint.

@@ -835,7 +840,7 @@ class CommandBase(object):
             features.append("debugmozjs")

         if with_frame_pointer:
-            env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C force-frame-pointers=yes"
+            env["RUSTFLAGS"] = env.get("RUSTFLAGS", "") + " -C force-frame-pointers=yes"
             features.append("profilemozjs")
         if self.config["build"]["webgl-backtrace"]:
             features.append("webgl-backtrace")

@@ -844,11 +849,11 @@ class CommandBase(object):
         args += ["--features", " ".join(features)]

         if with_debug_assertions or self.config["build"]["debug-assertions"]:
-            env['RUSTFLAGS'] = env.get('RUSTFLAGS', "") + " -C debug_assertions"
+            env["RUSTFLAGS"] = env.get("RUSTFLAGS", "") + " -C debug_assertions"

         # mozjs gets its Python from `env['PYTHON3']`, which defaults to `python3`,
         # but uv venv on Windows only provides a `python`, not `python3`.
-        env['PYTHON3'] = "python"
+        env["PYTHON3"] = "python"

         return call(["cargo", command] + args + cargo_args, env=env, verbose=verbose)

@@ -877,13 +882,9 @@ class CommandBase(object):
         if not self.target.is_cross_build():
             return

-        installed_targets = check_output(
-            ["rustup", "target", "list", "--installed"],
-            cwd=self.context.topdir
-        ).decode()
+        installed_targets = check_output(["rustup", "target", "list", "--installed"], cwd=self.context.topdir).decode()
         if self.target.triple() not in installed_targets:
-            check_call(["rustup", "target", "add", self.target.triple()],
-                       cwd=self.context.topdir)
+            check_call(["rustup", "target", "add", self.target.triple()], cwd=self.context.topdir)

     def ensure_rustup_version(self):
         try:

@@ -891,16 +892,18 @@ class CommandBase(object):
             ["rustup" + servo.platform.get().executable_suffix(), "--version"],
             # Silence "info: This is the version for the rustup toolchain manager,
             # not the rustc compiler."
-            stderr=open(os.devnull, "wb")
+            stderr=open(os.devnull, "wb"),
         )
     except OSError as e:
         if e.errno == NO_SUCH_FILE_OR_DIRECTORY:
-            print("It looks like rustup is not installed. See instructions at "
-                  "https://github.com/servo/servo/#setting-up-your-environment")
+            print(
+                "It looks like rustup is not installed. See instructions at "
+                "https://github.com/servo/servo/#setting-up-your-environment"
+            )
             print()
             sys.exit(1)
         raise
-    version = tuple(map(int, re.match(br"rustup (\d+)\.(\d+)\.(\d+)", version_line).groups()))
+    version = tuple(map(int, re.match(rb"rustup (\d+)\.(\d+)\.(\d+)", version_line).groups()))
     version_needed = (1, 23, 0)
     if version < version_needed:
         print("rustup is at version %s.%s.%s, Servo requires %s.%s.%s or more recent." % (version + version_needed))

@@ -910,25 +913,25 @@ class CommandBase(object):
     def ensure_clobbered(self, target_dir=None):
         if target_dir is None:
             target_dir = util.get_target_dir()
-        auto = True if os.environ.get('AUTOCLOBBER', False) else False
-        src_clobber = os.path.join(self.context.topdir, 'CLOBBER')
-        target_clobber = os.path.join(target_dir, 'CLOBBER')
+        auto = True if os.environ.get("AUTOCLOBBER", False) else False
+        src_clobber = os.path.join(self.context.topdir, "CLOBBER")
+        target_clobber = os.path.join(target_dir, "CLOBBER")

         if not os.path.exists(target_dir):
             os.makedirs(target_dir)

         if not os.path.exists(target_clobber):
             # Simply touch the file.
-            with open(target_clobber, 'a'):
+            with open(target_clobber, "a"):
                 pass

         if auto:
             if os.path.getmtime(src_clobber) > os.path.getmtime(target_clobber):
-                print('Automatically clobbering target directory: {}'.format(target_dir))
+                print("Automatically clobbering target directory: {}".format(target_dir))

                 try:
                     Registrar.dispatch("clean", context=self.context, verbose=True)
-                    print('Successfully completed auto clobber.')
+                    print("Successfully completed auto clobber.")
                 except subprocess.CalledProcessError as error:
                     sys.exit(error)
             else:

@@ -25,12 +25,10 @@ from servo.command_base import CommandBase, cd, call

 @CommandProvider
 class MachCommands(CommandBase):
-    @Command('check',
-             description='Run "cargo check"',
-             category='devenv')
+    @Command("check", description='Run "cargo check"', category="devenv")
     @CommandArgument(
-        'params', default=None, nargs='...',
-        help="Command-line arguments to be passed through to cargo check")
+        "params", default=None, nargs="...", help="Command-line arguments to be passed through to cargo check"
+    )
     @CommandBase.common_command_arguments(build_configuration=True, build_type=False)
     def check(self, params, **kwargs):
         if not params:

@@ -40,45 +38,34 @@ class MachCommands(CommandBase):
         self.ensure_clobbered()
         status = self.run_cargo_build_like_command("check", params, **kwargs)
         if status == 0:
-            print('Finished checking, binary NOT updated. Consider ./mach build before ./mach run')
+            print("Finished checking, binary NOT updated. Consider ./mach build before ./mach run")

         return status

-    @Command('cargo-update',
-             description='Same as update-cargo',
-             category='devenv')
+    @Command("cargo-update", description="Same as update-cargo", category="devenv")
     @CommandArgument(
-        'params', default=None, nargs='...',
-        help='Command-line arguments to be passed through to cargo update')
-    @CommandArgument(
-        '--package', '-p', default=None,
-        help='Updates selected package')
-    @CommandArgument(
-        '--all-packages', '-a', action='store_true',
-        help='Updates all packages')
-    @CommandArgument(
-        '--dry-run', '-d', action='store_true',
-        help='Show outdated packages.')
+        "params", default=None, nargs="...", help="Command-line arguments to be passed through to cargo update"
+    )
+    @CommandArgument("--package", "-p", default=None, help="Updates selected package")
+    @CommandArgument("--all-packages", "-a", action="store_true", help="Updates all packages")
+    @CommandArgument("--dry-run", "-d", action="store_true", help="Show outdated packages.")
     def cargo_update(self, params=None, package=None, all_packages=None, dry_run=None):
         self.update_cargo(params, package, all_packages, dry_run)

-    @Command('update-cargo',
-             description='Update Cargo dependencies',
-             category='devenv')
+    @Command("update-cargo", description="Update Cargo dependencies", category="devenv")
     @CommandArgument(
-        'params', default=None, nargs='...',
-        help='Command-line arguments to be passed through to cargo update')
+        "params", default=None, nargs="...", help="Command-line arguments to be passed through to cargo update"
+    )
+    @CommandArgument("--package", "-p", default=None, help="Updates the selected package")
     @CommandArgument(
-        '--package', '-p', default=None,
-        help='Updates the selected package')
-    @CommandArgument(
-        '--all-packages', '-a', action='store_true',
-        help='Updates all packages. NOTE! This is very likely to break your '
-        'working copy, making it impossible to build servo. Only do '
-        'this if you really know what you are doing.')
-    @CommandArgument(
-        '--dry-run', '-d', action='store_true',
-        help='Show outdated packages.')
+        "--all-packages",
+        "-a",
+        action="store_true",
+        help="Updates all packages. NOTE! This is very likely to break your "
+        "working copy, making it impossible to build servo. Only do "
+        "this if you really know what you are doing.",
+    )
+    @CommandArgument("--dry-run", "-d", action="store_true", help="Show outdated packages.")
     def update_cargo(self, params=None, package=None, all_packages=None, dry_run=None):
         if not params:
             params = []

@@ -97,12 +84,8 @@ class MachCommands(CommandBase):
         with cd(self.context.topdir):
             call(["cargo", "update"] + params, env=self.build_env())

-    @Command('rustc',
-             description='Run the Rust compiler',
-             category='devenv')
-    @CommandArgument(
-        'params', default=None, nargs='...',
-        help="Command-line arguments to be passed through to rustc")
+    @Command("rustc", description="Run the Rust compiler", category="devenv")
+    @CommandArgument("params", default=None, nargs="...", help="Command-line arguments to be passed through to rustc")
     def rustc(self, params):
         if params is None:
             params = []

@@ -110,12 +93,10 @@ class MachCommands(CommandBase):
         self.ensure_bootstrapped()
         return call(["rustc"] + params, env=self.build_env())

-    @Command('cargo-fix',
-             description='Run "cargo fix"',
-             category='devenv')
+    @Command("cargo-fix", description='Run "cargo fix"', category="devenv")
     @CommandArgument(
-        'params', default=None, nargs='...',
-        help="Command-line arguments to be passed through to cargo-fix")
+        "params", default=None, nargs="...", help="Command-line arguments to be passed through to cargo-fix"
+    )
     @CommandBase.common_command_arguments(build_configuration=True, build_type=False)
     def cargo_fix(self, params, **kwargs):
         if not params:

@@ -125,12 +106,8 @@ class MachCommands(CommandBase):
         self.ensure_clobbered()
         return self.run_cargo_build_like_command("fix", params, **kwargs)

-    @Command('clippy',
-             description='Run "cargo clippy"',
-             category='devenv')
-    @CommandArgument(
-        'params', default=None, nargs='...',
-        help="Command-line arguments to be passed through to clippy")
+    @Command("clippy", description='Run "cargo clippy"', category="devenv")
+    @CommandArgument("params", default=None, nargs="...", help="Command-line arguments to be passed through to clippy")
     @CommandBase.common_command_arguments(build_configuration=True, build_type=False)
     def cargo_clippy(self, params, **kwargs):
         if not params:

@@ -139,48 +116,42 @@ class MachCommands(CommandBase):
         self.ensure_bootstrapped()
         self.ensure_clobbered()
         env = self.build_env()
-        env['RUSTC'] = 'rustc'
+        env["RUSTC"] = "rustc"
         return self.run_cargo_build_like_command("clippy", params, env=env, **kwargs)

-    @Command('grep',
-             description='`git grep` for selected directories.',
-             category='devenv')
+    @Command("grep", description="`git grep` for selected directories.", category="devenv")
     @CommandArgument(
-        'params', default=None, nargs='...',
-        help="Command-line arguments to be passed through to `git grep`")
+        "params", default=None, nargs="...", help="Command-line arguments to be passed through to `git grep`"
+    )
     def grep(self, params):
         if not params:
             params = []
         # get all directories under tests/
-        tests_dirs = listdir('tests')
+        tests_dirs = listdir("tests")
         # Directories to be excluded under tests/
-        excluded_tests_dirs = ['wpt', 'jquery']
+        excluded_tests_dirs = ["wpt", "jquery"]
         tests_dirs = filter(lambda dir: dir not in excluded_tests_dirs, tests_dirs)
         # Set of directories in project root
-        root_dirs = ['components', 'ports', 'python', 'etc', 'resources']
+        root_dirs = ["components", "ports", "python", "etc", "resources"]
         # Generate absolute paths for directories in tests/ and project-root/
-        tests_dirs_abs = [path.join(self.context.topdir, 'tests', s) for s in tests_dirs]
+        tests_dirs_abs = [path.join(self.context.topdir, "tests", s) for s in tests_dirs]
         root_dirs_abs = [path.join(self.context.topdir, s) for s in root_dirs]
         # Absolute paths for all directories to be considered
         grep_paths = root_dirs_abs + tests_dirs_abs
         return call(
-            ["git"] + ["grep"] + params + ['--'] + grep_paths + [':(exclude)*.min.js', ':(exclude)*.min.css'],
-            env=self.build_env())
+            ["git"] + ["grep"] + params + ["--"] + grep_paths + [":(exclude)*.min.js", ":(exclude)*.min.css"],
+            env=self.build_env(),
+        )

-    @Command('fetch',
-             description='Fetch Rust, Cargo and Cargo dependencies',
-             category='devenv')
+    @Command("fetch", description="Fetch Rust, Cargo and Cargo dependencies", category="devenv")
     def fetch(self):
         self.ensure_bootstrapped()
         return call(["cargo", "fetch"], env=self.build_env())

-    @Command('ndk-stack',
-             description='Invoke the ndk-stack tool with the expected symbol paths',
-             category='devenv')
-    @CommandArgument('--release', action='store_true', help="Use release build symbols")
-    @CommandArgument('--target', action='store', default="armv7-linux-androideabi",
-                     help="Build target")
-    @CommandArgument('logfile', action='store', help="Path to logcat output with crash report")
+    @Command("ndk-stack", description="Invoke the ndk-stack tool with the expected symbol paths", category="devenv")
+    @CommandArgument("--release", action="store_true", help="Use release build symbols")
+    @CommandArgument("--target", action="store", default="armv7-linux-androideabi", help="Build target")
+    @CommandArgument("logfile", action="store", help="Path to logcat output with crash report")
     def stack(self, release, target, logfile):
         if not path.isfile(logfile):
             print(logfile + " doesn't exist")

@@ -190,21 +161,13 @@ class MachCommands(CommandBase):
         ndk_stack = path.join(env["ANDROID_NDK"], "ndk-stack")
         self.setup_configuration_for_android_target(target)
         sym_path = path.join(
-            "target",
-            target,
-            "release" if release else "debug",
-            "apk",
-            "obj",
-            "local",
-            self.config["android"]["lib"])
+            "target", target, "release" if release else "debug", "apk", "obj", "local", self.config["android"]["lib"]
+        )
         print(subprocess.check_output([ndk_stack, "-sym", sym_path, "-dump", logfile]))

-    @Command('ndk-gdb',
-             description='Invoke ndk-gdb tool with the expected symbol paths',
-             category='devenv')
-    @CommandArgument('--release', action='store_true', help="Use release build symbols")
-    @CommandArgument('--target', action='store', default="armv7-linux-androideabi",
-                     help="Build target")
+    @Command("ndk-gdb", description="Invoke ndk-gdb tool with the expected symbol paths", category="devenv")
+    @CommandArgument("--release", action="store_true", help="Use release build symbols")
+    @CommandArgument("--target", action="store", default="armv7-linux-androideabi", help="Build target")
     def ndk_gdb(self, release, target):
         env = self.build_env()
         ndk_gdb = path.join(env["ANDROID_NDK"], "ndk-gdb")

|
@ -218,7 +181,7 @@ class MachCommands(CommandBase):
|
||||||
"apk",
|
"apk",
|
||||||
"obj",
|
"obj",
|
||||||
"local",
|
"local",
|
||||||
self.config["android"]["lib"]
|
self.config["android"]["lib"],
|
||||||
),
|
),
|
||||||
path.join(
|
path.join(
|
||||||
getcwd(),
|
getcwd(),
|
||||||
|
@ -227,27 +190,38 @@ class MachCommands(CommandBase):
|
||||||
"release" if release else "debug",
|
"release" if release else "debug",
|
||||||
"apk",
|
"apk",
|
||||||
"libs",
|
"libs",
|
||||||
self.config["android"]["lib"]
|
self.config["android"]["lib"],
|
||||||
),
|
),
|
||||||
]
|
]
|
||||||
env["NDK_PROJECT_PATH"] = path.join(getcwd(), "support", "android", "apk")
|
env["NDK_PROJECT_PATH"] = path.join(getcwd(), "support", "android", "apk")
|
||||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||||
|
|
||||||
with tempfile.NamedTemporaryFile(delete=False) as f:
|
with tempfile.NamedTemporaryFile(delete=False) as f:
|
||||||
f.write('\n'.join([
|
f.write(
|
||||||
|
"\n".join(
|
||||||
|
[
|
||||||
"python",
|
"python",
|
||||||
"param = gdb.parameter('solib-search-path')",
|
"param = gdb.parameter('solib-search-path')",
|
||||||
"param += ':{}'".format(':'.join(sym_paths)),
|
"param += ':{}'".format(":".join(sym_paths)),
|
||||||
"gdb.execute('set solib-search-path ' + param)",
|
"gdb.execute('set solib-search-path ' + param)",
|
||||||
"end",
|
"end",
|
||||||
]))
|
]
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
p = subprocess.Popen([
|
p = subprocess.Popen(
|
||||||
|
[
|
||||||
ndk_gdb,
|
ndk_gdb,
|
||||||
"--adb", adb_path,
|
"--adb",
|
||||||
"--project", "support/android/apk/servoapp/src/main/",
|
adb_path,
|
||||||
"--launch", "org.servo.servoshell.MainActivity",
|
"--project",
|
||||||
"-x", f.name,
|
"support/android/apk/servoapp/src/main/",
|
||||||
|
"--launch",
|
||||||
|
"org.servo.servoshell.MainActivity",
|
||||||
|
"-x",
|
||||||
|
f.name,
|
||||||
"--verbose",
|
"--verbose",
|
||||||
], env=env)
|
],
|
||||||
|
env=env,
|
||||||
|
)
|
||||||
return p.wait()
|
return p.wait()
|
||||||
|
|
|
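The temporary file written above is a gdb command script: it opens gdb's embedded Python block, appends the symbol directories to `solib-search-path`, and applies the result. Assuming two symbol paths, the generated script looks like this:

```python
# Sketch: the gdb script generated above, assuming sym_paths = ["/syms/a", "/syms/b"].
script = "\n".join(
    [
        "python",
        "param = gdb.parameter('solib-search-path')",
        "param += ':{}'".format(":".join(["/syms/a", "/syms/b"])),
        "gdb.execute('set solib-search-path ' + param)",
        "end",
    ]
)
print(script)
```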
@ -51,10 +51,21 @@ class DevtoolsTests(unittest.IsolatedAsyncioTestCase):
def test_sources_list(self):
self.start_web_server(test_dir=os.path.join(DevtoolsTests.script_path, "devtools_tests/sources"))
self.run_servoshell()
self.assert_sources_list(2, set([
self.assert_sources_list(
tuple([f"{self.base_url}/classic.js", f"{self.base_url}/test.html", "https://servo.org/js/load-table.js"]),
2,
set(
[
tuple(
[
f"{self.base_url}/classic.js",
f"{self.base_url}/test.html",
"https://servo.org/js/load-table.js",
]
),
tuple([f"{self.base_url}/worker.js"]),
]))
]
),
)

def test_sources_list_with_data_no_scripts(self):
self.run_servoshell(url="data:text/html,")
@ -70,7 +81,7 @@ class DevtoolsTests(unittest.IsolatedAsyncioTestCase):

def test_sources_list_with_data_external_classic_script(self):
self.start_web_server(test_dir=os.path.join(DevtoolsTests.script_path, "devtools_tests/sources"))
self.run_servoshell(url=f"data:text/html,<script src=\"{self.base_url}/classic.js\"></script>")
self.run_servoshell(url=f'data:text/html,<script src="{self.base_url}/classic.js"></script>')
self.assert_sources_list(1, set([tuple([f"{self.base_url}/classic.js"])]))

def test_sources_list_with_data_empty_inline_module_script(self):
@ -158,7 +169,9 @@ class DevtoolsTests(unittest.IsolatedAsyncioTestCase):
done.set_result(e)

client.add_event_listener(
watcher.actor_id, Events.Watcher.TARGET_AVAILABLE_FORM, on_target,
watcher.actor_id,
Events.Watcher.TARGET_AVAILABLE_FORM,
on_target,
)
watcher.watch_targets(WatcherActor.Targets.FRAME)
watcher.watch_targets(WatcherActor.Targets.WORKER)
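One detail worth noting in the expanded test: the expected value is a set of tuples rather than a set of lists, because only hashable values can be set members. A tiny illustration with placeholder URLs:

```python
# Sketch: tuples are hashable and can live in a set; lists cannot.
page = ("https://example.test/classic.js", "https://example.test/test.html")
worker = ("https://example.test/worker.js",)
expected = {page, worker}
assert len(expected) == 2
```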
@ -15,7 +15,7 @@ from typing import Set

# This file is called as a script from components/servo/build.rs, so
# we need to explicitly modify the search path here.
sys.path[0:0] = [os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))]
sys.path[0:0] = [os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))]
from servo.platform.build_target import BuildTarget  # noqa: E402

GSTREAMER_BASE_LIBS = [
@ -158,18 +158,12 @@ def windows_dlls():


def windows_plugins():
libs = [
libs = [*GSTREAMER_PLUGIN_LIBS, *GSTREAMER_WIN_PLUGIN_LIBS]
*GSTREAMER_PLUGIN_LIBS,
*GSTREAMER_WIN_PLUGIN_LIBS
]
return [f"{lib}.dll" for lib in libs]


def macos_plugins():
plugins = [
plugins = [*GSTREAMER_PLUGIN_LIBS, *GSTREAMER_MAC_PLUGIN_LIBS]
*GSTREAMER_PLUGIN_LIBS,
*GSTREAMER_MAC_PLUGIN_LIBS
]

return [f"lib{plugin}.dylib" for plugin in plugins]

@ -178,22 +172,23 @@ def write_plugin_list(target):
plugins = []
if "apple-" in target:
plugins = macos_plugins()
elif '-windows-' in target:
elif "-windows-" in target:
plugins = windows_plugins()
print('''/* This is a generated file. Do not modify. */
print(
"""/* This is a generated file. Do not modify. */

pub(crate) static GSTREAMER_PLUGINS: &[&str] = &[
%s
];
''' % ',\n'.join(map(lambda x: '"' + x + '"', plugins)))
"""
% ",\n".join(map(lambda x: '"' + x + '"', plugins))
)


def is_macos_system_library(library_path: str) -> bool:
"""Returns true if if the given dependency line from otool refers to
a system library that should not be packaged."""
return (library_path.startswith("/System/Library")
return library_path.startswith("/System/Library") or library_path.startswith("/usr/lib") or ".asan." in library_path
or library_path.startswith("/usr/lib")
or ".asan." in library_path)


def rewrite_dependencies_to_be_relative(binary: str, dependency_lines: Set[str], relative_path: str):
@ -205,7 +200,7 @@ def rewrite_dependencies_to_be_relative(binary: str, dependency_lines: Set[str],
continue

new_path = os.path.join("@executable_path", relative_path, os.path.basename(dependency_line))
arguments = ['install_name_tool', '-change', dependency_line, new_path, binary]
arguments = ["install_name_tool", "-change", dependency_line, new_path, binary]
try:
subprocess.check_call(arguments)
except subprocess.CalledProcessError as exception:
@ -220,7 +215,7 @@ def make_rpath_path_absolute(dylib_path_from_otool: str, rpath: str):

# Not every dependency is in the same directory as the binary that is references. For
# instance, plugins dylibs can be found in "gstreamer-1.0".
path_relative_to_rpath = dylib_path_from_otool.replace('@rpath/', '')
path_relative_to_rpath = dylib_path_from_otool.replace("@rpath/", "")
for relative_directory in ["", "..", "gstreamer-1.0"]:
full_path = os.path.join(rpath, relative_directory, path_relative_to_rpath)
if os.path.exists(full_path):
@ -232,13 +227,13 @@ def make_rpath_path_absolute(dylib_path_from_otool: str, rpath: str):
def find_non_system_dependencies_with_otool(binary_path: str) -> Set[str]:
"""Given a binary path, find all dylib dependency lines that do not refer to
system libraries."""
process = subprocess.Popen(['/usr/bin/otool', '-L', binary_path], stdout=subprocess.PIPE)
process = subprocess.Popen(["/usr/bin/otool", "-L", binary_path], stdout=subprocess.PIPE)
output = set()

for line in map(lambda line: line.decode('utf8'), process.stdout):
for line in map(lambda line: line.decode("utf8"), process.stdout):
if not line.startswith("\t"):
continue
dependency = line.split(' ', 1)[0][1:]
dependency = line.split(" ", 1)[0][1:]

# No need to do any processing for system libraries. They should be
# present on all macOS systems.
@ -288,8 +283,7 @@ def package_gstreamer_dylibs(binary_path: str, library_target_directory: str, ta
# which are loaded dynmically at runtime and don't appear in `otool` output.
binary_dependencies = set(find_non_system_dependencies_with_otool(binary_path))
binary_dependencies.update(
[os.path.join(gstreamer_root_libs, "gstreamer-1.0", plugin)
[os.path.join(gstreamer_root_libs, "gstreamer-1.0", plugin) for plugin in macos_plugins()]
for plugin in macos_plugins()]
)

rewrite_dependencies_to_be_relative(binary_path, binary_dependencies, relative_path)
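The otool parser in this hunk depends on the shape of `otool -L` output: the first line names the binary itself and each dependency line is tab-indented, with the dylib path followed by version details. A self-contained sketch of the same extraction over canned lines (the sample output is illustrative, not captured tool output):

```python
# Sketch: pulling dylib paths out of otool -L style output; sample lines are made up.
sample = [
    "servo:\n",
    "\t/usr/lib/libSystem.B.dylib (compatibility version 1.0.0)\n",
    "\t@rpath/libgstreamer-1.0.0.dylib (compatibility version 1.0.0)\n",
]
deps = set()
for line in sample:
    if not line.startswith("\t"):
        continue  # skip the header line naming the binary itself
    deps.add(line.split(" ", 1)[0][1:])  # drop the tab, keep the path
print(sorted(deps))
```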
@ -15,12 +15,7 @@ import test
import logging
import random

test_summary = {
test_summary = {test.Status.KILLED: 0, test.Status.SURVIVED: 0, test.Status.SKIPPED: 0, test.Status.UNEXPECTED: 0}
test.Status.KILLED: 0,
test.Status.SURVIVED: 0,
test.Status.SKIPPED: 0,
test.Status.UNEXPECTED: 0
}


def get_folders_list(path):
@ -33,7 +28,7 @@ def get_folders_list(path):


def mutation_test_for(mutation_path):
test_mapping_file = join(mutation_path, 'test_mapping.json')
test_mapping_file = join(mutation_path, "test_mapping.json")
if isfile(test_mapping_file):
json_data = open(test_mapping_file).read()
test_mapping = json.loads(json_data)
@ -41,7 +36,7 @@ def mutation_test_for(mutation_path):
source_files = list(test_mapping.keys())
random.shuffle(source_files)
for src_file in source_files:
status = test.mutation_test(join(mutation_path, src_file.encode('utf-8')), test_mapping[src_file])
status = test.mutation_test(join(mutation_path, src_file.encode("utf-8")), test_mapping[src_file])
test_summary[status] += 1
# Run mutation test in all folder in the path.
for folder in get_folders_list(mutation_path):
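The `test_summary` dict above tallies mutant outcomes keyed directly by `test.Status` members; enum members are hashable, so they work as dict keys. A self-contained sketch of the pattern (the member names mirror the runner, the values are assumed):

```python
from enum import Enum, auto

# Sketch: counting outcomes keyed by enum members, as the runner above does.
class Status(Enum):
    KILLED = auto()
    SURVIVED = auto()
    SKIPPED = auto()
    UNEXPECTED = auto()

summary = {status: 0 for status in Status}
for outcome in [Status.KILLED, Status.KILLED, Status.SURVIVED]:
    summary[outcome] += 1
print(summary[Status.KILLED])  # 2
```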
@ -39,7 +39,7 @@ class Strategy:
def mutate(self, file_name):
line_numbers = []
for line in fileinput.input(file_name):
if not is_comment(line) and re.search(self._replace_strategy['regex'], line):
if not is_comment(line) and re.search(self._replace_strategy["regex"], line):
line_numbers.append(fileinput.lineno())
if len(line_numbers) == 0:
return -1
@ -47,7 +47,7 @@ class Strategy:
mutation_line_number = line_numbers[random.randint(0, len(line_numbers) - 1)]
for line in fileinput.input(file_name, inplace=True):
if fileinput.lineno() == mutation_line_number:
line = re.sub(self._replace_strategy['regex'], self._replace_strategy['replaceString'], line)
line = re.sub(self._replace_strategy["regex"], self._replace_strategy["replaceString"], line)
print(line.rstrip())
return mutation_line_number

@ -56,30 +56,21 @@ class AndOr(Strategy):
def __init__(self):
Strategy.__init__(self)
logical_and = r"(?<=\s)&&(?=\s)"
self._replace_strategy = {
self._replace_strategy = {"regex": logical_and, "replaceString": "||"}
'regex': logical_and,
'replaceString': '||'
}


class IfTrue(Strategy):
def __init__(self):
Strategy.__init__(self)
if_condition = r"(?<=if\s)\s*(?!let\s)(.*)(?=\s\{)"
self._replace_strategy = {
self._replace_strategy = {"regex": if_condition, "replaceString": "true"}
'regex': if_condition,
'replaceString': 'true'
}


class IfFalse(Strategy):
def __init__(self):
Strategy.__init__(self)
if_condition = r"(?<=if\s)\s*(?!let\s)(.*)(?=\s\{)"
self._replace_strategy = {
self._replace_strategy = {"regex": if_condition, "replaceString": "false"}
'regex': if_condition,
'replaceString': 'false'
}


class ModifyComparision(Strategy):
@ -87,10 +78,7 @@ class ModifyComparision(Strategy):
Strategy.__init__(self)
less_than_equals = r"(?<=\s)(\<)\=(?=\s)"
greater_than_equals = r"(?<=\s)(\<)\=(?=\s)"
self._replace_strategy = {
self._replace_strategy = {"regex": (less_than_equals + "|" + greater_than_equals), "replaceString": r"\1"}
'regex': (less_than_equals + '|' + greater_than_equals),
'replaceString': r"\1"
}


class MinusToPlus(Strategy):
@ -98,10 +86,7 @@ class MinusToPlus(Strategy):
Strategy.__init__(self)
arithmetic_minus = r"(?<=\s)\-(?=\s.+)"
minus_in_shorthand = r"(?<=\s)\-(?=\=)"
self._replace_strategy = {
self._replace_strategy = {"regex": (arithmetic_minus + "|" + minus_in_shorthand), "replaceString": "+"}
'regex': (arithmetic_minus + '|' + minus_in_shorthand),
'replaceString': '+'
}


class PlusToMinus(Strategy):
@ -109,20 +94,14 @@ class PlusToMinus(Strategy):
Strategy.__init__(self)
arithmetic_plus = r"(?<=[^\"]\s)\+(?=\s[^A-Z\'?\":\{]+)"
plus_in_shorthand = r"(?<=\s)\+(?=\=)"
self._replace_strategy = {
self._replace_strategy = {"regex": (arithmetic_plus + "|" + plus_in_shorthand), "replaceString": "-"}
'regex': (arithmetic_plus + '|' + plus_in_shorthand),
'replaceString': '-'
}


class AtomicString(Strategy):
def __init__(self):
Strategy.__init__(self)
string_literal = r"(?<=\").+(?=\")"
self._replace_strategy = {
self._replace_strategy = {"regex": string_literal, "replaceString": " "}
'regex': string_literal,
'replaceString': ' '
}


class DuplicateLine(Strategy):
@ -136,9 +115,20 @@ class DuplicateLine(Strategy):
plus_equals_statement = r".+?\s\+\=\s.*"
minus_equals_statement = r".+?\s\-\=\s.*"
self._replace_strategy = {
'regex': (append_statement + '|' + remove_statement + '|' + push_statement
"regex": (
+ '|' + pop_statement + '|' + plus_equals_statement + '|' + minus_equals_statement),
append_statement
'replaceString': r"\g<0>\n\g<0>",
+ "|"
+ remove_statement
+ "|"
+ push_statement
+ "|"
+ pop_statement
+ "|"
+ plus_equals_statement
+ "|"
+ minus_equals_statement
),
"replaceString": r"\g<0>\n\g<0>",
}

@ -161,14 +151,17 @@ class DeleteIfBlock(Strategy):
while line_to_mutate <= len(code_lines):
current_line = code_lines[line_to_mutate - 1]
next_line = code_lines[line_to_mutate]
if re.search(self.else_block, current_line) is not None \
if (
or re.search(self.else_block, next_line) is not None:
re.search(self.else_block, current_line) is not None
or re.search(self.else_block, next_line) is not None
):
if_blocks.pop(random_index)
if len(if_blocks) == 0:
return -1
else:
random_index, start_counter, end_counter, lines_to_delete, line_to_mutate = \
random_index, start_counter, end_counter, lines_to_delete, line_to_mutate = init_variables(
init_variables(if_blocks)
if_blocks
)
continue
lines_to_delete.append(line_to_mutate)
for ch in current_line:
@ -183,8 +176,17 @@ class DeleteIfBlock(Strategy):


def get_strategies():
return AndOr, IfTrue, IfFalse, ModifyComparision, PlusToMinus, MinusToPlus, \
return (
AtomicString, DuplicateLine, DeleteIfBlock
AndOr,
IfTrue,
IfFalse,
ModifyComparision,
PlusToMinus,
MinusToPlus,
AtomicString,
DuplicateLine,
DeleteIfBlock,
)


class Mutator:
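Each strategy above is just a regex plus a replacement string applied to one randomly chosen line of Rust source; the lookbehind and lookahead keep the surrounding whitespace intact. For example, the AndOr pattern flips a logical `&&` to `||`:

```python
import re

# Sketch: applying the AndOr strategy's pattern to a single line of Rust code.
logical_and = r"(?<=\s)&&(?=\s)"
line = "if ready && valid {"
print(re.sub(logical_and, "||", line))  # if ready || valid {
```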
@ -14,7 +14,8 @@ import logging

from mutator import Mutator, get_strategies
from enum import Enum
DEVNULL = open(os.devnull, 'wb')

DEVNULL = open(os.devnull, "wb")

logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s", level=logging.DEBUG)

@ -28,7 +29,7 @@ class Status(Enum):

def mutation_test(file_name, tests):
status = Status.UNEXPECTED
local_changes_present = subprocess.call('git diff --quiet {0}'.format(file_name), shell=True)
local_changes_present = subprocess.call("git diff --quiet {0}".format(file_name), shell=True)
if local_changes_present == 1:
status = Status.SKIPPED
logging.warning("{0} has local changes, please commit/remove changes before running the test".format(file_name))
@ -46,24 +47,24 @@ def mutation_test(file_name, tests):
if subprocess.call(test_command, shell=True, stdout=DEVNULL):
logging.error("Compilation Failed: Unexpected error")
logging.error("Failed: while running `{0}`".format(test_command))
subprocess.call('git --no-pager diff {0}'.format(file_name), shell=True)
subprocess.call("git --no-pager diff {0}".format(file_name), shell=True)
status = Status.UNEXPECTED
else:
for test in tests:
test_command = "python mach test-wpt {0} --release".format(test.encode('utf-8'))
test_command = "python mach test-wpt {0} --release".format(test.encode("utf-8"))
logging.info("running `{0}` test for mutant {1}:{2}".format(test, file_name, mutated_line))
test_status = subprocess.call(test_command, shell=True, stdout=DEVNULL)
if test_status != 0:
logging.error("Failed: while running `{0}`".format(test_command))
logging.error("mutated file {0} diff".format(file_name))
subprocess.call('git --no-pager diff {0}'.format(file_name), shell=True)
subprocess.call("git --no-pager diff {0}".format(file_name), shell=True)
status = Status.SURVIVED
else:
logging.info("Success: Mutation killed by {0}".format(test.encode('utf-8')))
logging.info("Success: Mutation killed by {0}".format(test.encode("utf-8")))
status = Status.KILLED
break
logging.info("reverting mutant {0}:{1}\n".format(file_name, mutated_line))
subprocess.call('git checkout {0}'.format(file_name), shell=True)
subprocess.call("git checkout {0}".format(file_name), shell=True)
break
elif not len(strategies):
# All strategies are tried
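The dirty-tree guard above works because `git diff --quiet` exits with status 0 when the path is unmodified and 1 when it has uncommitted changes, and `subprocess.call` returns that status directly. A standalone sketch (the path is a placeholder inside a git checkout):

```python
import subprocess

# Sketch: git diff --quiet exits 1 when the given path has uncommitted changes.
dirty = subprocess.call("git diff --quiet some/file.rs", shell=True)
if dirty == 1:
    print("skipping: local changes present")
```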
@ -42,23 +42,25 @@ from servo.command_base import (
from servo.util import delete, get_target_dir

PACKAGES = {
'android': [
"android": [
'android/aarch64-linux-android/release/servoapp.apk',
"android/aarch64-linux-android/release/servoapp.apk",
'android/aarch64-linux-android/release/servoview.aar',
"android/aarch64-linux-android/release/servoview.aar",
],
'linux': [
"linux": [
'production/servo-tech-demo.tar.gz',
"production/servo-tech-demo.tar.gz",
],
'mac': [
"mac": [
'production/servo-tech-demo.dmg',
"production/servo-tech-demo.dmg",
],
'windows-msvc': [
"windows-msvc": [
r'production\msi\Servo.exe',
r"production\msi\Servo.exe",
r'production\msi\Servo.zip',
r"production\msi\Servo.zip",
],
'ohos': [
"ohos": [
('openharmony/aarch64-unknown-linux-ohos/release/entry/build/'
(
'default/outputs/default/servoshell-default-signed.hap')
"openharmony/aarch64-unknown-linux-ohos/release/entry/build/"
"default/outputs/default/servoshell-default-signed.hap"
)
],
}

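The reflowed `ohos` entry relies on Python's implicit concatenation of adjacent string literals inside parentheses, so the long artifact path remains a single string:

```python
# Sketch: adjacent string literals inside parentheses concatenate at compile time.
hap_path = (
    "openharmony/aarch64-unknown-linux-ohos/release/entry/build/"
    "default/outputs/default/servoshell-default-signed.hap"
)
print(hap_path.endswith("servoshell-default-signed.hap"))  # True
```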
@ -71,8 +73,7 @@ def packages_for_platform(platform):


def listfiles(directory):
return [f for f in os.listdir(directory)
return [f for f in os.listdir(directory) if path.isfile(path.join(directory, f))]
if path.isfile(path.join(directory, f))]


def copy_windows_dependencies(binary_path, destination):
@ -101,20 +102,10 @@ def check_call_with_randomized_backoff(args: List[str], retries: int) -> int:

@CommandProvider
class PackageCommands(CommandBase):
@Command('package',
@Command("package", description="Package Servo", category="package")
description='Package Servo',
@CommandArgument("--android", default=None, action="store_true", help="Package Android")
category='package')
@CommandArgument("--ohos", default=None, action="store_true", help="Package OpenHarmony")
@CommandArgument('--android',
@CommandArgument("--target", "-t", default=None, help="Package for given target platform")
default=None,
action='store_true',
help='Package Android')
@CommandArgument('--ohos',
default=None,
action='store_true',
help='Package OpenHarmony')
@CommandArgument('--target', '-t',
default=None,
help='Package for given target platform')
@CommandBase.common_command_arguments(build_configuration=False, build_type=True, package_configuration=True)
@CommandBase.allow_target_configuration
def package(self, build_type: BuildType, flavor=None, with_asan=False):
@ -146,11 +137,11 @@ class PackageCommands(CommandBase):
if flavor is not None:
flavor_name = flavor.title()

dir_to_resources = path.join(self.get_top_dir(), 'target', 'android', 'resources')
dir_to_resources = path.join(self.get_top_dir(), "target", "android", "resources")
if path.exists(dir_to_resources):
delete(dir_to_resources)

shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copytree(path.join(dir_to_root, "resources"), dir_to_resources)

variant = ":assemble" + flavor_name + arch_string + build_type_string
apk_task_name = ":servoapp" + variant
@ -167,8 +158,7 @@ class PackageCommands(CommandBase):
# so copy the source files into the target/openharmony directory first.
ohos_app_dir = path.join(self.get_top_dir(), "support", "openharmony")
build_mode = build_type.directory_name()
ohos_target_dir = path.join(
ohos_target_dir = path.join(self.get_top_dir(), "target", "openharmony", self.target.triple(), build_mode)
self.get_top_dir(), "target", "openharmony", self.target.triple(), build_mode)
if path.exists(ohos_target_dir):
print("Cleaning up from previous packaging")
delete(ohos_target_dir)
@ -186,9 +176,14 @@ class PackageCommands(CommandBase):
if flavor is not None:
flavor_name = flavor

hvigor_command = ["--no-daemon", "assembleHap",
hvigor_command = [
"-p", f"product={flavor_name}",
"--no-daemon",
"-p", f"buildMode={build_mode}"]
"assembleHap",
"-p",
f"product={flavor_name}",
"-p",
f"buildMode={build_mode}",
]
# Detect if PATH already has hvigor, or else fallback to npm installation
# provided via HVIGOR_PATH
if "HVIGOR_PATH" not in env:
@ -198,9 +193,11 @@ class PackageCommands(CommandBase):
print(f"Found `hvigorw` with version {str(version, 'utf-8').strip()} in system PATH")
hvigor_command[0:0] = ["hvigorw"]
except FileNotFoundError:
print("Unable to find `hvigor` tool. Please either modify PATH to include the"
print(
"Unable to find `hvigor` tool. Please either modify PATH to include the"
"path to hvigorw or set the HVIGOR_PATH environment variable to the npm"
"installation containing `node_modules` directory with hvigor modules.")
"installation containing `node_modules` directory with hvigor modules."
)
sys.exit(1)
except subprocess.CalledProcessError as e:
print(f"hvigor exited with the following error: {e}")
@ -227,21 +224,21 @@ class PackageCommands(CommandBase):
except subprocess.CalledProcessError as e:
print("Packaging OpenHarmony exited with return value %d" % e.returncode)
return e.returncode
elif 'darwin' in self.target.triple():
elif "darwin" in self.target.triple():
print("Creating Servo.app")
dir_to_dmg = path.join(target_dir, 'dmg')
dir_to_dmg = path.join(target_dir, "dmg")
dir_to_app = path.join(dir_to_dmg, 'Servo.app')
dir_to_app = path.join(dir_to_dmg, "Servo.app")
dir_to_resources = path.join(dir_to_app, 'Contents', 'Resources')
dir_to_resources = path.join(dir_to_app, "Contents", "Resources")
if path.exists(dir_to_dmg):
print("Cleaning up from previous packaging")
delete(dir_to_dmg)

print("Copying files")
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copytree(path.join(dir_to_root, "resources"), dir_to_resources)
shutil.copy2(path.join(dir_to_root, 'Info.plist'), path.join(dir_to_app, 'Contents', 'Info.plist'))
shutil.copy2(path.join(dir_to_root, "Info.plist"), path.join(dir_to_app, "Contents", "Info.plist"))

content_dir = path.join(dir_to_app, 'Contents', 'MacOS')
content_dir = path.join(dir_to_app, "Contents", "MacOS")
lib_dir = path.join(content_dir, 'lib')
lib_dir = path.join(content_dir, "lib")
os.makedirs(lib_dir)
shutil.copy2(binary_path, content_dir)

@ -250,19 +247,19 @@ class PackageCommands(CommandBase):
servo.gstreamer.package_gstreamer_dylibs(dmg_binary, lib_dir, self.target)

print("Adding version to Credits.rtf")
version_command = [binary_path, '--version']
version_command = [binary_path, "--version"]
p = subprocess.Popen(version_command,
p = subprocess.Popen(
stdout=subprocess.PIPE,
version_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True
stderr=subprocess.PIPE,
)
universal_newlines=True)
version, stderr = p.communicate()
if p.returncode != 0:
raise Exception("Error occurred when getting Servo version: " + stderr)
version = "Nightly version: " + version

import mako.template
template_path = path.join(dir_to_resources, 'Credits.rtf.mako')
credits_path = path.join(dir_to_resources, 'Credits.rtf')
template_path = path.join(dir_to_resources, "Credits.rtf.mako")
credits_path = path.join(dir_to_resources, "Credits.rtf")
with open(template_path) as template_file:
template = mako.template.Template(template_file.read())
with open(credits_path, "w") as credits_file:
@ -270,7 +267,7 @@ class PackageCommands(CommandBase):
delete(template_path)

print("Creating dmg")
os.symlink('/Applications', path.join(dir_to_dmg, 'Applications'))
os.symlink("/Applications", path.join(dir_to_dmg, "Applications"))
dmg_path = path.join(target_dir, "servo-tech-demo.dmg")

if path.exists(dmg_path):
@ -282,10 +279,9 @@ class PackageCommands(CommandBase):
# after a random wait.
try:
check_call_with_randomized_backoff(
['hdiutil', 'create', '-volname', 'Servo',
["hdiutil", "create", "-volname", "Servo", "-megabytes", "900", dmg_path, "-srcfolder", dir_to_dmg],
'-megabytes', '900', dmg_path,
retries=3,
'-srcfolder', dir_to_dmg],
)
retries=3)
except subprocess.CalledProcessError as e:
print("Packaging MacOS dmg exited with return value %d" % e.returncode)
return e.returncode
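The hunk header above only shows the signature `check_call_with_randomized_backoff(args: List[str], retries: int) -> int`; its body is outside this diff. A minimal sketch consistent with that signature and with the surrounding comment about retrying `hdiutil` after a random wait (the backoff bounds are assumptions, not the repository's actual values):

```python
import random
import subprocess
import time
from typing import List

# Sketch only: the real implementation is not shown in this diff.
def check_call_with_randomized_backoff(args: List[str], retries: int) -> int:
    for attempt in range(retries):
        try:
            return subprocess.check_call(args)
        except subprocess.CalledProcessError:
            if attempt == retries - 1:
                raise
            time.sleep(random.uniform(1, 10))  # assumed backoff window
```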
@ -294,42 +290,42 @@ class PackageCommands(CommandBase):
delete(dir_to_dmg)
print("Packaged Servo into " + dmg_path)

elif 'windows' in self.target.triple():
elif "windows" in self.target.triple():
dir_to_msi = path.join(target_dir, 'msi')
dir_to_msi = path.join(target_dir, "msi")
if path.exists(dir_to_msi):
print("Cleaning up from previous packaging")
delete(dir_to_msi)
os.makedirs(dir_to_msi)

print("Copying files")
dir_to_temp = path.join(dir_to_msi, 'temp')
dir_to_temp = path.join(dir_to_msi, "temp")
dir_to_resources = path.join(dir_to_temp, 'resources')
dir_to_resources = path.join(dir_to_temp, "resources")
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copytree(path.join(dir_to_root, "resources"), dir_to_resources)
shutil.copy(binary_path, dir_to_temp)
copy_windows_dependencies(target_dir, dir_to_temp)

# generate Servo.wxs
import mako.template

template_path = path.join(dir_to_root, "support", "windows", "Servo.wxs.mako")
template = mako.template.Template(open(template_path).read())
wxs_path = path.join(dir_to_msi, "Installer.wxs")
open(wxs_path, "w").write(template.render(
open(wxs_path, "w").write(
exe_path=target_dir,
template.render(exe_path=target_dir, dir_to_temp=dir_to_temp, resources_path=dir_to_resources)
dir_to_temp=dir_to_temp,
)
resources_path=dir_to_resources))

# run candle and light
print("Creating MSI")
try:
with cd(dir_to_msi):
subprocess.check_call(['candle', wxs_path])
subprocess.check_call(["candle", wxs_path])
except subprocess.CalledProcessError as e:
print("WiX candle exited with return value %d" % e.returncode)
return e.returncode
try:
wxsobj_path = "{}.wixobj".format(path.splitext(wxs_path)[0])
with cd(dir_to_msi):
subprocess.check_call(['light', wxsobj_path])
subprocess.check_call(["light", wxsobj_path])
except subprocess.CalledProcessError as e:
print("WiX light exited with return value %d" % e.returncode)
return e.returncode
@ -338,18 +334,18 @@ class PackageCommands(CommandBase):

# Generate bundle with Servo installer.
print("Creating bundle")
shutil.copy(path.join(dir_to_root, 'support', 'windows', 'Servo.wxs'), dir_to_msi)
shutil.copy(path.join(dir_to_root, "support", "windows", "Servo.wxs"), dir_to_msi)
bundle_wxs_path = path.join(dir_to_msi, 'Servo.wxs')
bundle_wxs_path = path.join(dir_to_msi, "Servo.wxs")
try:
with cd(dir_to_msi):
subprocess.check_call(['candle', bundle_wxs_path, '-ext', 'WixBalExtension'])
subprocess.check_call(["candle", bundle_wxs_path, "-ext", "WixBalExtension"])
except subprocess.CalledProcessError as e:
print("WiX candle exited with return value %d" % e.returncode)
return e.returncode
try:
wxsobj_path = "{}.wixobj".format(path.splitext(bundle_wxs_path)[0])
with cd(dir_to_msi):
subprocess.check_call(['light', wxsobj_path, '-ext', 'WixBalExtension'])
subprocess.check_call(["light", wxsobj_path, "-ext", "WixBalExtension"])
except subprocess.CalledProcessError as e:
print("WiX light exited with return value %d" % e.returncode)
return e.returncode
@ -357,51 +353,39 @@ class PackageCommands(CommandBase):

print("Creating ZIP")
zip_path = path.join(dir_to_msi, "Servo.zip")
archive_deterministically(dir_to_temp, zip_path, prepend_path='servo/')
archive_deterministically(dir_to_temp, zip_path, prepend_path="servo/")
print("Packaged Servo into " + zip_path)

print("Cleaning up")
delete(dir_to_temp)
delete(dir_to_installer)
else:
dir_to_temp = path.join(target_dir, 'packaging-temp')
dir_to_temp = path.join(target_dir, "packaging-temp")
if path.exists(dir_to_temp):
# TODO(aneeshusa): lock dir_to_temp to prevent simultaneous builds
print("Cleaning up from previous packaging")
delete(dir_to_temp)

print("Copying files")
dir_to_resources = path.join(dir_to_temp, 'resources')
dir_to_resources = path.join(dir_to_temp, "resources")
shutil.copytree(path.join(dir_to_root, 'resources'), dir_to_resources)
shutil.copytree(path.join(dir_to_root, "resources"), dir_to_resources)
shutil.copy(binary_path, dir_to_temp)

print("Creating tarball")
tar_path = path.join(target_dir, 'servo-tech-demo.tar.gz')
tar_path = path.join(target_dir, "servo-tech-demo.tar.gz")

archive_deterministically(dir_to_temp, tar_path, prepend_path='servo/')
archive_deterministically(dir_to_temp, tar_path, prepend_path="servo/")

print("Cleaning up")
delete(dir_to_temp)
print("Packaged Servo into " + tar_path)

@Command('install',
@Command("install", description="Install Servo (currently, Android and Windows only)", category="package")
description='Install Servo (currently, Android and Windows only)',
@CommandArgument("--android", action="store_true", help="Install on Android")
category='package')
@CommandArgument("--ohos", action="store_true", help="Install on OpenHarmony")
@CommandArgument('--android',
@CommandArgument("--emulator", action="store_true", help="For Android, install to the only emulated device")
action='store_true',
@CommandArgument("--usb", action="store_true", help="For Android, install to the only USB device")
help='Install on Android')
@CommandArgument("--target", "-t", default=None, help="Install the given target platform")
@CommandArgument('--ohos',
action='store_true',
help='Install on OpenHarmony')
@CommandArgument('--emulator',
action='store_true',
help='For Android, install to the only emulated device')
@CommandArgument('--usb',
action='store_true',
help='For Android, install to the only USB device')
@CommandArgument('--target', '-t',
default=None,
help='Install the given target platform')
@CommandBase.common_command_arguments(build_configuration=False, build_type=True, package_configuration=True)
@CommandBase.allow_target_configuration
def install(self, build_type: BuildType, emulator=False, usb=False, with_asan=False, flavor=None):
@ -410,9 +394,7 @@ class PackageCommands(CommandBase):
binary_path = self.get_binary_path(build_type, asan=with_asan)
except BuildNotFound:
print("Servo build not found. Building servo...")
result = Registrar.dispatch(
result = Registrar.dispatch("build", context=self.context, build_type=build_type, flavor=flavor)
"build", context=self.context, build_type=build_type, flavor=flavor
)
if result:
return result
try:
@ -437,33 +419,26 @@ class PackageCommands(CommandBase):
hdc_path = path.join(env["OHOS_SDK_NATIVE"], "../", "toolchains", "hdc")
exec_command = [hdc_path, "install", "-r", pkg_path]
elif is_windows():
pkg_path = path.join(path.dirname(binary_path), 'msi', 'Servo.msi')
pkg_path = path.join(path.dirname(binary_path), "msi", "Servo.msi")
exec_command = ["msiexec", "/i", pkg_path]

if not path.exists(pkg_path):
print("Servo package not found. Packaging servo...")
result = Registrar.dispatch(
result = Registrar.dispatch("package", context=self.context, build_type=build_type, flavor=flavor)
"package", context=self.context, build_type=build_type, flavor=flavor
)
if result != 0:
return result

print(" ".join(exec_command))
return subprocess.call(exec_command, env=env)

@Command('upload-nightly',
@Command("upload-nightly", description="Upload Servo nightly to S3", category="package")
description='Upload Servo nightly to S3',
@CommandArgument("platform", choices=PACKAGES.keys(), help="Package platform type to upload")
category='package')
@CommandArgument(
@CommandArgument('platform',
"--secret-from-environment", action="store_true", help="Retrieve the appropriate secrets from the environment."
choices=PACKAGES.keys(),
)
help='Package platform type to upload')
@CommandArgument(
@CommandArgument('--secret-from-environment',
"--github-release-id", default=None, type=int, help="The github release to upload the nightly builds."
action='store_true',
)
help='Retrieve the appropriate secrets from the environment.')
@CommandArgument('--github-release-id',
default=None,
type=int,
help='The github release to upload the nightly builds.')
def upload_nightly(self, platform, secret_from_environment, github_release_id):
import boto3

@ -471,69 +446,62 @@ class PackageCommands(CommandBase):
aws_access_key = None
aws_secret_access_key = None
if secret_from_environment:
secret = json.loads(os.environ['S3_UPLOAD_CREDENTIALS'])
secret = json.loads(os.environ["S3_UPLOAD_CREDENTIALS"])
aws_access_key = secret["aws_access_key_id"]
aws_secret_access_key = secret["aws_secret_access_key"]
return (aws_access_key, aws_secret_access_key)

def nightly_filename(package, timestamp):
return '{}-{}'.format(
return "{}-{}".format(
timestamp.isoformat() + 'Z',  # The `Z` denotes UTC
timestamp.isoformat() + "Z",  # The `Z` denotes UTC
path.basename(package)
path.basename(package),
)

def upload_to_github_release(platform, package, package_hash):
if not github_release_id:
return

extension = path.basename(package).partition('.')[2]
extension = path.basename(package).partition(".")[2]
g = Github(os.environ['NIGHTLY_REPO_TOKEN'])
g = Github(os.environ["NIGHTLY_REPO_TOKEN"])
nightly_repo = g.get_repo(os.environ['NIGHTLY_REPO'])
nightly_repo = g.get_repo(os.environ["NIGHTLY_REPO"])
release = nightly_repo.get_release(github_release_id)
package_hash_fileobj = io.BytesIO(package_hash.encode('utf-8'))
package_hash_fileobj = io.BytesIO(package_hash.encode("utf-8"))

asset_name = f'servo-latest.{extension}'
asset_name = f"servo-latest.{extension}"
release.upload_asset(package, name=asset_name)
release.upload_asset_from_memory(
package_hash_fileobj,
package_hash_fileobj, package_hash_fileobj.getbuffer().nbytes, name=f"{asset_name}.sha256"
package_hash_fileobj.getbuffer().nbytes,
)
name=f'{asset_name}.sha256')
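`partition(".")` splits on the first dot only, so compound suffixes such as `.tar.gz` survive as the extension, which `os.path.splitext` would truncate:

```python
# Sketch: why partition(".") is used here instead of os.path.splitext.
name = "servo-tech-demo.tar.gz"
print(name.partition(".")[2])  # tar.gz
```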
def upload_to_s3(platform, package, package_hash, timestamp):
(aws_access_key, aws_secret_access_key) = get_s3_secret()
s3 = boto3.client(
s3 = boto3.client("s3", aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_access_key)
's3',
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key
)

cloudfront = boto3.client(
'cloudfront',
"cloudfront", aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_access_key
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_access_key
)

BUCKET = 'servo-builds2'
BUCKET = "servo-builds2"
DISTRIBUTION_ID = 'EJ8ZWSJKFCJS2'
DISTRIBUTION_ID = "EJ8ZWSJKFCJS2"

nightly_dir = f'nightly/{platform}'
nightly_dir = f"nightly/{platform}"
filename = nightly_filename(package, timestamp)
package_upload_key = '{}/{}'.format(nightly_dir, filename)
package_upload_key = "{}/{}".format(nightly_dir, filename)
extension = path.basename(package).partition('.')[2]
extension = path.basename(package).partition(".")[2]
latest_upload_key = '{}/servo-latest.{}'.format(nightly_dir, extension)
latest_upload_key = "{}/servo-latest.{}".format(nightly_dir, extension)

package_hash_fileobj = io.BytesIO(package_hash.encode('utf-8'))
package_hash_fileobj = io.BytesIO(package_hash.encode("utf-8"))
latest_hash_upload_key = f'{latest_upload_key}.sha256'
latest_hash_upload_key = f"{latest_upload_key}.sha256"

s3.upload_file(package, BUCKET, package_upload_key)

copy_source = {
'Bucket': BUCKET,
"Bucket": BUCKET,
'Key': package_upload_key,
"Key": package_upload_key,
}
s3.copy(copy_source, BUCKET, latest_upload_key)
s3.upload_fileobj(
package_hash_fileobj, BUCKET, latest_hash_upload_key, ExtraArgs={'ContentType': 'text/plain'}
package_hash_fileobj, BUCKET, latest_hash_upload_key, ExtraArgs={"ContentType": "text/plain"}
)

# Invalidate previous "latest" nightly files from
@ -541,14 +509,9 @@ class PackageCommands(CommandBase):
cloudfront.create_invalidation(
DistributionId=DISTRIBUTION_ID,
InvalidationBatch={
'CallerReference': f'{latest_upload_key}-{timestamp}',
"CallerReference": f"{latest_upload_key}-{timestamp}",
'Paths': {
"Paths": {"Quantity": 1, "Items": [f"/{latest_upload_key}*"]},
'Quantity': 1,
},
'Items': [
f'/{latest_upload_key}*'
]
}
}
)

timestamp = datetime.utcnow().replace(microsecond=0)
@ -556,16 +519,13 @@ class PackageCommands(CommandBase):
if path.isdir(package):
continue
if not path.isfile(package):
print("Could not find package for {} at {}".format(
print("Could not find package for {} at {}".format(platform, package), file=sys.stderr)
platform,
package
), file=sys.stderr)
return 1

# Compute the hash
SHA_BUF_SIZE = 1048576  # read in 1 MiB chunks
sha256_digest = hashlib.sha256()
with open(package, 'rb') as package_file:
with open(package, "rb") as package_file:
while True:
data = package_file.read(SHA_BUF_SIZE)
if not data:
@ -64,11 +64,14 @@ def get():
        __platform__ = Windows(triple)
    elif "linux-gnu" in triple:
        from .linux import Linux

        __platform__ = Linux(triple)
    elif "apple-darwin" in triple:
        from .macos import MacOS

        __platform__ = MacOS(triple)
    else:
        from .base import Base

        __platform__ = Base(triple)
    return __platform__
@ -33,9 +33,7 @@ class Base:
        raise NotImplementedError("Bootstrap installation detection not yet available.")

    def _platform_bootstrap_gstreamer(self, _target: BuildTarget, _force: bool) -> bool:
        raise NotImplementedError(
            "GStreamer bootstrap support is not yet available for your OS."
        )
        raise NotImplementedError("GStreamer bootstrap support is not yet available for your OS.")

    def is_gstreamer_installed(self, target: BuildTarget) -> bool:
        gstreamer_root = self.gstreamer_root(target)

@ -92,8 +90,7 @@ class Base:
        if force or not shutil.which("cargo-deny"):
            return False
        # Tidy needs at least version 0.18.1 installed.
        result = subprocess.run(["cargo-deny", "--version"],
                                encoding='utf-8', capture_output=True)
        result = subprocess.run(["cargo-deny", "--version"], encoding="utf-8", capture_output=True)
        (major, minor, micro) = result.stdout.strip().split(" ")[1].split(".", 2)
        return (int(major), int(minor), int(micro)) >= (0, 18, 1)
@ -29,12 +29,12 @@ class BuildTarget(object):
        self.target_triple = target_triple

    @staticmethod
    def from_triple(target_triple: Optional[str]) -> 'BuildTarget':
    def from_triple(target_triple: Optional[str]) -> "BuildTarget":
        host_triple = servo.platform.host_triple()
        if target_triple:
            if 'android' in target_triple:
            if "android" in target_triple:
                return AndroidTarget(target_triple)
            elif 'ohos' in target_triple:
            elif "ohos" in target_triple:
                return OpenHarmonyTarget(target_triple)
            elif target_triple != host_triple:
                raise Exception(f"Unknown build target {target_triple}")

@ -129,16 +129,16 @@ class AndroidTarget(CrossBuildTarget):
        android_toolchain_name = ndk_configuration["toolchain_name"]
        android_lib = ndk_configuration["lib"]

        android_api = android_platform.replace('android-', '')
        android_api = android_platform.replace("android-", "")

        # Check if the NDK version is 26
        if not os.path.isfile(path.join(env["ANDROID_NDK_ROOT"], 'source.properties')):
        if not os.path.isfile(path.join(env["ANDROID_NDK_ROOT"], "source.properties")):
            print("ANDROID_NDK should have file `source.properties`.")
            print("The environment variable ANDROID_NDK_ROOT may be set at a wrong path.")
            sys.exit(1)
        with open(path.join(env["ANDROID_NDK_ROOT"], 'source.properties'), encoding="utf8") as ndk_properties:
        with open(path.join(env["ANDROID_NDK_ROOT"], "source.properties"), encoding="utf8") as ndk_properties:
            lines = ndk_properties.readlines()
            if lines[1].split(' = ')[1].split('.')[0] != '26':
            if lines[1].split(" = ")[1].split(".")[0] != "26":
                print("Servo currently only supports NDK r26c.")
                sys.exit(1)

@ -149,7 +149,7 @@ class AndroidTarget(CrossBuildTarget):
        if os_type not in ["linux", "darwin"]:
            raise Exception("Android cross builds are only supported on Linux and macOS.")

        llvm_prebuilt = path.join(env['ANDROID_NDK_ROOT'], "toolchains", "llvm", "prebuilt")
        llvm_prebuilt = path.join(env["ANDROID_NDK_ROOT"], "toolchains", "llvm", "prebuilt")

        cpu_type = platform.machine().lower()
        host_suffix = "unknown"

@ -172,11 +172,11 @@ class AndroidTarget(CrossBuildTarget):
            raise Exception("Can't determine LLVM prebuilt directory.")
        host = os_type + "-" + host_suffix

        host_cc = env.get('HOST_CC') or shutil.which("clang")
        host_cxx = env.get('HOST_CXX') or shutil.which("clang++")
        host_cc = env.get("HOST_CC") or shutil.which("clang")
        host_cxx = env.get("HOST_CXX") or shutil.which("clang++")

        llvm_toolchain = path.join(llvm_prebuilt, host)
        env['PATH'] = (env['PATH'] + ':' + path.join(llvm_toolchain, "bin"))
        env["PATH"] = env["PATH"] + ":" + path.join(llvm_toolchain, "bin")

        def to_ndk_bin(prog):
            return path.join(llvm_toolchain, "bin", prog)

@ -189,26 +189,26 @@ class AndroidTarget(CrossBuildTarget):
            [to_ndk_bin(f"x86_64-linux-android{android_api}-clang"), "--print-libgcc-file-name"],
            check=True,
            capture_output=True,
            encoding="utf8"
            encoding="utf8",
        ).stdout
        env['RUSTFLAGS'] = env.get('RUSTFLAGS', "")
        env["RUSTFLAGS"] = env.get("RUSTFLAGS", "")
        env["RUSTFLAGS"] += f"-C link-arg={libclangrt_filename}"

        env["RUST_TARGET"] = self.triple()
        env['HOST_CC'] = host_cc
        env['HOST_CXX'] = host_cxx
        env['HOST_CFLAGS'] = ''
        env['HOST_CXXFLAGS'] = ''
        env['TARGET_CC'] = to_ndk_bin("clang")
        env['TARGET_CPP'] = to_ndk_bin("clang") + " -E"
        env['TARGET_CXX'] = to_ndk_bin("clang++")
        env["HOST_CC"] = host_cc
        env["HOST_CXX"] = host_cxx
        env["HOST_CFLAGS"] = ""
        env["HOST_CXXFLAGS"] = ""
        env["TARGET_CC"] = to_ndk_bin("clang")
        env["TARGET_CPP"] = to_ndk_bin("clang") + " -E"
        env["TARGET_CXX"] = to_ndk_bin("clang++")

        env['TARGET_AR'] = to_ndk_bin("llvm-ar")
        env['TARGET_RANLIB'] = to_ndk_bin("llvm-ranlib")
        env['TARGET_OBJCOPY'] = to_ndk_bin("llvm-objcopy")
        env['TARGET_YASM'] = to_ndk_bin("yasm")
        env['TARGET_STRIP'] = to_ndk_bin("llvm-strip")
        env['RUST_FONTCONFIG_DLOPEN'] = "on"
        env["TARGET_AR"] = to_ndk_bin("llvm-ar")
        env["TARGET_RANLIB"] = to_ndk_bin("llvm-ranlib")
        env["TARGET_OBJCOPY"] = to_ndk_bin("llvm-objcopy")
        env["TARGET_YASM"] = to_ndk_bin("yasm")
        env["TARGET_STRIP"] = to_ndk_bin("llvm-strip")
        env["RUST_FONTCONFIG_DLOPEN"] = "on"

        env["LIBCLANG_PATH"] = path.join(llvm_toolchain, "lib")
        env["CLANG_PATH"] = to_ndk_bin("clang")

@ -224,11 +224,11 @@ class AndroidTarget(CrossBuildTarget):
        #
        # Also worth remembering: autoconf uses C for its configuration,
        # even for C++ builds, so the C flags need to line up with the C++ flags.
        env['TARGET_CFLAGS'] = "--target=" + android_toolchain_name
        env['TARGET_CXXFLAGS'] = "--target=" + android_toolchain_name
        env["TARGET_CFLAGS"] = "--target=" + android_toolchain_name
        env["TARGET_CXXFLAGS"] = "--target=" + android_toolchain_name

        # These two variables are needed for the mozjs compilation.
        env['ANDROID_API_LEVEL'] = android_api
        env["ANDROID_API_LEVEL"] = android_api
        env["ANDROID_NDK_HOME"] = env["ANDROID_NDK_ROOT"]

        # The two variables set below are passed by our custom

@ -236,15 +236,16 @@ class AndroidTarget(CrossBuildTarget):
        env["ANDROID_ABI"] = android_lib
        env["ANDROID_PLATFORM"] = android_platform
        env["NDK_CMAKE_TOOLCHAIN_FILE"] = path.join(
            env['ANDROID_NDK_ROOT'], "build", "cmake", "android.toolchain.cmake")
            env["ANDROID_NDK_ROOT"], "build", "cmake", "android.toolchain.cmake"
        )
        env["CMAKE_TOOLCHAIN_FILE"] = path.join(topdir, "support", "android", "toolchain.cmake")

        # Set output dir for gradle aar files
        env["AAR_OUT_DIR"] = path.join(topdir, "target", "android", "aar")
        if not os.path.exists(env['AAR_OUT_DIR']):
            os.makedirs(env['AAR_OUT_DIR'])
        if not os.path.exists(env["AAR_OUT_DIR"]):
            os.makedirs(env["AAR_OUT_DIR"])

        env['TARGET_PKG_CONFIG_SYSROOT_DIR'] = path.join(llvm_toolchain, 'sysroot')
        env["TARGET_PKG_CONFIG_SYSROOT_DIR"] = path.join(llvm_toolchain, "sysroot")

    def binary_name(self) -> str:
        return "libservoshell.so"

@ -273,8 +274,10 @@ class OpenHarmonyTarget(CrossBuildTarget):
            env["OHOS_SDK_NATIVE"] = config["ohos"]["ndk"]

        if "OHOS_SDK_NATIVE" not in env:
            print("Please set the OHOS_SDK_NATIVE environment variable to the location of the `native` directory "
                  "in the OpenHarmony SDK.")
            print(
                "Please set the OHOS_SDK_NATIVE environment variable to the location of the `native` directory "
                "in the OpenHarmony SDK."
            )
            sys.exit(1)

        ndk_root = pathlib.Path(env["OHOS_SDK_NATIVE"])

@ -288,9 +291,9 @@ class OpenHarmonyTarget(CrossBuildTarget):
        try:
            with open(package_info) as meta_file:
                meta = json.load(meta_file)
                ohos_api_version = int(meta['apiVersion'])
                ohos_sdk_version = parse_version(meta['version'])
                if ohos_sdk_version < parse_version('5.0') or ohos_api_version < 12:
                ohos_api_version = int(meta["apiVersion"])
                ohos_sdk_version = parse_version(meta["version"])
                if ohos_sdk_version < parse_version("5.0") or ohos_api_version < 12:
                    raise RuntimeError("Building servo for OpenHarmony requires SDK version 5.0 (API-12) or newer.")
                print(f"Info: The OpenHarmony SDK {ohos_sdk_version} is targeting API-level {ohos_api_version}")
        except (OSError, json.JSONDecodeError) as e:

@ -318,72 +321,79 @@ class OpenHarmonyTarget(CrossBuildTarget):
        # Instead, we ensure that all the necessary flags for the c-compiler are set
        # via environment variables such as `TARGET_CFLAGS`.
        def to_sdk_llvm_bin(prog: str):
            if sys.platform == 'win32':
                prog = prog + '.exe'
            if sys.platform == "win32":
                prog = prog + ".exe"
            llvm_prog = llvm_bin.joinpath(prog)
            if not llvm_prog.is_file():
                raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), llvm_prog)
            return llvm_bin.joinpath(prog).as_posix()

        # CC and CXX should already be set to appropriate host compilers by `build_env()`
        env['HOST_CC'] = env['CC']
        env['HOST_CXX'] = env['CXX']
        env['TARGET_AR'] = to_sdk_llvm_bin("llvm-ar")
        env['TARGET_RANLIB'] = to_sdk_llvm_bin("llvm-ranlib")
        env['TARGET_READELF'] = to_sdk_llvm_bin("llvm-readelf")
        env['TARGET_OBJCOPY'] = to_sdk_llvm_bin("llvm-objcopy")
        env['TARGET_STRIP'] = to_sdk_llvm_bin("llvm-strip")
        env["HOST_CC"] = env["CC"]
        env["HOST_CXX"] = env["CXX"]
        env["TARGET_AR"] = to_sdk_llvm_bin("llvm-ar")
        env["TARGET_RANLIB"] = to_sdk_llvm_bin("llvm-ranlib")
        env["TARGET_READELF"] = to_sdk_llvm_bin("llvm-readelf")
        env["TARGET_OBJCOPY"] = to_sdk_llvm_bin("llvm-objcopy")
        env["TARGET_STRIP"] = to_sdk_llvm_bin("llvm-strip")

        target_triple = self.triple()
        rust_target_triple = str(target_triple).replace('-', '_')
        rust_target_triple = str(target_triple).replace("-", "_")
        ndk_clang = to_sdk_llvm_bin("clang")
        ndk_clangxx = to_sdk_llvm_bin("clang++")
        env[f'CC_{rust_target_triple}'] = ndk_clang
        env[f'CXX_{rust_target_triple}'] = ndk_clangxx
        env[f"CC_{rust_target_triple}"] = ndk_clang
        env[f"CXX_{rust_target_triple}"] = ndk_clangxx
        # The clang target name is different from the LLVM target name
        clang_target_triple = str(target_triple).replace('-unknown-', '-')
        clang_target_triple_underscore = clang_target_triple.replace('-', '_')
        env[f'CC_{clang_target_triple_underscore}'] = ndk_clang
        env[f'CXX_{clang_target_triple_underscore}'] = ndk_clangxx
        clang_target_triple = str(target_triple).replace("-unknown-", "-")
        clang_target_triple_underscore = clang_target_triple.replace("-", "_")
        env[f"CC_{clang_target_triple_underscore}"] = ndk_clang
        env[f"CXX_{clang_target_triple_underscore}"] = ndk_clangxx
        # rustc linker
        env[f'CARGO_TARGET_{rust_target_triple.upper()}_LINKER'] = ndk_clang
        env[f"CARGO_TARGET_{rust_target_triple.upper()}_LINKER"] = ndk_clang
        # We could also use a cross-compile wrapper
        env["RUSTFLAGS"] += f' -Clink-arg=--target={clang_target_triple}'
        env["RUSTFLAGS"] += f' -Clink-arg=--sysroot={ohos_sysroot_posix}'
        env["RUSTFLAGS"] += f" -Clink-arg=--target={clang_target_triple}"
        env["RUSTFLAGS"] += f" -Clink-arg=--sysroot={ohos_sysroot_posix}"

        env['HOST_CFLAGS'] = ''
        env['HOST_CXXFLAGS'] = ''
        ohos_cflags = ['-D__MUSL__', f' --target={clang_target_triple}', f' --sysroot={ohos_sysroot_posix}',
                       "-Wno-error=unused-command-line-argument"]
        if clang_target_triple.startswith('armv7-'):
            ohos_cflags.extend(['-march=armv7-a', '-mfloat-abi=softfp', '-mtune=generic-armv7-a', '-mthumb'])
        env["HOST_CFLAGS"] = ""
        env["HOST_CXXFLAGS"] = ""
        ohos_cflags = [
            "-D__MUSL__",
            f" --target={clang_target_triple}",
            f" --sysroot={ohos_sysroot_posix}",
            "-Wno-error=unused-command-line-argument",
        ]
        if clang_target_triple.startswith("armv7-"):
            ohos_cflags.extend(["-march=armv7-a", "-mfloat-abi=softfp", "-mtune=generic-armv7-a", "-mthumb"])
        ohos_cflags_str = " ".join(ohos_cflags)
        env['TARGET_CFLAGS'] = ohos_cflags_str
        env['TARGET_CPPFLAGS'] = '-D__MUSL__'
        env['TARGET_CXXFLAGS'] = ohos_cflags_str
        env["TARGET_CFLAGS"] = ohos_cflags_str
        env["TARGET_CPPFLAGS"] = "-D__MUSL__"
        env["TARGET_CXXFLAGS"] = ohos_cflags_str

        # CMake related flags
        env['CMAKE'] = ndk_root.joinpath("build-tools", "cmake", "bin", "cmake").as_posix()
        env["CMAKE"] = ndk_root.joinpath("build-tools", "cmake", "bin", "cmake").as_posix()
        cmake_toolchain_file = ndk_root.joinpath("build", "cmake", "ohos.toolchain.cmake")
        if cmake_toolchain_file.is_file():
            env[f'CMAKE_TOOLCHAIN_FILE_{rust_target_triple}'] = cmake_toolchain_file.as_posix()
            env[f"CMAKE_TOOLCHAIN_FILE_{rust_target_triple}"] = cmake_toolchain_file.as_posix()
        else:
            print(
                f"Warning: Failed to find the OpenHarmony CMake Toolchain file - Expected it at {cmake_toolchain_file}")
        env[f'CMAKE_C_COMPILER_{rust_target_triple}'] = ndk_clang
        env[f'CMAKE_CXX_COMPILER_{rust_target_triple}'] = ndk_clangxx
                f"Warning: Failed to find the OpenHarmony CMake Toolchain file - Expected it at {cmake_toolchain_file}"
            )
        env[f"CMAKE_C_COMPILER_{rust_target_triple}"] = ndk_clang
        env[f"CMAKE_CXX_COMPILER_{rust_target_triple}"] = ndk_clangxx

        # pkg-config
        pkg_config_path = '{}:{}'.format(ohos_sysroot.joinpath("usr", "lib", "pkgconfig").as_posix(),
                                         ohos_sysroot.joinpath("usr", "share", "pkgconfig").as_posix())
        env[f'PKG_CONFIG_SYSROOT_DIR_{rust_target_triple}'] = ohos_sysroot_posix
        env[f'PKG_CONFIG_PATH_{rust_target_triple}'] = pkg_config_path
        pkg_config_path = "{}:{}".format(
            ohos_sysroot.joinpath("usr", "lib", "pkgconfig").as_posix(),
            ohos_sysroot.joinpath("usr", "share", "pkgconfig").as_posix(),
        )
        env[f"PKG_CONFIG_SYSROOT_DIR_{rust_target_triple}"] = ohos_sysroot_posix
        env[f"PKG_CONFIG_PATH_{rust_target_triple}"] = pkg_config_path

        # bindgen / libclang-sys
        env["LIBCLANG_PATH"] = path.join(llvm_toolchain, "lib")
        env["CLANG_PATH"] = ndk_clangxx
        env[f'CXXSTDLIB_{clang_target_triple_underscore}'] = "c++"
        bindgen_extra_clangs_args_var = f'BINDGEN_EXTRA_CLANG_ARGS_{rust_target_triple}'
        env[f"CXXSTDLIB_{clang_target_triple_underscore}"] = "c++"
        bindgen_extra_clangs_args_var = f"BINDGEN_EXTRA_CLANG_ARGS_{rust_target_triple}"
        bindgen_extra_clangs_args = env.get(bindgen_extra_clangs_args_var, "")
        bindgen_extra_clangs_args = bindgen_extra_clangs_args + " " + ohos_cflags_str
        env[bindgen_extra_clangs_args_var] = bindgen_extra_clangs_args

@ -404,8 +414,5 @@ class OpenHarmonyTarget(CrossBuildTarget):
        return path.join(base_path, build_type_directory, build_output_path, hap_name)

    def abi_string(self) -> str:
        abi_map = {
            "aarch64-unknown-linux-ohos": "arm64-v8a",
            "x86_64-unknown-linux-ohos": "x86_64"
        }
        abi_map = {"aarch64-unknown-linux-ohos": "arm64-v8a", "x86_64-unknown-linux-ohos": "x86_64"}
        return abi_map[self.triple()]
@ -26,22 +26,48 @@ from .build_target import BuildTarget
# 3. copy(`sudo apt install ${APT_PKGS.join(" ")}`)
# 4. paste into https://github.com/servo/book/edit/main/src/hacking/setting-up-your-environment.md
APT_PKGS = [
    'build-essential', 'ccache', 'clang', 'cmake', 'curl', 'g++', 'git',
    'gperf', 'libdbus-1-dev', 'libfreetype6-dev', 'libgl1-mesa-dri',
    'libgles2-mesa-dev', 'libglib2.0-dev',
    'gstreamer1.0-plugins-good', 'libgstreamer-plugins-good1.0-dev',
    'gstreamer1.0-plugins-bad', 'libgstreamer-plugins-bad1.0-dev',
    'gstreamer1.0-plugins-ugly',
    "gstreamer1.0-plugins-base", 'libgstreamer-plugins-base1.0-dev',
    'gstreamer1.0-libav',
    'libgstrtspserver-1.0-dev',
    'gstreamer1.0-tools',
    'libges-1.0-dev',
    'libharfbuzz-dev', 'liblzma-dev', 'libudev-dev', 'libunwind-dev',
    'libvulkan1', 'libx11-dev', 'libxcb-render0-dev', 'libxcb-shape0-dev',
    'libxcb-xfixes0-dev', 'libxmu-dev', 'libxmu6', 'libegl1-mesa-dev',
    'llvm-dev', 'm4', 'xorg-dev', 'libxkbcommon0', "libxkbcommon-x11-0",
    'tshark',
    "build-essential",
    "ccache",
    "clang",
    "cmake",
    "curl",
    "g++",
    "git",
    "gperf",
    "libdbus-1-dev",
    "libfreetype6-dev",
    "libgl1-mesa-dri",
    "libgles2-mesa-dev",
    "libglib2.0-dev",
    "gstreamer1.0-plugins-good",
    "libgstreamer-plugins-good1.0-dev",
    "gstreamer1.0-plugins-bad",
    "libgstreamer-plugins-bad1.0-dev",
    "gstreamer1.0-plugins-ugly",
    "gstreamer1.0-plugins-base",
    "libgstreamer-plugins-base1.0-dev",
    "gstreamer1.0-libav",
    "libgstrtspserver-1.0-dev",
    "gstreamer1.0-tools",
    "libges-1.0-dev",
    "libharfbuzz-dev",
    "liblzma-dev",
    "libudev-dev",
    "libunwind-dev",
    "libvulkan1",
    "libx11-dev",
    "libxcb-render0-dev",
    "libxcb-shape0-dev",
    "libxcb-xfixes0-dev",
    "libxmu-dev",
    "libxmu6",
    "libegl1-mesa-dev",
    "llvm-dev",
    "m4",
    "xorg-dev",
    "libxkbcommon0",
    "libxkbcommon-x11-0",
    "tshark",
]

# https://packages.fedoraproject.org

@ -49,37 +75,92 @@ APT_PKGS = [
# 2. paste in the whole DNF_PKGS = [...]
# 3. copy(`sudo dnf install ${DNF_PKGS.join(" ")}`)
# 4. paste into https://github.com/servo/book/edit/main/src/hacking/setting-up-your-environment.md
DNF_PKGS = ['libtool', 'gcc-c++', 'libXi-devel', 'freetype-devel',
            'libunwind-devel', 'mesa-libGL-devel', 'mesa-libEGL-devel',
            'glib2-devel', 'libX11-devel', 'libXrandr-devel', 'gperf',
            'fontconfig-devel', 'cabextract', 'ttmkfdir', 'expat-devel',
            'rpm-build', 'cmake', 'libXcursor-devel', 'libXmu-devel',
            'dbus-devel', 'ncurses-devel', 'harfbuzz-devel', 'ccache',
            'clang', 'clang-libs', 'llvm', 'python3-devel',
            'gstreamer1-devel', 'gstreamer1-plugins-base-devel',
            'gstreamer1-plugins-good', 'gstreamer1-plugins-bad-free-devel',
            'gstreamer1-plugins-ugly-free', 'libjpeg-turbo-devel',
            'zlib-ng', 'libjpeg-turbo', 'vulkan-loader', 'libxkbcommon',
            'libxkbcommon-x11', 'wireshark-cli']
DNF_PKGS = [
    "libtool",
    "gcc-c++",
    "libXi-devel",
    "freetype-devel",
    "libunwind-devel",
    "mesa-libGL-devel",
    "mesa-libEGL-devel",
    "glib2-devel",
    "libX11-devel",
    "libXrandr-devel",
    "gperf",
    "fontconfig-devel",
    "cabextract",
    "ttmkfdir",
    "expat-devel",
    "rpm-build",
    "cmake",
    "libXcursor-devel",
    "libXmu-devel",
    "dbus-devel",
    "ncurses-devel",
    "harfbuzz-devel",
    "ccache",
    "clang",
    "clang-libs",
    "llvm",
    "python3-devel",
    "gstreamer1-devel",
    "gstreamer1-plugins-base-devel",
    "gstreamer1-plugins-good",
    "gstreamer1-plugins-bad-free-devel",
    "gstreamer1-plugins-ugly-free",
    "libjpeg-turbo-devel",
    "zlib-ng",
    "libjpeg-turbo",
    "vulkan-loader",
    "libxkbcommon",
    "libxkbcommon-x11",
    "wireshark-cli",
]

# https://voidlinux.org/packages/
# 1. open devtools
# 2. paste in the whole XBPS_PKGS = [...]
# 3. copy(`sudo xbps-install ${XBPS_PKGS.join(" ")}`)
# 4. paste into https://github.com/servo/book/edit/main/src/hacking/setting-up-your-environment.md
XBPS_PKGS = ['libtool', 'gcc', 'libXi-devel', 'freetype-devel',
             'libunwind-devel', 'MesaLib-devel', 'glib-devel', 'pkg-config',
             'libX11-devel', 'libXrandr-devel', 'gperf', 'bzip2-devel',
             'fontconfig-devel', 'cabextract', 'expat-devel', 'cmake',
             'cmake', 'libXcursor-devel', 'libXmu-devel', 'dbus-devel',
             'ncurses-devel', 'harfbuzz-devel', 'ccache', 'glu-devel',
             'clang', 'gstreamer1-devel', 'gst-plugins-base1-devel',
             'gst-plugins-good1', 'gst-plugins-bad1-devel',
             'gst-plugins-ugly1', 'vulkan-loader', 'libxkbcommon',
             'libxkbcommon-x11']
XBPS_PKGS = [
    "libtool",
    "gcc",
    "libXi-devel",
    "freetype-devel",
    "libunwind-devel",
    "MesaLib-devel",
    "glib-devel",
    "pkg-config",
    "libX11-devel",
    "libXrandr-devel",
    "gperf",
    "bzip2-devel",
    "fontconfig-devel",
    "cabextract",
    "expat-devel",
    "cmake",
    "cmake",
    "libXcursor-devel",
    "libXmu-devel",
    "dbus-devel",
    "ncurses-devel",
    "harfbuzz-devel",
    "ccache",
    "glu-devel",
    "clang",
    "gstreamer1-devel",
    "gst-plugins-base1-devel",
    "gst-plugins-good1",
    "gst-plugins-bad1-devel",
    "gst-plugins-ugly1",
    "vulkan-loader",
    "libxkbcommon",
    "libxkbcommon-x11",
]

GSTREAMER_URL = \
GSTREAMER_URL = (
    "https://github.com/servo/servo-build-deps/releases/download/linux/gstreamer-1.16-x86_64-linux-gnu.20190515.tar.gz"
)


class Linux(Base):

@ -93,67 +174,68 @@ class Linux(Base):
        distrib = distro.name()
        version = distro.version()

        if distrib in ['LinuxMint', 'Linux Mint', 'KDE neon', 'Pop!_OS', 'TUXEDO OS']:
            if '.' in version:
                major, _ = version.split('.', 1)
        if distrib in ["LinuxMint", "Linux Mint", "KDE neon", "Pop!_OS", "TUXEDO OS"]:
            if "." in version:
                major, _ = version.split(".", 1)
            else:
                major = version

            distrib = 'Ubuntu'
            if major == '22':
                version = '22.04'
            elif major == '21':
                version = '21.04'
            elif major == '20':
                version = '20.04'
            elif major == '19':
                version = '18.04'
            elif major == '18':
                version = '16.04'
            distrib = "Ubuntu"
            if major == "22":
                version = "22.04"
            elif major == "21":
                version = "21.04"
            elif major == "20":
                version = "20.04"
            elif major == "19":
                version = "18.04"
            elif major == "18":
                version = "16.04"

        if distrib.lower() == 'elementary':
            distrib = 'Ubuntu'
            if version == '5.0':
                version = '18.04'
            elif version[0:3] == '0.4':
                version = '16.04'
        if distrib.lower() == "elementary":
            distrib = "Ubuntu"
            if version == "5.0":
                version = "18.04"
            elif version[0:3] == "0.4":
                version = "16.04"

        return (distrib, version)

    def _platform_bootstrap(self, force: bool) -> bool:
        if self.distro.lower() == 'nixos':
            print('NixOS does not need bootstrap, it will automatically enter a nix-shell')
            print('Just run ./mach build')
            print('')
            print('You will need to run a nix-shell if you are trying '
                  'to run any of the built binaries')
            print('To enter the nix-shell manually use:')
            print(' $ nix-shell')
        if self.distro.lower() == "nixos":
            print("NixOS does not need bootstrap, it will automatically enter a nix-shell")
            print("Just run ./mach build")
            print("")
            print("You will need to run a nix-shell if you are trying to run any of the built binaries")
            print("To enter the nix-shell manually use:")
            print(" $ nix-shell")
            return False

        if self.distro.lower() == 'ubuntu' and self.version > '22.04':
        if self.distro.lower() == "ubuntu" and self.version > "22.04":
            print(f"WARNING: unsupported version of {self.distro}: {self.version}")

        # FIXME: Better version checking for these distributions.
        if self.distro.lower() not in [
            'arch linux',
            'arch',
            'artix',
            'endeavouros',
            'centos linux',
            'centos',
            'debian gnu/linux',
            'raspbian gnu/linux',
            'fedora linux',
            'fedora',
            'nixos',
            'ubuntu',
            'void',
            'fedora linux asahi remix'
            "arch linux",
            "arch",
            "artix",
            "endeavouros",
            "centos linux",
            "centos",
            "debian gnu/linux",
            "raspbian gnu/linux",
            "fedora linux",
            "fedora",
            "nixos",
            "ubuntu",
            "void",
            "fedora linux asahi remix",
        ]:
            print(f"mach bootstrap does not support {self.distro}."
            print(
                f"mach bootstrap does not support {self.distro}."
                " You may be able to install dependencies manually."
                " See https://github.com/servo/servo/wiki/Building.")
                " See https://github.com/servo/servo/wiki/Building."
            )
            input("Press Enter to continue...")
            return False

@ -163,41 +245,39 @@ class Linux(Base):
    def install_non_gstreamer_dependencies(self, force: bool) -> bool:
        install = False
        pkgs = []
        if self.distro in ['Ubuntu', 'Debian GNU/Linux', 'Raspbian GNU/Linux']:
            command = ['apt-get', 'install', "-m"]
        if self.distro in ["Ubuntu", "Debian GNU/Linux", "Raspbian GNU/Linux"]:
            command = ["apt-get", "install", "-m"]
            pkgs = APT_PKGS

            # Skip 'clang' if 'clang' binary already exists.
            result = subprocess.run(['which', 'clang'], capture_output=True)
            result = subprocess.run(["which", "clang"], capture_output=True)
            if result and result.returncode == 0:
                pkgs.remove('clang')
                pkgs.remove("clang")

            # Try to filter out unknown packages from the list. This is important for Debian
            # as it does not ship all of the packages we want.
            installable = subprocess.check_output(['apt-cache', '--generate', 'pkgnames'])
            installable = subprocess.check_output(["apt-cache", "--generate", "pkgnames"])
            if installable:
                installable = installable.decode("ascii").splitlines()
                pkgs = list(filter(lambda pkg: pkg in installable, pkgs))

            if subprocess.call(['dpkg', '-s'] + pkgs, shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE) != 0:
            if subprocess.call(["dpkg", "-s"] + pkgs, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) != 0:
                install = True
        elif self.distro in ['CentOS', 'CentOS Linux', 'Fedora', 'Fedora Linux', 'Fedora Linux Asahi Remix']:
            command = ['dnf', 'install']
            installed_pkgs: [str] = (
                subprocess.check_output(['rpm', '--query', '--all', '--queryformat', '%{NAME}\n'],
                                        encoding='utf-8')
                .split('\n'))
        elif self.distro in ["CentOS", "CentOS Linux", "Fedora", "Fedora Linux", "Fedora Linux Asahi Remix"]:
            command = ["dnf", "install"]
            installed_pkgs: [str] = subprocess.check_output(
                ["rpm", "--query", "--all", "--queryformat", "%{NAME}\n"], encoding="utf-8"
            ).split("\n")
            pkgs = DNF_PKGS
            for pkg in pkgs:
                if pkg not in installed_pkgs:
                    install = True
                    break
        elif self.distro == 'void':
            installed_pkgs = str(subprocess.check_output(['xbps-query', '-l']))
        elif self.distro == "void":
            installed_pkgs = str(subprocess.check_output(["xbps-query", "-l"]))
            pkgs = XBPS_PKGS
            for pkg in pkgs:
                command = ['xbps-install', '-A']
                command = ["xbps-install", "-A"]
                if "ii {}-".format(pkg) not in installed_pkgs:
                    install = force = True
                    break

@ -207,22 +287,24 @@ class Linux(Base):

        def check_sudo():
            if os.geteuid() != 0:
                if shutil.which('sudo') is None:
                if shutil.which("sudo") is None:
                    return False
            return True

        def run_as_root(command, force=False):
            if os.geteuid() != 0:
                command.insert(0, 'sudo')
                command.insert(0, "sudo")
            if force:
                command.append('-y')
                command.append("-y")
            return subprocess.call(command)

        print("Installing missing dependencies...")
        if not check_sudo():
            print("'sudo' command not found."
            print(
                "'sudo' command not found."
                " You may be able to install dependencies manually."
                " See https://github.com/servo/servo/wiki/Building.")
                " See https://github.com/servo/servo/wiki/Building."
            )
            input("Press Enter to continue...")
            return False

@ -236,4 +318,5 @@ class Linux(Base):
    def _platform_bootstrap_gstreamer(self, _target: BuildTarget, _force: bool) -> bool:
        raise EnvironmentError(
            "Bootstrapping GStreamer on Linux is not supported. "
            + "Please install it using your distribution package manager.")
            + "Please install it using your distribution package manager."
        )
@ -42,9 +42,7 @@ class MacOS(Base):
        installed_something = False
        try:
            brewfile = os.path.join(util.SERVO_ROOT, "support", "macos", "Brewfile")
            output = subprocess.check_output(
                ['brew', 'bundle', 'install', "--file", brewfile]
            ).decode("utf-8")
            output = subprocess.check_output(["brew", "bundle", "install", "--file", brewfile]).decode("utf-8")
            print(output)
            installed_something = "Installing" in output
        except subprocess.CalledProcessError as e:

@ -60,14 +58,10 @@ class MacOS(Base):

        with tempfile.TemporaryDirectory() as temp_dir:
            libs_pkg = os.path.join(temp_dir, GSTREAMER_URL.rsplit("/", maxsplit=1)[-1])
            devel_pkg = os.path.join(
                temp_dir, GSTREAMER_DEVEL_URL.rsplit("/", maxsplit=1)[-1]
            )
            devel_pkg = os.path.join(temp_dir, GSTREAMER_DEVEL_URL.rsplit("/", maxsplit=1)[-1])

            util.download_file("GStreamer libraries", GSTREAMER_URL, libs_pkg)
            util.download_file(
                "GStreamer development support", GSTREAMER_DEVEL_URL, devel_pkg
            )
            util.download_file("GStreamer development support", GSTREAMER_DEVEL_URL, devel_pkg)

            print("Installing GStreamer packages...")
            subprocess.check_call(

@ -75,8 +69,7 @@ class MacOS(Base):
                    "sudo",
                    "sh",
                    "-c",
                    f"installer -pkg '{libs_pkg}' -target / &&"
                    f"installer -pkg '{devel_pkg}' -target /",
                    f"installer -pkg '{libs_pkg}' -target / &&installer -pkg '{devel_pkg}' -target /",
                ]
            )
@ -71,10 +71,18 @@ class Windows(Base):
            cmd_exe_args += ",'-f'"

            print(cmd_exe_args)
            subprocess.check_output([
                "powershell", "Start-Process", "-Wait", "-verb", "runAs",
                "cmd.exe", "-ArgumentList", f"@({cmd_exe_args})"
            ]).decode("utf-8")
            subprocess.check_output(
                [
                    "powershell",
                    "Start-Process",
                    "-Wait",
                    "-verb",
                    "runAs",
                    "cmd.exe",
                    "-ArgumentList",
                    f"@({cmd_exe_args})",
                ]
            ).decode("utf-8")
        except subprocess.CalledProcessError as e:
            print("Could not run chocolatey. Follow manual build setup instructions.")
            raise e

@ -87,8 +95,7 @@ class Windows(Base):
        """A bootstrap method that is called without explicitly invoking `./mach bootstrap`
        but that is executed in the process of other `./mach` commands. This should be
        as fast as possible."""
        to_install = [package for package in DEPENDENCIES if
                      not os.path.isdir(get_dependency_dir(package))]
        to_install = [package for package in DEPENDENCIES if not os.path.isdir(get_dependency_dir(package))]
        if not to_install:
            return False

@ -116,9 +123,7 @@ class Windows(Base):
        gst_arch_name = gst_arch_names[build_target_triple.split("-")[0]]

        # The bootstraped version of GStreamer always takes precedance of the installed vesion.
        prepackaged_root = os.path.join(
            DEPENDENCIES_DIR, "gstreamer", "1.0", f"msvc_{gst_arch_name}"
        )
        prepackaged_root = os.path.join(DEPENDENCIES_DIR, "gstreamer", "1.0", f"msvc_{gst_arch_name}")
        if os.path.exists(os.path.join(prepackaged_root, "bin", "ffi-7.dll")):
            return prepackaged_root

@ -143,20 +148,15 @@ class Windows(Base):
            return False

        if "x86_64" not in self.triple:
            print("Bootstrapping gstreamer not supported on "
                  "non-x86-64 Windows. Please install manually")
            print("Bootstrapping gstreamer not supported on non-x86-64 Windows. Please install manually")
            return False

        with tempfile.TemporaryDirectory() as temp_dir:
            libs_msi = os.path.join(temp_dir, GSTREAMER_URL.rsplit("/", maxsplit=1)[-1])
            devel_msi = os.path.join(
                temp_dir, GSTREAMER_DEVEL_URL.rsplit("/", maxsplit=1)[-1]
            )
            devel_msi = os.path.join(temp_dir, GSTREAMER_DEVEL_URL.rsplit("/", maxsplit=1)[-1])

            util.download_file("GStreamer libraries", GSTREAMER_URL, libs_msi)
            util.download_file(
                "GStreamer development support", GSTREAMER_DEVEL_URL, devel_msi
            )
            util.download_file("GStreamer development support", GSTREAMER_DEVEL_URL, devel_msi)

            print(f"Installing GStreamer packages to {DEPENDENCIES_DIR}...")
            os.makedirs(DEPENDENCIES_DIR, exist_ok=True)

@ -164,15 +164,24 @@ class Windows(Base):
            for installer in [libs_msi, devel_msi]:
                arguments = [
                    "/a",
                    f'"{installer}"'
                    f'TARGETDIR="{DEPENDENCIES_DIR}"',  # Install destination
                    f'"{installer}"TARGETDIR="{DEPENDENCIES_DIR}"',  # Install destination
                    "/qn",  # Quiet mode
                ]
                quoted_arguments = ",".join((f"'{arg}'" for arg in arguments))
                subprocess.check_call([
                    "powershell", "exit (Start-Process", "-PassThru", "-Wait", "-verb", "runAs",
                    "msiexec.exe", "-ArgumentList", f"@({quoted_arguments})", ").ExitCode"
                ])
                subprocess.check_call(
                    [
                        "powershell",
                        "exit (Start-Process",
                        "-PassThru",
                        "-Wait",
                        "-verb",
                        "runAs",
                        "msiexec.exe",
                        "-ArgumentList",
                        f"@({quoted_arguments})",
                        ").ExitCode",
                    ]
                )

            assert self.is_gstreamer_installed(target)
            return True
@ -51,39 +51,50 @@ def shell_quote(arg):
|
||||||
|
|
||||||
@CommandProvider
|
@CommandProvider
|
||||||
class PostBuildCommands(CommandBase):
|
class PostBuildCommands(CommandBase):
|
||||||
@Command('run',
|
@Command("run", description="Run Servo", category="post-build")
|
||||||
description='Run Servo',
|
|
||||||
category='post-build')
|
|
||||||
@CommandArgument('--android', action='store_true', default=None,
|
|
||||||
help='Run on an Android device through `adb shell`')
|
|
||||||
@CommandArgument('--emulator',
|
|
||||||
action='store_true',
|
|
||||||
help='For Android, run in the only emulated device')
|
|
||||||
@CommandArgument('--usb',
|
|
||||||
action='store_true',
|
|
||||||
help='For Android, run in the only USB device')
|
|
||||||
@CommandArgument('--debugger', action='store_true',
|
|
||||||
help='Enable the debugger. Not specifying a '
|
|
||||||
'--debugger-cmd option will result in the default '
|
|
||||||
'debugger being used. The following arguments '
|
|
||||||
'have no effect without this.')
|
|
||||||
@CommandArgument('--debugger-cmd', default=None, type=str,
|
|
||||||
help='Name of debugger to use.')
|
|
||||||
@CommandArgument('--headless', '-z', action='store_true',
|
|
||||||
help='Launch in headless mode')
|
|
||||||
@CommandArgument('--software', '-s', action='store_true',
|
|
||||||
help='Launch with software rendering')
|
|
||||||
@CommandArgument(
|
@CommandArgument(
|
||||||
'params', nargs='...',
|
"--android", action="store_true", default=None, help="Run on an Android device through `adb shell`"
|
||||||
help="Command-line arguments to be passed through to Servo")
|
)
|
||||||
|
@CommandArgument("--emulator", action="store_true", help="For Android, run in the only emulated device")
|
||||||
|
@CommandArgument("--usb", action="store_true", help="For Android, run in the only USB device")
|
||||||
|
@CommandArgument(
|
||||||
|
"--debugger",
|
||||||
|
action="store_true",
|
||||||
|
help="Enable the debugger. Not specifying a "
|
||||||
|
"--debugger-cmd option will result in the default "
|
||||||
|
"debugger being used. The following arguments "
|
||||||
|
"have no effect without this.",
|
||||||
|
)
|
||||||
|
@CommandArgument("--debugger-cmd", default=None, type=str, help="Name of debugger to use.")
|
||||||
|
@CommandArgument("--headless", "-z", action="store_true", help="Launch in headless mode")
|
||||||
|
@CommandArgument("--software", "-s", action="store_true", help="Launch with software rendering")
|
||||||
|
@CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to Servo")
|
||||||
@CommandBase.common_command_arguments(binary_selection=True)
|
@CommandBase.common_command_arguments(binary_selection=True)
|
||||||
@CommandBase.allow_target_configuration
|
@CommandBase.allow_target_configuration
|
||||||
def run(self, servo_binary: str, params, debugger=False, debugger_cmd=None,
|
def run(
|
||||||
headless=False, software=False, emulator=False, usb=False):
|
self,
|
||||||
|
servo_binary: str,
|
||||||
|
params,
|
||||||
|
debugger=False,
|
||||||
|
debugger_cmd=None,
|
||||||
|
headless=False,
|
||||||
|
software=False,
|
||||||
|
emulator=False,
|
||||||
|
usb=False,
|
||||||
|
):
|
||||||
return self._run(servo_binary, params, debugger, debugger_cmd, headless, software, emulator, usb)
|
return self._run(servo_binary, params, debugger, debugger_cmd, headless, software, emulator, usb)
|
||||||
|
|
||||||
def _run(self, servo_binary: str, params, debugger=False, debugger_cmd=None,
|
def _run(
|
||||||
headless=False, software=False, emulator=False, usb=False):
|
self,
|
||||||
|
servo_binary: str,
|
||||||
|
params,
|
||||||
|
debugger=False,
|
||||||
|
debugger_cmd=None,
|
||||||
|
headless=False,
|
||||||
|
software=False,
|
||||||
|
emulator=False,
|
||||||
|
usb=False,
|
||||||
|
):
|
||||||
env = self.build_env()
|
env = self.build_env()
|
||||||
env["RUST_BACKTRACE"] = "1"
|
env["RUST_BACKTRACE"] = "1"
|
||||||
if software:
|
if software:
|
||||||
|
@ -91,7 +102,7 @@ class PostBuildCommands(CommandBase):
|
||||||
print("Software rendering is only supported on Linux at the moment.")
|
print("Software rendering is only supported on Linux at the moment.")
|
||||||
return
|
return
|
||||||
|
|
||||||
env['LIBGL_ALWAYS_SOFTWARE'] = "1"
|
env["LIBGL_ALWAYS_SOFTWARE"] = "1"
|
||||||
os.environ.update(env)
|
os.environ.update(env)
|
||||||
|
|
||||||
# Make --debugger-cmd imply --debugger
|
# Make --debugger-cmd imply --debugger
|
||||||
|
@ -119,7 +130,7 @@ class PostBuildCommands(CommandBase):
|
||||||
"sleep 0.5",
|
"sleep 0.5",
|
||||||
f"echo Servo PID: $(pidof {ANDROID_APP_NAME})",
|
f"echo Servo PID: $(pidof {ANDROID_APP_NAME})",
|
||||||
f"logcat --pid=$(pidof {ANDROID_APP_NAME})",
|
f"logcat --pid=$(pidof {ANDROID_APP_NAME})",
|
||||||
"exit"
|
"exit",
|
||||||
]
|
]
|
||||||
args = [self.android_adb_path(env)]
|
args = [self.android_adb_path(env)]
|
||||||
if emulator and usb:
|
if emulator and usb:
|
||||||
|
@ -136,7 +147,7 @@ class PostBuildCommands(CommandBase):
|
||||||
args = [servo_binary]
|
args = [servo_binary]
|
||||||
|
|
||||||
if headless:
|
if headless:
|
||||||
args.append('-z')
|
args.append("-z")
|
||||||
|
|
||||||
# Borrowed and modified from:
|
# Borrowed and modified from:
|
||||||
# http://hg.mozilla.org/mozilla-central/file/c9cfa9b91dea/python/mozbuild/mozbuild/mach_commands.py#l883
|
# http://hg.mozilla.org/mozilla-central/file/c9cfa9b91dea/python/mozbuild/mozbuild/mach_commands.py#l883
|
||||||
|
@ -144,8 +155,7 @@ class PostBuildCommands(CommandBase):
|
||||||
if not debugger_cmd:
|
if not debugger_cmd:
|
||||||
# No debugger name was provided. Look for the default ones on
|
# No debugger name was provided. Look for the default ones on
|
||||||
# current OS.
|
# current OS.
|
||||||
debugger_cmd = mozdebug.get_default_debugger_name(
|
debugger_cmd = mozdebug.get_default_debugger_name(mozdebug.DebuggerSearch.KeepLooking)
|
||||||
mozdebug.DebuggerSearch.KeepLooking)
|
|
||||||
|
|
||||||
debugger_info = mozdebug.get_debugger_info(debugger_cmd)
|
debugger_info = mozdebug.get_debugger_info(debugger_cmd)
|
||||||
if not debugger_info:
|
if not debugger_info:
|
||||||
|
@ -153,17 +163,17 @@ class PostBuildCommands(CommandBase):
|
||||||
return 1
|
return 1
|
||||||
|
|
||||||
command = debugger_info.path
|
command = debugger_info.path
|
||||||
if debugger_cmd == 'gdb' or debugger_cmd == 'lldb':
|
if debugger_cmd == "gdb" or debugger_cmd == "lldb":
|
||||||
rust_command = 'rust-' + debugger_cmd
|
rust_command = "rust-" + debugger_cmd
|
||||||
try:
|
try:
|
||||||
subprocess.check_call([rust_command, '--version'], env=env, stdout=open(os.devnull, 'w'))
|
subprocess.check_call([rust_command, "--version"], env=env, stdout=open(os.devnull, "w"))
|
||||||
except (OSError, subprocess.CalledProcessError):
|
except (OSError, subprocess.CalledProcessError):
|
||||||
pass
|
pass
|
||||||
else:
|
else:
|
||||||
command = rust_command
|
command = rust_command
|
||||||
|
|
||||||
# Prepend the debugger args.
|
# Prepend the debugger args.
|
||||||
args = ([command] + debugger_info.args + args + params)
|
args = [command] + debugger_info.args + args + params
|
||||||
else:
|
else:
|
||||||
args = args + params
|
args = args + params
|
||||||
|
|
||||||
|
@ -177,36 +187,27 @@ class PostBuildCommands(CommandBase):
|
||||||
return exception.returncode
|
return exception.returncode
|
||||||
except OSError as exception:
|
except OSError as exception:
|
||||||
if exception.errno == 2:
|
if exception.errno == 2:
|
||||||
print("Servo Binary can't be found! Run './mach build'"
|
print("Servo Binary can't be found! Run './mach build' and try again!")
|
||||||
" and try again!")
|
|
||||||
else:
|
else:
|
||||||
raise exception
|
raise exception
|
||||||
|
|
||||||
@Command('android-emulator',
|
@Command("android-emulator", description="Run the Android emulator", category="post-build")
|
||||||
description='Run the Android emulator',
|
@CommandArgument("args", nargs="...", help="Command-line arguments to be passed through to the emulator")
|
||||||
category='post-build')
|
|
||||||
@CommandArgument(
|
|
||||||
'args', nargs='...',
|
|
||||||
help="Command-line arguments to be passed through to the emulator")
|
|
||||||
def android_emulator(self, args=None):
|
def android_emulator(self, args=None):
|
||||||
if not args:
|
if not args:
|
||||||
print("AVDs created by `./mach bootstrap-android` are servo-arm and servo-x86.")
|
print("AVDs created by `./mach bootstrap-android` are servo-arm and servo-x86.")
|
||||||
emulator = self.android_emulator_path(self.build_env())
|
emulator = self.android_emulator_path(self.build_env())
|
||||||
return subprocess.call([emulator] + args)
|
return subprocess.call([emulator] + args)
|
||||||
|
|
||||||
@Command('rr-record',
|
@Command("rr-record", description="Run Servo whilst recording execution with rr", category="post-build")
|
||||||
description='Run Servo whilst recording execution with rr',
|
@CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to Servo")
|
||||||
category='post-build')
|
|
||||||
@CommandArgument(
|
|
||||||
'params', nargs='...',
|
|
||||||
help="Command-line arguments to be passed through to Servo")
|
|
||||||
@CommandBase.common_command_arguments(binary_selection=True)
|
@CommandBase.common_command_arguments(binary_selection=True)
|
||||||
def rr_record(self, servo_binary: str, params=[]):
|
def rr_record(self, servo_binary: str, params=[]):
|
||||||
env = self.build_env()
|
env = self.build_env()
|
||||||
env["RUST_BACKTRACE"] = "1"
|
env["RUST_BACKTRACE"] = "1"
|
||||||
|
|
||||||
servo_cmd = [servo_binary] + params
|
servo_cmd = [servo_binary] + params
|
||||||
rr_cmd = ['rr', '--fatal-errors', 'record']
|
rr_cmd = ["rr", "--fatal-errors", "record"]
|
||||||
try:
|
try:
|
||||||
check_call(rr_cmd + servo_cmd)
|
check_call(rr_cmd + servo_cmd)
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
|
@@ -215,24 +216,22 @@ class PostBuildCommands(CommandBase):
             else:
                 raise e

-    @Command('rr-replay',
-             description='Replay the most recent execution of Servo that was recorded with rr',
-             category='post-build')
+    @Command(
+        "rr-replay",
+        description="Replay the most recent execution of Servo that was recorded with rr",
+        category="post-build",
+    )
     def rr_replay(self):
         try:
-            check_call(['rr', '--fatal-errors', 'replay'])
+            check_call(["rr", "--fatal-errors", "replay"])
         except OSError as e:
             if e.errno == 2:
                 print("rr binary can't be found!")
             else:
                 raise e

-    @Command('doc',
-             description='Generate documentation',
-             category='post-build')
-    @CommandArgument(
-        'params', nargs='...',
-        help="Command-line arguments to be passed through to cargo doc")
+    @Command("doc", description="Generate documentation", category="post-build")
+    @CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to cargo doc")
    @CommandBase.common_command_arguments(build_configuration=True, build_type=False)
    def doc(self, params: List[str], **kwargs):
        self.ensure_bootstrapped()
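
For context, the two rr commands above are designed as a pair: `rr-record` wraps one Servo run in a trace, and `rr-replay` replays the most recent trace. A minimal sketch of the same flow outside mach, assuming `rr` is on PATH and a built Servo binary at a hypothetical path:

    from subprocess import check_call

    # Record one Servo session under rr, then replay the latest trace.
    check_call(["rr", "--fatal-errors", "record", "target/release/servo", "https://example.org"])
    check_call(["rr", "--fatal-errors", "replay"])
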
@@ -46,10 +46,14 @@ SERVO_TESTS_PATH = os.path.join("tests", "wpt", "mozilla", "tests")
 # Servo depends on several `rustfmt` options that are unstable. These are still
 # supported by stable `rustfmt` if they are passed as these command-line arguments.
 UNSTABLE_RUSTFMT_ARGUMENTS = [
-    "--config", "unstable_features=true",
-    "--config", "binop_separator=Back",
-    "--config", "imports_granularity=Module",
-    "--config", "group_imports=StdExternalCrate",
+    "--config",
+    "unstable_features=true",
+    "--config",
+    "binop_separator=Back",
+    "--config",
+    "imports_granularity=Module",
+    "--config",
+    "group_imports=StdExternalCrate",
 ]

 # Listing these globs manually is a work-around for very slow `taplo` invocation
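
These arguments only take effect when forwarded to `rustfmt` after the `--` separator of `cargo fmt`, as the rustfmt helper further down does. A minimal sketch of that invocation:

    import subprocess

    # Stable rustfmt accepts unstable options when they arrive as --config overrides.
    subprocess.call(["cargo", "fmt", "--", *UNSTABLE_RUSTFMT_ARGUMENTS])
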
@@ -72,9 +76,21 @@ def format_toml_files_with_taplo(check_only: bool = True) -> int:
         return 1

     if check_only:
-        return call([taplo, "fmt", "--check", *TOML_GLOBS], env={'RUST_LOG': 'error'})
+        return call([taplo, "fmt", "--check", *TOML_GLOBS], env={"RUST_LOG": "error"})
     else:
-        return call([taplo, "fmt", *TOML_GLOBS], env={'RUST_LOG': 'error'})
+        return call([taplo, "fmt", *TOML_GLOBS], env={"RUST_LOG": "error"})


+def format_python_files_with_ruff(check_only: bool = True) -> int:
+    ruff = shutil.which("ruff")
+    if ruff is None:
+        print("Could not find `ruff`. Run `./mach bootstrap`")
+        return 1
+
+    if check_only:
+        return call([ruff, "format", "--check", "--quiet"])
+    else:
+        return call([ruff, "format", "--quiet"])
+
+
 def format_with_rustfmt(check_only: bool = True) -> int:
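
The new helper relies on ruff's exit-code contract: `ruff format --check` exits non-zero when any file would be reformatted, which is what lets callers treat the return value as a failure flag. A standalone sketch, assuming `ruff` is installed:

    import shutil
    import subprocess

    ruff = shutil.which("ruff")
    # Zero exit code means every Python file is already formatted.
    formatting_clean = subprocess.call([ruff, "format", "--check", "--quiet"]) == 0
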
@@ -83,8 +99,17 @@ def format_with_rustfmt(check_only: bool = True) -> int:
     if result != 0:
         return result

-    return call(["cargo", "fmt", "--manifest-path", "support/crown/Cargo.toml",
-                 "--", *UNSTABLE_RUSTFMT_ARGUMENTS, *maybe_check_only])
+    return call(
+        [
+            "cargo",
+            "fmt",
+            "--manifest-path",
+            "support/crown/Cargo.toml",
+            "--",
+            *UNSTABLE_RUSTFMT_ARGUMENTS,
+            *maybe_check_only,
+        ]
+    )


 @CommandProvider
@@ -97,15 +122,10 @@ class MachCommands(CommandBase):
         if not hasattr(self.context, "built_tests"):
             self.context.built_tests = False

-    @Command('test-perf',
-             description='Run the page load performance test',
-             category='testing')
-    @CommandArgument('--base', default=None,
-                     help="the base URL for testcases")
-    @CommandArgument('--date', default=None,
-                     help="the datestamp for the data")
-    @CommandArgument('--submit', '-a', default=False, action="store_true",
-                     help="submit the data to perfherder")
+    @Command("test-perf", description="Run the page load performance test", category="testing")
+    @CommandArgument("--base", default=None, help="the base URL for testcases")
+    @CommandArgument("--date", default=None, help="the datestamp for the data")
+    @CommandArgument("--submit", "-a", default=False, action="store_true", help="submit the data to perfherder")
     def test_perf(self, base=None, date=None, submit=False):
         env = self.build_env()
         cmd = ["bash", "test_perf.sh"]
@@ -115,20 +135,15 @@ class MachCommands(CommandBase):
             cmd += ["--date", date]
         if submit:
             cmd += ["--submit"]
-        return call(cmd,
-                    env=env,
-                    cwd=path.join("etc", "ci", "performance"))
+        return call(cmd, env=env, cwd=path.join("etc", "ci", "performance"))

-    @Command('test-unit',
-             description='Run unit tests',
-             category='testing')
-    @CommandArgument('test_name', nargs=argparse.REMAINDER,
-                     help="Only run tests that match this pattern or file path")
-    @CommandArgument('--package', '-p', default=None, help="Specific package to test")
-    @CommandArgument('--bench', default=False, action="store_true",
-                     help="Run in bench mode")
-    @CommandArgument('--nocapture', default=False, action="store_true",
-                     help="Run tests with nocapture ( show test stdout )")
+    @Command("test-unit", description="Run unit tests", category="testing")
+    @CommandArgument("test_name", nargs=argparse.REMAINDER, help="Only run tests that match this pattern or file path")
+    @CommandArgument("--package", "-p", default=None, help="Specific package to test")
+    @CommandArgument("--bench", default=False, action="store_true", help="Run in bench mode")
+    @CommandArgument(
+        "--nocapture", default=False, action="store_true", help="Run tests with nocapture ( show test stdout )"
+    )
     @CommandBase.common_command_arguments(build_configuration=True, build_type=True)
     def test_unit(self, build_type: BuildType, test_name=None, package=None, bench=False, nocapture=False, **kwargs):
         if test_name is None:
@@ -183,7 +198,7 @@ class MachCommands(CommandBase):
             "stylo_config",
         ]
         if not packages:
-            packages = set(os.listdir(path.join(self.context.topdir, "tests", "unit"))) - set(['.DS_Store'])
+            packages = set(os.listdir(path.join(self.context.topdir, "tests", "unit"))) - set([".DS_Store"])
         packages |= set(self_contained_tests)

         in_crate_packages = []
@@ -194,7 +209,7 @@ class MachCommands(CommandBase):
             except KeyError:
                 pass

-        packages.discard('stylo')
+        packages.discard("stylo")

         # Return if there is nothing to do.
         if len(packages) == 0 and len(in_crate_packages) == 0:
@@ -223,59 +238,56 @@ class MachCommands(CommandBase):
             result = call(["cargo", "bench" if bench else "test"], cwd="support/crown")
             if result != 0:
                 return result
-        return self.run_cargo_build_like_command(
-            "bench" if bench else "test",
-            args,
-            env=env,
-            **kwargs)
+        return self.run_cargo_build_like_command("bench" if bench else "test", args, env=env, **kwargs)

-    @Command('test-content',
-             description='Run the content tests',
-             category='testing')
+    @Command("test-content", description="Run the content tests", category="testing")
     def test_content(self):
-        print("Content tests have been replaced by web-platform-tests under "
-              "tests/wpt/mozilla/.")
+        print("Content tests have been replaced by web-platform-tests under tests/wpt/mozilla/.")
         return 0

-    @Command('test-tidy',
-             description='Run the source code tidiness check',
-             category='testing')
-    @CommandArgument('--all', default=False, action="store_true", dest="all_files",
-                     help="Check all files, and run the WPT lint in tidy, "
-                          "even if unchanged")
-    @CommandArgument('--no-progress', default=False, action="store_true",
-                     help="Don't show progress for tidy")
+    @Command("test-tidy", description="Run the source code tidiness check", category="testing")
+    @CommandArgument(
+        "--all",
+        default=False,
+        action="store_true",
+        dest="all_files",
+        help="Check all files, and run the WPT lint in tidy, even if unchanged",
+    )
+    @CommandArgument("--no-progress", default=False, action="store_true", help="Don't show progress for tidy")
     def test_tidy(self, all_files, no_progress):
         tidy_failed = tidy.scan(not all_files, not no_progress)

         print("\r ➤ Checking formatting of Rust files...")
         rustfmt_failed = format_with_rustfmt(check_only=True)
-        if rustfmt_failed:
-            print("Run `./mach fmt` to fix the formatting")
+        print("\r ➤ Checking formatting of python files...")
+        ruff_format_failed = format_python_files_with_ruff()

         print("\r ➤ Checking formatting of toml files...")
         taplo_failed = format_toml_files_with_taplo()

-        tidy_failed = tidy_failed or rustfmt_failed or taplo_failed
+        format_failed = rustfmt_failed or ruff_format_failed or taplo_failed
+        tidy_failed = format_failed or tidy_failed
         print()
         if tidy_failed:
             print("\r ❌ test-tidy reported errors.")
         else:
             print("\r ✅ test-tidy reported no errors.")

+        if format_failed:
+            print("Run `./mach fmt` to fix the formatting")
+
         return tidy_failed

-    @Command('test-scripts',
-             description='Run tests for all build and support scripts.',
-             category='testing')
-    @CommandArgument('--verbose', '-v', default=False, action="store_true",
-                     help="Enable verbose output")
-    @CommandArgument('--very-verbose', '-vv', default=False, action="store_true",
-                     help="Enable very verbose output")
-    @CommandArgument('--all', '-a', default=False, action="store_true",
-                     help="Run all script tests, even the slow ones.")
-    @CommandArgument('tests', default=None, nargs="...",
-                     help="Specific WebIDL tests to run, relative to the tests directory")
+    @Command("test-scripts", description="Run tests for all build and support scripts.", category="testing")
+    @CommandArgument("--verbose", "-v", default=False, action="store_true", help="Enable verbose output")
+    @CommandArgument("--very-verbose", "-vv", default=False, action="store_true", help="Enable very verbose output")
+    @CommandArgument(
+        "--all", "-a", default=False, action="store_true", help="Run all script tests, even the slow ones."
+    )
+    @CommandArgument(
+        "tests", default=None, nargs="...", help="Specific WebIDL tests to run, relative to the tests directory"
+    )
     def test_scripts(self, verbose, very_verbose, all, tests):
         if very_verbose:
             logging.getLogger().level = logging.DEBUG
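
All three format helpers used by `test_tidy` follow the same convention: they return 0 when the tree is clean and non-zero otherwise, so their results combine with a plain `or`. A condensed sketch of that aggregation:

    # Non-zero return codes are truthy, so the three flags OR together naturally.
    format_failed = bool(rustfmt_failed or ruff_format_failed or taplo_failed)
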
@@ -290,6 +302,7 @@ class MachCommands(CommandBase):
         passed = tidy.run_tests() and passed

         import python.servo.try_parser as try_parser

         print("Running try_parser tests...")
         passed = try_parser.run_tests() and passed

@@ -302,7 +315,9 @@ class MachCommands(CommandBase):
         try:
             result = subprocess.run(
                 ["etc/devtools_parser.py", "--json", "--use", "etc/devtools_parser_test.pcap"],
-                check=True, capture_output=True)
+                check=True,
+                capture_output=True,
+            )
             expected = open("etc/devtools_parser_test.json", "rb").read()
             actual = result.stdout
             assert actual == expected, f"Incorrect output!\nExpected: {repr(expected)}\nActual: {repr(actual)}"
@@ -323,41 +338,42 @@ class MachCommands(CommandBase):
             sys.path.insert(0, test_file_dir)
             run_file = path.abspath(path.join(test_file_dir, "runtests.py"))
             run_globals = {"__file__": run_file}
-            exec(compile(open(run_file).read(), run_file, 'exec'), run_globals)
+            exec(compile(open(run_file).read(), run_file, "exec"), run_globals)
             passed = run_globals["run_tests"](tests, verbose or very_verbose) and passed

         return 0 if passed else 1

-    @Command('test-devtools',
-             description='Run tests for devtools.',
-             category='testing')
+    @Command("test-devtools", description="Run tests for devtools.", category="testing")
     def test_devtools(self):
         print("Running devtools tests...")
         passed = servo.devtools_tests.run_tests(SCRIPT_PATH)
         return 0 if passed else 1

-    @Command('test-wpt-failure',
-             description='Run the tests harness that verifies that the test failures are reported correctly',
-             category='testing',
-             parser=wpt.create_parser)
+    @Command(
+        "test-wpt-failure",
+        description="Run the tests harness that verifies that the test failures are reported correctly",
+        category="testing",
+        parser=wpt.create_parser,
+    )
     @CommandBase.common_command_arguments(build_configuration=False, build_type=True)
     def test_wpt_failure(self, build_type: BuildType, **kwargs):
         kwargs["pause_after_test"] = False
         kwargs["include"] = ["infrastructure/failing-test.html"]
         return not self._test_wpt(build_type=build_type, **kwargs)

-    @Command('test-wpt',
-             description='Run the regular web platform test suite',
-             category='testing',
-             parser=wpt.create_parser)
+    @Command(
+        "test-wpt", description="Run the regular web platform test suite", category="testing", parser=wpt.create_parser
+    )
     @CommandBase.common_command_arguments(binary_selection=True)
     def test_wpt(self, servo_binary: str, **kwargs):
         return self._test_wpt(servo_binary, **kwargs)

-    @Command('test-wpt-android',
-             description='Run the web platform test suite in an Android emulator',
-             category='testing',
-             parser=wpt.create_parser)
+    @Command(
+        "test-wpt-android",
+        description="Run the web platform test suite in an Android emulator",
+        category="testing",
+        parser=wpt.create_parser,
+    )
     @CommandBase.common_command_arguments(build_configuration=False, build_type=True)
     def test_wpt_android(self, build_type: BuildType, binary_args=None, **kwargs):
         kwargs.update(
@@ -374,27 +390,30 @@ class MachCommands(CommandBase):
         return_value = wpt.run.run_tests(servo_binary, **kwargs)
         return return_value if not kwargs["always_succeed"] else 0

-    @Command('update-manifest',
-             description='Run test-wpt --manifest-update SKIP_TESTS to regenerate MANIFEST.json',
-             category='testing',
-             parser=wpt.manifestupdate.create_parser)
+    @Command(
+        "update-manifest",
+        description="Run test-wpt --manifest-update SKIP_TESTS to regenerate MANIFEST.json",
+        category="testing",
+        parser=wpt.manifestupdate.create_parser,
+    )
     def update_manifest(self, **kwargs):
         return wpt.manifestupdate.update(check_clean=False)

-    @Command('fmt',
-             description='Format Rust and TOML files',
-             category='testing')
+    @Command("fmt", description="Format Rust, Python, and TOML files", category="testing")
     def format_code(self):
+        result = format_python_files_with_ruff(check_only=False)
+        if result != 0:
+            return result
+
         result = format_toml_files_with_taplo(check_only=False)
         if result != 0:
             return result

         return format_with_rustfmt(check_only=False)

-    @Command('update-wpt',
-             description='Update the web platform tests',
-             category='testing',
-             parser=wpt.update.create_parser)
+    @Command(
+        "update-wpt", description="Update the web platform tests", category="testing", parser=wpt.update.create_parser
+    )
     def update_wpt(self, **kwargs):
         patch = kwargs.get("patch", False)
         if not patch and kwargs["sync"]:
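
`./mach fmt` now chains the three formatters in write mode and stops at the first failure. A condensed sketch of the control flow in `format_code` above (the wrapper name is invented):

    def format_all() -> int:
        # Run each formatter in write mode; surface the first non-zero exit code.
        for formatter in (format_python_files_with_ruff, format_toml_files_with_taplo):
            result = formatter(check_only=False)
            if result != 0:
                return result
        return format_with_rustfmt(check_only=False)
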
@@ -402,9 +421,7 @@ class MachCommands(CommandBase):
             return 1
         return wpt.update.update_tests(**kwargs)

-    @Command('test-android-startup',
-             description='Extremely minimal testing of Servo for Android',
-             category='testing')
+    @Command("test-android-startup", description="Extremely minimal testing of Servo for Android", category="testing")
     @CommandBase.common_command_arguments(build_configuration=False, build_type=True)
     def test_android_startup(self, build_type: BuildType):
         html = """
@@ -441,50 +458,49 @@ class MachCommands(CommandBase):
         py = path.join(self.context.topdir, "etc", "run_in_headless_android_emulator.py")
         return [py, avd, apk]

-    @Command('test-jquery', description='Run the jQuery test suite', category='testing')
+    @Command("test-jquery", description="Run the jQuery test suite", category="testing")
     @CommandBase.common_command_arguments(binary_selection=True)
     def test_jquery(self, servo_binary: str):
         return self.jquery_test_runner("test", servo_binary)

-    @Command('test-dromaeo', description='Run the Dromaeo test suite', category='testing')
-    @CommandArgument('tests', default=["recommended"], nargs="...", help="Specific tests to run")
-    @CommandArgument('--bmf-output', default=None, help="Specify BMF JSON output file")
+    @Command("test-dromaeo", description="Run the Dromaeo test suite", category="testing")
+    @CommandArgument("tests", default=["recommended"], nargs="...", help="Specific tests to run")
+    @CommandArgument("--bmf-output", default=None, help="Specify BMF JSON output file")
     @CommandBase.common_command_arguments(binary_selection=True)
     def test_dromaeo(self, tests, servo_binary: str, bmf_output: str | None = None):
         return self.dromaeo_test_runner(tests, servo_binary, bmf_output)

-    @Command('test-speedometer', description="Run servo's speedometer", category='testing')
-    @CommandArgument('--bmf-output', default=None, help="Specify BMF JSON output file")
+    @Command("test-speedometer", description="Run servo's speedometer", category="testing")
+    @CommandArgument("--bmf-output", default=None, help="Specify BMF JSON output file")
     @CommandBase.common_command_arguments(binary_selection=True)
     def test_speedometer(self, servo_binary: str, bmf_output: str | None = None):
         return self.speedometer_runner(servo_binary, bmf_output)

-    @Command('update-jquery',
-             description='Update the jQuery test suite expected results',
-             category='testing')
+    @Command("update-jquery", description="Update the jQuery test suite expected results", category="testing")
     @CommandBase.common_command_arguments(binary_selection=True)
     def update_jquery(self, servo_binary: str):
         return self.jquery_test_runner("update", servo_binary)

-    @Command('compare_dromaeo',
-             description='Compare outputs of two runs of ./mach test-dromaeo command',
-             category='testing')
-    @CommandArgument('params', default=None, nargs="...",
-                     help=" filepaths of output files of two runs of dromaeo test ")
+    @Command(
+        "compare_dromaeo", description="Compare outputs of two runs of ./mach test-dromaeo command", category="testing"
+    )
+    @CommandArgument(
+        "params", default=None, nargs="...", help=" filepaths of output files of two runs of dromaeo test "
+    )
     def compare_dromaeo(self, params):
         prev_op_filename = params[0]
         cur_op_filename = params[1]
-        result = {'Test': [], 'Prev_Time': [], 'Cur_Time': [], 'Difference(%)': []}
+        result = {"Test": [], "Prev_Time": [], "Cur_Time": [], "Difference(%)": []}
-        with open(prev_op_filename, 'r') as prev_op, open(cur_op_filename, 'r') as cur_op:
+        with open(prev_op_filename, "r") as prev_op, open(cur_op_filename, "r") as cur_op:
             l1 = prev_op.readline()
             l2 = cur_op.readline()

-            while ((l1.find('[dromaeo] Saving...') and l2.find('[dromaeo] Saving...'))):
+            while l1.find("[dromaeo] Saving...") and l2.find("[dromaeo] Saving..."):
                 l1 = prev_op.readline()
                 l2 = cur_op.readline()

             reach = 3
-            while (reach > 0):
+            while reach > 0:
                 l1 = prev_op.readline()
                 l2 = cur_op.readline()
                 reach -= 1
@@ -494,33 +510,62 @@ class MachCommands(CommandBase):
             l2 = cur_op.readline()
             if not l1:
                 break
-            result['Test'].append(str(l1).split('|')[0].strip())
-            result['Prev_Time'].append(float(str(l1).split('|')[1].strip()))
-            result['Cur_Time'].append(float(str(l2).split('|')[1].strip()))
-            a = float(str(l1).split('|')[1].strip())
-            b = float(str(l2).split('|')[1].strip())
-            result['Difference(%)'].append(((b - a) / a) * 100)
+            result["Test"].append(str(l1).split("|")[0].strip())
+            result["Prev_Time"].append(float(str(l1).split("|")[1].strip()))
+            result["Cur_Time"].append(float(str(l2).split("|")[1].strip()))
+            a = float(str(l1).split("|")[1].strip())
+            b = float(str(l2).split("|")[1].strip())
+            result["Difference(%)"].append(((b - a) / a) * 100)

-        width_col1 = max([len(x) for x in result['Test']])
-        width_col2 = max([len(str(x)) for x in result['Prev_Time']])
-        width_col3 = max([len(str(x)) for x in result['Cur_Time']])
-        width_col4 = max([len(str(x)) for x in result['Difference(%)']])
+        width_col1 = max([len(x) for x in result["Test"]])
+        width_col2 = max([len(str(x)) for x in result["Prev_Time"]])
+        width_col3 = max([len(str(x)) for x in result["Cur_Time"]])
+        width_col4 = max([len(str(x)) for x in result["Difference(%)"]])

-        for p, q, r, s in zip(['Test'], ['First Run'], ['Second Run'], ['Difference(%)']):
-            print("\033[1m" + "{}|{}|{}|{}".format(p.ljust(width_col1), q.ljust(width_col2), r.ljust(width_col3),
-                  s.ljust(width_col4)) + "\033[0m" + "\n" + "--------------------------------------------------"
-                  + "-------------------------------------------------------------------------")
+        for p, q, r, s in zip(["Test"], ["First Run"], ["Second Run"], ["Difference(%)"]):
+            print(
+                "\033[1m"
+                + "{}|{}|{}|{}".format(
+                    p.ljust(width_col1), q.ljust(width_col2), r.ljust(width_col3), s.ljust(width_col4)
+                )
+                + "\033[0m"
+                + "\n"
+                + "--------------------------------------------------"
+                + "-------------------------------------------------------------------------"
+            )

-        for a1, b1, c1, d1 in zip(result['Test'], result['Prev_Time'], result['Cur_Time'], result['Difference(%)']):
+        for a1, b1, c1, d1 in zip(result["Test"], result["Prev_Time"], result["Cur_Time"], result["Difference(%)"]):
             if d1 > 0:
-                print("\033[91m" + "{}|{}|{}|{}".format(a1.ljust(width_col1),
-                      str(b1).ljust(width_col2), str(c1).ljust(width_col3), str(d1).ljust(width_col4)) + "\033[0m")
+                print(
+                    "\033[91m"
+                    + "{}|{}|{}|{}".format(
+                        a1.ljust(width_col1),
+                        str(b1).ljust(width_col2),
+                        str(c1).ljust(width_col3),
+                        str(d1).ljust(width_col4),
+                    )
+                    + "\033[0m"
+                )
             elif d1 < 0:
-                print("\033[92m" + "{}|{}|{}|{}".format(a1.ljust(width_col1),
-                      str(b1).ljust(width_col2), str(c1).ljust(width_col3), str(d1).ljust(width_col4)) + "\033[0m")
+                print(
+                    "\033[92m"
+                    + "{}|{}|{}|{}".format(
+                        a1.ljust(width_col1),
+                        str(b1).ljust(width_col2),
+                        str(c1).ljust(width_col3),
+                        str(d1).ljust(width_col4),
+                    )
+                    + "\033[0m"
+                )
             else:
-                print("{}|{}|{}|{}".format(a1.ljust(width_col1), str(b1).ljust(width_col2),
-                      str(c1).ljust(width_col3), str(d1).ljust(width_col4)))
+                print(
+                    "{}|{}|{}|{}".format(
+                        a1.ljust(width_col1),
+                        str(b1).ljust(width_col2),
+                        str(c1).ljust(width_col3),
+                        str(d1).ljust(width_col4),
+                    )
+                )

     def jquery_test_runner(self, cmd, binary: str):
         base_dir = path.abspath(path.join("tests", "jquery"))
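
The per-test arithmetic above reduces to a single relative-change formula, where a positive result means the second run was slower (the helper name is invented):

    def percent_difference(prev_time: float, cur_time: float) -> float:
        # Relative change of the second run against the first, in percent.
        return ((cur_time - prev_time) / prev_time) * 100
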
@@ -529,12 +574,10 @@ class MachCommands(CommandBase):

         # Clone the jQuery repository if it doesn't exist
         if not os.path.isdir(jquery_dir):
-            check_call(
-                ["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/servo/jquery", jquery_dir])
+            check_call(["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/servo/jquery", jquery_dir])

         # Run pull in case the jQuery repo was updated since last test run
-        check_call(
-            ["git", "-C", jquery_dir, "pull"])
+        check_call(["git", "-C", jquery_dir, "pull"])

         # Check that a release servo build exists
         bin_path = path.abspath(binary)
@@ -553,29 +596,34 @@ class MachCommands(CommandBase):
         # Clone the Dromaeo repository if it doesn't exist
         if not os.path.isdir(dromaeo_dir):
             check_call(
-                ["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/notriddle/dromaeo", dromaeo_dir])
+                ["git", "clone", "-b", "servo", "--depth", "1", "https://github.com/notriddle/dromaeo", dromaeo_dir]
+            )

         # Run pull in case the Dromaeo repo was updated since last test run
-        check_call(
-            ["git", "-C", dromaeo_dir, "pull"])
+        check_call(["git", "-C", dromaeo_dir, "pull"])

         # Compile test suite
-        check_call(
-            ["make", "-C", dromaeo_dir, "web"])
+        check_call(["make", "-C", dromaeo_dir, "web"])

         # Check that a release servo build exists
         bin_path = path.abspath(binary)

-        return check_call(
-            [run_file, "|".join(tests), bin_path, base_dir, bmf_output])
+        return check_call([run_file, "|".join(tests), bin_path, base_dir, bmf_output])

     def speedometer_runner(self, binary: str, bmf_output: str | None):
-        speedometer = json.loads(subprocess.check_output([
-            binary,
-            "https://servospeedometer.netlify.app?headless=1",
-            "--pref", "dom_allow_scripts_to_close_windows",
-            "--window-size=1100x900",
-            "--headless"], timeout=120).decode())
+        speedometer = json.loads(
+            subprocess.check_output(
+                [
+                    binary,
+                    "https://servospeedometer.netlify.app?headless=1",
+                    "--pref",
+                    "dom_allow_scripts_to_close_windows",
+                    "--window-size=1100x900",
+                    "--headless",
+                ],
+                timeout=120,
+            ).decode()
+        )

         print(f"Score: {speedometer['Score']['mean']} ± {speedometer['Score']['delta']}")
@@ -583,53 +631,53 @@ class MachCommands(CommandBase):
             output = dict()

             def parse_speedometer_result(result):
-                if result['unit'] == "ms":
+                if result["unit"] == "ms":
                     output[f"Speedometer/{result['name']}"] = {
-                        'latency': {  # speedometer has ms we need to convert to ns
-                            'value': float(result['mean']) * 1000.0,
-                            'lower_value': float(result['min']) * 1000.0,
-                            'upper_value': float(result['max']) * 1000.0,
+                        "latency": {  # speedometer has ms we need to convert to ns
+                            "value": float(result["mean"]) * 1000.0,
+                            "lower_value": float(result["min"]) * 1000.0,
+                            "upper_value": float(result["max"]) * 1000.0,
                         }
                     }
-                elif result['unit'] == "score":
+                elif result["unit"] == "score":
                     output[f"Speedometer/{result['name']}"] = {
-                        'score': {
-                            'value': float(result['mean']),
-                            'lower_value': float(result['min']),
-                            'upper_value': float(result['max']),
+                        "score": {
+                            "value": float(result["mean"]),
+                            "lower_value": float(result["min"]),
+                            "upper_value": float(result["max"]),
                         }
                     }
                 else:
                     raise "Unknown unit!"

-                for child in result['children']:
+                for child in result["children"]:
                     parse_speedometer_result(child)

             for v in speedometer.values():
                 parse_speedometer_result(v)
-            with open(bmf_output, 'w', encoding='utf-8') as f:
+            with open(bmf_output, "w", encoding="utf-8") as f:
                 json.dump(output, f, indent=4)

-    @Command('update-net-cookies',
-             description='Update the net unit tests with cookie tests from http-state',
-             category='testing')
+    @Command(
+        "update-net-cookies",
+        description="Update the net unit tests with cookie tests from http-state",
+        category="testing",
+    )
     def update_net_cookies(self):
         cache_dir = path.join(self.config["tools"]["cache-dir"], "tests")
-        run_file = path.abspath(path.join(PROJECT_TOPLEVEL_PATH,
-                                          "components", "net", "tests",
-                                          "cookie_http_state_utils.py"))
+        run_file = path.abspath(
+            path.join(PROJECT_TOPLEVEL_PATH, "components", "net", "tests", "cookie_http_state_utils.py")
+        )
         run_globals = {"__file__": run_file}
-        exec(compile(open(run_file).read(), run_file, 'exec'), run_globals)
+        exec(compile(open(run_file).read(), run_file, "exec"), run_globals)
         return run_globals["update_test_file"](cache_dir)

-    @Command('update-webgl',
-             description='Update the WebGL conformance suite tests from Khronos repo',
-             category='testing')
-    @CommandArgument('--version', default='2.0.0',
-                     help='WebGL conformance suite version')
+    @Command(
+        "update-webgl", description="Update the WebGL conformance suite tests from Khronos repo", category="testing"
+    )
+    @CommandArgument("--version", default="2.0.0", help="WebGL conformance suite version")
     def update_webgl(self, version=None):
-        base_dir = path.abspath(path.join(PROJECT_TOPLEVEL_PATH,
-                                          "tests", "wpt", "mozilla", "tests", "webgl"))
+        base_dir = path.abspath(path.join(PROJECT_TOPLEVEL_PATH, "tests", "wpt", "mozilla", "tests", "webgl"))
         run_file = path.join(base_dir, "tools", "import-conformance-tests.py")
         dest_folder = path.join(base_dir, "conformance-%s" % version)
         patches_dir = path.join(base_dir, "tools")
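
Two notes on the speedometer block above. First, `raise "Unknown unit!"` predates this commit and is itself a bug in Python 3, where only `BaseException` subclasses can be raised; `raise ValueError("Unknown unit!")` would be the conventional form. Second, the BMF file it writes nests one entry per Speedometer node, keyed by measure kind; a hypothetical excerpt with invented values:

    output = {
        "Speedometer/Score": {
            "score": {"value": 100.0, "lower_value": 95.0, "upper_value": 105.0},
        },
        "Speedometer/TodoMVC-React": {
            # millisecond results are scaled by 1000.0, per the conversion above
            "latency": {"value": 123000.0, "lower_value": 118000.0, "upper_value": 130000.0},
        },
    }
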
@@ -638,18 +686,12 @@ class MachCommands(CommandBase):
             shutil.rmtree(dest_folder)

         run_globals = {"__file__": run_file}
-        exec(compile(open(run_file).read(), run_file, 'exec'), run_globals)
+        exec(compile(open(run_file).read(), run_file, "exec"), run_globals)
         return run_globals["update_conformance"](version, dest_folder, None, patches_dir)

-    @Command('update-webgpu',
-             description='Update the WebGPU conformance test suite',
-             category='testing')
-    @CommandArgument(
-        '--repo', '-r', default="https://github.com/gpuweb/cts",
-        help='Repo to vendor cts from')
-    @CommandArgument(
-        '--checkout', '-c', default="main",
-        help='Branch or commit of repo')
+    @Command("update-webgpu", description="Update the WebGPU conformance test suite", category="testing")
+    @CommandArgument("--repo", "-r", default="https://github.com/gpuweb/cts", help="Repo to vendor cts from")
+    @CommandArgument("--checkout", "-c", default="main", help="Branch or commit of repo")
     def cts(self, repo="https://github.com/gpuweb/cts", checkout="main"):
         tdir = path.join(self.context.topdir, "tests/wpt/webgpu/tests")
         clone_dir = path.join(tdir, "cts_clone")
@@ -672,52 +714,52 @@ class MachCommands(CommandBase):
         delete(path.join(clone_dir, "out-wpt", "cts-chunked2sec.https.html"))
         cts_html = path.join(clone_dir, "out-wpt", "cts.https.html")
         # patch
-        with open(cts_html, 'r') as file:
+        with open(cts_html, "r") as file:
             filedata = file.read()
         # files are mounted differently
-        filedata = filedata.replace('src=/webgpu/common/runtime/wpt.js', 'src=../webgpu/common/runtime/wpt.js')
+        filedata = filedata.replace("src=/webgpu/common/runtime/wpt.js", "src=../webgpu/common/runtime/wpt.js")
         # Mark all webgpu tests as long to increase their timeouts. This is needed due to wgpu's slowness.
         # TODO: replace this with more fine grained solution: https://github.com/servo/servo/issues/30999
-        filedata = filedata.replace('<meta charset=utf-8>',
-                                    '<meta charset=utf-8>\n<meta name="timeout" content="long">')
+        filedata = filedata.replace(
+            "<meta charset=utf-8>", '<meta charset=utf-8>\n<meta name="timeout" content="long">'
+        )
         # Write the file out again
-        with open(cts_html, 'w') as file:
+        with open(cts_html, "w") as file:
             file.write(filedata)
         logger = path.join(clone_dir, "out-wpt", "common/internal/logging/test_case_recorder.js")
-        with open(logger, 'r') as file:
+        with open(logger, "r") as file:
             filedata = file.read()
         filedata.replace("info(ex) {", "info(ex) {return;")
-        with open(logger, 'w') as file:
+        with open(logger, "w") as file:
             file.write(filedata)
         # copy
         delete(path.join(tdir, "webgpu"))
         shutil.copytree(path.join(clone_dir, "out-wpt"), path.join(tdir, "webgpu"))
         # update commit
         commit = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=clone_dir).decode()
-        with open(path.join(tdir, "checkout_commit.txt"), 'w') as file:
+        with open(path.join(tdir, "checkout_commit.txt"), "w") as file:
             file.write(commit)
         # clean up
         delete(clone_dir)
         print("Updating manifest.")
         return self.context.commands.dispatch("update-manifest", self.context)

-    @Command('smoketest',
-             description='Load a simple page in Servo and ensure that it closes properly',
-             category='testing')
-    @CommandArgument('params', nargs='...',
-                     help="Command-line arguments to be passed through to Servo")
+    @Command(
+        "smoketest", description="Load a simple page in Servo and ensure that it closes properly", category="testing"
+    )
+    @CommandArgument("params", nargs="...", help="Command-line arguments to be passed through to Servo")
     @CommandBase.common_command_arguments(binary_selection=True)
     def smoketest(self, servo_binary: str, params, **kwargs):
         # We pass `-f` here so that any thread panic will cause Servo to exit,
         # preventing a panic from hanging execution. This means that these kind
         # of panics won't cause timeouts on CI.
-        return PostBuildCommands(self.context)._run(servo_binary,
-                                                    params + ['-f', 'tests/html/close-on-load.html'])
+        return PostBuildCommands(self.context)._run(servo_binary, params + ["-f", "tests/html/close-on-load.html"])

-    @Command('try', description='Runs try jobs by force pushing to try branch', category='testing')
-    @CommandArgument('--remote', '-r', default="origin", help='A git remote to run the try job on')
-    @CommandArgument('try_strings', default=["full"], nargs='...',
-                     help="A list of try strings specifying what kind of job to run.")
+    @Command("try", description="Runs try jobs by force pushing to try branch", category="testing")
+    @CommandArgument("--remote", "-r", default="origin", help="A git remote to run the try job on")
+    @CommandArgument(
+        "try_strings", default=["full"], nargs="...", help="A list of try strings specifying what kind of job to run."
+    )
     def try_command(self, remote: str, try_strings: list[str]):
         if subprocess.check_output(["git", "diff", "--cached", "--name-only"]).strip():
             print("Cannot run `try` with staged and uncommited changes. ")
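
A pre-existing quirk the formatter faithfully preserves: in the CTS logger patch above, `filedata.replace("info(ex) {", "info(ex) {return;")` discards its result, because `str.replace` returns a new string rather than mutating in place, so the logger is written back unchanged. A corrected sketch would reassign:

    # Without the reassignment the replacement is a no-op.
    filedata = filedata.replace("info(ex) {", "info(ex) {return;")
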
@@ -755,7 +797,7 @@ class MachCommands(CommandBase):
         # tool and get the real URL.
         actions_url = remote_url.replace(".git", "/actions")
         if not actions_url.startswith("https"):
-            actions_url = actions_url.replace(':', '/')
+            actions_url = actions_url.replace(":", "/")
             actions_url = actions_url.replace("git@", "")
             actions_url = f"https://{actions_url}"
         print(f"Actions available at: {actions_url}")
@@ -770,25 +812,27 @@ class MachCommands(CommandBase):

 def create_parser_create():
     import argparse

     p = argparse.ArgumentParser()
-    p.add_argument("--no-editor", action="store_true",
-                   help="Don't try to open the test in an editor")
+    p.add_argument("--no-editor", action="store_true", help="Don't try to open the test in an editor")
     p.add_argument("-e", "--editor", action="store", help="Editor to use")
-    p.add_argument("--no-run", action="store_true",
-                   help="Don't try to update the wpt manifest or open the test in a browser")
+    p.add_argument(
+        "--no-run", action="store_true", help="Don't try to update the wpt manifest or open the test in a browser"
+    )
-    p.add_argument('--release', action="store_true",
-                   help="Run with a release build of servo")
+    p.add_argument("--release", action="store_true", help="Run with a release build of servo")
-    p.add_argument("--long-timeout", action="store_true",
-                   help="Test should be given a long timeout (typically 60s rather than 10s,"
-                        "but varies depending on environment)")
+    p.add_argument(
+        "--long-timeout",
+        action="store_true",
+        help="Test should be given a long timeout (typically 60s rather than 10s,but varies depending on environment)",
+    )
-    p.add_argument("--overwrite", action="store_true",
-                   help="Allow overwriting an existing test file")
+    p.add_argument("--overwrite", action="store_true", help="Allow overwriting an existing test file")
-    p.add_argument("-r", "--reftest", action="store_true",
-                   help="Create a reftest rather than a testharness (js) test"),
+    (
+        p.add_argument(
+            "-r", "--reftest", action="store_true", help="Create a reftest rather than a testharness (js) test"
+        ),
+    )
     p.add_argument("-ref", "--reference", dest="ref", help="Path to the reference file")
-    p.add_argument("--mismatch", action="store_true",
-                   help="Create a mismatch reftest")
+    p.add_argument("--mismatch", action="store_true", help="Create a mismatch reftest")
-    p.add_argument("--wait", action="store_true",
-                   help="Create a reftest that waits until takeScreenshot() is called")
+    p.add_argument("--wait", action="store_true", help="Create a reftest that waits until takeScreenshot() is called")
     p.add_argument("path", action="store", help="Path to the test file")
     return p
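
The parenthesized wrapper around the `--reftest` argument is ruff faithfully formatting a pre-existing stray trailing comma (the old `...),` turned the call into a one-element tuple); deleting that comma would let the call format like its neighbours. For reference, a hypothetical use of the finished parser (file names invented):

    p = create_parser_create()
    # Create a reftest with an explicit reference page.
    opts = p.parse_args(["--reftest", "-ref", "green-ref.html", "css/green.html"])
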
@@ -44,7 +44,7 @@ class JobConfig(object):
     number_of_wpt_chunks: int = 20
     # These are the fields that must match in between two JobConfigs for them to be able to be
     # merged. If you modify any of the fields above, make sure to update this line as well.
-    merge_compatibility_fields: ClassVar[List[str]] = ['workflow', 'profile', 'wpt_args', 'build_args']
+    merge_compatibility_fields: ClassVar[List[str]] = ["workflow", "profile", "wpt_args", "build_args"]

     def merge(self, other: JobConfig) -> bool:
         """Try to merge another job with this job. Returns True if merging is successful
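
A sketch of the gate this list implies, assuming `merge` compares exactly these fields before folding two jobs together (the helper name is invented):

    def can_merge(a: JobConfig, b: JobConfig) -> bool:
        # Jobs merge only when every compatibility field agrees.
        return all(getattr(a, field) == getattr(b, field) for field in JobConfig.merge_compatibility_fields)
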
@@ -101,11 +101,14 @@ def handle_preset(s: str) -> Optional[JobConfig]:
     elif any(word in s for word in ["ohos", "openharmony"]):
         return JobConfig("OpenHarmony", Workflow.OHOS)
     elif any(word in s for word in ["webgpu"]):
-        return JobConfig("WebGPU CTS", Workflow.LINUX,
-                         wpt=True,  # reftests are mode for new layout
-                         wpt_args="_webgpu",  # run only webgpu cts
-                         profile="production",  # WebGPU works to slow with debug assert
-                         unit_tests=False)  # production profile does not work with unit-tests
+        return JobConfig(
+            "WebGPU CTS",
+            Workflow.LINUX,
+            wpt=True,  # reftests are mode for new layout
+            wpt_args="_webgpu",  # run only webgpu cts
+            profile="production",  # WebGPU works to slow with debug assert
+            unit_tests=False,
+        )  # production profile does not work with unit-tests
     elif any(word in s for word in ["lint", "tidy"]):
         return JobConfig("Lint", Workflow.LINT)
     else:
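
For context, `handle_preset` maps a word from a try string onto a canned job, so the production-profile WebGPU job above is selected simply by including the word. A hypothetical call:

    job = handle_preset("webgpu")
    # -> JobConfig("WebGPU CTS", Workflow.LINUX, wpt=True, wpt_args="_webgpu",
    #              profile="production", unit_tests=False)
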
@ -199,115 +202,130 @@ if __name__ == "__main__":
|
||||||
|
|
||||||
class TestParser(unittest.TestCase):
|
class TestParser(unittest.TestCase):
|
||||||
def test_string(self):
|
def test_string(self):
|
||||||
self.assertDictEqual(json.loads(Config("linux-unit-tests fail-fast").to_json()),
|
self.assertDictEqual(
|
||||||
{'fail_fast': True,
|
json.loads(Config("linux-unit-tests fail-fast").to_json()),
|
||||||
'matrix': [{
|
{
|
||||||
'bencher': False,
|
"fail_fast": True,
|
||||||
'name': 'Linux (Unit Tests)',
|
"matrix": [
|
||||||
'number_of_wpt_chunks': 20,
|
{
|
||||||
'profile': 'release',
|
"bencher": False,
|
||||||
'unit_tests': True,
|
"name": "Linux (Unit Tests)",
|
||||||
'build_libservo': False,
|
"number_of_wpt_chunks": 20,
|
||||||
'workflow': 'linux',
|
"profile": "release",
|
||||||
'wpt': False,
|
"unit_tests": True,
|
||||||
'wpt_args': '',
|
"build_libservo": False,
|
||||||
'build_args': ''
|
"workflow": "linux",
|
||||||
}]
|
"wpt": False,
|
||||||
})
|
"wpt_args": "",
|
||||||
|
"build_args": "",
|
||||||
|
}
|
||||||
|
],
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
def test_empty(self):
|
def test_empty(self):
|
||||||
self.assertDictEqual(json.loads(Config("").to_json()),
|
self.assertDictEqual(
|
||||||
{"fail_fast": False, "matrix": [
|
json.loads(Config("").to_json()),
|
||||||
|
{
|
||||||
|
"fail_fast": False,
|
||||||
|
"matrix": [
|
||||||
{
|
{
|
||||||
"name": "Linux (Unit Tests, WPT, Bencher)",
|
"name": "Linux (Unit Tests, WPT, Bencher)",
|
||||||
'number_of_wpt_chunks': 20,
|
"number_of_wpt_chunks": 20,
|
||||||
"workflow": "linux",
|
"workflow": "linux",
|
||||||
"wpt": True,
|
"wpt": True,
|
||||||
"profile": "release",
|
"profile": "release",
|
||||||
"unit_tests": True,
|
"unit_tests": True,
|
||||||
'build_libservo': False,
|
"build_libservo": False,
|
||||||
'bencher': True,
|
"bencher": True,
|
||||||
"wpt_args": "",
|
"wpt_args": "",
|
||||||
'build_args': ''
|
"build_args": "",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "MacOS (Unit Tests)",
|
"name": "MacOS (Unit Tests)",
|
||||||
'number_of_wpt_chunks': 20,
|
"number_of_wpt_chunks": 20,
|
||||||
"workflow": "macos",
|
"workflow": "macos",
|
||||||
"wpt": False,
|
"wpt": False,
|
||||||
"profile": "release",
|
"profile": "release",
|
||||||
"unit_tests": True,
|
"unit_tests": True,
|
||||||
'build_libservo': False,
|
"build_libservo": False,
|
||||||
'bencher': False,
|
"bencher": False,
|
||||||
"wpt_args": "",
|
"wpt_args": "",
|
||||||
'build_args': ''
|
"build_args": "",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "Windows (Unit Tests)",
|
"name": "Windows (Unit Tests)",
|
||||||
'number_of_wpt_chunks': 20,
|
"number_of_wpt_chunks": 20,
|
||||||
"workflow": "windows",
|
"workflow": "windows",
|
||||||
"wpt": False,
|
"wpt": False,
|
||||||
"profile": "release",
|
"profile": "release",
|
||||||
"unit_tests": True,
|
"unit_tests": True,
|
||||||
'build_libservo': False,
|
"build_libservo": False,
|
||||||
'bencher': False,
|
"bencher": False,
|
||||||
"wpt_args": "",
|
"wpt_args": "",
|
||||||
'build_args': ''
|
"build_args": "",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "Android",
|
"name": "Android",
|
||||||
'number_of_wpt_chunks': 20,
|
"number_of_wpt_chunks": 20,
|
||||||
"workflow": "android",
|
"workflow": "android",
|
||||||
"wpt": False,
|
"wpt": False,
|
||||||
"profile": "release",
|
"profile": "release",
|
||||||
"unit_tests": False,
|
"unit_tests": False,
|
||||||
'build_libservo': False,
|
"build_libservo": False,
|
||||||
'bencher': False,
|
"bencher": False,
|
||||||
"wpt_args": "",
|
"wpt_args": "",
|
||||||
'build_args': ''
|
"build_args": "",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "OpenHarmony",
|
"name": "OpenHarmony",
|
||||||
'number_of_wpt_chunks': 20,
|
"number_of_wpt_chunks": 20,
|
||||||
"workflow": "ohos",
|
"workflow": "ohos",
|
||||||
"wpt": False,
|
"wpt": False,
|
||||||
"profile": "release",
|
"profile": "release",
|
||||||
"unit_tests": False,
|
"unit_tests": False,
|
||||||
'build_libservo': False,
|
"build_libservo": False,
|
||||||
'bencher': False,
|
"bencher": False,
|
||||||
"wpt_args": "",
|
"wpt_args": "",
|
||||||
'build_args': ''
|
"build_args": "",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "Lint",
|
"name": "Lint",
|
||||||
'number_of_wpt_chunks': 20,
|
"number_of_wpt_chunks": 20,
|
||||||
"workflow": "lint",
|
"workflow": "lint",
|
||||||
"wpt": False,
|
"wpt": False,
|
||||||
"profile": "release",
|
"profile": "release",
|
||||||
"unit_tests": False,
|
"unit_tests": False,
|
||||||
'build_libservo': False,
|
"build_libservo": False,
|
||||||
'bencher': False,
|
"bencher": False,
|
||||||
"wpt_args": "",
|
"wpt_args": "",
|
||||||
'build_args': ''
|
"build_args": "",
|
||||||
}
|
},
|
||||||
]})
|
],
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
    def test_job_merging(self):
-       self.assertDictEqual(json.loads(Config("linux-wpt").to_json()),
-                            {'fail_fast': False,
-                             'matrix': [{
-                                 'bencher': False,
-                                 'name': 'Linux (WPT)',
-                                 'number_of_wpt_chunks': 20,
-                                 'profile': 'release',
-                                 'unit_tests': False,
-                                 'build_libservo': False,
-                                 'workflow': 'linux',
-                                 'wpt': True,
-                                 'wpt_args': '',
-                                 'build_args': ''
-                             }]
-                             })
+       self.assertDictEqual(
+           json.loads(Config("linux-wpt").to_json()),
+           {
+               "fail_fast": False,
+               "matrix": [
+                   {
+                       "bencher": False,
+                       "name": "Linux (WPT)",
+                       "number_of_wpt_chunks": 20,
+                       "profile": "release",
+                       "unit_tests": False,
+                       "build_libservo": False,
+                       "workflow": "linux",
+                       "wpt": True,
+                       "wpt_args": "",
+                       "build_args": "",
+                   }
+               ],
+           },
+       )

        a = JobConfig("Linux (Unit Tests)", Workflow.LINUX, unit_tests=True)
        b = JobConfig("Linux", Workflow.LINUX, unit_tests=False)

@@ -319,8 +337,7 @@ class TestParser(unittest.TestCase):
        b = handle_preset("linux-wpt")
        b = handle_modifier(b, "linux-wpt")
        self.assertTrue(a.merge(b), "Should merge jobs that have different unit test configurations.")
-       self.assertEqual(a, JobConfig("Linux (Unit Tests, WPT)", Workflow.LINUX,
-                                     unit_tests=True, wpt=True))
+       self.assertEqual(a, JobConfig("Linux (Unit Tests, WPT)", Workflow.LINUX, unit_tests=True, wpt=True))

        a = JobConfig("Linux (Unit Tests)", Workflow.LINUX, unit_tests=True)
        b = JobConfig("Mac", Workflow.MACOS, unit_tests=True)

@@ -343,12 +360,10 @@ class TestParser(unittest.TestCase):
        self.assertEqual(a, JobConfig("Linux (Unit Tests)", Workflow.LINUX, unit_tests=True))

    def test_full(self):
-       self.assertDictEqual(json.loads(Config("full").to_json()),
-                            json.loads(Config("").to_json()))
+       self.assertDictEqual(json.loads(Config("full").to_json()), json.loads(Config("").to_json()))

    def test_wpt_alias(self):
-       self.assertDictEqual(json.loads(Config("wpt").to_json()),
-                            json.loads(Config("linux-wpt").to_json()))
+       self.assertDictEqual(json.loads(Config("wpt").to_json()), json.loads(Config("linux-wpt").to_json()))


def run_tests():

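`test_job_merging` exercises `JobConfig.merge`, which folds two requests for the same workflow into a single job. A minimal sketch of that merging rule, assuming only the fields visible in this diff (the real `JobConfig` also merges job names and carries more fields):

from dataclasses import dataclass


@dataclass
class Job:
    # Simplified stand-in for JobConfig, for illustration only.
    name: str
    workflow: str
    unit_tests: bool = False
    wpt: bool = False

    def merge(self, other: "Job") -> bool:
        # Jobs on different workflows cannot be folded together.
        if self.workflow != other.workflow:
            return False
        # Otherwise union the requested test suites into one job.
        self.unit_tests |= other.unit_tests
        self.wpt |= other.wpt
        return True


a = Job("Linux (Unit Tests)", "linux", unit_tests=True)
b = Job("Linux (WPT)", "linux", wpt=True)
assert a.merge(b) and a.unit_tests and a.wpt
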
@@ -49,12 +49,12 @@ def download(description: str, url: str, writer: BufferedIOBase, start_byte: int
    try:
        req = urllib.request.Request(url)
        if start_byte:
-           req = urllib.request.Request(url, headers={'Range': 'bytes={}-'.format(start_byte)})
+           req = urllib.request.Request(url, headers={"Range": "bytes={}-".format(start_byte)})
        resp = urllib.request.urlopen(req)

        fsize = None
-       if resp.info().get('Content-Length'):
-           fsize = int(resp.info().get('Content-Length').strip()) + start_byte
+       if resp.info().get("Content-Length"):
+           fsize = int(resp.info().get("Content-Length").strip()) + start_byte

        recved = start_byte
        chunk_size = 64 * 1024
@@ -72,7 +72,7 @@ def download(description: str, url: str, writer: BufferedIOBase, start_byte: int
            progress_line = "\rDownloading %s: %5.1f%%" % (description, pct)
            now = time.time()
            duration = now - previous_progress_line_time
-           if progress_line != previous_progress_line and duration > .1:
+           if progress_line != previous_progress_line and duration > 0.1:
                print(progress_line, end="")
                previous_progress_line = progress_line
                previous_progress_line_time = now
@@ -85,8 +85,10 @@ def download(description: str, url: str, writer: BufferedIOBase, start_byte: int
    except urllib.error.HTTPError as e:
        print("Download failed ({}): {} - {}".format(e.code, e.reason, url))
        if e.code == 403:
-           print("No Rust compiler binary available for this platform. "
-                 "Please see https://github.com/servo/servo/#prerequisites")
+           print(
+               "No Rust compiler binary available for this platform. "
+               "Please see https://github.com/servo/servo/#prerequisites"
+           )
        sys.exit(1)
    except urllib.error.URLError as e:
        print("Error downloading {}: {}. The failing URL was: {}".format(description, e.reason, url))
@@ -109,10 +111,10 @@ def download_file(description: str, url: str, destination_path: str):
    tmp_path = destination_path + ".part"
    try:
        start_byte = os.path.getsize(tmp_path)
-       with open(tmp_path, 'ab') as fd:
+       with open(tmp_path, "ab") as fd:
            download(description, url, fd, start_byte=start_byte)
    except os.error:
-       with open(tmp_path, 'wb') as fd:
+       with open(tmp_path, "wb") as fd:
            download(description, url, fd)
    os.rename(tmp_path, destination_path)

@@ -129,7 +131,7 @@ class ZipFileWithUnixPermissions(zipfile.ZipFile):

        extracted = self._extract_member(member, path, pwd)
        mode = os.stat(extracted).st_mode
-       mode |= (member.external_attr >> 16)
+       mode |= member.external_attr >> 16
        os.chmod(extracted, mode)
        return extracted

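The `download`/`download_file` pair above implements resumable downloads: the size of a leftover `.part` file becomes the start offset of an HTTP `Range` request, and the final file name is only committed once the transfer completes. A self-contained sketch of the same idea (placeholder URL; assumes the server honors `Range` requests):

import os
import urllib.request


def fetch_resumable(url: str, dest: str) -> None:
    """Download url to dest, resuming from a partial .part file if present."""
    part = dest + ".part"
    start = os.path.getsize(part) if os.path.exists(part) else 0
    # Assumes the server honors Range; otherwise the file would be re-fetched.
    headers = {"Range": "bytes={}-".format(start)} if start else {}
    req = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(req) as resp, open(part, "ab" if start else "wb") as fd:
        while True:
            chunk = resp.read(64 * 1024)  # same 64 KiB chunking as above
            if not chunk:
                break
            fd.write(chunk)
    os.rename(part, dest)  # only commit the final name once complete
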
@@ -38,7 +38,7 @@ def find_vswhere():
    for path in [PROGRAM_FILES, PROGRAM_FILES_X86]:
        if not path:
            continue
-       vswhere = os.path.join(path, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe')
+       vswhere = os.path.join(path, "Microsoft Visual Studio", "Installer", "vswhere.exe")
        if os.path.exists(vswhere):
            return vswhere
    return None
@@ -52,24 +52,30 @@ def find_compatible_msvc_with_vswhere() -> Generator[VisualStudioInstallation, None, None]:
    if not vswhere:
        return

-   output = subprocess.check_output([
+   output = subprocess.check_output(
+       [
        vswhere,
-       '-format', 'json',
-       '-products', '*',
-       '-requires', 'Microsoft.VisualStudio.Component.VC.Tools.x86.x64',
-       '-requires', 'Microsoft.VisualStudio.Component.Windows10SDK',
-       '-utf8'
-   ]).decode(errors='ignore')
+           "-format",
+           "json",
+           "-products",
+           "*",
+           "-requires",
+           "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
+           "-requires",
+           "Microsoft.VisualStudio.Component.Windows10SDK",
+           "-utf8",
+       ]
+   ).decode(errors="ignore")

    for install in json.loads(output):
        installed_version = f"{install['installationVersion'].split('.')[0]}.0"
        if installed_version not in COMPATIBLE_MSVC_VERSIONS.values():
            continue
-       installation_path = install['installationPath']
+       installation_path = install["installationPath"]
        yield VisualStudioInstallation(
            version_number=installed_version,
            installation_path=installation_path,
-           vc_install_path=os.path.join(installation_path, "VC")
+           vc_install_path=os.path.join(installation_path, "VC"),
        )

@@ -77,20 +83,20 @@ def find_compatible_msvc_with_path() -> Generator[VisualStudioInstallation, None, None]:
    for program_files in [PROGRAM_FILES, PROGRAM_FILES_X86]:
        if not program_files:
            continue
-       for (version, version_number) in COMPATIBLE_MSVC_VERSIONS.items():
+       for version, version_number in COMPATIBLE_MSVC_VERSIONS.items():
            for edition in ["Enterprise", "Professional", "Community", "BuildTools"]:
                installation_path = os.path.join(program_files, "Microsoft Visual Studio", version, edition)
                if os.path.exists(installation_path):
                    yield VisualStudioInstallation(
                        version_number=version_number,
                        installation_path=installation_path,
-                       vc_install_path=os.path.join(installation_path, "VC")
+                       vc_install_path=os.path.join(installation_path, "VC"),
                    )


def find_compatible_msvc_with_environment_variables() -> Optional[VisualStudioInstallation]:
-   installation_path = os.environ.get('VSINSTALLDIR')
-   version_number = os.environ.get('VisualStudioVersion')
+   installation_path = os.environ.get("VSINSTALLDIR")
+   version_number = os.environ.get("VisualStudioVersion")
    if not installation_path or not version_number:
        return None
    vc_install_path = os.environ.get("VCINSTALLDIR", os.path.join(installation_path, "VC"))
@@ -116,8 +122,10 @@ def find_msvc_installations() -> List[VisualStudioInstallation]:
    if installation:
        return [installation]

-   raise Exception("Can't find a Visual Studio installation. "
-                   "Please set the VSINSTALLDIR and VisualStudioVersion environment variables")
+   raise Exception(
+       "Can't find a Visual Studio installation. "
+       "Please set the VSINSTALLDIR and VisualStudioVersion environment variables"
+   )


def find_msvc_redist_dirs(vs_platform: str) -> Generator[str, None, None]:
@@ -160,7 +168,7 @@ def find_windows_sdk_installation_path() -> str:

    # This is based on the advice from
    # https://stackoverflow.com/questions/35119223/how-to-programmatically-detect-and-locate-the-windows-10-sdk
-   key_path = r'SOFTWARE\Wow6432Node\Microsoft\Microsoft SDKs\Windows\v10.0'
+   key_path = r"SOFTWARE\Wow6432Node\Microsoft\Microsoft SDKs\Windows\v10.0"
    try:
        with winreg.OpenKeyEx(winreg.HKEY_LOCAL_MACHINE, key_path) as key:
            return str(winreg.QueryValueEx(key, "InstallationFolder")[0])

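`vswhere.exe` is the Microsoft-provided locator queried above; `-format json` makes its output machine-readable, and the `installationVersion`/`installationPath` keys used here come straight from that output. A rough standalone sketch of the same query (Windows-only; the default installer path is an assumption):

import json
import os
import subprocess

# Default vswhere location; an assumption, matching the search above.
VSWHERE = os.path.expandvars(r"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe")


def list_vs_installs() -> list[dict]:
    """Return raw install records exactly as vswhere reports them."""
    out = subprocess.check_output([VSWHERE, "-format", "json", "-products", "*", "-utf8"])
    return json.loads(out.decode("utf-8", errors="ignore"))


for install in list_vs_installs():
    print(install["installationVersion"], install["installationPath"])
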
@@ -15,7 +15,7 @@ import unittest
from . import tidy


-BASE_PATH = 'python/tidy/tests/'
+BASE_PATH = "python/tidy/tests/"


def test_file_path(name):
@@ -32,179 +32,170 @@ class CheckTidiness(unittest.TestCase):
        next(errors)

    def test_tidy_config(self):
-       errors = tidy.check_config_file(os.path.join(BASE_PATH, 'servo-tidy.toml'), print_text=False)
+       errors = tidy.check_config_file(os.path.join(BASE_PATH, "servo-tidy.toml"), print_text=False)
        self.assertEqual("invalid config key 'key-outside'", next(errors)[2])
        self.assertEqual("invalid config key 'wrong-key'", next(errors)[2])
-       self.assertEqual('invalid config table [wrong]', next(errors)[2])
+       self.assertEqual("invalid config table [wrong]", next(errors)[2])
        self.assertEqual("ignored file './fake/file.html' doesn't exist", next(errors)[2])
        self.assertEqual("ignored directory './fake/dir' doesn't exist", next(errors)[2])
        self.assertNoMoreErrors(errors)

    def test_directory_checks(self):
        dirs = {
-           os.path.join(BASE_PATH, "dir_check/webidl_plus"): ['webidl', 'test'],
-           os.path.join(BASE_PATH, "dir_check/only_webidl"): ['webidl']
+           os.path.join(BASE_PATH, "dir_check/webidl_plus"): ["webidl", "test"],
+           os.path.join(BASE_PATH, "dir_check/only_webidl"): ["webidl"],
        }
        errors = tidy.check_directory_files(dirs, print_text=False)
        error_dir = os.path.join(BASE_PATH, "dir_check/webidl_plus")
-       self.assertEqual("Unexpected extension found for test.rs. We only expect files with webidl, "
-                        + f"test extensions in {error_dir}", next(errors)[2])
-       self.assertEqual("Unexpected extension found for test2.rs. We only expect files with webidl, "
-                        + f"test extensions in {error_dir}", next(errors)[2])
+       self.assertEqual(
+           "Unexpected extension found for test.rs. We only expect files with webidl, "
+           + f"test extensions in {error_dir}",
+           next(errors)[2],
+       )
+       self.assertEqual(
+           "Unexpected extension found for test2.rs. We only expect files with webidl, "
+           + f"test extensions in {error_dir}",
+           next(errors)[2],
+       )
        self.assertNoMoreErrors(errors)

    def test_spaces_correctnes(self):
-       errors = tidy.collect_errors_for_files(iterFile('wrong_space.rs'), [], [tidy.check_by_line], print_text=False)
-       self.assertEqual('trailing whitespace', next(errors)[2])
-       self.assertEqual('no newline at EOF', next(errors)[2])
-       self.assertEqual('tab on line', next(errors)[2])
-       self.assertEqual('CR on line', next(errors)[2])
-       self.assertEqual('no newline at EOF', next(errors)[2])
+       errors = tidy.collect_errors_for_files(iterFile("wrong_space.rs"), [], [tidy.check_by_line], print_text=False)
+       self.assertEqual("trailing whitespace", next(errors)[2])
+       self.assertEqual("no newline at EOF", next(errors)[2])
+       self.assertEqual("tab on line", next(errors)[2])
+       self.assertEqual("CR on line", next(errors)[2])
+       self.assertEqual("no newline at EOF", next(errors)[2])
        self.assertNoMoreErrors(errors)

    def test_empty_file(self):
-       errors = tidy.collect_errors_for_files(iterFile('empty_file.rs'), [], [tidy.check_by_line], print_text=False)
-       self.assertEqual('file is empty', next(errors)[2])
+       errors = tidy.collect_errors_for_files(iterFile("empty_file.rs"), [], [tidy.check_by_line], print_text=False)
+       self.assertEqual("file is empty", next(errors)[2])
        self.assertNoMoreErrors(errors)

    def test_long_line(self):
-       errors = tidy.collect_errors_for_files(iterFile('long_line.rs'), [], [tidy.check_by_line], print_text=False)
-       self.assertEqual('Line is longer than 120 characters', next(errors)[2])
+       errors = tidy.collect_errors_for_files(iterFile("long_line.rs"), [], [tidy.check_by_line], print_text=False)
+       self.assertEqual("Line is longer than 120 characters", next(errors)[2])
        self.assertNoMoreErrors(errors)

    def test_whatwg_link(self):
-       errors = tidy.collect_errors_for_files(iterFile('whatwg_link.rs'), [], [tidy.check_by_line], print_text=False)
-       self.assertEqual('link to WHATWG may break in the future, use this format instead: https://html.spec.whatwg.org/multipage/#dom-context-2d-putimagedata', next(errors)[2])
-       self.assertEqual('links to WHATWG single-page url, change to multi page: https://html.spec.whatwg.org/multipage/#typographic-conventions', next(errors)[2])
+       errors = tidy.collect_errors_for_files(iterFile("whatwg_link.rs"), [], [tidy.check_by_line], print_text=False)
+       self.assertEqual(
+           "link to WHATWG may break in the future, use this format instead: https://html.spec.whatwg.org/multipage/#dom-context-2d-putimagedata",
+           next(errors)[2],
+       )
+       self.assertEqual(
+           "links to WHATWG single-page url, change to multi page: https://html.spec.whatwg.org/multipage/#typographic-conventions",
+           next(errors)[2],
+       )
        self.assertNoMoreErrors(errors)

    def test_license(self):
        errors = tidy.collect_errors_for_files(
-           iterFile('incorrect_license.rs'),
-           [],
-           [tidy.check_license],
-           print_text=False
+           iterFile("incorrect_license.rs"), [], [tidy.check_license], print_text=False
        )
-       self.assertEqual('incorrect license', next(errors)[2])
+       self.assertEqual("incorrect license", next(errors)[2])
        self.assertNoMoreErrors(errors)

    def test_shebang_license(self):
        errors = tidy.collect_errors_for_files(
-           iterFile('shebang_license.py'),
-           [],
-           [tidy.check_license],
-           print_text=False
+           iterFile("shebang_license.py"), [], [tidy.check_license], print_text=False
        )
-       self.assertEqual('missing blank line after shebang', next(errors)[2])
+       self.assertEqual("missing blank line after shebang", next(errors)[2])
        self.assertNoMoreErrors(errors)

    def test_shell(self):
-       errors = tidy.collect_errors_for_files(iterFile('shell_tidy.sh'), [], [tidy.check_shell], print_text=False)
+       errors = tidy.collect_errors_for_files(iterFile("shell_tidy.sh"), [], [tidy.check_shell], print_text=False)
        self.assertEqual('script does not have shebang "#!/usr/bin/env bash"', next(errors)[2])
        self.assertEqual('script is missing options "set -o errexit", "set -o pipefail"', next(errors)[2])
-       self.assertEqual('script should not use backticks for command substitution', next(errors)[2])
-       self.assertEqual('variable substitutions should use the full \"${VAR}\" form', next(errors)[2])
-       self.assertEqual('script should use `[[` instead of `[` for conditional testing', next(errors)[2])
-       self.assertEqual('script should use `[[` instead of `[` for conditional testing', next(errors)[2])
+       self.assertEqual("script should not use backticks for command substitution", next(errors)[2])
+       self.assertEqual('variable substitutions should use the full "${VAR}" form', next(errors)[2])
+       self.assertEqual("script should use `[[` instead of `[` for conditional testing", next(errors)[2])
+       self.assertEqual("script should use `[[` instead of `[` for conditional testing", next(errors)[2])
        self.assertNoMoreErrors(errors)

    def test_apache2_incomplete(self):
        errors = tidy.collect_errors_for_files(
-           iterFile('apache2_license.rs'),
-           [],
-           [tidy.check_license],
-           print_text=False
+           iterFile("apache2_license.rs"), [], [tidy.check_license], print_text=False
        )
-       self.assertEqual('incorrect license', next(errors)[2])
+       self.assertEqual("incorrect license", next(errors)[2])

    def test_rust(self):
-       errors = tidy.collect_errors_for_files(
-           iterFile('rust_tidy.rs'),
-           [],
-           [tidy.check_rust],
-           print_text=False
-       )
-       self.assertTrue('mod declaration is not in alphabetical order' in next(errors)[2])
-       self.assertEqual('mod declaration spans multiple lines', next(errors)[2])
-       self.assertTrue('derivable traits list is not in alphabetical order' in next(errors)[2])
-       self.assertEqual('found an empty line following a {', next(errors)[2])
-       self.assertEqual('use &[T] instead of &Vec<T>', next(errors)[2])
-       self.assertEqual('use &str instead of &String', next(errors)[2])
-       self.assertEqual('use &T instead of &Root<T>', next(errors)[2])
-       self.assertEqual('use &T instead of &DomRoot<T>', next(errors)[2])
-       self.assertEqual('encountered function signature with -> ()', next(errors)[2])
-       self.assertEqual('operators should go at the end of the first line', next(errors)[2])
-       self.assertEqual('unwrap() or panic!() found in code which should not panic.', next(errors)[2])
-       self.assertEqual('unwrap() or panic!() found in code which should not panic.', next(errors)[2])
+       errors = tidy.collect_errors_for_files(iterFile("rust_tidy.rs"), [], [tidy.check_rust], print_text=False)
+       self.assertTrue("mod declaration is not in alphabetical order" in next(errors)[2])
+       self.assertEqual("mod declaration spans multiple lines", next(errors)[2])
+       self.assertTrue("derivable traits list is not in alphabetical order" in next(errors)[2])
+       self.assertEqual("found an empty line following a {", next(errors)[2])
+       self.assertEqual("use &[T] instead of &Vec<T>", next(errors)[2])
+       self.assertEqual("use &str instead of &String", next(errors)[2])
+       self.assertEqual("use &T instead of &Root<T>", next(errors)[2])
+       self.assertEqual("use &T instead of &DomRoot<T>", next(errors)[2])
+       self.assertEqual("encountered function signature with -> ()", next(errors)[2])
+       self.assertEqual("operators should go at the end of the first line", next(errors)[2])
+       self.assertEqual("unwrap() or panic!() found in code which should not panic.", next(errors)[2])
+       self.assertEqual("unwrap() or panic!() found in code which should not panic.", next(errors)[2])
        self.assertNoMoreErrors(errors)

-       feature_errors = tidy.collect_errors_for_files(iterFile('lib.rs'), [], [tidy.check_rust], print_text=False)
+       feature_errors = tidy.collect_errors_for_files(iterFile("lib.rs"), [], [tidy.check_rust], print_text=False)

-       self.assertTrue('feature attribute is not in alphabetical order' in next(feature_errors)[2])
-       self.assertTrue('feature attribute is not in alphabetical order' in next(feature_errors)[2])
-       self.assertTrue('feature attribute is not in alphabetical order' in next(feature_errors)[2])
-       self.assertTrue('feature attribute is not in alphabetical order' in next(feature_errors)[2])
+       self.assertTrue("feature attribute is not in alphabetical order" in next(feature_errors)[2])
+       self.assertTrue("feature attribute is not in alphabetical order" in next(feature_errors)[2])
+       self.assertTrue("feature attribute is not in alphabetical order" in next(feature_errors)[2])
+       self.assertTrue("feature attribute is not in alphabetical order" in next(feature_errors)[2])
        self.assertNoMoreErrors(feature_errors)

-       ban_errors = tidy.collect_errors_for_files(iterFile('ban.rs'), [], [tidy.check_rust], print_text=False)
-       self.assertEqual('Banned type Cell<JSVal> detected. Use MutDom<JSVal> instead', next(ban_errors)[2])
+       ban_errors = tidy.collect_errors_for_files(iterFile("ban.rs"), [], [tidy.check_rust], print_text=False)
+       self.assertEqual("Banned type Cell<JSVal> detected. Use MutDom<JSVal> instead", next(ban_errors)[2])
        self.assertNoMoreErrors(ban_errors)

-       ban_errors = tidy.collect_errors_for_files(iterFile(
-           'ban-domrefcell.rs'),
-           [],
-           [tidy.check_rust],
-           print_text=False
+       ban_errors = tidy.collect_errors_for_files(
+           iterFile("ban-domrefcell.rs"), [], [tidy.check_rust], print_text=False
        )
-       self.assertEqual('Banned type DomRefCell<Dom<T>> detected. Use MutDom<T> instead', next(ban_errors)[2])
+       self.assertEqual("Banned type DomRefCell<Dom<T>> detected. Use MutDom<T> instead", next(ban_errors)[2])
        self.assertNoMoreErrors(ban_errors)

    def test_spec_link(self):
        tidy.SPEC_BASE_PATH = BASE_PATH
-       errors = tidy.collect_errors_for_files(iterFile('speclink.rs'), [], [tidy.check_spec], print_text=False)
-       self.assertEqual('method declared in webidl is missing a comment with a specification link', next(errors)[2])
-       self.assertEqual('method declared in webidl is missing a comment with a specification link', next(errors)[2])
+       errors = tidy.collect_errors_for_files(iterFile("speclink.rs"), [], [tidy.check_spec], print_text=False)
+       self.assertEqual("method declared in webidl is missing a comment with a specification link", next(errors)[2])
+       self.assertEqual("method declared in webidl is missing a comment with a specification link", next(errors)[2])
        self.assertNoMoreErrors(errors)

    def test_webidl(self):
-       errors = tidy.collect_errors_for_files(iterFile('spec.webidl'), [tidy.check_webidl_spec], [], print_text=False)
-       self.assertEqual('No specification link found.', next(errors)[2])
+       errors = tidy.collect_errors_for_files(iterFile("spec.webidl"), [tidy.check_webidl_spec], [], print_text=False)
+       self.assertEqual("No specification link found.", next(errors)[2])
        self.assertNoMoreErrors(errors)

    def test_toml(self):
-       errors = tidy.collect_errors_for_files(iterFile('Cargo.toml'), [], [tidy.check_toml], print_text=False)
-       self.assertEqual('found asterisk instead of minimum version number', next(errors)[2])
-       self.assertEqual('.toml file should contain a valid license.', next(errors)[2])
+       errors = tidy.collect_errors_for_files(iterFile("Cargo.toml"), [], [tidy.check_toml], print_text=False)
+       self.assertEqual("found asterisk instead of minimum version number", next(errors)[2])
+       self.assertEqual(".toml file should contain a valid license.", next(errors)[2])
        self.assertNoMoreErrors(errors)

    def test_modeline(self):
-       errors = tidy.collect_errors_for_files(iterFile('modeline.txt'), [], [tidy.check_modeline], print_text=False)
-       self.assertEqual('vi modeline present', next(errors)[2])
-       self.assertEqual('vi modeline present', next(errors)[2])
-       self.assertEqual('vi modeline present', next(errors)[2])
-       self.assertEqual('emacs file variables present', next(errors)[2])
-       self.assertEqual('emacs file variables present', next(errors)[2])
+       errors = tidy.collect_errors_for_files(iterFile("modeline.txt"), [], [tidy.check_modeline], print_text=False)
+       self.assertEqual("vi modeline present", next(errors)[2])
+       self.assertEqual("vi modeline present", next(errors)[2])
+       self.assertEqual("vi modeline present", next(errors)[2])
+       self.assertEqual("emacs file variables present", next(errors)[2])
+       self.assertEqual("emacs file variables present", next(errors)[2])
        self.assertNoMoreErrors(errors)

    def test_file_list(self):
-       file_path = os.path.join(BASE_PATH, 'test_ignored')
+       file_path = os.path.join(BASE_PATH, "test_ignored")
        file_list = tidy.FileList(file_path, only_changed_files=False, exclude_dirs=[], progress=False)
        lst = list(file_list)
        self.assertEqual(
-           [
-               os.path.join(file_path, 'whee', 'test.rs'),
-               os.path.join(file_path, 'whee', 'foo', 'bar.rs')
-           ],
-           lst
+           [os.path.join(file_path, "whee", "test.rs"), os.path.join(file_path, "whee", "foo", "bar.rs")], lst
        )
-       file_list = tidy.FileList(file_path, only_changed_files=False,
-                                 exclude_dirs=[os.path.join(file_path, 'whee', 'foo')],
-                                 progress=False)
+       file_list = tidy.FileList(
+           file_path, only_changed_files=False, exclude_dirs=[os.path.join(file_path, "whee", "foo")], progress=False
+       )
        lst = list(file_list)
-       self.assertEqual([os.path.join(file_path, 'whee', 'test.rs')], lst)
+       self.assertEqual([os.path.join(file_path, "whee", "test.rs")], lst)

    def test_multiline_string(self):
-       errors = tidy.collect_errors_for_files(iterFile('multiline_string.rs'), [], [tidy.check_rust], print_text=False)
+       errors = tidy.collect_errors_for_files(iterFile("multiline_string.rs"), [], [tidy.check_rust], print_text=False)
        self.assertNoMoreErrors(errors)

    def test_raw_url_in_rustdoc(self):
@@ -212,34 +203,19 @@ class CheckTidiness(unittest.TestCase):
        self.assertEqual(tidy.ERROR_RAW_URL_IN_RUSTDOC, next(errors)[1])
        self.assertNoMoreErrors(errors)

-       errors = tidy.check_for_raw_urls_in_rustdoc(
-           "file.rs", 3,
-           b"/// https://google.com"
-       )
+       errors = tidy.check_for_raw_urls_in_rustdoc("file.rs", 3, b"/// https://google.com")
        assert_has_a_single_rustdoc_error(errors)

-       errors = tidy.check_for_raw_urls_in_rustdoc(
-           "file.rs", 3,
-           b"//! (https://google.com)"
-       )
+       errors = tidy.check_for_raw_urls_in_rustdoc("file.rs", 3, b"//! (https://google.com)")
        assert_has_a_single_rustdoc_error(errors)

-       errors = tidy.check_for_raw_urls_in_rustdoc(
-           "file.rs", 3,
-           b"/// <https://google.com>"
-       )
+       errors = tidy.check_for_raw_urls_in_rustdoc("file.rs", 3, b"/// <https://google.com>")
        self.assertNoMoreErrors(errors)

-       errors = tidy.check_for_raw_urls_in_rustdoc(
-           "file.rs", 3,
-           b"/// [hi]: https://google.com"
-       )
+       errors = tidy.check_for_raw_urls_in_rustdoc("file.rs", 3, b"/// [hi]: https://google.com")
        self.assertNoMoreErrors(errors)

-       errors = tidy.check_for_raw_urls_in_rustdoc(
-           "file.rs", 3,
-           b"/// [hi](https://google.com)"
-       )
+       errors = tidy.check_for_raw_urls_in_rustdoc("file.rs", 3, b"/// [hi](https://google.com)")
        self.assertNoMoreErrors(errors)

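All the tidy checks exercised above are generators yielding `(path, line_number, message)` triples, which is why every assertion indexes `next(errors)[2]`. A tiny illustration with a hypothetical check written in the same style:

def check_todo(path: str, lines: list[bytes]):
    # Hypothetical check, in the same generator style as tidy's checks.
    for idx, line in enumerate(lines):
        if b"TODO" in line:
            yield (path, idx + 1, "TODO left in code")


errors = check_todo("demo.rs", [b"fn main() {}", b"// TODO: fix"])
path, line_number, message = next(errors)
assert (line_number, message) == (2, "TODO left in code")
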
@@ -1,5 +1,6 @@
from servo_tidy.tidy import LintRunner

+
class Lint(LintRunner):
    def run(self):
        yield None
@@ -1,5 +1,6 @@
from servo_tidy.tidy import LintRunner

+
class Linter(LintRunner):
    def run(self):
        pass
@@ -1,5 +1,6 @@
from servo_tidy.tidy import LintRunner

+
class Lint(LintRunner):
    def some_method(self):
        pass
@@ -1,6 +1,7 @@
from servo_tidy.tidy import LintRunner

+
class Lint(LintRunner):
    def run(self):
        for _ in [None]:
-           yield ('path', 0, 'foobar')
+           yield ("path", 0, "foobar")

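These four fixture files exercise tidy's external-lint plugin protocol: a module must expose a `Lint` class deriving from `LintRunner` whose `run()` yields `(path, line, message)` tuples, and the fixtures cover a wrong class name, a missing `run`, and bad yield values. A rough sketch of a well-formed plugin, with a made-up rule:

import os

from servo_tidy.tidy import LintRunner


class Lint(LintRunner):
    def run(self):
        # Hypothetical rule: flag any file name containing a space.
        for root, _dirs, files in os.walk("."):
            for name in files:
                if " " in name:
                    yield (os.path.join(root, name), 0, "file name contains a space")
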
@@ -31,8 +31,8 @@ WPT_PATH = os.path.join(".", "tests", "wpt")
CONFIG_FILE_PATH = os.path.join(".", "servo-tidy.toml")
WPT_CONFIG_INI_PATH = os.path.join(WPT_PATH, "config.ini")
# regex source https://stackoverflow.com/questions/6883049/
-URL_REGEX = re.compile(br'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+')
-UTF8_URL_REGEX = re.compile(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+')
+URL_REGEX = re.compile(rb"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+")
+UTF8_URL_REGEX = re.compile(r"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+")
CARGO_LOCK_FILE = os.path.join(TOPDIR, "Cargo.lock")
CARGO_DENY_CONFIG_FILE = os.path.join(TOPDIR, "deny.toml")

@@ -57,15 +57,25 @@ config = {
        ],
        "packages": [],
    },
-   "check_ext": {}
+   "check_ext": {},
}

COMMENTS = [b"// ", b"# ", b" *", b"/* "]

# File patterns to include in the non-WPT tidy check.
-FILE_PATTERNS_TO_CHECK = ["*.rs", "*.rc", "*.cpp", "*.c",
-                          "*.h", "*.py", "*.sh",
-                          "*.toml", "*.webidl", "*.json", "*.html"]
+FILE_PATTERNS_TO_CHECK = [
+   "*.rs",
+   "*.rc",
+   "*.cpp",
+   "*.c",
+   "*.h",
+   "*.py",
+   "*.sh",
+   "*.toml",
+   "*.webidl",
+   "*.json",
+   "*.html",
+]

# File patterns that are ignored for all tidy and lint checks.
FILE_PATTERNS_TO_IGNORE = ["*.#*", "*.pyc", "fake-ld.sh", "*.ogv", "*.webm"]

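The exploded `FILE_PATTERNS_TO_CHECK` list above shows the formatter's Black-style "magic trailing comma": once the last element of a collection carries a trailing comma, ruff keeps one element per line even when everything would fit on one. A small standalone illustration:

# Without a trailing comma, ruff collapses this onto one line if it fits:
colors = ["red", "green", "blue"]

# With a magic trailing comma, ruff pins the one-per-line layout:
colors = [
    "red",
    "green",
    "blue",
]
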
@@ -106,8 +116,7 @@ WEBIDL_STANDARDS = [
    b"//notifications.spec.whatwg.org",
    b"//testutils.spec.whatwg.org/",
    # Not a URL
-   b"// This interface is entirely internal to Servo, and should not be"
-   + b" accessible to\n// web pages."
+   b"// This interface is entirely internal to Servo, and should not be" + b" accessible to\n// web pages.",
]


@@ -121,9 +130,9 @@ def is_iter_empty(iterator):

def normilize_paths(paths):
    if isinstance(paths, str):
-       return os.path.join(*paths.split('/'))
+       return os.path.join(*paths.split("/"))
    else:
-       return [os.path.join(*path.split('/')) for path in paths]
+       return [os.path.join(*path.split("/")) for path in paths]


# A simple wrapper for iterators to show progress
@@ -133,7 +142,7 @@ def progress_wrapper(iterator):
    total_files, progress = len(list_of_stuff), 0
    for idx, thing in enumerate(list_of_stuff):
        progress = int(float(idx + 1) / total_files * 100)
-       sys.stdout.write('\r Progress: %s%% (%d/%d)' % (progress, idx + 1, total_files))
+       sys.stdout.write("\r Progress: %s%% (%d/%d)" % (progress, idx + 1, total_files))
        sys.stdout.flush()
        yield thing

@@ -170,8 +179,8 @@ class FileList(object):
        if not file_list:
            return
        for f in file_list:
-           if not any(os.path.join('.', os.path.dirname(f)).startswith(path) for path in self.excluded):
-               yield os.path.join('.', f)
+           if not any(os.path.join(".", os.path.dirname(f)).startswith(path) for path in self.excluded):
+               yield os.path.join(".", f)

    def _filter_excluded(self):
        for root, dirs, files in os.walk(self.directory, topdown=True):
@@ -197,8 +206,12 @@ def filter_file(file_name):


def filter_files(start_dir, only_changed_files, progress):
-   file_iter = FileList(start_dir, only_changed_files=only_changed_files,
-                        exclude_dirs=config["ignore"]["directories"], progress=progress)
+   file_iter = FileList(
+       start_dir,
+       only_changed_files=only_changed_files,
+       exclude_dirs=config["ignore"]["directories"],
+       progress=progress,
+   )

    for file_name in iter(file_iter):
        base_name = os.path.basename(file_name)
@@ -213,8 +226,8 @@ def uncomment(line):
    for c in COMMENTS:
        if line.startswith(c):
            if line.endswith(b"*/"):
-               return line[len(c):(len(line) - 3)].strip()
-           return line[len(c):].strip()
+               return line[len(c) : (len(line) - 3)].strip()
+           return line[len(c) :].strip()


def is_apache_licensed(header):
@@ -226,8 +239,7 @@ def is_apache_licensed(header):


def check_license(file_name, lines):
-   if any(file_name.endswith(ext) for ext in (".toml", ".lock", ".json", ".html")) or \
-           config["skip-check-licenses"]:
+   if any(file_name.endswith(ext) for ext in (".toml", ".lock", ".json", ".html")) or config["skip-check-licenses"]:
        return

    if lines[0].startswith(b"#!") and lines[1].strip():
@@ -238,7 +250,7 @@ def check_license(file_name, lines):
    license_block = []

    for line in lines:
-       line = line.rstrip(b'\n')
+       line = line.rstrip(b"\n")
        if not line.strip():
            blank_lines += 1
            if blank_lines >= max_blank_lines:
@@ -257,20 +269,19 @@ def check_license(file_name, lines):


def check_modeline(file_name, lines):
    for idx, line in enumerate(lines[:5]):
-       if re.search(b'^.*[ \t](vi:|vim:|ex:)[ \t]', line):
+       if re.search(b"^.*[ \t](vi:|vim:|ex:)[ \t]", line):
            yield (idx + 1, "vi modeline present")
-       elif re.search(br'-\*-.*-\*-', line, re.IGNORECASE):
+       elif re.search(rb"-\*-.*-\*-", line, re.IGNORECASE):
            yield (idx + 1, "emacs file variables present")


def check_length(file_name, idx, line):
-   if any(file_name.endswith(ext) for ext in (".lock", ".json", ".html", ".toml")) or \
-           config["skip-check-length"]:
+   if any(file_name.endswith(ext) for ext in (".lock", ".json", ".html", ".toml")) or config["skip-check-length"]:
        return

    # Prefer shorter lines when shell scripting.
    max_length = 80 if file_name.endswith(".sh") else 120
-   if len(line.rstrip(b'\n')) > max_length and not is_unsplittable(file_name, line):
+   if len(line.rstrip(b"\n")) > max_length and not is_unsplittable(file_name, line):
        yield (idx + 1, "Line is longer than %d characters" % max_length)


@@ -279,23 +290,18 @@ def contains_url(line):


def is_unsplittable(file_name, line):
-   return (
-       contains_url(line)
-       or file_name.endswith(".rs")
-       and line.startswith(b"use ")
-       and b"{" not in line
-   )
+   return contains_url(line) or file_name.endswith(".rs") and line.startswith(b"use ") and b"{" not in line


def check_whatwg_specific_url(idx, line):
-   match = re.search(br"https://html\.spec\.whatwg\.org/multipage/[\w-]+\.html#([\w\'\:-]+)", line)
+   match = re.search(rb"https://html\.spec\.whatwg\.org/multipage/[\w-]+\.html#([\w\'\:-]+)", line)
    if match is not None:
        preferred_link = "https://html.spec.whatwg.org/multipage/#{}".format(match.group(1).decode("utf-8"))
        yield (idx + 1, "link to WHATWG may break in the future, use this format instead: {}".format(preferred_link))


def check_whatwg_single_page_url(idx, line):
-   match = re.search(br"https://html\.spec\.whatwg\.org/#([\w\'\:-]+)", line)
+   match = re.search(rb"https://html\.spec\.whatwg\.org/#([\w\'\:-]+)", line)
    if match is not None:
        preferred_link = "https://html.spec.whatwg.org/multipage/#{}".format(match.group(1).decode("utf-8"))
        yield (idx + 1, "links to WHATWG single-page url, change to multi page: {}".format(preferred_link))
@@ -335,10 +341,11 @@ def check_for_raw_urls_in_rustdoc(file_name: str, idx: int, line: bytes):
    # [link text]: https://example.com
    match = URL_REGEX.search(line)
    if match and (
-       not line[match.start() - 1:].startswith(b"<")
-       and not line[match.start() - 1:].startswith(b"[")
-       and not line[match.start() - 2:].startswith(b"](")
-       and not line[match.start() - 3:].startswith(b"]: ")):
+       not line[match.start() - 1 :].startswith(b"<")
+       and not line[match.start() - 1 :].startswith(b"[")
+       and not line[match.start() - 2 :].startswith(b"](")
+       and not line[match.start() - 3 :].startswith(b"]: ")
+   ):
        yield (idx + 1, ERROR_RAW_URL_IN_RUSTDOC)

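The rustdoc check above only accepts URLs in autolink (`<...>`) or Markdown link forms, and its rewritten slices also show ruff's PEP 8 spacing for slice bounds that are complex expressions, as in `line[match.start() - 1 :]`. A condensed, illustrative restatement of the rule:

import re

URL = re.compile(rb"https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+")


def is_bare_url(line: bytes) -> bool:
    # Condensed restatement of the check above, for illustration only.
    m = URL.search(line)
    if not m:
        return False
    tail = line[m.start() - 1 :]
    return not (tail.startswith(b"<") or tail.startswith(b"[") or
                line[m.start() - 2 :].startswith(b"](") or
                line[m.start() - 3 :].startswith(b"]: "))


assert is_bare_url(b"/// https://example.com")            # flagged
assert not is_bare_url(b"/// <https://example.com>")      # autolink is fine
assert not is_bare_url(b"/// [hi](https://example.com)")  # Markdown link is fine
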
@@ -369,12 +376,11 @@ def check_ruff_lints():
    )


def run_cargo_deny_lints():
    print("\r ➤ Running `cargo-deny` checks...")
-   result = subprocess.run(["cargo-deny", "--format=json", "--all-features", "check"],
-                           encoding='utf-8',
-                           capture_output=True)
+   result = subprocess.run(
+       ["cargo-deny", "--format=json", "--all-features", "check"], encoding="utf-8", capture_output=True
+   )
    assert result.stderr is not None, "cargo deny should return error information via stderr when failing"

    errors = []
@@ -397,11 +403,7 @@ def run_cargo_deny_lints():
        if error_code == "rejected":
            crate = CargoDenyKrate(error_fields["graphs"][0])
            license_name = error_fields["notes"][0]
-           errors.append((
-               CARGO_LOCK_FILE,
-               1,
-               f"Rust dependency {crate}: Rejected license \"{license_name}\""
-           ))
+           errors.append((CARGO_LOCK_FILE, 1, f'Rust dependency {crate}: Rejected license "{license_name}"'))
        # This detects if a crate has been marked as banned in the configuration file.
        elif error_code == "banned":
            crate = CargoDenyKrate(error_fields["graphs"][0])
@@ -431,7 +433,7 @@ def check_toml(file_name, lines):
        if line_without_comment.find("*") != -1:
            yield (idx + 1, "found asterisk instead of minimum version number")
        for license_line in licenses_toml:
-           ok_licensed |= (license_line in line)
+           ok_licensed |= license_line in line
        if "license.workspace" in line:
            ok_licensed = True
    if not ok_licensed:
@@ -448,7 +450,7 @@ def check_shell(file_name, lines):
    did_shebang_check = False

    if not lines:
-       yield (0, 'script is an empty file')
+       yield (0, "script is an empty file")
        return

    if lines[0].rstrip() != shebang.encode("utf-8"):
@@ -477,23 +479,25 @@ def check_shell(file_name, lines):
            if " [ " in stripped or stripped.startswith("[ "):
                yield (idx + 1, "script should use `[[` instead of `[` for conditional testing")

-           for dollar in re.finditer(r'\$', stripped):
+           for dollar in re.finditer(r"\$", stripped):
                next_idx = dollar.end()
                if next_idx < len(stripped):
                    next_char = stripped[next_idx]
-                   if not (next_char == '{' or next_char == '('):
-                       yield (idx + 1, "variable substitutions should use the full \"${VAR}\" form")
+                   if not (next_char == "{" or next_char == "("):
+                       yield (idx + 1, 'variable substitutions should use the full "${VAR}" form')


def check_rust(file_name, lines):
-   if not file_name.endswith(".rs") or \
-      file_name.endswith(".mako.rs") or \
-      file_name.endswith(os.path.join("style", "build.rs")) or \
-      file_name.endswith(os.path.join("unit", "style", "stylesheets.rs")):
+   if (
+       not file_name.endswith(".rs")
+       or file_name.endswith(".mako.rs")
+       or file_name.endswith(os.path.join("style", "build.rs"))
+       or file_name.endswith(os.path.join("unit", "style", "stylesheets.rs"))
+   ):
        return

    comment_depth = 0
-   merged_lines = ''
+   merged_lines = ""
    import_block = False
    whitespace = False

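`check_rust`, whose hunks continue below, first normalizes each line, blanking out string literals and truncating comments, before applying its regex rules. A condensed sketch of that preprocessing, illustrative only and using the same substitutions:

import re


def strip_noise(line: str) -> str:
    # Illustrative condensation of check_rust's preprocessing: blank out
    # string literals, then truncate comments, mirroring the re.sub calls below.
    line = re.sub(r'"(\\.|[^\\"])*?"', '""', line)
    line = re.sub(r"//.*?$|/\*.*?$|^\*.*?$", "//", line)
    return line


assert strip_noise('let s = "a // b";  // trailing') == 'let s = "";  //'
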
@ -507,8 +511,7 @@ def check_rust(file_name, lines):
|
||||||
os.path.join("*", "ports", "servoshell", "embedder.rs"),
|
os.path.join("*", "ports", "servoshell", "embedder.rs"),
|
||||||
os.path.join("*", "rust_tidy.rs"), # This is for the tests.
|
os.path.join("*", "rust_tidy.rs"), # This is for the tests.
|
||||||
]
|
]
|
||||||
is_panic_not_allowed_rs_file = any([
|
is_panic_not_allowed_rs_file = any([glob.fnmatch.fnmatch(file_name, path) for path in PANIC_NOT_ALLOWED_PATHS])
|
||||||
glob.fnmatch.fnmatch(file_name, path) for path in PANIC_NOT_ALLOWED_PATHS])
|
|
||||||
|
|
||||||
prev_open_brace = False
|
prev_open_brace = False
|
||||||
multi_line_string = False
|
multi_line_string = False
|
||||||
|
@ -531,11 +534,11 @@ def check_rust(file_name, lines):
|
||||||
is_comment = re.search(r"^//|^/\*|^\*", line)
|
is_comment = re.search(r"^//|^/\*|^\*", line)
|
||||||
|
|
||||||
# Simple heuristic to avoid common case of no comments.
|
# Simple heuristic to avoid common case of no comments.
|
||||||
if '/' in line:
|
if "/" in line:
|
||||||
comment_depth += line.count('/*')
|
comment_depth += line.count("/*")
|
||||||
comment_depth -= line.count('*/')
|
comment_depth -= line.count("*/")
|
||||||
|
|
||||||
if line.endswith('\\'):
|
if line.endswith("\\"):
|
||||||
merged_lines += line[:-1]
|
merged_lines += line[:-1]
|
||||||
continue
|
continue
|
||||||
if comment_depth:
|
if comment_depth:
|
||||||
|
@ -543,11 +546,10 @@ def check_rust(file_name, lines):
|
||||||
continue
|
continue
|
||||||
if merged_lines:
|
if merged_lines:
|
||||||
line = merged_lines + line
|
line = merged_lines + line
|
||||||
merged_lines = ''
|
merged_lines = ""
|
||||||
|
|
||||||
if multi_line_string:
|
if multi_line_string:
|
||||||
line, count = re.subn(
|
line, count = re.subn(r'^(\\.|[^"\\])*?"', "", line, count=1)
|
||||||
r'^(\\.|[^"\\])*?"', '', line, count=1)
|
|
||||||
if count == 1:
|
if count == 1:
|
||||||
multi_line_string = False
|
multi_line_string = False
|
||||||
else:
|
else:
|
||||||
|
@ -565,9 +567,7 @@ def check_rust(file_name, lines):
|
||||||
# get rid of strings and chars because cases like regex expression, keep attributes
|
# get rid of strings and chars because cases like regex expression, keep attributes
|
||||||
if not is_attribute and not is_comment:
|
if not is_attribute and not is_comment:
|
||||||
line = re.sub(r'"(\\.|[^\\"])*?"', '""', line)
|
line = re.sub(r'"(\\.|[^\\"])*?"', '""', line)
|
||||||
line = re.sub(
|
line = re.sub(r"'(\\.|[^\\']|(\\x[0-9a-fA-F]{2})|(\\u{[0-9a-fA-F]{1,6}}))'", "''", line)
|
||||||
r"'(\\.|[^\\']|(\\x[0-9a-fA-F]{2})|(\\u{[0-9a-fA-F]{1,6}}))'",
|
|
||||||
"''", line)
|
|
||||||
# If, after parsing all single-line strings, we still have
|
# If, after parsing all single-line strings, we still have
|
||||||
# an odd number of double quotes, this line starts a
|
# an odd number of double quotes, this line starts a
|
||||||
# multiline string
|
# multiline string
|
||||||
|
@ -576,15 +576,16 @@ def check_rust(file_name, lines):
|
||||||
multi_line_string = True
|
multi_line_string = True
|
||||||
|
|
||||||
# get rid of comments
|
# get rid of comments
|
||||||
line = re.sub(r'//.*?$|/\*.*?$|^\*.*?$', '//', line)
|
line = re.sub(r"//.*?$|/\*.*?$|^\*.*?$", "//", line)
|
||||||
|
|
||||||
# get rid of attributes that do not contain =
|
# get rid of attributes that do not contain =
|
||||||
line = re.sub(r'^#[A-Za-z0-9\(\)\[\]_]*?$', '#[]', line)
|
line = re.sub(r"^#[A-Za-z0-9\(\)\[\]_]*?$", "#[]", line)
|
||||||
|
|
||||||
# flag this line if it matches one of the following regular expressions
|
# flag this line if it matches one of the following regular expressions
|
||||||
# tuple format: (pattern, format_message, filter_function(match, line))
|
# tuple format: (pattern, format_message, filter_function(match, line))
|
||||||
def no_filter(match, line):
|
def no_filter(match, line):
|
||||||
return True
|
return True
|
||||||
|
|
||||||
regex_rules = [
|
regex_rules = [
|
||||||
# There should not be any extra pointer dereferencing
|
# There should not be any extra pointer dereferencing
|
||||||
(r": &Vec<", "use &[T] instead of &Vec<T>", no_filter),
|
(r": &Vec<", "use &[T] instead of &Vec<T>", no_filter),
|
||||||
|
@@ -618,17 +619,23 @@ def check_rust(file_name, lines):
         match = re.search(r"#!\[feature\((.*)\)\]", line)

         if match:
-            features = list(map(lambda w: w.strip(), match.group(1).split(',')))
+            features = list(map(lambda w: w.strip(), match.group(1).split(",")))
             sorted_features = sorted(features)
             if sorted_features != features and check_alphabetical_order:
-                yield (idx + 1, decl_message.format("feature attribute")
-                       + decl_expected.format(tuple(sorted_features))
-                       + decl_found.format(tuple(features)))
+                yield (
+                    idx + 1,
+                    decl_message.format("feature attribute")
+                    + decl_expected.format(tuple(sorted_features))
+                    + decl_found.format(tuple(features)),
+                )

             if prev_feature_name > sorted_features[0] and check_alphabetical_order:
-                yield (idx + 1, decl_message.format("feature attribute")
-                       + decl_expected.format(prev_feature_name + " after " + sorted_features[0])
-                       + decl_found.format(prev_feature_name + " before " + sorted_features[0]))
+                yield (
+                    idx + 1,
+                    decl_message.format("feature attribute")
+                    + decl_expected.format(prev_feature_name + " after " + sorted_features[0])
+                    + decl_found.format(prev_feature_name + " before " + sorted_features[0]),
+                )

             prev_feature_name = sorted_features[0]
         else:
@@ -652,9 +659,12 @@ def check_rust(file_name, lines):
             if match == -1 and not line.endswith(";"):
                 yield (idx + 1, "mod declaration spans multiple lines")
             if prev_mod[indent] and mod < prev_mod[indent] and check_alphabetical_order:
-                yield (idx + 1, decl_message.format("mod declaration")
-                       + decl_expected.format(prev_mod[indent])
-                       + decl_found.format(mod))
+                yield (
+                    idx + 1,
+                    decl_message.format("mod declaration")
+                    + decl_expected.format(prev_mod[indent])
+                    + decl_found.format(mod),
+                )
             prev_mod[indent] = mod
         else:
             # we now erase previous entries
@@ -665,21 +675,24 @@ def check_rust(file_name, lines):
     # match the derivable traits filtering out macro expansions
     match = re.search(r"#\[derive\(([a-zA-Z, ]*)", line)
     if match:
-        derives = list(map(lambda w: w.strip(), match.group(1).split(',')))
+        derives = list(map(lambda w: w.strip(), match.group(1).split(",")))
         # sort, compare and report
         sorted_derives = sorted(derives)
         if sorted_derives != derives and check_alphabetical_order:
-            yield (idx + 1, decl_message.format("derivable traits list")
-                   + decl_expected.format(", ".join(sorted_derives))
-                   + decl_found.format(", ".join(derives)))
+            yield (
+                idx + 1,
+                decl_message.format("derivable traits list")
+                + decl_expected.format(", ".join(sorted_derives))
+                + decl_found.format(", ".join(derives)),
+            )


 # Avoid flagging <Item=Foo> constructs
 def is_associated_type(match, line):
-    if match.group(1) != '=':
+    if match.group(1) != "=":
         return False
-    open_angle = line[0:match.end()].rfind('<')
-    close_angle = line[open_angle:].find('>') if open_angle != -1 else -1
+    open_angle = line[0 : match.end()].rfind("<")
+    close_angle = line[open_angle:].find(">") if open_angle != -1 else -1
     generic_open = open_angle != -1 and open_angle < match.start()
     generic_close = close_angle != -1 and close_angle + open_angle >= match.end()
     return generic_open and generic_close
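The is_associated_type filter above can be read in isolation: it only suppresses a match when an enclosing <...> pair brackets the matched '='. A minimal sketch of that behaviour (the pattern and sample line here are illustrative, not taken from the rule table):

    import re

    pattern = re.compile(r" (=) ")  # a spacing rule whose group(1) captures '='
    line = "fn example<Item = Foo>()"
    match = pattern.search(line)
    # The '=' sits between the '<' and the '>' that closes it, so
    # is_associated_type(match, line) returns True and no error is reported.
    assert match is not None and is_associated_type(match, line)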
@@ -731,6 +744,7 @@ def check_that_manifests_exist():

+
 def check_that_manifests_are_clean():
     from wptrunner import wptlogging

     print("\r ➤ Checking WPT manifests for cleanliness...")
     output_stream = io.StringIO("")
     logger = wptlogging.setup({}, {"mach": output_stream})
@@ -822,8 +836,8 @@ def check_spec(file_name, lines):
                 yield (idx + 1, "method declared in webidl is missing a comment with a specification link")
                 break
         if in_impl:
-            brace_count += line.count('{')
-            brace_count -= line.count('}')
+            brace_count += line.count("{")
+            brace_count -= line.count("}")
             if brace_count < 1:
                 break

@@ -870,7 +884,7 @@ def check_config_file(config_file, print_text=True):
         # Print invalid listed ignored directories
         if current_table == "ignore" and invalid_dirs:
             for d in invalid_dirs:
-                if line.strip().strip('\'",') == d:
+                if line.strip().strip("'\",") == d:
                     yield config_file, idx + 1, "ignored directory '%s' doesn't exist" % d
                     invalid_dirs.remove(d)
                     break
@@ -878,7 +892,7 @@ def check_config_file(config_file, print_text=True):
         # Print invalid listed ignored files
         if current_table == "ignore" and invalid_files:
             for f in invalid_files:
-                if line.strip().strip('\'",') == f:
+                if line.strip().strip("'\",") == f:
                     yield config_file, idx + 1, "ignored file '%s' doesn't exist" % f
                     invalid_files.remove(f)
                     break
@@ -890,10 +904,14 @@ def check_config_file(config_file, print_text=True):
         key = line.split("=")[0].strip()

         # Check for invalid keys inside [configs] and [ignore] table
-        if (current_table == "configs" and key not in config
-                or current_table == "ignore" and key not in config["ignore"]
+        if (
+            current_table == "configs"
+            and key not in config
+            or current_table == "ignore"
+            and key not in config["ignore"]
             # Any key outside of tables
-                or current_table == ""):
+            or current_table == ""
+        ):
             yield config_file, idx + 1, "invalid config key '%s'" % key

 # Parse config file
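Because `and` binds tighter than `or`, the reflowed condition above still groups as (configs-check) or (ignore-check) or (outside-any-table). A self-contained sketch of that grouping, using a simplified config shape:

    def invalid_key(current_table, key, config):
        # `A and B or C and D or E` parses as `(A and B) or (C and D) or (E)`,
        # so splitting one operand per line changes layout only, not meaning.
        return (
            current_table == "configs"
            and key not in config
            or current_table == "ignore"
            and key not in config["ignore"]
            or current_table == ""
        )

    assert invalid_key("", "anything", {"ignore": {}})  # key outside any table
    assert not invalid_key("ignore", "files", {"ignore": {"files": []}})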
@@ -914,7 +932,7 @@ def parse_config(config_file):
     dirs_to_check = config_file.get("check_ext", {})
     # Fix the paths (OS-dependent)
     for path, exts in dirs_to_check.items():
-        config['check_ext'][normilize_paths(path)] = exts
+        config["check_ext"][normilize_paths(path)] = exts

     # Add list of blocked packages
     config["blocked-packages"] = config_file.get("blocked-packages", {})
@@ -933,13 +951,9 @@ def check_directory_files(directories, print_text=True):
         files = sorted(os.listdir(directory))
         for filename in files:
             if not any(filename.endswith(ext) for ext in file_extensions):
-                details = {
-                    "name": os.path.basename(filename),
-                    "ext": ", ".join(file_extensions),
-                    "dir_name": directory
-                }
-                message = '''Unexpected extension found for {name}. \
-We only expect files with {ext} extensions in {dir_name}'''.format(**details)
+                details = {"name": os.path.basename(filename), "ext": ", ".join(file_extensions), "dir_name": directory}
+                message = """Unexpected extension found for {name}. \
+We only expect files with {ext} extensions in {dir_name}""".format(**details)
                 yield (filename, 1, message)

@@ -972,12 +986,19 @@ def scan(only_changed_files=False, progress=False):
     # check config file for errors
     config_errors = check_config_file(CONFIG_FILE_PATH)
     # check directories contain expected files
-    directory_errors = check_directory_files(config['check_ext'])
+    directory_errors = check_directory_files(config["check_ext"])
     # standard checks
-    files_to_check = filter_files('.', only_changed_files, progress)
+    files_to_check = filter_files(".", only_changed_files, progress)
     checking_functions = (check_webidl_spec,)
-    line_checking_functions = (check_license, check_by_line, check_toml, check_shell,
-                               check_rust, check_spec, check_modeline)
+    line_checking_functions = (
+        check_license,
+        check_by_line,
+        check_toml,
+        check_shell,
+        check_rust,
+        check_spec,
+        check_modeline,
+    )
     file_errors = collect_errors_for_files(files_to_check, checking_functions, line_checking_functions)

     python_errors = check_ruff_lints()
@@ -985,26 +1006,27 @@ def scan(only_changed_files=False, progress=False):
     wpt_errors = run_wpt_lints(only_changed_files)

     # chain all the iterators
-    errors = itertools.chain(config_errors, directory_errors, file_errors,
-                             python_errors, wpt_errors, cargo_lock_errors)
+    errors = itertools.chain(config_errors, directory_errors, file_errors, python_errors, wpt_errors, cargo_lock_errors)

     colorama.init()
     error = None
     for error in errors:
-        print("\r | "
-              + f"{colorama.Fore.BLUE}{error[0]}{colorama.Style.RESET_ALL}:"
-              + f"{colorama.Fore.YELLOW}{error[1]}{colorama.Style.RESET_ALL}: "
-              + f"{colorama.Fore.RED}{error[2]}{colorama.Style.RESET_ALL}")
+        print(
+            "\r | "
+            + f"{colorama.Fore.BLUE}{error[0]}{colorama.Style.RESET_ALL}:"
+            + f"{colorama.Fore.YELLOW}{error[1]}{colorama.Style.RESET_ALL}: "
+            + f"{colorama.Fore.RED}{error[2]}{colorama.Style.RESET_ALL}"
+        )

     return int(error is not None)


 class CargoDenyKrate:
     def __init__(self, data: Dict[Any, Any]):
-        crate = data['Krate']
-        self.name = crate['name']
-        self.version = crate['version']
-        self.parents = [CargoDenyKrate(parent) for parent in data.get('parents', [])]
+        crate = data["Krate"]
+        self.name = crate["name"]
+        self.version = crate["version"]
+        self.parents = [CargoDenyKrate(parent) for parent in data.get("parents", [])]

     def __str__(self):
         return f"{self.name}@{self.version}"
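scan's exit status leans on the loop variable surviving the for loop: `error` keeps its last binding, so `int(error is not None)` is 0 exactly when the chained error iterators were all empty. A minimal sketch of the idiom:

    import itertools

    def exit_code(*error_iterators):
        error = None
        for error in itertools.chain(*error_iterators):
            pass  # scan() prints each error here
        return int(error is not None)

    assert exit_code([], []) == 0        # nothing reported, clean exit
    assert exit_code([], ["oops"]) == 1  # at least one error was seen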
@@ -27,22 +27,36 @@ import wptrunner.wptcommandline  # noqa: E402

 def create_parser():
     parser = wptrunner.wptcommandline.create_parser()
-    parser.add_argument('--rr-chaos', default=False, action="store_true",
-                        help="Run under chaos mode in rr until a failure is captured")
-    parser.add_argument('--pref', default=[], action="append", dest="prefs",
-                        help="Pass preferences to servo")
-    parser.add_argument('--log-servojson', action="append", type=mozlog.commandline.log_file,
-                        help="Servo's JSON logger of unexpected results")
-    parser.add_argument('--always-succeed', default=False, action="store_true",
-                        help="Always yield exit code of zero")
-    parser.add_argument('--no-default-test-types', default=False, action="store_true",
-                        help="Run all of the test types provided by wptrunner or specified explicitly by --test-types")
-    parser.add_argument('--filter-intermittents', default=None, action="store",
-                        help="Filter intermittents against known intermittents "
-                             "and save the filtered output to the given file.")
-    parser.add_argument('--log-raw-unexpected', default=None, action="store",
-                        help="Raw structured log messages for unexpected results."
-                             " '--log-raw' Must also be passed in order to use this.")
+    parser.add_argument(
+        "--rr-chaos", default=False, action="store_true", help="Run under chaos mode in rr until a failure is captured"
+    )
+    parser.add_argument("--pref", default=[], action="append", dest="prefs", help="Pass preferences to servo")
+    parser.add_argument(
+        "--log-servojson",
+        action="append",
+        type=mozlog.commandline.log_file,
+        help="Servo's JSON logger of unexpected results",
+    )
+    parser.add_argument("--always-succeed", default=False, action="store_true", help="Always yield exit code of zero")
+    parser.add_argument(
+        "--no-default-test-types",
+        default=False,
+        action="store_true",
+        help="Run all of the test types provided by wptrunner or specified explicitly by --test-types",
+    )
+    parser.add_argument(
+        "--filter-intermittents",
+        default=None,
+        action="store",
+        help="Filter intermittents against known intermittents and save the filtered output to the given file.",
+    )
+    parser.add_argument(
+        "--log-raw-unexpected",
+        default=None,
+        action="store",
+        help="Raw structured log messages for unexpected results."
+        " '--log-raw' Must also be passed in order to use this.",
+    )
     return parser
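The single-call add_argument form that ruff produces is behaviour-identical to the old continuation-line form. A standalone replica of two of the options above, runnable without wptrunner (the sample preference value is illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--rr-chaos", default=False, action="store_true", help="Run under chaos mode in rr until a failure is captured"
    )
    parser.add_argument("--pref", default=[], action="append", dest="prefs", help="Pass preferences to servo")

    args = parser.parse_args(["--rr-chaos", "--pref", "dom.example.enabled"])
    assert args.rr_chaos is True
    assert args.prefs == ["dom.example.enabled"]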
@@ -21,20 +21,20 @@ from exporter import WPTSync


 def main() -> int:
-    context = json.loads(os.environ['GITHUB_CONTEXT'])
+    context = json.loads(os.environ["GITHUB_CONTEXT"])
     logging.getLogger().level = logging.INFO

     success = WPTSync(
-        servo_repo='servo/servo',
-        wpt_repo='web-platform-tests/wpt',
-        downstream_wpt_repo='servo/wpt',
-        servo_path='./servo',
-        wpt_path='./wpt',
-        github_api_token=os.environ['WPT_SYNC_TOKEN'],
-        github_api_url='https://api.github.com/',
-        github_username='servo-wpt-sync',
-        github_email='ghbot+wpt-sync@servo.org',
-        github_name='Servo WPT Sync',
+        servo_repo="servo/servo",
+        wpt_repo="web-platform-tests/wpt",
+        downstream_wpt_repo="servo/wpt",
+        servo_path="./servo",
+        wpt_path="./wpt",
+        github_api_token=os.environ["WPT_SYNC_TOKEN"],
+        github_api_url="https://api.github.com/",
+        github_username="servo-wpt-sync",
+        github_email="ghbot+wpt-sync@servo.org",
+        github_name="Servo WPT Sync",
     ).run(context["event"])
     return 0 if success else 1

@@ -24,26 +24,28 @@ import subprocess

 from typing import Callable, Optional

-from .common import \
-    CLOSING_EXISTING_UPSTREAM_PR, \
-    NO_SYNC_SIGNAL, \
-    NO_UPSTREAMBLE_CHANGES_COMMENT, \
-    OPENED_NEW_UPSTREAM_PR, \
-    UPDATED_EXISTING_UPSTREAM_PR, \
-    UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR, \
-    UPSTREAMABLE_PATH, \
-    wpt_branch_name_from_servo_pr_number
+from .common import (
+    CLOSING_EXISTING_UPSTREAM_PR,
+    NO_SYNC_SIGNAL,
+    NO_UPSTREAMBLE_CHANGES_COMMENT,
+    OPENED_NEW_UPSTREAM_PR,
+    UPDATED_EXISTING_UPSTREAM_PR,
+    UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR,
+    UPSTREAMABLE_PATH,
+    wpt_branch_name_from_servo_pr_number,
+)

 from .github import GithubRepository, PullRequest
-from .step import \
-    AsyncValue, \
-    ChangePRStep, \
-    CommentStep, \
-    CreateOrUpdateBranchForPRStep, \
-    MergePRStep, \
-    OpenPRStep, \
-    RemoveBranchForPRStep, \
-    Step
+from .step import (
+    AsyncValue,
+    ChangePRStep,
+    CommentStep,
+    CreateOrUpdateBranchForPRStep,
+    MergePRStep,
+    OpenPRStep,
+    RemoveBranchForPRStep,
+    Step,
+)


 class LocalGitRepo:
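Parenthesized import lists are equivalent to backslash continuations but tolerate a trailing comma, which keeps future one-name additions to one-line diffs. Both spellings below import the same names:

    from os.path import dirname, \
        join

    from os.path import (
        dirname,
        join,
    )

    assert join(dirname("/a/b"), "c") == "/a/c"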
@@ -57,8 +59,7 @@ class LocalGitRepo:

     def run_without_encoding(self, *args, env: dict = {}):
         command_line = [self.git_path] + list(args)
-        logging.info(" → Execution (cwd='%s'): %s",
-                     self.path, " ".join(command_line))
+        logging.info(" → Execution (cwd='%s'): %s", self.path, " ".join(command_line))

         env.setdefault("GIT_AUTHOR_EMAIL", self.sync.github_email)
         env.setdefault("GIT_COMMITTER_EMAIL", self.sync.github_email)
@@ -66,20 +67,15 @@ class LocalGitRepo:
         env.setdefault("GIT_COMMITTER_NAME", self.sync.github_name)

         try:
-            return subprocess.check_output(
-                command_line, cwd=self.path, env=env, stderr=subprocess.STDOUT
-            )
+            return subprocess.check_output(command_line, cwd=self.path, env=env, stderr=subprocess.STDOUT)
         except subprocess.CalledProcessError as exception:
-            logging.warning("Process execution failed with output:\n%s",
-                            exception.output.decode("utf-8", errors="surrogateescape"))
+            logging.warning(
+                "Process execution failed with output:\n%s", exception.output.decode("utf-8", errors="surrogateescape")
+            )
             raise exception

     def run(self, *args, env: dict = {}):
-        return (
-            self
-            .run_without_encoding(*args, env=env)
-            .decode("utf-8", errors="surrogateescape")
-        )
+        return self.run_without_encoding(*args, env=env).decode("utf-8", errors="surrogateescape")


 @dataclasses.dataclass()
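The surrogateescape handler used by run() round-trips bytes that are not valid UTF-8, which matters because git output is not guaranteed to be UTF-8. A small self-contained illustration:

    raw = b"commit message with a stray byte \xff"
    text = raw.decode("utf-8", errors="surrogateescape")
    # The undecodable byte survives as a lone surrogate...
    assert "\udcff" in text
    # ...and encoding with the same handler restores the original bytes.
    assert text.encode("utf-8", errors="surrogateescape") == raw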
@@ -167,11 +163,7 @@ class WPTSync:
         if action not in ["opened", "synchronize", "reopened", "edited", "closed"]:
             return True

-        if (
-            action == "edited"
-            and "title" not in payload["changes"]
-            and "body" not in payload["changes"]
-        ):
+        if action == "edited" and "title" not in payload["changes"] and "body" not in payload["changes"]:
             return True

         try:
@@ -179,15 +171,11 @@ class WPTSync:
             downstream_wpt_branch = self.downstream_wpt.get_branch(
                 wpt_branch_name_from_servo_pr_number(servo_pr.number)
             )
-            upstream_pr = self.wpt.get_open_pull_request_for_branch(
-                self.github_username, downstream_wpt_branch
-            )
+            upstream_pr = self.wpt.get_open_pull_request_for_branch(self.github_username, downstream_wpt_branch)
             if upstream_pr:
-                logging.info(
-                    " → Detected existing upstream PR %s", upstream_pr)
+                logging.info(" → Detected existing upstream PR %s", upstream_pr)

-            run = SyncRun(self, servo_pr, AsyncValue(
-                upstream_pr), step_callback)
+            run = SyncRun(self, servo_pr, AsyncValue(upstream_pr), step_callback)

             pull_data = payload["pull_request"]
             if payload["action"] in ["opened", "synchronize", "reopened"]:
@@ -210,50 +198,44 @@ class WPTSync:
         num_commits = pull_data["commits"]
         head_sha = pull_data["head"]["sha"]
         is_upstreamable = (
-            len(
-                self.local_servo_repo.run(
-                    "diff", head_sha, f"{head_sha}~{num_commits}", "--", UPSTREAMABLE_PATH
-                )
-            )
-            > 0
+            len(self.local_servo_repo.run("diff", head_sha, f"{head_sha}~{num_commits}", "--", UPSTREAMABLE_PATH)) > 0
         )
         logging.info(" → PR is upstreamable: '%s'", is_upstreamable)

-        title = pull_data['title']
-        body = pull_data['body']
+        title = pull_data["title"]
+        body = pull_data["body"]
         if run.upstream_pr.has_value():
             if is_upstreamable:
                 # In case this is adding new upstreamable changes to a PR that was closed
                 # due to a lack of upstreamable changes, force it to be reopened.
                 # Github refuses to reopen a PR that had a branch force pushed, so be sure
                 # to do this first.
-                run.add_step(ChangePRStep(
-                    run.upstream_pr.value(), "opened", title, body))
+                run.add_step(ChangePRStep(run.upstream_pr.value(), "opened", title, body))
                 # Push the relevant changes to the upstream branch.
-                run.add_step(CreateOrUpdateBranchForPRStep(
-                    pull_data, run.servo_pr))
-                run.add_step(CommentStep(
-                    run.servo_pr, UPDATED_EXISTING_UPSTREAM_PR))
+                run.add_step(CreateOrUpdateBranchForPRStep(pull_data, run.servo_pr))
+                run.add_step(CommentStep(run.servo_pr, UPDATED_EXISTING_UPSTREAM_PR))
             else:
                 # Close the upstream PR, since would contain no changes otherwise.
-                run.add_step(CommentStep(run.upstream_pr.value(),
-                                         NO_UPSTREAMBLE_CHANGES_COMMENT))
+                run.add_step(CommentStep(run.upstream_pr.value(), NO_UPSTREAMBLE_CHANGES_COMMENT))
                 run.add_step(ChangePRStep(run.upstream_pr.value(), "closed"))
                 run.add_step(RemoveBranchForPRStep(pull_data))
-                run.add_step(CommentStep(
-                    run.servo_pr, CLOSING_EXISTING_UPSTREAM_PR))
+                run.add_step(CommentStep(run.servo_pr, CLOSING_EXISTING_UPSTREAM_PR))

         elif is_upstreamable:
             # Push the relevant changes to a new upstream branch.
-            branch = run.add_step(
-                CreateOrUpdateBranchForPRStep(pull_data, run.servo_pr))
+            branch = run.add_step(CreateOrUpdateBranchForPRStep(pull_data, run.servo_pr))

             # Create a pull request against the upstream repository for the new branch.
             assert branch
-            upstream_pr = run.add_step(OpenPRStep(
-                branch, self.wpt, title, body,
-                ["servo-export", "do not merge yet"],
-            ))
+            upstream_pr = run.add_step(
+                OpenPRStep(
+                    branch,
+                    self.wpt,
+                    title,
+                    body,
+                    ["servo-export", "do not merge yet"],
+                )
+            )

             assert upstream_pr
             run.upstream_pr = upstream_pr
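The is_upstreamable expression reduces to "does git diff print anything for the WPT subtree between the PR head and its base". An equivalent sketch using subprocess directly (the repository path and SHA are placeholders):

    import subprocess

    def is_upstreamable(repo_path, head_sha, num_commits):
        # A non-empty diff under tests/wpt/tests/ means the PR touches files
        # that should be mirrored to the upstream WPT repository.
        output = subprocess.check_output(
            ["git", "diff", head_sha, f"{head_sha}~{num_commits}", "--", "tests/wpt/tests/"],
            cwd=repo_path,
        )
        return len(output) > 0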
@@ -264,12 +246,8 @@ class WPTSync:
     def handle_edited_pull_request(self, run: SyncRun, pull_data: dict):
         logging.info("Changing upstream PR title")
         if run.upstream_pr.has_value():
-            run.add_step(ChangePRStep(
-                run.upstream_pr.value(
-                ), "open", pull_data["title"], pull_data["body"]
-            ))
-            run.add_step(CommentStep(
-                run.servo_pr, UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR))
+            run.add_step(ChangePRStep(run.upstream_pr.value(), "open", pull_data["title"], pull_data["body"]))
+            run.add_step(CommentStep(run.servo_pr, UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR))

     def handle_closed_pull_request(self, run: SyncRun, pull_data: dict):
         logging.info("Processing closed PR")
@@ -279,8 +257,7 @@ class WPTSync:
         if pull_data["merged"]:
             # Since the upstreamable changes have now been merged locally, merge the
             # corresponding upstream PR.
-            run.add_step(MergePRStep(
-                run.upstream_pr.value(), ["do not merge yet"]))
+            run.add_step(MergePRStep(run.upstream_pr.value(), ["do not merge yet"]))
         else:
             # If a PR with upstreamable changes is closed without being merged, we
             # don't want to merge the changes upstream either.

@@ -12,17 +12,11 @@
 UPSTREAMABLE_PATH = "tests/wpt/tests/"
 NO_SYNC_SIGNAL = "[no-wpt-sync]"

-OPENED_NEW_UPSTREAM_PR = (
-    "🤖 Opened new upstream WPT pull request ({upstream_pr}) "
-    "with upstreamable changes."
-)
+OPENED_NEW_UPSTREAM_PR = "🤖 Opened new upstream WPT pull request ({upstream_pr}) with upstreamable changes."
 UPDATED_EXISTING_UPSTREAM_PR = (
-    "📝 Transplanted new upstreamable changes to existing "
-    "upstream WPT pull request ({upstream_pr})."
-)
-UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR = (
-    "✍ Updated existing upstream WPT pull request ({upstream_pr}) title and body."
+    "📝 Transplanted new upstreamable changes to existing upstream WPT pull request ({upstream_pr})."
 )
+UPDATED_TITLE_IN_EXISTING_UPSTREAM_PR = "✍ Updated existing upstream WPT pull request ({upstream_pr}) title and body."
 CLOSING_EXISTING_UPSTREAM_PR = (
     "🤖 This change no longer contains upstreamable changes to WPT; closed existing "
     "upstream pull request ({upstream_pr})."
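Each template carries a single {upstream_pr} placeholder that the sync bot fills in at comment time; for example (the PR reference is illustrative):

    OPENED_NEW_UPSTREAM_PR = "🤖 Opened new upstream WPT pull request ({upstream_pr}) with upstreamable changes."

    comment = OPENED_NEW_UPSTREAM_PR.format(upstream_pr="web-platform-tests/wpt#12345")
    assert comment.endswith("(web-platform-tests/wpt#12345) with upstreamable changes.")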
@@ -40,13 +40,9 @@ def authenticated(sync: WPTSync, method, url, json=None) -> requests.Response:
     }

     url = urllib.parse.urljoin(sync.github_api_url, url)
-    response = requests.request(
-        method, url, headers=headers, json=json, timeout=TIMEOUT
-    )
+    response = requests.request(method, url, headers=headers, json=json, timeout=TIMEOUT)
     if int(response.status_code / 100) != 2:
-        raise ValueError(
-            f"Got unexpected {response.status_code} response: {response.text}"
-        )
+        raise ValueError(f"Got unexpected {response.status_code} response: {response.text}")
     return response

@@ -71,33 +67,27 @@ class GithubRepository:
     def get_branch(self, name: str) -> GithubBranch:
         return GithubBranch(self, name)

-    def get_open_pull_request_for_branch(
-        self,
-        github_username: str,
-        branch: GithubBranch
-    ) -> Optional[PullRequest]:
+    def get_open_pull_request_for_branch(self, github_username: str, branch: GithubBranch) -> Optional[PullRequest]:
         """If this repository has an open pull request with the
         given source head reference targeting the main branch,
         return the first matching pull request, otherwise return None."""

-        params = "+".join([
-            "is:pr",
-            "state:open",
-            f"repo:{self.repo}",
-            f"author:{github_username}",
-            f"head:{branch.name}",
-        ])
+        params = "+".join(
+            [
+                "is:pr",
+                "state:open",
+                f"repo:{self.repo}",
+                f"author:{github_username}",
+                f"head:{branch.name}",
+            ]
+        )
         response = authenticated(self.sync, "GET", f"search/issues?q={params}")
         if int(response.status_code / 100) != 2:
             return None

         json = response.json()
-        if not isinstance(json, dict) or \
-                "total_count" not in json or \
-                "items" not in json:
-            raise ValueError(
-                f"Got unexpected response from GitHub search: {response.text}"
-            )
+        if not isinstance(json, dict) or "total_count" not in json or "items" not in json:
+            raise ValueError(f"Got unexpected response from GitHub search: {response.text}")

         if json["total_count"] < 1:
             return None
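Joining the qualifiers with "+" yields a GitHub issue-search query string. A sketch of what params looks like for hypothetical inputs (the repository, user, and branch names are placeholders):

    repo = "web-platform-tests/wpt"
    github_username = "servo-wpt-sync"
    branch_name = "servo_export_12345"

    params = "+".join(
        [
            "is:pr",
            "state:open",
            f"repo:{repo}",
            f"author:{github_username}",
            f"head:{branch_name}",
        ]
    )
    assert params == "is:pr+state:open+repo:web-platform-tests/wpt+author:servo-wpt-sync+head:servo_export_12345"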
@@ -152,9 +142,7 @@ class PullRequest:
         return authenticated(self.context, *args, **kwargs)

     def leave_comment(self, comment: str):
-        return self.api(
-            "POST", f"{self.base_issues_url}/comments", json={"body": comment}
-        )
+        return self.api("POST", f"{self.base_issues_url}/comments", json={"body": comment})

     def change(
         self,

@@ -46,7 +46,7 @@ class Step:
         return


-T = TypeVar('T')
+T = TypeVar("T")


 class AsyncValue(Generic[T]):
@@ -76,8 +76,7 @@ class CreateOrUpdateBranchForPRStep(Step):

     def run(self, run: SyncRun):
         try:
-            commits = self._get_upstreamable_commits_from_local_servo_repo(
-                run.sync)
+            commits = self._get_upstreamable_commits_from_local_servo_repo(run.sync)
             branch_name = self._create_or_update_branch_for_pr(run, commits)
             branch = run.sync.downstream_wpt.get_branch(branch_name)

@@ -88,21 +87,15 @@ class CreateOrUpdateBranchForPRStep(Step):
             logging.info(exception, exc_info=True)

             run.steps = []
-            run.add_step(CommentStep(
-                self.pull_request, COULD_NOT_APPLY_CHANGES_DOWNSTREAM_COMMENT
-            ))
+            run.add_step(CommentStep(self.pull_request, COULD_NOT_APPLY_CHANGES_DOWNSTREAM_COMMENT))
             if run.upstream_pr.has_value():
-                run.add_step(CommentStep(
-                    run.upstream_pr.value(), COULD_NOT_APPLY_CHANGES_UPSTREAM_COMMENT
-                ))
+                run.add_step(CommentStep(run.upstream_pr.value(), COULD_NOT_APPLY_CHANGES_UPSTREAM_COMMENT))

     def _get_upstreamable_commits_from_local_servo_repo(self, sync: WPTSync):
         local_servo_repo = sync.local_servo_repo
         number_of_commits = self.pull_data["commits"]
         pr_head = self.pull_data["head"]["sha"]
-        commit_shas = local_servo_repo.run(
-            "log", "--pretty=%H", pr_head, f"-{number_of_commits}"
-        ).splitlines()
+        commit_shas = local_servo_repo.run("log", "--pretty=%H", pr_head, f"-{number_of_commits}").splitlines()

         filtered_commits = []
         # We must iterate the commits in reverse to ensure we apply older changes first,
@@ -128,12 +121,8 @@ class CreateOrUpdateBranchForPRStep(Step):
                 # commit to another repository.
                 filtered_commits += [
                     {
-                        "author": local_servo_repo.run(
-                            "show", "-s", "--pretty=%an <%ae>", sha
-                        ),
-                        "message": local_servo_repo.run(
-                            "show", "-s", "--pretty=%B", sha
-                        ),
+                        "author": local_servo_repo.run("show", "-s", "--pretty=%an <%ae>", sha),
+                        "message": local_servo_repo.run("show", "-s", "--pretty=%B", sha),
                         "diff": diff,
                     }
                 ]
@@ -146,23 +135,16 @@ class CreateOrUpdateBranchForPRStep(Step):
             try:
                 with open(patch_path, "wb") as file:
                     file.write(commit["diff"])
-                run.sync.local_wpt_repo.run(
-                    "apply", PATCH_FILE_NAME, "-p", str(strip_count)
-                )
+                run.sync.local_wpt_repo.run("apply", PATCH_FILE_NAME, "-p", str(strip_count))
             finally:
                 # Ensure the patch file is not added with the other changes.
                 os.remove(patch_path)

             run.sync.local_wpt_repo.run("add", "--all")
-            run.sync.local_wpt_repo.run(
-                "commit", "--message", commit["message"], "--author", commit["author"]
-            )
+            run.sync.local_wpt_repo.run("commit", "--message", commit["message"], "--author", commit["author"])

-    def _create_or_update_branch_for_pr(
-        self, run: SyncRun, commits: list[dict], pre_commit_callback=None
-    ):
-        branch_name = wpt_branch_name_from_servo_pr_number(
-            self.pull_data["number"])
+    def _create_or_update_branch_for_pr(self, run: SyncRun, commits: list[dict], pre_commit_callback=None):
+        branch_name = wpt_branch_name_from_servo_pr_number(self.pull_data["number"])
         try:
             # Create a new branch with a unique name that is consistent between
             # updates of the same PR.
@@ -176,7 +158,6 @@ class CreateOrUpdateBranchForPRStep(Step):

             # Push the branch upstream (forcing to overwrite any existing changes).
             if not run.sync.suppress_force_push:
-
                 # In order to push to our downstream branch we need to ensure that
                 # the local repository isn't a shallow clone. Shallow clones are
                 # commonly created by GitHub actions.
@@ -186,8 +167,7 @@ class CreateOrUpdateBranchForPRStep(Step):
                 token = run.sync.github_api_token
                 repo = run.sync.downstream_wpt_repo
                 remote_url = f"https://{user}:{token}@github.com/{repo}.git"
-                run.sync.local_wpt_repo.run(
-                    "push", "-f", remote_url, branch_name)
+                run.sync.local_wpt_repo.run("push", "-f", remote_url, branch_name)

             return branch_name
         finally:
@@ -201,8 +181,7 @@ class CreateOrUpdateBranchForPRStep(Step):
 class RemoveBranchForPRStep(Step):
     def __init__(self, pull_request):
         Step.__init__(self, "RemoveBranchForPRStep")
-        self.branch_name = wpt_branch_name_from_servo_pr_number(
-            pull_request["number"])
+        self.branch_name = wpt_branch_name_from_servo_pr_number(pull_request["number"])

     def run(self, run: SyncRun):
         self.name += f":{run.sync.downstream_wpt.get_branch(self.branch_name)}"
@@ -212,8 +191,7 @@ class RemoveBranchForPRStep(Step):
         token = run.sync.github_api_token
         repo = run.sync.downstream_wpt_repo
         remote_url = f"https://{user}:{token}@github.com/{repo}.git"
-        run.sync.local_wpt_repo.run("push", remote_url, "--delete",
-                                    self.branch_name)
+        run.sync.local_wpt_repo.run("push", remote_url, "--delete", self.branch_name)


 class ChangePRStep(Step):
@@ -238,9 +216,7 @@ class ChangePRStep(Step):
         body = self.body
         if body:
             body = run.prepare_body_text(body)
-            self.name += (
-                f':{textwrap.shorten(body, width=20, placeholder="...")}[{len(body)}]'
-            )
+            self.name += f":{textwrap.shorten(body, width=20, placeholder='...')}[{len(body)}]"

         self.pull_request.change(state=self.state, title=self.title, body=body)

@@ -261,12 +237,8 @@ class MergePRStep(Step):
             logging.warning(exception, exc_info=True)

             run.steps = []
-            run.add_step(CommentStep(
-                self.pull_request, COULD_NOT_MERGE_CHANGES_UPSTREAM_COMMENT
-            ))
-            run.add_step(CommentStep(
-                run.servo_pr, COULD_NOT_MERGE_CHANGES_DOWNSTREAM_COMMENT
-            ))
+            run.add_step(CommentStep(self.pull_request, COULD_NOT_MERGE_CHANGES_UPSTREAM_COMMENT))
+            run.add_step(CommentStep(run.servo_pr, COULD_NOT_MERGE_CHANGES_DOWNSTREAM_COMMENT))
             self.pull_request.add_labels(["stale-servo-export"])

|
@ -16,12 +16,12 @@ from dataclasses import dataclass, field
|
||||||
from typing import Dict, List, Optional, Any
|
from typing import Dict, List, Optional, Any
|
||||||
from six import itervalues
|
from six import itervalues
|
||||||
|
|
||||||
DEFAULT_MOVE_UP_CODE = u"\x1b[A"
|
DEFAULT_MOVE_UP_CODE = "\x1b[A"
|
||||||
DEFAULT_CLEAR_EOL_CODE = u"\x1b[K"
|
DEFAULT_CLEAR_EOL_CODE = "\x1b[K"
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class UnexpectedSubtestResult():
|
class UnexpectedSubtestResult:
|
||||||
path: str
|
path: str
|
||||||
subtest: str
|
subtest: str
|
||||||
actual: str
|
actual: str
|
||||||
|
@ -32,15 +32,14 @@ class UnexpectedSubtestResult():
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class UnexpectedResult():
|
class UnexpectedResult:
|
||||||
path: str
|
path: str
|
||||||
actual: str
|
actual: str
|
||||||
expected: str
|
expected: str
|
||||||
message: str
|
message: str
|
||||||
time: int
|
time: int
|
||||||
stack: Optional[str]
|
stack: Optional[str]
|
||||||
unexpected_subtest_results: list[UnexpectedSubtestResult] = field(
|
unexpected_subtest_results: list[UnexpectedSubtestResult] = field(default_factory=list)
|
||||||
default_factory=list)
|
|
||||||
issues: list[str] = field(default_factory=list)
|
issues: list[str] = field(default_factory=list)
|
||||||
flaky: bool = False
|
flaky: bool = False
|
||||||
|
|
||||||
|
@ -48,13 +47,13 @@ class UnexpectedResult():
|
||||||
output = UnexpectedResult.to_lines(self)
|
output = UnexpectedResult.to_lines(self)
|
||||||
|
|
||||||
if self.unexpected_subtest_results:
|
if self.unexpected_subtest_results:
|
||||||
|
|
||||||
def make_subtests_failure(subtest_results):
|
def make_subtests_failure(subtest_results):
|
||||||
# Test names sometimes contain control characters, which we want
|
# Test names sometimes contain control characters, which we want
|
||||||
# to be printed in their raw form, and not their interpreted form.
|
# to be printed in their raw form, and not their interpreted form.
|
||||||
lines = []
|
lines = []
|
||||||
for subtest in subtest_results[:-1]:
|
for subtest in subtest_results[:-1]:
|
||||||
lines += UnexpectedResult.to_lines(
|
lines += UnexpectedResult.to_lines(subtest, print_stack=False)
|
||||||
subtest, print_stack=False)
|
|
||||||
lines += UnexpectedResult.to_lines(subtest_results[-1])
|
lines += UnexpectedResult.to_lines(subtest_results[-1])
|
||||||
return self.wrap_and_indent_lines(lines, " ").splitlines()
|
return self.wrap_and_indent_lines(lines, " ").splitlines()
|
||||||
|
|
||||||
|
@ -78,11 +77,11 @@ class UnexpectedResult():
|
||||||
if not lines:
|
if not lines:
|
||||||
return ""
|
return ""
|
||||||
|
|
||||||
output = indent + u"\u25B6 %s\n" % lines[0]
|
output = indent + "\u25b6 %s\n" % lines[0]
|
||||||
for line in lines[1:-1]:
|
for line in lines[1:-1]:
|
||||||
output += indent + u"\u2502 %s\n" % line
|
output += indent + "\u2502 %s\n" % line
|
||||||
if len(lines) > 1:
|
if len(lines) > 1:
|
||||||
output += indent + u"\u2514 %s\n" % lines[-1]
|
output += indent + "\u2514 %s\n" % lines[-1]
|
||||||
return output
|
return output
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
|
@ -112,6 +111,7 @@ class UnexpectedResult():
|
||||||
class ServoHandler(mozlog.reader.LogHandler):
|
class ServoHandler(mozlog.reader.LogHandler):
|
||||||
"""LogHandler designed to collect unexpected results for use by
|
"""LogHandler designed to collect unexpected results for use by
|
||||||
script or by the ServoFormatter output formatter."""
|
script or by the ServoFormatter output formatter."""
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.reset_state()
|
self.reset_state()
|
||||||
|
|
||||||
|
@ -126,24 +126,24 @@ class ServoHandler(mozlog.reader.LogHandler):
|
||||||
self.unexpected_results: List[UnexpectedResult] = []
|
self.unexpected_results: List[UnexpectedResult] = []
|
||||||
|
|
||||||
self.expected = {
|
self.expected = {
|
||||||
'OK': 0,
|
"OK": 0,
|
||||||
'PASS': 0,
|
"PASS": 0,
|
||||||
'FAIL': 0,
|
"FAIL": 0,
|
||||||
'ERROR': 0,
|
"ERROR": 0,
|
||||||
'TIMEOUT': 0,
|
"TIMEOUT": 0,
|
||||||
'SKIP': 0,
|
"SKIP": 0,
|
||||||
'CRASH': 0,
|
"CRASH": 0,
|
||||||
'PRECONDITION_FAILED': 0,
|
"PRECONDITION_FAILED": 0,
|
||||||
}
|
}
|
||||||
|
|
||||||
self.unexpected_tests = {
|
self.unexpected_tests = {
|
||||||
'OK': [],
|
"OK": [],
|
||||||
'PASS': [],
|
"PASS": [],
|
||||||
'FAIL': [],
|
"FAIL": [],
|
||||||
'ERROR': [],
|
"ERROR": [],
|
||||||
'TIMEOUT': [],
|
"TIMEOUT": [],
|
||||||
'CRASH': [],
|
"CRASH": [],
|
||||||
'PRECONDITION_FAILED': [],
|
"PRECONDITION_FAILED": [],
|
||||||
}
|
}
|
||||||
|
|
||||||
def suite_start(self, data):
|
def suite_start(self, data):
|
||||||
|
@ -155,20 +155,19 @@ class ServoHandler(mozlog.reader.LogHandler):
|
||||||
pass
|
pass
|
||||||
|
|
||||||
def test_start(self, data):
|
def test_start(self, data):
|
||||||
self.running_tests[data['thread']] = data['test']
|
self.running_tests[data["thread"]] = data["test"]
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def data_was_for_expected_result(data):
|
def data_was_for_expected_result(data):
|
||||||
if "expected" not in data:
|
if "expected" not in data:
|
||||||
return True
|
return True
|
||||||
return "known_intermittent" in data \
|
return "known_intermittent" in data and data["status"] in data["known_intermittent"]
|
||||||
and data["status"] in data["known_intermittent"]
|
|
||||||
|
|
||||||
def test_end(self, data: dict) -> Optional[UnexpectedResult]:
|
def test_end(self, data: dict) -> Optional[UnexpectedResult]:
|
||||||
self.completed_tests += 1
|
self.completed_tests += 1
|
||||||
test_status = data["status"]
|
test_status = data["status"]
|
||||||
test_path = data["test"]
|
test_path = data["test"]
|
||||||
del self.running_tests[data['thread']]
|
del self.running_tests[data["thread"]]
|
||||||
|
|
||||||
had_expected_test_result = self.data_was_for_expected_result(data)
|
had_expected_test_result = self.data_was_for_expected_result(data)
|
||||||
subtest_failures = self.subtest_failures.pop(test_path, [])
|
subtest_failures = self.subtest_failures.pop(test_path, [])
|
||||||
|
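data_was_for_expected_result treats a missing "expected" key as success and otherwise accepts known intermittents. A sketch exercising it with hand-written mozlog-style messages:

    def data_was_for_expected_result(data):
        if "expected" not in data:
            return True
        return "known_intermittent" in data and data["status"] in data["known_intermittent"]

    assert data_was_for_expected_result({"status": "PASS"})  # mozlog omits "expected" on a match
    # An unexpected FAIL is still tolerated when listed as a known intermittent.
    assert data_was_for_expected_result({"status": "FAIL", "expected": "PASS", "known_intermittent": ["FAIL"]})
    assert not data_was_for_expected_result({"status": "FAIL", "expected": "PASS"})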
@@ -191,7 +190,7 @@ class ServoHandler(mozlog.reader.LogHandler):
             data.get("message", ""),
             data["time"],
             stack,
-            subtest_failures
+            subtest_failures,
         )

         if not had_expected_test_result:
@@ -205,19 +204,21 @@ class ServoHandler(mozlog.reader.LogHandler):
     def test_status(self, data: dict):
         if self.data_was_for_expected_result(data):
             return
-        self.subtest_failures[data["test"]].append(UnexpectedSubtestResult(
-            data["test"],
-            data["subtest"],
-            data["status"],
-            data["expected"],
-            data.get("message", ""),
-            data["time"],
-            data.get('stack', None),
-        ))
+        self.subtest_failures[data["test"]].append(
+            UnexpectedSubtestResult(
+                data["test"],
+                data["subtest"],
+                data["status"],
+                data["expected"],
+                data.get("message", ""),
+                data["time"],
+                data.get("stack", None),
+            )
+        )

     def process_output(self, data):
-        if 'test' in data:
-            self.test_output[data['test']] += data['data'] + "\n"
+        if "test" in data:
+            self.test_output[data["test"]] += data["data"] + "\n"

     def log(self, _):
         pass
@@ -226,6 +227,7 @@ class ServoHandler(mozlog.reader.LogHandler):
 class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
     """Formatter designed to produce unexpected test results grouped
     together in a readable format."""
+
     def __init__(self):
         ServoHandler.__init__(self)
         self.current_display = ""
@@ -239,18 +241,17 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):

         try:
             import blessed
+
             self.terminal = blessed.Terminal()
             self.move_up = self.terminal.move_up
             self.clear_eol = self.terminal.clear_eol
         except Exception as exception:
-            sys.stderr.write("GroupingFormatter: Could not get terminal "
-                             "control characters: %s\n" % exception)
+            sys.stderr.write("GroupingFormatter: Could not get terminal control characters: %s\n" % exception)

     def text_to_erase_display(self):
         if not self.interactive or not self.current_display:
             return ""
-        return ((self.move_up + self.clear_eol)
-                * self.current_display.count('\n'))
+        return (self.move_up + self.clear_eol) * self.current_display.count("\n")

     def generate_output(self, text=None, new_display=None):
         if not self.interactive:
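text_to_erase_display rewinds the cursor one line per newline previously drawn. A self-contained sketch of the arithmetic with the module's default ANSI codes (the status text is made up):

    DEFAULT_MOVE_UP_CODE = "\x1b[A"
    DEFAULT_CLEAR_EOL_CODE = "\x1b[K"

    current_display = "[314/1000]\nrunning /css/example.html\n"
    erase = (DEFAULT_MOVE_UP_CODE + DEFAULT_CLEAR_EOL_CODE) * current_display.count("\n")
    # Two lines were drawn, so two move-up/clear-to-eol pairs rewind the display.
    assert erase.count(DEFAULT_MOVE_UP_CODE) == 2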
@@ -278,17 +279,16 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
                 max_width = self.line_width - len(new_display)
             else:
                 max_width = sys.maxsize
-            return new_display + ("\n%s" % indent).join(
-                val[:max_width] for val in self.running_tests.values()) + "\n"
+            return new_display + ("\n%s" % indent).join(val[:max_width] for val in self.running_tests.values()) + "\n"
         else:
             return new_display + "No tests running.\n"

     def suite_start(self, data):
         ServoHandler.suite_start(self, data)
         if self.number_of_tests == 0:
-            return "Running tests in %s\n\n" % data[u'source']
+            return "Running tests in %s\n\n" % data["source"]
         else:
-            return "Running %i tests in %s\n\n" % (self.number_of_tests, data[u'source'])
+            return "Running %i tests in %s\n\n" % (self.number_of_tests, data["source"])

     def test_start(self, data):
         ServoHandler.test_start(self, data)
@@ -300,8 +300,7 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
         if unexpected_result:
             # Surround test output by newlines so that it is easier to read.
             output_for_unexpected_test = f"{unexpected_result}\n"
-            return self.generate_output(text=output_for_unexpected_test,
-                                        new_display=self.build_status_line())
+            return self.generate_output(text=output_for_unexpected_test, new_display=self.build_status_line())

         # Print reason that tests are skipped.
         if data["status"] == "SKIP":
@@ -321,12 +320,14 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
     def suite_end(self, data):
         ServoHandler.suite_end(self, data)
         if not self.interactive:
-            output = u"\n"
+            output = "\n"
         else:
             output = ""

-        output += u"Ran %i tests finished in %.1f seconds.\n" % (
-            self.completed_tests, (data["time"] - self.suite_start_time) / 1000)
+        output += "Ran %i tests finished in %.1f seconds.\n" % (
+            self.completed_tests,
+            (data["time"] - self.suite_start_time) / 1000,
+        )

         # Sum the number of expected test results from each category
         expected_test_results = sum(self.expected.values())
@@ -337,29 +338,27 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):
         def text_for_unexpected_list(text, section):
             tests = self.unexpected_tests[section]
             if not tests:
-                return u""
-            return u" \u2022 %i tests %s\n" % (len(tests), text)
+                return ""
+            return " \u2022 %i tests %s\n" % (len(tests), text)

-        output += text_for_unexpected_list(u"crashed unexpectedly", 'CRASH')
-        output += text_for_unexpected_list(u"had errors unexpectedly", 'ERROR')
-        output += text_for_unexpected_list(u"failed unexpectedly", 'FAIL')
-        output += text_for_unexpected_list(u"precondition failed unexpectedly", 'PRECONDITION_FAILED')
-        output += text_for_unexpected_list(u"timed out unexpectedly", 'TIMEOUT')
-        output += text_for_unexpected_list(u"passed unexpectedly", 'PASS')
-        output += text_for_unexpected_list(u"unexpectedly okay", 'OK')
+        output += text_for_unexpected_list("crashed unexpectedly", "CRASH")
+        output += text_for_unexpected_list("had errors unexpectedly", "ERROR")
+        output += text_for_unexpected_list("failed unexpectedly", "FAIL")
+        output += text_for_unexpected_list("precondition failed unexpectedly", "PRECONDITION_FAILED")
+        output += text_for_unexpected_list("timed out unexpectedly", "TIMEOUT")
+        output += text_for_unexpected_list("passed unexpectedly", "PASS")
+        output += text_for_unexpected_list("unexpectedly okay", "OK")

         num_with_failing_subtests = len(self.tests_with_failing_subtests)
         if num_with_failing_subtests:
-            output += (u" \u2022 %i tests had unexpected subtest results\n"
-                       % num_with_failing_subtests)
+            output += " \u2022 %i tests had unexpected subtest results\n" % num_with_failing_subtests
         output += "\n"

         # Repeat failing test output, so that it is easier to find, since the
         # non-interactive version prints all the test names.
         if not self.interactive and self.unexpected_results:
-            output += u"Tests with unexpected results:\n"
-            output += "".join([str(result)
-                               for result in self.unexpected_results])
+            output += "Tests with unexpected results:\n"
+            output += "".join([str(result) for result in self.unexpected_results])

         return self.generate_output(text=output, new_display="")

@@ -371,8 +370,8 @@ class ServoFormatter(mozlog.formatters.base.BaseFormatter, ServoHandler):

         # We are logging messages that begin with STDERR, because that is how exceptions
         # in this formatter are indicated.
-        if data['message'].startswith('STDERR'):
-            return self.generate_output(text=data['message'] + "\n")
+        if data["message"].startswith("STDERR"):
+            return self.generate_output(text=data["message"] + "\n")

-        if data['level'] in ('CRITICAL', 'ERROR'):
-            return self.generate_output(text=data['message'] + "\n")
+        if data["level"] in ("CRITICAL", "ERROR"):
+            return self.generate_output(text=data["message"] + "\n")

@ -22,10 +22,10 @@ from wptrunner import wptlogging
|
||||||
|
|
||||||
def create_parser():
|
def create_parser():
|
||||||
p = argparse.ArgumentParser()
|
p = argparse.ArgumentParser()
|
||||||
p.add_argument("--check-clean", action="store_true",
|
p.add_argument(
|
||||||
help="Check that updating the manifest doesn't lead to any changes")
|
"--check-clean", action="store_true", help="Check that updating the manifest doesn't lead to any changes"
|
||||||
p.add_argument("--rebuild", action="store_true",
|
)
|
||||||
help="Rebuild the manifest from scratch")
|
p.add_argument("--rebuild", action="store_true", help="Rebuild the manifest from scratch")
|
||||||
commandline.add_logging_group(p)
|
commandline.add_logging_group(p)
|
||||||
|
|
||||||
return p
|
return p
|
||||||
|
@ -34,11 +34,13 @@ def create_parser():
|
||||||
def update(check_clean=True, rebuild=False, logger=None, **kwargs):
|
def update(check_clean=True, rebuild=False, logger=None, **kwargs):
|
||||||
if not logger:
|
if not logger:
|
||||||
logger = wptlogging.setup(kwargs, {"mach": sys.stdout})
|
logger = wptlogging.setup(kwargs, {"mach": sys.stdout})
|
||||||
kwargs = {"config": os.path.join(WPT_PATH, "config.ini"),
|
kwargs = {
|
||||||
|
"config": os.path.join(WPT_PATH, "config.ini"),
|
||||||
"product": "servo",
|
"product": "servo",
|
||||||
"manifest_path": os.path.join(WPT_PATH, "meta"),
|
"manifest_path": os.path.join(WPT_PATH, "meta"),
|
||||||
"tests_root": None,
|
"tests_root": None,
|
||||||
"metadata_root": None}
|
"metadata_root": None,
|
||||||
|
}
|
||||||
|
|
||||||
set_from_config(kwargs)
|
set_from_config(kwargs)
|
||||||
config = kwargs["config"]
|
config = kwargs["config"]
|
||||||
|
@@ -53,15 +55,15 @@ def update(check_clean=True, rebuild=False, logger=None, **kwargs):

 def _update(logger, test_paths, rebuild):
     for url_base, paths in iteritems(test_paths):
         manifest_path = os.path.join(paths.metadata_path, "MANIFEST.json")
-        cache_subdir = os.path.relpath(os.path.dirname(manifest_path),
-                                       os.path.dirname(__file__))
-        wptmanifest.manifest.load_and_update(paths.tests_path,
-                                             manifest_path,
-                                             url_base,
-                                             working_copy=True,
-                                             rebuild=rebuild,
-                                             cache_root=os.path.join(SERVO_ROOT, ".wpt",
-                                                                     cache_subdir))
+        cache_subdir = os.path.relpath(os.path.dirname(manifest_path), os.path.dirname(__file__))
+        wptmanifest.manifest.load_and_update(
+            paths.tests_path,
+            manifest_path,
+            url_base,
+            working_copy=True,
+            rebuild=rebuild,
+            cache_root=os.path.join(SERVO_ROOT, ".wpt", cache_subdir),
+        )
     return 0
@@ -72,26 +74,25 @@ def _check_clean(logger, test_paths):
         tests_path = paths.tests_path
         manifest_path = os.path.join(paths.metadata_path, "MANIFEST.json")

-        old_manifest = wptmanifest.manifest.load_and_update(tests_path,
-                                                            manifest_path,
-                                                            url_base,
-                                                            working_copy=False,
-                                                            update=False,
-                                                            write_manifest=False)
+        old_manifest = wptmanifest.manifest.load_and_update(
+            tests_path, manifest_path, url_base, working_copy=False, update=False, write_manifest=False
+        )

         # Even if no cache is specified, one will be used automatically by the
         # VCS integration. Create a brand new cache every time to ensure that
         # the VCS integration always thinks that any file modifications in the
         # working directory are new and interesting.
         cache_root = tempfile.mkdtemp()
-        new_manifest = wptmanifest.manifest.load_and_update(tests_path,
-                                                            manifest_path,
-                                                            url_base,
-                                                            working_copy=True,
-                                                            update=True,
-                                                            cache_root=cache_root,
-                                                            write_manifest=False,
-                                                            allow_cached=False)
+        new_manifest = wptmanifest.manifest.load_and_update(
+            tests_path,
+            manifest_path,
+            url_base,
+            working_copy=True,
+            update=True,
+            cache_root=cache_root,
+            write_manifest=False,
+            allow_cached=False,
+        )

         manifests_by_path[manifest_path] = (old_manifest, new_manifest)
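The hunk above is the heart of the --check-clean mode: load the manifest once without updating, then rebuild it into a throwaway cache so the VCS layer treats everything as fresh, and later compare the two. The pattern is independent of manifests; a minimal sketch of the same idea, where check_clean_build and build are hypothetical names rather than Servo's API:

import shutil
import tempfile

def check_clean_build(build) -> bool:
    # `build(cache_root)` is a hypothetical callable returning a comparable snapshot.
    old = build(cache_root=None)         # whatever is on disk right now
    cache_root = tempfile.mkdtemp()      # brand-new cache: nothing is "already seen"
    try:
        new = build(cache_root=cache_root)  # rebuilt from a clean slate
    finally:
        shutil.rmtree(cache_root, ignore_errors=True)  # never reuse the cache
    return old == new                    # clean iff a fresh rebuild changes nothing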
@@ -116,8 +117,7 @@ def diff_manifests(logger, manifest_path, old_manifest, new_manifest):
     """
     logger.info("Diffing old and new manifests %s" % manifest_path)
     old_items, new_items = defaultdict(set), defaultdict(set)
-    for manifest, items in [(old_manifest, old_items),
-                            (new_manifest, new_items)]:
+    for manifest, items in [(old_manifest, old_items), (new_manifest, new_items)]:
         for test_type, path, tests in manifest:
             for test in tests:
                 test_id = [test.id]
@@ -158,8 +158,8 @@ def diff_manifests(logger, manifest_path, old_manifest, new_manifest):
     if clean:
         # Manifest currently has some list vs tuple inconsistencies that break
         # a simple equality comparison.
-        old_paths = old_manifest.to_json()['items']
-        new_paths = new_manifest.to_json()['items']
+        old_paths = old_manifest.to_json()["items"]
+        new_paths = new_manifest.to_json()["items"]
         if old_paths != new_paths:
             logger.warning("Manifest %s contains correct tests but file hashes changed." % manifest_path)  # noqa
             clean = False
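The list-vs-tuple caveat in the comment above is a common pitfall when comparing nested Python structures: (1, 2) != [1, 2], so two logically identical manifests can compare unequal. Comparing the to_json() forms sidesteps this, because serialization normalizes tuples to lists. A minimal stdlib illustration of the same trick:

import json

old = {"items": [("a.html", "abc123")]}   # tuples from one code path
new = {"items": [["a.html", "abc123"]]}   # lists from another

print(old == new)  # False: tuple vs list
# A JSON round-trip turns tuples into lists, giving a structural comparison:
print(json.loads(json.dumps(old)) == json.loads(json.dumps(new)))  # True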
@@ -168,8 +168,4 @@ def diff_manifests(logger, manifest_path, old_manifest, new_manifest):


 def log_error(logger, manifest_path, msg):
-    logger.lint_error(path=manifest_path,
-                      message=msg,
-                      lineno=0,
-                      source="",
-                      linter="wpt-manifest")
+    logger.lint_error(path=manifest_path, message=msg, lineno=0, source="", linter="wpt-manifest")
@@ -19,10 +19,7 @@ import mozlog
 import mozlog.formatters

 from . import SERVO_ROOT, WPT_PATH, WPT_TOOLS_PATH
-from .grouping_formatter import (
-    ServoFormatter, ServoHandler,
-    UnexpectedResult, UnexpectedSubtestResult
-)
+from .grouping_formatter import ServoFormatter, ServoHandler, UnexpectedResult, UnexpectedSubtestResult
 from wptrunner import wptcommandline
 from wptrunner import wptrunner
@@ -63,12 +60,8 @@ def run_tests(default_binary_path: str, **kwargs):
     set_if_none(kwargs, "processes", multiprocessing.cpu_count())

     set_if_none(kwargs, "ca_cert_path", os.path.join(CERTS_PATH, "cacert.pem"))
-    set_if_none(
-        kwargs, "host_key_path", os.path.join(CERTS_PATH, "web-platform.test.key")
-    )
-    set_if_none(
-        kwargs, "host_cert_path", os.path.join(CERTS_PATH, "web-platform.test.pem")
-    )
+    set_if_none(kwargs, "host_key_path", os.path.join(CERTS_PATH, "web-platform.test.key"))
+    set_if_none(kwargs, "host_cert_path", os.path.join(CERTS_PATH, "web-platform.test.pem"))
     # Set `id_hash` as the default chunk, as this better distributes testing across different
     # chunks and leads to more consistent timing on GitHub Actions.
     set_if_none(kwargs, "chunk_type", "id_hash")
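The `id_hash` chunking mentioned in the comment assigns each test to a chunk from a hash of its id, so the assignment is stable across runs and roughly uniform regardless of directory layout. wptrunner implements this internally; the gist, as a standalone sketch (the hash and names below are illustrative, not wptrunner's exact function):

import hashlib

def chunk_for(test_id: str, total_chunks: int) -> int:
    # Stable across processes and runs (unlike the built-in hash(), which is
    # randomized per interpreter) and roughly uniform across chunks.
    digest = hashlib.md5(test_id.encode("utf-8")).hexdigest()
    return int(digest, 16) % total_chunks

tests = ["/css/a.html", "/css/b.html", "/dom/c.html", "/fetch/d.html"]
this_chunk, total = 2, 4  # 1-based chunk index, as in chunked CI jobs
mine = [t for t in tests if chunk_for(t, total) == this_chunk - 1]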
@@ -139,8 +132,7 @@ def run_tests(default_binary_path: str, **kwargs):
         handler.reset_state()

         print(80 * "=")
-        print(f"Rerunning {len(unexpected_results)} tests "
-              "with unexpected results to detect flaky tests.")
+        print(f"Rerunning {len(unexpected_results)} tests with unexpected results to detect flaky tests.")
         unexpected_results_tests = [result.path for result in unexpected_results]
         kwargs["test_list"] = unexpected_results_tests
         kwargs["include"] = unexpected_results_tests
@@ -158,8 +150,7 @@ def run_tests(default_binary_path: str, **kwargs):
         for result in unexpected_results:
             result.flaky = result.path not in stable_tests

-        all_filtered = filter_intermittents(unexpected_results,
-                                            filter_intermittents_output)
+        all_filtered = filter_intermittents(unexpected_results, filter_intermittents_output)
         return_value = 0 if all_filtered else 1

     # Write the unexpected-only raw log if that was specified on the command-line.
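The two hunks above implement a simple flakiness probe: rerun only the tests that produced unexpected results, call a test "stable" if it misbehaves again, and mark everything else flaky. In isolation the classification looks roughly like this (a sketch, not the runner's exact code):

def classify_flaky(first_run_failures: set[str], second_run_failures: set[str]) -> dict[str, bool]:
    # A test is "stable" (really failing) if it also failed on the rerun;
    # anything that failed once but passed the second time is flagged flaky.
    return {path: path not in second_run_failures for path in first_run_failures}

flaky = classify_flaky({"/a.html", "/b.html"}, {"/a.html"})
# {"/a.html": False, "/b.html": True} -> only /b.html is flaky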
@@ -168,9 +159,7 @@ def run_tests(default_binary_path: str, **kwargs):
             print("'--log-raw-unexpected' not written without '--log-raw'.")
         else:
             write_unexpected_only_raw_log(
-                handler.unexpected_results,
-                raw_log_outputs[0].name,
-                unexpected_raw_log_output_file
+                handler.unexpected_results, raw_log_outputs[0].name, unexpected_raw_log_output_file
             )

     return return_value
@@ -182,12 +171,10 @@ class GithubContextInformation(NamedTuple):
     branch_name: Optional[str]


-class TrackerDashboardFilter():
+class TrackerDashboardFilter:
     def __init__(self):
         base_url = os.environ.get(TRACKER_API_ENV_VAR, TRACKER_API)
-        self.headers = {
-            "Content-Type": "application/json"
-        }
+        self.headers = {"Content-Type": "application/json"}
         if TRACKER_DASHBOARD_SECRET_ENV_VAR in os.environ and os.environ[TRACKER_DASHBOARD_SECRET_ENV_VAR]:
             self.url = f"{base_url}/dashboard/attempts"
             secret = os.environ[TRACKER_DASHBOARD_SECRET_ENV_VAR]
@@ -201,10 +188,10 @@ class TrackerDashboardFilter():
         if not github_context:
             return GithubContextInformation(None, None, None)

-        repository = github_context['repository']
+        repository = github_context["repository"]
         repo_url = f"https://github.com/{repository}"

-        run_id = github_context['run_id']
+        run_id = github_context["run_id"]
         build_url = f"{repo_url}/actions/runs/{run_id}"

         commit_title = "<no title>"
@@ -214,32 +201,27 @@ class TrackerDashboardFilter():
             commit_title = github_context["event"]["head_commit"]["message"]

         pr_url = None
-        match = re.match(r"^Auto merge of #(\d+)", commit_title) or \
-            re.match(r"\(#(\d+)\)", commit_title)
+        match = re.match(r"^Auto merge of #(\d+)", commit_title) or re.match(r"\(#(\d+)\)", commit_title)
         if match:
             pr_url = f"{repo_url}/pull/{match.group(1)}" if match else None

-        return GithubContextInformation(
-            build_url,
-            pr_url,
-            github_context["ref_name"]
-        )
+        return GithubContextInformation(build_url, pr_url, github_context["ref_name"])

     def make_data_from_result(
         self,
         result: Union[UnexpectedResult, UnexpectedSubtestResult],
     ) -> dict:
         data = {
-            'path': result.path,
-            'subtest': None,
-            'expected': result.expected,
-            'actual': result.actual,
-            'time': result.time // 1000,
+            "path": result.path,
+            "subtest": None,
+            "expected": result.expected,
+            "actual": result.actual,
+            "time": result.time // 1000,
             # Truncate the message, to avoid issues with lots of output causing "HTTP
             # Error 413: Request Entity Too Large."
             # See https://github.com/servo/servo/issues/31845.
-            'message': result.message[0:TRACKER_DASHBOARD_MAXIMUM_OUTPUT_LENGTH],
-            'stack': result.stack,
+            "message": result.message[0:TRACKER_DASHBOARD_MAXIMUM_OUTPUT_LENGTH],
+            "stack": result.stack,
         }
         if isinstance(result, UnexpectedSubtestResult):
             data["subtest"] = result.subtest
@@ -256,20 +238,22 @@ class TrackerDashboardFilter():
         try:
             request = urllib.request.Request(
                 url=self.url,
-                method='POST',
-                data=json.dumps({
-                    'branch': context.branch_name,
-                    'build_url': context.build_url,
-                    'pull_url': context.pull_url,
-                    'attempts': attempts
-                }).encode('utf-8'),
-                headers=self.headers)
+                method="POST",
+                data=json.dumps(
+                    {
+                        "branch": context.branch_name,
+                        "build_url": context.build_url,
+                        "pull_url": context.pull_url,
+                        "attempts": attempts,
+                    }
+                ).encode("utf-8"),
+                headers=self.headers,
+            )

             known_intermittents = dict()
             with urllib.request.urlopen(request) as response:
                 for test in json.load(response)["known"]:
-                    known_intermittents[test["path"]] = \
-                        [issue["number"] for issue in test["issues"]]
+                    known_intermittents[test["path"]] = [issue["number"] for issue in test["issues"]]

         except urllib.error.HTTPError as e:
             print(e)
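Read together, the request and response handling above define a plain JSON-over-POST exchange with the intermittent-tracker dashboard. The shapes below are inferred from this code alone, not from dashboard documentation, and all concrete values are illustrative:

# Inferred request payload, as a Python literal:
payload = {
    "branch": "main",
    "build_url": "https://github.com/servo/servo/actions/runs/123",
    "pull_url": "https://github.com/servo/servo/pull/456",
    "attempts": [
        {"path": "/css/foo.html", "subtest": None, "expected": "PASS",
         "actual": "FAIL", "time": 12, "message": "(truncated output)", "stack": None}
    ],
}
# Inferred response shape, and how the loop above turns it into a lookup table:
response_body = {"known": [{"path": "/css/foo.html", "issues": [{"number": 31845}]}]}
known = {test["path"]: [issue["number"] for issue in test["issues"]] for test in response_body["known"]}
assert known == {"/css/foo.html": [31845]}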
@@ -280,13 +264,9 @@ class TrackerDashboardFilter():
                 result.issues = known_intermittents.get(result.path, [])


-def filter_intermittents(
-    unexpected_results: List[UnexpectedResult],
-    output_path: str
-) -> bool:
+def filter_intermittents(unexpected_results: List[UnexpectedResult], output_path: str) -> bool:
     dashboard = TrackerDashboardFilter()
-    print(f"Filtering {len(unexpected_results)} "
-          f"unexpected results for known intermittents via <{dashboard.url}>")
+    print(f"Filtering {len(unexpected_results)} unexpected results for known intermittents via <{dashboard.url}>")
     dashboard.report_failures(unexpected_results)

     def add_result(output, text, results: List[UnexpectedResult], filter_func) -> None:
@@ -298,12 +278,14 @@ def filter_intermittents(
         return not result.flaky and not result.issues

     output: List[str] = []
-    add_result(output, "Flaky unexpected results", unexpected_results,
-               lambda result: result.flaky)
-    add_result(output, "Stable unexpected results that are known-intermittent",
-               unexpected_results, lambda result: not result.flaky and result.issues)
-    add_result(output, "Stable unexpected results",
-               unexpected_results, is_stable_and_unexpected)
+    add_result(output, "Flaky unexpected results", unexpected_results, lambda result: result.flaky)
+    add_result(
+        output,
+        "Stable unexpected results that are known-intermittent",
+        unexpected_results,
+        lambda result: not result.flaky and result.issues,
+    )
+    add_result(output, "Stable unexpected results", unexpected_results, is_stable_and_unexpected)
     print("\n".join(output))

     with open(output_path, "w", encoding="utf-8") as file:
@@ -313,9 +295,7 @@ def filter_intermittents(


 def write_unexpected_only_raw_log(
-    unexpected_results: List[UnexpectedResult],
-    raw_log_file: str,
-    filtered_raw_log_file: str
+    unexpected_results: List[UnexpectedResult], raw_log_file: str, filtered_raw_log_file: str
 ):
     tests = [result.path for result in unexpected_results]
     print(f"Writing unexpected-only raw log to {filtered_raw_log_file}")
@@ -324,6 +304,5 @@ def write_unexpected_only_raw_log(
     with open(raw_log_file) as input:
         for line in input.readlines():
             data = json.loads(line)
-            if data["action"] in ["suite_start", "suite_end"] or \
-                ("test" in data and data["test"] in tests):
+            if data["action"] in ["suite_start", "suite_end"] or ("test" in data and data["test"] in tests):
                 output.write(line)
@@ -49,13 +49,13 @@ PORT = 9000


 @dataclasses.dataclass
-class MockPullRequest():
+class MockPullRequest:
     head: str
     number: int
     state: str = "open"


-class MockGitHubAPIServer():
+class MockGitHubAPIServer:
     def __init__(self, port: int):
         self.port = port
         self.disable_logging()
@@ -65,18 +65,19 @@ class MockGitHubAPIServer():
         class NoLoggingHandler(WSGIRequestHandler):
             def log_message(self, *args):
                 pass

         if logging.getLogger().level == logging.DEBUG:
             handler = WSGIRequestHandler
         else:
             handler = NoLoggingHandler

-        self.server = make_server('localhost', self.port, self.app, handler_class=handler)
+        self.server = make_server("localhost", self.port, self.app, handler_class=handler)
         self.start_server_thread()

     def disable_logging(self):
         flask.cli.show_server_banner = lambda *args: None
         logging.getLogger("werkzeug").disabled = True
-        logging.getLogger('werkzeug').setLevel(logging.CRITICAL)
+        logging.getLogger("werkzeug").setLevel(logging.CRITICAL)

     def start(self):
         self.thread.start()
@@ -84,21 +85,21 @@ class MockGitHubAPIServer():
         # Wait for the server to be started.
         while True:
             try:
-                response = requests.get(f'http://localhost:{self.port}/ping', timeout=1)
+                response = requests.get(f"http://localhost:{self.port}/ping", timeout=1)
                 assert response.status_code == 200
-                assert response.text == 'pong'
+                assert response.text == "pong"
                 break
             except Exception:
                 time.sleep(0.1)

     def reset_server_state_with_pull_requests(self, pulls: list[MockPullRequest]):
         response = requests.get(
-            f'http://localhost:{self.port}/reset-mock-github',
+            f"http://localhost:{self.port}/reset-mock-github",
             json=[dataclasses.asdict(pull_request) for pull_request in pulls],
-            timeout=1
+            timeout=1,
         )
         assert response.status_code == 200
-        assert response.text == '👍'
+        assert response.text == "👍"

     def shutdown(self):
         self.server.shutdown()
@@ -111,26 +112,25 @@ class MockGitHubAPIServer():

         @self.app.route("/ping")
         def ping():
-            return ('pong', 200)
+            return ("pong", 200)

         @self.app.route("/reset-mock-github")
         def reset_server():
             self.pulls = [
-                MockPullRequest(pull_request['head'],
-                                pull_request['number'],
-                                pull_request['state'])
-                for pull_request in flask.request.json]
-            return ('👍', 200)
+                MockPullRequest(pull_request["head"], pull_request["number"], pull_request["state"])
+                for pull_request in flask.request.json
+            ]
+            return ("👍", 200)

-        @self.app.route("/repos/<org>/<repo>/pulls/<int:number>/merge", methods=['PUT'])
+        @self.app.route("/repos/<org>/<repo>/pulls/<int:number>/merge", methods=["PUT"])
         def merge_pull_request(org, repo, number):
             for pull_request in self.pulls:
                 if pull_request.number == number:
-                    pull_request.state = 'closed'
-                    return ('', 204)
-            return ('', 404)
+                    pull_request.state = "closed"
+                    return ("", 204)
+            return ("", 404)

-        @self.app.route("/search/issues", methods=['GET'])
+        @self.app.route("/search/issues", methods=["GET"])
         def search():
             params = {}
             param_strings = flask.request.args.get("q", "").split(" ")
@@ -145,38 +145,29 @@ class MockGitHubAPIServer():

             for pull_request in self.pulls:
                 if pull_request.head.endswith(head_ref):
-                    return json.dumps({
-                        "total_count": 1,
-                        "items": [{
-                            "number": pull_request.number
-                        }]
-                    })
+                    return json.dumps({"total_count": 1, "items": [{"number": pull_request.number}]})
             return json.dumps({"total_count": 0, "items": []})

-        @self.app.route("/repos/<org>/<repo>/pulls", methods=['POST'])
+        @self.app.route("/repos/<org>/<repo>/pulls", methods=["POST"])
         def create_pull_request(org, repo):
             new_pr_number = len(self.pulls) + 1
-            self.pulls.append(MockPullRequest(
-                flask.request.json["head"],
-                new_pr_number,
-                "open"
-            ))
+            self.pulls.append(MockPullRequest(flask.request.json["head"], new_pr_number, "open"))
             return {"number": new_pr_number}

-        @self.app.route("/repos/<org>/<repo>/pulls/<int:number>", methods=['PATCH'])
+        @self.app.route("/repos/<org>/<repo>/pulls/<int:number>", methods=["PATCH"])
         def update_pull_request(org, repo, number):
             for pull_request in self.pulls:
                 if pull_request.number == number:
-                    if 'state' in flask.request.json:
-                        pull_request.state = flask.request.json['state']
-                    return ('', 204)
-            return ('', 404)
+                    if "state" in flask.request.json:
+                        pull_request.state = flask.request.json["state"]
+                    return ("", 204)
+            return ("", 404)

-        @self.app.route("/repos/<org>/<repo>/issues/<number>/labels", methods=['GET', 'POST'])
-        @self.app.route("/repos/<org>/<repo>/issues/<number>/labels/<label>", methods=['DELETE'])
-        @self.app.route("/repos/<org>/<repo>/issues/<issue>/comments", methods=['GET', 'POST'])
+        @self.app.route("/repos/<org>/<repo>/issues/<number>/labels", methods=["GET", "POST"])
+        @self.app.route("/repos/<org>/<repo>/issues/<number>/labels/<label>", methods=["DELETE"])
+        @self.app.route("/repos/<org>/<repo>/issues/<issue>/comments", methods=["GET", "POST"])
         def other_requests(*args, **kwargs):
-            return ('', 204)
+            return ("", 204)


 class TestCleanUpBodyText(unittest.TestCase):
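Pieced together from the methods shown in the hunks above, a test drives the mock server roughly as follows (a sketch based only on the API visible here; the real harness wires this up elsewhere, e.g. in setUpModule):

# A usage sketch for MockGitHubAPIServer, using only methods shown above.
server = MockGitHubAPIServer(port=9000)
server.start()  # spins up the WSGI thread, then polls /ping until it answers
server.reset_server_state_with_pull_requests([
    MockPullRequest("servo:servo_export_18746", 10),
])
# ... exercise code that talks to http://localhost:9000 as if it were GitHub ...
server.shutdown()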
@@ -196,28 +187,22 @@ class TestCleanUpBodyText(unittest.TestCase):
         )
         self.assertEqual(
             "Subject\n\nBody text #<!-- nolink -->1",
-            SyncRun.clean_up_body_text(
-                "Subject\n\nBody text #1\n---<!-- Thank you for contributing"
-            ),
+            SyncRun.clean_up_body_text("Subject\n\nBody text #1\n---<!-- Thank you for contributing"),
         )
         self.assertEqual(
             "Subject\n\nNo dashes",
-            SyncRun.clean_up_body_text(
-                "Subject\n\nNo dashes<!-- Thank you for contributing"
-            ),
+            SyncRun.clean_up_body_text("Subject\n\nNo dashes<!-- Thank you for contributing"),
         )
         self.assertEqual(
             "Subject\n\nNo --- comment",
-            SyncRun.clean_up_body_text(
-                "Subject\n\nNo --- comment\n---Other stuff that"
-            ),
+            SyncRun.clean_up_body_text("Subject\n\nNo --- comment\n---Other stuff that"),
         )
         self.assertEqual(
             "Subject\n\n#<!-- nolink -->3 servo#<!-- nolink -->3 servo/servo#3",
             SyncRun.clean_up_body_text(
                 "Subject\n\n#3 servo#3 servo/servo#3",
             ),
-            "Only relative and bare issue reference links should be escaped."
+            "Only relative and bare issue reference links should be escaped.",
         )
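The expectations above pin down the behaviour: the body is cut at the PR-template comment and at the first "\n---", and bare or repo-relative issue references like "#3" and "servo#3" get an HTML comment injected so GitHub will not auto-link them, while fully qualified "servo/servo#3" is left alone. The real logic lives in SyncRun.clean_up_body_text; a toy version consistent with these cases might be:

import re

def clean_up_body_text_sketch(body: str) -> str:
    # Cut the contribution-template comment and anything after the first "\n---".
    body = body.split("<!-- Thank you for contributing")[0]
    body = body.split("\n---")[0]
    # Break auto-linking for bare ("#3") and repo-relative ("servo#3") references;
    # a word character or "/" immediately before the token marks a fully
    # qualified owner/repo#N reference, which is kept as-is.
    return re.sub(r"(?<![\w/])(\w*)#(\d+)", r"\1#<!-- nolink -->\2", body)

assert (
    clean_up_body_text_sketch("Subject\n\n#3 servo#3 servo/servo#3")
    == "Subject\n\n#<!-- nolink -->3 servo#<!-- nolink -->3 servo/servo#3"
)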
@@ -236,9 +221,7 @@ class TestApplyCommitsToWPT(unittest.TestCase):
         pull_request = SYNC.servo.get_pull_request(pr_number)
         step = CreateOrUpdateBranchForPRStep({"number": pr_number}, pull_request)

-        def get_applied_commits(
-            num_commits: int, applied_commits: list[Tuple[str, str]]
-        ):
+        def get_applied_commits(num_commits: int, applied_commits: list[Tuple[str, str]]):
             assert SYNC is not None
             repo = SYNC.local_wpt_repo
             log = ["log", "--oneline", f"-{num_commits}"]
@@ -252,17 +235,13 @@ class TestApplyCommitsToWPT(unittest.TestCase):

         applied_commits: list[Any] = []
         callback = partial(get_applied_commits, len(commits), applied_commits)
-        step._create_or_update_branch_for_pr(
-            SyncRun(SYNC, pull_request, None, None), commits, callback
-        )
+        step._create_or_update_branch_for_pr(SyncRun(SYNC, pull_request, None, None), commits, callback)

         expected_commits = [(commit["author"], commit["message"]) for commit in commits]
         self.assertListEqual(applied_commits, expected_commits)

     def test_simple_commit(self):
-        self.run_test(
-            45, [["test author <test@author>", "test commit message", "18746.diff"]]
-        )
+        self.run_test(45, [["test author <test@author>", "test commit message", "18746.diff"]])

     def test_two_commits(self):
         self.run_test(
@@ -299,9 +278,7 @@ class TestFullSyncRun(unittest.TestCase):
         assert SYNC is not None

         # Clean up any old files.
-        first_commit_hash = SYNC.local_servo_repo.run("rev-list", "HEAD").splitlines()[
-            -1
-        ]
+        first_commit_hash = SYNC.local_servo_repo.run("rev-list", "HEAD").splitlines()[-1]
         SYNC.local_servo_repo.run("reset", "--hard", first_commit_hash)
         SYNC.local_servo_repo.run("clean", "-fxd")
@@ -339,9 +316,7 @@ class TestFullSyncRun(unittest.TestCase):
         SYNC.local_servo_repo.run("reset", "--hard", orig_sha)
         return last_commit_sha

-    def run_test(
-        self, payload_file: str, diffs: list, existing_prs: list[MockPullRequest] = []
-    ):
+    def run_test(self, payload_file: str, diffs: list, existing_prs: list[MockPullRequest] = []):
         with open(os.path.join(TESTS_DIR, payload_file), encoding="utf-8") as file:
             payload = json.loads(file.read())
@@ -413,12 +388,8 @@ class TestFullSyncRun(unittest.TestCase):
         )

     def test_opened_new_mr_with_no_sync_signal(self):
-        self.assertListEqual(
-            self.run_test("opened-with-no-sync-signal.json", ["18746.diff"]), []
-        )
-        self.assertListEqual(
-            self.run_test("opened-with-no-sync-signal.json", ["non-wpt.diff"]), []
-        )
+        self.assertListEqual(self.run_test("opened-with-no-sync-signal.json", ["18746.diff"]), [])
+        self.assertListEqual(self.run_test("opened-with-no-sync-signal.json", ["non-wpt.diff"]), [])

     def test_opened_upstreamable_pr_not_applying_cleanly_to_upstream(self):
         self.assertListEqual(
@@ -459,7 +430,7 @@ class TestFullSyncRun(unittest.TestCase):
                 "RemoveBranchForPRStep:servo/wpt/servo_export_18746",
                 "CommentStep:servo/servo#18746:🤖 This change no longer contains upstreamable changes "
                 "to WPT; closed existing upstream pull request (wpt/wpt#1).",
-            ]
+            ],
         )

     def test_opened_upstreamable_pr_with_non_utf8_file_contents(self):
@@ -502,10 +473,7 @@ class TestFullSyncRun(unittest.TestCase):
                 ["18746.diff"],
                 [MockPullRequest("servo:servo_export_18746", 10)],
             ),
-            [
-                "ChangePRStep:wpt/wpt#10:closed",
-                "RemoveBranchForPRStep:servo/wpt/servo_export_18746"
-            ]
+            ["ChangePRStep:wpt/wpt#10:closed", "RemoveBranchForPRStep:servo/wpt/servo_export_18746"],
         )

     def test_synchronize_move_new_changes_to_preexisting_upstream_pr(self):
@@ -520,7 +488,7 @@ class TestFullSyncRun(unittest.TestCase):
                 "CreateOrUpdateBranchForPRStep:1:servo/wpt/servo_export_19612",
                 "CommentStep:servo/servo#19612:📝 Transplanted new upstreamable changes to existing "
                 "upstream WPT pull request (wpt/wpt#10).",
-            ]
+            ],
         )

     def test_synchronize_close_upstream_pr_after_new_changes_do_not_include_wpt(self):
@@ -537,7 +505,7 @@ class TestFullSyncRun(unittest.TestCase):
                 "RemoveBranchForPRStep:servo/wpt/servo_export_19612",
                 "CommentStep:servo/servo#19612:🤖 This change no longer contains upstreamable changes to WPT; "
                 "closed existing upstream pull request (wpt/wpt#11).",
-            ]
+            ],
         )

     def test_synchronize_open_upstream_pr_after_new_changes_include_wpt(self):
@@ -548,7 +516,7 @@ class TestFullSyncRun(unittest.TestCase):
                 "OpenPRStep:servo/wpt/servo_export_19612→wpt/wpt#1",
                 "CommentStep:servo/servo#19612:🤖 Opened new upstream WPT pull request "
                 "(wpt/wpt#1) with upstreamable changes.",
-            ]
+            ],
         )

     def test_synchronize_fail_to_update_preexisting_pr_after_new_changes_do_not_apply(
@@ -567,20 +535,17 @@ class TestFullSyncRun(unittest.TestCase):
                 "latest upstream WPT. Servo's copy of the Web Platform Tests may be out of sync.",
                 "CommentStep:wpt/wpt#11:🛠 Changes from the source pull request (servo/servo#19612) can "
                 "no longer be cleanly applied. Waiting for a new version of these changes downstream.",
-            ]
+            ],
         )

     def test_edited_with_upstream_pr(self):
         self.assertListEqual(
-            self.run_test(
-                "edited.json", ["wpt.diff"],
-                [MockPullRequest("servo:servo_export_19620", 10)]
-            ),
+            self.run_test("edited.json", ["wpt.diff"], [MockPullRequest("servo:servo_export_19620", 10)]),
             [
                 "ChangePRStep:wpt/wpt#10:open:A cool new title:Reference #<!--...[136]",
                 "CommentStep:servo/servo#19620:✍ Updated existing upstream WPT pull "
-                "request (wpt/wpt#10) title and body."
-            ]
+                "request (wpt/wpt#10) title and body.",
+            ],
         )

     def test_edited_with_no_upstream_pr(self):
@@ -590,15 +555,13 @@ class TestFullSyncRun(unittest.TestCase):
         self,
     ):
         self.assertListEqual(
-            self.run_test(
-                "synchronize-multiple.json", ["18746.diff", "non-wpt.diff", "wpt.diff"]
-            ),
+            self.run_test("synchronize-multiple.json", ["18746.diff", "non-wpt.diff", "wpt.diff"]),
             [
                 "CreateOrUpdateBranchForPRStep:2:servo/wpt/servo_export_19612",
                 "OpenPRStep:servo/wpt/servo_export_19612→wpt/wpt#1",
                 "CommentStep:servo/servo#19612:"
                 "🤖 Opened new upstream WPT pull request (wpt/wpt#1) with upstreamable changes.",
-            ]
+            ],
         )

     def test_synchronize_with_non_upstreamable_changes(self):
@@ -606,15 +569,8 @@ class TestFullSyncRun(unittest.TestCase):

     def test_merge_upstream_pr_after_merge(self):
         self.assertListEqual(
-            self.run_test(
-                "merged.json",
-                ["18746.diff"],
-                [MockPullRequest("servo:servo_export_19620", 100)]
-            ),
-            [
-                "MergePRStep:wpt/wpt#100",
-                "RemoveBranchForPRStep:servo/wpt/servo_export_19620"
-            ]
+            self.run_test("merged.json", ["18746.diff"], [MockPullRequest("servo:servo_export_19620", 100)]),
+            ["MergePRStep:wpt/wpt#100", "RemoveBranchForPRStep:servo/wpt/servo_export_19620"],
         )

     def test_pr_merged_no_upstream_pr(self):
@@ -644,8 +600,7 @@ def setUpModule():
     )

 def setup_mock_repo(repo_name, local_repo, default_branch: str):
-    subprocess.check_output(
-        ["cp", "-R", "-p", os.path.join(TESTS_DIR, repo_name), local_repo.path])
+    subprocess.check_output(["cp", "-R", "-p", os.path.join(TESTS_DIR, repo_name), local_repo.path])
     local_repo.run("init", "-b", default_branch)
     local_repo.run("add", ".")
     local_repo.run("commit", "-a", "-m", "Initial commit")
@@ -666,12 +621,16 @@ def run_tests():
     verbosity = 1 if logging.getLogger().level >= logging.WARN else 2

     def run_suite(test_case: Type[unittest.TestCase]):
-        return unittest.TextTestRunner(verbosity=verbosity).run(
-            unittest.TestLoader().loadTestsFromTestCase(test_case)
-        ).wasSuccessful()
+        return (
+            unittest.TextTestRunner(verbosity=verbosity)
+            .run(unittest.TestLoader().loadTestsFromTestCase(test_case))
+            .wasSuccessful()
+        )

-    return all([
-        run_suite(TestApplyCommitsToWPT),
-        run_suite(TestCleanUpBodyText),
-        run_suite(TestFullSyncRun),
-    ])
+    return all(
+        [
+            run_suite(TestApplyCommitsToWPT),
+            run_suite(TestCleanUpBodyText),
+            run_suite(TestFullSyncRun),
+        ]
+    )
@@ -5,6 +5,6 @@ index 10d52a0..92fb89d 100644
 @@ -8,3 +8,4 @@
  # except according to those terms.

- print('this is a python file')
+ print("this is a python file")
-+print('this is a change')
++print("this is a change")
@@ -7,4 +7,4 @@
 # option. This file may not be copied, modified, or distributed
 # except according to those terms.

-print('this is a python file')
+print("this is a python file")
@@ -15,11 +15,8 @@ from wptrunner import wptcommandline  # noqa: F401
 from . import WPT_PATH
 from . import manifestupdate

-TEST_ROOT = os.path.join(WPT_PATH, 'tests')
-META_ROOTS = [
-    os.path.join(WPT_PATH, 'meta'),
-    os.path.join(WPT_PATH, 'meta-legacy')
-]
+TEST_ROOT = os.path.join(WPT_PATH, "tests")
+META_ROOTS = [os.path.join(WPT_PATH, "meta"), os.path.join(WPT_PATH, "meta-legacy")]


 def do_sync(**kwargs) -> int:
@@ -28,8 +25,8 @@ def do_sync(**kwargs) -> int:
     # Commits should always be authored by the GitHub Actions bot.
     os.environ["GIT_AUTHOR_NAME"] = "Servo WPT Sync"
     os.environ["GIT_AUTHOR_EMAIL"] = "ghbot+wpt-sync@servo.org"
-    os.environ["GIT_COMMITTER_NAME"] = os.environ['GIT_AUTHOR_NAME']
-    os.environ["GIT_COMMITTER_EMAIL"] = os.environ['GIT_AUTHOR_EMAIL']
+    os.environ["GIT_COMMITTER_NAME"] = os.environ["GIT_AUTHOR_NAME"]
+    os.environ["GIT_COMMITTER_EMAIL"] = os.environ["GIT_AUTHOR_EMAIL"]

     print("Updating WPT from upstream...")
     run_update(**kwargs)
@@ -67,7 +64,7 @@ def remove_unused_metadata():
             dir_path = os.path.join(base_dir, dir_name)

             # Skip any known directories that are meta-metadata.
-            if dir_name == '.cache':
+            if dir_name == ".cache":
                 unused_dirs.append(dir_path)
                 continue
@@ -78,12 +75,11 @@ def remove_unused_metadata():

         for fname in files:
             # Skip any known files that are meta-metadata.
-            if not fname.endswith(".ini") or fname == '__dir__.ini':
+            if not fname.endswith(".ini") or fname == "__dir__.ini":
                 continue

             # Turn tests/wpt/meta/foo/bar.html.ini into tests/wpt/tests/foo/bar.html.
-            test_file = os.path.join(
-                TEST_ROOT, os.path.relpath(base_dir, meta_root), fname[:-4])
+            test_file = os.path.join(TEST_ROOT, os.path.relpath(base_dir, meta_root), fname[:-4])

             if not os.path.exists(test_file):
                 unused_files.append(os.path.join(base_dir, fname))
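As the comment in the hunk says, each metadata file maps back to the test it annotates by swapping the meta root for the tests root and dropping the trailing ".ini" (which is exactly 4 characters, hence fname[:-4]). A small self-contained check of that mapping, with illustrative paths:

import os

meta_root = "tests/wpt/meta"
base_dir = "tests/wpt/meta/foo"
fname = "bar.html.ini"
TEST_ROOT = "tests/wpt/tests"

# relpath(base_dir, meta_root) -> "foo"; fname[:-4] -> "bar.html"
test_file = os.path.join(TEST_ROOT, os.path.relpath(base_dir, meta_root), fname[:-4])
assert test_file == os.path.join(TEST_ROOT, "foo", "bar.html")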
@@ -106,10 +102,10 @@ def update_tests(**kwargs) -> int:
     kwargs["store_state"] = False

     wptcommandline.set_from_config(kwargs)
-    if hasattr(wptcommandline, 'check_paths'):
+    if hasattr(wptcommandline, "check_paths"):
         wptcommandline.check_paths(kwargs["test_paths"])

-    if kwargs.get('sync', False):
+    if kwargs.get("sync", False):
         return do_sync(**kwargs)

     return 0 if run_update(**kwargs) else 1