Use ruff to enforce Python code formatting (#37117)

Requires servo/servo#37045 for deps and config.

Testing: Not needed; the changes only reformat test scripts.
Fixes: servo/servo#37041

---------

Signed-off-by: zefr0x <zer0-x.7ty50@aleeas.com>
zefr0x 2025-05-26 14:54:43 +03:00 committed by GitHub
parent 41ecfb53a1
commit c96de69e80
67 changed files with 3021 additions and 3085 deletions
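
Everything below is the output of running ruff's formatter over these scripts. As a minimal sketch of how a checkout might verify the formatting locally (assuming only that ruff is installed and on PATH; the actual dependency pinning and configuration land in servo/servo#37045):

import subprocess
import sys


def check_formatting(paths: list[str]) -> int:
    """Run `ruff format --check`, which lists files that would be
    reformatted without modifying them; exit code 0 means clean."""
    result = subprocess.run(["ruff", "format", "--check", *paths])
    return result.returncode


if __name__ == "__main__":
    # Check the whole tree by default, or just the paths given.
    sys.exit(check_formatting(sys.argv[1:] or ["."]))

Dropping --check makes ruff format rewrite the files in place, which is the operation that produced the hunks below.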


@@ -16,43 +16,47 @@ SCRIPT_PATH = os.path.split(__file__)[0]
def main():
default_output_dir = os.path.join(SCRIPT_PATH, 'output')
default_cache_dir = os.path.join(SCRIPT_PATH, '.cache')
default_output_dir = os.path.join(SCRIPT_PATH, "output")
default_cache_dir = os.path.join(SCRIPT_PATH, ".cache")
parser = argparse.ArgumentParser(
description="Download buildbot metadata"
parser = argparse.ArgumentParser(description="Download buildbot metadata")
parser.add_argument(
"--index-url",
type=str,
default="https://build.servo.org/json",
help="the URL to get the JSON index data index from. Default: https://build.servo.org/json",
)
parser.add_argument("--index-url",
type=str,
default='https://build.servo.org/json',
help="the URL to get the JSON index data index from. "
"Default: https://build.servo.org/json")
parser.add_argument("--build-url",
type=str,
default='https://build.servo.org/json/builders/{}/builds/{}',
help="the URL to get the JSON build data from. "
"Default: https://build.servo.org/json/builders/{}/builds/{}")
parser.add_argument("--cache-dir",
type=str,
default=default_cache_dir,
help="the directory to cache JSON files in. Default: " + default_cache_dir)
parser.add_argument("--cache-name",
type=str,
default='build-{}-{}.json',
help="the filename to cache JSON data in. "
"Default: build-{}-{}.json")
parser.add_argument("--output-dir",
type=str,
default=default_output_dir,
help="the directory to save the CSV data to. Default: " + default_output_dir)
parser.add_argument("--output-name",
type=str,
default='builds-{}-{}.csv',
help="the filename to save the CSV data to. "
"Default: builds-{}-{}.csv")
parser.add_argument("--verbose", "-v",
action='store_true',
help="print every HTTP request")
parser.add_argument(
"--build-url",
type=str,
default="https://build.servo.org/json/builders/{}/builds/{}",
help="the URL to get the JSON build data from. Default: https://build.servo.org/json/builders/{}/builds/{}",
)
parser.add_argument(
"--cache-dir",
type=str,
default=default_cache_dir,
help="the directory to cache JSON files in. Default: " + default_cache_dir,
)
parser.add_argument(
"--cache-name",
type=str,
default="build-{}-{}.json",
help="the filename to cache JSON data in. Default: build-{}-{}.json",
)
parser.add_argument(
"--output-dir",
type=str,
default=default_output_dir,
help="the directory to save the CSV data to. Default: " + default_output_dir,
)
parser.add_argument(
"--output-name",
type=str,
default="builds-{}-{}.csv",
help="the filename to save the CSV data to. Default: builds-{}-{}.csv",
)
parser.add_argument("--verbose", "-v", action="store_true", help="print every HTTP request")
args = parser.parse_args()
os.makedirs(args.cache_dir, exist_ok=True)
@@ -63,7 +67,7 @@ def main():
if args.verbose:
print("Downloading index {}.".format(args.index_url))
with urlopen(args.index_url) as response:
index = json.loads(response.read().decode('utf-8'))
index = json.loads(response.read().decode("utf-8"))
builds = []
@@ -75,12 +79,11 @@ def main():
if args.verbose:
print("Downloading recent build {}.".format(recent_build_url))
with urlopen(recent_build_url) as response:
recent_build = json.loads(response.read().decode('utf-8'))
recent_build = json.loads(response.read().decode("utf-8"))
recent_build_number = recent_build["number"]
# Download each build, and convert to CSV
for build_number in range(0, recent_build_number):
# Rather annoyingly, we can't just use the Python http cache,
# because it doesn't cache 404 responses. So we roll our own.
cache_json_name = args.cache_name.format(builder, build_number)
@@ -96,7 +99,7 @@ def main():
print("Downloading build {}.".format(build_url))
try:
with urlopen(build_url) as response:
build = json.loads(response.read().decode('utf-8'))
build = json.loads(response.read().decode("utf-8"))
except HTTPError as e:
if e.code == 404:
build = {}
@@ -104,46 +107,46 @@ def main():
raise
# Don't cache current builds.
if build.get('currentStep'):
if build.get("currentStep"):
continue
with open(cache_json, 'w+') as f:
with open(cache_json, "w+") as f:
json.dump(build, f)
if 'times' in build:
if "times" in build:
builds.append(build)
years = {}
for build in builds:
build_date = date.fromtimestamp(build['times'][0])
build_date = date.fromtimestamp(build["times"][0])
years.setdefault(build_date.year, {}).setdefault(build_date.month, []).append(build)
for year, months in years.items():
for month, builds in months.items():
output_name = args.output_name.format(year, month)
output = os.path.join(args.output_dir, output_name)
# Create the CSV file.
if args.verbose:
print('Creating file {}.'.format(output))
with open(output, 'w+') as output_file:
print("Creating file {}.".format(output))
with open(output, "w+") as output_file:
output_csv = csv.writer(output_file)
# The CSV column names
output_csv.writerow([
'builder',
'buildNumber',
'buildTimestamp',
'stepName',
'stepText',
'stepNumber',
'stepStart',
'stepFinish'
])
output_csv.writerow(
[
"builder",
"buildNumber",
"buildTimestamp",
"stepName",
"stepText",
"stepNumber",
"stepStart",
"stepFinish",
]
)
for build in builds:
builder = build["builderName"]
build_number = build["number"]
build_timestamp = datetime.fromtimestamp(build["times"][0]).replace(microsecond=0)
@@ -152,20 +155,22 @@ def main():
for step in build["steps"]:
if step["isFinished"]:
step_name = step["name"]
step_text = ' '.join(step["text"])
step_text = " ".join(step["text"])
step_number = step["step_number"]
step_start = floor(step["times"][0])
step_finish = floor(step["times"][1])
output_csv.writerow([
builder,
build_number,
build_timestamp,
step_name,
step_text,
step_number,
step_start,
step_finish
])
output_csv.writerow(
[
builder,
build_number,
build_timestamp,
step_name,
step_text,
step_number,
step_start,
step_finish,
]
)
if __name__ == "__main__":


@@ -15,7 +15,7 @@ import sys
@contextmanager
def create_gecko_session():
try:
firefox_binary = os.environ['FIREFOX_BIN']
firefox_binary = os.environ["FIREFOX_BIN"]
except KeyError:
print("+=============================================================+")
print("| You must set the path to your firefox binary to FIREFOX_BIN |")
@@ -36,10 +36,7 @@ def generate_placeholder(testcase):
# use a placeholder with values = -1 to make Treeherder happy, and still be
# able to identify failed tests (successful tests have time >=0).
timings = {
"testcase": testcase,
"title": ""
}
timings = {"testcase": testcase, "title": ""}
timing_names = [
"navigationStart",
@@ -81,16 +78,9 @@ def run_gecko_test(testcase, url, date, timeout, is_async):
return generate_placeholder(testcase)
try:
timings = {
"testcase": testcase,
"title": driver.title.replace(",", "&#44;")
}
timings = {"testcase": testcase, "title": driver.title.replace(",", "&#44;")}
timings.update(json.loads(
driver.execute_script(
"return JSON.stringify(performance.timing)"
)
))
timings.update(json.loads(driver.execute_script("return JSON.stringify(performance.timing)")))
except Exception:
# We need to return a timing object no matter what happened.
# See the comment in generate_placeholder() for explanation
@@ -101,17 +91,14 @@ def run_gecko_test(testcase, url, date, timeout, is_async):
# TODO: the timeout is hardcoded
driver.implicitly_wait(5) # sec
driver.find_element_by_id("GECKO_TEST_DONE")
timings.update(json.loads(
driver.execute_script(
"return JSON.stringify(window.customTimers)"
)
))
timings.update(json.loads(driver.execute_script("return JSON.stringify(window.customTimers)")))
return [timings]
if __name__ == '__main__':
if __name__ == "__main__":
# Just for manual testing
from pprint import pprint
url = "http://localhost:8000/page_load_test/tp5n/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html"
pprint(run_gecko_test(url, 15))


@@ -23,14 +23,13 @@ SYSTEM = platform.system()
def load_manifest(filename):
with open(filename, 'r') as f:
with open(filename, "r") as f:
text = f.read()
return list(parse_manifest(text))
def parse_manifest(text):
lines = filter(lambda x: x != "" and not x.startswith("#"),
map(lambda x: x.strip(), text.splitlines()))
lines = filter(lambda x: x != "" and not x.startswith("#"), map(lambda x: x.strip(), text.splitlines()))
output = []
for line in lines:
if line.split(" ")[0] == "async":
@@ -46,21 +45,18 @@ def testcase_url(base, testcase):
# the server on port 80. To allow non-root users to run the test
# case, we take the URL to be relative to a base URL.
(scheme, netloc, path, query, fragment) = urlsplit(testcase)
relative_url = urlunsplit(('', '', '.' + path, query, fragment))
relative_url = urlunsplit(("", "", "." + path, query, fragment))
absolute_url = urljoin(base, relative_url)
return absolute_url
def execute_test(url, command, timeout):
try:
return subprocess.check_output(
command, stderr=subprocess.STDOUT, timeout=timeout
)
return subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
except subprocess.CalledProcessError as e:
print("Unexpected Fail:")
print(e)
print("You may want to re-run the test manually:\n{}"
.format(' '.join(command)))
print("You may want to re-run the test manually:\n{}".format(" ".join(command)))
except subprocess.TimeoutExpired:
print("Test FAILED due to timeout: {}".format(url))
return ""
@@ -74,22 +70,21 @@ def run_servo_test(testcase, url, date, timeout, is_async):
ua_script_path = "{}/user-agent-js".format(os.getcwd())
command = [
"../../../target/release/servo", url,
"../../../target/release/servo",
url,
"--userscripts=" + ua_script_path,
"--headless",
"-x", "-o", "output.png"
"-x",
"-o",
"output.png",
]
log = ""
try:
log = subprocess.check_output(
command, stderr=subprocess.STDOUT, timeout=timeout
)
log = subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
except subprocess.CalledProcessError as e:
print("Unexpected Fail:")
print(e)
print("You may want to re-run the test manually:\n{}".format(
' '.join(command)
))
print("You may want to re-run the test manually:\n{}".format(" ".join(command)))
except subprocess.TimeoutExpired:
print("Test FAILED due to timeout: {}".format(testcase))
return parse_log(log, testcase, url, date)
@@ -100,7 +95,7 @@ def parse_log(log, testcase, url, date):
block = []
copy = False
for line_bytes in log.splitlines():
line = line_bytes.decode('utf-8')
line = line_bytes.decode("utf-8")
if line.strip() == ("[PERF] perf block start"):
copy = True
@@ -119,10 +114,10 @@ def parse_log(log, testcase, url, date):
except ValueError:
print("[DEBUG] failed to parse the following line:")
print(line)
print('[DEBUG] log:')
print('-----')
print("[DEBUG] log:")
print("-----")
print(log)
print('-----')
print("-----")
return None
if key == "testcase" or key == "title":
@@ -133,10 +128,12 @@ def parse_log(log, testcase, url, date):
return timing
def valid_timing(timing, url=None):
if (timing is None
or testcase is None
or timing.get('title') == 'Error loading page'
or timing.get('testcase') != url):
if (
timing is None
or testcase is None
or timing.get("title") == "Error loading page"
or timing.get("testcase") != url
):
return False
else:
return True
@@ -178,10 +175,10 @@ def parse_log(log, testcase, url, date):
# Set the testcase field to contain the original testcase name,
# rather than the url.
def set_testcase(timing, testcase=None, date=None):
timing['testcase'] = testcase
timing['system'] = SYSTEM
timing['machine'] = MACHINE
timing['date'] = date
timing["testcase"] = testcase
timing["system"] = SYSTEM
timing["machine"] = MACHINE
timing["date"] = date
return timing
valid_timing_for_case = partial(valid_timing, url=url)
@@ -190,10 +187,10 @@ def parse_log(log, testcase, url, date):
if len(timings) == 0:
print("Didn't find any perf data in the log, test timeout?")
print('[DEBUG] log:')
print('-----')
print("[DEBUG] log:")
print("-----")
print(log)
print('-----')
print("-----")
return [create_placeholder(testcase)]
else:
@@ -204,22 +201,25 @@ def filter_result_by_manifest(result_json, manifest, base):
filtered = []
for name, is_async in manifest:
url = testcase_url(base, name)
match = [tc for tc in result_json if tc['testcase'] == url]
match = [tc for tc in result_json if tc["testcase"] == url]
if len(match) == 0:
raise Exception(("Missing test result: {}. This will cause a "
"discontinuity in the treeherder graph, "
"so we won't submit this data.").format(name))
raise Exception(
(
"Missing test result: {}. This will cause a "
"discontinuity in the treeherder graph, "
"so we won't submit this data."
).format(name)
)
filtered += match
return filtered
def take_result_median(result_json, expected_runs):
median_results = []
for k, g in itertools.groupby(result_json, lambda x: x['testcase']):
for k, g in itertools.groupby(result_json, lambda x: x["testcase"]):
group = list(g)
if len(group) != expected_runs:
print(("Warning: Not enough test data for {},"
" maybe some runs failed?").format(k))
print(("Warning: Not enough test data for {}, maybe some runs failed?").format(k))
median_result = {}
for k, _ in group[0].items():
@@ -227,8 +227,7 @@ def take_result_median(result_json, expected_runs):
median_result[k] = group[0][k]
else:
try:
median_result[k] = median([x[k] for x in group
if x[k] is not None])
median_result[k] = median([x[k] for x in group if x[k] is not None])
except StatisticsError:
median_result[k] = -1
median_results.append(median_result)
@@ -236,72 +235,65 @@ def take_result_median(result_json, expected_runs):
def save_result_json(results, filename, manifest, expected_runs, base):
results = filter_result_by_manifest(results, manifest, base)
results = take_result_median(results, expected_runs)
if len(results) == 0:
with open(filename, 'w') as f:
json.dump("No test result found in the log. All tests timeout?",
f, indent=2)
with open(filename, "w") as f:
json.dump("No test result found in the log. All tests timeout?", f, indent=2)
else:
with open(filename, 'w') as f:
with open(filename, "w") as f:
json.dump(results, f, indent=2)
print("Result saved to {}".format(filename))
def save_result_csv(results, filename, manifest, expected_runs, base):
fieldnames = [
'system',
'machine',
'date',
'testcase',
'title',
'connectEnd',
'connectStart',
'domComplete',
'domContentLoadedEventEnd',
'domContentLoadedEventStart',
'domInteractive',
'domLoading',
'domainLookupEnd',
'domainLookupStart',
'fetchStart',
'loadEventEnd',
'loadEventStart',
'navigationStart',
'redirectEnd',
'redirectStart',
'requestStart',
'responseEnd',
'responseStart',
'secureConnectionStart',
'unloadEventEnd',
'unloadEventStart',
"system",
"machine",
"date",
"testcase",
"title",
"connectEnd",
"connectStart",
"domComplete",
"domContentLoadedEventEnd",
"domContentLoadedEventStart",
"domInteractive",
"domLoading",
"domainLookupEnd",
"domainLookupStart",
"fetchStart",
"loadEventEnd",
"loadEventStart",
"navigationStart",
"redirectEnd",
"redirectStart",
"requestStart",
"responseEnd",
"responseStart",
"secureConnectionStart",
"unloadEventEnd",
"unloadEventStart",
]
successes = [r for r in results if r['domComplete'] != -1]
successes = [r for r in results if r["domComplete"] != -1]
with open(filename, 'w', encoding='utf-8') as csvfile:
with open(filename, "w", encoding="utf-8") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames)
writer.writeheader()
writer.writerows(successes)
def format_result_summary(results):
failures = list(filter(lambda x: x['domComplete'] == -1, results))
failures = list(filter(lambda x: x["domComplete"] == -1, results))
result_log = """
========================================
Total {total} tests; {suc} succeeded, {fail} failed.
Failure summary:
""".format(
total=len(results),
suc=len(list(filter(lambda x: x['domComplete'] != -1, results))),
fail=len(failures)
)
uniq_failures = list(set(map(lambda x: x['testcase'], failures)))
""".format(total=len(results), suc=len(list(filter(lambda x: x["domComplete"] != -1, results))), fail=len(failures))
uniq_failures = list(set(map(lambda x: x["testcase"], failures)))
for failure in uniq_failures:
result_log += " - {}\n".format(failure)
@@ -311,40 +303,40 @@ Failure summary:
def main():
parser = argparse.ArgumentParser(
description="Run page load test on servo"
parser = argparse.ArgumentParser(description="Run page load test on servo")
parser.add_argument("tp5_manifest", help="the test manifest in tp5 format")
parser.add_argument("output_file", help="filename for the output json")
parser.add_argument(
"--base",
type=str,
default="http://localhost:8000/",
help="the base URL for tests. Default: http://localhost:8000/",
)
parser.add_argument("--runs", type=int, default=20, help="number of runs for each test case. Defult: 20")
parser.add_argument(
"--timeout",
type=int,
default=300, # 5 min
help=("kill the test if not finished in time (sec). Default: 5 min"),
)
parser.add_argument(
"--date",
type=str,
default=None, # 5 min
help=("the date to use in the CSV file."),
)
parser.add_argument(
"--engine",
type=str,
default="servo",
help=("The engine to run the tests on. Currently only servo and gecko are supported."),
)
parser.add_argument("tp5_manifest",
help="the test manifest in tp5 format")
parser.add_argument("output_file",
help="filename for the output json")
parser.add_argument("--base",
type=str,
default='http://localhost:8000/',
help="the base URL for tests. Default: http://localhost:8000/")
parser.add_argument("--runs",
type=int,
default=20,
help="number of runs for each test case. Defult: 20")
parser.add_argument("--timeout",
type=int,
default=300, # 5 min
help=("kill the test if not finished in time (sec)."
" Default: 5 min"))
parser.add_argument("--date",
type=str,
default=None, # 5 min
help=("the date to use in the CSV file."))
parser.add_argument("--engine",
type=str,
default='servo',
help=("The engine to run the tests on. Currently only"
" servo and gecko are supported."))
args = parser.parse_args()
if args.engine == 'servo':
if args.engine == "servo":
run_test = run_servo_test
elif args.engine == 'gecko':
elif args.engine == "gecko":
import gecko_driver # Load this only when we need gecko test
run_test = gecko_driver.run_gecko_test
date = args.date or DATE
try:
@@ -354,9 +346,7 @@ def main():
for testcase, is_async in testcases:
url = testcase_url(args.base, testcase)
for run in range(args.runs):
print("Running test {}/{} on {}".format(run + 1,
args.runs,
url))
print("Running test {}/{} on {}".format(run + 1, args.runs, url))
# results will be a mixure of timings dict and testcase strings
# testcase string indicates a failed test
results += run_test(testcase, url, date, args.timeout, is_async)
@@ -364,7 +354,7 @@ def main():
# TODO: Record and analyze other performance.timing properties
print(format_result_summary(results))
if args.output_file.endswith('.csv'):
if args.output_file.endswith(".csv"):
save_result_csv(results, args.output_file, testcases, args.runs, args.base)
else:
save_result_json(results, args.output_file, testcases, args.runs, args.base)


@@ -10,13 +10,14 @@ import boto3
def main():
parser = argparse.ArgumentParser(
description=("Set the policy of the servo-perf bucket. "
"Remember to set your S3 credentials "
"https://github.com/boto/boto3"))
description=(
"Set the policy of the servo-perf bucket. Remember to set your S3 credentials https://github.com/boto/boto3"
)
)
parser.parse_args()
s3 = boto3.resource('s3')
BUCKET = 'servo-perf'
s3 = boto3.resource("s3")
BUCKET = "servo-perf"
POLICY = """{
"Version":"2012-10-17",
"Statement":[


@@ -11,8 +11,7 @@ import operator
import os
import random
import string
from thclient import (TreeherderClient, TreeherderResultSetCollection,
TreeherderJobCollection)
from thclient import TreeherderClient, TreeherderResultSetCollection, TreeherderJobCollection
import time
from runner import format_result_summary
@@ -24,33 +23,28 @@ def geometric_mean(iterable):
def format_testcase_name(name):
temp = name.replace('http://localhost:8000/page_load_test/', '')
temp = temp.replace('http://localhost:8000/tp6/', '')
temp = temp.split('/')[0]
temp = name.replace("http://localhost:8000/page_load_test/", "")
temp = temp.replace("http://localhost:8000/tp6/", "")
temp = temp.split("/")[0]
temp = temp[0:80]
return temp
def format_perf_data(perf_json, engine='servo'):
def format_perf_data(perf_json, engine="servo"):
suites = []
measurement = "domComplete" # Change this to an array when we have more
def get_time_from_nav_start(timings, measurement):
return timings[measurement] - timings['navigationStart']
return timings[measurement] - timings["navigationStart"]
measurementFromNavStart = partial(get_time_from_nav_start,
measurement=measurement)
measurementFromNavStart = partial(get_time_from_nav_start, measurement=measurement)
if (engine == 'gecko'):
name = 'gecko.{}'.format(measurement)
if engine == "gecko":
name = "gecko.{}".format(measurement)
else:
name = measurement
suite = {
"name": name,
"value": geometric_mean(map(measurementFromNavStart, perf_json)),
"subtests": []
}
suite = {"name": name, "value": geometric_mean(map(measurementFromNavStart, perf_json)), "subtests": []}
for testcase in perf_json:
if measurementFromNavStart(testcase) < 0:
value = -1
@@ -58,10 +52,7 @@ def format_perf_data(perf_json, engine='servo'):
else:
value = measurementFromNavStart(testcase)
suite["subtests"].append({
"name": format_testcase_name(testcase["testcase"]),
"value": value
})
suite["subtests"].append({"name": format_testcase_name(testcase["testcase"]), "value": value})
suites.append(suite)
@@ -69,7 +60,7 @@ def format_perf_data(perf_json, engine='servo'):
"performance_data": {
# https://bugzilla.mozilla.org/show_bug.cgi?id=1271472
"framework": {"name": "servo-perf"},
"suites": suites
"suites": suites,
}
}
@@ -82,20 +73,20 @@ def create_resultset_collection(dataset):
for data in dataset:
trs = trsc.get_resultset()
trs.add_push_timestamp(data['push_timestamp'])
trs.add_revision(data['revision'])
trs.add_author(data['author'])
trs.add_push_timestamp(data["push_timestamp"])
trs.add_revision(data["revision"])
trs.add_author(data["author"])
# TODO: figure out where type is used
# trs.add_type(data['type'])
revisions = []
for rev in data['revisions']:
for rev in data["revisions"]:
tr = trs.get_revision()
tr.add_revision(rev['revision'])
tr.add_author(rev['author'])
tr.add_comment(rev['comment'])
tr.add_repository(rev['repository'])
tr.add_revision(rev["revision"])
tr.add_author(rev["author"])
tr.add_comment(rev["comment"])
tr.add_repository(rev["repository"])
revisions.append(tr)
trs.add_revisions(revisions)
@@ -114,46 +105,42 @@ def create_job_collection(dataset):
for data in dataset:
tj = tjc.get_job()
tj.add_revision(data['revision'])
tj.add_project(data['project'])
tj.add_coalesced_guid(data['job']['coalesced'])
tj.add_job_guid(data['job']['job_guid'])
tj.add_job_name(data['job']['name'])
tj.add_job_symbol(data['job']['job_symbol'])
tj.add_group_name(data['job']['group_name'])
tj.add_group_symbol(data['job']['group_symbol'])
tj.add_description(data['job']['desc'])
tj.add_product_name(data['job']['product_name'])
tj.add_state(data['job']['state'])
tj.add_result(data['job']['result'])
tj.add_reason(data['job']['reason'])
tj.add_who(data['job']['who'])
tj.add_tier(data['job']['tier'])
tj.add_submit_timestamp(data['job']['submit_timestamp'])
tj.add_start_timestamp(data['job']['start_timestamp'])
tj.add_end_timestamp(data['job']['end_timestamp'])
tj.add_machine(data['job']['machine'])
tj.add_revision(data["revision"])
tj.add_project(data["project"])
tj.add_coalesced_guid(data["job"]["coalesced"])
tj.add_job_guid(data["job"]["job_guid"])
tj.add_job_name(data["job"]["name"])
tj.add_job_symbol(data["job"]["job_symbol"])
tj.add_group_name(data["job"]["group_name"])
tj.add_group_symbol(data["job"]["group_symbol"])
tj.add_description(data["job"]["desc"])
tj.add_product_name(data["job"]["product_name"])
tj.add_state(data["job"]["state"])
tj.add_result(data["job"]["result"])
tj.add_reason(data["job"]["reason"])
tj.add_who(data["job"]["who"])
tj.add_tier(data["job"]["tier"])
tj.add_submit_timestamp(data["job"]["submit_timestamp"])
tj.add_start_timestamp(data["job"]["start_timestamp"])
tj.add_end_timestamp(data["job"]["end_timestamp"])
tj.add_machine(data["job"]["machine"])
tj.add_build_info(
data['job']['build_platform']['os_name'],
data['job']['build_platform']['platform'],
data['job']['build_platform']['architecture']
data["job"]["build_platform"]["os_name"],
data["job"]["build_platform"]["platform"],
data["job"]["build_platform"]["architecture"],
)
tj.add_machine_info(
data['job']['machine_platform']['os_name'],
data['job']['machine_platform']['platform'],
data['job']['machine_platform']['architecture']
data["job"]["machine_platform"]["os_name"],
data["job"]["machine_platform"]["platform"],
data["job"]["machine_platform"]["architecture"],
)
tj.add_option_collection(data['job']['option_collection'])
tj.add_option_collection(data["job"]["option_collection"])
for artifact_data in data['job']['artifacts']:
tj.add_artifact(
artifact_data['name'],
artifact_data['type'],
artifact_data['blob']
)
for artifact_data in data["job"]["artifacts"]:
tj.add_artifact(artifact_data["name"], artifact_data["type"], artifact_data["blob"])
tjc.add(tj)
return tjc
@@ -161,30 +148,28 @@ def create_job_collection(dataset):
# TODO: refactor this big function to smaller chunks
def submit(perf_data, failures, revision, summary, engine):
print("[DEBUG] failures:")
print(list(map(lambda x: x['testcase'], failures)))
print(list(map(lambda x: x["testcase"], failures)))
author = "{} <{}>".format(revision['author']['name'],
revision['author']['email'])
author = "{} <{}>".format(revision["author"]["name"], revision["author"]["email"])
dataset = [
{
# The top-most revision in the list of commits for a push.
'revision': revision['commit'],
'author': author,
'push_timestamp': int(revision['author']['timestamp']),
'type': 'push',
"revision": revision["commit"],
"author": author,
"push_timestamp": int(revision["author"]["timestamp"]),
"type": "push",
# a list of revisions associated with the resultset. There should
# be at least one.
'revisions': [
"revisions": [
{
'comment': revision['subject'],
'revision': revision['commit'],
'repository': 'servo',
'author': author
"comment": revision["subject"],
"revision": revision["commit"],
"repository": "servo",
"author": author,
}
]
],
}
]
@@ -195,158 +180,129 @@ def submit(perf_data, failures, revision, summary, engine):
# if len(failures) > 0:
# result = "testfailed"
hashlen = len(revision['commit'])
job_guid = ''.join(
random.choice(string.ascii_letters + string.digits) for i in range(hashlen)
)
hashlen = len(revision["commit"])
job_guid = "".join(random.choice(string.ascii_letters + string.digits) for i in range(hashlen))
if (engine == "gecko"):
if engine == "gecko":
project = "servo"
job_symbol = 'PLG'
group_symbol = 'SPG'
group_name = 'Servo Perf on Gecko'
job_symbol = "PLG"
group_symbol = "SPG"
group_name = "Servo Perf on Gecko"
else:
project = "servo"
job_symbol = 'PL'
group_symbol = 'SP'
group_name = 'Servo Perf'
job_symbol = "PL"
group_symbol = "SP"
group_name = "Servo Perf"
dataset = [
{
'project': project,
'revision': revision['commit'],
'job': {
'job_guid': job_guid,
'product_name': project,
'reason': 'scheduler',
"project": project,
"revision": revision["commit"],
"job": {
"job_guid": job_guid,
"product_name": project,
"reason": "scheduler",
# TODO: What is `who` for?
'who': 'Servo',
'desc': 'Servo Page Load Time Tests',
'name': 'Servo Page Load Time',
"who": "Servo",
"desc": "Servo Page Load Time Tests",
"name": "Servo Page Load Time",
# The symbol representing the job displayed in
# treeherder.allizom.org
'job_symbol': job_symbol,
"job_symbol": job_symbol,
# The symbol representing the job group in
# treeherder.allizom.org
'group_symbol': group_symbol,
'group_name': group_name,
"group_symbol": group_symbol,
"group_name": group_name,
# TODO: get the real timing from the test runner
'submit_timestamp': str(int(time.time())),
'start_timestamp': str(int(time.time())),
'end_timestamp': str(int(time.time())),
'state': 'completed',
'result': result, # "success" or "testfailed"
'machine': 'local-machine',
"submit_timestamp": str(int(time.time())),
"start_timestamp": str(int(time.time())),
"end_timestamp": str(int(time.time())),
"state": "completed",
"result": result, # "success" or "testfailed"
"machine": "local-machine",
# TODO: read platform from test result
'build_platform': {
'platform': 'linux64',
'os_name': 'linux',
'architecture': 'x86_64'
},
'machine_platform': {
'platform': 'linux64',
'os_name': 'linux',
'architecture': 'x86_64'
},
'option_collection': {'opt': True},
"build_platform": {"platform": "linux64", "os_name": "linux", "architecture": "x86_64"},
"machine_platform": {"platform": "linux64", "os_name": "linux", "architecture": "x86_64"},
"option_collection": {"opt": True},
# jobs can belong to different tiers
# setting the tier here will determine which tier the job
# belongs to. However, if a job is set as Tier of 1, but
# belongs to the Tier 2 profile on the server, it will still
# be saved as Tier 2.
'tier': 1,
"tier": 1,
# the ``name`` of the log can be the default of "buildbot_text"
# however, you can use a custom name. See below.
# TODO: point this to the log when we have them uploaded to S3
'log_references': [
{
'url': 'TBD',
'name': 'test log'
}
],
"log_references": [{"url": "TBD", "name": "test log"}],
# The artifact can contain any kind of structured data
# associated with a test.
'artifacts': [
"artifacts": [
{
'type': 'json',
'name': 'performance_data',
"type": "json",
"name": "performance_data",
# TODO: include the job_guid when the runner actually
# generates one
# 'job_guid': job_guid,
'blob': perf_data
"blob": perf_data,
},
{
'type': 'json',
'name': 'Job Info',
"type": "json",
"name": "Job Info",
# 'job_guid': job_guid,
"blob": {
"job_details": [
{
"content_type": "raw_html",
"title": "Result Summary",
"value": summary
}
]
}
}
"job_details": [{"content_type": "raw_html", "title": "Result Summary", "value": summary}]
},
},
],
# List of job guids that were coalesced to this job
'coalesced': []
}
"coalesced": [],
},
}
]
tjc = create_job_collection(dataset)
# TODO: extract this read credential code out of this function.
cred = {
'client_id': os.environ['TREEHERDER_CLIENT_ID'],
'secret': os.environ['TREEHERDER_CLIENT_SECRET']
}
cred = {"client_id": os.environ["TREEHERDER_CLIENT_ID"], "secret": os.environ["TREEHERDER_CLIENT_SECRET"]}
client = TreeherderClient(server_url='https://treeherder.mozilla.org',
client_id=cred['client_id'],
secret=cred['secret'])
client = TreeherderClient(
server_url="https://treeherder.mozilla.org", client_id=cred["client_id"], secret=cred["secret"]
)
# data structure validation is automatically performed here, if validation
# fails a TreeherderClientError is raised
client.post_collection('servo', trsc)
client.post_collection('servo', tjc)
client.post_collection("servo", trsc)
client.post_collection("servo", tjc)
def main():
parser = argparse.ArgumentParser(
description=("Submit Servo performance data to Perfherder. "
"Remember to set your Treeherder credential as environment"
" variable \'TREEHERDER_CLIENT_ID\' and "
"\'TREEHERDER_CLIENT_SECRET\'"))
parser.add_argument("perf_json",
help="the output json from runner")
parser.add_argument("revision_json",
help="the json containing the servo revision data")
parser.add_argument("--engine",
type=str,
default='servo',
help=("The engine to run the tests on. Currently only"
" servo and gecko are supported."))
description=(
"Submit Servo performance data to Perfherder. "
"Remember to set your Treeherder credential as environment"
" variable 'TREEHERDER_CLIENT_ID' and "
"'TREEHERDER_CLIENT_SECRET'"
)
)
parser.add_argument("perf_json", help="the output json from runner")
parser.add_argument("revision_json", help="the json containing the servo revision data")
parser.add_argument(
"--engine",
type=str,
default="servo",
help=("The engine to run the tests on. Currently only servo and gecko are supported."),
)
args = parser.parse_args()
with open(args.perf_json, 'r') as f:
with open(args.perf_json, "r") as f:
result_json = json.load(f)
with open(args.revision_json, 'r') as f:
with open(args.revision_json, "r") as f:
revision = json.load(f)
perf_data = format_perf_data(result_json, args.engine)
failures = list(filter(lambda x: x['domComplete'] == -1, result_json))
summary = format_result_summary(result_json).replace('\n', '<br/>')
failures = list(filter(lambda x: x["domComplete"] == -1, result_json))
summary = format_result_summary(result_json).replace("\n", "<br/>")
submit(perf_data, failures, revision, summary, args.engine)
print("Done!")


@@ -10,17 +10,16 @@ import boto3
def main():
parser = argparse.ArgumentParser(
description=("Submit Servo performance data to S3. "
"Remember to set your S3 credentials "
"https://github.com/boto/boto3"))
parser.add_argument("perf_file",
help="the output CSV file from runner")
parser.add_argument("perf_key",
help="the S3 key to upload to")
description=(
"Submit Servo performance data to S3. Remember to set your S3 credentials https://github.com/boto/boto3"
)
)
parser.add_argument("perf_file", help="the output CSV file from runner")
parser.add_argument("perf_key", help="the S3 key to upload to")
args = parser.parse_args()
s3 = boto3.client('s3')
BUCKET = 'servo-perf'
s3 = boto3.client("s3")
BUCKET = "servo-perf"
s3.upload_file(args.perf_file, BUCKET, args.perf_key)
print("Done!")


@@ -16,16 +16,16 @@ args = parser.parse_args()
def load_data(filename):
with open(filename, 'r') as f:
with open(filename, "r") as f:
results = {}
totals = {}
counts = {}
records = json.load(f)
for record in records:
key = record.get('testcase')
value = record.get('domComplete') - record.get('domLoading')
totals[key] = totals.get('key', 0) + value
counts[key] = counts.get('key', 0) + 1
key = record.get("testcase")
value = record.get("domComplete") - record.get("domLoading")
totals[key] = totals.get("key", 0) + value
counts[key] = counts.get("key", 0) + 1
results[key] = round(totals[key] / counts[key])
return results
@@ -34,10 +34,10 @@ data1 = load_data(args.file1)
data2 = load_data(args.file2)
keys = set(data1.keys()).union(data2.keys())
BLUE = '\033[94m'
GREEN = '\033[92m'
WARNING = '\033[93m'
END = '\033[0m'
BLUE = "\033[94m"
GREEN = "\033[92m"
WARNING = "\033[93m"
END = "\033[0m"
total1 = 0


@@ -10,7 +10,7 @@ import pytest
def test_log_parser():
mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
mock_log = b'''
mock_log = b"""
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
[PERF],navigationStart,1460358376
@@ -36,38 +36,40 @@ def test_log_parser():
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
"""
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 1460358376,
"unloadEventStart": None,
"unloadEventEnd": None,
"redirectStart": None,
"redirectEnd": None,
"fetchStart": None,
"domainLookupStart": None,
"domainLookupEnd": None,
"connectStart": None,
"connectEnd": None,
"secureConnectionStart": None,
"requestStart": None,
"responseStart": None,
"responseEnd": None,
"domLoading": 1460358376000,
"domInteractive": 1460358388000,
"domContentLoadedEventStart": 1460358388000,
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None
}]
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 1460358376,
"unloadEventStart": None,
"unloadEventEnd": None,
"redirectStart": None,
"redirectEnd": None,
"fetchStart": None,
"domainLookupStart": None,
"domainLookupEnd": None,
"connectStart": None,
"connectEnd": None,
"secureConnectionStart": None,
"requestStart": None,
"responseStart": None,
"responseEnd": None,
"domLoading": 1460358376000,
"domInteractive": 1460358388000,
"domContentLoadedEventStart": 1460358388000,
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None,
}
]
result = runner.parse_log(mock_log, mock_url)
assert (expected == list(result))
assert expected == list(result)
def test_log_parser_complex():
mock_log = b'''
mock_log = b"""
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/content.html
[PERF],navigationStart,1460358300
@@ -119,38 +121,40 @@ Some other js error logs here
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
"""
mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 1460358376,
"unloadEventStart": None,
"unloadEventEnd": None,
"redirectStart": None,
"redirectEnd": None,
"fetchStart": None,
"domainLookupStart": None,
"domainLookupEnd": None,
"connectStart": None,
"connectEnd": None,
"secureConnectionStart": None,
"requestStart": None,
"responseStart": None,
"responseEnd": None,
"domLoading": 1460358376000,
"domInteractive": 1460358388000,
"domContentLoadedEventStart": 1460358388000,
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None
}]
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"navigationStart": 1460358376,
"unloadEventStart": None,
"unloadEventEnd": None,
"redirectStart": None,
"redirectEnd": None,
"fetchStart": None,
"domainLookupStart": None,
"domainLookupEnd": None,
"connectStart": None,
"connectEnd": None,
"secureConnectionStart": None,
"requestStart": None,
"responseStart": None,
"responseEnd": None,
"domLoading": 1460358376000,
"domInteractive": 1460358388000,
"domContentLoadedEventStart": 1460358388000,
"domContentLoadedEventEnd": 1460358388000,
"domComplete": 1460358389000,
"loadEventStart": None,
"loadEventEnd": None,
}
]
result = runner.parse_log(mock_log, mock_url)
assert (expected == list(result))
assert expected == list(result)
def test_log_parser_empty():
mock_log = b'''
mock_log = b"""
[PERF] perf block start
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
@@ -158,75 +162,79 @@ def test_log_parser_empty():
[PERF]BROKEN!!!!!!!!!1
[PERF]BROKEN!!!!!!!!!1
[PERF] perf block end
'''
"""
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"title": "",
"navigationStart": 0,
"unloadEventStart": -1,
"unloadEventEnd": -1,
"redirectStart": -1,
"redirectEnd": -1,
"fetchStart": -1,
"domainLookupStart": -1,
"domainLookupEnd": -1,
"connectStart": -1,
"connectEnd": -1,
"secureConnectionStart": -1,
"requestStart": -1,
"responseStart": -1,
"responseEnd": -1,
"domLoading": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1
}]
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"title": "",
"navigationStart": 0,
"unloadEventStart": -1,
"unloadEventEnd": -1,
"redirectStart": -1,
"redirectEnd": -1,
"fetchStart": -1,
"domainLookupStart": -1,
"domainLookupEnd": -1,
"connectStart": -1,
"connectEnd": -1,
"secureConnectionStart": -1,
"requestStart": -1,
"responseStart": -1,
"responseEnd": -1,
"domLoading": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1,
}
]
result = runner.parse_log(mock_log, mock_testcase)
assert (expected == list(result))
assert expected == list(result)
def test_log_parser_error():
mock_log = b'Nothing here! Test failed!'
mock_log = b"Nothing here! Test failed!"
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"title": "",
"navigationStart": 0,
"unloadEventStart": -1,
"unloadEventEnd": -1,
"redirectStart": -1,
"redirectEnd": -1,
"fetchStart": -1,
"domainLookupStart": -1,
"domainLookupEnd": -1,
"connectStart": -1,
"connectEnd": -1,
"secureConnectionStart": -1,
"requestStart": -1,
"responseStart": -1,
"responseEnd": -1,
"domLoading": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1
}]
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"title": "",
"navigationStart": 0,
"unloadEventStart": -1,
"unloadEventEnd": -1,
"redirectStart": -1,
"redirectEnd": -1,
"fetchStart": -1,
"domainLookupStart": -1,
"domainLookupEnd": -1,
"connectStart": -1,
"connectEnd": -1,
"secureConnectionStart": -1,
"requestStart": -1,
"responseStart": -1,
"responseEnd": -1,
"domLoading": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1,
}
]
result = runner.parse_log(mock_log, mock_testcase)
assert (expected == list(result))
assert expected == list(result)
def test_log_parser_bad_testcase_name():
mock_testcase = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
# Notice the testcase is about:blank, servo crashed
mock_log = b'''
mock_log = b"""
[PERF] perf block start
[PERF],testcase,about:blank
[PERF],navigationStart,1460358376
@@ -252,182 +260,196 @@ def test_log_parser_bad_testcase_name():
[PERF],loadEventEnd,undefined
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
"""
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"title": "",
"navigationStart": 0,
"unloadEventStart": -1,
"unloadEventEnd": -1,
"redirectStart": -1,
"redirectEnd": -1,
"fetchStart": -1,
"domainLookupStart": -1,
"domainLookupEnd": -1,
"connectStart": -1,
"connectEnd": -1,
"secureConnectionStart": -1,
"requestStart": -1,
"responseStart": -1,
"responseEnd": -1,
"domLoading": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1
}]
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"title": "",
"navigationStart": 0,
"unloadEventStart": -1,
"unloadEventEnd": -1,
"redirectStart": -1,
"redirectEnd": -1,
"fetchStart": -1,
"domainLookupStart": -1,
"domainLookupEnd": -1,
"connectStart": -1,
"connectEnd": -1,
"secureConnectionStart": -1,
"requestStart": -1,
"responseStart": -1,
"responseEnd": -1,
"domLoading": -1,
"domInteractive": -1,
"domContentLoadedEventStart": -1,
"domContentLoadedEventEnd": -1,
"domComplete": -1,
"loadEventStart": -1,
"loadEventEnd": -1,
}
]
result = runner.parse_log(mock_log, mock_testcase)
assert (expected == list(result))
assert expected == list(result)
def test_manifest_loader():
text = '''
text = """
http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html
http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html
http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
# Disabled! http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html
'''
"""
expected = [
("http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html", False),
("http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html", False),
("http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html", False)
("http://localhost/page_load_test/tp5n/aljazeera.net/aljazeera.net/portal.html", False),
]
assert (expected == list(runner.parse_manifest(text)))
assert expected == list(runner.parse_manifest(text))
def test_manifest_loader_async():
text = '''
text = """
http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html
async http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html
'''
"""
expected = [
("http://localhost/page_load_test/tp5n/163.com/www.163.com/index.html", False),
("http://localhost/page_load_test/tp5n/56.com/www.56.com/index.html", True),
]
assert (expected == list(runner.parse_manifest(text)))
assert expected == list(runner.parse_manifest(text))
def test_filter_result_by_manifest():
input_json = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
"domComplete": 1460358389000,
}, {
"testcase": "non-existing-html",
"domComplete": 1460358389000,
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389000,
}]
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389000,
}]
manifest = [
("http://localhost:8000/page_load_test/56.com/www.56.com/index.html", False)
input_json = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
"domComplete": 1460358389000,
},
{
"testcase": "non-existing-html",
"domComplete": 1460358389000,
},
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389000,
},
]
assert (expected == runner.filter_result_by_manifest(input_json, manifest))
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389000,
}
]
manifest = [("http://localhost:8000/page_load_test/56.com/www.56.com/index.html", False)]
assert expected == runner.filter_result_by_manifest(input_json, manifest)
def test_filter_result_by_manifest_error():
input_json = [{
"testcase": "1.html",
"domComplete": 1460358389000,
}]
manifest = [
("1.html", False),
("2.html", False)
input_json = [
{
"testcase": "1.html",
"domComplete": 1460358389000,
}
]
manifest = [("1.html", False), ("2.html", False)]
with pytest.raises(Exception) as execinfo:
runner.filter_result_by_manifest(input_json, manifest)
assert "Missing test result" in str(execinfo.value)
def test_take_result_median_odd():
input_json = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001,
"domLoading": 1460358380002
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389003,
"domLoading": 1460358380003
}]
input_json = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001,
"domLoading": 1460358380002,
},
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001,
},
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389003,
"domLoading": 1460358380003,
},
]
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380002
}]
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380002,
}
]
assert (expected == runner.take_result_median(input_json, len(input_json)))
assert expected == runner.take_result_median(input_json, len(input_json))
def test_take_result_median_even():
input_json = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001,
"domLoading": 1460358380002
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001
}]
input_json = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001,
"domLoading": 1460358380002,
},
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001,
},
]
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001.5,
"domLoading": 1460358380001.5
}]
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389001.5,
"domLoading": 1460358380001.5,
}
]
assert (expected == runner.take_result_median(input_json, len(input_json)))
assert expected == runner.take_result_median(input_json, len(input_json))
def test_take_result_median_error():
input_json = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": None,
"domLoading": 1460358380002
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001
}]
input_json = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": None,
"domLoading": 1460358380002,
},
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001,
},
]
expected = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001.5
}]
expected = [
{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": 1460358389002,
"domLoading": 1460358380001.5,
}
]
assert (expected == runner.take_result_median(input_json, len(input_json)))
assert expected == runner.take_result_median(input_json, len(input_json))
def test_log_result():
results = [{
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": -1
}, {
"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
"domComplete": -1
}, {
"testcase": "http://localhost:8000/page_load_test/104.com/www.104.com/index.html",
"domComplete": 123456789
}]
results = [
{"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html", "domComplete": -1},
{"testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html", "domComplete": -1},
{"testcase": "http://localhost:8000/page_load_test/104.com/www.104.com/index.html", "domComplete": 123456789},
]
expected = """
========================================
@@ -437,4 +459,4 @@ Failure summary:
- http://localhost:8000/page_load_test/56.com/www.56.com/index.html
========================================
"""
assert (expected == runner.format_result_summary(results))
assert expected == runner.format_result_summary(results)


@@ -8,18 +8,18 @@ import submit_to_perfherder
def test_format_testcase_name():
assert ('about:blank' == submit_to_perfherder.format_testcase_name(
'about:blank'))
assert ('163.com' == submit_to_perfherder.format_testcase_name((
'http://localhost:8000/page_load_test/163.com/p.mail.163.com/'
'mailinfo/shownewmsg_www_1222.htm.html')))
assert (('1234567890223456789032345678904234567890'
'5234567890623456789072345678908234567890')
== submit_to_perfherder.format_testcase_name((
'1234567890223456789032345678904234567890'
'52345678906234567890723456789082345678909234567890')))
assert ('news.ycombinator.com' == submit_to_perfherder.format_testcase_name(
'http://localhost:8000/tp6/news.ycombinator.com/index.html'))
assert "about:blank" == submit_to_perfherder.format_testcase_name("about:blank")
assert "163.com" == submit_to_perfherder.format_testcase_name(
("http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html")
)
assert (
"12345678902234567890323456789042345678905234567890623456789072345678908234567890"
) == submit_to_perfherder.format_testcase_name(
("123456789022345678903234567890423456789052345678906234567890723456789082345678909234567890")
)
assert "news.ycombinator.com" == submit_to_perfherder.format_testcase_name(
"http://localhost:8000/tp6/news.ycombinator.com/index.html"
)
def test_format_perf_data():
@@ -46,7 +46,7 @@ def test_format_perf_data():
"unloadEventEnd": None,
"responseEnd": None,
"testcase": "about:blank",
"domComplete": 1460444931000
"domComplete": 1460444931000,
},
{
"unloadEventStart": None,
@@ -69,11 +69,11 @@ def test_format_perf_data():
"domainLookupEnd": None,
"unloadEventEnd": None,
"responseEnd": None,
"testcase": ("http://localhost:8000/page_load_test/163.com/"
"p.mail.163.com/mailinfo/"
"shownewmsg_www_1222.htm.html"),
"domComplete": 1460444948000
}
"testcase": (
"http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html"
),
"domComplete": 1460444948000,
},
]
expected = {
@@ -84,33 +84,27 @@ def test_format_bad_perf_data():
"name": "domComplete",
"value": 3741.657386773941,
"subtests": [
{"name": "about:blank",
"value": 1000},
{"name": "163.com",
"value": 14000},
]
{"name": "about:blank", "value": 1000},
{"name": "163.com", "value": 14000},
],
}
]
],
}
}
result = submit_to_perfherder.format_perf_data(mock_result)
assert (expected == result)
assert expected == result
def test_format_bad_perf_data():
mock_result = [
{
"navigationStart": 1460444930000,
"testcase": "about:blank",
"domComplete": 0
},
{"navigationStart": 1460444930000, "testcase": "about:blank", "domComplete": 0},
{
"navigationStart": 1460444934000,
"testcase": ("http://localhost:8000/page_load_test/163.com/"
"p.mail.163.com/mailinfo/"
"shownewmsg_www_1222.htm.html"),
"domComplete": 1460444948000
}
"testcase": (
"http://localhost:8000/page_load_test/163.com/p.mail.163.com/mailinfo/shownewmsg_www_1222.htm.html"
),
"domComplete": 1460444948000,
},
]
expected = {
@@ -121,14 +115,12 @@ def test_format_bad_perf_data():
"name": "domComplete",
"value": 14000.0,
"subtests": [
{"name": "about:blank",
"value": -1}, # Timeout
{"name": "163.com",
"value": 14000},
]
{"name": "about:blank", "value": -1}, # Timeout
{"name": "163.com", "value": 14000},
],
}
]
],
}
}
result = submit_to_perfherder.format_perf_data(mock_result)
assert (expected == result)
assert expected == result
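
Taken together, the hunks above are instances of one mechanical rewrite: ruff format normalizes string quotes to double quotes and, once a call no longer fits on one line, puts each argument on its own line with a trailing comma. An illustrative before/after sketch, composed for this note rather than lifted from the diff:

import argparse

parser = argparse.ArgumentParser(description="ruff format example")

# Before: hanging indentation and single-quoted strings.
# parser.add_argument("--engine",
#                     type=str,
#                     default='servo',
#                     help="the engine to run the tests on")

# After ruff format: double quotes, one argument per line, trailing comma.
parser.add_argument(
    "--engine",
    type=str,
    default="servo",
    help="the engine to run the tests on",
)

print(parser.parse_args(["--engine", "gecko"]).engine)  # prints "gecko"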