Mirror of https://github.com/servo/servo.git
Use selenium to drive gecko test
parent 1c26f44cbb
commit 6110017569
5 changed files with 165 additions and 84 deletions
@@ -64,9 +64,10 @@ If you want to test the data submission code in `submit_to_perfherder.py` withou
## For Gecko

* Install Firefox Nightly in your PATH
* Install [jpm](https://developer.mozilla.org/en-US/Add-ons/SDK/Tools/jpm#Installation)
* Run `jpm xpi` in the `firefox/addon` folder
* Install the generated `xpi` file in your Firefox Nightly
* Download [geckodriver](https://github.com/mozilla/geckodriver/releases) and add it to the `PATH`
* `pip install selenium`
* Run `python gecko_driver.py` to test (a minimal sketch of what this exercises follows below)
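For reference, a minimal sketch of what this selenium/geckodriver setup exercises. The Firefox binary path mirrors the one hard-coded in `gecko_driver.py`; the URL is a placeholder, not one of the real test pages:

```python
# Minimal smoke test of the selenium + geckodriver setup (illustration only).
from selenium import webdriver

driver = webdriver.Firefox(firefox_binary="./firefox/firefox/firefox")
driver.set_page_load_timeout(30)
driver.get("http://localhost:8000/page_load_test/example.html")  # placeholder URL
print(driver.title)
print(driver.execute_script("return performance.timing.navigationStart"))
driver.quit()
```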
# Troubleshooting
etc/ci/performance/gecko_driver.py (new file, 78 lines)

@@ -0,0 +1,78 @@
#!/usr/bin/env python3

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from selenium import webdriver
from selenium.common.exceptions import TimeoutException


def execute_gecko_test(testcase, timeout):
    firefox_binary = "./firefox/firefox/firefox"
    driver = webdriver.Firefox(firefox_binary=firefox_binary)
    driver.set_page_load_timeout(timeout)
    failed = False
    try:
        driver.get(testcase)
    except TimeoutException:
        print("Timeout!")
        driver.quit()
        failed = True

    timings = {
        "testcase": testcase,
        "title": driver.title.replace(",", ",")
    }
    timing_names = [
        "navigationStart",
        "unloadEventStart",
        "domLoading",
        "fetchStart",
        "responseStart",
        "loadEventEnd",
        "connectStart",
        "domainLookupStart",
        "redirectStart",
        "domContentLoadedEventEnd",
        "requestStart",
        "secureConnectionStart",
        "connectEnd",
        "loadEventStart",
        "domInteractive",
        "domContentLoadedEventStart",
        "redirectEnd",
        "domainLookupEnd",
        "unloadEventEnd",
        "responseEnd",
        "domComplete",
    ]

    # We could have just run
    # timings = driver.execute_script("return performance.timing")
    # But for some test cases the webdriver will fail with "too much recursion",
    # so we need to get the timing fields one by one.
    for name in timing_names:
        if failed:
            if name == "navigationStart":
                timings[name] = 0
            else:
                timings[name] = -1
        else:
            timings[name] = driver.execute_script(
                "return performance.timing.{0}".format(name)
            )

    timings['testcase'] = testcase
    # driver.quit() gives an "'NoneType' object has no attribute 'path'" error.
    # Fixed in
    # https://github.com/SeleniumHQ/selenium/commit/9157c7071f9900c2608f5ca40ae4f518ed373b96
    driver.quit()
    return [timings]


if __name__ == '__main__':
    # Just for manual testing
    from pprint import pprint
    url = "http://localhost:8000/page_load_test/tp5n/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html"
    pprint(execute_gecko_test(url, 15))
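The per-field `execute_script` loop above is the workaround described in the comment. As an illustration only (not part of this commit), a possible alternative would be to serialize `performance.timing` inside the page so that only a string crosses the WebDriver boundary; whether that sidesteps the "too much recursion" failure on the affected pages is an assumption:

```python
import json


def get_timings_via_json(driver):
    # Hypothetical helper: serialize the timing object in the page and decode
    # it in Python, instead of issuing one execute_script call per field.
    # Assumes the browser implements PerformanceTiming.toJSON().
    raw = driver.execute_script(
        "return JSON.stringify(window.performance.timing.toJSON())")
    return json.loads(raw)
```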
@@ -9,6 +9,8 @@ import itertools
import json
import os
import subprocess
import gecko_driver
from functools import partial
from statistics import median, StatisticsError
@@ -25,7 +27,8 @@ def parse_manifest(text):

def execute_test(url, command, timeout):
    try:
        return subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
        return subprocess.check_output(command, stderr=subprocess.STDOUT,
                                       timeout=timeout)
    except subprocess.CalledProcessError as e:
        print("Unexpected Fail:")
        print(e)
@@ -37,21 +40,33 @@ def execute_test(url, command, timeout):


def get_servo_command(url):
    ua_script_path = "{}/user-agent-js".format(os.getcwd())
    return ["../../../target/release/servo", url,
            "--userscripts", ua_script_path,
            "--headless",
            "-x", "-o", "output.png"]
    def run_servo_test(url, timeout):
        ua_script_path = "{}/user-agent-js".format(os.getcwd())
        command = ["../../../target/release/servo", url,
                   "--userscripts", ua_script_path,
                   "--headless",
                   "-x", "-o", "output.png"]
        log = ""
        try:
            log = subprocess.check_output(command,
                                          stderr=subprocess.STDOUT,
                                          timeout=timeout)
        except subprocess.CalledProcessError as e:
            print("Unexpected Fail:")
            print(e)
            print("You may want to re-run the test manually:\n{}"
                  .format(' '.join(command)))
        except subprocess.TimeoutExpired:
            print("Test FAILED due to timeout: {}".format(url))
        return parse_log(log, url)
    return run_servo_test


def get_gecko_command(url):
    return ["./firefox/firefox/firefox",
            " --display=:0", "--no-remote"
            " -profile", "./firefox/servo",
            url]
    return gecko_driver.execute_gecko_test


def parse_log(log, testcase=None):
def parse_log(log, testcase):
    blocks = []
    block = []
    copy = False
@@ -67,48 +82,19 @@ def parse_log(log, testcase=None):
        elif copy and line.strip().startswith("[PERF]"):
            block.append(line)

    # We need to still include the failed tests, otherwise Treeherder will
    # consider the result to be a new test series, and thus a new graph. So we
    # use a placeholder with values = -1 to make Treeherder happy, and still be
    # able to identify failed tests (successful tests have time >=0).
    placeholder = {
        "navigationStart": 0,
        "unloadEventStart": -1,
        "domLoading": -1,
        "fetchStart": -1,
        "responseStart": -1,
        "loadEventEnd": -1,
        "connectStart": -1,
        "domainLookupStart": -1,
        "redirectStart": -1,
        "domContentLoadedEventEnd": -1,
        "requestStart": -1,
        "secureConnectionStart": -1,
        "connectEnd": -1,
        "loadEventStart": -1,
        "domInteractive": -1,
        "domContentLoadedEventStart": -1,
        "redirectEnd": -1,
        "domainLookupEnd": -1,
        "unloadEventEnd": -1,
        "responseEnd": -1,
        "testcase": testcase,
        "domComplete": -1,
    }

    def parse_block(block):
        timing = {}
        for line in block:
            try:
                (_, key, value) = line.split(",")
            except:
                print("[DEBUG] failed to parse the following block:")
                print(block)
                print("[DEBUG] failed to parse the following line:")
                print(line)
                print('[DEBUG] log:')
                print('-----')
                print(log)
                print('-----')
                return placeholder
                return None

            if key == "testcase" or key == "title":
                timing[key] = value
@@ -117,20 +103,57 @@ def parse_log(log, testcase=None):
        return timing

    def valid_timing(timing):
        return (timing.get('title') != 'Error response') and (testcase is None or timing.get('testcase') == testcase)
    def valid_timing(timing, testcase=None):
        if (timing is None or
                testcase is None or
                timing.get('title') == 'Error response' or
                timing.get('testcase') != testcase):
            return False
        else:
            return True

    timings = list(filter(valid_timing, map(parse_block, blocks)))
    # We need to still include the failed tests, otherwise Treeherder will
    # consider the result to be a new test series, and thus a new graph. So we
    # use a placeholder with values = -1 to make Treeherder happy, and still be
    # able to identify failed tests (successful tests have time >=0).
    def create_placeholder(testcase):
        return {
            "testcase": testcase,
            "title": "",
            "navigationStart": 0,
            "unloadEventStart": -1,
            "domLoading": -1,
            "fetchStart": -1,
            "responseStart": -1,
            "loadEventEnd": -1,
            "connectStart": -1,
            "domainLookupStart": -1,
            "redirectStart": -1,
            "domContentLoadedEventEnd": -1,
            "requestStart": -1,
            "secureConnectionStart": -1,
            "connectEnd": -1,
            "loadEventStart": -1,
            "domInteractive": -1,
            "domContentLoadedEventStart": -1,
            "redirectEnd": -1,
            "domainLookupEnd": -1,
            "unloadEventEnd": -1,
            "responseEnd": -1,
            "domComplete": -1,
        }

    valid_timing_for_case = partial(valid_timing, testcase=testcase)
    timings = list(filter(valid_timing_for_case, map(parse_block, blocks)))

    if len(timings) == 0:
        print("Didn't find any perf data in the log, test timeout?")
        print("Filling in dummy perf data")
        print('[DEBUG] log:')
        print('-----')
        print(log)
        print('-----')

        return [placeholder]
        return [create_placeholder(testcase)]
    else:
        return timings
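The `partial`/`filter` combination introduced above binds the current testcase into the predicate so it can be applied to every parsed block. A small self-contained sketch of the same idiom, using made-up sample data:

```python
from functools import partial


def valid_timing(timing, testcase=None):
    # Same shape as the predicate in runner.py: reject parse failures,
    # error pages, and blocks that belong to a different testcase.
    if (timing is None or
            testcase is None or
            timing.get('title') == 'Error response' or
            timing.get('testcase') != testcase):
        return False
    return True


blocks = [{"testcase": "a.html", "title": "A"},      # illustrative data only
          {"testcase": "b.html", "title": "B"},
          None]
valid_for_a = partial(valid_timing, testcase="a.html")
print(list(filter(valid_for_a, blocks)))  # keeps only the a.html entry
```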
@@ -237,19 +260,16 @@ def main():
    testcases = load_manifest(args.tp5_manifest)
    results = []
    for testcase in testcases:
        command = (["timeout", "{}s".format(args.timeout)] +
                   command_factory(testcase))
        run_test = command_factory(testcase)
        for run in range(args.runs):
            print("Running test {}/{} on {}".format(run + 1,
                                                     args.runs,
                                                     testcase))
            log = execute_test(testcase, command, args.timeout)
            # results will be a mixture of timing dicts and testcase strings;
            # a testcase string indicates a failed test
            results += run_test(testcase, args.timeout)
            print("Finished")
            result = parse_log(log, testcase)
            # TODO: Record and analyze other performance.timing properties
            results += result
            print("To reproduce the above test, run the following command:")
            print(" {0}\n".format(' '.join(command)))

    print(format_result_summary(results))
    save_result_json(results, args.output_file, testcases, args.runs)
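The loop above carries the commit's central interface change: `command_factory(testcase)` no longer produces a command line for `execute_test`, it returns a callable, either the nested `run_servo_test` closure or `gecko_driver.execute_gecko_test`, and both are invoked as `(testcase, timeout)` and return a list of timing dicts. A toy model of that contract (names here are illustrative, not from the patch):

```python
# Toy model of the runner interface this commit standardizes on.
def make_fake_runner(engine):
    def run_test(testcase, timeout):
        # A real runner would launch the engine and collect performance.timing;
        # this stand-in just returns the failed-run placeholder shape
        # (navigationStart=0, other fields -1).
        return [{"testcase": testcase, "navigationStart": 0, "domComplete": -1}]
    return run_test


run_test = make_fake_runner("gecko")
print(run_test("http://localhost:8000/example.html", 15))
```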
@@ -13,9 +13,11 @@ do
    case "${1}" in
        --servo)
            engine="--engine=servo"
            timeout=60
            ;;
        --gecko)
            engine="--engine=gecko"
            timeout=15
            ;;
        --submit)
            submit=1
@@ -43,7 +45,8 @@ MANIFEST="page_load_test/tp5n/20160509.manifest" # A manifest that excludes
PERF_FILE="output/perf-$(date +%s).json"

echo "Running tests"
python3 runner.py ${engine} --runs 3 "${MANIFEST}" "${PERF_FILE}"
python3 runner.py ${engine} --runs 3 --timeout "${timeout}"\
    "${MANIFEST}" "${PERF_FILE}"

if [[ "${submit:-}" ]];
then
@@ -9,6 +9,7 @@ import pytest


def test_log_parser():
    mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
    mock_log = b'''
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
@@ -61,7 +62,7 @@ Shutting down the Constellation after generating an output file or exit flag spe
        "loadEventStart": None,
        "loadEventEnd": None
    }]
    result = runner.parse_log(mock_log)
    result = runner.parse_log(mock_log, mock_url)
    assert(expected == list(result))
@@ -119,30 +120,8 @@ Some other js error logs here
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
    mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
        "navigationStart": 1460358300,
        "unloadEventStart": None,
        "unloadEventEnd": None,
        "redirectStart": None,
        "redirectEnd": None,
        "fetchStart": None,
        "domainLookupStart": None,
        "domainLookupEnd": None,
        "connectStart": None,
        "connectEnd": None,
        "secureConnectionStart": None,
        "requestStart": None,
        "responseStart": None,
        "responseEnd": None,
        "domLoading": 1460358376000,
        "domInteractive": 1460358388000,
        "domContentLoadedEventStart": 1460358388000,
        "domContentLoadedEventEnd": 1460358388000,
        "domComplete": 1460358389000,
        "loadEventStart": None,
        "loadEventEnd": None
    }, {
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "navigationStart": 1460358376,
        "unloadEventStart": None,
@@ -166,7 +145,7 @@ Shutting down the Constellation after generating an output file or exit flag spe
        "loadEventStart": None,
        "loadEventEnd": None
    }]
    result = runner.parse_log(mock_log)
    result = runner.parse_log(mock_log, mock_url)
    assert(expected == list(result))