Use selenium to drive gecko test

Shing Lyu 2016-11-10 11:31:07 +08:00
parent 1c26f44cbb
commit 6110017569
5 changed files with 165 additions and 84 deletions

README (Gecko setup instructions):

@@ -64,9 +64,10 @@ If you want to test the data submission code in `submit_to_perfherder.py` without
 ## For Gecko
 * Install Firefox Nightly in your PATH
-* Install [jpm](https://developer.mozilla.org/en-US/Add-ons/SDK/Tools/jpm#Installation)
-* Run `jpm xpi` in the `firefox/addon` folder
-* Install the generated `xpi` file to your Firefox Nightly
+* Download [geckodriver](https://github.com/mozilla/geckodriver/releases) and add it to the `PATH`
+* `pip install selenium`
+* Run `python gecko_driver.py` to test

 # Troubleshooting
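With this change the Gecko path no longer needs an add-on build step (jpm); everything goes through geckodriver and the Selenium Python bindings. A quick sanity check of the prerequisites listed above might look like the sketch below; it is ours, not part of the commit, and the Firefox binary path simply mirrors the one gecko_driver.py uses.

# Sanity-check sketch (not from the commit): geckodriver on PATH, the
# selenium package importable, and a Firefox Nightly binary Selenium can launch.
import shutil

from selenium import webdriver

assert shutil.which("geckodriver") is not None, "geckodriver is not on your PATH"
driver = webdriver.Firefox(firefox_binary="./firefox/firefox/firefox")
driver.get("about:blank")
print("Firefox started, current title:", repr(driver.title))
driver.quit()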

gecko_driver.py (new file):

@@ -0,0 +1,78 @@
#!/usr/bin/env python3

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from selenium import webdriver
from selenium.common.exceptions import TimeoutException


def execute_gecko_test(testcase, timeout):
    firefox_binary = "./firefox/firefox/firefox"
    driver = webdriver.Firefox(firefox_binary=firefox_binary)
    driver.set_page_load_timeout(timeout)

    failed = False
    try:
        driver.get(testcase)
    except TimeoutException:
        print("Timeout!")
        driver.quit()
        failed = True

    timings = {
        "testcase": testcase,
        "title": driver.title.replace(",", "&#44;")
    }

    timing_names = [
        "navigationStart",
        "unloadEventStart",
        "domLoading",
        "fetchStart",
        "responseStart",
        "loadEventEnd",
        "connectStart",
        "domainLookupStart",
        "redirectStart",
        "domContentLoadedEventEnd",
        "requestStart",
        "secureConnectionStart",
        "connectEnd",
        "loadEventStart",
        "domInteractive",
        "domContentLoadedEventStart",
        "redirectEnd",
        "domainLookupEnd",
        "unloadEventEnd",
        "responseEnd",
        "domComplete",
    ]

    # We could have just run
    #     timings = driver.execute_script("return performance.timing")
    # but for some testcases the webdriver fails with "too much recursion",
    # so we fetch the timing fields one by one.
    for name in timing_names:
        if failed:
            if name == "navigationStart":
                timings[name] = 0
            else:
                timings[name] = -1
        else:
            timings[name] = driver.execute_script(
                "return performance.timing.{0}".format(name)
            )
    timings['testcase'] = testcase

    # driver.quit() gives an "'NoneType' object has no attribute 'path'" error.
    # Fixed in
    # https://github.com/SeleniumHQ/selenium/commit/9157c7071f9900c2608f5ca40ae4f518ed373b96
    driver.quit()
    return [timings]


if __name__ == '__main__':
    # Just for manual testing
    from pprint import pprint
    url = "http://localhost:8000/page_load_test/tp5n/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html"
    pprint(execute_gecko_test(url, 15))
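The dict returned above holds raw Navigation Timing values: epoch timestamps in milliseconds for a successful load, or the navigationStart = 0 / everything-else = -1 placeholder pattern for a timeout. A small consumer sketch (ours, not from the commit) that turns one result into a page-load duration:

def page_load_time_ms(timing):
    # Placeholder results (navigationStart == 0, other fields == -1) mean the
    # page never finished loading, so there is no duration to report.
    if timing["navigationStart"] <= 0 or timing["loadEventEnd"] <= 0:
        return None
    return timing["loadEventEnd"] - timing["navigationStart"]


if __name__ == '__main__':
    from gecko_driver import execute_gecko_test
    url = "http://localhost:8000/page_load_test/tp5n/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html"
    (timing,) = execute_gecko_test(url, 15)
    print("page load took {} ms".format(page_load_time_ms(timing)))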

runner.py:

@@ -9,6 +9,8 @@ import itertools
 import json
 import os
 import subprocess
+import gecko_driver
+from functools import partial
 from statistics import median, StatisticsError
@@ -25,7 +27,8 @@ def parse_manifest(text):
 def execute_test(url, command, timeout):
     try:
-        return subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
+        return subprocess.check_output(command, stderr=subprocess.STDOUT,
+                                       timeout=timeout)
     except subprocess.CalledProcessError as e:
         print("Unexpected Fail:")
         print(e)
@@ -37,21 +40,33 @@ def execute_test(url, command, timeout):
 def get_servo_command(url):
-    ua_script_path = "{}/user-agent-js".format(os.getcwd())
-    return ["../../../target/release/servo", url,
-            "--userscripts", ua_script_path,
-            "--headless",
-            "-x", "-o", "output.png"]
+    def run_servo_test(url, timeout):
+        ua_script_path = "{}/user-agent-js".format(os.getcwd())
+        command = ["../../../target/release/servo", url,
+                   "--userscripts", ua_script_path,
+                   "--headless",
+                   "-x", "-o", "output.png"]
+        log = ""
+        try:
+            log = subprocess.check_output(command,
+                                          stderr=subprocess.STDOUT,
+                                          timeout=timeout)
+        except subprocess.CalledProcessError as e:
+            print("Unexpected Fail:")
+            print(e)
+            print("You may want to re-run the test manually:\n{}"
+                  .format(' '.join(command)))
+        except subprocess.TimeoutExpired:
+            print("Test FAILED due to timeout: {}".format(url))
+        return parse_log(log, url)
+    return run_servo_test


 def get_gecko_command(url):
-    return ["./firefox/firefox/firefox",
-            " --display=:0", "--no-remote"
-            " -profile", "./firefox/servo",
-            url]
+    return gecko_driver.execute_gecko_test


-def parse_log(log, testcase=None):
+def parse_log(log, testcase):
     blocks = []
     block = []
     copy = False
@@ -67,48 +82,19 @@ def parse_log(log, testcase=None):
         elif copy and line.strip().startswith("[PERF]"):
             block.append(line)

-    # We need to still include the failed tests, otherwise Treeherder will
-    # consider the result to be a new test series, and thus a new graph. So we
-    # use a placeholder with values = -1 to make Treeherder happy, and still be
-    # able to identify failed tests (successful tests have time >=0).
-    placeholder = {
-        "navigationStart": 0,
-        "unloadEventStart": -1,
-        "domLoading": -1,
-        "fetchStart": -1,
-        "responseStart": -1,
-        "loadEventEnd": -1,
-        "connectStart": -1,
-        "domainLookupStart": -1,
-        "redirectStart": -1,
-        "domContentLoadedEventEnd": -1,
-        "requestStart": -1,
-        "secureConnectionStart": -1,
-        "connectEnd": -1,
-        "loadEventStart": -1,
-        "domInteractive": -1,
-        "domContentLoadedEventStart": -1,
-        "redirectEnd": -1,
-        "domainLookupEnd": -1,
-        "unloadEventEnd": -1,
-        "responseEnd": -1,
-        "testcase": testcase,
-        "domComplete": -1,
-    }
-
     def parse_block(block):
         timing = {}
         for line in block:
             try:
                 (_, key, value) = line.split(",")
             except:
-                print("[DEBUG] failed to parse the following block:")
-                print(block)
+                print("[DEBUG] failed to parse the following line:")
+                print(line)
                 print('[DEBUG] log:')
                 print('-----')
                 print(log)
                 print('-----')
-                return placeholder
+                return None

             if key == "testcase" or key == "title":
                 timing[key] = value
@@ -117,20 +103,57 @@ def parse_log(log, testcase=None):
         return timing

-    def valid_timing(timing):
-        return (timing.get('title') != 'Error response') and (testcase is None or timing.get('testcase') == testcase)
+    def valid_timing(timing, testcase=None):
+        if (timing is None or
+                testcase is None or
+                timing.get('title') == 'Error response' or
+                timing.get('testcase') != testcase):
+            return False
+        else:
+            return True

-    timings = list(filter(valid_timing, map(parse_block, blocks)))
+    # We need to still include the failed tests, otherwise Treeherder will
+    # consider the result to be a new test series, and thus a new graph. So we
+    # use a placeholder with values = -1 to make Treeherder happy, and still be
+    # able to identify failed tests (successful tests have time >=0).
+    def create_placeholder(testcase):
+        return {
+            "testcase": testcase,
+            "title": "",
+            "navigationStart": 0,
+            "unloadEventStart": -1,
+            "domLoading": -1,
+            "fetchStart": -1,
+            "responseStart": -1,
+            "loadEventEnd": -1,
+            "connectStart": -1,
+            "domainLookupStart": -1,
+            "redirectStart": -1,
+            "domContentLoadedEventEnd": -1,
+            "requestStart": -1,
+            "secureConnectionStart": -1,
+            "connectEnd": -1,
+            "loadEventStart": -1,
+            "domInteractive": -1,
+            "domContentLoadedEventStart": -1,
+            "redirectEnd": -1,
+            "domainLookupEnd": -1,
+            "unloadEventEnd": -1,
+            "responseEnd": -1,
+            "domComplete": -1,
+        }
+
+    valid_timing_for_case = partial(valid_timing, testcase=testcase)
+    timings = list(filter(valid_timing_for_case, map(parse_block, blocks)))

     if len(timings) == 0:
         print("Didn't find any perf data in the log, test timeout?")
+        print("Filling in dummy perf data")
         print('[DEBUG] log:')
         print('-----')
         print(log)
         print('-----')
-        return [placeholder]
+        return [create_placeholder(testcase)]
     else:
         return timings
@@ -237,19 +260,16 @@ def main():
     testcases = load_manifest(args.tp5_manifest)

     results = []
     for testcase in testcases:
-        command = (["timeout", "{}s".format(args.timeout)] +
-                   command_factory(testcase))
+        run_test = command_factory(testcase)
         for run in range(args.runs):
             print("Running test {}/{} on {}".format(run + 1,
                                                     args.runs,
                                                     testcase))
-            log = execute_test(testcase, command, args.timeout)
+            # results will be a mixture of timings dicts and testcase strings;
+            # a testcase string indicates a failed test
+            results += run_test(testcase, args.timeout)
             print("Finished")
-            result = parse_log(log, testcase)
             # TODO: Record and analyze other performance.timing properties
-            results += result
-            print("To reproduce the above test, run the following command:")
-            print(" {0}\n".format(' '.join(command)))

     print(format_result_summary(results))

     save_result_json(results, args.output_file, testcases, args.runs)
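The net effect of these runner.py changes is that command_factory no longer builds an argv list for execute_test; it now returns a callable that runs one test and hands back parsed timings (run_servo_test for Servo, gecko_driver.execute_gecko_test for Gecko). The hunks above do not show how command_factory itself is chosen, so the --engine wiring in the sketch below is our assumption; the rest mirrors the diff.

import runner

def pick_command_factory(engine):
    # Assumed selection logic; the real lookup lives outside the hunks shown.
    return {"servo": runner.get_servo_command,
            "gecko": runner.get_gecko_command}[engine]

testcase = "http://localhost:8000/page_load_test/tp5n/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html"
run_test = pick_command_factory("gecko")(testcase)  # -> gecko_driver.execute_gecko_test
results = run_test(testcase, 15)  # list of timings dicts; a placeholder on failure
print(results[0]["testcase"])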

Test harness shell script:

@@ -13,9 +13,11 @@ do
     case "${1}" in
         --servo)
             engine="--engine=servo"
+            timeout=60
             ;;
         --gecko)
             engine="--engine=gecko"
+            timeout=15
             ;;
         --submit)
             submit=1
@@ -43,7 +45,8 @@ MANIFEST="page_load_test/tp5n/20160509.manifest" # A manifest that excludes
 PERF_FILE="output/perf-$(date +%s).json"

 echo "Running tests"
-python3 runner.py ${engine} --runs 3 "${MANIFEST}" "${PERF_FILE}"
+python3 runner.py ${engine} --runs 3 --timeout "${timeout}" \
+    "${MANIFEST}" "${PERF_FILE}"

 if [[ "${submit:-}" ]];
 then
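The script now forwards an engine-specific --timeout (60 s for Servo, 15 s for Gecko) to runner.py, which previously wrapped each command in timeout(1) itself. For reference, a reconstruction of the CLI this invocation implies is sketched below; the argparse wiring is our assumption (it sits outside the hunks shown), and only the names args.tp5_manifest, args.output_file, args.runs and args.timeout come from the diff.

import argparse

def parse_args():
    # Assumed interface matching the invocation above, e.g.:
    #   python3 runner.py --engine=gecko --runs 3 --timeout 15 <manifest> <output.json>
    parser = argparse.ArgumentParser(description="Page-load performance test runner")
    parser.add_argument("tp5_manifest", help="manifest listing the testcase URLs")
    parser.add_argument("output_file", help="where to write the JSON results")
    parser.add_argument("--engine", choices=["servo", "gecko"], default="servo")
    parser.add_argument("--runs", type=int, default=3, help="runs per testcase")
    parser.add_argument("--timeout", type=int, default=60,
                        help="per-page timeout in seconds")
    return parser.parse_args()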

Unit tests for runner.py (pytest):

@@ -9,6 +9,7 @@ import pytest
 def test_log_parser():
+    mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
     mock_log = b'''
 [PERF] perf block start
 [PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
@@ -61,7 +62,7 @@ Shutting down the Constellation after generating an output file or exit flag specified
         "loadEventStart": None,
         "loadEventEnd": None
     }]
-    result = runner.parse_log(mock_log)
+    result = runner.parse_log(mock_log, mock_url)
     assert(expected == list(result))
@@ -119,30 +120,8 @@ Some other js error logs here
 [PERF] perf block end
 Shutting down the Constellation after generating an output file or exit flag specified
 '''
+    mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
     expected = [{
-        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
-        "navigationStart": 1460358300,
-        "unloadEventStart": None,
-        "unloadEventEnd": None,
-        "redirectStart": None,
-        "redirectEnd": None,
-        "fetchStart": None,
-        "domainLookupStart": None,
-        "domainLookupEnd": None,
-        "connectStart": None,
-        "connectEnd": None,
-        "secureConnectionStart": None,
-        "requestStart": None,
-        "responseStart": None,
-        "responseEnd": None,
-        "domLoading": 1460358376000,
-        "domInteractive": 1460358388000,
-        "domContentLoadedEventStart": 1460358388000,
-        "domContentLoadedEventEnd": 1460358388000,
-        "domComplete": 1460358389000,
-        "loadEventStart": None,
-        "loadEventEnd": None
-    }, {
         "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
         "navigationStart": 1460358376,
         "unloadEventStart": None,
@@ -166,7 +145,7 @@ Shutting down the Constellation after generating an output file or exit flag specified
         "loadEventStart": None,
         "loadEventEnd": None
     }]
-    result = runner.parse_log(mock_log)
+    result = runner.parse_log(mock_log, mock_url)
     assert(expected == list(result))
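parse_log now requires the testcase URL and keeps only the [PERF] blocks recorded for that URL, which is why the first expected dict (the content.html block) disappears above. A toy check of that contract (ours, not one of the commit's tests): when no block matches, the caller gets back the -1 placeholder for the URL it asked about, along with the debug warning shown in the diff.

import runner

mock_log = b'''
[PERF] perf block start
[PERF],testcase,http://localhost:8000/some/other/page.html
[PERF],navigationStart,1460358376
[PERF] perf block end
'''

requested = "http://localhost:8000/page/we/asked/for.html"
(result,) = runner.parse_log(mock_log, requested)
assert result["testcase"] == requested
assert result["navigationStart"] == 0  # placeholder marker for a failed/missing run
assert all(value == -1 for key, value in result.items()
           if key not in ("testcase", "title", "navigationStart"))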