Auto merge of #14194 - shinglyu:gecko-webdriver, r=larsbergstrom,aneeshusa,jgraham

Use Selenium for Gecko performance test

<!-- Please describe your changes on the following line: -->
We run the `etc/ci/performance` tests on Gecko for comparison, but the old add-on approach no longer works. This change drives Gecko through Selenium instead. I also did some refactoring and fixed a few nits I noticed along the way.
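
For reviewers who want the gist without reading the full diff, here is a minimal sketch of the Selenium-driven approach (assuming `geckodriver` and Firefox Nightly are on the `PATH`; the test URL below is a placeholder, and the real logic lives in `etc/ci/performance/gecko_driver.py` in this change):

```python
import json

from selenium import webdriver

# Illustration only: drive Firefox through geckodriver and read the page's
# Navigation Timing data, which is what the harness records per test case.
driver = webdriver.Firefox()  # assumes geckodriver and Firefox are on the PATH
try:
    driver.set_page_load_timeout(15)
    driver.get("http://localhost:8000/page_load_test/example.html")  # placeholder URL
    timing = json.loads(
        driver.execute_script("return JSON.stringify(performance.timing)")
    )
    print(timing["navigationStart"], timing["loadEventEnd"])
finally:
    driver.quit()
```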

r? @aneeshusa

---
<!-- Thank you for contributing to Servo! Please replace each `[ ]` by `[X]` when the step is complete, and replace `__` with appropriate data: -->
- [x] `./mach build -d` does not report any errors
- [x] `./mach test-tidy` does not report any errors
- [x] These changes fix #14148

<!-- Either: -->
- [x] There are tests for these changes OR
- [ ] These changes do not require tests because _____

<!-- Pull requests that do not address these steps are welcome, but they will require additional verification as part of the review process. -->

---
This change is [Reviewable](https://reviewable.io/reviews/servo/servo/14194)
bors-servo 2016-11-28 20:22:05 -08:00 committed by GitHub
commit 69f914b8b5
9 changed files with 186 additions and 116 deletions


@@ -64,9 +64,10 @@ If you want to test the data submission code in `submit_to_perfherder.py` withou
## For Gecko
* Install Firefox Nightly in your PATH
* Install [jpm](https://developer.mozilla.org/en-US/Add-ons/SDK/Tools/jpm#Installation)
* Run `jpm xpi` in the `firefox/addon` folder
* Install the generated `xpi` file to your Firefox Nightly
* Download [geckodriver](https://github.com/mozilla/geckodriver/releases) and add it to the `PATH`
* `pip install selenium`
* Run `python gecko_driver.py` to test
# Troubleshooting


@@ -1,2 +0,0 @@
#Servo Performance Comparison
Monitor website rendering performance


@@ -1 +0,0 @@
../../../user-agent-js/01.perf-timing.js


@@ -1,8 +0,0 @@
var self = require("sdk/self");
var pageMod = require("sdk/page-mod");

pageMod.PageMod({
    include: "*",
    contentScriptFile: self.data.url('perf.js'),
    attachTo: ["top", "existing"]
});


@@ -1,16 +0,0 @@
{
  "title": "Servo Performance Comparison",
  "name": "addon",
  "version": "0.0.1",
  "description": "Monitor website rendering performance",
  "main": "index.js",
  "author": "The Servo team",
  "engines": {
    "firefox": ">=38.0a1",
    "fennec": ">=38.0a1"
  },
  "license": "MPL",
  "keywords": [
    "jetpack"
  ]
}


@@ -0,0 +1,97 @@
#!/usr/bin/env python3

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from contextlib import contextmanager
import json
from selenium import webdriver
from selenium.common.exceptions import TimeoutException


@contextmanager
def create_gecko_session():
    firefox_binary = "./firefox/firefox/firefox"
    driver = webdriver.Firefox(firefox_binary=firefox_binary)

    yield driver
    # driver.quit() gives an "'NoneType' object has no attribute 'path'" error.
    # Fixed in
    # https://github.com/SeleniumHQ/selenium/commit/9157c7071f9900c2608f5ca40ae4f518ed373b96
    driver.quit()


def generate_placeholder(testcase):
    # We need to still include the failed tests, otherwise Treeherder will
    # consider the result to be a new test series, and thus a new graph. So we
    # use a placeholder with values = -1 to make Treeherder happy, and still be
    # able to identify failed tests (successful tests have time >=0).
    timings = {
        "testcase": testcase,
        "title": ""
    }

    timing_names = [
        "navigationStart",
        "unloadEventStart",
        "domLoading",
        "fetchStart",
        "responseStart",
        "loadEventEnd",
        "connectStart",
        "domainLookupStart",
        "redirectStart",
        "domContentLoadedEventEnd",
        "requestStart",
        "secureConnectionStart",
        "connectEnd",
        "loadEventStart",
        "domInteractive",
        "domContentLoadedEventStart",
        "redirectEnd",
        "domainLookupEnd",
        "unloadEventEnd",
        "responseEnd",
        "domComplete",
    ]

    for name in timing_names:
        timings[name] = 0 if name == "navigationStart" else -1

    return [timings]


def run_gecko_test(testcase, timeout):
    with create_gecko_session() as driver:
        driver.set_page_load_timeout(timeout)
        try:
            driver.get(testcase)
        except TimeoutException:
            print("Timeout!")
            return generate_placeholder(testcase)

        try:
            timings = {
                "testcase": testcase,
                "title": driver.title.replace(",", "&#44;")
            }
            timings.update(json.loads(
                driver.execute_script(
                    "return JSON.stringify(performance.timing)"
                )
            ))
        except:
            # We need to return a timing object no matter what happened.
            # See the comment in generate_placeholder() for explanation
            print("Failed to get a valid timing measurement.")
            return generate_placeholder(testcase)

        return [timings]


if __name__ == '__main__':
    # Just for manual testing
    from pprint import pprint

    url = "http://localhost:8000/page_load_test/tp5n/dailymail.co.uk/www.dailymail.co.uk/ushome/index.html"
    pprint(run_gecko_test(url, 15))


@@ -9,6 +9,7 @@ import itertools
import json
import os
import subprocess
from functools import partial
from statistics import median, StatisticsError
@@ -25,7 +26,9 @@ def parse_manifest(text):
def execute_test(url, command, timeout):
    try:
        return subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=timeout)
        return subprocess.check_output(
            command, stderr=subprocess.STDOUT, timeout=timeout
        )
    except subprocess.CalledProcessError as e:
        print("Unexpected Fail:")
        print(e)
@@ -36,22 +39,31 @@ def execute_test(url, command, timeout):
    return ""


def get_servo_command(url):
def run_servo_test(url, timeout):
    ua_script_path = "{}/user-agent-js".format(os.getcwd())
    return ["../../../target/release/servo", url,
            "--userscripts", ua_script_path,
            "--headless",
            "-x", "-o", "output.png"]
    command = [
        "../../../target/release/servo", url,
        "--userscripts", ua_script_path,
        "--headless",
        "-x", "-o", "output.png"
    ]
    log = ""
    try:
        log = subprocess.check_output(
            command, stderr=subprocess.STDOUT, timeout=timeout
        )
    except subprocess.CalledProcessError as e:
        print("Unexpected Fail:")
        print(e)
        print("You may want to re-run the test manually:\n{}".format(
            ' '.join(command)
        ))
    except subprocess.TimeoutExpired:
        print("Test FAILED due to timeout: {}".format(url))
    return parse_log(log, url)


def get_gecko_command(url):
    return ["./firefox/firefox/firefox",
            " --display=:0", "--no-remote"
            " -profile", "./firefox/servo",
            url]


def parse_log(log, testcase=None):
def parse_log(log, testcase):
    blocks = []
    block = []
    copy = False
@@ -67,48 +79,19 @@ def parse_log(log, testcase=None):
        elif copy and line.strip().startswith("[PERF]"):
            block.append(line)

    # We need to still include the failed tests, otherwise Treeherder will
    # consider the result to be a new test series, and thus a new graph. So we
    # use a placeholder with values = -1 to make Treeherder happy, and still be
    # able to identify failed tests (successful tests have time >=0).
    placeholder = {
        "navigationStart": 0,
        "unloadEventStart": -1,
        "domLoading": -1,
        "fetchStart": -1,
        "responseStart": -1,
        "loadEventEnd": -1,
        "connectStart": -1,
        "domainLookupStart": -1,
        "redirectStart": -1,
        "domContentLoadedEventEnd": -1,
        "requestStart": -1,
        "secureConnectionStart": -1,
        "connectEnd": -1,
        "loadEventStart": -1,
        "domInteractive": -1,
        "domContentLoadedEventStart": -1,
        "redirectEnd": -1,
        "domainLookupEnd": -1,
        "unloadEventEnd": -1,
        "responseEnd": -1,
        "testcase": testcase,
        "domComplete": -1,
    }

    def parse_block(block):
        timing = {}
        for line in block:
            try:
                (_, key, value) = line.split(",")
            except:
                print("[DEBUG] failed to parse the following block:")
                print(block)
                print("[DEBUG] failed to parse the following line:")
                print(line)
                print('[DEBUG] log:')
                print('-----')
                print(log)
                print('-----')
                return placeholder
                return None

            if key == "testcase" or key == "title":
                timing[key] = value
@@ -117,20 +100,57 @@ def parse_log(log, testcase=None):
        return timing

    def valid_timing(timing):
        return (timing.get('title') != 'Error response') and (testcase is None or timing.get('testcase') == testcase)
    def valid_timing(timing, testcase=None):
        if (timing is None or
                testcase is None or
                timing.get('title') == 'Error response' or
                timing.get('testcase') != testcase):
            return False
        else:
            return True

    timings = list(filter(valid_timing, map(parse_block, blocks)))

    # We need to still include the failed tests, otherwise Treeherder will
    # consider the result to be a new test series, and thus a new graph. So we
    # use a placeholder with values = -1 to make Treeherder happy, and still be
    # able to identify failed tests (successful tests have time >=0).
    def create_placeholder(testcase):
        return {
            "testcase": testcase,
            "title": "",
            "navigationStart": 0,
            "unloadEventStart": -1,
            "domLoading": -1,
            "fetchStart": -1,
            "responseStart": -1,
            "loadEventEnd": -1,
            "connectStart": -1,
            "domainLookupStart": -1,
            "redirectStart": -1,
            "domContentLoadedEventEnd": -1,
            "requestStart": -1,
            "secureConnectionStart": -1,
            "connectEnd": -1,
            "loadEventStart": -1,
            "domInteractive": -1,
            "domContentLoadedEventStart": -1,
            "redirectEnd": -1,
            "domainLookupEnd": -1,
            "unloadEventEnd": -1,
            "responseEnd": -1,
            "domComplete": -1,
        }

    valid_timing_for_case = partial(valid_timing, testcase=testcase)
    timings = list(filter(valid_timing_for_case, map(parse_block, blocks)))

    if len(timings) == 0:
        print("Didn't find any perf data in the log, test timeout?")
        print("Fillng in a dummy perf data")
        print('[DEBUG] log:')
        print('-----')
        print(log)
        print('-----')
        return [placeholder]
        return [create_placeholder(testcase)]
    else:
        return timings
@@ -229,27 +249,24 @@ def main():
                              " servo and gecko are supported."))
    args = parser.parse_args()

    if args.engine == 'servo':
        command_factory = get_servo_command
        run_test = run_servo_test
    elif args.engine == 'gecko':
        command_factory = get_gecko_command
        import gecko_driver # Load this only when we need gecko test
        run_test = gecko_driver.run_gecko_test

    try:
        # Assume the server is up and running
        testcases = load_manifest(args.tp5_manifest)
        results = []
        for testcase in testcases:
            command = (["timeout", "{}s".format(args.timeout)] +
                       command_factory(testcase))
            for run in range(args.runs):
                print("Running test {}/{} on {}".format(run + 1,
                                                        args.runs,
                                                        testcase))
                log = execute_test(testcase, command, args.timeout)
                # results will be a mixure of timings dict and testcase strings
                # testcase string indicates a failed test
                results += run_test(testcase, args.timeout)
                print("Finished")
                result = parse_log(log, testcase)
                # TODO: Record and analyze other performance.timing properties
                results += result
                print("To reproduce the above test, run the following command:")
                print(" {0}\n".format(' '.join(command)))

        print(format_result_summary(results))
        save_result_json(results, args.output_file, testcases, args.runs)


@@ -13,9 +13,11 @@ do
    case "${1}" in
        --servo)
            engine="--engine=servo"
            timeout=60
            ;;
        --gecko)
            engine="--engine=gecko"
            timeout=15
            ;;
        --submit)
            submit=1
@@ -43,7 +45,8 @@ MANIFEST="page_load_test/tp5n/20160509.manifest" # A manifest that excludes
PERF_FILE="output/perf-$(date +%s).json"

echo "Running tests"
python3 runner.py ${engine} --runs 3 "${MANIFEST}" "${PERF_FILE}"
python3 runner.py ${engine} --runs 3 --timeout "${timeout}" \
    "${MANIFEST}" "${PERF_FILE}"

if [[ "${submit:-}" ]];
then


@@ -9,6 +9,7 @@ import pytest


def test_log_parser():
    mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
    mock_log = b'''
[PERF] perf block start
[PERF],testcase,http://localhost:8000/page_load_test/56.com/www.56.com/index.html
@@ -61,7 +62,7 @@ Shutting down the Constellation after generating an output file or exit flag spe
        "loadEventStart": None,
        "loadEventEnd": None
    }]
    result = runner.parse_log(mock_log)
    result = runner.parse_log(mock_log, mock_url)
    assert(expected == list(result))
@@ -119,30 +120,8 @@ Some other js error logs here
[PERF] perf block end
Shutting down the Constellation after generating an output file or exit flag specified
'''
    mock_url = "http://localhost:8000/page_load_test/56.com/www.56.com/index.html"
    expected = [{
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/content.html",
        "navigationStart": 1460358300,
        "unloadEventStart": None,
        "unloadEventEnd": None,
        "redirectStart": None,
        "redirectEnd": None,
        "fetchStart": None,
        "domainLookupStart": None,
        "domainLookupEnd": None,
        "connectStart": None,
        "connectEnd": None,
        "secureConnectionStart": None,
        "requestStart": None,
        "responseStart": None,
        "responseEnd": None,
        "domLoading": 1460358376000,
        "domInteractive": 1460358388000,
        "domContentLoadedEventStart": 1460358388000,
        "domContentLoadedEventEnd": 1460358388000,
        "domComplete": 1460358389000,
        "loadEventStart": None,
        "loadEventEnd": None
    }, {
        "testcase": "http://localhost:8000/page_load_test/56.com/www.56.com/index.html",
        "navigationStart": 1460358376,
        "unloadEventStart": None,
@@ -166,7 +145,7 @@ Shutting down the Constellation after generating an output file or exit flag spe
        "loadEventStart": None,
        "loadEventEnd": None
    }]
    result = runner.parse_log(mock_log)
    result = runner.parse_log(mock_log, mock_url)
    assert(expected == list(result))