Use upstream wptrunner from web-platform-tests.

Josh Matthews 2017-06-19 16:52:16 -04:00 committed by Ms2ger
parent d9baadd3df
commit ca340eca39
157 changed files with 4 additions and 15095 deletions

View file

@ -115,7 +115,7 @@ def wpt_harness_path(is_firefox, topdir, *paths):
     if is_firefox:
         rel = os.path.join(wpt_root, "tests", "tools", "wptrunner")
     else:
-        rel = os.path.join(wpt_root, "harness")
+        rel = os.path.join(wpt_root, "web-platform-tests", "tools", "wptrunner")
     return os.path.join(topdir, rel, *paths)
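For clarity, here is a minimal, self-contained sketch of the resolution logic after this change (the value of wpt_root is an assumption; the real module defines it elsewhere):

    import os

    wpt_root = os.path.join("tests", "wpt")  # assumed value; defined elsewhere in Servo's mach code

    def wpt_harness_path(is_firefox, topdir, *paths):
        if is_firefox:
            rel = os.path.join(wpt_root, "tests", "tools", "wptrunner")
        else:
            rel = os.path.join(wpt_root, "web-platform-tests", "tools", "wptrunner")
        return os.path.join(topdir, rel, *paths)

    # e.g. /servo/tests/wpt/web-platform-tests/tools/wptrunner/wptrunner.py
    print(wpt_harness_path(False, "/servo", "wptrunner.py"))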

View file

@ -285,29 +285,3 @@ class MachCommands(CommandBase):
         # Fetch Cargo dependencies
         with cd(self.context.topdir):
             call(["cargo", "fetch"], env=self.build_env())
-
-    @Command('wptrunner-upgrade',
-             description='upgrade wptrunner.',
-             category='devenv')
-    def upgrade_wpt_runner(self):
-        env = self.build_env()
-        with cd(path.join(self.context.topdir, 'tests', 'wpt', 'harness')):
-            code = call(["git", "init"], env=env)
-            if code:
-                return code
-            # No need to report an error if this fails, as it will for the first use
-            call(["git", "remote", "rm", "upstream"], env=env)
-            code = call(
-                ["git", "remote", "add", "upstream", "https://github.com/w3c/wptrunner.git"], env=env)
-            if code:
-                return code
-            code = call(["git", "fetch", "upstream"], env=env)
-            if code:
-                return code
-            code = call(["git", "reset", "--hard", "remotes/upstream/master"], env=env)
-            if code:
-                return code
-            code = call(["rm", "-rf", ".git"], env=env)
-            if code:
-                return code
-        return 0

View file

@ -186,20 +186,6 @@ This should create two commits in your servo repository with the
 updated tests and updated metadata. The same process works for the
 CSSWG tests, using the `update-css` and `test-css` mach commands.
-Updating the test harness
-=========================
-The easiest way to update the test harness is using git:
-    cd tests/wpt/harness
-    git init .
-    git remote add origin https://github.com/w3c/wptrunner
-    git fetch origin
-    git checkout -f origin/master
-    cd ../../..
-At this point you should commit the updated files in the *servo* git repository.
 Servo-specific tests
 ====================

View file

@ -1,7 +0,0 @@
*.py[co]
*~
*#
\#*
_virtualenv
test/test.cfg
test/metadata/MANIFEST.json

View file

@ -1,20 +0,0 @@
language: python
python: 2.7
sudo: false
cache:
directories:
- $HOME/.cache/pip
env:
- TOXENV="{py27,pypy}-base"
- TOXENV="{py27,pypy}-chrome"
- TOXENV="{py27,pypy}-firefox"
- TOXENV="{py27,pypy}-servo"
install:
- pip install -U tox
script:
- tox

View file

@ -1,13 +0,0 @@
exclude MANIFEST.in
include requirements.txt
include wptrunner/browsers/b2g_setup/*
include wptrunner.default.ini
include wptrunner/testharness_runner.html
include wptrunner/testharnessreport.js
include wptrunner/testharnessreport-servo.js
include wptrunner/executors/testharness_marionette.js
include wptrunner/executors/testharness_webdriver.js
include wptrunner/executors/reftest.js
include wptrunner/executors/reftest-wait.js
include wptrunner/config.json
include wptrunner/browsers/server-locations.txt

View file

@ -1,242 +0,0 @@
wptrunner: A web-platform-tests harness
=======================================
wptrunner is a harness for running the W3C `web-platform-tests testsuite`_.
.. contents::
Installation
~~~~~~~~~~~~
wptrunner is expected to be installed into a virtualenv using pip. For
development, it can be installed using the `-e` option::
pip install -e ./
Running the Tests
~~~~~~~~~~~~~~~~~
After installation, the command ``wptrunner`` should be available to run
the tests.
The ``wptrunner`` command takes multiple options, of which the
following are most significant:
``--product`` (defaults to `firefox`)
The product to test against: `b2g`, `chrome`, `firefox`, or `servo`.
``--binary`` (required if product is `firefox` or `servo`)
The path to a binary file for the product (browser) to test against.
``--webdriver-binary`` (required if product is `chrome`)
The path to a `driver` binary; e.g., a `chromedriver` binary.
``--certutil-binary`` (required if product is `firefox` [#]_)
The path to a `certutil` binary (for tests that must be run over https).
``--metadata`` (required)
The path to a directory containing test metadata. [#]_
``--tests`` (required)
The path to a directory containing a web-platform-tests checkout.
``--prefs-root`` (required only when testing a Firefox binary)
The path to a directory containing Firefox test-harness preferences. [#]_
``--config`` (should default to `wptrunner.default.ini`)
The path to the config (ini) file.
.. [#] The ``--certutil-binary`` option is required when the product is
``firefox`` unless ``--ssl-type=none`` is specified.
.. [#] The ``--metadata`` path is to a directory that contains:
* a ``MANIFEST.json`` file (instructions on generating this file are
available in the `detailed documentation
<http://wptrunner.readthedocs.org/en/latest/usage.html#installing-wptrunner>`_);
and
* (optionally) any expectation files (see below)
.. [#] Example ``--prefs-root`` value: ``~/mozilla-central/testing/profiles``.
There are also a variety of other options available; use ``--help`` to
list them.
-------------------------------
Example: How to start wptrunner
-------------------------------
To test a Firefox Nightly build in an OS X environment, you might start
wptrunner using something similar to the following example::
wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
--binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/dist/Nightly.app/Contents/MacOS/firefox \
--certutil-binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/security/nss/cmd/certutil/certutil \
--prefs-root=~/mozilla-central/testing/profiles
And to test a Chromium build in an OS X environment, you might start
wptrunner using something similar to the following example::
wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
--binary=~/chromium/src/out/Release/Chromium.app/Contents/MacOS/Chromium \
--webdriver-binary=/usr/local/bin/chromedriver --product=chrome
-------------------------------------
Example: How to run a subset of tests
-------------------------------------
To restrict a test run just to tests in a particular web-platform-tests
subdirectory, specify the directory name in the positional arguments after
the options; for example, run just the tests in the `dom` subdirectory::
wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
--binary=/path/to/firefox --certutil-binary=/path/to/certutil \
--prefs-root=/path/to/testing/profiles \
dom
Output
~~~~~~
By default wptrunner just dumps its entire output as raw JSON messages
to stdout. This is convenient for piping into other tools, but not ideal
for humans reading the output.
As an alternative, you can use the ``--log-mach`` option, which provides
output in a reasonable format for humans. The option requires a value:
either the path for a file to write the `mach`-formatted output to, or
"`-`" (a hyphen) to write the `mach`-formatted output to stdout.
When using ``--log-mach``, output of the full raw JSON log is still
available, from the ``--log-raw`` option. So to output the full raw JSON
log to a file and a human-readable summary to stdout, you might start
wptrunner using something similar to the following example::
wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
--binary=/path/to/firefox --certutil-binary=/path/to/certutil \
--prefs-root=/path/to/testing/profiles \
--log-raw=output.log --log-mach=-
Expectation Data
~~~~~~~~~~~~~~~~
wptrunner is designed to be used in an environment where it is not
just necessary to know which tests passed, but to compare the results
between runs. For this reason it is possible to store the results of a
previous run in a set of ini-like "expectation files". This format is
documented below. To generate the expectation files use `wptrunner` with
the `--log-raw=/path/to/log/file` option. This can then be used as
input to the `wptupdate` tool.
Expectation File Format
~~~~~~~~~~~~~~~~~~~~~~~
Metadata about tests, notably including their expected results, is
stored in a modified ini-like format that is designed to be human
editable, but also to be machine updatable.
Each test file that requires metadata to be specified (because it has
a non-default expectation or because it is disabled, for example) has
a corresponding expectation file in the `metadata` directory. For
example a test file `html/test1.html` containing a failing test would
have an expectation file called `html/test1.html.ini` in the
`metadata` directory.
An example of an expectation file is::
example_default_key: example_value
[filename.html]
type: testharness
[subtest1]
expected: FAIL
[subtest2]
expected:
if platform == 'win': TIMEOUT
if platform == 'osx': ERROR
FAIL
[filename.html?query=something]
type: testharness
disabled: bug12345
The file consists of two elements, key-value pairs and
sections.
Sections are delimited by headings enclosed in square brackets. Any
closing square bracket in the heading itself may be escaped with a
backslash. Each section may then contain any number of key-value pairs
followed by any number of subsections. So that it is clear which data
belongs to each section without the use of end-section markers, the
data for each section (i.e. the key-value pairs and subsections) must
be indented using spaces. Indentation need only be consistent, but
using two spaces per level is recommended.
In a test expectation file, each resource provided by the file has a
single section, with the section heading being the part after the last
`/` in the test url. Tests that have subtests may have subsections
for those subtests, in which the heading is the name of the subtest.
Simple key-value pairs are of the form::
key: value
Note that unlike ini files, only `:` is a valid separator; `=` will
not work as expected. Key-value pairs may also have conditional
values of the form::
key:
if condition1: value1
if condition2: value2
default
In this case each conditional is evaluated in turn and the value is
that on the right hand side of the first matching conditional. In the
case that no condition matches, the unconditional default is used. If
no condition matches and no default is provided it is equivalent to
the key not being present. Conditionals use a simple python-like expression
language e.g.::
if debug and (platform == "linux" or platform == "osx"): FAIL
For test expectations the available variables are those in the
`run_info` which for desktop are `version`, `os`, `bits`, `processor`,
`debug` and `product`.
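As an illustration only (this is not wptrunner's actual parser), the first-match lookup can be modelled in Python::

    run_info = {"os": "linux", "debug": True, "bits": 64}

    conditionals = [
        (lambda ri: ri["debug"] and ri["os"] in ("linux", "osx"), "FAIL"),
        (lambda ri: ri["os"] == "win", "TIMEOUT"),
    ]
    default = "PASS"

    # each conditional is tried in turn; the first match wins
    value = next((v for cond, v in conditionals if cond(run_info)), default)
    print(value)  # FAIL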
Key-value pairs specified at the top level of the file before any
sections are special as they provide defaults for the rest of the file
e.g.::
key1: value1
[section 1]
key2: value2
[section 2]
key1: value3
In this case, inside section 1, `key1` would have the value `value1`
and `key2` the value `value2` whereas in section 2 `key1` would have
the value `value3` and `key2` would be undefined.
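A toy model of this default inheritance (illustrative only; not how wptrunner represents manifests internally)::

    file_defaults = {"key1": "value1"}
    sections = {
        "section 1": {"key2": "value2"},
        "section 2": {"key1": "value3"},
    }
    for name in ("section 1", "section 2"):
        # section keys override the file-level defaults
        effective = dict(file_defaults)
        effective.update(sections[name])
        print(name, effective)
    # section 1 -> {'key1': 'value1', 'key2': 'value2'}
    # section 2 -> {'key1': 'value3'}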
The web-platform-test harness knows about several keys:
`expected`
Must evaluate to a possible test status indicating the expected
result of the test. The implicit default is PASS or OK when the
field isn't present.
`disabled`
Any value indicates that the test is disabled.
`type`
The test type e.g. `testharness`, `reftest`, or `wdspec`.
`reftype`
The type of comparison for reftests; either `==` or `!=`.
`refurl`
The reference url for reftests.
.. _`web-platform-tests testsuite`: https://github.com/w3c/web-platform-tests

View file

@ -1,177 +0,0 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/wptrunner.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/wptrunner.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/wptrunner"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/wptrunner"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
$(MAKE) -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."

File diff suppressed because one or more lines are too long

Binary image removed (19 KiB); presumably the ``architecture.svg`` diagram referenced in the wptrunner design document below.

View file

@ -1,267 +0,0 @@
# -*- coding: utf-8 -*-
#
# wptrunner documentation build configuration file, created by
# sphinx-quickstart on Mon May 19 18:14:20 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wptrunner'
copyright = u''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wptrunnerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'wptrunner.tex', u'wptrunner Documentation',
u'James Graham', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wptrunner', u'wptrunner Documentation',
[u'James Graham'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'wptrunner', u'wptrunner Documentation',
u'James Graham', 'wptrunner', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'mozlog': ('http://mozbase.readthedocs.org/en/latest/', None)}

View file

@ -1,106 +0,0 @@
wptrunner Design
================
The design of wptrunner is intended to meet the following
requirements:
* Possible to run tests from W3C web-platform-tests.
* Tests should be run as fast as possible. In particular it should
not be necessary to restart the browser between tests, or similar.
* As far as possible, the tests should run in a "normal" browser and
browsing context. In particular many tests assume that they are
running in a top-level browsing context, so we must avoid the use
of an ``iframe`` test container.
* It must be possible to deal with all kinds of behaviour of the
browser under test, for example, crashing, hanging, etc.
* It should be possible to add support for new platforms and browsers
with minimal code changes.
* It must be possible to run tests in parallel to further improve
performance.
* Test output must be in a machine readable form.
Architecture
------------
In order to meet the above requirements, wptrunner is designed to
push as much of the test scheduling as possible into the harness. This
allows the harness to monitor the state of the browser and perform
appropriate action if it gets into an unwanted state e.g. kill the
browser if it appears to be hung.
The harness will typically communicate with the browser via some remote
control protocol such as WebDriver. However for browsers where no such
protocol is supported, other implementation strategies are possible,
typically at the expense of speed.
The overall architecture of wptrunner is shown in the diagram below:
.. image:: architecture.svg
The main entry point to the code is :py:func:`run_tests` in
``wptrunner.py``. This is responsible for setting up the test
environment, loading the list of tests to be executed, and invoking
the remainder of the code to actually execute some tests.
The test environment is encapsulated in the
:py:class:`TestEnvironment` class. This defers to code in
``web-platform-tests`` which actually starts the required servers to
run the tests.
The set of tests to run is defined by the
:py:class:`TestLoader`. This is constructed with a
:py:class:`TestFilter` (not shown), which takes any filter arguments
from the command line to restrict the set of tests that will be
run. The :py:class:`TestLoader` reads both the ``web-platform-tests``
JSON manifest and the expectation data stored in ini files and
produces a :py:class:`multiprocessing.Queue` of tests to run, and
their expected results.
Actually running the tests happens through the
:py:class:`ManagerGroup` object. This takes the :py:class:`Queue` of
tests to be run and starts a :py:class:`testrunner.TestRunnerManager` for each
instance of the browser under test that will be started. These
:py:class:`TestRunnerManager` instances are each started in their own
thread.
A :py:class:`TestRunnerManager` coordinates starting the product under
test, and outputting results from the test. In the case that the test
has timed out or the browser has crashed, it has to restart the
browser to ensure the test run can continue. The functionality for
initialising the browser under test, and probing its state
(e.g. whether the process is still alive) is implemented through a
:py:class:`Browser` object. An implementation of this class must be
provided for each product that is supported.
The functionality for actually running the tests is provided by a
:py:class:`TestRunner` object. :py:class:`TestRunner` instances are
run in their own child process created with the
:py:mod:`multiprocessing` module. This allows them to run concurrently
and to be killed and restarted as required. Communication between the
:py:class:`TestRunnerManager` and the :py:class:`TestRunner` is
provided by a pair of queues, one for sending messages in each
direction. In particular test results are sent from the
:py:class:`TestRunner` to the :py:class:`TestRunnerManager` using one
of these queues.
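The queue-based communication can be sketched as follows; this is a deliberately simplified model, not the real :py:class:`TestRunner` interface::

    from multiprocessing import Process, Queue

    def runner(test_queue, result_queue):
        while True:
            test = test_queue.get()
            if test is None:  # sentinel: the manager wants us to exit
                break
            # a real TestRunner would hand the test to an Executor here
            result_queue.put((test, "PASS"))

    if __name__ == "__main__":
        tests, results = Queue(), Queue()
        for t in ["a.html", "b.html"]:
            tests.put(t)
        tests.put(None)
        child = Process(target=runner, args=(tests, results))
        child.start()
        for _ in range(2):
            print(results.get())
        child.join()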
The :py:class:`TestRunner` object is generic in that the same
:py:class:`TestRunner` is used regardless of the product under
test. However the details of how to run the test may vary greatly with
the product since different products support different remote control
protocols (or none at all). These protocol-specific parts are placed
in the :py:class:`Executor` object. There is typically a different
:py:class:`Executor` class for each combination of control protocol
and test type. The :py:class:`TestRunner` is responsible for pulling
each test off the :py:class:`Queue` of tests and passing it down to
the :py:class:`Executor`.
The executor often requires access to details of the particular
browser instance that it is testing so that it knows e.g. which port
to connect to to send commands to the browser. These details are
encapsulated in the :py:class:`ExecutorBrowser` class.
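In skeleton form, the per-product extension points described above look roughly like this (hypothetical method names; the real interfaces are richer)::

    class Browser(object):
        """Lifecycle control for one product; implemented per browser."""
        def start(self):
            raise NotImplementedError

        def stop(self):
            raise NotImplementedError

        def is_alive(self):
            raise NotImplementedError

    class Executor(object):
        """Protocol-specific test execution for one product/test type."""
        def __init__(self, executor_browser):
            # connection details (e.g. a port) come from ExecutorBrowser
            self.browser = executor_browser

        def run_test(self, test):
            raise NotImplementedError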

View file

@ -1,248 +0,0 @@
Expectation Data
================
Introduction
------------
For use in continuous integration systems, and other scenarios where
regression tracking is required, wptrunner supports storing and
loading the expected result of each test in a test run. Typically
these expected results will initially be generated by running the
testsuite in a baseline build. They may then be edited by humans as
new features are added to the product that change the expected
results. The expected results may also vary for a single product
depending on the platform on which it is run. Therefore, the raw
structured log data is not a suitable format for storing these
files. Instead something is required that is:
* Human readable
* Human editable
* Machine readable / writable
* Capable of storing test id / result pairs
* Suitable for storing in a version control system (i.e. text-based)
The need for different results per platform means either having
multiple expectation files for each platform, or having a way to
express conditional values within a certain file. The former would be
rather cumbersome for humans updating the expectation files, so the
latter approach has been adopted, leading to the requirement:
* Capable of storing result values that are conditional on the platform.
There are few extant formats that meet these requirements, so
wptrunner uses a bespoke ``expectation manifest`` format, which is
closely based on the standard ``ini`` format.
Directory Layout
----------------
Expectation manifest files must be stored under the ``metadata``
directory passed to the test runner. The directory layout follows that
of web-platform-tests with each test path having a corresponding
manifest file. Tests that differ only by query string, or reftests
with the same test path but different ref paths share the same
reference file. The file name is taken from the last /-separated part
of the path, suffixed with ``.ini``.
As an optimisation, files which produce only default results
(i.e. ``PASS`` or ``OK``) don't require a corresponding manifest file.
For example a test with url::
/spec/section/file.html?query=param
would have an expectation file ::
metadata/spec/section/file.html.ini
.. _wptupdate-label:
Generating Expectation Files
----------------------------
wptrunner provides the tool ``wptupdate`` to generate expectation
files from the results of a set of baseline test runs. The basic
syntax for this is::
wptupdate [options] [logfile]...
Each ``logfile`` is a structured log file from a previous run. These
can be generated from wptrunner using the ``--log-raw`` option
e.g. ``--log-raw=structured.log``. The default behaviour is to update
all the test data for the particular combination of hardware and OS
used in the run corresponding to the log data, whilst leaving any
other expectations untouched.
wptupdate takes several useful options:
``--sync``
Pull the latest version of web-platform-tests from the
upstream specified in the config file. If this is specified in
combination with logfiles, it is assumed that the results in the log
files apply to the post-update tests.
``--no-check-clean``
Don't attempt to check if the working directory is clean before
doing the update (assuming that the working directory is a git or
mercurial tree).
``--patch``
Create a git commit, or an mq patch, with the changes made by wptupdate.
``--ignore-existing``
Overwrite all the expectation data for any tests that have a result
in the passed log files, not just data for the same platform.
Examples
~~~~~~~~
Update the local copy of web-platform-tests without changing the
expectation data and commit (or create a mq patch for) the result::
wptupdate --patch --sync
Update all the expectations from a set of cross-platform test runs::
wptupdate --no-check-clean --patch osx.log linux.log windows.log
Add expectation data for some new tests that are expected to be
platform-independent::
wptupdate --no-check-clean --patch --ignore-existing tests.log
Manifest Format
---------------
The format of the manifest files is based on the ini format. Files are
divided into sections, each (apart from the root section) having a
heading enclosed in square braces. Within each section are key-value
pairs. There are several notable differences from standard .ini files,
however:
* Sections may be hierarchically nested, with significant whitespace
indicating nesting depth.
* Only ``:`` is valid as a key/value separator
A simple example of a manifest file is::
root_key: root_value
[section]
section_key: section_value
[subsection]
subsection_key: subsection_value
[another_section]
another_key: another_value
Conditional Values
~~~~~~~~~~~~~~~~~~
In order to support values that depend on some external data, the
right hand side of a key/value pair can take a set of conditionals
rather than a plain value. These values are placed on a new line
following the key, with significant indentation. Conditional values
are prefixed with ``if`` and terminated with a colon, for example::
key:
if cond1: value1
if cond2: value2
value3
In this example, the value associated with ``key`` is determined by
first evaluating ``cond1`` against external data. If that is true,
``key`` is assigned the value ``value1``, otherwise ``cond2`` is
evaluated in the same way. If both ``cond1`` and ``cond2`` are false,
the unconditional ``value3`` is used.
Conditions themselves use a Python-like expression syntax. Operands
can either be variables, corresponding to data passed in, numbers
(integer or floating point; exponential notation is not supported) or
quote-delimited strings. Equality is tested using ``==`` and
inequality by ``!=``. The operators ``and``, ``or`` and ``not`` are
used in the expected way. Parentheses can also be used for
grouping. For example::
key:
if (a == 2 or a == 3) and b == "abc": value1
if a == 1 or b != "abc": value2
value3
Here ``a`` and ``b`` are variables, the value of which will be
supplied when the manifest is used.
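Because the syntax is Python-like, the example above can be mimicked with plain Python for illustration (the real implementation uses its own parser, not ``eval``)::

    data = {"a": 3, "b": "abc"}
    conditionals = [
        ('(a == 2 or a == 3) and b == "abc"', "value1"),
        ('a == 1 or b != "abc"', "value2"),
    ]
    # evaluate each condition against the supplied variables; first match wins
    value = next((v for expr, v in conditionals
                  if eval(expr, {"__builtins__": {}}, data)), "value3")
    print(value)  # value1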
Expectation Manifests
---------------------
When used for expectation data, manifests have the following format:
* A section per test URL described by the manifest, with the section
heading being the part of the test URL following the last ``/`` in
the path (this allows multiple tests in a single manifest file with
the same path part of the URL, but different query parts).
* A subsection per subtest, with the heading being the title of the
subtest.
* A key ``type`` indicating the test type. This takes the values
``testharness`` and ``reftest``.
* For reftests, keys ``reftype`` indicating the reference type
(``==`` or ``!=``) and ``refurl`` indicating the URL of the
reference.
* A key ``expected`` giving the expectation value of each (sub)test.
* A key ``disabled`` which can be set to any value to indicate that
the (sub)test is disabled and should either not be run (for tests)
or that its results should be ignored (subtests).
* A key ``restart-after`` which can be set to any value to indicate that
the runner should restart the browser after running this test (e.g. to
clear out unwanted state).
* Variables ``debug``, ``os``, ``version``, ``processor`` and
``bits`` that describe the configuration of the browser under
test. ``debug`` is a boolean indicating whether a build is a debug
build. ``os`` is a string indicating the operating system, and
``version`` a string indicating the particular version of that
operating system. ``processor`` is a string indicating the
processor architecture and ``bits`` an integer indicating the
number of bits. This information is typically provided by
:py:mod:`mozinfo`.
* Top level keys are taken as defaults for the whole file. So, for
example, a top level key with ``expected: FAIL`` would indicate
that all tests and subtests in the file are expected to fail,
unless they have an ``expected`` key of their own.
A simple example manifest might look like::
[test.html?variant=basic]
type: testharness
[Test something unsupported]
expected: FAIL
[test.html?variant=broken]
expected: ERROR
[test.html?variant=unstable]
disabled: http://test.bugs.example.org/bugs/12345
A more complex manifest with conditional properties might be::
[canvas_test.html]
expected:
if os == "osx": FAIL
if os == "windows" and version == "XP": FAIL
PASS
Note that ``PASS`` in the above works, but is unnecessary; ``PASS``
(or ``OK``) is always the default expectation for (sub)tests.

View file

@ -1,24 +0,0 @@
.. wptrunner documentation master file, created by
sphinx-quickstart on Mon May 19 18:14:20 2014.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to wptrunner's documentation!
=====================================
Contents:
.. toctree::
:maxdepth: 2
usage
expectation
design
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

View file

@ -1,242 +0,0 @@
@ECHO OFF
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
set SPHINXBUILD=sphinx-build
)
set BUILDDIR=_build
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
set I18NSPHINXOPTS=%SPHINXOPTS% .
if NOT "%PAPER%" == "" (
set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
)
if "%1" == "" goto help
if "%1" == "help" (
:help
echo.Please use `make ^<target^>` where ^<target^> is one of
echo. html to make standalone HTML files
echo. dirhtml to make HTML files named index.html in directories
echo. singlehtml to make a single large HTML file
echo. pickle to make pickle files
echo. json to make JSON files
echo. htmlhelp to make HTML files and a HTML help project
echo. qthelp to make HTML files and a qthelp project
echo. devhelp to make HTML files and a Devhelp project
echo. epub to make an epub
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
echo. text to make text files
echo. man to make manual pages
echo. texinfo to make Texinfo files
echo. gettext to make PO message catalogs
echo. changes to make an overview of all changed/added/deprecated items
echo. xml to make Docutils-native XML files
echo. pseudoxml to make pseudoxml-XML files for display purposes
echo. linkcheck to check all external links for integrity
echo. doctest to run all doctests embedded in the documentation if enabled
goto end
)
if "%1" == "clean" (
for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
del /q /s %BUILDDIR%\*
goto end
)
%SPHINXBUILD% 2> nul
if errorlevel 9009 (
echo.
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
echo.installed, then set the SPHINXBUILD environment variable to point
echo.to the full path of the 'sphinx-build' executable. Alternatively you
echo.may add the Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
if "%1" == "html" (
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/html.
goto end
)
if "%1" == "dirhtml" (
%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
goto end
)
if "%1" == "singlehtml" (
%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
goto end
)
if "%1" == "pickle" (
%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the pickle files.
goto end
)
if "%1" == "json" (
%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can process the JSON files.
goto end
)
if "%1" == "htmlhelp" (
%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run HTML Help Workshop with the ^
.hhp project file in %BUILDDIR%/htmlhelp.
goto end
)
if "%1" == "qthelp" (
%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished; now you can run "qcollectiongenerator" with the ^
.qhcp project file in %BUILDDIR%/qthelp, like this:
echo.^> qcollectiongenerator %BUILDDIR%\qthelp\wptrunner.qhcp
echo.To view the help file:
echo.^> assistant -collectionFile %BUILDDIR%\qthelp\wptrunner.qhc
goto end
)
if "%1" == "devhelp" (
%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
if errorlevel 1 exit /b 1
echo.
echo.Build finished.
goto end
)
if "%1" == "epub" (
%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The epub file is in %BUILDDIR%/epub.
goto end
)
if "%1" == "latex" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
if errorlevel 1 exit /b 1
echo.
echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdf" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf
cd %BUILDDIR%/..
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "latexpdfja" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
cd %BUILDDIR%/latex
make all-pdf-ja
cd %BUILDDIR%/..
echo.
echo.Build finished; the PDF files are in %BUILDDIR%/latex.
goto end
)
if "%1" == "text" (
%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The text files are in %BUILDDIR%/text.
goto end
)
if "%1" == "man" (
%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The manual pages are in %BUILDDIR%/man.
goto end
)
if "%1" == "texinfo" (
%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
goto end
)
if "%1" == "gettext" (
%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
goto end
)
if "%1" == "changes" (
%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
if errorlevel 1 exit /b 1
echo.
echo.The overview file is in %BUILDDIR%/changes.
goto end
)
if "%1" == "linkcheck" (
%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
if errorlevel 1 exit /b 1
echo.
echo.Link check complete; look for any errors in the above output ^
or in %BUILDDIR%/linkcheck/output.txt.
goto end
)
if "%1" == "doctest" (
%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
if errorlevel 1 exit /b 1
echo.
echo.Testing of doctests in the sources finished, look at the ^
results in %BUILDDIR%/doctest/output.txt.
goto end
)
if "%1" == "xml" (
%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The XML files are in %BUILDDIR%/xml.
goto end
)
if "%1" == "pseudoxml" (
%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
if errorlevel 1 exit /b 1
echo.
echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
goto end
)
:end

View file

@ -1,238 +0,0 @@
Getting Started
===============
Installing wptrunner
--------------------
The easiest way to install wptrunner is into a virtualenv, using pip::
virtualenv wptrunner
cd wptrunner
source bin/activate
pip install wptrunner
This will install the base dependencies for wptrunner, but not any
extra dependencies required to test against specific browsers. In
order to do this you must use the extra requirements files in
``$VIRTUAL_ENV/requirements/requirements_browser.txt``. For example,
in order to test against Firefox you would have to run::
pip install -r requirements/requirements_firefox.txt
If you intend to work on the code, the ``-e`` option to pip should be
used in combination with a source checkout, i.e. inside a virtual
environment created as above::
git clone https://github.com/w3c/wptrunner.git
cd wptrunner
pip install -e ./
In addition to the dependencies installed by pip, wptrunner requires
a copy of the web-platform-tests repository. This can be located
anywhere on the filesystem, but the easiest option is to put it
under the same parent directory as the wptrunner checkout::
git clone https://github.com/w3c/web-platform-tests.git
It is also necessary to generate a web-platform-tests ``MANIFEST.json``
file. It's recommended to also put that under the same parent directory as
the wptrunner checkout, in a directory named ``meta``::
mkdir meta
cd web-platform-tests
python manifest --path ../meta/MANIFEST.json
The ``MANIFEST.json`` file needs to be regenerated each time the
web-platform-tests checkout is updated. To aid with the update process
there is a tool called ``wptupdate``, which is described in
:ref:`wptupdate-label`.
Running the Tests
-----------------
A test run is started using the ``wptrunner`` command. The command
takes multiple options, of which the following are most significant:
``--product`` (defaults to `firefox`)
The product to test against: `b2g`, `chrome`, `firefox`, or `servo`.
``--binary`` (required if product is `firefox` or `servo`)
The path to a binary file for the product (browser) to test against.
``--webdriver-binary`` (required if product is `chrome`)
The path to a `*driver` binary; e.g., a `chromedriver` binary.
``--certutil-binary`` (required if product is `firefox` [#]_)
The path to a `certutil` binary (for tests that must be run over https).
``--metadata`` (required only when not `using default paths`_)
The path to a directory containing test metadata. [#]_
``--tests`` (required only when not `using default paths`_)
The path to a directory containing a web-platform-tests checkout.
``--prefs-root`` (required only when testing a Firefox binary)
The path to a directory containing Firefox test-harness preferences. [#]_
``--config`` (should default to `wptrunner.default.ini`)
The path to the config (ini) file.
.. [#] The ``--certutil-binary`` option is required when the product is
``firefox`` unless ``--ssl-type=none`` is specified.
.. [#] The ``--metadata`` path is to a directory that contains:
* a ``MANIFEST.json`` file (the web-platform-tests documentation has
instructions on generating this file)
* (optionally) any expectation files (see :ref:`wptupdate-label`)
.. [#] Example ``--prefs-root`` value: ``~/mozilla-central/testing/profiles``.
There are also a variety of other command-line options available; use
``--help`` to list them.
The following examples show how to start wptrunner with various options.
------------------
Starting wptrunner
------------------
The examples below assume the following directory layout,
though no specific folder structure is required::
~/testtwf/wptrunner # wptrunner checkout
~/testtwf/web-platform-tests # web-platform-tests checkout
~/testtwf/meta # metadata
To test a Firefox Nightly build in an OS X environment, you might start
wptrunner using something similar to the following example::
wptrunner --metadata=~/testtwf/meta/ --tests=~/testtwf/web-platform-tests/ \
--binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/dist/Nightly.app/Contents/MacOS/firefox \
--certutil-binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/security/nss/cmd/certutil/certutil \
--prefs-root=~/mozilla-central/testing/profiles
And to test a Chromium build in an OS X environment, you might start
wptrunner using something similar to the following example::
wptrunner --metadata=~/testtwf/meta/ --tests=~/testtwf/web-platform-tests/ \
--binary=~/chromium/src/out/Release/Chromium.app/Contents/MacOS/Chromium \
--webdriver-binary=/usr/local/bin/chromedriver --product=chrome
--------------------
Running test subsets
--------------------
To restrict a test run just to tests in a particular web-platform-tests
subdirectory, specify the directory name in the positional arguments after
the options; for example, run just the tests in the `dom` subdirectory::
wptrunner --metadata=~/testtwf/meta --tests=~/testtwf/web-platform-tests/ \
--binary=/path/to/firefox --certutil-binary=/path/to/certutil \
--prefs-root=/path/to/testing/profiles \
dom
-------------------
Running in parallel
-------------------
To speed up the testing process, use the ``--processes`` option to have
wptrunner run multiple browser instances in parallel. For example, to
have wptrunner attempt to run tests with six browser instances
in parallel, specify ``--processes=6``. But note that behaviour in this
mode is necessarily less deterministic than with ``--processes=1`` (the
default), so there may be more noise in the test results.
-------------------
Using default paths
-------------------
The (otherwise-required) ``--tests`` and ``--metadata`` command-line
options/flags may be omitted if a configuration file is found that
contains a section specifying the ``tests`` and ``metadata`` keys.
See the `Configuration File`_ section for more information about
configuration files, including information about their expected
locations.
The content of the ``wptrunner.default.ini`` default configuration file
makes wptrunner look for tests (that is, a web-platform-tests checkout)
as a subdirectory of the current directory named ``tests``, and for
metadata files in a subdirectory of the current directory named ``meta``.
Output
------
wptrunner uses the :py:mod:`mozlog` package for output. This
structures events such as test results or log messages as JSON objects
that can then be fed to other tools for interpretation. More details
about the message format are given in the
:py:mod:`mozlog` documentation.
By default the raw JSON messages are dumped to stdout. This is
convenient for piping into other tools, but not ideal for humans
reading the output. :py:mod:`mozlog` comes with several other
formatters, which are accessible through command line options. The
general format of these options is ``--log-name=dest``, where ``name``
is the name of the format and ``dest`` is a path to a destination
file, or ``-`` for stdout. The raw JSON data is written by the ``raw``
formatter, so the default setup corresponds to ``--log-raw=-``.
A reasonable output format for humans is provided as ``mach``. So in
order to output the full raw log to a file and a human-readable
summary to stdout, one might pass the options::
--log-raw=output.log --log-mach=-
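The raw log is easy to post-process. For example, this small sketch tallies results from a file produced with ``--log-raw=output.log``, assuming the standard :py:mod:`mozlog` fields ``action``, ``test_end`` and ``status``::

    import json
    from collections import Counter

    counts = Counter()
    with open("output.log") as f:
        for line in f:
            message = json.loads(line)
            if message.get("action") == "test_end":
                counts[message.get("status")] += 1
    print(dict(counts))  # e.g. {'OK': 1200, 'ERROR': 3}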
Configuration File
------------------
wptrunner uses a ``.ini`` file to control some of its
configuration. The file has three sections: ``[products]``,
``[manifest:default]`` and ``[web-platform-tests]``.
``[products]`` is used to
define the set of available products. By default this section is empty
which means that all the products distributed with wptrunner are
enabled (although their dependencies may not be installed). The set
of enabled products can be set by using the product name as the
key. For built-in products the value is empty. It is also possible to
provide the path to a script implementing the browser functionality
e.g.::
[products]
chrome =
netscape4 = path/to/netscape.py
``[manifest:default]`` specifies the default paths for the tests and metadata,
relative to the config file. For example::
[manifest:default]
tests = ~/testtwf/web-platform-tests
metadata = ~/testtwf/meta
``[web-platform-tests]`` is used to set the properties of the upstream
repository when updating the paths. ``remote_url`` specifies the git
url to pull from; ``branch`` the branch to sync against and
``sync_path`` the local path, relative to the configuration file, to
use when checking out the tests e.g.::
[web-platform-tests]
remote_url = https://github.com/w3c/web-platform-tests.git
branch = master
sync_path = sync
A configuration file must contain all the above fields; falling back
to the default values for unspecified fields is not yet supported.
The ``wptrunner`` and ``wptupdate`` commands will use configuration
files in the following order:
* Any path supplied with a ``--config`` flag to the command.
* A file called ``wptrunner.ini`` in the current directory
* The default configuration file (``wptrunner.default.ini`` in the
source directory)
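A minimal sketch of reading such a configuration file (not wptrunner's own loader; uses the Python 2 ``ConfigParser`` module matching this codebase)::

    import ConfigParser  # named ``configparser`` on Python 3

    config = ConfigParser.SafeConfigParser()
    config.read("wptrunner.ini")
    # the default test and metadata paths described above
    print(config.get("manifest:default", "tests"))
    print(config.get("manifest:default", "metadata"))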

View file

@ -1,4 +0,0 @@
html5lib >= 0.99
mozinfo >= 0.7
mozlog >= 3.3
mozdebug >= 0.1

View file

@ -1,7 +0,0 @@
fxos_appgen >= 0.5
mozdevice >= 0.41
gaiatest >= 0.26
marionette_client >= 0.7.10
moznetwork >= 0.24
mozprofile >= 0.21
mozrunner >= 6.1

View file

@ -1,2 +0,0 @@
mozprocess >= 0.19
selenium >= 2.41.0

View file

@ -1,5 +0,0 @@
marionette_driver >= 0.4
mozprofile >= 0.21
mozprocess >= 0.19
mozcrash >= 0.13
mozrunner >= 6.7

View file

@ -1 +0,0 @@
mozprocess >= 0.19

View file

@ -1,73 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.

import glob
import os
import sys
import textwrap

from setuptools import setup, find_packages

here = os.path.split(__file__)[0]

PACKAGE_NAME = 'wptrunner'
PACKAGE_VERSION = '1.14'

# Dependencies
with open(os.path.join(here, "requirements.txt")) as f:
    deps = f.read().splitlines()

# Browser-specific requirements
requirements_files = glob.glob(os.path.join(here, "requirements_*.txt"))

profile_dest = None
dest_exists = False

setup(name=PACKAGE_NAME,
      version=PACKAGE_VERSION,
      description="Harness for running the W3C web-platform-tests against various products",
      author='Mozilla Automation and Testing Team',
      author_email='tools@lists.mozilla.org',
      license='MPL 2.0',
      packages=find_packages(exclude=["tests", "metadata", "prefs"]),
      entry_points={
          'console_scripts': [
              'wptrunner = wptrunner.wptrunner:main',
              'wptupdate = wptrunner.update:main',
          ]
      },
      zip_safe=False,
      platforms=['Any'],
      classifiers=['Development Status :: 4 - Beta',
                   'Environment :: Console',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
                   'Operating System :: OS Independent'],
      package_data={"wptrunner": ["executors/testharness_marionette.js",
                                  "executors/testharness_webdriver.js",
                                  "executors/reftest.js",
                                  "executors/reftest-wait.js",
                                  "testharnessreport.js",
                                  "testharness_runner.html",
                                  "config.json",
                                  "wptrunner.default.ini",
                                  "browsers/server-locations.txt",
                                  "browsers/b2g_setup/*",
                                  "prefs/*"]},
      include_package_data=True,
      data_files=[("requirements", requirements_files)],
      install_requires=deps
      )

if "install" in sys.argv:
    path = os.path.relpath(os.path.join(sys.prefix, "requirements"), os.curdir)
    print textwrap.fill("""In order to use with one of the built-in browser
products, you will need to install the extra dependencies. These are provided
as requirements_[name].txt in the %s directory and can be installed using
e.g.""" % path, 80)
    print """

pip install -r %s/requirements_firefox.txt
""" % path

View file

@@ -1,3 +0,0 @@
[reftest_and_fail.html]
type: reftest
expected: FAIL

View file

@@ -1,3 +0,0 @@
[reftest_cycle_fail.html]
type: reftest
expected: FAIL

View file

@@ -1,3 +0,0 @@
[reftest_match_fail.html]
type: reftest
expected: FAIL

View file

@@ -1,3 +0,0 @@
[reftest_mismatch_fail.html]
type: reftest
expected: FAIL

View file

@@ -1,3 +0,0 @@
[reftest_ref_timeout.html]
type: reftest
expected: TIMEOUT

View file

@@ -1,3 +0,0 @@
[reftest_timeout.html]
type: reftest
expected: TIMEOUT

View file

@@ -1,2 +0,0 @@
prefs: ["browser.display.foreground_color:#FF0000",
"browser.display.background_color:#000000"]

View file

@@ -1,3 +0,0 @@
[test_pref_reset.html]
type: testharness
prefs: [@Reset]

View file

@@ -1,4 +0,0 @@
[test_pref_set.html]
type: testharness
prefs: ["browser.display.foreground_color:#00FF00",
"browser.display.background_color:#000000"]

View file

@@ -1 +0,0 @@
disabled: true

View file

@@ -1,3 +0,0 @@
[testharness_1.html]
type: testharness
disabled: @False

View file

@@ -1 +0,0 @@
tags: [dir-tag-1, dir-tag-2]

View file

@@ -1,5 +0,0 @@
tags: [file-tag]
[testharness_0.html]
type: testharness
tags: [test-tag]

View file

@@ -1,3 +0,0 @@
[testharness_0.html]
type: testharness
tags: [test-1-tag]

View file

@@ -1,5 +0,0 @@
tags: [file-tag]
[testharness_2.html]
type: testharness
tags: [test-2-tag, @Reset]

View file

@@ -1,4 +0,0 @@
[testharness_0.html]
type: testharness
[Test that should fail]
expected: FAIL

View file

@@ -1,3 +0,0 @@
[testharness_error.html]
type: testharness
expected: ERROR

View file

@@ -1,3 +0,0 @@
[testharness_timeout.html]
type: testharness
expected: TIMEOUT

View file

@@ -1,20 +0,0 @@
[general]
tests=/path/to/web-platform-tests/
metadata=/path/to/web-platform-tests/
ssl-type=none
# [firefox]
# binary=/path/to/firefox
# prefs-root=/path/to/gecko-src/testing/profiles/
# [servo]
# binary=/path/to/servo-src/target/release/servo
# exclude=testharness # Because it needs a special testharness.js
# [servodriver]
# binary=/path/to/servo-src/target/release/servo
# exclude=testharness # Because it needs a special testharness.js
# [chrome]
# binary=/path/to/chrome
# webdriver-binary=/path/to/chromedriver

View file

@@ -1,166 +0,0 @@
import ConfigParser
import argparse
import json
import os
import sys
import tempfile
import threading
import time
from StringIO import StringIO
from mozlog import structuredlog, reader
from mozlog.handlers import BaseHandler, StreamHandler, StatusHandler
from mozlog.formatters import MachFormatter
from wptrunner import wptcommandline, wptrunner
here = os.path.abspath(os.path.dirname(__file__))
def setup_wptrunner_logging(logger):
structuredlog.set_default_logger(logger)
wptrunner.logger = logger
wptrunner.wptlogging.setup_stdlib_logger()
class ResultHandler(BaseHandler):
def __init__(self, verbose=False, logger=None):
self.inner = StreamHandler(sys.stdout, MachFormatter())
BaseHandler.__init__(self, self.inner)
self.product = None
self.verbose = verbose
self.logger = logger
self.register_message_handlers("wptrunner-test", {"set-product": self.set_product})
def set_product(self, product):
self.product = product
def __call__(self, data):
if self.product is not None and data["action"] in ["suite_start", "suite_end"]:
# Hack: mozlog sets some internal state to prevent multiple suite_start or
# suite_end messages. We actually want that here (one from the metaharness
# and one from the individual test type harness), so override that internal
# state (a better solution might be to not share loggers, but this works well
# enough)
self.logger._state.suite_started = True
return
if (not self.verbose and
(data["action"] == "process_output" or
data["action"] == "log" and data["level"] not in ["error", "critical"])):
return
if "test" in data:
data = data.copy()
data["test"] = "%s: %s" % (self.product, data["test"])
return self.inner(data)
def test_settings():
return {
"include": "_test",
"manifest-update": "",
"no-capture-stdio": ""
}
def read_config():
parser = ConfigParser.ConfigParser()
parser.read("test.cfg")
rv = {"general":{},
"products":{}}
rv["general"].update(dict(parser.items("general")))
# This only allows one configuration section per product for now
for product in parser.sections():
if product != "general":
dest = rv["products"][product] = {}
for key, value in parser.items(product):
rv["products"][product][key] = value
return rv
def run_tests(product, kwargs):
kwargs["test_paths"]["/_test/"] = {"tests_path": os.path.join(here, "testdata"),
"metadata_path": os.path.join(here, "metadata")}
wptrunner.run_tests(**kwargs)
def settings_to_argv(settings):
rv = []
for name, value in settings.iteritems():
key = "--%s" % name
if not value:
rv.append(key)
elif isinstance(value, list):
for item in value:
rv.extend([key, item])
else:
rv.extend([key, value])
return rv
def set_from_args(settings, args):
if args.test:
settings["include"] = args.test
if args.tags:
settings["tags"] = args.tags
def run(config, args):
logger = structuredlog.StructuredLogger("web-platform-tests")
logger.add_handler(ResultHandler(logger=logger, verbose=args.verbose))
setup_wptrunner_logging(logger)
parser = wptcommandline.create_parser()
logger.suite_start(tests=[])
for product, product_settings in config["products"].iteritems():
if args.product and product not in args.product:
continue
settings = test_settings()
settings.update(config["general"])
settings.update(product_settings)
settings["product"] = product
set_from_args(settings, args)
kwargs = vars(parser.parse_args(settings_to_argv(settings)))
wptcommandline.check_args(kwargs)
logger.send_message("wptrunner-test", "set-product", product)
run_tests(product, kwargs)
logger.send_message("wptrunner-test", "set-product", None)
logger.suite_end()
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true", default=False,
help="verbose log output")
parser.add_argument("--product", action="append",
help="Specific product to include in test run")
parser.add_argument("--pdb", action="store_true",
help="Invoke pdb on uncaught exception")
parser.add_argument("--tag", action="append", dest="tags",
help="tags to select tests")
parser.add_argument("test", nargs="*",
help="Specific tests to include in test run")
return parser
def main():
config = read_config()
args = get_parser().parse_args()
try:
run(config, args)
except Exception:
if args.pdb:
import pdb, traceback
print traceback.format_exc()
pdb.post_mortem()
else:
raise
if __name__ == "__main__":
main()

View file

@@ -1,4 +0,0 @@
<link rel=match href=green.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,3 +0,0 @@
<style>
:root {background-color:green}
</style>

View file

@@ -1,3 +0,0 @@
<style>
:root {background-color:red}
</style>

View file

@@ -1,9 +0,0 @@
<link rel=match href=green.html>
<style>
:root {background-color:red}
</style>
<script>
if (window.location.protocol === "https:") {
document.documentElement.style.backgroundColor = "green";
}
</script>

View file

@@ -1,5 +0,0 @@
<title>Reftest chain that should fail</title>
<link rel=match href=reftest_and_fail_0-ref.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,5 +0,0 @@
<title>Reftest chain that should fail</title>
<link rel=match href=red.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,5 +0,0 @@
<title>Reftest with cycle, all match</title>
<link rel=match href=reftest_cycle_0-ref.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,5 +0,0 @@
<title>OR match that should pass</title>
<link rel=match href=reftest_cycle_1-ref.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,5 +0,0 @@
<title>Reftest with cycle, all match</title>
<link rel=match href=reftest_cycle.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,5 +0,0 @@
<title>Reftest with cycle, fails</title>
<link rel=match href=reftest_cycle_fail_0-ref.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,5 +0,0 @@
<title>Reftest with cycle, fails</title>
<link rel=mismatch href=reftest_cycle_fail.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,5 +0,0 @@
<title>rel=match that should pass</title>
<link rel=match href=green.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,5 +0,0 @@
<title>rel=match that should fail</title>
<link rel=match href=red.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,5 +0,0 @@
<title>rel=mismatch that should pass</title>
<link rel=mismatch href=red.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,5 +0,0 @@
<title>rel=mismatch that should fail</title>
<link rel=mismatch href=green.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,6 +0,0 @@
<title>OR match that should pass</title>
<link rel=match href=red.html>
<link rel=match href=green.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,6 +0,0 @@
<html class="reftest-wait">
<title>rel=match that should time out in the ref</title>
<link rel=match href=reftest_ref_timeout-ref.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,6 +0,0 @@
<html>
<title>rel=match that should time out in the ref</title>
<link rel=match href=reftest_ref_timeout-ref.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,6 +0,0 @@
<html class="reftest-wait">
<title>rel=match that should timeout</title>
<link rel=match href=green.html>
<style>
:root {background-color:green}
</style>

View file

@@ -1,11 +0,0 @@
<title>rel=match that should fail</title>
<link rel=match href=red.html>
<style>
:root {background-color:red}
</style>
<body class="reftest-wait">
<script>
setTimeout(function() {
document.documentElement.style.backgroundColor = "green";
document.body.className = "";
}, 2000);

View file

@@ -1,10 +0,0 @@
<!doctype html>
<title>Example pref test</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<p>Test requires the pref browser.display.foreground_color to be set to #00FF00</p>
<script>
test(function() {
assert_equals(getComputedStyle(document.body).color, "rgb(255, 0, 0)");
}, "Test that pref was set");
</script>

View file

@@ -1,10 +0,0 @@
<!doctype html>
<title>Example pref test</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<p>Test requires the pref browser.display.foreground_color to be set to #00FF00</p>
<script>
test(function() {
assert_equals(getComputedStyle(document.body).color, "rgb(0, 0, 0)");
}, "Test that pref was reset");
</script>

View file

@@ -1,10 +0,0 @@
<!doctype html>
<title>Example pref test</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<p>Test requires the pref browser.display.foreground_color to be set to #FF0000</p>
<script>
test(function() {
assert_equals(getComputedStyle(document.body).color, "rgb(255, 0, 0)");
}, "Test that pref was set");
</script>

View file

@@ -1,10 +0,0 @@
<!doctype html>
<title>Example pref test</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<p>Test requires the pref browser.display.foreground_color to be set to #00FF00</p>
<script>
test(function() {
assert_equals(getComputedStyle(document.body).color, "rgb(0, 255, 0)");
}, "Test that pref was set");
</script>

View file

@@ -1,9 +0,0 @@
<!doctype html>
<title>Test should be enabled</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>
test(function() {
assert_true(true);
}, "Test that should pass");
</script>

View file

@@ -1,9 +0,0 @@
<!doctype html>
<title>Test</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>
test(function() {
assert_true(true);
}, "Test that should pass");
</script>

View file

@@ -1,9 +0,0 @@
<!doctype html>
<title>Test</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>
test(function() {
assert_true(true);
}, "Test that should pass");
</script>

View file

@@ -1,9 +0,0 @@
<!doctype html>
<title>Test</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>
test(function() {
assert_true(true);
}, "Test that should pass");
</script>

View file

@@ -1,10 +0,0 @@
<!doctype html>
<title>Example https test</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>
test(function() {
assert_equals(window.location.protocol, "https:");
}, "Test that file was loaded with the correct protocol");
</script>

View file

@@ -1,9 +0,0 @@
<!doctype html>
<title>Test should be disabled</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>
test(function() {
assert_true(false);
}, "Test that should fail");
</script>

View file

@@ -1,7 +0,0 @@
<!doctype html>
<title>testharness.js test that should error</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>
undefined_function()
</script>

View file

@@ -1,9 +0,0 @@
<!doctype html>
<title>testharness.js test with long timeout</title>
<meta name=timeout content=long>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<script>
var t = async_test("Long timeout test");
setTimeout(t.step_func_done(function() {assert_true(true)}), 15*1000);
</script>

View file

@@ -1,6 +0,0 @@
<!doctype html>
<title>Simple testharness.js usage</title>
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
// This file should time out, obviously

View file

@@ -1,15 +0,0 @@
[pytest]
xfail_strict=true
[tox]
envlist = {py27,pypy}-{base,b2g,chrome,firefox,servo}
[testenv]
deps =
pytest>=2.9
-r{toxinidir}/requirements.txt
chrome: -r{toxinidir}/requirements_chrome.txt
firefox: -r{toxinidir}/requirements_firefox.txt
servo: -r{toxinidir}/requirements_servo.txt
commands = py.test []
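Given this environment matrix, a single combination can be run with
e.g. ``tox -e py27-firefox``.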

View file

@@ -1,11 +0,0 @@
[products]
[web-platform-tests]
remote_url = https://github.com/w3c/web-platform-tests.git
branch = master
sync_path = %(pwd)s/sync
[manifest:default]
tests = %(pwd)s/tests
metadata = %(pwd)s/meta
url_base = /

View file

@@ -1,3 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

View file

@@ -1,33 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""Subpackage where each product is defined. Each product is created by adding a
.py file containing a __wptrunner__ variable in the global scope. This must be
a dictionary with the fields
"product": Name of the product, assumed to be unique.
"browser": String indicating the Browser implementation used to launch that
product.
"executor": Dictionary with keys as supported test types and values as the name
of the Executor implementation that will be used to run that test
type.
"browser_kwargs": String naming function that takes product, binary,
prefs_root and the wptrunner.run_tests kwargs dict as arguments
and returns a dictionary of kwargs to use when creating the
Browser class.
"executor_kwargs": String naming a function that takes http server url and
timeout multiplier and returns kwargs to use when creating
the executor class.
"env_options": String naming a function of no arguments that returns the
arguments passed to the TestEnvironment.
All classes and functions named in the above dict must be imported into the
module global scope.
"""
product_list = ["chrome",
"edge",
"firefox",
"servo",
"servodriver"]

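As a concrete illustration of the contract described in the docstring
above, a minimal hypothetical product module might look like this
(``mybrowser`` is not a real product: it reuses ``NullBrowser`` and the
Servo testharness executor as stand-ins, and the name would also need
to be added to ``product_list`` or enabled via the ``[products]``
config section)::

  # mybrowser.py - hypothetical minimal product module (sketch only)
  from .base import NullBrowser
  from ..executors import executor_kwargs as base_executor_kwargs
  from ..executors.executorservo import ServoTestharnessExecutor  # stand-in

  __wptrunner__ = {"product": "mybrowser",
                   "check_args": "check_args",
                   "browser": "NullBrowser",
                   "executor": {"testharness": "ServoTestharnessExecutor"},
                   "browser_kwargs": "browser_kwargs",
                   "executor_kwargs": "executor_kwargs",
                   "env_options": "env_options"}

  def check_args(**kwargs):
      # Validate any required command-line arguments for this product
      pass

  def browser_kwargs(**kwargs):
      # Keyword arguments used to construct the Browser class
      return {}

  def executor_kwargs(test_type, server_config, cache_manager,
                      run_info_data, **kwargs):
      return base_executor_kwargs(test_type, server_config,
                                  cache_manager, **kwargs)

  def env_options():
      # Options passed to the TestEnvironment
      return {"host": "web-platform.test",
              "bind_hostname": "true"}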
View file

@@ -1,243 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import tempfile
import shutil
import subprocess
import fxos_appgen
import gaiatest
import mozdevice
import moznetwork
import mozrunner
from marionette import expected
from marionette.by import By
from marionette.wait import Wait
from mozprofile import FirefoxProfile, Preferences
from .base import get_free_port, BrowserError, Browser, ExecutorBrowser
from ..executors.executormarionette import MarionetteTestharnessExecutor
from ..hosts import HostsFile, HostsLine
from ..environment import hostnames
here = os.path.split(__file__)[0]
__wptrunner__ = {"product": "b2g",
"check_args": "check_args",
"browser": "B2GBrowser",
"executor": {"testharness": "B2GMarionetteTestharnessExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_options": "env_options"}
def check_args(**kwargs):
pass
def browser_kwargs(test_environment, **kwargs):
return {"prefs_root": kwargs["prefs_root"],
"no_backup": kwargs.get("b2g_no_backup", False)}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 2
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"close_after_done": False}
if test_type == "reftest":
executor_kwargs["cache_manager"] = cache_manager
return executor_kwargs
def env_options():
return {"host": "web-platform.test",
"bind_hostname": "false",
"test_server_port": False}
class B2GBrowser(Browser):
used_ports = set()
init_timeout = 180
def __init__(self, logger, prefs_root, no_backup=False):
Browser.__init__(self, logger)
logger.info("Waiting for device")
subprocess.call(["adb", "wait-for-device"])
self.device = mozdevice.DeviceManagerADB()
self.marionette_port = get_free_port(2828, exclude=self.used_ports)
self.used_ports.add(self.marionette_port)
self.cert_test_app = None
self.runner = None
self.prefs_root = prefs_root
self.no_backup = no_backup
self.backup_path = None
self.backup_paths = []
self.backup_dirs = []
def setup(self):
self.logger.info("Running B2G setup")
self.backup_path = tempfile.mkdtemp()
self.logger.debug("Backing up device to %s" % (self.backup_path,))
if not self.no_backup:
self.backup_dirs = [("/data/local", os.path.join(self.backup_path, "local")),
("/data/b2g/mozilla", os.path.join(self.backup_path, "profile"))]
self.backup_paths = [("/system/etc/hosts", os.path.join(self.backup_path, "hosts"))]
for remote, local in self.backup_dirs:
self.device.getDirectory(remote, local)
for remote, local in self.backup_paths:
self.device.getFile(remote, local)
self.setup_hosts()
def start(self):
profile = FirefoxProfile()
profile.set_preferences({"dom.disable_open_during_load": False,
"marionette.defaultPrefs.enabled": True})
self.logger.debug("Creating device runner")
self.runner = mozrunner.B2GDeviceRunner(profile=profile)
self.logger.debug("Starting device runner")
self.runner.start()
self.logger.debug("Device runner started")
def setup_hosts(self):
host_ip = moznetwork.get_ip()
temp_dir = tempfile.mkdtemp()
hosts_path = os.path.join(temp_dir, "hosts")
remote_path = "/system/etc/hosts"
try:
self.device.getFile("/system/etc/hosts", hosts_path)
with open(hosts_path) as f:
hosts_file = HostsFile.from_file(f)
for canonical_hostname in hostnames:
hosts_file.set_host(HostsLine(host_ip, canonical_hostname))
with open(hosts_path, "w") as f:
hosts_file.to_file(f)
self.logger.info("Installing hosts file")
self.device.remount()
self.device.removeFile(remote_path)
self.device.pushFile(hosts_path, remote_path)
finally:
os.unlink(hosts_path)
os.rmdir(temp_dir)
def load_prefs(self):
prefs_path = os.path.join(self.prefs_root, "prefs_general.js")
if os.path.exists(prefs_path):
preferences = Preferences.read_prefs(prefs_path)
else:
self.logger.warning("Failed to find base prefs file in %s" % prefs_path)
preferences = []
return preferences
def stop(self):
pass
def on_output(self):
raise NotImplementedError
def cleanup(self):
self.logger.debug("Running browser cleanup steps")
self.device.remount()
for remote, local in self.backup_dirs:
self.device.removeDir(remote)
self.device.pushDir(local, remote)
for remote, local in self.backup_paths:
self.device.removeFile(remote)
self.device.pushFile(local, remote)
shutil.rmtree(self.backup_path)
self.device.reboot(wait=True)
def pid(self):
return None
def is_alive(self):
return True
def executor_browser(self):
return B2GExecutorBrowser, {"marionette_port": self.marionette_port}
class B2GExecutorBrowser(ExecutorBrowser):
# The following methods are called from a different process
def __init__(self, *args, **kwargs):
ExecutorBrowser.__init__(self, *args, **kwargs)
import sys, subprocess
self.device = mozdevice.ADBB2G()
self.device.forward("tcp:%s" % self.marionette_port,
"tcp:2828")
self.executor = None
self.marionette = None
self.gaia_device = None
self.gaia_apps = None
def after_connect(self, executor):
self.executor = executor
self.marionette = executor.marionette
self.executor.logger.debug("Running browser.after_connect steps")
self.gaia_apps = gaiatest.GaiaApps(marionette=executor.marionette)
self.executor.logger.debug("Waiting for homescreen to load")
# Moved out of gaia_test temporarily
self.executor.logger.info("Waiting for B2G to be ready")
self.wait_for_homescreen(timeout=60)
self.install_cert_app()
self.use_cert_app()
def install_cert_app(self):
"""Install the container app used to run the tests"""
if fxos_appgen.is_installed("CertTest App"):
self.executor.logger.info("CertTest App is already installed")
return
self.executor.logger.info("Installing CertTest App")
app_path = os.path.join(here, "b2g_setup", "certtest_app.zip")
fxos_appgen.install_app("CertTest App", app_path, marionette=self.marionette)
self.executor.logger.debug("Install complete")
def use_cert_app(self):
"""Start the app used to run the tests"""
self.executor.logger.info("Homescreen loaded")
self.gaia_apps.launch("CertTest App")
def wait_for_homescreen(self, timeout):
self.executor.logger.info("Waiting for home screen to load")
Wait(self.marionette, timeout).until(expected.element_present(
By.CSS_SELECTOR, '#homescreen[loading-state=false]'))
class B2GMarionetteTestharnessExecutor(MarionetteTestharnessExecutor):
def after_connect(self):
self.browser.after_connect(self)
MarionetteTestharnessExecutor.after_connect(self)

View file

@@ -1,160 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import platform
import socket
from abc import ABCMeta, abstractmethod
from ..wptcommandline import require_arg
here = os.path.split(__file__)[0]
def cmd_arg(name, value=None):
prefix = "-" if platform.system() == "Windows" else "--"
rv = prefix + name
if value is not None:
rv += "=" + value
return rv
def get_free_port(start_port, exclude=None):
"""Get the first port number after start_port (inclusive) that is
not currently bound.
:param start_port: Integer port number at which to start testing.
:param exclude: Set of port numbers to skip"""
port = start_port
while True:
if exclude and port in exclude:
port += 1
continue
s = socket.socket()
try:
s.bind(("127.0.0.1", port))
except socket.error:
port += 1
else:
return port
finally:
s.close()
def browser_command(binary, args, debug_info):
if debug_info:
if debug_info.requiresEscapedArgs:
args = [item.replace("&", "\\&") for item in args]
debug_args = [debug_info.path] + debug_info.args
else:
debug_args = []
command = [binary] + args
return debug_args, command
class BrowserError(Exception):
pass
class Browser(object):
__metaclass__ = ABCMeta
process_cls = None
init_timeout = 30
def __init__(self, logger):
"""Abstract class serving as the basis for Browser implementations.
The Browser is used in the TestRunnerManager to start and stop the browser
process, and to check the state of that process. This class also acts as a
context manager, enabling it to do browser-specific setup at the start of
the testrun and cleanup after the run is complete.
:param logger: Structured logger to use for output.
"""
self.logger = logger
def __enter__(self):
self.setup()
return self
def __exit__(self, *args, **kwargs):
self.cleanup()
def setup(self):
"""Used for browser-specific setup that happens at the start of a test run"""
pass
@abstractmethod
def start(self):
"""Launch the browser object and get it into a state where it is ready to run tests"""
pass
@abstractmethod
def stop(self):
"""Stop the running browser process."""
pass
@abstractmethod
def pid(self):
"""pid of the browser process or None if there is no pid"""
pass
@abstractmethod
def is_alive(self):
"""Boolean indicating whether the browser process is still running"""
pass
def setup_ssl(self, hosts):
"""Return a certificate to use for tests requiring ssl that will be trusted by the browser"""
raise NotImplementedError("ssl testing not supported")
def cleanup(self):
"""Browser-specific cleanup that is run after the testrun is finished"""
pass
def executor_browser(self):
"""Returns the ExecutorBrowser subclass for this Browser subclass and the keyword arguments
with which it should be instantiated"""
return ExecutorBrowser, {}
def log_crash(self, process, test):
"""Return a list of dictionaries containing information about crashes that happened
in the browser, or an empty list if no crashes occurred"""
self.logger.crash(process, test)
class NullBrowser(Browser):
def start(self):
"""No-op browser to use in scenarios where the TestRunnerManager shouldn't
actually own the browser process (e.g. Servo where we start one browser
per test)"""
pass
def stop(self):
pass
def pid(self):
return None
def is_alive(self):
return True
def on_output(self, line):
raise NotImplementedError
class ExecutorBrowser(object):
def __init__(self, **kwargs):
"""View of the Browser used by the Executor object.
This is needed because the Executor runs in a child process and
we can't ship Browser instances between processes on Windows.
Typically this will have a few product-specific properties set,
but in some cases it may have more elaborate methods for setting
up the browser from the runner process.
"""
for k, v in kwargs.iteritems():
setattr(self, k, v)
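The lifecycle contract documented above can be summarised with a short
hypothetical driver (``SomeBrowser`` and ``logger`` are stand-ins for a
concrete Browser subclass and a structured logger)::

  browser = SomeBrowser(logger)
  with browser:                      # __enter__ -> setup(); __exit__ -> cleanup()
      browser.start()                # launch, ready to run tests
      assert browser.is_alive()
      cls, kwargs = browser.executor_browser()
      executor_view = cls(**kwargs)  # lightweight view for the executor process
      # ... an executor drives tests against executor_view ...
      browser.stop()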

View file

@@ -1,81 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from .base import Browser, ExecutorBrowser, require_arg
from ..webdriver_server import ChromeDriverServer
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorselenium import (SeleniumTestharnessExecutor,
SeleniumRefTestExecutor)
__wptrunner__ = {"product": "chrome",
"check_args": "check_args",
"browser": "ChromeBrowser",
"executor": {"testharness": "SeleniumTestharnessExecutor",
"reftest": "SeleniumRefTestExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_options": "env_options"}
def check_args(**kwargs):
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(**kwargs):
return {"binary": kwargs["binary"],
"webdriver_binary": kwargs["webdriver_binary"]}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
from selenium.webdriver import DesiredCapabilities
executor_kwargs = base_executor_kwargs(test_type, server_config,
cache_manager, **kwargs)
executor_kwargs["close_after_done"] = True
executor_kwargs["capabilities"] = dict(DesiredCapabilities.CHROME.items())
if kwargs["binary"] is not None:
executor_kwargs["capabilities"]["chromeOptions"] = {"binary": kwargs["binary"]}
return executor_kwargs
def env_options():
return {"host": "web-platform.test",
"bind_hostname": "true"}
class ChromeBrowser(Browser):
"""Chrome is backed by chromedriver, which is supplied through
``wptrunner.webdriver.ChromeDriverServer``.
"""
def __init__(self, logger, binary, webdriver_binary="chromedriver"):
"""Creates a new representation of Chrome. The `binary` argument gives
the browser binary to use for testing."""
Browser.__init__(self, logger)
self.binary = binary
self.server = ChromeDriverServer(self.logger, binary=webdriver_binary)
def start(self):
self.server.start(block=False)
def stop(self):
self.server.stop()
def pid(self):
return self.server.pid
def is_alive(self):
# TODO(ato): This only indicates the driver is alive,
# and doesn't say anything about whether a browser session
# is active.
return self.server.is_alive()
def cleanup(self):
self.stop()
def executor_browser(self):
return ExecutorBrowser, {"webdriver_url": self.server.url}

View file

@@ -1,71 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from .base import Browser, ExecutorBrowser, require_arg
from ..webdriver_server import EdgeDriverServer
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorselenium import (SeleniumTestharnessExecutor,
SeleniumRefTestExecutor)
__wptrunner__ = {"product": "edge",
"check_args": "check_args",
"browser": "EdgeBrowser",
"executor": {"testharness": "SeleniumTestharnessExecutor",
"reftest": "SeleniumRefTestExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_options": "env_options"}
def check_args(**kwargs):
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(**kwargs):
return {"webdriver_binary": kwargs["webdriver_binary"]}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
from selenium.webdriver import DesiredCapabilities
executor_kwargs = base_executor_kwargs(test_type, server_config,
cache_manager, **kwargs)
executor_kwargs["close_after_done"] = True
executor_kwargs["capabilities"] = dict(DesiredCapabilities.EDGE.items())
return executor_kwargs
def env_options():
return {"host": "web-platform.test",
"bind_hostname": "true",
"supports_debugger": False}
class EdgeBrowser(Browser):
used_ports = set()
def __init__(self, logger, webdriver_binary):
Browser.__init__(self, logger)
self.server = EdgeDriverServer(self.logger, binary=webdriver_binary)
self.webdriver_host = "localhost"
self.webdriver_port = self.server.port
def start(self):
print self.server.url
self.server.start()
def stop(self):
self.server.stop()
def pid(self):
return self.server.pid
def is_alive(self):
# TODO(ato): This only indicates the server is alive,
# and doesn't say anything about whether a browser session
# is active.
return self.server.is_alive()
def cleanup(self):
self.stop()
def executor_browser(self):
return ExecutorBrowser, {"webdriver_url": self.server.url}

View file

@@ -1,264 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import platform
import subprocess
import sys
import mozinfo
from mozprocess import ProcessHandler
from mozprofile import FirefoxProfile, Preferences
from mozprofile.permissions import ServerLocations
from mozrunner import FirefoxRunner
from mozcrash import mozcrash
from .base import (get_free_port,
Browser,
ExecutorBrowser,
require_arg,
cmd_arg,
browser_command)
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executormarionette import (MarionetteTestharnessExecutor,
MarionetteRefTestExecutor,
MarionetteWdspecExecutor)
from ..environment import hostnames
here = os.path.join(os.path.split(__file__)[0])
__wptrunner__ = {"product": "firefox",
"check_args": "check_args",
"browser": "FirefoxBrowser",
"executor": {"testharness": "MarionetteTestharnessExecutor",
"reftest": "MarionetteRefTestExecutor",
"wdspec": "MarionetteWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_options": "env_options",
"run_info_extras": "run_info_extras",
"update_properties": "update_properties"}
def check_args(**kwargs):
require_arg(kwargs, "binary")
if kwargs["ssl_type"] != "none":
require_arg(kwargs, "certutil_binary")
def browser_kwargs(**kwargs):
return {"binary": kwargs["binary"],
"prefs_root": kwargs["prefs_root"],
"debug_info": kwargs["debug_info"],
"symbols_path": kwargs["symbols_path"],
"stackwalk_binary": kwargs["stackwalk_binary"],
"certutil_binary": kwargs["certutil_binary"],
"ca_certificate_path": kwargs["ssl_env"].ca_cert_path(),
"e10s": kwargs["gecko_e10s"]}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
executor_kwargs = base_executor_kwargs(test_type, server_config,
cache_manager, **kwargs)
executor_kwargs["close_after_done"] = True
if kwargs["timeout_multiplier"] is None:
if test_type == "reftest":
if run_info_data["debug"] or run_info_data.get("asan"):
executor_kwargs["timeout_multiplier"] = 4
else:
executor_kwargs["timeout_multiplier"] = 2
elif run_info_data["debug"] or run_info_data.get("asan"):
executor_kwargs["timeout_multiplier"] = 3
if test_type == "wdspec":
executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
return executor_kwargs
def env_options():
return {"host": "127.0.0.1",
"external_host": "web-platform.test",
"bind_hostname": "false",
"certificate_domain": "web-platform.test",
"supports_debugger": True}
def run_info_extras(**kwargs):
return {"e10s": kwargs["gecko_e10s"]}
def update_properties():
return ["debug", "e10s", "os", "version", "processor", "bits"], {"debug", "e10s"}
class FirefoxBrowser(Browser):
used_ports = set()
init_timeout = 60
def __init__(self, logger, binary, prefs_root, debug_info=None,
symbols_path=None, stackwalk_binary=None, certutil_binary=None,
ca_certificate_path=None, e10s=False):
Browser.__init__(self, logger)
self.binary = binary
self.prefs_root = prefs_root
self.marionette_port = None
self.runner = None
self.debug_info = debug_info
self.profile = None
self.symbols_path = symbols_path
self.stackwalk_binary = stackwalk_binary
self.ca_certificate_path = ca_certificate_path
self.certutil_binary = certutil_binary
self.e10s = e10s
def start(self):
self.marionette_port = get_free_port(2828, exclude=self.used_ports)
self.used_ports.add(self.marionette_port)
env = os.environ.copy()
env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
locations = ServerLocations(filename=os.path.join(here, "server-locations.txt"))
preferences = self.load_prefs()
self.profile = FirefoxProfile(locations=locations,
preferences=preferences)
self.profile.set_preferences({"marionette.defaultPrefs.enabled": True,
"marionette.defaultPrefs.port": self.marionette_port,
"dom.disable_open_during_load": False,
"network.dns.localDomains": ",".join(hostnames),
"network.proxy.type": 0,
"places.history.enabled": False})
if self.e10s:
self.profile.set_preferences({"browser.tabs.remote.autostart": True})
# Bug 1262954: winxp + e10s, disable hwaccel
if (self.e10s and platform.system() in ("Windows", "Microsoft") and
'5.1' in platform.version()):
self.profile.set_preferences({"layers.acceleration.disabled": True})
if self.ca_certificate_path is not None:
self.setup_ssl()
debug_args, cmd = browser_command(self.binary, [cmd_arg("marionette"), "about:blank"],
self.debug_info)
self.runner = FirefoxRunner(profile=self.profile,
binary=cmd[0],
cmdargs=cmd[1:],
env=env,
process_class=ProcessHandler,
process_args={"processOutputLine": [self.on_output]})
self.logger.debug("Starting Firefox")
self.runner.start(debug_args=debug_args, interactive=self.debug_info and self.debug_info.interactive)
self.logger.debug("Firefox Started")
def load_prefs(self):
prefs_path = os.path.join(self.prefs_root, "prefs_general.js")
if os.path.exists(prefs_path):
preferences = Preferences.read_prefs(prefs_path)
else:
self.logger.warning("Failed to find base prefs file in %s" % prefs_path)
preferences = []
return preferences
def stop(self):
self.logger.debug("Stopping browser")
if self.runner is not None:
try:
self.runner.stop()
except OSError:
# This can happen on Windows if the process is already dead
pass
def pid(self):
if self.runner.process_handler is None:
return None
try:
return self.runner.process_handler.pid
except AttributeError:
return None
def on_output(self, line):
"""Write a line of output from the firefox process to the log"""
self.logger.process_output(self.pid(),
line.decode("utf8", "replace"),
command=" ".join(self.runner.command))
def is_alive(self):
if self.runner:
return self.runner.is_running()
return False
def cleanup(self):
self.stop()
def executor_browser(self):
assert self.marionette_port is not None
return ExecutorBrowser, {"marionette_port": self.marionette_port}
def log_crash(self, process, test):
dump_dir = os.path.join(self.profile.profile, "minidumps")
mozcrash.log_crashes(self.logger,
dump_dir,
symbols_path=self.symbols_path,
stackwalk_binary=self.stackwalk_binary,
process=process,
test=test)
def setup_ssl(self):
"""Create a certificate database to use in the test profile. This is configured
to trust the CA Certificate that has signed the web-platform.test server
certificate."""
self.logger.info("Setting up ssl")
# Make sure the certutil libraries from the source tree are loaded when using a
# local copy of certutil
# TODO: Maybe only set this if certutil won't launch?
env = os.environ.copy()
certutil_dir = os.path.dirname(self.binary)
if mozinfo.isMac:
env_var = "DYLD_LIBRARY_PATH"
elif mozinfo.isUnix:
env_var = "LD_LIBRARY_PATH"
else:
env_var = "PATH"
env[env_var] = (os.path.pathsep.join([certutil_dir, env[env_var]])
if env_var in env else certutil_dir).encode(
sys.getfilesystemencoding() or 'utf-8', 'replace')
def certutil(*args):
cmd = [self.certutil_binary] + list(args)
self.logger.process_output("certutil",
subprocess.check_output(cmd,
env=env,
stderr=subprocess.STDOUT),
" ".join(cmd))
pw_path = os.path.join(self.profile.profile, ".crtdbpw")
with open(pw_path, "w") as f:
# Use empty password for certificate db
f.write("\n")
cert_db_path = self.profile.profile
# Create a new certificate db
certutil("-N", "-d", cert_db_path, "-f", pw_path)
# Add the CA certificate to the database and mark as trusted to issue server certs
certutil("-A", "-d", cert_db_path, "-f", pw_path, "-t", "CT,,",
"-n", "web-platform-tests", "-i", self.ca_certificate_path)
# List all certs in the database
certutil("-L", "-d", cert_db_path)

View file

@@ -1,38 +0,0 @@
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# See /build/pgo/server-locations.txt for documentation on the format
http://localhost:8000 primary
http://web-platform.test:8000
http://www.web-platform.test:8000
http://www1.web-platform.test:8000
http://www2.web-platform.test:8000
http://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8000
http://xn--lve-6lad.web-platform.test:8000
http://web-platform.test:8001
http://www.web-platform.test:8001
http://www1.web-platform.test:8001
http://www2.web-platform.test:8001
http://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8001
http://xn--lve-6lad.web-platform.test:8001
https://web-platform.test:8443
https://www.web-platform.test:8443
https://www1.web-platform.test:8443
https://www2.web-platform.test:8443
https://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8443
https://xn--lve-6lad.web-platform.test:8443
# These are actually ws servers, but until mozprofile is
# fixed we have to pretend that they are http servers
http://web-platform.test:8888
http://www.web-platform.test:8888
http://www1.web-platform.test:8888
http://www2.web-platform.test:8888
http://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8888
http://xn--lve-6lad.web-platform.test:8888

View file

@@ -1,84 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from .base import NullBrowser, ExecutorBrowser, require_arg
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorservo import ServoTestharnessExecutor, ServoRefTestExecutor, ServoWdspecExecutor
here = os.path.join(os.path.split(__file__)[0])
__wptrunner__ = {"product": "servo",
"check_args": "check_args",
"browser": "ServoBrowser",
"executor": {"testharness": "ServoTestharnessExecutor",
"reftest": "ServoRefTestExecutor",
"wdspec": "ServoWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_options": "env_options",
"run_info_extras": "run_info_extras",
"update_properties": "update_properties"}
def check_args(**kwargs):
require_arg(kwargs, "binary")
def browser_kwargs(**kwargs):
return {"binary": kwargs["binary"],
"debug_info": kwargs["debug_info"],
"binary_args": kwargs["binary_args"],
"user_stylesheets": kwargs.get("user_stylesheets"),
"render_backend": kwargs.get("servo_backend"),
"ca_certificate_path": kwargs["ssl_env"].ca_cert_path()}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
rv = base_executor_kwargs(test_type, server_config,
cache_manager, **kwargs)
rv["pause_after_test"] = kwargs["pause_after_test"]
return rv
def env_options():
return {"host": "127.0.0.1",
"external_host": "web-platform.test",
"bind_hostname": "true",
"testharnessreport": "testharnessreport-servo.js",
"supports_debugger": True}
def run_info_extras(**kwargs):
return {"backend": kwargs["servo_backend"]}
def update_properties():
return ["debug", "os", "version", "processor", "bits", "backend"], None
def render_arg(render_backend):
return {"cpu": "--cpu", "webrender": "-w"}[render_backend]
class ServoBrowser(NullBrowser):
def __init__(self, logger, binary, debug_info=None, binary_args=None,
user_stylesheets=None, render_backend="webrender", ca_certificate_path=None):
NullBrowser.__init__(self, logger)
self.binary = binary
self.debug_info = debug_info
self.binary_args = binary_args or []
self.user_stylesheets = user_stylesheets or []
self.render_backend = render_backend
self.ca_certificate_path = ca_certificate_path
def executor_browser(self):
return ExecutorBrowser, {"binary": self.binary,
"debug_info": self.debug_info,
"binary_args": self.binary_args,
"user_stylesheets": self.user_stylesheets,
"render_backend": self.render_backend,
"ca_certificate_path": self.ca_certificate_path}

View file

@@ -1,162 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import subprocess
import tempfile
from mozprocess import ProcessHandler
from .base import Browser, require_arg, get_free_port, browser_command, ExecutorBrowser
from .servo import render_arg
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorservodriver import (ServoWebDriverTestharnessExecutor,
ServoWebDriverRefTestExecutor)
here = os.path.join(os.path.split(__file__)[0])
__wptrunner__ = {"product": "servodriver",
"check_args": "check_args",
"browser": "ServoWebDriverBrowser",
"executor": {"testharness": "ServoWebDriverTestharnessExecutor",
"reftest": "ServoWebDriverRefTestExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_options": "env_options",
"run_info_extras": "run_info_extras",
"update_properties": "update_properties"}
hosts_text = """127.0.0.1 web-platform.test
127.0.0.1 www.web-platform.test
127.0.0.1 www1.web-platform.test
127.0.0.1 www2.web-platform.test
127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
127.0.0.1 xn--lve-6lad.web-platform.test
"""
def check_args(**kwargs):
require_arg(kwargs, "binary")
def browser_kwargs(**kwargs):
return {"binary": kwargs["binary"],
"debug_info": kwargs["debug_info"],
"user_stylesheets": kwargs.get("user_stylesheets"),
"render_backend": kwargs.get("servo_backend")}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data, **kwargs):
rv = base_executor_kwargs(test_type, server_config,
cache_manager, **kwargs)
return rv
def env_options():
return {"host": "127.0.0.1",
"external_host": "web-platform.test",
"bind_hostname": "true",
"testharnessreport": "testharnessreport-servodriver.js",
"supports_debugger": True}
def run_info_extras(**kwargs):
return {"backend": kwargs["servo_backend"]}
def update_properties():
return ["debug", "os", "version", "processor", "bits", "backend"], None
def make_hosts_file():
hosts_fd, hosts_path = tempfile.mkstemp()
with os.fdopen(hosts_fd, "w") as f:
f.write(hosts_text)
return hosts_path
class ServoWebDriverBrowser(Browser):
used_ports = set()
def __init__(self, logger, binary, debug_info=None, webdriver_host="127.0.0.1",
user_stylesheets=None, render_backend="webrender"):
Browser.__init__(self, logger)
self.binary = binary
self.webdriver_host = webdriver_host
self.webdriver_port = None
self.proc = None
self.debug_info = debug_info
self.hosts_path = make_hosts_file()
self.command = None
self.user_stylesheets = user_stylesheets if user_stylesheets else []
self.render_backend = render_backend
def start(self):
self.webdriver_port = get_free_port(4444, exclude=self.used_ports)
self.used_ports.add(self.webdriver_port)
env = os.environ.copy()
env["HOST_FILE"] = self.hosts_path
env["RUST_BACKTRACE"] = "1"
debug_args, command = browser_command(self.binary,
[render_arg(self.render_backend), "--hard-fail",
"--webdriver", str(self.webdriver_port),
"about:blank"],
self.debug_info)
for stylesheet in self.user_stylesheets:
command += ["--user-stylesheet", stylesheet]
self.command = command
self.command = debug_args + self.command
if not self.debug_info or not self.debug_info.interactive:
self.proc = ProcessHandler(self.command,
processOutputLine=[self.on_output],
env=env,
storeOutput=False)
self.proc.run()
else:
self.proc = subprocess.Popen(self.command, env=env)
self.logger.debug("Servo Started")
def stop(self):
self.logger.debug("Stopping browser")
if self.proc is not None:
try:
self.proc.kill()
except OSError:
# This can happen on Windows if the process is already dead
pass
def pid(self):
if self.proc is None:
return None
try:
return self.proc.pid
except AttributeError:
return None
def on_output(self, line):
"""Write a line of output from the process to the log"""
self.logger.process_output(self.pid(),
line.decode("utf8", "replace"),
command=" ".join(self.command))
def is_alive(self):
# This browser has no runner; check the process directly (both
# ProcessHandler and subprocess.Popen provide poll())
if self.proc is not None:
return self.proc.poll() is None
return False
def cleanup(self):
self.stop()
def executor_browser(self):
assert self.webdriver_port is not None
return ExecutorBrowser, {"webdriver_host": self.webdriver_host,
"webdriver_port": self.webdriver_port}

View file

@@ -1,137 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import errno
import socket
import time
import traceback
import urlparse
import mozprocess
from .base import get_free_port, cmd_arg
__all__ = ["SeleniumLocalServer", "ChromedriverLocalServer"]
class LocalServer(object):
used_ports = set()
default_endpoint = "/"
def __init__(self, logger, binary, port=None, endpoint=None):
self.logger = logger
self.binary = binary
self.port = port
self.endpoint = endpoint or self.default_endpoint
if self.port is None:
self.port = get_free_port(4444, exclude=self.used_ports)
self.used_ports.add(self.port)
self.url = "http://127.0.0.1:%i%s" % (self.port, self.endpoint)
self.proc, self.cmd = None, None
def start(self):
self.proc = mozprocess.ProcessHandler(
self.cmd, processOutputLine=self.on_output)
try:
self.proc.run()
except OSError as e:
if e.errno == errno.ENOENT:
raise IOError(
"%s executable not found" % self.binary)
raise
self.logger.debug(
"Waiting for server to become accessible: %s" % self.url)
surl = urlparse.urlparse(self.url)
addr = (surl.hostname, surl.port)
try:
wait_service(addr)
except:
self.logger.error(
"Server was not accessible within the timeout:\n%s" % traceback.format_exc())
raise
else:
self.logger.info("Server listening on port %i" % self.port)
def stop(self):
if hasattr(self.proc, "proc"):
self.proc.kill()
def is_alive(self):
if hasattr(self.proc, "proc"):
exitcode = self.proc.poll()
return exitcode is None
return False
def on_output(self, line):
self.logger.process_output(self.pid,
line.decode("utf8", "replace"),
command=" ".join(self.cmd))
@property
def pid(self):
if hasattr(self.proc, "proc"):
return self.proc.pid
class SeleniumLocalServer(LocalServer):
default_endpoint = "/wd/hub"
def __init__(self, logger, binary, port=None):
LocalServer.__init__(self, logger, binary, port=port)
self.cmd = ["java",
"-jar", self.binary,
"-port", str(self.port)]
def start(self):
self.logger.debug("Starting local Selenium server")
LocalServer.start(self)
def stop(self):
LocalServer.stop(self)
self.logger.info("Selenium server stopped listening")
class ChromedriverLocalServer(LocalServer):
default_endpoint = "/wd/hub"
def __init__(self, logger, binary="chromedriver", port=None, endpoint=None):
LocalServer.__init__(self, logger, binary, port=port, endpoint=endpoint)
# TODO: verbose logging
self.cmd = [self.binary,
cmd_arg("port", str(self.port)) if self.port else "",
cmd_arg("url-base", self.endpoint) if self.endpoint else ""]
def start(self):
self.logger.debug("Starting local chromedriver server")
LocalServer.start(self)
def stop(self):
LocalServer.stop(self)
self.logger.info("chromedriver server stopped listening")
def wait_service(addr, timeout=15):
"""Waits until network service given as a tuple of (host, port) becomes
available or the `timeout` duration is reached, at which point
``socket.error`` is raised."""
end = time.time() + timeout
while end > time.time():
so = socket.socket()
try:
so.connect(addr)
except socket.timeout:
pass
except socket.error as e:
if e[0] != errno.ECONNREFUSED:
raise
else:
return True
finally:
so.close()
time.sleep(0.5)
raise socket.error("Service is unavailable: %s:%i" % addr)

View file

@@ -1,7 +0,0 @@
{"host": "%(host)s",
"ports":{"http":[8000, 8001],
"https":[8443],
"ws":[8888]},
"check_subdomains":false,
"bind_hostname":%(bind_hostname)s,
"ssl":{}}

View file

@@ -1,64 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import ConfigParser
import os
import sys
from collections import OrderedDict
here = os.path.split(__file__)[0]
class ConfigDict(dict):
def __init__(self, base_path, *args, **kwargs):
self.base_path = base_path
dict.__init__(self, *args, **kwargs)
def get_path(self, key, default=None):
if key not in self:
return default
path = self[key]
path = os.path.expanduser(path)
return os.path.abspath(os.path.join(self.base_path, path))
def read(config_path):
config_path = os.path.abspath(config_path)
config_root = os.path.split(config_path)[0]
parser = ConfigParser.SafeConfigParser()
success = parser.read(config_path)
assert config_path in success, success
subns = {"pwd": os.path.abspath(os.path.curdir)}
rv = OrderedDict()
for section in parser.sections():
rv[section] = ConfigDict(config_root)
for key in parser.options(section):
rv[section][key] = parser.get(section, key, False, subns)
return rv
def path(argv=None):
if argv is None:
argv = []
path = None
for i, arg in enumerate(argv):
if arg == "--config":
if i + 1 < len(argv):
path = argv[i + 1]
elif arg.startswith("--config="):
path = arg.split("=", 1)[1]
if path is not None:
break
if path is None:
if os.path.exists("wptrunner.ini"):
path = os.path.abspath("wptrunner.ini")
else:
path = os.path.join(here, "..", "wptrunner.default.ini")
return os.path.abspath(path)
def load():
return read(path(sys.argv))

View file

@@ -1,212 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import multiprocessing
import signal
import socket
import sys
import time
from mozlog import get_default_logger, handlers
from wptlogging import LogLevelRewriter
here = os.path.split(__file__)[0]
serve = None
sslutils = None
hostnames = ["web-platform.test",
"www.web-platform.test",
"www1.web-platform.test",
"www2.web-platform.test",
"xn--n8j6ds53lwwkrqhv28a.web-platform.test",
"xn--lve-6lad.web-platform.test"]
def do_delayed_imports(logger, test_paths):
global serve, sslutils
serve_root = serve_path(test_paths)
sys.path.insert(0, serve_root)
failed = []
try:
from tools.serve import serve
except ImportError:
failed.append("serve")
try:
import sslutils
except ImportError:
failed.append("sslutils")
if failed:
logger.critical(
"Failed to import %s. Ensure that tests path %s contains web-platform-tests" %
(", ".join(failed), serve_root))
sys.exit(1)
def serve_path(test_paths):
return test_paths["/"]["tests_path"]
def get_ssl_kwargs(**kwargs):
if kwargs["ssl_type"] == "openssl":
args = {"openssl_binary": kwargs["openssl_binary"]}
elif kwargs["ssl_type"] == "pregenerated":
args = {"host_key_path": kwargs["host_key_path"],
"host_cert_path": kwargs["host_cert_path"],
"ca_cert_path": kwargs["ca_cert_path"]}
else:
args = {}
return args
def ssl_env(logger, **kwargs):
ssl_env_cls = sslutils.environments[kwargs["ssl_type"]]
return ssl_env_cls(logger, **get_ssl_kwargs(**kwargs))
class TestEnvironmentError(Exception):
pass
class TestEnvironment(object):
def __init__(self, test_paths, ssl_env, pause_after_test, debug_info, options):
"""Context manager that owns the test environment i.e. the http and
websockets servers"""
self.test_paths = test_paths
self.ssl_env = ssl_env
self.server = None
self.config = None
self.external_config = None
self.pause_after_test = pause_after_test
self.test_server_port = options.pop("test_server_port", True)
self.debug_info = debug_info
self.options = options if options is not None else {}
self.cache_manager = multiprocessing.Manager()
self.stash = serve.stash.StashServer()
def __enter__(self):
self.stash.__enter__()
self.ssl_env.__enter__()
self.cache_manager.__enter__()
self.setup_server_logging()
self.config = self.load_config()
serve.set_computed_defaults(self.config)
self.external_config, self.servers = serve.start(self.config, self.ssl_env,
self.get_routes())
if self.options.get("supports_debugger") and self.debug_info and self.debug_info.interactive:
self.ignore_interrupts()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.process_interrupts()
for scheme, servers in self.servers.iteritems():
for port, server in servers:
server.kill()
self.cache_manager.__exit__(exc_type, exc_val, exc_tb)
self.ssl_env.__exit__(exc_type, exc_val, exc_tb)
self.stash.__exit__()
def ignore_interrupts(self):
signal.signal(signal.SIGINT, signal.SIG_IGN)
def process_interrupts(self):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def load_config(self):
default_config_path = os.path.join(serve_path(self.test_paths), "config.default.json")
local_config_path = os.path.join(here, "config.json")
with open(default_config_path) as f:
default_config = json.load(f)
with open(local_config_path) as f:
data = f.read()
local_config = json.loads(data % self.options)
#TODO: allow non-default configuration for ssl
local_config["external_host"] = self.options.get("external_host", None)
local_config["ssl"]["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)
config = serve.merge_json(default_config, local_config)
config["doc_root"] = serve_path(self.test_paths)
if not self.ssl_env.ssl_enabled:
config["ports"]["https"] = [None]
host = self.options.get("certificate_domain", config["host"])
hosts = [host]
hosts.extend("%s.%s" % (item[0], host) for item in serve.get_subdomains(host).values())
key_file, certificate = self.ssl_env.host_cert_path(hosts)
config["key_file"] = key_file
config["certificate"] = certificate
return config
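    def _example_config_layers(self):
        # Illustrative sketch only (not called by the harness): a
        # hypothetical subset of the two layers load_config merges, the
        # defaults shipped with web-platform-tests and wptrunner's local
        # overrides.
        default_config = {"host": "web-platform.test",
                          "ports": {"http": [8000], "https": [8443]}}
        local_config = {"external_host": None,
                        "ssl": {"encrypt_after_connect": False}}
        return serve.merge_json(default_config, local_config)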
def setup_server_logging(self):
server_logger = get_default_logger(component="wptserve")
assert server_logger is not None
log_filter = handlers.LogLevelFilter(lambda x: x, "info")
# Downgrade errors to warnings for the server
log_filter = LogLevelRewriter(log_filter, ["error"], "warning")
server_logger.component_filter = log_filter
try:
# Set as the default logger for wptserve
serve.set_logger(server_logger)
serve.logger = server_logger
except Exception:
# This happens if logging has already been set up for wptserve
pass
def get_routes(self):
route_builder = serve.RoutesBuilder()
for path, format_args, content_type, route in [
("testharness_runner.html", {}, "text/html", "/testharness_runner.html"),
(self.options.get("testharnessreport", "testharnessreport.js"),
{"output": self.pause_after_test}, "text/javascript",
"/resources/testharnessreport.js")]:
path = os.path.normpath(os.path.join(here, path))
route_builder.add_static(path, format_args, content_type, route)
for url_base, paths in self.test_paths.iteritems():
if url_base == "/":
continue
route_builder.add_mount_point(url_base, paths["tests_path"])
if "/" not in self.test_paths:
del route_builder.mountpoint_routes["/"]
return route_builder.get_routes()
def ensure_started(self):
# Pause for a while to ensure that the server has a chance to start
time.sleep(2)
for scheme, servers in self.servers.iteritems():
for port, server in servers:
if self.test_server_port:
s = socket.socket()
try:
s.connect((self.config["host"], port))
except socket.error:
raise EnvironmentError(
"%s server on port %d failed to start" % (scheme, port))
finally:
s.close()
if not server.is_alive():
raise EnvironmentError("%s server on port %d failed to start" % (scheme, port))
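# Illustrative usage sketch only: TestEnvironment is a context manager so
# that the stash, ssl environment, cache manager, and servers are reliably
# torn down. ssl_env_instance is a hypothetical stand-in for the object
# returned by ssl_env(logger, **kwargs).
def _example_environment_usage(test_paths, ssl_env_instance):
    env = TestEnvironment(test_paths, ssl_env_instance,
                          pause_after_test=False, debug_info=None,
                          options={})
    with env:
        env.ensure_started()
        # Run tests against env.external_config here.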

View file

@ -1,8 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from base import (executor_kwargs,
testharness_result_converter,
reftest_result_converter,
TestExecutor)

View file

@ -1,329 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import hashlib
import json
import os
import traceback
import urlparse
from abc import ABCMeta, abstractmethod
from ..testrunner import Stop
here = os.path.split(__file__)[0]
def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
timeout_multiplier = kwargs["timeout_multiplier"]
if timeout_multiplier is None:
timeout_multiplier = 1
executor_kwargs = {"server_config": server_config,
"timeout_multiplier": timeout_multiplier,
"debug_info": kwargs["debug_info"]}
if test_type == "reftest":
executor_kwargs["screenshot_cache"] = cache_manager.dict()
return executor_kwargs
def strip_server(url):
"""Remove the scheme and netloc from a url, leaving only the path and any query
or fragment.
url - the url to strip
e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
url_parts = list(urlparse.urlsplit(url))
url_parts[0] = ""
url_parts[1] = ""
return urlparse.urlunsplit(url_parts)
class TestharnessResultConverter(object):
harness_codes = {0: "OK",
1: "ERROR",
2: "TIMEOUT"}
test_codes = {0: "PASS",
1: "FAIL",
2: "TIMEOUT",
3: "NOTRUN"}
def __call__(self, test, result):
"""Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
result_url, status, message, stack, subtest_results = result
assert result_url == test.url, ("Got results from %s, expected %s" %
(result_url, test.url))
harness_result = test.result_cls(self.harness_codes[status], message)
return (harness_result,
[test.subtest_result_cls(name, self.test_codes[status], message, stack)
for name, status, message, stack in subtest_results])
testharness_result_converter = TestharnessResultConverter()
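# Illustrative sketch only: the raw result tuple the converter consumes is
# (url, harness status code, message, stack, [subtest tuples]); the values
# below are hypothetical.
_example_raw_result = ("/example/test.html",
                       0,      # harness status 0 -> "OK"
                       None,   # message
                       None,   # stack
                       [("subtest 1", 0, None, None)])  # subtest 0 -> "PASS"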
def reftest_result_converter(self, test, result):
return (test.result_cls(result["status"], result["message"],
extra=result.get("extra")), [])
def pytest_result_converter(self, test, data):
harness_data, subtest_data = data
if subtest_data is None:
subtest_data = []
harness_result = test.result_cls(*harness_data)
subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
return (harness_result, subtest_results)
class ExecutorException(Exception):
def __init__(self, status, message):
self.status = status
self.message = message
class TestExecutor(object):
__metaclass__ = ABCMeta
test_type = None
convert_result = None
def __init__(self, browser, server_config, timeout_multiplier=1,
debug_info=None):
"""Abstract Base class for object that actually executes the tests in a
specific browser. Typically there will be a different TestExecutor
subclass for each test type and method of executing tests.
:param browser: ExecutorBrowser instance providing properties of the
browser that will be tested.
:param server_config: Dictionary of wptserve server configuration of the
form stored in TestEnvironment.external_config
:param timeout_multiplier: Multiplier relative to base timeout to use
when setting test timeout.
"""
self.runner = None
self.browser = browser
self.server_config = server_config
self.timeout_multiplier = timeout_multiplier
self.debug_info = debug_info
self.last_environment = {"protocol": "http",
"prefs": {}}
self.protocol = None # This must be set in subclasses
@property
def logger(self):
"""StructuredLogger for this executor"""
if self.runner is not None:
return self.runner.logger
def setup(self, runner):
"""Run steps needed before tests can be started e.g. connecting to
browser instance
:param runner: TestRunner instance that is going to run the tests"""
self.runner = runner
if self.protocol is not None:
self.protocol.setup(runner)
def teardown(self):
"""Run cleanup steps after tests have finished"""
if self.protocol is not None:
self.protocol.teardown()
def run_test(self, test):
"""Run a particular test.
:param test: The test to run"""
if test.environment != self.last_environment:
self.on_environment_change(test.environment)
try:
result = self.do_test(test)
except Exception as e:
result = self.result_from_exception(test, e)
if result is Stop:
return result
# log result of parent test
if result[0].status == "ERROR":
self.logger.debug(result[0].message)
self.last_environment = test.environment
self.runner.send_message("test_ended", test, result)
def server_url(self, protocol):
return "%s://%s:%s" % (protocol,
self.server_config["host"],
self.server_config["ports"][protocol][0])
def test_url(self, test):
return urlparse.urljoin(self.server_url(test.environment["protocol"]), test.url)
@abstractmethod
def do_test(self, test):
"""Test-type and protocol specific implementation of running a
specific test.
:param test: The test to run."""
pass
def on_environment_change(self, new_environment):
pass
def result_from_exception(self, test, e):
if hasattr(e, "status") and e.status in test.result_cls.statuses:
status = e.status
else:
status = "ERROR"
message = unicode(getattr(e, "message", ""))
if message:
message += "\n"
message += traceback.format_exc(e)
return test.result_cls(status, message), []
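# Minimal illustrative subclass sketch (not used by the harness): a concrete
# executor declares its test type, a result converter, and do_test; the body
# here is a hypothetical stub.
class _ExampleExecutor(TestExecutor):
    test_type = "testharness"
    convert_result = testharness_result_converter

    def do_test(self, test):
        # A real executor would drive the browser here and return
        # self.convert_result(test, raw_result).
        raise NotImplementedError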
class TestharnessExecutor(TestExecutor):
convert_result = testharness_result_converter
class RefTestExecutor(TestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
debug_info=None):
TestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.screenshot_cache = screenshot_cache
class RefTestImplementation(object):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
# Cache of url:(screenshot hash, screenshot). Typically the
# screenshot is None, but we set this value if a test fails
# and the screenshot was taken from the cache so that we may
# retrieve the screenshot from the cache directly in the future
self.screenshot_cache = self.executor.screenshot_cache
self.message = None
@property
def logger(self):
return self.executor.logger
def get_hash(self, test, viewport_size, dpi):
timeout = test.timeout * self.timeout_multiplier
key = (test.url, viewport_size, dpi)
if key not in self.screenshot_cache:
success, data = self.executor.screenshot(test, viewport_size, dpi)
if not success:
return False, data
screenshot = data
hash_value = hashlib.sha1(screenshot).hexdigest()
self.screenshot_cache[key] = (hash_value, None)
rv = (hash_value, screenshot)
else:
rv = self.screenshot_cache[key]
self.message.append("%s %s" % (test.url, rv[0]))
return True, rv
def is_pass(self, lhs_hash, rhs_hash, relation):
assert relation in ("==", "!=")
self.message.append("Testing %s %s %s" % (lhs_hash, relation, rhs_hash))
return ((relation == "==" and lhs_hash == rhs_hash) or
(relation == "!=" and lhs_hash != rhs_hash))
def run_test(self, test):
viewport_size = test.viewport_size
dpi = test.dpi
self.message = []
# Depth-first search of reference tree, with the goal
# of reaching a leaf node with only pass results
stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
while stack:
hashes = [None, None]
screenshots = [None, None]
nodes, relation = stack.pop()
for i, node in enumerate(nodes):
success, data = self.get_hash(node, viewport_size, dpi)
if success is False:
return {"status": data[0], "message": data[1]}
hashes[i], screenshots[i] = data
if self.is_pass(hashes[0], hashes[1], relation):
if nodes[1].references:
stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
else:
# We passed
return {"status":"PASS", "message": None}
# We failed, so construct a failure message
for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
if screenshot is None:
success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
if success:
screenshots[i] = screenshot
log_data = [{"url": nodes[0].url, "screenshot": screenshots[0]}, relation,
{"url": nodes[1].url, "screenshot": screenshots[1]}]
return {"status": "FAIL",
"message": "\n".join(self.message),
"extra": {"reftest_screenshots": log_data}}
def retake_screenshot(self, node, viewport_size, dpi):
success, data = self.executor.screenshot(node, viewport_size, dpi)
if not success:
return False, data
key = (node.url, viewport_size, dpi)
hash_val, _ = self.screenshot_cache[key]
self.screenshot_cache[key] = hash_val, data
return True, data
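# Illustrative sketch only (not used by the harness): walks every edge of
# the reference tree in the same depth-first order run_test uses, whereas
# run_test only descends past comparisons that pass.
def _example_reference_walk(test):
    stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
    while stack:
        (lhs, rhs), relation = stack.pop()
        yield lhs.url, relation, rhs.url
        stack.extend(((rhs, item[0]), item[1]) for item in reversed(rhs.references))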
class WdspecExecutor(TestExecutor):
convert_result = pytest_result_converter
class Protocol(object):
def __init__(self, executor, browser):
self.executor = executor
self.browser = browser
@property
def logger(self):
return self.executor.logger
def setup(self, runner):
pass
def teardown(self):
pass
def wait(self):
pass

View file

@ -1,561 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import hashlib
import httplib
import os
import socket
import threading
import time
import traceback
import urlparse
import uuid
from collections import defaultdict
from ..wpttest import WdspecResult, WdspecSubtestResult
errors = None
marionette = None
pytestrunner = None
webdriver = None
here = os.path.split(__file__)[0]
from .base import (ExecutorException,
Protocol,
RefTestExecutor,
RefTestImplementation,
TestExecutor,
TestharnessExecutor,
testharness_result_converter,
reftest_result_converter,
strip_server,
WdspecExecutor)
from ..testrunner import Stop
from ..webdriver_server import GeckoDriverServer
# Extra timeout to use after internal test timeout at which the harness
# should force a timeout
extra_timeout = 5 # seconds
def do_delayed_imports():
global errors, marionette
# Marionette client used to be called marionette, recently it changed
# to marionette_driver for unfathomable reasons
try:
import marionette
from marionette import errors
except ImportError:
from marionette_driver import marionette, errors
class MarionetteProtocol(Protocol):
def __init__(self, executor, browser):
do_delayed_imports()
Protocol.__init__(self, executor, browser)
self.marionette = None
self.marionette_port = browser.marionette_port
def setup(self, runner):
"""Connect to browser via Marionette."""
Protocol.setup(self, runner)
self.logger.debug("Connecting to Marionette on port %i" % self.marionette_port)
self.marionette = marionette.Marionette(host='localhost',
port=self.marionette_port,
socket_timeout=None)
# XXX Move this timeout somewhere
self.logger.debug("Waiting for Marionette connection")
while True:
success = self.marionette.wait_for_port(60)
# When running in a debugger, wait indefinitely for Firefox to start
if success or self.executor.debug_info is None:
break
session_started = False
if success:
try:
self.logger.debug("Starting Marionette session")
self.marionette.start_session()
except Exception as e:
self.logger.warning("Starting marionette session failed: %s" % e)
else:
self.logger.debug("Marionette session started")
session_started = True
if not success or not session_started:
self.logger.warning("Failed to connect to Marionette")
self.executor.runner.send_message("init_failed")
else:
try:
self.after_connect()
except Exception:
self.logger.warning("Post-connection steps failed")
self.logger.error(traceback.format_exc())
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
try:
self.marionette.delete_session()
except Exception:
# This is typically because the session never started
pass
del self.marionette
@property
def is_alive(self):
"""Check if the Marionette connection is still active."""
try:
self.marionette.current_window_handle
except Exception:
return False
return True
def after_connect(self):
self.load_runner("http")
def load_runner(self, protocol):
# Check if we previously had a test window open, and if we did make sure it's closed
self.marionette.execute_script("if (window.wrappedJSObject.win) {window.wrappedJSObject.win.close()}")
url = urlparse.urljoin(self.executor.server_url(protocol), "/testharness_runner.html")
self.logger.debug("Loading %s" % url)
try:
self.marionette.navigate(url)
except Exception as e:
self.logger.critical(
    "Loading initial page %s failed. Ensure that there "
    "are no other programs bound to this port and that "
    "your firewall rules or network setup do not "
    "prevent access.\n%s" % (url, traceback.format_exc(e)))
self.marionette.execute_script(
"document.title = '%s'" % threading.current_thread().name.replace("'", '"'))
def wait(self):
socket_timeout = self.marionette.client.sock.gettimeout()
if socket_timeout:
self.marionette.set_script_timeout((socket_timeout / 2) * 1000)
while True:
try:
self.marionette.execute_async_script("")
except errors.ScriptTimeoutException:
self.logger.debug("Script timed out")
pass
except (socket.timeout, IOError):
self.logger.debug("Socket closed")
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
def on_environment_change(self, old_environment, new_environment):
# Unset all the old prefs
for name in old_environment.get("prefs", {}).iterkeys():
value = self.executor.original_pref_values[name]
if value is None:
self.clear_user_pref(name)
else:
self.set_pref(name, value)
for name, value in new_environment.get("prefs", {}).iteritems():
self.executor.original_pref_values[name] = self.get_pref(name)
self.set_pref(name, value)
def set_pref(self, name, value):
if value.lower() not in ("true", "false"):
try:
int(value)
except ValueError:
value = "'%s'" % value
else:
value = value.lower()
self.logger.info("Setting pref %s (%s)" % (name, value))
script = """
let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
.getService(Components.interfaces.nsIPrefBranch);
let pref = '%s';
let type = prefInterface.getPrefType(pref);
let value = %s;
switch(type) {
case prefInterface.PREF_STRING:
prefInterface.setCharPref(pref, value);
break;
case prefInterface.PREF_BOOL:
prefInterface.setBoolPref(pref, value);
break;
case prefInterface.PREF_INT:
prefInterface.setIntPref(pref, value);
break;
}
""" % (name, value)
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
self.marionette.execute_script(script)
def clear_user_pref(self, name):
self.logger.info("Clearing pref %s" % (name))
script = """
let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
.getService(Components.interfaces.nsIPrefBranch);
let pref = '%s';
prefInterface.clearUserPref(pref);
""" % name
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
self.marionette.execute_script(script)
def get_pref(self, name):
script = """
let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
.getService(Components.interfaces.nsIPrefBranch);
let pref = '%s';
let type = prefInterface.getPrefType(pref);
switch(type) {
case prefInterface.PREF_STRING:
return prefInterface.getCharPref(pref);
case prefInterface.PREF_BOOL:
return prefInterface.getBoolPref(pref);
case prefInterface.PREF_INT:
return prefInterface.getIntPref(pref);
case prefInterface.PREF_INVALID:
return null;
}
""" % name
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
return self.marionette.execute_script(script)
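# Illustrative sketch only, mirroring set_pref's value coercion: booleans
# are lowercased, integers pass through, and anything else is quoted as a
# string literal before substitution into the chrome script.
def _example_pref_literal(value):
    if value.lower() in ("true", "false"):
        return value.lower()
    try:
        int(value)
    except ValueError:
        return "'%s'" % value
    return value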
class RemoteMarionetteProtocol(Protocol):
def __init__(self, executor, browser):
do_delayed_imports()
Protocol.__init__(self, executor, browser)
self.session = None
self.webdriver_binary = executor.webdriver_binary
self.marionette_port = browser.marionette_port
self.server = None
def setup(self, runner):
"""Connect to browser via the Marionette HTTP server."""
try:
self.server = GeckoDriverServer(
self.logger, self.marionette_port, binary=self.webdriver_binary)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.logger.info(
"Establishing new WebDriver session with %s" % self.server.url)
self.session = webdriver.Session(
self.server.host, self.server.port, self.server.base_path)
except Exception:
self.logger.error(traceback.format_exc())
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
try:
if self.session.session_id is not None:
self.session.end()
except Exception:
pass
if self.server is not None and self.server.is_alive:
self.server.stop()
@property
def is_alive(self):
"""Test that the Marionette connection is still alive.
Because the remote communication happens over HTTP we need to
make an explicit request to the remote. It is allowed for
WebDriver spec tests to not have a WebDriver session, since this
may be what is tested.
An HTTP request to an invalid path that results in a 404 is
proof enough to us that the server is alive and kicking.
"""
conn = httplib.HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
class ExecuteAsyncScriptRun(object):
def __init__(self, logger, func, marionette, url, timeout):
self.logger = logger
self.result = (None, None)
self.marionette = marionette
self.func = func
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
if timeout is not None:
self.marionette.set_script_timeout((timeout + extra_timeout) * 1000)
else:
# We just want it to never time out, really, but marionette doesn't
# make that possible. It also seems to time out immediately if the
# timeout is set too high. This works at least.
self.marionette.set_script_timeout(2**31 - 1)
except IOError:
self.logger.error("Lost marionette connection before starting test")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
if timeout is not None:
wait_timeout = timeout + 2 * extra_timeout
else:
wait_timeout = None
flag = self.result_flag.wait(wait_timeout)
if self.result[1] is None:
self.logger.debug("Timed out waiting for a result")
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.marionette, self.url, self.timeout)
except errors.ScriptTimeoutException:
self.logger.debug("Got a marionette timeout")
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, IOError):
# This can happen on a crash
# Also, should check after the test if the firefox process is still running
# and otherwise ignore any other result and set it to crash
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("ERROR", e)
finally:
self.result_flag.set()
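# Illustrative usage sketch only (get_title is a hypothetical callable):
# ExecuteAsyncScriptRun runs a marionette call on a worker thread so the
# harness can enforce its own timeout on top of marionette's script timeout.
def _example_async_run(logger, marionette, url):
    def get_title(marionette, url, timeout):
        marionette.navigate(url)
        return marionette.execute_script("return document.title;")
    return ExecuteAsyncScriptRun(logger, get_title, marionette, url, 10).run()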
class MarionetteTestharnessExecutor(TestharnessExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, debug_info=None, **kwargs):
"""Marionette-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = MarionetteProtocol(self, browser)
with open(os.path.join(here, "testharness_marionette.js")) as f:
    self.script = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
self.original_pref_values = {}
if marionette is None:
do_delayed_imports()
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
self.protocol.on_environment_change(self.last_environment, new_environment)
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.load_runner(new_environment["protocol"])
def do_test(self, test):
timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
else None)
success, data = ExecuteAsyncScriptRun(self.logger,
self.do_testharness,
self.protocol.marionette,
self.test_url(test),
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, marionette, url, timeout):
if self.close_after_done:
marionette.execute_script("if (window.wrappedJSObject.win) {window.wrappedJSObject.win.close()}")
if timeout is not None:
timeout_ms = str(timeout * 1000)
else:
timeout_ms = "null"
script = self.script % {"abs_url": url,
"url": strip_server(url),
"window_id": self.window_id,
"timeout_multiplier": self.timeout_multiplier,
"timeout": timeout_ms,
"explicit_timeout": timeout is None}
rv = marionette.execute_async_script(script, new_sandbox=False)
return rv
class MarionetteRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, **kwargs):
"""Marionette-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = MarionetteProtocol(self, browser)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
self.original_pref_values = {}
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "reftest-wait.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
self.protocol.on_environment_change(self.last_environment, new_environment)
def do_test(self, test):
if self.close_after_done and self.has_window:
self.protocol.marionette.close()
self.protocol.marionette.switch_to_window(
self.protocol.marionette.window_handles[-1])
self.has_window = False
if not self.has_window:
self.protocol.marionette.execute_script(self.script)
self.protocol.marionette.switch_to_window(self.protocol.marionette.window_handles[-1])
self.has_window = True
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None
test_url = self.test_url(test)
return ExecuteAsyncScriptRun(self.logger,
self._screenshot,
self.protocol.marionette,
test_url,
timeout).run()
def _screenshot(self, marionette, url, timeout):
marionette.navigate(url)
marionette.execute_async_script(self.wait_script)
screenshot = marionette.screenshot()
# strip off the "data:image/png;base64," prefix
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
class WdspecRun(object):
def __init__(self, func, session, path, timeout):
self.func = func
self.result = (None, None)
self.session = session
self.path = path
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
"""Runs function in a thread and interrupts it if it exceeds the
given timeout. Returns (True, (Result, [SubtestResult ...])) in
case of success, or (False, (status, extra information)) in the
event of failure.
"""
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(self.timeout)
if self.result[1] is None:
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.path, self.timeout)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("ERROR", message)
finally:
self.result_flag.set()
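# Illustrative usage sketch only, mirroring MarionetteWdspecExecutor.do_test:
# the callable receives (session, path, timeout) and run() wraps its return
# value in the (success, data) pair described in the docstring above.
# no_op is a hypothetical stand-in for pytestrunner.run-based callables.
def _example_wdspec_run(session, path):
    def no_op(session, path, timeout):
        return ("OK", None), []
    return WdspecRun(no_op, session, path, 25).run()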
class MarionetteWdspecExecutor(WdspecExecutor):
def __init__(self, browser, server_config, webdriver_binary,
timeout_multiplier=1, close_after_done=True, debug_info=None):
self.do_delayed_imports()
WdspecExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.webdriver_binary = webdriver_binary
self.protocol = RemoteMarionetteProtocol(self, browser)
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session,
test.path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session, path, timeout):
harness_result = ("OK", None)
subtest_results = pytestrunner.run(path, session, timeout=timeout)
return (harness_result, subtest_results)
def do_delayed_imports(self):
global pytestrunner, webdriver
from . import pytestrunner
import webdriver

View file

@ -1,263 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import socket
import sys
import threading
import time
import traceback
import urlparse
import uuid
from .base import (ExecutorException,
Protocol,
RefTestExecutor,
RefTestImplementation,
TestExecutor,
TestharnessExecutor,
testharness_result_converter,
reftest_result_converter,
strip_server)
from ..testrunner import Stop
here = os.path.split(__file__)[0]
webdriver = None
exceptions = None
RemoteConnection = None
extra_timeout = 5
def do_delayed_imports():
global webdriver
global exceptions
global RemoteConnection
from selenium import webdriver
from selenium.common import exceptions
from selenium.webdriver.remote.remote_connection import RemoteConnection
class SeleniumProtocol(Protocol):
def __init__(self, executor, browser, capabilities, **kwargs):
do_delayed_imports()
Protocol.__init__(self, executor, browser)
self.capabilities = capabilities
self.url = browser.webdriver_url
self.webdriver = None
def setup(self, runner):
"""Connect to browser via Selenium's WebDriver implementation."""
self.runner = runner
self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
session_started = False
try:
self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
resolve_ip=False),
desired_capabilities=self.capabilities)
except:
self.logger.warning(
"Connecting to Selenium failed:\n%s" % traceback.format_exc())
else:
self.logger.debug("Selenium session started")
session_started = True
if not session_started:
self.logger.warning("Failed to connect to Selenium")
self.executor.runner.send_message("init_failed")
else:
try:
self.after_connect()
except:
print >> sys.stderr, traceback.format_exc()
self.logger.warning(
    "Failed to navigate to initial page")
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
self.logger.debug("Hanging up on Selenium session")
try:
self.webdriver.quit()
except:
pass
del self.webdriver
def is_alive(self):
try:
# Get a simple property over the connection
self.webdriver.current_window_handle
# TODO what exception?
except (socket.timeout, exceptions.ErrorInResponseException):
return False
return True
def after_connect(self):
self.load_runner("http")
def load_runner(self, protocol):
url = urlparse.urljoin(self.executor.server_url(protocol),
"/testharness_runner.html")
self.logger.debug("Loading %s" % url)
self.webdriver.get(url)
self.webdriver.execute_script("document.title = '%s'" %
threading.current_thread().name.replace("'", '"'))
def wait(self):
while True:
try:
self.webdriver.execute_async_script("");
except exceptions.TimeoutException:
pass
except (socket.timeout, exceptions.NoSuchWindowException,
exceptions.ErrorInResponseException, IOError):
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
class SeleniumRun(object):
def __init__(self, logger, func, webdriver, url, timeout):
    self.logger = logger
    self.func = func
self.result = None
self.webdriver = webdriver
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
timeout = self.timeout
try:
# Selenium's set_script_timeout takes seconds, not milliseconds
self.webdriver.set_script_timeout(timeout + extra_timeout)
except exceptions.ErrorInResponseException:
self.logger.error("Lost webdriver connection")
return Stop
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(timeout + 2 * extra_timeout)
if self.result is None:
assert not flag
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.webdriver, self.url, self.timeout)
except exceptions.TimeoutException:
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, exceptions.ErrorInResponseException):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("ERROR", e)
finally:
self.result_flag.set()
class SeleniumTestharnessExecutor(TestharnessExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None):
"""Selenium-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser, capabilities)
with open(os.path.join(here, "testharness_webdriver.js")) as f:
self.script = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
def is_alive(self):
return self.protocol.is_alive()
def on_protocol_change(self, new_protocol):
self.protocol.load_runner(new_protocol)
def do_test(self, test):
url = self.test_url(test)
success, data = SeleniumRun(self.logger,
                            self.do_testharness,
                            self.protocol.webdriver,
                            url,
                            test.timeout * self.timeout_multiplier).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, webdriver, url, timeout):
return webdriver.execute_async_script(
self.script % {"abs_url": url,
"url": strip_server(url),
"window_id": self.window_id,
"timeout_multiplier": self.timeout_multiplier,
"timeout": timeout * 1000})
class SeleniumRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, capabilities=None):
"""Selenium WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = SeleniumProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.close_after_done = close_after_done
self.has_window = False
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
self.logger.info("Test requires OS-level window focus")
self.protocol.webdriver.set_window_size(600, 600)
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
return SeleniumRun(self.logger,
                   self._screenshot,
                   self.protocol.webdriver,
                   self.test_url(test),
                   test.timeout).run()
def _screenshot(self, webdriver, url, timeout):
webdriver.get(url)
webdriver.execute_async_script(self.wait_script)
screenshot = webdriver.get_screenshot_as_base64()
# strip off the "data:image/png;base64," prefix
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot

View file

@ -1,372 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import base64
import hashlib
import httplib
import json
import os
import subprocess
import tempfile
import threading
import traceback
import urlparse
import uuid
from collections import defaultdict
from mozprocess import ProcessHandler
from .base import (ExecutorException,
Protocol,
RefTestImplementation,
testharness_result_converter,
reftest_result_converter,
WdspecExecutor)
from .process import ProcessTestExecutor
from ..browsers.base import browser_command
from ..wpttest import WdspecResult, WdspecSubtestResult
from ..webdriver_server import ServoDriverServer
from .executormarionette import WdspecRun
pytestrunner = None
render_arg = None
webdriver = None
extra_timeout = 5 # seconds
def do_delayed_imports():
global render_arg
from ..browsers.servo import render_arg
hosts_text = """127.0.0.1 web-platform.test
127.0.0.1 www.web-platform.test
127.0.0.1 www1.web-platform.test
127.0.0.1 www2.web-platform.test
127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
127.0.0.1 xn--lve-6lad.web-platform.test
"""
def make_hosts_file():
hosts_fd, hosts_path = tempfile.mkstemp()
with os.fdopen(hosts_fd, "w") as f:
f.write(hosts_text)
return hosts_path
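# Illustrative usage sketch only: Servo picks up the generated hosts file
# through the HOST_FILE environment variable, so it is threaded into the
# child process environment rather than installed system-wide.
def _example_hosts_env():
    env = os.environ.copy()
    env["HOST_FILE"] = make_hosts_file()
    return env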
class ServoTestharnessExecutor(ProcessTestExecutor):
convert_result = testharness_result_converter
def __init__(self, browser, server_config, timeout_multiplier=1, debug_info=None,
pause_after_test=False):
do_delayed_imports()
ProcessTestExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.pause_after_test = pause_after_test
self.result_data = None
self.result_flag = None
self.protocol = Protocol(self, browser)
self.hosts_path = make_hosts_file()
def teardown(self):
try:
os.unlink(self.hosts_path)
except OSError:
pass
ProcessTestExecutor.teardown(self)
def do_test(self, test):
self.result_data = None
self.result_flag = threading.Event()
args = [render_arg(self.browser.render_backend), "--hard-fail", "-u", "Servo/wptrunner",
"-Z", "replace-surrogates", "-z", self.test_url(test)]
for stylesheet in self.browser.user_stylesheets:
args += ["--user-stylesheet", stylesheet]
for pref, value in test.environment.get('prefs', {}).iteritems():
args += ["--pref", "%s=%s" % (pref, value)]
if self.browser.ca_certificate_path:
args += ["--certificate-path", self.browser.ca_certificate_path]
args += self.browser.binary_args
debug_args, command = browser_command(self.binary, args, self.debug_info)
self.command = command
if self.pause_after_test:
self.command.remove("-z")
self.command = debug_args + self.command
env = os.environ.copy()
env["HOST_FILE"] = self.hosts_path
env["RUST_BACKTRACE"] = "1"
if not self.interactive:
self.proc = ProcessHandler(self.command,
processOutputLine=[self.on_output],
onFinish=self.on_finish,
env=env,
storeOutput=False)
self.proc.run()
else:
self.proc = subprocess.Popen(self.command, env=env)
try:
timeout = test.timeout * self.timeout_multiplier
# Now wait to get the output we expect, or until we reach the timeout
if not self.interactive and not self.pause_after_test:
wait_timeout = timeout + 5
self.result_flag.wait(wait_timeout)
else:
wait_timeout = None
self.proc.wait()
proc_is_running = True
if self.result_flag.is_set():
if self.result_data is not None:
result = self.convert_result(test, self.result_data)
else:
self.proc.wait()
result = (test.result_cls("CRASH", None), [])
proc_is_running = False
else:
result = (test.result_cls("TIMEOUT", None), [])
if proc_is_running:
if self.pause_after_test:
self.logger.info("Pausing until the browser exits")
self.proc.wait()
else:
self.proc.kill()
except KeyboardInterrupt:
self.proc.kill()
raise
return result
def on_output(self, line):
prefix = "ALERT: RESULT: "
line = line.decode("utf8", "replace")
if line.startswith(prefix):
self.result_data = json.loads(line[len(prefix):])
self.result_flag.set()
else:
if self.interactive:
print line
else:
self.logger.process_output(self.proc.pid,
line,
" ".join(self.command))
def on_finish(self):
self.result_flag.set()
class TempFilename(object):
def __init__(self, directory):
self.directory = directory
self.path = None
def __enter__(self):
self.path = os.path.join(self.directory, str(uuid.uuid4()))
return self.path
def __exit__(self, *args, **kwargs):
try:
os.unlink(self.path)
except OSError:
pass
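# Illustrative usage sketch only: TempFilename hands out a fresh path in the
# given directory and unlinks it on exit, tolerating the case where the
# browser never produced the file.
def _example_tempfilename(tempdir):
    with TempFilename(tempdir) as output_path:
        with open(output_path, "w") as f:
            f.write("screenshot bytes")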
class ServoRefTestExecutor(ProcessTestExecutor):
convert_result = reftest_result_converter
def __init__(self, browser, server_config, binary=None, timeout_multiplier=1,
screenshot_cache=None, debug_info=None, pause_after_test=False):
do_delayed_imports()
ProcessTestExecutor.__init__(self,
browser,
server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = Protocol(self, browser)
self.screenshot_cache = screenshot_cache
self.implementation = RefTestImplementation(self)
self.tempdir = tempfile.mkdtemp()
self.hosts_path = make_hosts_file()
def teardown(self):
try:
os.unlink(self.hosts_path)
except OSError:
pass
os.rmdir(self.tempdir)
ProcessTestExecutor.teardown(self)
def screenshot(self, test, viewport_size, dpi):
full_url = self.test_url(test)
with TempFilename(self.tempdir) as output_path:
debug_args, command = browser_command(
self.binary,
[render_arg(self.browser.render_backend), "--hard-fail", "--exit",
"-u", "Servo/wptrunner", "-Z", "disable-text-aa,load-webfonts-synchronously,replace-surrogates",
"--output=%s" % output_path, full_url] + self.browser.binary_args,
self.debug_info)
for stylesheet in self.browser.user_stylesheets:
command += ["--user-stylesheet", stylesheet]
for pref, value in test.environment.get('prefs', {}).iteritems():
command += ["--pref", "%s=%s" % (pref, value)]
command += ["--resolution", viewport_size or "800x600"]
if self.browser.ca_certificate_path:
command += ["--certificate-path", self.browser.ca_certificate_path]
if dpi:
command += ["--device-pixel-ratio", dpi]
# Run ref tests in headless mode
command += ["-z"]
self.command = debug_args + command
env = os.environ.copy()
env["HOST_FILE"] = self.hosts_path
env["RUST_BACKTRACE"] = "1"
if not self.interactive:
self.proc = ProcessHandler(self.command,
processOutputLine=[self.on_output],
env=env)
try:
self.proc.run()
timeout = test.timeout * self.timeout_multiplier + 5
rv = self.proc.wait(timeout=timeout)
except KeyboardInterrupt:
self.proc.kill()
raise
else:
self.proc = subprocess.Popen(self.command,
env=env)
try:
rv = self.proc.wait()
except KeyboardInterrupt:
self.proc.kill()
raise
if rv is None:
self.proc.kill()
return False, ("EXTERNAL-TIMEOUT", None)
if rv != 0 or not os.path.exists(output_path):
return False, ("CRASH", None)
with open(output_path) as f:
# Might need to strip variable headers or something here
data = f.read()
return True, base64.b64encode(data)
def do_test(self, test):
result = self.implementation.run_test(test)
return self.convert_result(test, result)
def on_output(self, line):
line = line.decode("utf8", "replace")
if self.interactive:
print line
else:
self.logger.process_output(self.proc.pid,
line,
" ".join(self.command))
class ServoWdspecProtocol(Protocol):
def __init__(self, executor, browser):
self.do_delayed_imports()
Protocol.__init__(self, executor, browser)
self.session = None
self.server = None
def setup(self, runner):
try:
self.server = ServoDriverServer(self.logger,
                                binary=self.browser.binary,
                                binary_args=self.browser.binary_args,
                                render_backend=self.browser.render_backend)
self.server.start(block=False)
self.logger.info(
"WebDriver HTTP server listening at %s" % self.server.url)
self.logger.info(
"Establishing new WebDriver session with %s" % self.server.url)
self.session = webdriver.Session(
self.server.host, self.server.port, self.server.base_path)
except Exception:
self.logger.error(traceback.format_exc())
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
if self.server is not None:
try:
if self.session.session_id is not None:
self.session.end()
except Exception:
pass
if self.server.is_alive:
self.server.stop()
@property
def is_alive(self):
conn = httplib.HTTPConnection(self.server.host, self.server.port)
conn.request("HEAD", self.server.base_path + "invalid")
res = conn.getresponse()
return res.status == 404
def do_delayed_imports(self):
global pytestrunner, webdriver
from . import pytestrunner
import webdriver
class ServoWdspecExecutor(WdspecExecutor):
def __init__(self, browser, server_config,
timeout_multiplier=1, close_after_done=True, debug_info=None,
**kwargs):
WdspecExecutor.__init__(self, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = ServoWdspecProtocol(self, browser)
def is_alive(self):
return self.protocol.is_alive
def on_environment_change(self, new_environment):
pass
def do_test(self, test):
timeout = test.timeout * self.timeout_multiplier + extra_timeout
success, data = WdspecRun(self.do_wdspec,
self.protocol.session,
test.path,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_wdspec(self, session, path, timeout):
harness_result = ("OK", None)
subtest_results = pytestrunner.run(path, session, timeout=timeout)
return (harness_result, subtest_results)

View file

@ -1,261 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import socket
import threading
import time
import traceback
from .base import (Protocol,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
strip_server)
from ..testrunner import Stop
webdriver = None
here = os.path.split(__file__)[0]
extra_timeout = 5
def do_delayed_imports():
global webdriver
from tools import webdriver
class ServoWebDriverProtocol(Protocol):
def __init__(self, executor, browser, capabilities, **kwargs):
do_delayed_imports()
Protocol.__init__(self, executor, browser)
self.capabilities = capabilities
self.host = browser.webdriver_host
self.port = browser.webdriver_port
self.session = None
def setup(self, runner):
"""Connect to browser via WebDriver."""
self.runner = runner
url = "http://%s:%d" % (self.host, self.port)
session_started = False
try:
self.session = webdriver.Session(self.host, self.port,
extension=webdriver.servo.ServoCommandExtensions)
self.session.start()
except:
self.logger.warning(
"Connecting with WebDriver failed:\n%s" % traceback.format_exc())
else:
self.logger.debug("session started")
session_started = True
if not session_started:
self.logger.warning("Failed to connect via WebDriver")
self.executor.runner.send_message("init_failed")
else:
self.executor.runner.send_message("init_succeeded")
def teardown(self):
self.logger.debug("Hanging up on WebDriver session")
try:
self.session.end()
except:
pass
def is_alive(self):
try:
# Get a simple property over the connection
self.session.window_handle
# TODO what exception?
except Exception:
return False
return True
def after_connect(self):
pass
def wait(self):
while True:
try:
self.session.execute_async_script("")
except webdriver.TimeoutException:
pass
except (socket.timeout, IOError):
break
except Exception as e:
self.logger.error(traceback.format_exc(e))
break
def on_environment_change(self, old_environment, new_environment):
# Unset all the old prefs
self.session.extension.reset_prefs(*old_environment.get("prefs", {}).keys())
self.session.extension.set_prefs(new_environment.get("prefs", {}))
class ServoWebDriverRun(object):
def __init__(self, func, session, url, timeout, current_timeout=None):
self.func = func
self.result = None
self.session = session
self.url = url
self.timeout = timeout
self.result_flag = threading.Event()
def run(self):
executor = threading.Thread(target=self._run)
executor.start()
flag = self.result_flag.wait(self.timeout + extra_timeout)
if self.result is None:
assert not flag
self.result = False, ("EXTERNAL-TIMEOUT", None)
return self.result
def _run(self):
try:
self.result = True, self.func(self.session, self.url, self.timeout)
except webdriver.TimeoutException:
self.result = False, ("EXTERNAL-TIMEOUT", None)
except (socket.timeout, IOError):
self.result = False, ("CRASH", None)
except Exception as e:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc(e)
self.result = False, ("ERROR", e)
finally:
self.result_flag.set()
def timeout_func(timeout):
if timeout:
t0 = time.time()
return lambda: time.time() - t0 > timeout + extra_timeout
else:
return lambda: False
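# Illustrative usage sketch only: timeout_func returns a polling predicate
# that fires once the deadline plus the extra_timeout grace period has
# passed; a falsy timeout yields a predicate that never fires.
def _example_timeout_poll():
    expired = timeout_func(5)
    while not expired():
        time.sleep(0.1)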
class ServoWebDriverTestharnessExecutor(TestharnessExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
close_after_done=True, capabilities=None, debug_info=None):
TestharnessExecutor.__init__(self, browser, server_config,
                             timeout_multiplier=timeout_multiplier,
                             debug_info=debug_info)
self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities)
with open(os.path.join(here, "testharness_servodriver.js")) as f:
self.script = f.read()
self.timeout = None
def on_protocol_change(self, new_protocol):
pass
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
url = self.test_url(test)
timeout = test.timeout * self.timeout_multiplier + extra_timeout
if timeout != self.timeout:
try:
self.protocol.session.timeouts.script = timeout
self.timeout = timeout
except IOError:
self.logger.error("Lost webdriver connection")
return Stop
success, data = ServoWebDriverRun(self.do_testharness,
self.protocol.session,
url,
timeout).run()
if success:
return self.convert_result(test, data)
return (test.result_cls(*data), [])
def do_testharness(self, session, url, timeout):
session.url = url
result = json.loads(
session.execute_async_script(
self.script % {"abs_url": url,
"url": strip_server(url),
"timeout_multiplier": self.timeout_multiplier,
"timeout": timeout * 1000}))
# Prevent leaking every page in history until Servo develops a more sane
# page cache
session.back()
return result
class TimeoutError(Exception):
pass
class ServoWebDriverRefTestExecutor(RefTestExecutor):
def __init__(self, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, capabilities=None, debug_info=None):
"""Selenium WebDriver-based executor for reftests"""
RefTestExecutor.__init__(self,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = ServoWebDriverProtocol(self, browser,
capabilities=capabilities)
self.implementation = RefTestImplementation(self)
self.timeout = None
with open(os.path.join(here, "reftest-wait_servodriver.js")) as f:
self.wait_script = f.read()
def is_alive(self):
return self.protocol.is_alive()
def do_test(self, test):
try:
result = self.implementation.run_test(test)
return self.convert_result(test, result)
except IOError:
return test.result_cls("CRASH", None), []
except TimeoutError:
return test.result_cls("TIMEOUT", None), []
except Exception as e:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc(e)
return test.result_cls("ERROR", message), []
def screenshot(self, test, viewport_size, dpi):
# https://github.com/w3c/wptrunner/issues/166
assert viewport_size is None
assert dpi is None
timeout = (test.timeout * self.timeout_multiplier + extra_timeout
if self.debug_info is None else None)
if self.timeout != timeout:
try:
self.protocol.session.timeouts.script = timeout
self.timeout = timeout
except IOError:
self.logger.error("Lost webdriver connection")
return Stop
return ServoWebDriverRun(self._screenshot,
self.protocol.session,
self.test_url(test),
timeout).run()
def _screenshot(self, session, url, timeout):
session.url = url
session.execute_async_script(self.wait_script)
return session.screenshot()

View file

@ -1,24 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from .base import TestExecutor
class ProcessTestExecutor(TestExecutor):
def __init__(self, *args, **kwargs):
TestExecutor.__init__(self, *args, **kwargs)
self.binary = self.browser.binary
self.interactive = (False if self.debug_info is None
else self.debug_info.interactive)
def setup(self, runner):
self.runner = runner
self.runner.send_message("init_succeeded")
return True
def is_alive(self):
return True
def do_test(self, test):
raise NotImplementedError

View file

@ -1,6 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from . import fixtures
from .runner import run

View file

@ -1,136 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""pytest fixtures for use in Python-based WPT tests.

The purpose of test fixtures is to provide a fixed baseline upon which
tests can reliably and repeatedly execute.
"""
import contextlib
import httplib

import pytest
import webdriver
class Session(object):
"""Fixture to allow access to wptrunner's existing WebDriver session
in tests.
The session is not created by default to enable testing of session
creation. However, a function-scoped session will be implicitly created
at the first call to a WebDriver command. This means methods such as
`session.send_command` and `session.session_id` are possible to use
without having a session.
To illustrate implicit session creation::
def test_session_scope(session):
# at this point there is no session
assert session.session_id is None
# window_id is a WebDriver command,
# and implicitly creates the session for us
assert session.window_id is not None
# we now have a session
assert session.session_id is not None
You can also access the session in custom fixtures defined in the
tests, such as a setup function::
@pytest.fixture(scope="function")
def setup(request, session):
session.url = "https://example.org"
def test_something(setup, session):
assert session.url == "https://example.org"
When the test function goes out of scope, any remaining user prompts
and opened windows are closed, and the current browsing context is
switched back to the top-level browsing context.
"""
def __init__(self, client):
self.client = client
@pytest.fixture(scope="function")
def session(self, request):
# finalisers are popped off a stack,
# making their ordering reverse
request.addfinalizer(self.switch_to_top_level_browsing_context)
request.addfinalizer(self.restore_windows)
request.addfinalizer(self.dismiss_user_prompts)
return self.client
def dismiss_user_prompts(self):
"""Dismisses any open user prompts in windows."""
current_window = self.client.window_handle
for window in self.windows():
self.client.window_handle = window
try:
self.client.alert.dismiss()
except webdriver.NoSuchAlertException:
pass
self.client.window_handle = current_window
def restore_windows(self):
"""Closes superfluous windows opened by the test without ending
the session implicitly by closing the last window.
"""
current_window = self.client.window_handle
for window in self.windows(exclude=[current_window]):
self.client.window_handle = window
if len(self.client.window_handles) > 1:
self.client.close()
self.client.window_handle = current_window
def switch_to_top_level_browsing_context(self):
"""If the current browsing context selected by WebDriver is a
`<frame>` or an `<iframe>`, switch it back to the top-level
browsing context.
"""
self.client.switch_frame(None)
def windows(self, exclude=None):
"""Set of window handles, filtered by an `exclude` list if
provided.
"""
if exclude is None:
exclude = []
wins = [w for w in self.client.handles if w not in exclude]
return set(wins)
class HTTPRequest(object):
def __init__(self, host, port):
self.host = host
self.port = port
def head(self, path):
return self._request("HEAD", path)
def get(self, path):
return self._request("GET", path)
@contextlib.contextmanager
def _request(self, method, path):
conn = httplib.HTTPConnection(self.host, self.port)
try:
conn.request(method, path)
yield conn.getresponse()
finally:
conn.close()
@pytest.fixture(scope="module")
def http(session):
return HTTPRequest(session.transport.host, session.transport.port)
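# Illustrative usage sketch only: responses from the http fixture are
# context-managed, which keeps connections from leaking between assertions.
def _example_http_usage(http):
    with http.head("/invalid") as response:
        assert response.status == 404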
