Update web-platform-tests and CSS tests.

- Update CSS tests to revision e05bfd5e30ed662c2f8a353577003f8eed230180.
- Update web-platform-tests to revision a052787dd5c069a340031011196b73affbd68cd9.
Ms2ger 2017-02-06 11:06:12 +01:00
parent fb4f421c8b
commit 296fa2512b
21852 changed files with 2080936 additions and 892894 deletions

@@ -0,0 +1,164 @@
# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS  =
SPHINXBUILD = sphinx-build
PAPER       =
BUILDDIR    = _build

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

REGENDOC_ARGS := \
	--normalize "/={8,} (.*) ={8,}/======= \1 ========/" \
	--normalize "/_{8,} (.*) _{8,}/_______ \1 ________/" \
	--normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \
	--normalize "@/tmp/pytest-of-.*/pytest-\d+@PYTEST_TMPDIR@"

.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html        to make standalone HTML files"
	@echo "  latexpdf    to make LaTeX files and run them through pdflatex"
	@echo "  showtarget  to show the pytest.org target directory"
	@echo "  install     to install docs to pytest.org/SITETARGET"
	@echo "  installpdf  to install the doc pdf to pytest.org/SITETARGET"
	@echo "  regen       to regenerate pytest examples using the installed pytest"
	@echo "  linkcheck   to check all external links for integrity"

clean:
	-rm -rf $(BUILDDIR)/*

SITETARGET = $(shell ./_getdoctarget.py)

showtarget:
	@echo $(SITETARGET)

install: html
	# for access talk to someone with login rights to
	# pytest-dev@pytest.org to add your ssh key
	rsync -avz _build/html/ pytest-dev@pytest.org:pytest.org/$(SITETARGET)

installpdf: latexpdf
	@scp $(BUILDDIR)/latex/pytest.pdf pytest-dev@pytest.org:pytest.org/$(SITETARGET)

installall: clean install installpdf
	@echo "done"

regen:
	PYTHONDONTWRITEBYTECODE=1 COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS}

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pytest.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pytest.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/pytest"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pytest"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	make -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

texinfo:
	mkdir -p $(BUILDDIR)/texinfo
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	mkdir -p $(BUILDDIR)/texinfo
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

@@ -0,0 +1,16 @@
#!/usr/bin/env python

import py


def get_version_string():
    fn = py.path.local(__file__).join("..", "..", "..",
                                      "_pytest", "__init__.py")
    for line in fn.readlines():
        if "version" in line and not line.strip().startswith('#'):
            return eval(line.split("=")[-1])


def get_minor_version_string():
    return ".".join(get_version_string().split(".")[:2])


if __name__ == "__main__":
    print (get_minor_version_string())

@@ -0,0 +1,18 @@
<h3><a href="{{ pathto(master_doc) }}">{{ _('Table Of Contents') }}</a></h3>
<ul>
<li><a href="{{ pathto('index') }}">Home</a></li>
<li><a href="{{ pathto('contents') }}">Contents</a></li>
<li><a href="{{ pathto('getting-started') }}">Install</a></li>
<li><a href="{{ pathto('example/index') }}">Examples</a></li>
<li><a href="{{ pathto('customize') }}">Customize</a></li>
<li><a href="{{ pathto('contact') }}">Contact</a></li>
<li><a href="{{ pathto('talks') }}">Talks/Posts</a></li>
<li><a href="{{ pathto('changelog') }}">Changelog</a></li>
<li><a href="{{ pathto('license') }}">License</a></li>
</ul>
{%- if display_toc %}
<hr>
{{ toc }}
{%- endif %}

@@ -0,0 +1,34 @@
{% extends "!layout.html" %}
{% block header %}
<div align="center" xmlns="http://www.w3.org/1999/html" style="background-color: lightgreen; padding: .5em">
<h4>
Want to help improve pytest? Please
<a href="https://www.indiegogo.com/projects/python-testing-sprint-mid-2016#/">
contribute to
</a>
or
<a href="announce/sprint2016.html">
join
</a>
our upcoming sprint in June 2016!
</h4>
</div>
{{super()}}
{% endblock %}
{% block footer %}
{{ super() }}
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-7597274-13']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
{% endblock %}

@@ -0,0 +1,16 @@
<h3>Useful Links</h3>
<ul>
<li>
<a href="https://www.indiegogo.com/projects/python-testing-sprint-mid-2016#/">
<b>Sprint funding campaign</b>
</a>
</li>
<li><a href="{{ pathto('index') }}">The pytest Website</a></li>
<li><a href="{{ pathto('contributing') }}">Contribution Guide</a></li>
<li><a href="https://pypi.python.org/pypi/pytest">pytest @ PyPI</a></li>
<li><a href="https://github.com/pytest-dev/pytest/">pytest @ GitHub</a></li>
<li><a href="http://plugincompat.herokuapp.com/">3rd party plugins</a></li>
<li><a href="https://github.com/pytest-dev/pytest/issues">Issue Tracker</a></li>
<li><a href="http://pytest.org/latest/pytest.pdf">PDF Documentation</a>
</ul>

@@ -0,0 +1,5 @@
<h3>About pytest</h3>
<p>
pytest is a mature full-featured Python testing tool that helps
you write better programs.
</p>

@@ -0,0 +1,3 @@
*.pyc
*.pyo
.DS_Store

@@ -0,0 +1,37 @@
Copyright (c) 2010 by Armin Ronacher.
Some rights reserved.
Redistribution and use in source and binary forms of the theme, with or
without modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* The names of the contributors may not be used to endorse or
promote products derived from this software without specific
prior written permission.
We kindly ask you to only use these themes in an unmodified manner just
for Flask and Flask-related products, not for unrelated projects. If you
like the visual style and want to use it for your own projects, please
consider making some larger changes to the themes (such as changing
font faces, sizes, colors or margins).
THIS THEME IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS THEME, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

@@ -0,0 +1,31 @@
Flask Sphinx Styles
===================

This repository contains sphinx styles for Flask and Flask related
projects. To use this style in your Sphinx documentation, follow
this guide:

1. put this folder as _themes into your docs folder. Alternatively
   you can also use git submodules to check out the contents there.

2. add this to your conf.py:

       sys.path.append(os.path.abspath('_themes'))
       html_theme_path = ['_themes']
       html_theme = 'flask'

The following themes exist:

- 'flask' - the standard flask documentation theme for large
  projects
- 'flask_small' - small one-page theme. Intended to be used by
  very small addon libraries for flask.

The following options exist for the flask_small theme:

   [options]
   index_logo = ''              filename of a picture in _static
                                to be used as replacement for the
                                h1 in the index.rst file.
   index_logo_height = 120px    height of the index logo
   github_fork = ''             repository name on github for the
                                "fork me" badge

@@ -0,0 +1,24 @@
{%- extends "basic/layout.html" %}
{%- block extrahead %}
{{ super() }}
{% if theme_touch_icon %}
<link rel="apple-touch-icon" href="{{ pathto('_static/' ~ theme_touch_icon, 1) }}" />
{% endif %}
<meta name="viewport" content="width=device-width, initial-scale=0.9, maximum-scale=0.9">
{% endblock %}
{%- block relbar2 %}{% endblock %}
{% block header %}
{{ super() }}
{% if pagename == 'index' %}
<div class=indexwrapper>
{% endif %}
{% endblock %}
{%- block footer %}
<div class="footer">
&copy; Copyright {{ copyright }}.
Created using <a href="http://sphinx.pocoo.org/">Sphinx</a>.
</div>
{% if pagename == 'index' %}
</div>
{% endif %}
{%- endblock %}

@@ -0,0 +1,19 @@
<h3>Related Topics</h3>
<ul>
<li><a href="{{ pathto(master_doc) }}">Documentation overview</a><ul>
{%- for parent in parents %}
<li><a href="{{ parent.link|e }}">{{ parent.title }}</a><ul>
{%- endfor %}
{%- if prev %}
<li>Previous: <a href="{{ prev.link|e }}" title="{{ _('previous chapter')
}}">{{ prev.title }}</a></li>
{%- endif %}
{%- if next %}
<li>Next: <a href="{{ next.link|e }}" title="{{ _('next chapter')
}}">{{ next.title }}</a></li>
{%- endif %}
{%- for parent in parents %}
</ul></li>
{%- endfor %}
</ul></li>
</ul>

@@ -0,0 +1,557 @@
/*
* flasky.css_t
* ~~~~~~~~~~~~
*
* :copyright: Copyright 2010 by Armin Ronacher.
* :license: Flask Design License, see LICENSE for details.
*/
{% set page_width = '1020px' %}
{% set sidebar_width = '220px' %}
/* orange of logo is #d67c29 but we use black for links for now */
{% set link_color = '#000' %}
{% set link_hover_color = '#000' %}
{% set base_font = 'sans-serif' %}
{% set header_font = 'serif' %}
@import url("basic.css");
/* -- page layout ----------------------------------------------------------- */
body {
font-family: {{ base_font }};
font-size: 17px;
background-color: white;
color: #000;
margin: 0;
padding: 0;
}
div.document {
width: {{ page_width }};
margin: 30px auto 0 auto;
}
div.documentwrapper {
float: left;
width: 100%;
}
div.bodywrapper {
margin: 0 0 0 {{ sidebar_width }};
}
div.sphinxsidebar {
width: {{ sidebar_width }};
}
hr {
border: 0;
border-top: 1px solid #B1B4B6;
}
div.body {
background-color: #ffffff;
color: #3E4349;
padding: 0 30px 0 30px;
}
img.floatingflask {
padding: 0 0 10px 10px;
float: right;
}
div.footer {
width: {{ page_width }};
margin: 20px auto 30px auto;
font-size: 14px;
color: #888;
text-align: right;
}
div.footer a {
color: #888;
}
div.related {
display: none;
}
div.sphinxsidebar a {
color: #444;
text-decoration: none;
border-bottom: 1px dotted #999;
}
div.sphinxsidebar a:hover {
border-bottom: 1px solid #999;
}
div.sphinxsidebar {
font-size: 14px;
line-height: 1.5;
}
div.sphinxsidebarwrapper {
padding: 18px 10px;
}
div.sphinxsidebarwrapper p.logo {
padding: 0 0 20px 0;
margin: 0;
text-align: center;
}
div.sphinxsidebar h3,
div.sphinxsidebar h4 {
font-family: {{ header_font }};
color: #444;
font-size: 24px;
font-weight: normal;
margin: 0 0 5px 0;
padding: 0;
}
div.sphinxsidebar h4 {
font-size: 20px;
}
div.sphinxsidebar h3 a {
color: #444;
}
div.sphinxsidebar p.logo a,
div.sphinxsidebar h3 a,
div.sphinxsidebar p.logo a:hover,
div.sphinxsidebar h3 a:hover {
border: none;
}
div.sphinxsidebar p {
color: #555;
margin: 10px 0;
}
div.sphinxsidebar ul {
margin: 10px 0;
padding: 0;
color: #000;
}
div.sphinxsidebar input {
border: 1px solid #ccc;
font-family: {{ base_font }};
font-size: 1em;
}
/* -- body styles ----------------------------------------------------------- */
a {
color: {{ link_color }};
text-decoration: underline;
}
a:hover {
color: {{ link_hover_color }};
text-decoration: underline;
}
a.reference.internal em {
font-style: normal;
}
div.body h1,
div.body h2,
div.body h3,
div.body h4,
div.body h5,
div.body h6 {
font-family: {{ header_font }};
font-weight: normal;
margin: 30px 0px 10px 0px;
padding: 0;
}
{% if theme_index_logo %}
div.indexwrapper h1 {
text-indent: -999999px;
background: url({{ theme_index_logo }}) no-repeat center center;
height: {{ theme_index_logo_height }};
}
{% else %}
div.indexwrapper div.body h1 {
font-size: 200%;
}
{% endif %}
div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; }
div.body h2 { font-size: 180%; }
div.body h3 { font-size: 150%; }
div.body h4 { font-size: 130%; }
div.body h5 { font-size: 100%; }
div.body h6 { font-size: 100%; }
a.headerlink {
color: #ddd;
padding: 0 4px;
text-decoration: none;
}
a.headerlink:hover {
color: #444;
background: #eaeaea;
}
div.body p, div.body dd, div.body li {
line-height: 1.4em;
}
div.admonition {
background: #fafafa;
margin: 20px -30px;
padding: 10px 30px;
border-top: 1px solid #ccc;
border-bottom: 1px solid #ccc;
}
div.admonition tt.xref, div.admonition a tt {
border-bottom: 1px solid #fafafa;
}
dd div.admonition {
margin-left: -60px;
padding-left: 60px;
}
div.admonition p.admonition-title {
font-family: {{ header_font }};
font-weight: normal;
font-size: 24px;
margin: 0 0 10px 0;
padding: 0;
line-height: 1;
}
div.admonition p.last {
margin-bottom: 0;
}
div.highlight {
background-color: white;
}
dt:target, .highlight {
background: #FAF3E8;
}
div.note {
background-color: #eee;
border: 1px solid #ccc;
}
div.seealso {
background-color: #ffc;
border: 1px solid #ff6;
}
div.topic {
background-color: #eee;
}
p.admonition-title {
display: inline;
}
p.admonition-title:after {
content: ":";
}
pre, tt, code {
font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace;
font-size: 0.9em;
background: #eee;
}
img.screenshot {
}
tt.descname, tt.descclassname {
font-size: 0.95em;
}
tt.descname {
padding-right: 0.08em;
}
img.screenshot {
-moz-box-shadow: 2px 2px 4px #eee;
-webkit-box-shadow: 2px 2px 4px #eee;
box-shadow: 2px 2px 4px #eee;
}
table.docutils {
border: 1px solid #888;
-moz-box-shadow: 2px 2px 4px #eee;
-webkit-box-shadow: 2px 2px 4px #eee;
box-shadow: 2px 2px 4px #eee;
}
table.docutils td, table.docutils th {
border: 1px solid #888;
padding: 0.25em 0.7em;
}
table.field-list, table.footnote {
border: none;
-moz-box-shadow: none;
-webkit-box-shadow: none;
box-shadow: none;
}
table.footnote {
margin: 15px 0;
width: 100%;
border: 1px solid #eee;
background: #fdfdfd;
font-size: 0.9em;
}
table.footnote + table.footnote {
margin-top: -15px;
border-top: none;
}
table.field-list th {
padding: 0 0.8em 0 0;
}
table.field-list td {
padding: 0;
}
table.footnote td.label {
width: 0px;
padding: 0.3em 0 0.3em 0.5em;
}
table.footnote td {
padding: 0.3em 0.5em;
}
dl {
margin: 0;
padding: 0;
}
dl dd {
margin-left: 30px;
}
blockquote {
margin: 0 0 0 30px;
padding: 0;
}
ul, ol {
margin: 10px 0 10px 30px;
padding: 0;
}
pre {
background: #eee;
padding: 7px 30px;
margin: 15px -30px;
line-height: 1.3em;
}
dl pre, blockquote pre, li pre {
margin-left: -60px;
padding-left: 60px;
}
dl dl pre {
margin-left: -90px;
padding-left: 90px;
}
tt {
background-color: #ecf0f3;
color: #222;
/* padding: 1px 2px; */
}
tt.xref, a tt {
background-color: #FBFBFB;
border-bottom: 1px solid white;
}
a.reference {
text-decoration: none;
border-bottom: 1px dotted {{ link_color }};
}
a.reference:hover {
border-bottom: 1px solid {{ link_hover_color }};
}
a.footnote-reference {
text-decoration: none;
font-size: 0.7em;
vertical-align: top;
border-bottom: 1px dotted {{ link_color }};
}
a.footnote-reference:hover {
border-bottom: 1px solid {{ link_hover_color }};
}
a:hover tt {
background: #EEE;
}
@media screen and (max-width: 870px) {
div.sphinxsidebar {
display: none;
}
div.document {
width: 100%;
}
div.documentwrapper {
margin-left: 0;
margin-top: 0;
margin-right: 0;
margin-bottom: 0;
}
div.bodywrapper {
margin-top: 0;
margin-right: 0;
margin-bottom: 0;
margin-left: 0;
}
ul {
margin-left: 0;
}
.document {
width: auto;
}
.footer {
width: auto;
}
.bodywrapper {
margin: 0;
}
.footer {
width: auto;
}
.github {
display: none;
}
}
@media screen and (max-width: 875px) {
body {
margin: 0;
padding: 20px 30px;
}
div.documentwrapper {
float: none;
background: white;
}
div.sphinxsidebar {
display: block;
float: none;
width: 102.5%;
margin: 50px -30px -20px -30px;
padding: 10px 20px;
background: #333;
color: white;
}
div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p,
div.sphinxsidebar h3 a, div.sphinxsidebar ul {
color: white;
}
div.sphinxsidebar a {
color: #aaa;
}
div.sphinxsidebar p.logo {
display: none;
}
div.document {
width: 100%;
margin: 0;
}
div.related {
display: block;
margin: 0;
padding: 10px 0 20px 0;
}
div.related ul,
div.related ul li {
margin: 0;
padding: 0;
}
div.footer {
display: none;
}
div.bodywrapper {
margin: 0;
}
div.body {
min-height: 0;
padding: 0;
}
.rtd_doc_footer {
display: none;
}
.document {
width: auto;
}
.footer {
width: auto;
}
.footer {
width: auto;
}
.github {
display: none;
}
}
/* misc. */
.revsys-inline {
display: none!important;
}

@@ -0,0 +1,9 @@
[theme]
inherit = basic
stylesheet = flasky.css
pygments_style = flask_theme_support.FlaskyStyle
[options]
index_logo = ''
index_logo_height = 120px
touch_icon =

@@ -0,0 +1,86 @@
# flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
    Number, Operator, Generic, Whitespace, Punctuation, Other, Literal


class FlaskyStyle(Style):
    background_color = "#f8f8f8"
    default_style = ""

    styles = {
        # No corresponding class for the following:
        #Text: "", # class: ''
        Whitespace: "underline #f8f8f8", # class: 'w'
        Error: "#a40000 border:#ef2929", # class: 'err'
        Other: "#000000", # class 'x'

        Comment: "italic #8f5902", # class: 'c'
        Comment.Preproc: "noitalic", # class: 'cp'

        Keyword: "bold #004461", # class: 'k'
        Keyword.Constant: "bold #004461", # class: 'kc'
        Keyword.Declaration: "bold #004461", # class: 'kd'
        Keyword.Namespace: "bold #004461", # class: 'kn'
        Keyword.Pseudo: "bold #004461", # class: 'kp'
        Keyword.Reserved: "bold #004461", # class: 'kr'
        Keyword.Type: "bold #004461", # class: 'kt'

        Operator: "#582800", # class: 'o'
        Operator.Word: "bold #004461", # class: 'ow' - like keywords

        Punctuation: "bold #000000", # class: 'p'

        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name: "#000000", # class: 'n'
        Name.Attribute: "#c4a000", # class: 'na' - to be revised
        Name.Builtin: "#004461", # class: 'nb'
        Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
        Name.Class: "#000000", # class: 'nc' - to be revised
        Name.Constant: "#000000", # class: 'no' - to be revised
        Name.Decorator: "#888", # class: 'nd' - to be revised
        Name.Entity: "#ce5c00", # class: 'ni'
        Name.Exception: "bold #cc0000", # class: 'ne'
        Name.Function: "#000000", # class: 'nf'
        Name.Property: "#000000", # class: 'py'
        Name.Label: "#f57900", # class: 'nl'
        Name.Namespace: "#000000", # class: 'nn' - to be revised
        Name.Other: "#000000", # class: 'nx'
        Name.Tag: "bold #004461", # class: 'nt' - like a keyword
        Name.Variable: "#000000", # class: 'nv' - to be revised
        Name.Variable.Class: "#000000", # class: 'vc' - to be revised
        Name.Variable.Global: "#000000", # class: 'vg' - to be revised
        Name.Variable.Instance: "#000000", # class: 'vi' - to be revised

        Number: "#990000", # class: 'm'

        Literal: "#000000", # class: 'l'
        Literal.Date: "#000000", # class: 'ld'

        String: "#4e9a06", # class: 's'
        String.Backtick: "#4e9a06", # class: 'sb'
        String.Char: "#4e9a06", # class: 'sc'
        String.Doc: "italic #8f5902", # class: 'sd' - like a comment
        String.Double: "#4e9a06", # class: 's2'
        String.Escape: "#4e9a06", # class: 'se'
        String.Heredoc: "#4e9a06", # class: 'sh'
        String.Interpol: "#4e9a06", # class: 'si'
        String.Other: "#4e9a06", # class: 'sx'
        String.Regex: "#4e9a06", # class: 'sr'
        String.Single: "#4e9a06", # class: 's1'
        String.Symbol: "#4e9a06", # class: 'ss'

        Generic: "#000000", # class: 'g'
        Generic.Deleted: "#a40000", # class: 'gd'
        Generic.Emph: "italic #000000", # class: 'ge'
        Generic.Error: "#ef2929", # class: 'gr'
        Generic.Heading: "bold #000080", # class: 'gh'
        Generic.Inserted: "#00A000", # class: 'gi'
        Generic.Output: "#888", # class: 'go'
        Generic.Prompt: "#745334", # class: 'gp'
        Generic.Strong: "bold #000000", # class: 'gs'
        Generic.Subheading: "bold #800080", # class: 'gu'
        Generic.Traceback: "bold #a40000", # class: 'gt'
    }

@@ -0,0 +1,78 @@
April 2015 is "adopt pytest month"
=============================================
Are you an enthusiastic pytest user, the local testing guru in your workplace? Or are you considering using pytest for your open source project, but not sure how to get started? Then you may be interested in "adopt pytest month"!
We will pair experienced pytest users with open source projects, for a month's effort of getting new development teams started with pytest.
In 2015 we are trying this for the first time. In February and March 2015 we will gather volunteers on both sides, in April we will do the work, and in May we will evaluate how it went. This effort is being coordinated by Brianna Laugher. If you have any questions or comments, you can raise them on the `@pytestdotorg twitter account <https://twitter.com/pytestdotorg>`_, the `issue tracker`_ or the `pytest-dev mailing list`_.
.. _`issue tracker`: https://github.com/pytest-dev/pytest/issues/676
.. _`pytest-dev mailing list`: https://mail.python.org/mailman/listinfo/pytest-dev
The ideal pytest helper
-----------------------------------------
- will be able to commit 2-4 hours a week to working with their particular project (this might involve joining their mailing list, installing the software and exploring any existing tests, offering advice, writing some example tests)
- feels confident in using pytest (e.g. has explored command line options, knows how to write parametrized tests, has an idea about conftest contents)
- does not need to be an expert in every aspect!
`Pytest helpers, sign up here`_! (preferably in February, hard deadline 22 March)
.. _`Pytest helpers, sign up here`: http://goo.gl/forms/nxqAhqWt1P
The ideal partner project
-----------------------------------------
- is open source, and predominantly written in Python
- has an automated/documented install process for developers
- has more than one core developer
- has at least one official release (e.g. is available on pypi)
- has the support of the core development team, in trying out pytest adoption
- has no tests... or 100% test coverage... or somewhere in between!
`Partner projects, sign up here`_! (by 22 March)
.. _`Partner projects, sign up here`: http://goo.gl/forms/ZGyqlHiwk3
What does it mean to "adopt pytest"?
-----------------------------------------
There can be many different definitions of "success". Pytest can run many `nose and unittest`_ tests by default, so using pytest as your testrunner may be possible from day 1. Job done, right?
Progressive success might look like:
- tests can be run (by pytest) without errors (there may be failures)
- tests can be run (by pytest) without failures
- test runner is integrated into CI server
- existing tests are rewritten to take advantage of pytest features - this can happen in several iterations, for example:
- changing to native assert_ statements (pycmd_ has a script to help with that, ``pyconvert_unittest.py``)
- changing `setUp/tearDown methods`_ to fixtures_
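  For instance, a hedged sketch of such a conversion (``make_db`` is an
  illustrative helper, not a real API): a unittest-style setup::

      class DatabaseTest(unittest.TestCase):
          def setUp(self):
              self.db = make_db()

  can become a pytest fixture that tests request by argument name::

      @pytest.fixture
      def db():
          return make_db()

      def test_query(db):
          assert db is not None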
- adding markers_
- other changes to reduce boilerplate
- assess needs for future tests to be written, e.g. new fixtures, distributed_ testing tweaks
"Success" should also include that the development team feels comfortable with their knowledge of how to use pytest. In fact this is probably more important than anything else. So spending a lot of time on communication, giving examples, etc will probably be important - both in running the tests, and in writing them.
It may be after the month is up, the partner project decides that pytest is not right for it. That's okay - hopefully the pytest team will also learn something about its weaknesses or deficiencies.
.. _`nose and unittest`: faq.html#how-does-pytest-relate-to-nose-and-unittest
.. _assert: asserts.html
.. _pycmd: https://bitbucket.org/hpk42/pycmd/overview
.. _`setUp/tearDown methods`: xunit_setup.html
.. _fixtures: fixture.html
.. _markers: markers.html
.. _distributed: xdist.html
Other ways to help
-----------------------------------------
Promote! Do your favourite open source Python projects use pytest? If not, why not tell them about this page?

@@ -0,0 +1,48 @@
Release announcements
===========================================
.. toctree::
   :maxdepth: 2

   sprint2016
   release-2.9.0
   release-2.8.7
   release-2.8.6
   release-2.8.5
   release-2.8.4
   release-2.8.3
   release-2.8.2
   release-2.7.2
   release-2.7.1
   release-2.7.0
   release-2.6.3
   release-2.6.2
   release-2.6.1
   release-2.6.0
   release-2.5.2
   release-2.5.1
   release-2.5.0
   release-2.4.2
   release-2.4.1
   release-2.4.0
   release-2.3.5
   release-2.3.4
   release-2.3.3
   release-2.3.2
   release-2.3.1
   release-2.3.0
   release-2.2.4
   release-2.2.2
   release-2.2.1
   release-2.2.0
   release-2.1.3
   release-2.1.2
   release-2.1.1
   release-2.1.0
   release-2.0.3
   release-2.0.2
   release-2.0.1
   release-2.0.0

@@ -0,0 +1,129 @@
py.test 2.0.0: asserts++, unittest++, reporting++, config++, docs++
===========================================================================
Welcome to pytest-2.0.0, a major new release of "py.test", the rapid
easy Python testing tool. There are many new features and enhancements,
see below for summary and detailed lists. A lot of long-deprecated code
has been removed, resulting in a much smaller and cleaner
implementation. See the new docs with examples here:
http://pytest.org/2.0.0/index.html
A note on packaging: pytest used to be part of the "py" distribution up
until version py-1.3.4 but this has changed now: pytest-2.0.0 only
contains py.test related code and is expected to be backward-compatible
to existing test code. If you want to install pytest, just type one of::

    pip install -U pytest
    easy_install -U pytest
Many thanks to all issue reporters and people asking questions or
complaining. Particular thanks to Floris Bruynooghe and Ronny Pfannschmidt
for their great coding contributions and many others for feedback and help.
best,
holger krekel
New Features
-----------------------
- new invocations through Python interpreter and from Python::

      python -m pytest      # on all pythons >= 2.5

  or from a python program::

      import pytest ; pytest.main(arglist, pluginlist)

  see http://pytest.org/2.0.0/usage.html for details.
- new and better reporting information in assert expressions
  if comparing lists, sequences or strings.
  see http://pytest.org/2.0.0/assert.html#newreport
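  As a minimal illustration (a made-up failing test, not from the
  release notes)::

      def test_compare_lists():
          assert [0, 1, 2] == [0, 1, 3]

  the failure report points at the differing element instead of a bare
  AssertionError.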
- new configuration through ini-files (setup.cfg or tox.ini recognized),
  for example::

      [pytest]
      norecursedirs = .hg data*  # don't ever recurse in such dirs
      addopts = -x --pyargs      # add these command line options by default

  see http://pytest.org/2.0.0/customize.html
- improved standard unittest support. In general py.test should now
  better be able to run custom unittest.TestCases like twisted trial
  or Django based TestCases. Also you can now run the tests of an
  installed 'unittest' package with py.test::

      py.test --pyargs unittest
- new "-q" option which decreases verbosity and prints a more
nose/unittest-style "dot" output.
- many more detailed improvements
Fixes
-----------------------
- fix issue126 - introduce py.test.set_trace() to trace execution via
PDB during the running of tests even if capturing is ongoing.
- fix issue124 - make reporting more resilient against tests opening
files on filedescriptor 1 (stdout).
- fix issue109 - sibling conftest.py files will not be loaded.
(and Directory collectors cannot be customized anymore from a Directory's
conftest.py - this needs to happen at least one level up).
- fix issue88 (finding custom test nodes from command line arg)
- fix issue93 stdout/stderr is captured while importing conftest.py
- fix bug: unittest collected functions now also can have "pytestmark"
applied at class/module level
Important Notes
--------------------
* The usual way in pre-2.0 times to use py.test in python code was
to import "py" and then e.g. use "py.test.raises" for the helper.
This remains valid and is not planned to be deprecated. However,
in most examples and internal code you'll find "import pytest"
and "pytest.raises" used as the recommended default way.
* pytest now first performs collection of the complete test suite
before running any test. This changes for example the semantics of when
pytest_collectstart/pytest_collectreport are called. Some plugins may
need upgrading.
* The pytest package consists of a 400 LOC core.py and about 20 builtin plugins,
summing up to roughly 5000 LOCs, including docstrings. To be fair, it also
uses generic code from the "pylib", and the new "py" package to help
with filesystem and introspection/code manipulation.
(Incompatible) Removals
-----------------------------
- py.test.config is now only available if you are in a test run.
- the following (mostly already deprecated) functionality was removed:
- removed support for Module/Class/... collection node definitions
in conftest.py files. They will cause nothing special.
- removed support for calling the pre-1.0 collection API of "run()" and "join"
- removed reading option values from conftest.py files or env variables.
This can now be done much much better and easier through the ini-file
mechanism and the "addopts" entry in particular.
- removed the "disabled" attribute in test classes. Use the skipping
and pytestmark mechanism to skip or xfail a test class.
- py.test.collect.Directory does not exist anymore and it
is not possible to provide an own "Directory" object.
If you have used this and don't know what to do, get
in contact. We'll figure something out.
Note that pytest_collect_directory() is still called but
any return value will be ignored. This allows old code
to keep working if it, for example, performed "py.test.skip()"
in collect() to prevent recursion into directory trees
when a certain dependency or command line option is missing.
see :ref:`changelog` for more detailed changes.

@@ -0,0 +1,67 @@
py.test 2.0.1: bug fixes
===========================================================================
Welcome to pytest-2.0.1, a maintenance and bug fix release of pytest,
a mature testing tool for Python, supporting CPython 2.4-3.2, Jython
and latest PyPy interpreters. See extensive docs with tested examples here:
http://pytest.org/
If you want to install or upgrade pytest, just type one of::

    pip install -U pytest # or
    easy_install -U pytest
Many thanks to all issue reporters and people asking questions or
complaining. Particular thanks to Floris Bruynooghe and Ronny Pfannschmidt
for their great coding contributions and many others for feedback and help.
best,
holger krekel
Changes between 2.0.0 and 2.0.1
----------------------------------------------
- refine and unify initial capturing so that it works nicely
even if the logging module is used on an early-loaded conftest.py
file or plugin.
- fix issue12 - show plugin versions with "--version" and
"--traceconfig" and also document how to add extra information
to reporting test header
- fix issue17 (import-* reporting issue on python3) by
requiring py>1.4.0 (1.4.1 is going to include it)
- fix issue10 (numpy arrays truth checking) by refining
assertion interpretation in py lib
- fix issue15: make nose compatibility tests compatible
with python3 (now that nose-1.0 supports python3)
- remove the somewhat surprising "same-conftest" detection because
it ignored conftest.py files when they appeared in several subdirs.
- improve assertions ("not in"), thanks Floris Bruynooghe
- improve behaviour/warnings when running on top of "python -OO"
(assertions and docstrings are turned off, leading to potential
false positives)
- introduce a pytest_cmdline_processargs(args) hook
to allow dynamic computation of command line arguments.
This fixes a regression because py.test prior to 2.0
allowed to set command line options from conftest.py
files which so far pytest-2.0 only allowed from ini-files now.
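  A minimal conftest.py sketch using the hook named above (the injected
  option is only an example)::

      def pytest_cmdline_processargs(args):
          args.insert(0, "-v")  # e.g. force verbose runs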
- fix issue7: assert failures in doctest modules.
unexpected failures in doctests will now generally
show nicer, i.e. within the doctest failing context.
- fix issue9: setup/teardown functions for an xfail-marked
test will report as xfail if they fail but report as normally
passing (not xpassing) if they succeed. This only is true
for "direct" setup/teardown invocations because teardown_class/
teardown_module cannot closely relate to a single test.
- fix issue14: no logging errors at process exit
- refinements to "collecting" output on non-ttys
- refine internal plugin registration and --traceconfig output
- introduce a mechanism to prevent/unregister plugins from the
command line, see http://pytest.org/latest/plugins.html#cmdunregister
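  For example, to disable the resultlog plugin for a single run (a
  usage sketch based on the linked docs)::

      py.test -p no:resultlog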
- activate resultlog plugin by default
- fix regression wrt yielded tests which due to the
collection-before-running semantics were not
setup as with pytest 1.3.4. Note, however, that
the recommended and much cleaner way to do test
parametrization remains the "pytest_generate_tests"
mechanism, see the docs.

@@ -0,0 +1,73 @@
py.test 2.0.2: bug fixes, improved xfail/skip expressions, speed ups
===========================================================================
Welcome to pytest-2.0.2, a maintenance and bug fix release of pytest,
a mature testing tool for Python, supporting CPython 2.4-3.2, Jython
and latest PyPy interpreters. See the extensive docs with tested examples here:
http://pytest.org/
If you want to install or upgrade pytest, just type one of::

    pip install -U pytest # or
    easy_install -U pytest
Many thanks to all issue reporters and people asking questions
or complaining, particularly Jurko for his insistence,
Laura, Victor and Brianna for helping with improvements,
and Ronny for his general advice.
best,
holger krekel
Changes between 2.0.1 and 2.0.2
----------------------------------------------
- tackle issue32 - speed up test runs of very quick test functions
by reducing the relative overhead
- fix issue30 - extended xfail/skipif handling and improved reporting.
If you have a syntax error in your skip/xfail
expressions you now get nice error reports.
Also you can now access module globals from xfail/skipif
expressions so that this for example works now::

    import pytest
    import mymodule

    @pytest.mark.skipif("mymodule.__version__[0] != '1'")
    def test_function():
        pass

This will not run the test function if the module's version string
does not start with a "1". Note that specifying a string instead
of a boolean expression allows py.test to report meaningful information
when summarizing a test run as to what conditions lead to skipping
(or xfail-ing) tests.
- fix issue28 - setup_method and pytest_generate_tests work together
The setup_method fixture method now gets called also for
test function invocations generated from the pytest_generate_tests
hook.
- fix issue27 - collectonly and keyword-selection (-k) now work together
Also, if you do "py.test --collectonly -q" you now get a flat list
of test ids that you can use to paste to the py.test commandline
in order to execute a particular test.
- fix issue25 avoid reported problems with --pdb and python3.2/encodings output
- fix issue23 - tmpdir argument now works on Python3.2 and WindowsXP
Starting with Python3.2 os.symlink may be supported. By requiring
a newer py lib version the py.path.local() implementation acknowledges
this.
- fixed typos in the docs (thanks Victor Garcia, Brianna Laugher) and particular
thanks to Laura Creighton who also reviewed parts of the documentation.
- fix slightly wrong output of verbose progress reporting for classes
(thanks Amaury)
- more precise (avoiding of) deprecation warnings for node.Class|Function accesses
- avoid std unittest assertion helper code in tracebacks (thanks Ronny)

@@ -0,0 +1,40 @@
py.test 2.0.3: bug fixes and speed ups
===========================================================================
Welcome to pytest-2.0.3, a maintenance and bug fix release of pytest,
a mature testing tool for Python, supporting CPython 2.4-3.2, Jython
and latest PyPy interpreters. See the extensive docs with tested examples here:
http://pytest.org/
If you want to install or upgrade pytest, just type one of::

    pip install -U pytest # or
    easy_install -U pytest
There is also a bugfix release 1.6 of pytest-xdist, the plugin
that enables seamless distributed and "looponfail" testing for Python.
best,
holger krekel
Changes between 2.0.2 and 2.0.3
----------------------------------------------
- fix issue38: nicer tracebacks on calls to hooks, particularly early
configure/sessionstart ones
- fix missing skip reason/meta information in junitxml files, reported
via http://lists.idyll.org/pipermail/testing-in-python/2011-March/003928.html
- fix issue34: avoid collection failure with "test" prefixed classes
deriving from object.
- don't require zlib (and other libs) for genscript plugin without
--genscript actually being used.
- speed up skips (by not doing a full traceback representation
internally)
- fix issue37: avoid invalid characters in junitxml's output

@@ -0,0 +1,47 @@
py.test 2.1.0: perfected assertions and bug fixes
===========================================================================
Welcome to the release of pytest-2.1, a mature testing tool for Python,
supporting CPython 2.4-3.2, Jython and latest PyPy interpreters. See
the improved extensive docs (now also as PDF!) with tested examples here:
http://pytest.org/
The single biggest news about this release are **perfected assertions**
courtesy of Benjamin Peterson. You can now safely use ``assert``
statements in test modules without having to worry about side effects
or python optimization ("-OO") options. This is achieved by rewriting
assert statements in test modules upon import, using a PEP302 hook.
See http://pytest.org/assert.html#advanced-assertion-introspection for
detailed information. The work has been partly sponsored by my company,
merlinux GmbH.
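As a small illustration (a made-up failing test, not from the release
notes), a plain assert now produces full introspection::

    def test_answer():
        x = 42
        assert x == 43  # the report shows: assert 42 == 43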
For further details on bug fixes and smaller enhancements see below.
If you want to install or upgrade pytest, just type one of::

    pip install -U pytest # or
    easy_install -U pytest
best,
holger krekel / http://merlinux.eu
Changes between 2.0.3 and 2.1.0
----------------------------------------------
- fix issue53 call nose-style setup functions with correct ordering
- fix issue58 and issue59: new assertion code fixes
- merge Benjamin's assertionrewrite branch: now assertions
for test modules on python 2.6 and above are done by rewriting
the AST and saving the pyc file before the test module is imported.
see doc/assert.txt for more info.
- fix issue43: improve doctests with better traceback reporting on
unexpected exceptions
- fix issue47: timing output in junitxml for test cases is now correct
- fix issue48: typo in MarkInfo repr leading to exception
- fix issue49: avoid confusing error when initialization partially fails
- fix issue44: env/username expansion for junitxml file path
- show releaselevel information in test runs for pypy
- reworked doc pages for better navigation and PDF generation
- report KeyboardInterrupt even if interrupted during session startup
- fix issue 35 - provide PDF doc version and download link from index page

@@ -0,0 +1,37 @@
py.test 2.1.1: assertion fixes and improved junitxml output
===========================================================================
pytest-2.1.1 is a backward compatible maintenance release of the
popular py.test testing tool. See extensive docs with examples here:
http://pytest.org/
Most bug fixes address remaining issues with the perfected assertions
introduced with 2.1.0 - many thanks to the bug reporters and to Benjamin
Peterson for helping to fix them. Also, junitxml output now produces
system-out/err tags which lead to better displays of tracebacks with Jenkins.
Also a quick note to package maintainers and others interested: there now
is a "pytest" man page which can be generated with "make man" in doc/.
If you want to install or upgrade pytest, just type one of::

    pip install -U pytest # or
    easy_install -U pytest
best,
holger krekel / http://merlinux.eu
Changes between 2.1.0 and 2.1.1
----------------------------------------------
- fix issue64 / pytest.set_trace now works within pytest_generate_tests hooks
- fix issue60 / fix error conditions involving the creation of __pycache__
- fix issue63 / assertion rewriting on inserts involving strings containing '%'
- fix assertion rewriting on calls with a ** arg
- don't cache rewritten modules if bytecode generation is disabled
- fix assertion rewriting in read-only directories
- fix issue59: provide system-out/err tags for junitxml output
- fix issue61: assertion rewriting on boolean operations with 3 or more operands
- you can now build a man page with "cd doc ; make man"

@@ -0,0 +1,33 @@
py.test 2.1.2: bug fixes and fixes for jython
===========================================================================
pytest-2.1.2 is a minor backward compatible maintenance release of the
popular py.test testing tool. pytest is commonly used for unit,
functional- and integration testing. See extensive docs with examples
here:
http://pytest.org/
Most bug fixes address remaining issues with the perfected assertions
introduced in the 2.1 series - many thanks to the bug reporters and to Benjamin
Peterson for helping to fix them. pytest should also work better with
Jython-2.5.1 (and Jython trunk).
If you want to install or upgrade pytest, just type one of::

    pip install -U pytest # or
    easy_install -U pytest
best,
holger krekel / http://merlinux.eu
Changes between 2.1.1 and 2.1.2
----------------------------------------
- fix assertion rewriting on files with windows newlines on some Python versions
- refine test discovery by package/module name (--pyargs), thanks Florian Mayer
- fix issue69 / assertion rewriting fixed on some boolean operations
- fix issue68 / packages now work with assertion rewriting
- fix issue66: use different assertion rewriting caches when the -O option is passed
- don't try assertion rewriting on Jython, use reinterp

@@ -0,0 +1,32 @@
py.test 2.1.3: just some more fixes
===========================================================================
pytest-2.1.3 is a minor backward compatible maintenance release of the
popular py.test testing tool. It is commonly used for unit, functional-
and integration testing. See extensive docs with examples here:
http://pytest.org/
The release contains another fix to the perfected assertions introduced
with the 2.1 series as well as the new possibility to customize reporting
for assertion expressions on a per-directory level.
If you want to install or upgrade pytest, just type one of::

    pip install -U pytest # or
    easy_install -U pytest
Thanks to the bug reporters and to Ronny Pfannschmidt, Benjamin Peterson
and Floris Bruynooghe who implemented the fixes.
best,
holger krekel
Changes between 2.1.2 and 2.1.3
----------------------------------------
- fix issue79: assertion rewriting failed on some comparisons in boolops
- correctly handle zero length arguments (a la pytest '')
- fix issue67 / junitxml now contains correct test durations
- fix issue75 / skipping test failure on jython
- fix issue77 / Allow assertrepr_compare hook to apply to a subset of tests

@@ -0,0 +1,95 @@
py.test 2.2.0: test marking++, parametrization++ and duration profiling
===========================================================================
pytest-2.2.0 is a test-suite compatible release of the popular
py.test testing tool. Plugins might need upgrades. It comes
with these improvements:
* easier and more powerful parametrization of tests:
- new @pytest.mark.parametrize decorator to run tests with different arguments
- new metafunc.parametrize() API for parametrizing arguments independently
- see examples at http://pytest.org/latest/example/parametrize.html
- NOTE that parametrize() related APIs are still a bit experimental
and might change in future releases.
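  As a minimal sketch of the new decorator (adapted from the linked
  examples page; the tested expressions are illustrative)::

      import pytest

      @pytest.mark.parametrize("input, expected", [
          ("3+5", 8),
          ("2+4", 6),
      ])
      def test_eval(input, expected):
          assert eval(input) == expected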
* improved handling of test markers and refined marking mechanism:
- "-m markexpr" option for selecting tests according to their mark
- a new "markers" ini-variable for registering test markers for your project
- the new "--strict" bails out with an error if using unregistered markers.
- see examples at http://pytest.org/latest/example/markers.html
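  For example (a usage sketch; ``webtest`` is an illustrative marker name)::

      py.test -m webtest

  with the marker registered in the ini-file::

      [pytest]
      markers =
          webtest: mark a test as a webtest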
* duration profiling: new "--durations=N" option showing the N slowest test
execution or setup/teardown calls. This is most useful if you want to
find out where your slowest test code is.
* also 2.2.0 performs more eager calling of teardown/finalizer functions
resulting in better and more accurate reporting when they fail
Besides there is the usual set of bug fixes along with a cleanup of
pytest's own test suite allowing it to run on a wider range of environments.
For general information, see extensive docs with examples here:
http://pytest.org/
If you want to install or upgrade pytest you might just type::

    pip install -U pytest # or
    easy_install -U pytest
Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, Alfredo Deza and all who gave feedback or sent bug reports.
best,
holger krekel
notes on incompatibility
------------------------------
While test suites should work unchanged you might need to upgrade plugins:
* You need a new version of the pytest-xdist plugin (1.7) for distributing
test runs.
* Other plugins might need an upgrade if they implement
the ``pytest_runtest_logreport`` hook which now is called unconditionally
for the setup/teardown fixture phases of a test. You may choose to
ignore setup/teardown failures by inserting "if rep.when != 'call': return"
or something similar. Note that most code probably "just" works because
the hook was already called for failing setup/teardown phases of a test
so a plugin should have been ready to grok such reports already.
Changes between 2.1.3 and 2.2.0
----------------------------------------
- fix issue90: introduce eager tearing down of test items so that
teardown function are called earlier.
- add an all-powerful metafunc.parametrize function which allows
parametrizing test function arguments in multiple steps and therefore
from independent plugins and places.
- add a @pytest.mark.parametrize helper which makes it easy to
call a test function with different argument values.
- Add examples to the "parametrize" example page, including a quick port
of Test scenarios and the new parametrize function and decorator.
- introduce registration for "pytest.mark.*" helpers via ini-files
or through plugin hooks. Also introduce a "--strict" option which
will treat unregistered markers as errors
allowing you to avoid typos and maintain a well described set of markers
for your test suite. See examples at http://pytest.org/latest/mark.html
and its links.
- issue50: introduce "-m marker" option to select tests based on markers
(this is a stricter and more predictable version of "-k" in that "-m"
only matches complete markers and has more obvious rules for and/or
semantics).
- new feature to help optimize the speed of your tests:
--durations=N option for displaying N slowest test calls
and setup/teardown methods.
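  For example, to show the ten slowest calls after a run (usage sketch)::

      py.test --durations=10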
- fix issue87: --pastebin now works with python3
- fix issue89: --pdb with unexpected exceptions in doctest work more sensibly
- fix and cleanup pytest's own test suite to not leak FDs
- fix issue83: link to generated funcarg list
- fix issue74: pyarg module names are now checked against imp.find_module false positives
- fix compatibility with twisted/trial-11.1.0 use cases

@@ -0,0 +1,41 @@
pytest-2.2.1: bug fixes, perfect teardowns
===========================================================================
pytest-2.2.1 is a minor backward-compatible release of the py.test
testing tool. It contains bug fixes and little improvements, including
documentation fixes. If you are using the distributed testing
plugin, make sure to upgrade it to pytest-xdist-1.8.
For general information see here:
http://pytest.org/
To install or upgrade pytest::

    pip install -U pytest # or
    easy_install -U pytest
Special thanks for helping on this release to Ronny Pfannschmidt, Jurko
Gospodnetic and Ralf Schmitt.
best,
holger krekel
Changes between 2.2.0 and 2.2.1
----------------------------------------
- fix issue99 (in pytest and py) internal errors with resultlog now
produce better output - fixed by normalizing pytest_internalerror
input arguments.
- fix issue97 / traceback issues (in pytest and py) improve traceback output
in conjunction with jinja2 and cython which hack tracebacks
- fix issue93 (in pytest and pytest-xdist) avoid "delayed teardowns":
the final test in a test node will now run its teardown directly
instead of waiting for the end of the session. Thanks Dave Hunt for
the good reporting and feedback. The pytest_runtest_protocol as well
as the pytest_runtest_teardown hooks now have "nextitem" available
which will be None indicating the end of the test run.
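  A hedged sketch of a hook implementation using it::

      def pytest_runtest_teardown(item, nextitem):
          if nextitem is None:
              # final test of the run: e.g. release node-wide resources
              pass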
- fix collection crash due to unknown-source collected items, thanks
to Ralf Schmitt (fixed by depending on a more recent pylib)

@@ -0,0 +1,43 @@
pytest-2.2.2: bug fixes
===========================================================================
pytest-2.2.2 (updated to 2.2.3 to fix packaging issues) is a minor
backward-compatible release of the versatile py.test testing tool. It
contains bug fixes and a few refinements particularly to reporting with
"--collectonly", see below for betails.
For general information see here:
http://pytest.org/
To install or upgrade pytest::

    pip install -U pytest # or
    easy_install -U pytest
Special thanks for helping on this release to Ronny Pfannschmidt
and Ralf Schmitt and the contributors of issues.
best,
holger krekel
Changes between 2.2.1 and 2.2.2
----------------------------------------
- fix issue101: wrong args to unittest.TestCase test function now
produce better output
- fix issue102: report more useful errors and hints for when a
test directory was renamed and some pyc/__pycache__ remain
- fix issue106: allow parametrize to be applied multiple times
e.g. from module, class and at function level.
- fix issue107: actually perform session scope finalization
- don't check in parametrize if indirect parameters are funcarg names
- add chdir method to monkeypatch funcarg
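  For example (a minimal sketch using the built-in tmpdir funcarg)::

      def test_in_tmpdir(tmpdir, monkeypatch):
          monkeypatch.chdir(tmpdir)  # the old cwd is restored via undo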
- fix crash resulting from calling monkeypatch undo a second time
- fix issue115: make --collectonly robust against early failure
(missing files/directories)
- "-qq --collectonly" now shows only files and the number of tests in them
- "-q --collectonly" now shows test ids
- allow adding of attributes to test reports such that it also works
with distributed testing (no upgrade of pytest-xdist needed)

@@ -0,0 +1,39 @@
pytest-2.2.4: bug fixes, better junitxml/unittest/python3 compat
===========================================================================
pytest-2.2.4 is a minor backward-compatible release of the versatile
py.test testing tool. It contains bug fixes and a few refinements
to junitxml reporting, better unittest- and python3 compatibility.
For general information see here:
http://pytest.org/
To install or upgrade pytest::

    pip install -U pytest # or
    easy_install -U pytest
Special thanks for helping on this release to Ronny Pfannschmidt
and Benjamin Peterson and the contributors of issues.
best,
holger krekel
Changes between 2.2.3 and 2.2.4
-----------------------------------
- fix error message for rewritten assertions involving the % operator
- fix issue 126: correctly match all invalid xml characters for junitxml
binary escape
- fix issue with unittest: now @unittest.expectedFailure markers should
be processed correctly (you can also use @pytest.mark markers)
- document integration with the extended distribute/setuptools test commands
- fix issue 140: properly get the real functions
of bound classmethods for setup/teardown_class
- fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net
- fix issue #143: call unconfigure/sessionfinish always when
configure/sessionstart where called
- fix issue #144: better mangle test ids to junitxml classnames
- upgrade distribute_setup.py to 0.6.27

@@ -0,0 +1,134 @@
pytest-2.3: improved fixtures / better unittest integration
=============================================================================
pytest-2.3 comes with many major improvements for fixture/funcarg management
and parametrized testing in Python. It is now easier, more efficient and
more predictable to re-run the same tests with different fixture
instances. Also, you can directly declare the caching "scope" of
fixtures so that dependent tests throughout your whole test suite can
re-use database or other expensive fixture objects with ease. Lastly,
it's possible for fixture functions (formerly known as funcarg
factories) to use other fixtures, allowing for a completely modular and
re-usable fixture design.
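As a minimal sketch of these pieces working together (the dict stands
in for an expensive resource; names are illustrative)::

    import pytest

    @pytest.fixture(scope="session")
    def db():
        # created once and cached for the whole test session
        return {"connected": True}

    @pytest.fixture
    def transact(db):
        # a fixture may itself depend on another fixture
        return dict(db, in_transaction=True)

    def test_query(transact):
        assert transact["in_transaction"]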
For detailed info and tutorial-style examples, see:
http://pytest.org/latest/fixture.html
Moreover, there is now support for using pytest fixtures/funcargs with
unittest-style suites, see here for examples:
http://pytest.org/latest/unittest.html
Besides, more unittest-test suites are now expected to "simply work"
with pytest.
All changes are backward compatible and you should be able to continue
to run your test suites and 3rd party plugins that worked with
pytest-2.2.4.
If you are interested in the precise reasoning (including examples) of the
pytest-2.3 fixture evolution, please consult
http://pytest.org/latest/funcarg_compare.html
For general info on installation and getting started:
http://pytest.org/latest/getting-started.html
Docs and PDF access as usual at:
http://pytest.org
and more details for those already familiar with pytest can be found
in the CHANGELOG below.
Particular thanks for this release go to Floris Bruynooghe, Alex Okrushko,
Carl Meyer, Ronny Pfannschmidt, Benjamin Peterson and Alex Gaynor for helping
to get the new features right and well integrated. Ronny and Floris
also helped to fix a number of bugs and yet more people helped by
providing bug reports.
have fun,
holger krekel
Changes between 2.2.4 and 2.3.0
-----------------------------------
- fix issue202 - better automatic names for parametrized test functions
- fix issue139 - introduce @pytest.fixture which allows direct scoping
and parametrization of funcarg factories. Introduce new @pytest.setup
marker to allow the writing of setup functions which accept funcargs.
- fix issue198 - conftest fixtures were not found on windows32 in some
circumstances with nested directory structures due to path manipulation issues
- fix issue193: skip test functions that were parametrized with empty
parameter sets
- fix python3.3 compat, mostly reporting bits that previously depended
on dict ordering
- introduce re-ordering of tests by resource and parametrization setup
which takes precedence over the usual file-ordering
- fix issue185 monkeypatching time.time does not cause pytest to fail
- fix issue172 duplicate call of pytest.setup-decorated setup_module
functions
- fix junitxml=path construction so that if tests change the
current working directory and the path is a relative path
it is constructed correctly from the original current working dir.
- fix "python setup.py test" example to cause a proper "errno" return
- fix issue165 - fix broken doc links and mention stackoverflow for FAQ
- catch unicode-issues when writing failure representations
to terminal to prevent the whole session from crashing
- fix xfail/skip confusion: a skip-mark or an imperative pytest.skip
will now take precedence over xfail-markers because we
can't determine xfail/xpass status in case of a skip. see also:
http://stackoverflow.com/questions/11105828/in-py-test-when-i-explicitly-skip-a-test-that-is-marked-as-xfail-how-can-i-get
- always report installed 3rd party plugins in the header of a test run
- fix issue160: a failing setup of an xfail-marked tests should
be reported as xfail (not xpass)
- fix issue128: show captured output when capsys/capfd are used
- fix issue179: properly show the dependency chain of factories
- pluginmanager.register(...) now raises ValueError if the
plugin has been already registered or the name is taken
- fix issue159: improve http://pytest.org/latest/faq.html
especially with respect to the "magic" history, also mention
pytest-django, trial and unittest integration.
- make request.keywords and node.keywords writable. All descendant
collection nodes will see keyword values. Keywords are dictionaries
containing markers and other info.
- fix issue 178: xml binary escapes are now wrapped in py.xml.raw
- fix issue 176: correctly catch the builtin AssertionError
even when we replaced AssertionError with a subclass on the
python level
- factory discovery no longer fails with magic global callables
that provide no sane __code__ object (mock.call for example)
- fix issue 182: testdir.inprocess_run now considers passed plugins
- fix issue 188: ensure sys.exc_info is clear on python2
before calling into a test
- fix issue 191: add unittest TestCase runTest method support
- fix issue 156: monkeypatch correctly handles class level descriptors
- reporting refinements:
- pytest_report_header now receives a "startdir" so that
you can use startdir.bestrelpath(yourpath) to show
a nice relative path
- allow plugins to implement both pytest_report_header and
pytest_sessionstart (sessionstart is invoked first).
- don't show deselected reason line if there is none
- py.test -vv will show all assert comparisons instead of truncating

View file

@ -0,0 +1,39 @@
pytest-2.3.1: fix regression with factory functions
===========================================================================
pytest-2.3.1 is a quick follow-up release:
- fix issue202 - regression with fixture functions/funcarg factories:
using "self" is now safe again and works as in 2.2.4. Thanks
to Eduard Schettino for the quick bug report.
- disable pexpect pytest self tests on FreeBSD - thanks Koob for the
quick reporting
- fix/improve interactive docs with --markers
See
http://pytest.org/
for general information. To install or upgrade pytest:
pip install -U pytest # or
easy_install -U pytest
best,
holger krekel
Changes between 2.3.0 and 2.3.1
-----------------------------------
- fix issue202 - fix regression: using "self" from fixture functions now
works as expected (it's the same "self" instance that a test method
which uses the fixture sees)
- skip pexpect using tests (test_pdb.py mostly) on freebsd* systems
due to pexpect not supporting it properly (hanging)
- link to web pages from --markers output which provides help for
pytest.mark.* usage.

View file

@ -0,0 +1,57 @@
pytest-2.3.2: some fixes and more traceback-printing speed
===========================================================================
pytest-2.3.2 is another stabilization release:
- issue 205: fixes a regression with conftest detection
- issue 208/29: fixes traceback-printing speed in some bad cases
- fix teardown-ordering for parametrized setups
- fix unittest and trial compat behaviour with respect to runTest() methods
- issue 206 and others: some improvements to packaging
- fix issue127 and others: improve some docs
See
http://pytest.org/
for general information. To install or upgrade pytest:
pip install -U pytest # or
easy_install -U pytest
best,
holger krekel
Changes between 2.3.1 and 2.3.2
-----------------------------------
- fix issue208 and issue29: use a newer py version to avoid long pauses
when printing tracebacks in long modules
- fix issue205 - conftests in subdirs customizing
pytest_pycollect_makemodule and pytest_pycollect_makeitem
now work properly
- fix teardown-ordering for parametrized setups
- fix issue127 - better documentation for pytest_addoption
and related objects.
- fix unittest behaviour: TestCase.runtest only called if there are
test methods defined
- improve trial support: don't collect its empty
unittest.TestCase.runTest() method
- "python setup.py test" now works with pytest itself
- fix/improve internal/packaging related bits:
- exception message check of test_nose.py now passes on python3.3 as well
- issue206 - fix test_assertrewrite.py to work when a global
PYTHONDONTWRITEBYTECODE=1 is present
- add tox.ini to pytest distribution so that ignore-dirs and other config
bits are properly distributed for maintainers who run pytest-own tests

View file

@ -0,0 +1,62 @@
pytest-2.3.3: integration fixes, py24 support, ``*/**`` shown in traceback
===========================================================================
pytest-2.3.3 is another stabilization release of the py.test tool
which offers uebersimple assertions, scalable fixture mechanisms
and deep customization for testing with Python. Particularly,
this release provides:
- integration fixes and improvements related to flask, numpy, nose,
unittest, mock
- makes pytest work on py24 again (yes, people sometimes still need to use it)
- show ``*,**`` args in pytest tracebacks
Thanks to Manuel Jacob, Thomas Waldmann, Ronny Pfannschmidt, Pavel Repin
and Andreas Taumoefolau for providing patches and all for the issues.
See
http://pytest.org/
for general information. To install or upgrade pytest:
pip install -U pytest # or
easy_install -U pytest
best,
holger krekel
Changes between 2.3.2 and 2.3.3
-----------------------------------
- fix issue214 - parse modules that contain special objects like e.g.
flask's request object which blows up on getattr access if no request
is active. thanks Thomas Waldmann.
- fix issue213 - allow to parametrize with values like numpy arrays that
do not support an __eq__ operator
- fix issue215 - split test_python.org into multiple files
- fix issue148 - @unittest.skip on classes is now recognized and avoids
calling setUpClass/tearDownClass, thanks Pavel Repin
- fix issue209 - reintroduce python2.4 support by depending on newer
pylib which re-introduced statement-finding for pre-AST interpreters
- nose support: only call setup if it's a callable, thanks Andrew
Taumoefolau
- fix issue219 - add py2.4-3.3 classifiers to TROVE list
- in tracebacks *,** arg values are now shown next to normal arguments
(thanks Manuel Jacob)
- fix issue217 - support mock.patch with pytest's fixtures - note that
you need either mock-1.0.1 or the python3.3 builtin unittest.mock.
- fix issue127 - improve documentation for pytest_addoption() and
add a ``config.getoption(name)`` helper function for consistency.

View file

@ -0,0 +1,39 @@
pytest-2.3.4: stabilization, more flexible selection via "-k expr"
===========================================================================
pytest-2.3.4 is a small stabilization release of the py.test tool
which offers uebersimple assertions, scalable fixture mechanisms
and deep customization for testing with Python. This release
comes with the following fixes and features:
- make "-k" option accept an expressions the same as with "-m" so that one
can write: -k "name1 or name2" etc. This is a slight usage incompatibility
if you used special syntax like "TestClass.test_method" which you now
need to write as -k "TestClass and test_method" to match a certain
method in a certain test class.
- allow to dynamically define markers via
item.keywords[...]=assignment integrating with "-m" option
- yielded test functions will now have autouse-fixtures active but
cannot accept fixtures as funcargs - it's recommended to use the
post-2.0 parametrize features instead of yield anyway, see:
http://pytest.org/latest/example/parametrize.html
- fix autouse-issue where autouse-fixtures would not be discovered
if defined in a/conftest.py while the tests live in a/tests/test_some.py
- fix issue226 - LIFO ordering for fixture teardowns
- fix issue224 - invocations with >256 char arguments now work
- fix issue91 - add/discuss package/directory level setups in example
- fixes related to autouse discovery and calling
Thanks in particular to Thomas Waldmann for spotting and reporting issues.
See
http://pytest.org/
for general information. To install or upgrade pytest:
pip install -U pytest # or
easy_install -U pytest
best,
holger krekel

View file

@ -0,0 +1,97 @@
pytest-2.3.5: bug fixes and little improvements
===========================================================================
pytest-2.3.5 is a maintenance release with many bug fixes and little
improvements. See the changelog below for details. No backward
compatibility issues are foreseen and all plugins which worked with the
prior version are expected to work unmodified. Speaking of which, a
few interesting new plugins saw the light last month:
- pytest-instafail: show failure information while tests are running
- pytest-qt: testing of GUI applications written with QT/Pyside
- pytest-xprocess: managing external processes across test runs
- pytest-random: randomize test ordering
And several others like pytest-django saw maintenance releases.
For a more complete list, check out
https://pypi.python.org/pypi?%3Aaction=search&term=pytest&submit=search.
For general information see:
http://pytest.org/
To install or upgrade pytest:
pip install -U pytest # or
easy_install -U pytest
Particular thanks to Floris, Ronny, Benjamin and the many bug reporters
and fix providers.
may the fixtures be with you,
holger krekel
Changes between 2.3.4 and 2.3.5
-----------------------------------
- never consider a fixture function for test function collection
- allow re-running of test items / helps to fix pytest-reruntests plugin
and also helps to keep fewer fixture/resource references alive
- put captured stdout/stderr into junitxml output even for passing tests
(thanks Adam Goucher)
- Issue 265 - integrate nose setup/teardown with setupstate
so it doesn't try to tear down if it did not set up
- issue 271 - don't write junitxml on slave nodes
- Issue 274 - don't try to show full doctest example
when doctest does not know the example location
- issue 280 - disable assertion rewriting on buggy CPython 2.6.0
- inject "getfixture()" helper to retrieve fixtures from doctests,
thanks Andreas Zeidler
- issue 259 - when assertion rewriting, be consistent with the default
source encoding of ASCII on Python 2
- issue 251 - report a skip instead of ignoring classes with an __init__ method
- issue250: unicode/str mixes in parametrization names and values now work
- issue257, assertion-triggered compilation of source ending in a
comment line doesn't blow up in python2.5 (fixed through py>=1.4.13.dev6)
- fix --genscript option to generate standalone scripts that also
work with python3.3 (importer ordering)
- issue171 - in assertion rewriting, show the repr of some
global variables
- fix option help for "-k"
- move long description of distribution into README.rst
- improve docstring for metafunc.parametrize()
- fix bug where using capsys with pytest.set_trace() in a test
function would break when looking at capsys.readouterr()
- allow to specify prefixes starting with "_" when
customizing python_functions test discovery. (thanks Graham Horler)
- improve PYTEST_DEBUG tracing output by putting
extra data on new lines with additional indent
- ensure OutcomeExceptions like skip/fail have initialized exception attributes
- issue 260 - don't use nose special setup on plain unittest cases
- fix issue134 - print the collect errors that prevent running specified test items
- fix issue266 - accept unicode in MarkEvaluator expressions
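A rough sketch of the "getfixture()" doctest helper mentioned in the list
above (the file contents are invented for the example; put them in a
text file collected by py.test)::

    >>> tmp = getfixture('tmpdir')
    >>> f = tmp.join('hello.txt')
    >>> f.write('hi')
    >>> f.read()
    'hi'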

View file

@ -0,0 +1,225 @@
pytest-2.4.0: new fixture features/hooks and bug fixes
===========================================================================
The just released pytest-2.4.0 brings many improvements and numerous
bug fixes while remaining plugin- and test-suite compatible apart
from a few supposedly very minor incompatibilities. See below for
a full list of details. A few feature highlights:
- new yield-style fixtures `pytest.yield_fixture
<http://pytest.org/latest/yieldfixture.html>`_, allowing to use
existing with-style context managers in fixture functions (see the
sketch after this list).
- improved pdb support: ``import pdb ; pdb.set_trace()`` now works
without requiring prior disabling of stdout/stderr capturing.
Also the ``--pdb`` option now works on collection and internal errors
and we introduced a new experimental hook for IDEs/plugins to
intercept debugging: ``pytest_exception_interact(node, call, report)``.
- shorter monkeypatch variant to allow specifying an import path as
a target, for example: ``monkeypatch.setattr("requests.get", myfunc)``
- better unittest/nose compatibility: all teardown methods are now only
called if the corresponding setup method succeeded.
- integrate tab-completion on command line options if you
have `argcomplete <http://pypi.python.org/pypi/argcomplete>`_
configured.
- allow boolean expression directly with skipif/xfail
if a "reason" is also specified.
- a new hook ``pytest_load_initial_conftests`` allows plugins like
`pytest-django <http://pypi.python.org/pypi/pytest-django>`_ to
influence the environment before conftest files import ``django``.
- reporting: color the last line red or green depending if
failures/errors occurred or everything passed.
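To make the first highlight concrete, here is a minimal sketch of a
yield-style fixture (the fixture body is invented for the example)::

    import pytest

    @pytest.yield_fixture
    def resource():
        # everything before the yield is setup ...
        res = {"open": True}
        yield res
        # ... and everything after it is teardown; no explicit
        # addfinalizer() registration is needed
        res["open"] = False

    def test_resource(resource):
        assert resource["open"]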
The documentation has been updated to accommodate the changes,
see `http://pytest.org <http://pytest.org>`_
To install or upgrade pytest::
pip install -U pytest # or
easy_install -U pytest
**Many thanks to all who helped, including Floris Bruynooghe,
Brianna Laugher, Andreas Pelme, Anthon van der Neut, Anatoly Bubenkoff,
Vladimir Keleshev, Mathieu Agopian, Ronny Pfannschmidt, Christian
Theunert and many others.**
may passing tests be with you,
holger krekel
Changes between 2.3.5 and 2.4
-----------------------------------
known incompatibilities:
- if calling --genscript from python2.7 or above, you only get a
standalone script which works on python2.7 or above. Use Python2.6
to also get a python2.5 compatible version.
- all xunit-style teardown methods (nose-style, pytest-style,
unittest-style) will not be called if the corresponding setup method failed,
see issue322 below.
- the pytest_plugin_unregister hook wasn't ever properly called
and there is no known implementation of the hook - so it got removed.
- pytest.fixture-decorated functions cannot be generators (i.e. use
yield) anymore. This change might be reversed in 2.4.1 if it causes
unforeseen real-life issues. However, you can always write and return
an inner function/generator and change the fixture consumer to iterate
over the returned generator. This change was done in favour of the new
``pytest.yield_fixture`` decorator, see below.
new features:
- experimentally introduce a new ``pytest.yield_fixture`` decorator
which accepts exactly the same parameters as pytest.fixture but
mandates a ``yield`` statement instead of a ``return`` statement from
fixture functions. This allows direct integration with "with-style"
context managers in fixture functions and generally avoids registering
of finalization callbacks in favour of treating the "after-yield" as
teardown code. Thanks Andreas Pelme, Vladimir Keleshev, Floris
Bruynooghe, Ronny Pfannschmidt and many others for discussions.
- allow boolean expression directly with skipif/xfail
if a "reason" is also specified (see the sketch after this list).
Rework skipping documentation to recommend "condition as booleans"
because it prevents surprises when importing markers between modules.
Specifying conditions as strings will remain fully supported.
- reporting: color the last line red or green depending if
failures/errors occurred or everything passed. thanks Christian
Theunert.
- make "import pdb ; pdb.set_trace()" work natively wrt capturing (no
"-s" needed anymore), making ``pytest.set_trace()`` a mere shortcut.
- fix issue181: --pdb now also works on collect errors (and
on internal errors). This was implemented by a slight internal
refactoring and the introduction of the new
``pytest_exception_interact`` hook (see next item).
- fix issue341: introduce new experimental hook for IDEs/terminals to
intercept debugging: ``pytest_exception_interact(node, call, report)``.
- new monkeypatch.setattr() variant to provide a shorter
invocation for patching out classes/functions from modules:
monkeypatch.setattr("requests.get", myfunc)
will replace the "get" function of the "requests" module with ``myfunc``.
- fix issue322: tearDownClass is not run if setUpClass failed. Thanks
Mathieu Agopian for the initial fix. Also make all pytest/nose
finalizers mimic the same generic behaviour: if a setupX exists and
fails, don't run teardownX. This internally introduces a new method
"node.addfinalizer()" helper which can only be called during the setup
phase of a node.
- simplify pytest.mark.parametrize() signature: allow to pass a
comma-separated string to specify argnames. For example:
``pytest.mark.parametrize("input,expected", [(1,2), (2,3)])``
works as well as the previous:
``pytest.mark.parametrize(("input", "expected"), ...)``.
- add support for setUpModule/tearDownModule detection, thanks Brian Okken.
- integrate tab-completion on options through use of "argcomplete".
Thanks Anthon van der Neut for the PR.
- change option names to be hyphen-separated long options but keep the
old spelling backward compatible. py.test -h will only show the
hyphenated version, for example "--collect-only" but "--collectonly"
will remain valid as well (for backward-compat reasons). Many thanks to
Anthon van der Neut for the implementation and to Hynek Schlawack for
pushing us.
- fix issue 308 - allow to mark/xfail/skip individual parameter sets
when parametrizing. Thanks Brianna Laugher.
- call new experimental pytest_load_initial_conftests hook to allow
3rd party plugins to do something before a conftest is loaded.
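The boolean skipif/xfail conditions mentioned above look like this in
use (a minimal sketch)::

    import sys
    import pytest

    @pytest.mark.skipif(sys.platform == "win32",
                        reason="does not run on windows")
    def test_posix_only():
        assert True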
Bug fixes:
- fix issue358 - capturing options are now parsed more properly
by using a new parser.parse_known_args method.
- pytest now uses argparse instead of optparse (thanks Anthon) which
means that "argparse" is added as a dependency if installing into python2.6
environments or below.
- fix issue333: fix a case of bad unittest/pytest hook interaction.
- PR27: correctly handle nose.SkipTest during collection. Thanks
Antonio Cuni, Ronny Pfannschmidt.
- fix issue355: junitxml puts name="pytest" attribute to testsuite tag.
- fix issue336: autouse fixture in plugins should work again.
- fix issue279: improve object comparisons on assertion failure
for standard datatypes and recognise collections.abc. Thanks to
Brianna Laugher and Mathieu Agopian.
- fix issue317: assertion rewriter support for the is_package method
- fix issue335: document py.code.ExceptionInfo() object returned
from pytest.raises(), thanks Mathieu Agopian.
- remove implicit distribute_setup support from setup.py.
- fix issue305: ignore any problems when writing pyc files.
- SO-17664702: call fixture finalizers even if the fixture function
partially failed (finalizers would not always be called before)
- fix issue320 - fix class scope for fixtures when mixed with
module-level functions. Thanks Anatoly Bubenkoff.
- you can specify "-q" or "-qq" to get different levels of "quieter"
reporting (thanks Katarzyna Jachim)
- fix issue300 - Fix order of conftest loading when starting py.test
in a subdirectory.
- fix issue323 - sorting of many module-scoped arg parametrizations
- make sessionfinish hooks execute with the same cwd-context as at
session start (helps fix plugin behaviour which write output files
with relative path such as pytest-cov)
- fix issue316 - properly reference collection hooks in docs
- fix issue 306 - cleanup of -k/-m options to only match markers/test
names/keywords respectively. Thanks Wouter van Ackooy.
- improved doctest counting for doctests in python modules --
files without any doctest items will not show up anymore
and doctest examples are counted as separate test items.
thanks Danilo Bellini.
- fix issue245 by depending on the released py-1.4.14
which fixes py.io.dupfile to work with files with no
mode. Thanks Jason R. Coombs.
- fix junitxml generation when test output contains control characters,
addressing issue267, thanks Jaap Broekhuizen
- fix issue338: honor --tb style for setup/teardown errors as well. Thanks Maho.
- fix issue307 - use yaml.safe_load in example, thanks Mark Eichin.
- better parametrize error messages, thanks Brianna Laugher
- pytest_terminal_summary(terminalreporter) hooks can now use
".section(title)" and ".line(msg)" methods to print extra
information at the end of a test run.

View file

@ -0,0 +1,25 @@
pytest-2.4.1: fixing three regressions compared to 2.3.5
===========================================================================
pytest-2.4.1 is a quick follow up release to fix three regressions
compared to 2.3.5 before they hit more people:
- When using parser.addoption() unicode arguments to the
"type" keyword should also be converted to the respective types;
see the sketch after this list.
thanks Floris Bruynooghe, @dnozay. (fixes issue360 and issue362)
- fix dotted filename completion when using argcomplete
thanks Anthon van der Neut. (fixes issue361)
- fix regression when a 1-tuple ("arg",) is used for specifying
parametrization (the values of the parametrization were passed
nested in a tuple). Thanks Donald Stufft.
- also merge doc typo fixes, thanks Andy Dirnberger
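For reference, a minimal conftest.py sketch using parser.addoption()
(the option name and values are invented for the example)::

    # conftest.py
    def pytest_addoption(parser):
        parser.addoption("--speed", type=float, default=1.0,
                         help="speed factor passed to tests")

Tests can then read the value via ``request.config.getoption("--speed")``.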
as usual, docs at http://pytest.org and upgrades via::
pip install -U pytest
have fun,
holger krekel

View file

@ -0,0 +1,39 @@
pytest-2.4.2: colorama on windows, plugin/tmpdir fixes
===========================================================================
pytest-2.4.2 is another bug-fixing release:
- on Windows require colorama and a newer py lib so that py.io.TerminalWriter()
now uses colorama instead of its own ctypes hacks. (fixes issue365)
thanks Paul Moore for bringing it up.
- fix "-k" matching of tests where "repr" and "attr" and other names would
cause wrong matches because of an internal implementation quirk
(don't ask) which is now properly implemented. fixes issue345.
- avoid the tmpdir fixture creating overly long filenames, especially
when parametrization is used (issue354)
- fix pytest-pep8 and pytest-flakes / pytest interactions
(the mark plugin assumed an item always has a function,
which is not true for those plugins).
Thanks Andi Zeidler.
- introduce node.get_marker/node.add_marker API for plugins
like pytest-pep8 and pytest-flakes to avoid the messy
details of the node.keywords pseudo-dicts (see the sketch
after this list). Adapted docs.
- remove attempt to "dup" stdout at startup as it's icky.
the normal capturing should catch enough possibilities
of tests messing up standard FDs.
- add pluginmanager.do_configure(config) as a link to
config.do_configure() for plugin-compatibility
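A minimal sketch of the new marker API as used from a plugin hook (the
marker names are invented for the example)::

    import pytest

    def pytest_collection_modifyitems(items):
        for item in items:
            # query markers without poking at the keywords pseudo-dicts
            if item.get_marker("slow") is None:
                item.add_marker(pytest.mark.quick)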
as usual, docs at http://pytest.org and upgrades via::
pip install -U pytest
have fun,
holger krekel

View file

@ -0,0 +1,175 @@
pytest-2.5.0: now down to ZERO reported bugs!
===========================================================================
pytest-2.5.0 is a big bug-fixing release, the result of two community bug
fixing days plus numerous additional works from many people and
reporters. The release should be fully compatible to 2.4.2, existing
plugins and test suites. We aim at maintaining this level of ZERO reported
bugs because it's no fun if your testing tool has bugs, is it? Under a
condition, though: when submitting a bug report please provide
clear information about the circumstances and a simple example which
reproduces the problem.
The issue tracker is of course not empty now. We have many remaining
"enhacement" issues which we'll hopefully can tackle in 2014 with your
help.
For those who use older Python versions, please note that pytest is not
automatically tested on python2.5 due to virtualenv, setuptools and tox
not supporting it anymore. Manual verification shows that it mostly
works fine but it's not going to be part of the automated release
process and thus likely to break in the future.
As usual, current docs are at
http://pytest.org
and you can upgrade from pypi via::
pip install -U pytest
Particular thanks for helping with this release go to Anatoly Bubenkoff,
Floris Bruynooghe, Marc Abramowitz, Ralph Schmitt, Ronny Pfannschmidt,
Donald Stufft, James Lan, Rob Dennis, Jason R. Coombs, Mathieu Agopian,
Virgil Dupras, Bruno Oliveira, Alex Gaynor and others.
have fun,
holger krekel
2.5.0
-----------------------------------
- dropped python2.5 from automated release testing of pytest itself
which means it's probably going to break soon (but still works
with this release we believe).
- simplified and fixed implementation for calling finalizers when
parametrized fixtures or function arguments are involved. finalization
is now performed lazily at setup time instead of in the "teardown phase".
While this might sound odd at first, it helps to ensure that we are
correctly handling setup/teardown even in complex code. User-level code
should not be affected unless it's implementing the pytest_runtest_teardown
hook and expecting certain fixture instances are torn down within (very
unlikely and would have been unreliable anyway).
- PR90: add --color=yes|no|auto option to force terminal coloring
mode ("auto" is default). Thanks Marc Abramowitz.
- fix issue319 - correctly show unicode in assertion errors. Many
thanks to Floris Bruynooghe for the complete PR. Also means
we depend on py>=1.4.19 now.
- fix issue396 - correctly sort and finalize class-scoped parametrized
tests independently from number of methods on the class.
- refix issue323 in a better way -- parametrization should now never
cause Runtime Recursion errors because the underlying algorithm
for re-ordering tests per-scope/per-fixture is not recursive
anymore (it was tail-call recursive before which could lead
to problems for more than 966 non-function scoped parameters).
- fix issue290 - there is preliminary support now for parametrizing
with repeated same values (sometimes useful to test that calling
a second time works the same as the first time).
- close issue240 - document precisely how pytest module importing
works, discuss the two common test directory layouts, and how it
interacts with PEP420-namespace packages.
- fix issue246 fix finalizer order to be LIFO on independent fixtures
depending on a parametrized higher-than-function scoped fixture.
(was quite some effort so please bear with the complexity of this sentence :)
Thanks Ralph Schmitt for the precise failure example.
- fix issue244 by implementing special index for parameters to only use
indices for parametrized test ids
- fix issue287 by running all finalizers but saving the exception
from the first failing finalizer and re-raising it so teardown will
still have failed. We reraise the first failing exception because
it might be the cause for other finalizers to fail.
- fix ordering when mock.patch or other standard decorator-wrappings
are used with test methods. This fixes issue346 and should
help with random "xdist" collection failures. Thanks to
Ronny Pfannschmidt and Donald Stufft for helping to isolate it.
- fix issue357 - special case "-k" expressions to allow for
filtering with simple strings that are not valid python expressions.
Examples: "-k 1.3" matches all tests parametrized with 1.3.
"-k None" filters all tests that have "None" in their name
and conversely "-k 'not None'".
Previously these examples would raise syntax errors.
- fix issue384 by removing the trial support code
since the unittest compat enhancements allow
trial to handle it on its own
- don't hide an ImportError when importing a plugin produces one.
fixes issue375.
- fix issue275 - allow usefixtures and autouse fixtures
for running doctest text files.
- fix issue380 by making --resultlog only rely on longrepr instead
of the "reprcrash" attribute which only exists sometimes.
- address issue122: allow @pytest.fixture(params=iterator) by exploding
into a list early on.
- fix pexpect-3.0 compatibility for pytest's own tests.
(fixes issue386)
- allow nested parametrize-value markers, thanks James Lan for the PR.
- fix unicode handling with new monkeypatch.setattr(import_path, value)
API. Thanks Rob Dennis. Fixes issue371.
- fix unicode handling with junitxml, fixes issue368.
- In assertion rewriting mode on Python 2, fix the detection of coding
cookies. See issue #330.
- make "--runxfail" turn imperative pytest.xfail calls into no ops
(it already did neutralize pytest.mark.xfail markers)
- refine pytest / pkg_resources interactions: The AssertionRewritingHook
PEP302 compliant loader now registers itself with setuptools/pkg_resources
properly so that the pkg_resources.resource_stream method works properly.
Fixes issue366. Thanks for the investigations and full PR to Jason R. Coombs.
- pytestconfig fixture is now session-scoped as it is the same object during the
whole test run. Fixes issue370.
- avoid one surprising case of marker malfunction/confusion::

    @pytest.mark.some(lambda arg: ...)
    def test_function():
        ...

would not work correctly because pytest assumes @pytest.mark.some
gets a function to be decorated already. We now at least detect if this
arg is a lambda and thus the example will work. Thanks Alex Gaynor
for bringing it up.
- xfail a test on pypy that checks wrong encoding/ascii (pypy does
not error out). fixes issue385.
- internally make varnames() deal with classes' __init__,
although it's not needed by pytest itself atm. Also
fix caching. Fixes issue376.
- fix issue221 - handle importing of namespace-package with no
__init__.py properly.
- refactor internal FixtureRequest handling to avoid monkeypatching.
One of the positive user-facing effects is that the "request" object
can now be used in closures.
- fixed version comparison in pytest.importorskip(modname, minverstring)
- fix issue377 by clarifying in the nose-compat docs that pytest
does not duplicate the unittest-API into the "plain" namespace.
- fix verbose reporting for @mock'd test functions

View file

@ -0,0 +1,47 @@
pytest-2.5.1: fixes and new home page styling
===========================================================================
pytest is a mature Python testing tool with more than 1000 tests
against itself, passing on many different interpreters and platforms.
The 2.5.1 release maintains the "zero-reported-bugs" promise by fixing
the three bugs reported since the last release a few days ago. It also
features a new home page styling implemented by Tobias Bieniek, based on
the flask theme from Armin Ronacher:
http://pytest.org
If you have anything more to improve styling and docs,
we'd be very happy to merge further pull requests.
On the coding side, the release also contains a little enhancement to
fixture decorators allowing to directly influence generation of test
ids, thanks to Floris Bruynooghe. Other thanks for helping with
this release go to Anatoly Bubenkoff and Ronny Pfannschmidt.
As usual, you can upgrade from pypi via::
pip install -U pytest
have fun and a nice remaining "bug-free" time of the year :)
holger krekel
2.5.1
-----------------------------------
- merge new documentation styling PR from Tobias Bieniek.
- fix issue403: allow parametrize of multiple same-name functions within
a collection node. Thanks Andreas Kloeckner and Alex Gaynor for reporting
and analysis.
- Allow parametrized fixtures to specify the ID of the parameters by
adding an ids argument to pytest.fixture() and pytest.yield_fixture()
(see the sketch after this list). Thanks Floris Bruynooghe.
- fix issue404 by always using the binary xml escape in the junitxml
plugin. Thanks Ronny Pfannschmidt.
- fix issue407: fix addoption docstring to point to argparse instead of
optparse. Thanks Daniel D. Wright.
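A minimal sketch of the new ``ids`` argument (parameter values invented
for the example)::

    import pytest

    @pytest.fixture(params=[3600, 86400], ids=["hour", "day"])
    def interval(request):
        return request.param

    def test_positive(interval):
        # reported as test_positive[hour] and test_positive[day]
        assert interval > 0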

View file

@ -0,0 +1,64 @@
pytest-2.5.2: fixes
===========================================================================
pytest is a mature Python testing tool with more than 1000 tests
against itself, passing on many different interpreters and platforms.
The 2.5.2 release fixes a few bugs with two maybe-bugs remaining and
actively being worked on (and waiting for the bug reporter's input).
We also have a new contribution guide thanks to Piotr Banaszkiewicz
and others.
See docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to the following people who contributed to this release:
Anatoly Bubenkov
Ronny Pfannschmidt
Floris Bruynooghe
Bruno Oliveira
Andreas Pelme
Jurko Gospodnetić
Piotr Banaszkiewicz
Simon Liedtke
lakka
Lukasz Balcerzak
Philippe Muller
Daniel Hahler
have fun,
holger krekel
2.5.2
-----------------------------------
- fix issue409 -- better interoperate with cx_freeze by not
trying to import from collections.abc which causes problems
for py27/cx_freeze. Thanks Wolfgang L. for reporting and tracking it down.
- fixed docs and code to use "pytest" instead of "py.test" almost everywhere.
Thanks Jurko Gospodnetic for the complete PR.
- fix issue425: mention at end of "py.test -h" that --markers
and --fixtures work according to specified test path (or current dir)
- fix issue413: exceptions with unicode attributes are now printed
correctly also on python2 and with pytest-xdist runs. (the fix
requires py-1.4.20)
- copy, cleanup and integrate py.io capture
from pylib 1.4.20.dev2 (rev 13d9af95547e)
- address issue416: clarify docs as to conftest.py loading semantics
- fix issue429: comparing byte strings with non-ascii chars in assert
expressions now works better. Thanks Floris Bruynooghe.
- make capfd/capsys.capture private, it's unused and shouldn't be exposed

View file

@ -0,0 +1,153 @@
pytest-2.6.0: shorter tracebacks, new warning system, test runner compat
===========================================================================
pytest is a mature Python testing tool with more than 1000 tests
against itself, passing on many different interpreters and platforms.
The 2.6.0 release should be drop-in backward compatible to 2.5.2 and
fixes a number of bugs and brings some new features, mainly:
- shorter tracebacks by default: only the first (test function) entry
and the last (failure location) entry are shown, the ones between
only in "short" format. Use ``--tb=long`` to get back the old
behaviour of showing "long" entries everywhere.
- a new warning system which reports oddities during collection
and execution. For example, ignoring collecting Test* classes with an
``__init__`` now produces a warning.
- various improvements to nose/mock/unittest integration
Note also that 2.6.0 departs from the "zero reported bugs" policy
because it has been too hard to keep up with it, unfortunately.
Instead we are for now rather bound to work on "upvoted" issues in
the https://bitbucket.org/pytest-dev/pytest/issues?status=new&status=open&sort=-votes
issue tracker.
See docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed, among them:
Benjamin Peterson
Jurko Gospodnetić
Floris Bruynooghe
Marc Abramowitz
Marc Schlaich
Trevor Bekolay
Bruno Oliveira
Alex Groenholm
have fun,
holger krekel
2.6.0
-----------------------------------
- fix issue537: Avoid importing old assertion reinterpretation code by default.
Thanks Benjamin Peterson.
- fix issue364: shorten and enhance tracebacks representation by default.
The new "--tb=auto" option (default) will only display long tracebacks
for the first and last entry. You can get the old behaviour of printing
all entries as long entries with "--tb=long". Also short entries by
default are now printed very similarly to "--tb=native" ones.
- fix issue514: teach assertion reinterpretation about private class attributes
Thanks Benjamin Peterson.
- change -v output to include full node IDs of tests. Users can copy
a node ID from a test run, including line number, and use it as a
positional argument in order to run only a single test.
- fix issue 475: fail early and comprehensibly when calling
pytest.raises with a wrong exception type.
- fix issue516: tell in getting-started about current dependencies.
- cleanup setup.py a bit and specify supported versions. Thanks Jurko
Gospodnetic for the PR.
- change XPASS colour to yellow rather than red when tests are run
with -v.
- fix issue473: work around mock putting an unbound method into a class
dict when double-patching.
- fix issue498: if a fixture finalizer fails, make sure that
the fixture is still invalidated.
- fix issue453: the result of the pytest_assertrepr_compare hook now gets
its newlines escaped so that format_exception does not blow up.
- internal new warning system: pytest will now produce warnings when
it detects oddities in your test collection or execution.
Warnings are ultimately sent to a new pytest_logwarning hook which is
currently only implemented by the terminal plugin which displays
warnings in the summary line and shows more details when -rw (report on
warnings) is specified.
- change skips into warnings for test classes with an __init__ and
callables in test modules which look like a test but are not functions.
- fix issue436: improved finding of initial conftest files from command
line arguments by using the result of parse_known_args rather than
the previous flaky heuristics. Thanks Marc Abramowitz for tests
and initial fixing approaches in this area.
- fix issue #479: properly handle nose/unittest(2) SkipTest exceptions
during collection/loading of test modules. Thanks to Marc Schlaich
for the complete PR.
- fix issue490: include pytest_load_initial_conftests in documentation
and improve docstring.
- fix issue472: clarify that ``pytest.config.getvalue()`` cannot work
if it's triggered ahead of command line parsing.
- merge PR123: improved integration with mock.patch decorator on tests.
- fix issue412: messing with stdout/stderr FD-level streams is now
captured without crashes.
- fix issue483: trial/py33 works now properly. Thanks Daniel Grana for PR.
- improve example for pytest integration with "python setup.py test"
which now has a generic "-a" or "--pytest-args" option where you
can pass additional options as a quoted string. Thanks Trevor Bekolay.
- simplified internal capturing mechanism and made it more robust
against tests or setups changing FD1/FD2, also better integrated
now with pytest.pdb() in single tests.
- improvements to pytest's own test-suite leakage detection, courtesy of PRs
from Marc Abramowitz
- fix issue492: avoid leak in test_writeorg. Thanks Marc Abramowitz.
- fix issue493: don't run tests in doc directory with ``python setup.py test``
(use tox -e doctesting for that)
- fix issue486: better reporting and handling of early conftest loading failures
- some cleanup and simplification of internal conftest handling.
- work a bit harder to break reference cycles when catching exceptions.
Thanks Jurko Gospodnetic.
- fix issue443: fix skip examples to use proper comparison. Thanks Alex
Groenholm.
- support nose-style ``__test__`` attribute on modules, classes and
functions, including unittest-style classes. If set to False, the
test will not be collected (see the sketch after this list).
- fix issue512: show "<notset>" for arguments which might not be set
in monkeypatch plugin. Improves output in documentation.
- avoid importing "py.test" (an old alias module for "pytest")

View file

@ -0,0 +1,59 @@
pytest-2.6.1: fixes and new xfail feature
===========================================================================
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
The 2.6.1 release is drop-in compatible to 2.5.2 and actually fixes some
regressions introduced with 2.6.0. It also brings a little feature
to the xfail marker which now recognizes expected exceptions,
see the CHANGELOG below.
See docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed, among them:
Floris Bruynooghe
Bruno Oliveira
Nicolas Delaby
have fun,
holger krekel
Changes 2.6.1
=================
- No longer show line numbers in the --verbose output, the output is now
purely the nodeid. The line number is still shown in failure reports.
Thanks Floris Bruynooghe.
- fix issue437 where assertion rewriting could cause pytest-xdist slaves
to collect different tests. Thanks Bruno Oliveira.
- fix issue555: add "errors" attribute to capture-streams to satisfy
some distutils and possibly other code accessing sys.stdout.errors.
- fix issue547 capsys/capfd also work when output capturing ("-s") is disabled.
- address issue170: allow pytest.mark.xfail(...) to specify expected exceptions via
an optional "raises=EXC" argument where EXC can be a single exception
or a tuple of exception classes (see the sketch after this list).
Thanks David Mohr for the complete PR.
- fix integration of pytest with unittest.mock.patch decorator when
it uses the "new" argument. Thanks Nicolas Delaby for test and PR.
- fix issue with detecting conftest files if the arguments contain
"::" node id specifications (copy pasted from "-v" output)
- fix issue544 by only removing "@NUM" at the end of "::" separated parts
and if the part has an ".py" extension
- don't use py.std import helper, rather import things directly.
Thanks Bruno Oliveira.
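A minimal sketch of the new ``raises`` argument::

    import pytest

    @pytest.mark.xfail(raises=IndexError)
    def test_known_bug():
        # reported as xfail only if IndexError is raised; any other
        # exception shows up as a regular failure
        assert [][0]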

View file

@ -0,0 +1,52 @@
pytest-2.6.2: few fixes and cx_freeze support
===========================================================================
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is drop-in compatible to 2.5.2 and 2.6.X. It also
brings support for including pytest with cx_freeze or similar
freezing tools into your single-file app distribution. For details
see the CHANGELOG below.
See docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed, among them:
Floris Bruynooghe
Benjamin Peterson
Bruno Oliveira
have fun,
holger krekel
2.6.2
-----------
- Added function pytest.freeze_includes(), which makes it easy to embed
pytest into executables using tools like cx_freeze (see the sketch after
this list). See docs for examples and rationale. Thanks Bruno Oliveira.
- Improve assertion rewriting cache invalidation precision.
- fixed issue561: adapt autouse fixture example for python3.
- fixed issue453: assertion rewriting issue with __repr__ containing
"\n{", "\n}" and "\n~".
- fix issue560: correctly display code if an "else:" or "finally:" is
followed by statements on the same line.
- Fix example in monkeypatch documentation, thanks t-8ch.
- fix issue572: correct tmpdir doc example for python3.
- Do not mark as universal wheel because Python 2.6 is different from
other builds due to the extra argparse dependency. Fixes issue566.
Thanks sontek.
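A minimal sketch of how pytest.freeze_includes() plugs into a cx_freeze
setup.py (the project and script names are invented for the example)::

    # setup.py for an application frozen together with its test suite
    from cx_Freeze import setup, Executable
    import pytest

    setup(
        name="myapp",
        executables=[Executable("myapp.py")],
        options={"build_exe": {"includes": pytest.freeze_includes()}},
    )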

View file

@ -0,0 +1,52 @@
pytest-2.6.3: fixes and little improvements
===========================================================================
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is drop-in compatible to 2.5.2 and 2.6.X.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed, among them:
Floris Bruynooghe
Oleg Sinyavskiy
Uwe Schmitt
Charles Cloud
Wolfgang Schnerring
have fun,
holger krekel
Changes 2.6.3
======================
- fix issue575: xunit-xml was reporting collection errors as failures
instead of errors, thanks Oleg Sinyavskiy.
- fix issue582: fix setuptools example, thanks Laszlo Papp and Ronny
Pfannschmidt.
- Fix infinite recursion bug when pickling capture.EncodedFile, thanks
Uwe Schmitt.
- fix issue589: fix bad interaction with numpy and others when showing
exceptions. Check for the precise "maximum recursion depth exceeded" exception
instead of presuming any RuntimeError is that one (implemented in py
dep). Thanks Charles Cloud for analysing the issue.
- fix conftest related fixture visibility issue: when running with a
CWD outside a test package pytest would get fixture discovery wrong.
Thanks to Wolfgang Schnerring for figuring out a reproducible example.
- Introduce pytest_enter_pdb hook (needed e.g. by pytest_timeout to cancel the
timeout when interactively entering pdb). Thanks Wolfgang Schnerring.
- check xfail/skip also with non-python function test items. Thanks
Floris Bruynooghe.

View file

@ -0,0 +1,101 @@
pytest-2.7.0: fixes, features, speed improvements
===========================================================================
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is supposed to be drop-in compatible to 2.6.X.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed, among them:
Anatoly Bubenkoff
Floris Bruynooghe
Brianna Laugher
Eric Siegerman
Daniel Hahler
Charles Cloud
Tom Viner
Holger Peters
Ldiary Translations
almarklein
have fun,
holger krekel
2.7.0 (compared to 2.6.4)
-----------------------------
- fix issue435: make reload() work when assert rewriting is active.
Thanks Daniel Hahler.
- fix issue616: conftest.py files and their contained fixtures are now
properly considered for visibility, independently from the exact
current working directory and test arguments that are used.
Many thanks to Eric Siegerman and his PR235 which contains
systematic tests for conftest visibility and now passes.
This change also introduces the concept of a ``rootdir`` which
is printed as a new pytest header and documented in the pytest
customize web page.
- change reporting of "diverted" tests, i.e. tests that are collected
in one file but actually come from another (e.g. when tests in a test class
come from a base class in a different file). We now show the nodeid
and indicate via a postfix the other file.
- add ability to set command line options by environment variable PYTEST_ADDOPTS.
- added documentation on the new pytest-dev teams on bitbucket and
github. See https://pytest.org/latest/contributing.html.
Thanks to Anatoly for pushing and initial work on this.
- fix issue650: new option ``--doctest-ignore-import-errors`` which
will turn import errors in doctests into skips. Thanks Charles Cloud
for the complete PR.
- fix issue655: work around different ways that cause python2/3
to leak sys.exc_info into fixtures/tests causing failures in 3rd party code
- fix issue615: assertion re-writing did not correctly escape % signs
when formatting boolean operations, which tripped over mixing
booleans with modulo operators. Thanks to Tom Viner for the report,
triaging and fix.
- implement issue351: add ability to specify parametrize ids as a callable
to generate custom test ids (see the sketch after this list).
Thanks Brianna Laugher for the idea and implementation.
- introduce and document new hookwrapper mechanism useful for plugins
which want to wrap the execution of certain hooks for their purposes.
This supersedes the undocumented ``__multicall__`` protocol which
pytest itself and some external plugins use. Note that pytest-2.8
is scheduled to drop supporting the old ``__multicall__``
and only support the hookwrapper protocol.
- majorly speed up invocation of plugin hooks
- use hookwrapper mechanism in builtin pytest plugins.
- add a doctest ini option for doctest flags, thanks Holger Peters.
- add note to docs that if you want to mark a parameter and the
parameter is a callable, you also need to pass in a reason to disambiguate
it from the "decorator" case. Thanks Tom Viner.
- "python_classes" and "python_functions" options now support glob-patterns
for test discovery, as discussed in issue600. Thanks Ldiary Translations.
- allow to override parametrized fixtures with non-parametrized ones and vice versa (bubenkoff).
- fix issue463: raise specific error for 'parameterize' misspelling (pfctdayelise).
- On failure, the ``sys.last_value``, ``sys.last_type`` and
``sys.last_traceback`` are set, so that a user can inspect the error
via postmortem debugging (almarklein).
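A minimal sketch of a callable ``ids`` (the id function is invented for
the example; returning None falls back to the automatically generated id)::

    import pytest

    def idfn(val):
        # return a string to use as the test id, or None to let
        # pytest generate one
        if isinstance(val, float):
            return "approx-%.2f" % val

    @pytest.mark.parametrize("value", [1.25, 2.5], ids=idfn)
    def test_positive(value):
        assert value > 0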

View file

@ -0,0 +1,58 @@
pytest-2.7.1: bug fixes
=======================
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is supposed to be drop-in compatible to 2.7.0.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed to this release, among them:
Bruno Oliveira
Holger Krekel
Ionel Maries Cristian
Floris Bruynooghe
Happy testing,
The py.test Development Team
2.7.1 (compared to 2.7.0)
-------------------------
- fix issue731: do not get confused by the braces which may be present
and unbalanced in an object's repr while collapsing False
explanations. Thanks Carl Meyer for the report and test case.
- fix issue553: properly handling inspect.getsourcelines failures in
FixtureLookupError which would lead to an internal error,
obfuscating the original problem. Thanks talljosh for initial
diagnose/patch and Bruno Oliveira for final patch.
- fix issue660: properly report scope-mismatch-access errors
independently from ordering of fixture arguments. Also
avoid the pytest internal traceback which does not provide
information to the user. Thanks Holger Krekel.
- streamlined and documented release process. Also all versions
(in setup.py and documentation generation) are now read
from _pytest/__init__.py. Thanks Holger Krekel.
- fixed docs to remove the notion that yield-fixtures are experimental.
They are here to stay :) Thanks Bruno Oliveira.
- Support building wheels by using environment markers for the
requirements. Thanks Ionel Maries Cristian.
- fixed a regression relative to 2.6.4 which surfaced e.g. as lost stdout capture printing
when tests raised SystemExit. Thanks Holger Krekel.
- reintroduced _pytest fixture of the pytester plugin which is used
at least by pytest-xdist.

View file

@ -0,0 +1,58 @@
pytest-2.7.2: bug fixes
=======================
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is supposed to be drop-in compatible to 2.7.1.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed to this release, among them:
Bruno Oliveira
Floris Bruynooghe
Punyashloka Biswal
Aron Curzon
Benjamin Peterson
Thomas De Schampheleire
Edison Gustavo Muenz
Holger Krekel
Happy testing,
The py.test Development Team
2.7.2 (compared to 2.7.1)
-----------------------------
- fix issue767: pytest.raises value attribute does not contain the exception
instance on Python 2.6. Thanks Eric Siegerman for providing the test
case and Bruno Oliveira for PR.
- Automatically create directory for junitxml and results log.
Thanks Aron Curzon.
- fix issue713: JUnit XML reports for doctest failures.
Thanks Punyashloka Biswal.
- fix issue735: assertion failures on debug versions of Python 3.4+
Thanks Benjamin Peterson.
- fix issue114: skipif marker reports to internal skipping plugin;
Thanks Floris Bruynooghe for reporting and Bruno Oliveira for the PR.
- fix issue748: unittest.SkipTest reports to internal pytest unittest plugin.
Thanks Thomas De Schampheleire for reporting and Bruno Oliveira for the PR.
- fix issue718: failed to create representation of sets containing unsortable
elements in python 2. Thanks Edison Gustavo Muenz.
- fix issue756, fix issue752 (and similar issues): depend on py-1.4.29
which has a refined algorithm for traceback generation.

View file

@ -0,0 +1,44 @@
pytest-2.8.2: bug fixes
=======================
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is supposed to be drop-in compatible to 2.8.1.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed to this release, among them:
Bruno Oliveira
Demian Brecht
Florian Bruhin
Ionel Cristian Mărieș
Raphael Pierzina
Ronny Pfannschmidt
holger krekel
Happy testing,
The py.test Development Team
2.8.2 (compared to 2.7.2)
-----------------------------
- fix #1085: proper handling of encoding errors when passing encoded byte
strings to pytest.parametrize in Python 2.
Thanks Themanwithoutaplan for the report and Bruno Oliveira for the PR.
- fix #1087: handling SystemError when passing empty byte strings to
pytest.parametrize in Python 3.
Thanks Paul Kehrer for the report and Bruno Oliveira for the PR.
- fix #995: fixed internal error when filtering tracebacks where one entry
was generated by an exec() statement.
Thanks Daniel Hahler, Ashley C Straw, Philippe Gauthier and Pavel Savchenko
for contributing and Bruno Oliveira for the PR.

View file

@ -0,0 +1,59 @@
pytest-2.8.3: bug fixes
=======================
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is supposed to be drop-in compatible to 2.8.2.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed to this release, among them:
Bruno Oliveira
Florian Bruhin
Gabe Hollombe
Gabriel Reis
Hartmut Goebel
John Vandenberg
Lee Kamentsky
Michael Birtwell
Raphael Pierzina
Ronny Pfannschmidt
William Martin Stewart
Happy testing,
The py.test Development Team
2.8.3 (compared to 2.8.2)
-----------------------------
- fix #1169: add __name__ attribute to testcases in TestCaseFunction to
support the @unittest.skip decorator on functions and methods.
Thanks Lee Kamentsky for the PR.
- fix #1035: collecting tests if test module level obj has __getattr__().
Thanks Suor for the report and Bruno Oliveira / Tom Viner for the PR.
- fix #331: don't collect tests if their failure cannot be reported correctly
e.g. they are a callable instance of a class.
- fix #1133: fixed internal error when filtering tracebacks where one entry
belongs to a file which is no longer available.
Thanks Bruno Oliveira for the PR.
- enhancement made to highlight in red the name of the failing tests so
they stand out in the output.
Thanks Gabriel Reis for the PR.
- add more talks to the documentation
- extend documentation on the --ignore cli option
- use pytest-runner for setuptools integration
- minor fixes for interaction with OS X El Capitan system integrity protection (thanks Florian)

View file

@ -0,0 +1,52 @@
pytest-2.8.4
============
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is supposed to be drop-in compatible to 2.8.2.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed to this release, among them:
Bruno Oliveira
Florian Bruhin
Jeff Widman
Mehdy Khoshnoody
Nicholas Chammas
Ronny Pfannschmidt
Tim Chan
Happy testing,
The py.test Development Team
2.8.4 (compared to 2.8.3)
-----------------------------
- fix #1190: ``deprecated_call()`` now works when the deprecated
function has been already called by another test in the same
module. Thanks Mikhail Chernykh for the report and Bruno Oliveira for the
PR.
- fix #1198: ``--pastebin`` option now works on Python 3. Thanks
Mehdy Khoshnoody for the PR.
- fix #1219: ``--pastebin`` now works correctly when captured output contains
non-ascii characters. Thanks Bruno Oliveira for the PR.
- fix #1204: another error when collecting with a nasty __getattr__().
Thanks Florian Bruhin for the PR.
- fix the summary printed when no tests were run.
Thanks Florian Bruhin for the PR.
- a number of documentation modernizations with respect to good practices.
Thanks Bruno Oliveira for the PR.

View file

@ -0,0 +1,39 @@
pytest-2.8.5
============
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is supposed to be drop-in compatible with 2.8.4.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed to this release, among them:
Alex Gaynor
aselus-hub
Bruno Oliveira
Ronny Pfannschmidt
Happy testing,
The py.test Development Team
2.8.5 (compared to 2.8.4)
-------------------------
- fix #1243: fixed issue where class attributes injected during collection could break pytest.
PR by Alexei Kozlenok, thanks Ronny Pfannschmidt and Bruno Oliveira for the review and help.
- fix #1074: precompute junitxml chunks instead of storing the whole tree in objects.
Thanks Bruno Oliveira for the report and Ronny Pfannschmidt for the PR.
- fix #1238: fix ``pytest.deprecated_call()`` receiving multiple arguments
(Regression introduced in 2.8.4). Thanks Alex Gaynor for the report and
Bruno Oliveira for the PR.

View file

@ -0,0 +1,67 @@
pytest-2.8.6
============
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is supposed to be drop-in compatible with 2.8.5.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed to this release, among them:
AMiT Kumar
Bruno Oliveira
Erik M. Bray
Florian Bruhin
Georgy Dyuldin
Jeff Widman
Kartik Singhal
Loïc Estève
Manu Phatak
Peter Demin
Rick van Hattem
Ronny Pfannschmidt
Ulrich Petri
foxx
Happy testing,
The py.test Development Team
2.8.6 (compared to 2.8.5)
-------------------------
- fix #1259: allow for double nodeids in junitxml;
this was a regression that broke plugin combinations
like pytest-pep8 + pytest-flakes.
- Workaround for exception that occurs in pyreadline when using
``--pdb`` with standard I/O capture enabled.
Thanks Erik M. Bray for the PR.
- fix #900: Better error message in case the target of a ``monkeypatch`` call
raises an ``ImportError``.
- fix #1292: monkeypatch calls (setattr, setenv, etc.) are now O(1).
Thanks David R. MacIver for the report and Bruno Oliveira for the PR.
- fix #1223: captured stdout and stderr are now properly displayed before
entering pdb when ``--pdb`` is used instead of being thrown away.
Thanks Cal Leeming for the PR.
- fix #1305: pytest warnings emitted during ``pytest_terminal_summary`` are now
properly displayed.
Thanks Ionel Maries Cristian for the report and Bruno Oliveira for the PR.
- fix #628: fixed internal UnicodeDecodeError when doctests contain unicode.
Thanks Jason R. Coombs for the report and Bruno Oliveira for the PR.
- fix #1334: Add captured stdout to jUnit XML report on setup error.
Thanks Georgy Dyuldin for the PR.

View file

@ -0,0 +1,31 @@
pytest-2.8.7
============
This is a hotfix release to solve a regression
in the builtin monkeypatch plugin that got introduced in 2.8.6.
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
This release is supposed to be drop-in compatible with 2.8.5.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed to this release, among them:
Ronny Pfannschmidt
Happy testing,
The py.test Development Team
2.8.7 (compared to 2.8.6)
-------------------------
- fix #1338: use predictable object resolution for monkeypatch

View file

@ -0,0 +1,159 @@
pytest-2.9.0
============
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed to this release, among them:
Anatoly Bubenkov
Bruno Oliveira
Buck Golemon
David Vierra
Florian Bruhin
Galaczi Endre
Georgy Dyuldin
Lukas Bednar
Luke Murphy
Marcin Biernat
Matt Williams
Michael Aquilina
Raphael Pierzina
Ronny Pfannschmidt
Ryan Wooden
Tiemo Kieft
TomV
holger krekel
jab
Happy testing,
The py.test Development Team
2.9.0 (compared to 2.8.7)
-------------------------
**New Features**
* New ``pytest.mark.skip`` mark, which unconditionally skips marked tests
(a short sketch follows this list).
Thanks `@MichaelAquilina`_ for the complete PR (`#1040`_).
* ``--doctest-glob`` may now be passed multiple times on the command line.
Thanks `@jab`_ and `@nicoddemus`_ for the PR.
* New ``-rp`` and ``-rP`` reporting options give the summary and full output
of passing tests, respectively. Thanks to `@codewarrior0`_ for the PR.
* ``pytest.mark.xfail`` now has a ``strict`` option which makes ``XPASS``
tests to fail the test suite, defaulting to ``False``. There's also a
``xfail_strict`` ini option that can be used to configure it project-wise.
Thanks `@rabbbit`_ for the request and `@nicoddemus`_ for the PR (`#1355`_).
* ``Parser.addini`` now supports options of type ``bool``. Thanks
`@nicoddemus`_ for the PR.
* New ``ALLOW_BYTES`` doctest option strips ``b`` prefixes from byte strings
in doctest output (similar to ``ALLOW_UNICODE``).
Thanks `@jaraco`_ for the request and `@nicoddemus`_ for the PR (`#1287`_).
* give a hint on ``KeyboardInterrupt`` to use the ``--fulltrace`` option to show the errors;
this fixes `#1366`_.
Thanks to `@hpk42`_ for the report and `@RonnyPfannschmidt`_ for the PR.
* catch ``IndexError`` exceptions when getting exception source location. This fixes
a pytest internal error for dynamically generated code (fixtures and tests)
where source lines are intentionally faked.
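A minimal sketch of the new ``skip`` mark and the strict ``xfail`` option
(the test bodies here are hypothetical)::

    import pytest

    @pytest.mark.skip(reason="not implemented yet")
    def test_not_yet():
        assert 0

    @pytest.mark.xfail(strict=True)  # an unexpected pass now fails the suite
    def test_known_bug():
        assert (2 + 2) == 5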
**Changes**
* **Important**: `py.code <http://pylib.readthedocs.org/en/latest/code.html>`_ has been
merged into the ``pytest`` repository as ``pytest._code``. This decision
was made because ``py.code`` had very few uses outside ``pytest`` and the
fact that it was in a different repository made it difficult to fix bugs on
its code in a timely manner. The team hopes with this to be able to better
refactor out and improve that code.
This change shouldn't affect users, but it is worth being aware of
in case you encounter any strange behavior.
Keep in mind that the code for ``pytest._code`` is **private** and
**experimental**, so you definitely should not import it explicitly!
Please note that the original ``py.code`` is still available in
`pylib <http://pylib.readthedocs.org>`_.
* ``pytest_enter_pdb`` now optionally receives the pytest config object.
Thanks `@nicoddemus`_ for the PR.
* Removed code and documentation for Python 2.5 or lower versions,
including removal of the obsolete ``_pytest.assertion.oldinterpret`` module.
Thanks `@nicoddemus`_ for the PR (`#1226`_).
* Comparisons now always show up in full when ``CI`` or ``BUILD_NUMBER`` is
found in the environment, even when -vv isn't used.
Thanks `@The-Compiler`_ for the PR.
* ``--lf`` and ``--ff`` now support long names: ``--last-failed`` and
``--failed-first`` respectively.
Thanks `@MichaelAquilina`_ for the PR.
* Added expected exceptions to the ``pytest.raises`` fail message.
* Collection only displays progress ("collecting X items") when in a terminal.
This avoids cluttering the output when using ``--color=yes`` to obtain
colors in CI integrations systems (`#1397`_).
**Bug Fixes**
* The ``-s`` and ``-c`` options should now work under ``xdist``;
``Config.fromdictargs`` now represents its input much more faithfully.
Thanks to `@bukzor`_ for the complete PR (`#680`_).
* Fix (`#1290`_): support Python 3.5's ``@`` operator in assertion rewriting.
Thanks `@Shinkenjoe`_ for report with test case and `@tomviner`_ for the PR.
* Fix formatting utf-8 explanation messages (`#1379`_).
Thanks `@biern`_ for the PR.
* Fix `traceback style docs`_ to describe all of the available options
(auto/long/short/line/native/no), with `auto` being the default since v2.6.
Thanks `@hackebrot`_ for the PR.
* Fix (`#1422`_): junit ``record_xml_property`` did not allow multiple records
with the same name.
.. _`traceback style docs`: https://pytest.org/latest/usage.html#modifying-python-traceback-printing
.. _#1422: https://github.com/pytest-dev/pytest/issues/1422
.. _#1379: https://github.com/pytest-dev/pytest/issues/1379
.. _#1366: https://github.com/pytest-dev/pytest/issues/1366
.. _#1040: https://github.com/pytest-dev/pytest/pull/1040
.. _#680: https://github.com/pytest-dev/pytest/issues/680
.. _#1287: https://github.com/pytest-dev/pytest/pull/1287
.. _#1226: https://github.com/pytest-dev/pytest/pull/1226
.. _#1290: https://github.com/pytest-dev/pytest/pull/1290
.. _#1355: https://github.com/pytest-dev/pytest/pull/1355
.. _#1397: https://github.com/pytest-dev/pytest/issues/1397
.. _@biern: https://github.com/biern
.. _@MichaelAquilina: https://github.com/MichaelAquilina
.. _@bukzor: https://github.com/bukzor
.. _@hpk42: https://github.com/hpk42
.. _@nicoddemus: https://github.com/nicoddemus
.. _@jab: https://github.com/jab
.. _@codewarrior0: https://github.com/codewarrior0
.. _@jaraco: https://github.com/jaraco
.. _@The-Compiler: https://github.com/The-Compiler
.. _@Shinkenjoe: https://github.com/Shinkenjoe
.. _@tomviner: https://github.com/tomviner
.. _@RonnyPfannschmidt: https://github.com/RonnyPfannschmidt
.. _@rabbbit: https://github.com/rabbbit
.. _@hackebrot: https://github.com/hackebrot

View file

@ -0,0 +1,65 @@
pytest-2.9.1
============
pytest is a mature Python testing tool with more than 1100 tests
against itself, passing on many different interpreters and platforms.
See below for the changes and see docs at:
http://pytest.org
As usual, you can upgrade from pypi via::
pip install -U pytest
Thanks to all who contributed to this release, among them:
Bruno Oliveira
Daniel Hahler
Dmitry Malinovsky
Florian Bruhin
Floris Bruynooghe
Matt Bachmann
Ronny Pfannschmidt
TomV
Vladimir Bolshakov
Zearin
palaviv
Happy testing,
The py.test Development Team
2.9.1 (compared to 2.9.0)
-------------------------
**Bug Fixes**
* Improve error message when a plugin fails to load.
Thanks `@nicoddemus`_ for the PR.
* Fix (`#1178 <https://github.com/pytest-dev/pytest/issues/1178>`_):
``pytest.fail`` with non-ascii characters raises an internal pytest error.
Thanks `@nicoddemus`_ for the PR.
* Fix (`#469`_): junit parses report.nodeid incorrectly, when params IDs
contain ``::``. Thanks `@tomviner`_ for the PR (`#1431`_).
* Fix (`#578 <https://github.com/pytest-dev/pytest/issues/578>`_): SyntaxErrors
containing non-ascii lines at the point of failure generated an internal
py.test error.
Thanks `@asottile`_ for the report and `@nicoddemus`_ for the PR.
* Fix (`#1437`_): when a bytestring regex pattern is passed to ``parametrize``,
attempt to decode it as utf-8, ignoring errors.
* Fix (`#649`_): parametrized test nodes could not be specified to run on the command line.
.. _#1437: https://github.com/pytest-dev/pytest/issues/1437
.. _#469: https://github.com/pytest-dev/pytest/issues/469
.. _#1431: https://github.com/pytest-dev/pytest/pull/1431
.. _#649: https://github.com/pytest-dev/pytest/issues/649
.. _@asottile: https://github.com/asottile

View file

@ -0,0 +1,105 @@
python testing sprint June 20th-26th 2016
======================================================
.. image:: ../img/freiburg2.jpg
:width: 400
The pytest core group is heading towards the biggest sprint
in its history, to take place in the Black Forest town of Freiburg
in Germany. As of February 2016 we have started a `funding
campaign on Indiegogo to cover expenses
<http://igg.me/at/pytest-sprint/x/4034848>`_. The page also mentions
some preliminary topics:
- improving pytest-xdist test scheduling to take into account
fixture setups and explicit user hints.
- provide info on fixture dependencies during --collect-only
- tying pytest-xdist to tox so that you can do "py.test -e py34"
to run tests in a particular tox-managed virtualenv. Also
look into making pytest-xdist use tox environments on
remote ssh-sides so that remote dependency management becomes
easier.
- refactoring the fixture system so more people understand it :)
- integrating PyUnit setup methods as autouse fixtures.
possibly adding ways to influence ordering of same-scoped
fixtures (so you can make a choice of which fixtures come
before others)
- fixing bugs and issues from the tracker, really an endless source :)
Participants
--------------
Here are preliminary participants who said they are likely to come,
given some funding for expenses::
Anatoly Bubenkoff, Netherlands
Andreas Pelme, Personalkollen, Sweden
Anthony Wang, Splunk, US
Brianna Laugher, Australia
Bruno Oliveira, Brazil
Danielle Jenkins, Splunk, US
Dave Hunt, UK
Florian Bruhin, Switzerland
Floris Bruynooghe, Cobe.io, UK
Holger Krekel, merlinux, Germany
Oliver Bestwalter, Avira, Germany
Omar Kohl, Germany
Raphael Pierzina, FanDuel, UK
Tom Viner, UK
<your name here?>
Other contributors and experienced newcomers are invited to join as well,
but please send a mail to the pytest-dev mailing list soon if you intend
to do so, including how much funding you need. If you are working for a
company and using pytest heavily, you are welcome to join, and we
encourage your company to provide some funding for the sprint. They may
see it, and rightfully so, as a very cheap and deep training which brings
you together with the experts in the field :)
Sprint organisation, schedule
-------------------------------
tentative schedule:
- 19/20th arrival in Freiburg
- 20th social get together, initial hacking
- 21st/22nd full sprint days
- 23rd break day, hiking
- 24th/25th full sprint days
- 26th departure
We might adjust according to weather to make sure that if
we do some hiking or excursion we'll have good weather.
Freiburg is one of the sunniest places in Germany so
it shouldn't be too much of a constraint.
Accommodation
----------------
We'll arrange to rent a flat with multiple
beds/rooms. Hotels are usually below 100 per night.
The earlier we book, the better.
Money / funding
---------------
The Indiegogo campaign asks for 11000 USD, which should cover
the costs of flights and accommodation, renting a sprint place,
and maybe a bit of food as well.
If your organisation wants to support the sprint but prefers
to give money according to an invoice, get in contact with
holger at http://merlinux.eu who can invoice your organisation
properly.
If we have excess money we'll use it for further sprint/travel
funding for pytest/tox contributors.

View file

@ -0,0 +1,289 @@
The writing and reporting of assertions in tests
==================================================
.. _`assertfeedback`:
.. _`assert with the assert statement`:
.. _`assert`:
Asserting with the ``assert`` statement
---------------------------------------------------------
``pytest`` allows you to use the standard python ``assert`` for verifying
expectations and values in Python tests. For example, you can write the
following::
# content of test_assert1.py
def f():
return 3
def test_function():
assert f() == 4
to assert that your function returns a certain value. If this assertion fails
you will see the return value of the function call::
$ py.test test_assert1.py
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 items
test_assert1.py F
======= FAILURES ========
_______ test_function ________
def test_function():
> assert f() == 4
E assert 3 == 4
E + where 3 = f()
test_assert1.py:5: AssertionError
======= 1 failed in 0.12 seconds ========
``pytest`` has support for showing the values of the most common subexpressions
including calls, attributes, comparisons, and binary and unary
operators. (See :ref:`tbreportdemo`). This allows you to use the
idiomatic python constructs without boilerplate code while not losing
introspection information.
However, if you specify a message with the assertion like this::
assert a % 2 == 0, "value was odd, should be even"
then no assertion introspection takes place at all and the message
will simply be shown in the traceback.
See :ref:`assert-details` for more information on assertion introspection.
.. _`assertraises`:
Assertions about expected exceptions
------------------------------------------
In order to write assertions about raised exceptions, you can use
``pytest.raises`` as a context manager like this::
import pytest
def test_zero_division():
with pytest.raises(ZeroDivisionError):
1 / 0
and if you need to have access to the actual exception info you may use::
def test_recursion_depth():
with pytest.raises(RuntimeError) as excinfo:
def f():
f()
f()
assert 'maximum recursion' in str(excinfo.value)
``excinfo`` is an ``ExceptionInfo`` instance, which is a wrapper around
the actual exception raised. The main attributes of interest are
``.type``, ``.value`` and ``.traceback``.
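For example, a short sketch building on the division example above::

    import pytest

    def test_zero_division_details():
        with pytest.raises(ZeroDivisionError) as excinfo:
            1 / 0
        assert excinfo.type is ZeroDivisionError
        assert "division" in str(excinfo.value)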
If you want to write test code that works on Python 2.4 as well,
you may also use two other ways to test for an expected exception::
pytest.raises(ExpectedException, func, *args, **kwargs)
pytest.raises(ExpectedException, "func(*args, **kwargs)")
both of which execute the specified function with args and kwargs and
assert that the given ``ExpectedException`` is raised. The reporter will
provide you with helpful output in case of failures such as *no
exception* or *wrong exception*.
Note that it is also possible to specify a "raises" argument to
``pytest.mark.xfail``, which checks that the test is failing in a more
specific way than just having any exception raised::
@pytest.mark.xfail(raises=IndexError)
def test_f():
f()
Using ``pytest.raises`` is likely to be better for cases where you are testing
exceptions your own code is deliberately raising, whereas using
``@pytest.mark.xfail`` with a check function is probably better for something
like documenting unfixed bugs (where the test describes what "should" happen)
or bugs in dependencies.
.. _`assertwarns`:
Assertions about expected warnings
-----------------------------------------
.. versionadded:: 2.8
You can check that code raises a particular warning using
:ref:`pytest.warns <warns>`.
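For example, a minimal sketch::

    import warnings

    import pytest

    def test_deprecation_warning():
        with pytest.warns(DeprecationWarning):
            warnings.warn("use new_api() instead", DeprecationWarning)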
.. _newreport:
Making use of context-sensitive comparisons
-------------------------------------------------
.. versionadded:: 2.0
``pytest`` has rich support for providing context-sensitive information
when it encounters comparisons. For example::
# content of test_assert2.py
def test_set_comparison():
set1 = set("1308")
set2 = set("8035")
assert set1 == set2
if you run this module::
$ py.test test_assert2.py
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 items
test_assert2.py F
======= FAILURES ========
_______ test_set_comparison ________
def test_set_comparison():
set1 = set("1308")
set2 = set("8035")
> assert set1 == set2
E assert set(['0', '1', '3', '8']) == set(['0', '3', '5', '8'])
E Extra items in the left set:
E '1'
E Extra items in the right set:
E '5'
E Use -v to get the full diff
test_assert2.py:5: AssertionError
======= 1 failed in 0.12 seconds ========
Special comparisons are done for a number of cases:
* comparing long strings: a context diff is shown
* comparing long sequences: first failing indices
* comparing dicts: different entries (a short sketch follows below)
See the :ref:`reporting demo <tbreportdemo>` for many more examples.
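For example, in a dict comparison such as::

    def test_dict_comparison():
        assert {"a": 0, "b": 1} == {"a": 0, "b": 2}

the failure report shows the differing entry for ``'b'`` and omits the
identical items unless ``-v`` is used.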
Defining your own assertion comparison
----------------------------------------------
It is possible to add your own detailed explanations by implementing
the ``pytest_assertrepr_compare`` hook.
.. autofunction:: _pytest.hookspec.pytest_assertrepr_compare
As an example consider adding the following hook in a conftest.py which
provides an alternative explanation for ``Foo`` objects::
# content of conftest.py
from test_foocompare import Foo
def pytest_assertrepr_compare(op, left, right):
if isinstance(left, Foo) and isinstance(right, Foo) and op == "==":
return ['Comparing Foo instances:',
' vals: %s != %s' % (left.val, right.val)]
Now, given this test module::
# content of test_foocompare.py
class Foo:
def __init__(self, val):
self.val = val
def __eq__(self, other):
return self.val == other.val
def test_compare():
f1 = Foo(1)
f2 = Foo(2)
assert f1 == f2
you can run the test module and get the custom output defined in
the conftest file::
$ py.test -q test_foocompare.py
F
======= FAILURES ========
_______ test_compare ________
def test_compare():
f1 = Foo(1)
f2 = Foo(2)
> assert f1 == f2
E assert Comparing Foo instances:
E vals: 1 != 2
test_foocompare.py:11: AssertionError
1 failed in 0.12 seconds
.. _assert-details:
.. _`assert introspection`:
Advanced assertion introspection
----------------------------------
.. versionadded:: 2.1
Reporting details about a failing assertion is achieved either by rewriting
assert statements before they are run or re-evaluating the assert expression and
recording the intermediate values. Which technique is used depends on the
location of the assert, ``pytest`` configuration, and Python version being used
to run ``pytest``.
By default, ``pytest`` rewrites assert statements in test modules.
Rewritten assert statements put introspection information into the assertion failure message.
``pytest`` only rewrites test modules directly discovered by its test collection process, so
asserts in supporting modules which are not themselves test modules will not be
rewritten.
.. note::
``pytest`` rewrites test modules on import. It does this by using an import
hook to write new pyc files. Most of the time this works transparently.
However, if you are messing with import yourself, the import hook may
interfere. If this is the case, simply use ``--assert=reinterp`` or
``--assert=plain``. Additionally, rewriting will fail silently if it cannot
write new pycs, e.g. in a read-only filesystem or a zipfile.
If an assert statement has not been rewritten or the Python version is less than
2.6, ``pytest`` falls back on assert reinterpretation. In assert
reinterpretation, ``pytest`` walks the frame of the function containing the
assert statement to discover sub-expression results of the failing assert
statement. You can force ``pytest`` to always use assertion reinterpretation by
passing the ``--assert=reinterp`` option.
Assert reinterpretation has a caveat not present with assert rewriting: If
evaluating the assert expression has side effects you may get a warning that the
intermediate values could not be determined safely. A common example of this
issue is an assertion which reads from a file::
assert f.read() != '...'
If this assertion fails then the re-evaluation will probably succeed!
This is because ``f.read()`` will return an empty string when it is
called the second time during the re-evaluation. However, it is
easy to rewrite the assertion and avoid any trouble::
content = f.read()
assert content != '...'
All assert introspection can be turned off by passing ``--assert=plain``.
For further information, Benjamin Peterson wrote up `Behind the scenes of pytest's new assertion rewriting <http://pybites.blogspot.com/2011/07/behind-scenes-of-pytests-new-assertion.html>`_.
.. versionadded:: 2.1
Add assert rewriting as an alternate introspection technique.
.. versionchanged:: 2.1
Introduce the ``--assert`` option. Deprecate ``--no-assert`` and
``--nomagic``.

View file

@ -0,0 +1,28 @@
.. _bash_completion:
Setting up bash completion
==========================
When using bash as your shell, ``pytest`` can use argcomplete
(https://argcomplete.readthedocs.org/) for auto-completion.
For this, ``argcomplete`` needs to be installed **and** enabled.
Install argcomplete using::
sudo pip install 'argcomplete>=0.5.7'
For global activation of all argcomplete enabled python applications run::
sudo activate-global-python-argcomplete
For permanent (but not global) ``pytest`` activation, use::
register-python-argcomplete py.test >> ~/.bashrc
For one-time activation of argcomplete for ``pytest`` only, use::
eval "$(register-python-argcomplete py.test)"

View file

@ -0,0 +1,134 @@
.. _`pytest helpers`:
Pytest API and builtin fixtures
================================================
This is a list of ``pytest.*`` API functions and fixtures.
For information on plugin hooks and objects, see :ref:`plugins`.
For information on the ``pytest.mark`` mechanism, see :ref:`mark`.
For the below objects, you can also interactively ask for help, e.g. by
typing on the Python interactive prompt something like::
import pytest
help(pytest)
.. currentmodule:: pytest
Invoking pytest interactively
---------------------------------------------------
.. autofunction:: main
More examples at :ref:`pytest.main-usage`
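For example, a short sketch of invoking pytest from Python code (the test
path is hypothetical)::

    import pytest

    # returns the exit code of the test run; 0 means all tests passed
    exit_code = pytest.main(["-x", "tests/"])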
Helpers for assertions about Exceptions/Warnings
--------------------------------------------------------
.. autofunction:: raises
Examples at :ref:`assertraises`.
.. autofunction:: deprecated_call
Raising a specific test outcome
--------------------------------------
You can use the following functions in your test, fixture or setup
functions to force a certain test outcome. Note that most often
you can use declarative marks instead; see :ref:`skipping`.
A short usage sketch follows the function list below.
.. autofunction:: _pytest.runner.fail
.. autofunction:: _pytest.runner.skip
.. autofunction:: _pytest.runner.importorskip
.. autofunction:: _pytest.skipping.xfail
.. autofunction:: _pytest.runner.exit
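For example, a small sketch using two of these helpers (``docutils`` is just
an arbitrary optional dependency here)::

    import sys

    import pytest

    def test_posix_docutils():
        if sys.platform.startswith("win"):
            pytest.skip("requires a POSIX platform")
        docutils = pytest.importorskip("docutils")  # skips if not installed
        assert docutils is not None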
fixtures and requests
-----------------------------------------------------
To mark a fixture function:
.. autofunction:: _pytest.python.fixture
Tutorial at :ref:`fixtures`.
The ``request`` object that can be used from fixture functions.
.. autoclass:: _pytest.python.FixtureRequest()
:members:
.. _builtinfixtures:
.. _builtinfuncargs:
Builtin fixtures/function arguments
-----------------------------------------
You can ask for available builtin or project-custom
:ref:`fixtures <fixtures>` by typing::
$ py.test -q --fixtures
cache
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
capsys
enables capturing of writes to sys.stdout/sys.stderr and makes
captured output available via ``capsys.readouterr()`` method calls
which return a ``(out, err)`` tuple.
capfd
enables capturing of writes to file descriptors 1 and 2 and makes
captured output available via ``capfd.readouterr()`` method calls
which return a ``(out, err)`` tuple.
record_xml_property
Fixture that adds extra xml properties to the tag for the calling test.
The fixture is callable with (name, value), with value being automatically
xml-encoded.
monkeypatch
The returned ``monkeypatch`` funcarg provides these
helper methods to modify objects, dictionaries or os.environ::
monkeypatch.setattr(obj, name, value, raising=True)
monkeypatch.delattr(obj, name, raising=True)
monkeypatch.setitem(mapping, name, value)
monkeypatch.delitem(obj, name, raising=True)
monkeypatch.setenv(name, value, prepend=False)
monkeypatch.delenv(name, raising=True)
monkeypatch.syspath_prepend(path)
monkeypatch.chdir(path)
All modifications will be undone after the requesting
test function has finished. The ``raising``
parameter determines if a KeyError or AttributeError
will be raised if the set/deletion operation has no target.
pytestconfig
the pytest config object with access to command line opts.
recwarn
Return a WarningsRecorder instance that provides these methods:
* ``pop(category=None)``: return last warning matching the category.
* ``clear()``: clear list of warnings
See http://docs.python.org/library/warnings.html for information
on warning categories.
tmpdir_factory
Return a TempdirFactory instance for the test session.
tmpdir
return a temporary directory path object
which is unique to each test function invocation,
created as a sub directory of the base temporary
directory. The returned object is a `py.path.local`_
path object.
no tests ran in 0.12 seconds
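As a small illustration, here is a sketch combining the ``monkeypatch`` and
``tmpdir`` fixtures listed above (the file name and environment variable are
made up for the example)::

    import os

    def test_env_and_tmpdir(monkeypatch, tmpdir):
        cfg = tmpdir.join("app.cfg")                # tmpdir is a py.path.local
        cfg.write("debug = true")
        monkeypatch.setenv("APP_CONFIG", str(cfg))  # undone after the test
        assert os.environ["APP_CONFIG"] == str(cfg)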

View file

@ -0,0 +1,278 @@
Cache: working with cross-testrun state
=======================================
.. versionadded:: 2.8
.. warning::
The functionality of this core plugin was previously distributed
as a third party plugin named ``pytest-cache``. The core plugin
is compatible regarding command line options and API usage except that you
can only store/receive data between test runs that is json-serializable.
Usage
---------
The plugin provides two command line options to rerun failures from the
last ``py.test`` invocation:
* ``--lf``, ``--last-failed`` - to only re-run the failures.
* ``--ff``, ``--failed-first`` - to run the failures first and then the rest of
the tests.
For cleanup (usually not needed), a ``--cache-clear`` option allows removing
all cross-session cache contents ahead of a test run.
Other plugins may access the `config.cache`_ object to set/get
**json encodable** values between ``py.test`` invocations.
.. note::
This plugin is enabled by default, but can be disabled if needed: see
:ref:`cmdunregister` (the internal name for this plugin is
``cacheprovider``).
Rerunning only failures or failures first
-----------------------------------------------
First, let's create 50 test invocations of which only 2 fail::
# content of test_50.py
import pytest
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
pytest.fail("bad luck")
If you run this for the first time you will see two failures::
$ py.test -q
.................F.......F........................
======= FAILURES ========
_______ test_num[17] ________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_______ test_num[25] ________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
2 failed, 48 passed in 0.12 seconds
If you then run it with ``--lf``::
$ py.test --lf
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
run-last-failure: rerun last 2 failures
rootdir: $REGENDOC_TMPDIR, inifile:
collected 50 items
test_50.py FF
======= FAILURES ========
_______ test_num[17] ________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_______ test_num[25] ________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
======= 2 failed, 48 deselected in 0.12 seconds ========
You have run only the two failing tests from the last run, while 48 tests have
not been run ("deselected").
Now, if you run with the ``--ff`` option, all tests will be run but the
previous failures will be executed first (as can be seen from the series
of ``FF`` and dots)::
$ py.test --ff
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
run-last-failure: rerun last 2 failures first
rootdir: $REGENDOC_TMPDIR, inifile:
collected 50 items
test_50.py FF................................................
======= FAILURES ========
_______ test_num[17] ________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_______ test_num[25] ________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
======= 2 failed, 48 passed in 0.12 seconds ========
.. _`config.cache`:
The new config.cache object
--------------------------------
.. regendoc:wipe
Plugins or conftest.py support code can get a cached value using the
pytest ``config`` object. Here is a basic example plugin which
implements a :ref:`fixture` which re-uses previously created state
across py.test invocations::
# content of test_caching.py
import pytest
import time
@pytest.fixture
def mydata(request):
val = request.config.cache.get("example/value", None)
if val is None:
time.sleep(9*0.6) # expensive computation :)
val = 42
request.config.cache.set("example/value", val)
return val
def test_function(mydata):
assert mydata == 23
If you run this command once, it will take a while because
of the sleep::
$ py.test -q
F
======= FAILURES ========
_______ test_function ________
mydata = 42
def test_function(mydata):
> assert mydata == 23
E assert 42 == 23
test_caching.py:14: AssertionError
1 failed in 0.12 seconds
If you run it a second time the value will be retrieved from
the cache and this will be quick::
$ py.test -q
F
======= FAILURES ========
_______ test_function ________
mydata = 42
def test_function(mydata):
> assert mydata == 23
E assert 42 == 23
test_caching.py:14: AssertionError
1 failed in 0.12 seconds
See the `cache-api`_ for more details.
Inspecting Cache content
-------------------------------
You can always peek at the content of the cache using the
``--cache-show`` command line option, which prints the stored
values without running any tests::
$ py.test --cache-show
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
cachedir: $REGENDOC_TMPDIR/.cache
------------------------------- cache values -------------------------------
cache/lastfailed contains:
  {'test_caching.py::test_function': True}
example/value contains:
  42
======= no tests ran in 0.12 seconds ========
Clearing Cache content
-------------------------------
You can instruct pytest to clear all cache files and values
by adding the ``--cache-clear`` option like this::
py.test --cache-clear
This is recommended for invocations from Continuous Integration
servers where isolation and correctness are more important
than speed.
.. _`cache-api`:
config.cache API
------------------
The ``config.cache`` object allows other plugins,
including ``conftest.py`` files,
to safely and flexibly store and retrieve values across
test runs because the ``config`` object is available
in many places.
Under the hood, the cache plugin uses the simple
dumps/loads API of the json stdlib module.
.. currentmodule:: _pytest.cacheprovider
.. automethod:: Cache.get
.. automethod:: Cache.set
.. automethod:: Cache.makedir
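For example, a sketch of a ``conftest.py`` fixture that uses ``Cache.makedir``
to share a directory across test runs (the directory name is arbitrary)::

    # content of conftest.py
    import pytest

    @pytest.fixture
    def datadir(request):
        # the returned py.path.local persists across test sessions
        return request.config.cache.makedir("mydata")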

View file

@ -0,0 +1,118 @@
.. _`captures`:
Capturing of the stdout/stderr output
=========================================================
Default stdout/stderr/stdin capturing behaviour
---------------------------------------------------------
During test execution any output sent to ``stdout`` and ``stderr`` is
captured. If a test or a setup method fails, its corresponding captured
output will usually be shown along with the failure traceback.
In addition, ``stdin`` is set to a "null" object which will
fail on attempts to read from it because it is rarely desired
to wait for interactive input when running automated tests.
By default capturing is done by intercepting writes to low level
file descriptors. This allows capturing output from simple
print statements as well as output from a subprocess started by
a test.
Setting capturing methods or disabling capturing
-------------------------------------------------
There are two ways in which ``pytest`` can perform capturing:
* file descriptor (FD) level capturing (default): All writes going to the
operating system file descriptors 1 and 2 will be captured.
* ``sys`` level capturing: Only writes to Python files ``sys.stdout``
and ``sys.stderr`` will be captured. No capturing of writes to
filedescriptors is performed.
.. _`disable capturing`:
You can influence output capturing mechanisms from the command line::
py.test -s # disable all capturing
py.test --capture=sys # replace sys.stdout/stderr with in-mem files
py.test --capture=fd # also point filedescriptors 1 and 2 to temp file
.. _printdebugging:
Using print statements for debugging
---------------------------------------------------
One primary benefit of the default capturing of stdout/stderr output
is that you can use print statements for debugging::
# content of test_module.py
def setup_function(function):
print ("setting up %s" % function)
def test_func1():
assert True
def test_func2():
assert False
and running this module will show you precisely the output
of the failing function and hide the other one::
$ py.test
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items
test_module.py .F
======= FAILURES ========
_______ test_func2 ________
def test_func2():
> assert False
E assert False
test_module.py:9: AssertionError
-------------------------- Captured stdout setup ---------------------------
setting up <function test_func2 at 0xdeadbeef>
======= 1 failed, 1 passed in 0.12 seconds ========
Accessing captured output from a test function
---------------------------------------------------
The ``capsys`` and ``capfd`` fixtures allow access to the stdout/stderr
output created during test execution. Here is an example test function
that performs some output related checks:
.. code-block:: python
    import sys

    def test_myoutput(capsys):  # or use "capfd" for fd-level
        print ("hello")
        sys.stderr.write("world\n")
        out, err = capsys.readouterr()
        assert out == "hello\n"
        assert err == "world\n"
        print ("next")
        out, err = capsys.readouterr()
        assert out == "next\n"
The ``readouterr()`` call snapshots the output so far,
and capturing continues. After the test
function finishes, the original streams will
be restored. Using ``capsys`` this way frees your
test from having to care about setting/resetting
output streams and also interacts well with pytest's
own per-test capturing.
If you want to capture at the file descriptor level you can use
the ``capfd`` function argument, which offers the exact
same interface but also captures output from
libraries or subprocesses that write directly to operating
system level output streams (FD 1 and FD 2).
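For example, a sketch that captures output written by a subprocess directly
to file descriptor 1 (assuming a Unix-like shell)::

    import os

    def test_system_echo(capfd):
        os.system('echo "hello"')       # bypasses sys.stdout entirely
        out, err = capfd.readouterr()
        assert out == "hello\n"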
.. include:: links.inc

View file

@ -0,0 +1,7 @@
.. _changelog:
Changelog history
=================================
.. include:: ../../CHANGELOG.rst

View file

@ -0,0 +1,17 @@
import py
import subprocess
def test_build_docs(tmpdir):
doctrees = tmpdir.join("doctrees")
htmldir = tmpdir.join("html")
subprocess.check_call([
"sphinx-build", "-W", "-bhtml",
"-d", str(doctrees), ".", str(htmldir)])
def test_linkcheck(tmpdir):
doctrees = tmpdir.join("doctrees")
htmldir = tmpdir.join("html")
subprocess.check_call(
["sphinx-build", "-blinkcheck",
"-d", str(doctrees), ".", str(htmldir)])

View file

@ -0,0 +1,326 @@
# -*- coding: utf-8 -*-
#
# pytest documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 8 17:54:28 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.
import os, sys
sys.path.insert(0, os.path.dirname(__file__))
import _getdoctarget
version = _getdoctarget.get_minor_version_string()
release = _getdoctarget.get_version_string()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
autodoc_member_order = "bysource"
todo_include_todos = 1
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.autosummary',
'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'pytest'
copyright = u'2015, holger krekel and pytest-dev team'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['links.inc', '_build', 'naming20.rst', 'test/*',
"old_*",
'*attic*',
'*/attic*',
'funcargs.rst',
'setup.rst',
'example/remoteinterp.rst',
]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
sys.path.append(os.path.abspath('_themes'))
html_theme_path = ['_themes']
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'flask'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'index_logo': None
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "pytest-%s" % release
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "img/pytest1.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "img/pytest1favi.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
#html_sidebars = {'index': 'indexsidebar.html'}
html_sidebars = {
'index': [
'sidebarintro.html',
'globaltoc.html',
'links.html',
'sourcelink.html',
'searchbox.html'
],
'**': [
'globaltoc.html',
'relations.html',
'links.html',
'sourcelink.html',
'searchbox.html'
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
#html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pytestdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'pytest.tex', u'pytest Documentation',
u'holger krekel, trainer and consultant, http://merlinux.eu', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'img/pytest1.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('usage', 'pytest', u'pytest usage',
[u'holger krekel at merlinux eu'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pytest'
epub_author = u'holger krekel at merlinux eu'
epub_publisher = u'holger krekel at merlinux eu'
epub_copyright = u'2013, holger krekel et alii'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# -- Options for texinfo output ------------------------------------------------
texinfo_documents = [
(master_doc, 'pytest', 'pytest Documentation',
('Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*'
'Floris Bruynooghe@*others'),
'pytest',
'simple powerful testing with Python',
'Programming',
1),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
# 'lib': ("http://docs.python.org/2.7library/", None),
}
def setup(app):
#from sphinx.ext.autodoc import cut_lines
#app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_description_unit('confval', 'confval',
objname='configuration value',
indextemplate='pair: %s; configuration value')

View file

@ -0,0 +1 @@
collect_ignore = ["conf.py"]

View file

@ -0,0 +1,51 @@
.. _`contact channels`:
.. _`contact`:
Contact channels
===================================
- `pytest issue tracker`_ to report bugs or suggest features (for version
2.0 and above).
- `pytest on stackoverflow.com <http://stackoverflow.com/search?q=pytest>`_
to post questions with the tag ``pytest``. New questions will usually
be seen by pytest users or developers and answered quickly.
- `Testing In Python`_: a mailing list for Python testing tools and discussion.
- `pytest-dev at python.org (mailing list)`_ pytest specific announcements and discussions.
- `pytest-commit at python.org (mailing list)`_: for commits and new issues
- :doc:`contribution guide <contributing>` for help on submitting pull
requests to bitbucket (including using git via gitifyhg).
- #pylib on irc.freenode.net IRC channel for random questions.
- private mail to Holger.Krekel at gmail com if you want to communicate sensitive issues
- `merlinux.eu`_ offers pytest and tox-related professional teaching and
consulting.
.. _`pytest issue tracker`: https://github.com/pytest-dev/pytest/issues
.. _`old issue tracker`: http://bitbucket.org/hpk42/py-trunk/issues/
.. _`merlinux.eu`: http://merlinux.eu
.. _`get an account`:
.. _tetamap: http://tetamap.wordpress.com
.. _`@pylibcommit`: http://twitter.com/pylibcommit
.. _`Testing in Python`: http://lists.idyll.org/listinfo/testing-in-python
.. _FOAF: http://en.wikipedia.org/wiki/FOAF
.. _`py-dev`:
.. _`development mailing list`:
.. _`pytest-dev at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-dev
.. _`py-svn`:
.. _`pytest-commit at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-commit

View file

@ -0,0 +1,39 @@
.. _toc:
Full pytest documentation
===========================
`Download latest version as PDF <pytest.pdf>`_
.. `Download latest version as EPUB <http://media.readthedocs.org/epub/pytest/latest/pytest.epub>`_
.. toctree::
:maxdepth: 2
overview
apiref
example/index
monkeypatch
tmpdir
capture
recwarn
cache
plugins
contributing
talks
.. only:: html
.. toctree::
funcarg_compare
announce/index
.. only:: html
.. toctree::
:hidden:
changelog

View file

@ -0,0 +1,3 @@
.. _contributing:
.. include:: ../../CONTRIBUTING.rst

View file

@ -0,0 +1,228 @@
Basic test configuration
===================================
Command line options and configuration file settings
-----------------------------------------------------------------
You can get help on command line options and values in INI-style
configuration files by using the general help option::
py.test -h # prints options _and_ config file settings
This will display command line and configuration file settings
which were registered by installed plugins.
.. _rootdir:
.. _inifiles:
initialization: determining rootdir and inifile
-----------------------------------------------
.. versionadded:: 2.7
pytest determines a "rootdir" for each test run which depends on
the command line arguments (specified test files, paths) and on
the existence of inifiles. The determined rootdir and ini-file are
printed as part of the pytest header. The rootdir is used for constructing
"nodeids" during collection and may also be used by plugins to store
project/testrun-specific information.
Here is the algorithm which finds the rootdir from ``args``:
- determine the common ancestor directory for the specified ``args``.
- look for ``pytest.ini``, ``tox.ini`` and ``setup.cfg`` files in the
ancestor directory and upwards. If one is matched, it becomes the
ini-file and its directory becomes the rootdir. An existing
``pytest.ini`` file will always be considered a match whereas
``tox.ini`` and ``setup.cfg`` will only match if they contain
a ``[pytest]`` section.
- if no ini-file was found, look for ``setup.py`` upwards from
the common ancestor directory to determine the ``rootdir``.
- if no ini-file and no ``setup.py`` was found, use the already
determined common ancestor as root directory. This allows
working with pytest in structures that are not part of a package
and don't have any particular ini-file configuration.
Note that options from multiple ini-file candidates are never merged;
the first one wins (``pytest.ini`` always wins, even if it does not
contain a ``[pytest]`` section).
The ``config`` object will subsequently carry these attributes:
- ``config.rootdir``: the determined root directory, guaranteed to exist.
- ``config.inifile``: the determined ini-file, may be ``None``.
The rootdir is used as a reference directory for constructing test
addresses ("nodeids") and can also be used by plugins for storing
per-testrun information.
Example::
py.test path/to/testdir path/other/
will determine the common ancestor as ``path`` and then
check for ini-files as follows::
# first look for pytest.ini files
path/pytest.ini
path/setup.cfg # must also contain [pytest] section to match
path/tox.ini # must also contain [pytest] section to match
pytest.ini
... # all the way down to the root
# now look for setup.py
path/setup.py
setup.py
... # all the way down to the root
.. _`how to change command line options defaults`:
.. _`adding default options`:
How to change command line options defaults
------------------------------------------------
It can be tedious to type the same series of command line options
every time you use ``pytest``. For example, if you always want to see
detailed info on skipped and xfailed tests, as well as have terser "dot"
progress output, you can write it into a configuration file:
.. code-block:: ini
# content of pytest.ini
# (or tox.ini or setup.cfg)
[pytest]
addopts = -rsxX -q
Alternatively, you can set a PYTEST_ADDOPTS environment variable to add command
line options while the environment is in use::
export PYTEST_ADDOPTS="-rsxX -q"
From now on, running ``pytest`` will add the specified options.
Builtin configuration file options
----------------------------------------------
.. confval:: minversion
Specifies a minimal pytest version required for running tests.
minversion = 2.1 # will fail if we run with pytest-2.0
.. confval:: addopts
Add the specified ``OPTS`` to the set of command line arguments as if they
had been specified by the user. Example: if you have this ini file content:
.. code-block:: ini
[pytest]
addopts = --maxfail=2 -rf # exit after 2 failures, report fail info
issuing ``py.test test_hello.py`` actually means::
py.test --maxfail=2 -rf test_hello.py
Default is to add no options.
.. confval:: norecursedirs
Set the directory basename patterns to avoid when recursing
for test discovery. The individual (fnmatch-style) patterns are
applied to the basename of a directory to decide if to recurse into it.
Pattern matching characters::
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
Default patterns are ``'.*', 'CVS', '_darcs', '{arch}', '*.egg'``.
Setting ``norecursedirs`` replaces the default. Here is an example of
how to avoid certain directories:
.. code-block:: ini
# content of setup.cfg
[pytest]
norecursedirs = .svn _build tmp*
This would tell ``pytest`` to not look into typical subversion or
sphinx-build directories or into any ``tmp`` prefixed directory.
.. confval:: testpaths
.. versionadded:: 2.8
Sets a list of directories that should be searched for tests when
no specific directories, files or test ids are given on the command line, when
executing pytest from the :ref:`rootdir <rootdir>` directory.
Useful when all project tests are in a known location to speed up
test collection and to avoid picking up undesired tests by accident.
.. code-block:: ini
# content of pytest.ini
[pytest]
testpaths = testing doc
This tells pytest to only look for tests in ``testing`` and ``doc``
directories when executing from the root directory.
.. confval:: python_files
One or more glob-style file patterns determining which python files
are considered as test modules.
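For example, to collect from ``check_``-prefixed files instead of the default
``test_*.py`` pattern (the same convention used in the
:ref:`change naming conventions` examples):

.. code-block:: ini

    # content of pytest.ini
    [pytest]
    python_files = check_*.py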
.. confval:: python_classes
One or more name prefixes or glob-style patterns determining which classes
are considered for test collection. Here is an example of how to collect
tests from classes that end in ``Suite``:
.. code-block:: ini
# content of pytest.ini
[pytest]
python_classes = *Suite
Note that ``unittest.TestCase`` derived classes are always collected
regardless of this option, as ``unittest``'s own collection framework is used
to collect those tests.
.. confval:: python_functions
One or more name prefixes or glob-patterns determining which test functions
and methods are considered tests. Here is an example of how
to collect test functions and methods that end in ``_test``:
.. code-block:: ini
# content of pytest.ini
[pytest]
python_functions = *_test
Note that this has no effect on methods defined on a ``unittest.TestCase``
derived class, as ``unittest``'s own collection framework is used
to collect those tests.
See :ref:`change naming conventions` for more detailed examples.
.. confval:: doctest_optionflags
One or more doctest flag names from the standard ``doctest`` module.
:doc:`See how py.test handles doctests <doctest>`.
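For example, to relax whitespace and exception-detail matching in all doctests,
using the same flags shown in the doctest chapter:

.. code-block:: ini

    # content of pytest.ini
    [pytest]
    doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL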
.. confval:: confcutdir
Sets the directory at which the upward search for ``conftest.py`` files stops.
By default, pytest will stop searching for ``conftest.py`` files upwards
from ``pytest.ini``/``tox.ini``/``setup.cfg`` of the project if any,
or up to the file-system root.
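As a sketch (the layout here is hypothetical; adjust the path to your project),
the stopping point can also be set explicitly through the ``--confcutdir``
command line option, for example via ``addopts``:

.. code-block:: ini

    # content of pytest.ini
    [pytest]
    addopts = --confcutdir=..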

View file

@ -0,0 +1,105 @@
Doctest integration for modules and test files
=========================================================
By default all files matching the ``test*.txt`` pattern will
be run through the python standard ``doctest`` module. You
can change the pattern by issuing::
py.test --doctest-glob='*.rst'
on the command line. Since version ``2.9``, ``--doctest-glob``
can be given multiple times.
You can also trigger running of doctests
from docstrings in all python modules (including regular
python test modules)::
py.test --doctest-modules
You can make these changes permanent in your project by
putting them into a ``pytest.ini`` file like this:
.. code-block:: ini
# content of pytest.ini
[pytest]
addopts = --doctest-modules
If you then have a text file like this::
# content of example.rst
hello this is a doctest
>>> x = 3
>>> x
3
and another like this::
# content of mymodule.py
def something():
""" a doctest in a docstring
>>> something()
42
"""
return 42
then you can just invoke ``py.test`` without command line options::
$ py.test
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 1 items
mymodule.py .
======= 1 passed in 0.12 seconds ========
It is possible to use fixtures using the ``getfixture`` helper::
# content of example.rst
>>> tmp = getfixture('tmpdir')
>>> ...
>>>
Also, :ref:`usefixtures` and :ref:`autouse` fixtures are supported
when executing text doctest files.
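For instance, as a minimal sketch with a hypothetical ``conftest.py`` placed
next to the text files, an autouse fixture will also run around each collected
doctest file::

    # content of conftest.py
    import pytest

    @pytest.fixture(autouse=True)
    def announce():
        # hypothetical setup action; runs for each collected doctest item too
        print("setting up a doctest run")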
The standard ``doctest`` module provides some setting flags to configure the
strictness of doctest tests. In py.test you can enable those flags
using the configuration file. To make pytest ignore trailing whitespace and
lengthy exception stack traces you can just write:
.. code-block:: ini
[pytest]
doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
py.test also introduces new options to allow doctests to run in Python 2 and
Python 3 unchanged:
* ``ALLOW_UNICODE``: when enabled, the ``u`` prefix is stripped from unicode
strings in expected doctest output.
* ``ALLOW_BYTES``: when enabled, the ``b`` prefix is stripped from byte strings
in expected doctest output.
As with any other option flag, these flags can be enabled in ``pytest.ini`` using
the ``doctest_optionflags`` ini option:
.. code-block:: ini
[pytest]
doctest_optionflags = ALLOW_UNICODE ALLOW_BYTES
Alternatively, the flags can be enabled by an inline comment in the doctest
itself::
# content of example.rst
>>> get_unicode_greeting() # doctest: +ALLOW_UNICODE
'Hello'

View file

@ -0,0 +1,238 @@
from pytest import raises
import _pytest._code
import py
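# NOTE: every test in this module fails on purpose; the module demonstrates
# pytest's failure reporting (see the reportingdemo document).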
def otherfunc(a,b):
assert a==b
def somefunc(x,y):
otherfunc(x,y)
def otherfunc_multi(a,b):
assert (a ==
b)
def test_generative(param1, param2):
assert param1 * 2 < param2
def pytest_generate_tests(metafunc):
if 'param1' in metafunc.fixturenames:
metafunc.addcall(funcargs=dict(param1=3, param2=6))
class TestFailing(object):
def test_simple(self):
def f():
return 42
def g():
return 43
assert f() == g()
def test_simple_multiline(self):
otherfunc_multi(
42,
6*9)
def test_not(self):
def f():
return 42
assert not f()
class TestSpecialisedExplanations(object):
def test_eq_text(self):
assert 'spam' == 'eggs'
def test_eq_similar_text(self):
assert 'foo 1 bar' == 'foo 2 bar'
def test_eq_multiline_text(self):
assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
def test_eq_long_text(self):
a = '1'*100 + 'a' + '2'*100
b = '1'*100 + 'b' + '2'*100
assert a == b
def test_eq_long_text_multiline(self):
a = '1\n'*100 + 'a' + '2\n'*100
b = '1\n'*100 + 'b' + '2\n'*100
assert a == b
def test_eq_list(self):
assert [0, 1, 2] == [0, 1, 3]
def test_eq_list_long(self):
a = [0]*100 + [1] + [3]*100
b = [0]*100 + [2] + [3]*100
assert a == b
def test_eq_dict(self):
assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
def test_eq_set(self):
assert set([0, 10, 11, 12]) == set([0, 20, 21])
def test_eq_longer_list(self):
assert [1,2] == [1,2,3]
def test_in_list(self):
assert 1 in [0, 2, 3, 4, 5]
def test_not_in_text_multiline(self):
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
assert 'foo' not in text
def test_not_in_text_single(self):
text = 'single foo line'
assert 'foo' not in text
def test_not_in_text_single_long(self):
text = 'head ' * 50 + 'foo ' + 'tail ' * 20
assert 'foo' not in text
def test_not_in_text_single_long_term(self):
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
assert 'f'*70 not in text
def test_attribute():
class Foo(object):
b = 1
i = Foo()
assert i.b == 2
def test_attribute_instance():
class Foo(object):
b = 1
assert Foo().b == 2
def test_attribute_failure():
class Foo(object):
def _get_b(self):
raise Exception('Failed to get attrib')
b = property(_get_b)
i = Foo()
assert i.b == 2
def test_attribute_multiple():
class Foo(object):
b = 1
class Bar(object):
b = 2
assert Foo().b == Bar().b
def globf(x):
return x+1
class TestRaises:
def test_raises(self):
s = 'qwe'
raises(TypeError, "int(s)")
def test_raises_doesnt(self):
raises(IOError, "int('3')")
def test_raise(self):
raise ValueError("demo error")
def test_tupleerror(self):
a,b = [1]
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
l = [1,2,3]
print ("l is %r" % l)
a,b = l.pop()
def test_some_error(self):
if namenotexi:
pass
def func1(self):
assert 41 == 42
# thanks to Matthew Scott for this test
def test_dynamic_compile_shows_nicely():
src = 'def foo():\n assert 1 == 0\n'
name = 'abc-123'
module = py.std.imp.new_module(name)
code = _pytest._code.compile(src, name, 'exec')
py.builtin.exec_(code, module.__dict__)
py.std.sys.modules[name] = module
module.foo()
class TestMoreErrors:
def test_complex_error(self):
def f():
return 44
def g():
return 43
somefunc(f(), g())
def test_z1_unpack_error(self):
l = []
a,b = l
def test_z2_type_error(self):
l = 3
a,b = l
def test_startswith(self):
s = "123"
g = "456"
assert s.startswith(g)
def test_startswith_nested(self):
def f():
return "123"
def g():
return "456"
assert f().startswith(g())
def test_global_func(self):
assert isinstance(globf(42), float)
def test_instance(self):
self.x = 6*7
assert self.x != 42
def test_compare(self):
assert globf(10) < 5
def test_try_finally(self):
x = 1
try:
assert x == 0
finally:
x = 0
class TestCustomAssertMsg:
def test_single_line(self):
class A:
a = 1
b = 2
assert A.a == b, "A.a appears not to be b"
def test_multiline(self):
class A:
a = 1
b = 2
assert A.a == b, "A.a appears not to be b\n" \
"or does not appear to be b\none of those"
def test_custom_repr(self):
class JSON:
a = 1
def __repr__(self):
return "This is JSON\n{\n 'foo': 'bar'\n}"
a = JSON()
b = 2
assert a.a == b, a

View file

@ -0,0 +1,10 @@
import pytest, py
mydir = py.path.local(__file__).dirpath()
def pytest_runtest_setup(item):
if isinstance(item, pytest.Function):
if not item.fspath.relto(mydir):
return
mod = item.getparent(pytest.Module).obj
if hasattr(mod, 'hello'):
print ("mod.hello %r" % (mod.hello,))

View file

@ -0,0 +1,5 @@
hello = "world"
def test_func():
pass

View file

@ -0,0 +1,14 @@
import py
failure_demo = py.path.local(__file__).dirpath('failure_demo.py')
pytest_plugins = 'pytester',
def test_failure_demo_fails_properly(testdir):
target = testdir.tmpdir.join(failure_demo.basename)
failure_demo.copy(target)
result = testdir.runpytest(target, syspathinsert=True)
result.stdout.fnmatch_lines([
"*42 failed*"
])
assert result.ret != 0

View file

@ -0,0 +1,42 @@
def setup_module(module):
module.TestStateFullThing.classcount = 0
class TestStateFullThing:
def setup_class(cls):
cls.classcount += 1
def teardown_class(cls):
cls.classcount -= 1
def setup_method(self, method):
self.id = eval(method.__name__[5:])
def test_42(self):
assert self.classcount == 1
assert self.id == 42
def test_23(self):
assert self.classcount == 1
assert self.id == 23
def teardown_module(module):
assert module.TestStateFullThing.classcount == 0
""" For this example the control flow happens as follows::
import test_setup_flow_example
setup_module(test_setup_flow_example)
setup_class(TestStateFullThing)
instance = TestStateFullThing()
setup_method(instance, instance.test_42)
instance.test_42()
setup_method(instance, instance.test_23)
instance.test_23()
teardown_class(TestStateFullThing)
teardown_module(test_setup_flow_example)
Note that ``setup_class(TestStateFullThing)`` is called and not
``TestStateFullThing.setup_class()`` which would require you
to insert ``setup_class = classmethod(setup_class)`` to make
your setup function callable.
"""

View file

@ -0,0 +1,79 @@
.. _`accept example`:
example: specifying and selecting acceptance tests
--------------------------------------------------------------
.. sourcecode:: python
# ./conftest.py
import py
import pytest

def pytest_addoption(parser):
group = parser.getgroup("myproject")
group.addoption("-A", dest="acceptance", action="store_true",
help="run (slow) acceptance tests")
def pytest_funcarg__accept(request):
return AcceptFixture(request)
class AcceptFixture:
def __init__(self, request):
if not request.config.option.acceptance:
pytest.skip("specify -A to run acceptance tests")
self.tmpdir = request.config.mktemp(request.function.__name__, numbered=True)
def run(self, cmd):
""" called by test code to execute an acceptance test. """
self.tmpdir.chdir()
return py.process.cmdexec(cmd)
and the actual test function example:
.. sourcecode:: python
def test_some_acceptance_aspect(accept):
accept.tmpdir.mkdir("somesub")
result = accept.run("ls -la")
assert "somesub" in result
If you run this test without specifying a command line option
the test will get skipped with an appropriate message. Otherwise
you can start to add convenience and test support methods
to your AcceptFixture and drive running of tools or
applications and provide ways to do assertions about
the output.
.. _`decorate a funcarg`:
example: decorating a funcarg in a test module
--------------------------------------------------------------
For larger scale setups it's sometimes useful to decorate
a funcarg just for a particular test module. We can
extend the `accept example`_ by putting this in our test module:
.. sourcecode:: python
def pytest_funcarg__accept(request):
# call the next factory (living in our conftest.py)
arg = request.getfuncargvalue("accept")
# create a special layout in our tempdir
arg.tmpdir.mkdir("special")
return arg
class TestSpecialAcceptance:
def test_sometest(self, accept):
assert accept.tmpdir.join("special").check()
Our module level factory will be invoked first and it can
ask its request object to call the next factory and then
decorate its result. This mechanism allows us to stay
ignorant of how/where the function argument is provided -
in our example from a `conftest plugin`_.
Sidenote: the temporary directories used here are instances of
the `py.path.local`_ class, which provides many of the ``os.path``
methods in a convenient way.
.. _`py.path.local`: ../path.html#local
.. _`conftest plugin`: customize.html#conftestplugin

View file

@ -0,0 +1 @@
collect_ignore = ["nonpython"]

View file

@ -0,0 +1,18 @@
import pytest
@pytest.fixture(scope="session")
def setup(request):
setup = CostlySetup()
request.addfinalizer(setup.finalize)
return setup
class CostlySetup:
def __init__(self):
import time
print ("performing costly setup")
time.sleep(5)
self.timecostly = 1
def finalize(self):
del self.timecostly

View file

@ -0,0 +1,3 @@
def test_quick(setup):
pass

View file

@ -0,0 +1,6 @@
def test_something(setup):
assert setup.timecostly == 1
def test_something_more(setup):
assert setup.timecostly == 1

View file

@ -0,0 +1,34 @@
.. _examples:
Usages and Examples
===========================================
Here is a (growing) list of examples. :ref:`Contact <contact>` us if you
need more examples or have questions. Also take a look at the
:ref:`comprehensive documentation <toc>`, which contains many example
snippets as well. In addition, `pytest on stackoverflow.com
<http://stackoverflow.com/search?q=pytest>`_ often comes with example
answers.
For basic examples, see
- :doc:`../getting-started` for basic introductory examples
- :ref:`assert` for basic assertion examples
- :ref:`fixtures` for basic fixture/setup examples
- :ref:`parametrize` for basic test function parametrization
- :doc:`../unittest` for basic unittest integration
- :doc:`../nose` for basic nosetests integration
The following examples aim at various use cases you might encounter.
.. toctree::
:maxdepth: 2
reportingdemo
simple
parametrize
markers
special
pythoncollection
nonpython

View file

@ -0,0 +1,4 @@
[pytest]
testfilepatterns =
${topdir}/tests/unit/test_${basename}
${topdir}/tests/functional/*.py

View file

@ -0,0 +1,592 @@
.. _`mark examples`:
Working with custom markers
=================================================
Here are some examples using the :ref:`mark` mechanism.
Marking test functions and selecting them for a run
----------------------------------------------------
You can "mark" a test function with custom metadata like this::
# content of test_server.py
import pytest
@pytest.mark.webtest
def test_send_http():
pass # perform some webtest test for your app
def test_something_quick():
pass
def test_another():
pass
class TestClass:
def test_method(self):
pass
.. versionadded:: 2.2
You can then restrict a test run to only run tests marked with ``webtest``::
$ py.test -v -m webtest
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 4 items
test_server.py::test_send_http PASSED
======= 3 tests deselected by "-m 'webtest'" ========
======= 1 passed, 3 deselected in 0.12 seconds ========
Or the inverse, running all tests except the webtest ones::
$ py.test -v -m "not webtest"
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 4 items
test_server.py::test_something_quick PASSED
test_server.py::test_another PASSED
test_server.py::TestClass::test_method PASSED
======= 1 tests deselected by "-m 'not webtest'" ========
======= 3 passed, 1 deselected in 0.12 seconds ========
Selecting tests based on their node ID
--------------------------------------
You can provide one or more :ref:`node IDs <node-id>` as positional
arguments to select only specified tests. This makes it easy to select
tests based on their module, class, method, or function name::
$ py.test -v test_server.py::TestClass::test_method
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 5 items
test_server.py::TestClass::test_method PASSED
======= 1 passed in 0.12 seconds ========
You can also select on the class::
$ py.test -v test_server.py::TestClass
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 4 items
test_server.py::TestClass::test_method PASSED
======= 1 passed in 0.12 seconds ========
Or select multiple nodes::
$ py.test -v test_server.py::TestClass test_server.py::test_send_http
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 8 items
test_server.py::TestClass::test_method PASSED
test_server.py::test_send_http PASSED
======= 2 passed in 0.12 seconds ========
.. _node-id:
.. note::
Node IDs are of the form ``module.py::class::method`` or
``module.py::function``. Node IDs control which tests are
collected, so ``module.py::class`` will select all test methods
on the class. Nodes are also created for each parameter of a
parametrized fixture or test, so selecting a parametrized test
must include the parameter value, e.g.
``module.py::function[param]``.
Node IDs for failing tests are displayed in the test summary info
when running py.test with the ``-rf`` option. You can also
construct Node IDs from the output of ``py.test --collectonly``.
Using ``-k expr`` to select tests based on their name
-------------------------------------------------------
.. versionadded: 2.0/2.3.4
You can use the ``-k`` command line option to specify an expression
which implements a substring match on the test names instead of the
exact match on markers that ``-m`` provides. This makes it easy to
select tests based on their names::
$ py.test -v -k http # running with the above defined example module
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 4 items
test_server.py::test_send_http PASSED
======= 3 tests deselected by '-khttp' ========
======= 1 passed, 3 deselected in 0.12 seconds ========
And you can also run all tests except the ones that match the keyword::
$ py.test -k "not send_http" -v
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 4 items
test_server.py::test_something_quick PASSED
test_server.py::test_another PASSED
test_server.py::TestClass::test_method PASSED
======= 1 tests deselected by '-knot send_http' ========
======= 3 passed, 1 deselected in 0.12 seconds ========
Or to select "http" and "quick" tests::
$ py.test -k "http or quick" -v
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 4 items
test_server.py::test_send_http PASSED
test_server.py::test_something_quick PASSED
======= 2 tests deselected by '-khttp or quick' ========
======= 2 passed, 2 deselected in 0.12 seconds ========
.. note::
If you are using expressions such as "X and Y" then both X and Y
need to be simple non-keyword names. For example, "pass" or "from"
will result in SyntaxErrors because "-k" evaluates the expression.
However, if the "-k" argument is a simple string, no such restrictions
apply. Also "-k 'not STRING'" has no restrictions. You can also
specify numbers like "-k 1.3" to match tests which are parametrized
with the float "1.3".
Registering markers
-------------------------------------
.. versionadded:: 2.2
.. ini-syntax for custom markers:
Registering markers for your test suite is simple::
# content of pytest.ini
[pytest]
markers =
webtest: mark a test as a webtest.
You can ask which markers exist for your test suite - the list includes our just-defined ``webtest`` marker::
$ py.test --markers
@pytest.mark.webtest: mark a test as a webtest.
@pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
@pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html
@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
@pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
@pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
For an example on how to add and work with markers from a plugin, see
:ref:`adding a custom marker from a plugin`.
.. note::
It is recommended to explicitly register markers so that:
* there is one place in your test suite defining your markers
* asking for existing markers via ``py.test --markers`` gives good output
* typos in function markers are treated as an error if you use
the ``--strict`` option. Future versions of ``pytest`` are probably
going to start treating non-registered markers as errors at some point.
.. _`scoped-marking`:
Marking whole classes or modules
----------------------------------------------------
You may use ``pytest.mark`` decorators with classes to apply markers to all of
their test methods::
# content of test_mark_classlevel.py
import pytest
@pytest.mark.webtest
class TestClass:
def test_startup(self):
pass
def test_startup_and_more(self):
pass
This is equivalent to directly applying the decorator to the
two test functions.
To remain backward-compatible with Python 2.4 you can also set a
``pytestmark`` attribute on a TestClass like this::
import pytest
class TestClass:
pytestmark = pytest.mark.webtest
or if you need to use multiple markers you can use a list::
import pytest
class TestClass:
pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]
You can also set a module level marker::
import pytest
pytestmark = pytest.mark.webtest
in which case it will be applied to all functions and
methods defined in the module.
.. _`marking individual tests when using parametrize`:
Marking individual tests when using parametrize
-----------------------------------------------
When using parametrize, applying a mark will make it apply
to each individual test. However, it is also possible to
apply a marker to an individual test instance::
import pytest
@pytest.mark.foo
@pytest.mark.parametrize(("n", "expected"), [
(1, 2),
pytest.mark.bar((1, 3)),
(2, 3),
])
def test_increment(n, expected):
assert n + 1 == expected
In this example the mark "foo" will apply to each of the three
tests, whereas the "bar" mark is only applied to the second test.
Skip and xfail marks can also be applied in this way, see :ref:`skip/xfail with parametrize`.
.. note::
If the data you are parametrizing happen to be single callables, you need to be careful
when marking these items. ``pytest.mark.xfail(my_func)`` won't work because it's also the
signature of a function being decorated. To resolve this ambiguity, you need to pass a
reason argument:
``pytest.mark.xfail(func_bar, reason="Issue#7")``.
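A minimal sketch (``func_bar`` and ``test_calls`` are hypothetical names,
using the same mark-inside-``parametrize`` style as the example above)::

    import pytest

    def func_bar():
        return 1

    @pytest.mark.parametrize("func", [
        # without reason=..., pytest.mark.xfail(func_bar) would be read as
        # directly decorating func_bar rather than marking the parameter
        pytest.mark.xfail(func_bar, reason="Issue#7"),
    ])
    def test_calls(func):
        assert func() == 1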
.. _`adding a custom marker from a plugin`:
Custom marker and command line option to control test runs
----------------------------------------------------------
.. regendoc:wipe
Plugins can provide custom markers and implement specific behaviour
based on it. This is a self-contained example which adds a command
line option and a parametrized test function marker to run tests
specified via named environments::
# content of conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption("-E", action="store", metavar="NAME",
help="only run tests matching the environment NAME.")
def pytest_configure(config):
# register an additional marker
config.addinivalue_line("markers",
"env(name): mark test to run only on named environment")
def pytest_runtest_setup(item):
envmarker = item.get_marker("env")
if envmarker is not None:
envname = envmarker.args[0]
if envname != item.config.getoption("-E"):
pytest.skip("test requires env %r" % envname)
A test file using this local plugin::
# content of test_someenv.py
import pytest
@pytest.mark.env("stage1")
def test_basic_db_operation():
pass
and an example invocation specifying a different environment than what
the test needs::
$ py.test -E stage2
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 items
test_someenv.py s
======= 1 skipped in 0.12 seconds ========
and here is one that specifies exactly the environment needed::
$ py.test -E stage1
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 items
test_someenv.py .
======= 1 passed in 0.12 seconds ========
The ``--markers`` option always gives you a list of available markers::
$ py.test --markers
@pytest.mark.env(name): mark test to run only on named environment
@pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
@pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html
@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
@pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
@pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
Reading markers which were set from multiple places
----------------------------------------------------
.. versionadded: 2.2.2
.. regendoc:wipe
If you are heavily using markers in your test suite, you may encounter the case where a marker is applied several times to a test function. From plugin
code you can read over all such settings. Example::
# content of test_mark_three_times.py
import pytest
pytestmark = pytest.mark.glob("module", x=1)
@pytest.mark.glob("class", x=2)
class TestClass:
@pytest.mark.glob("function", x=3)
def test_something(self):
pass
Here we have the marker "glob" applied three times to the same
test function. From a conftest file we can read it like this::
# content of conftest.py
import sys
def pytest_runtest_setup(item):
g = item.get_marker("glob")
if g is not None:
for info in g:
print ("glob args=%s kwargs=%s" %(info.args, info.kwargs))
sys.stdout.flush()
Let's run this without capturing output and see what we get::
$ py.test -q -s
glob args=('function',) kwargs={'x': 3}
glob args=('class',) kwargs={'x': 2}
glob args=('module',) kwargs={'x': 1}
.
1 passed in 0.12 seconds
Marking platform specific tests with pytest
--------------------------------------------------------------
.. regendoc:wipe
Consider you have a test suite which marks tests for particular platforms,
namely ``pytest.mark.darwin``, ``pytest.mark.win32`` etc. and you
also have tests that run on all platforms and have no specific
marker. If you now want to have a way to only run the tests
for your particular platform, you could use the following plugin::
# content of conftest.py
#
import sys
import pytest
ALL = set("darwin linux2 win32".split())
def pytest_runtest_setup(item):
if isinstance(item, pytest.Function):
plat = sys.platform
if not item.get_marker(plat):
if ALL.intersection(item.keywords):
pytest.skip("cannot run on platform %s" %(plat))
then tests will be skipped if they were specified for a different platform.
Let's do a little test file to show what this looks like::
# content of test_plat.py
import pytest
@pytest.mark.darwin
def test_if_apple_is_evil():
pass
@pytest.mark.linux2
def test_if_linux_works():
pass
@pytest.mark.win32
def test_if_win32_crashes():
pass
def test_runs_everywhere():
pass
then you will see three tests skipped and one executed (in the Python 3 run below ``sys.platform`` is ``linux``, so even the ``linux2``-marked test is skipped)::
$ py.test -rs # this option reports skip reasons
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items
test_plat.py sss.
======= short test summary info ========
SKIP [3] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux
======= 1 passed, 3 skipped in 0.12 seconds ========
Note that if you specify a platform via the ``-m`` marker command line option like this::
$ py.test -m linux2
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items
test_plat.py s
======= 3 tests deselected by "-m 'linux2'" ========
======= 1 skipped, 3 deselected in 0.12 seconds ========
then the unmarked tests will not be run. It is thus a way to restrict the run to the platform-specific tests.
Automatically adding markers based on test names
--------------------------------------------------------
.. regendoc:wipe
If you have a test suite where test function names indicate a certain
type of test, you can implement a hook that automatically defines
markers so that you can use the ``-m`` option with it. Let's look
at this test module::
# content of test_module.py
def test_interface_simple():
assert 0
def test_interface_complex():
assert 0
def test_event_simple():
assert 0
def test_something_else():
assert 0
We want to dynamically define two markers and can do it in a
``conftest.py`` plugin::
# content of conftest.py
import pytest
def pytest_collection_modifyitems(items):
for item in items:
if "interface" in item.nodeid:
item.add_marker(pytest.mark.interface)
elif "event" in item.nodeid:
item.add_marker(pytest.mark.event)
We can now use the ``-m`` option to select one set::
$ py.test -m interface --tb=short
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items
test_module.py FF
======= FAILURES ========
_______ test_interface_simple ________
test_module.py:3: in test_interface_simple
assert 0
E assert 0
_______ test_interface_complex ________
test_module.py:6: in test_interface_complex
assert 0
E assert 0
======= 2 tests deselected by "-m 'interface'" ========
======= 2 failed, 2 deselected in 0.12 seconds ========
or to select both "event" and "interface" tests::
$ py.test -m "interface or event" --tb=short
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items
test_module.py FFF
======= FAILURES ========
_______ test_interface_simple ________
test_module.py:3: in test_interface_simple
assert 0
E assert 0
_______ test_interface_complex ________
test_module.py:6: in test_interface_complex
assert 0
E assert 0
_______ test_event_simple ________
test_module.py:9: in test_event_simple
assert 0
E assert 0
======= 1 tests deselected by "-m 'interface or event'" ========
======= 3 failed, 1 deselected in 0.12 seconds ========

View file

@ -0,0 +1,52 @@
"""
module containing a parametrized tests testing cross-python
serialization via the pickle module.
"""
import py
import pytest
import _pytest._code
pythonlist = ['python2.6', 'python2.7', 'python3.3']
@pytest.fixture(params=pythonlist)
def python1(request, tmpdir):
picklefile = tmpdir.join("data.pickle")
return Python(request.param, picklefile)
@pytest.fixture(params=pythonlist)
def python2(request, python1):
return Python(request.param, python1.picklefile)
class Python:
def __init__(self, version, picklefile):
self.pythonpath = py.path.local.sysfind(version)
if not self.pythonpath:
pytest.skip("%r not found" %(version,))
self.picklefile = picklefile
def dumps(self, obj):
dumpfile = self.picklefile.dirpath("dump.py")
dumpfile.write(_pytest._code.Source("""
import pickle
f = open(%r, 'wb')
s = pickle.dump(%r, f, protocol=2)
f.close()
""" % (str(self.picklefile), obj)))
py.process.cmdexec("%s %s" %(self.pythonpath, dumpfile))
def load_and_is_true(self, expression):
loadfile = self.picklefile.dirpath("load.py")
loadfile.write(_pytest._code.Source("""
import pickle
f = open(%r, 'rb')
obj = pickle.load(f)
f.close()
res = eval(%r)
if not res:
raise SystemExit(1)
""" % (str(self.picklefile), expression)))
print (loadfile)
py.process.cmdexec("%s %s" %(self.pythonpath, loadfile))
@pytest.mark.parametrize("obj", [42, {}, {1:3},])
def test_basic_objects(python1, python2, obj):
python1.dumps(obj)
python2.load_and_is_true("obj == %s" % obj)

View file

@ -0,0 +1,91 @@
.. _`non-python tests`:
Working with non-python tests
====================================================
.. _`yaml plugin`:
A basic example for specifying tests in Yaml files
--------------------------------------------------------------
.. _`pytest-yamlwsgi`: http://bitbucket.org/aafshar/pytest-yamlwsgi/src/tip/pytest_yamlwsgi.py
.. _`PyYAML`: http://pypi.python.org/pypi/PyYAML/
Here is an example ``conftest.py`` (extracted from Ali Afshar's special purpose `pytest-yamlwsgi`_ plugin). This ``conftest.py`` will collect ``test*.yml`` files and will execute the yaml-formatted content as custom tests:
.. include:: nonpython/conftest.py
:literal:
You can create a simple example file:
.. include:: nonpython/test_simple.yml
:literal:
and if you installed `PyYAML`_ or a compatible YAML-parser you can
now execute the test specification::
nonpython $ py.test test_simple.yml
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
collected 2 items
test_simple.yml F.
======= FAILURES ========
_______ usecase: hello ________
usecase execution failed
spec failed: 'some': 'other'
no further details known at this point.
======= 1 failed, 1 passed in 0.12 seconds ========
.. regendoc:wipe
You get one dot for the passing ``sub1: sub1`` check and one failure.
Obviously in the above ``conftest.py`` you'll want to implement a more
interesting interpretation of the yaml-values. You can easily write
your own domain specific testing language this way.
.. note::
``repr_failure(excinfo)`` is called for representing test failures.
If you create custom collection nodes you can return an error
representation string of your choice. It
will be reported as a (red) string.
``reportinfo()`` is used for representing the test location and is also
consulted when reporting in ``verbose`` mode::
nonpython $ py.test -v
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
cachedir: .cache
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
collecting ... collected 2 items
test_simple.yml::hello FAILED
test_simple.yml::ok PASSED
======= FAILURES ========
_______ usecase: hello ________
usecase execution failed
spec failed: 'some': 'other'
no further details known at this point.
======= 1 failed, 1 passed in 0.12 seconds ========
.. regendoc:wipe
While developing your custom test collection and execution it's also
interesting to just look at the collection tree::
nonpython $ py.test --collect-only
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
collected 2 items
<YamlFile 'test_simple.yml'>
<YamlItem 'hello'>
<YamlItem 'ok'>
======= no tests ran in 0.12 seconds ========

View file

@ -0,0 +1,40 @@
# content of conftest.py
import pytest
def pytest_collect_file(parent, path):
if path.ext == ".yml" and path.basename.startswith("test"):
return YamlFile(path, parent)
class YamlFile(pytest.File):
def collect(self):
import yaml # we need a yaml parser, e.g. PyYAML
raw = yaml.safe_load(self.fspath.open())
for name, spec in raw.items():
yield YamlItem(name, self, spec)
class YamlItem(pytest.Item):
def __init__(self, name, parent, spec):
super(YamlItem, self).__init__(name, parent)
self.spec = spec
def runtest(self):
for name, value in self.spec.items():
# some custom test execution (dumb example follows)
if name != value:
raise YamlException(self, name, value)
def repr_failure(self, excinfo):
""" called when self.runtest() raises an exception. """
if isinstance(excinfo.value, YamlException):
return "\n".join([
"usecase execution failed",
" spec failed: %r: %r" % excinfo.value.args[1:3],
" no further details known at this point."
])
def reportinfo(self):
return self.fspath, 0, "usecase: %s" % self.name
class YamlException(Exception):
""" custom exception for error reporting. """

View file

@ -0,0 +1,7 @@
# test_simple.yml
ok:
sub1: sub1
hello:
world: world
some: other

View file

@ -0,0 +1,475 @@
.. _paramexamples:
Parametrizing tests
=================================================
.. currentmodule:: _pytest.python
``pytest`` allows you to easily parametrize test functions.
For basic docs, see :ref:`parametrize-basics`.
In the following we provide some examples using
the builtin mechanisms.
Generating parameters combinations, depending on command line
----------------------------------------------------------------------------
.. regendoc:wipe
Let's say we want to execute a test with different computation
parameters and the parameter range shall be determined by a command
line argument. Let's first write a simple (do-nothing) computation test::
# content of test_compute.py
def test_compute(param1):
assert param1 < 4
Now we add a test configuration like this::
# content of conftest.py
def pytest_addoption(parser):
parser.addoption("--all", action="store_true",
help="run all combinations")
def pytest_generate_tests(metafunc):
if 'param1' in metafunc.fixturenames:
if metafunc.config.option.all:
end = 5
else:
end = 2
metafunc.parametrize("param1", range(end))
This means that we only run 2 tests if we do not pass ``--all``::
$ py.test -q test_compute.py
..
2 passed in 0.12 seconds
We run only two computations, so we see two dots.
Let's run the full monty::
$ py.test -q --all
....F
======= FAILURES ========
_______ test_compute[4] ________
param1 = 4
def test_compute(param1):
> assert param1 < 4
E assert 4 < 4
test_compute.py:3: AssertionError
1 failed, 4 passed in 0.12 seconds
As expected when running the full range of ``param1`` values
we'll get an error on the last one.
Different options for test IDs
------------------------------------
pytest will build a string that is the test ID for each set of values in a
parametrized test. These IDs can be used with ``-k`` to select specific cases
to run, and they will also identify the specific case when one is failing.
Running pytest with ``--collect-only`` will show the generated IDs.
Numbers, strings, booleans and None will have their usual string representation
used in the test ID. For other objects, pytest will make a string based on
the argument name::
# content of test_time.py
import pytest
from datetime import datetime, timedelta
testdata = [
(datetime(2001, 12, 12), datetime(2001, 12, 11), timedelta(1)),
(datetime(2001, 12, 11), datetime(2001, 12, 12), timedelta(-1)),
]
@pytest.mark.parametrize("a,b,expected", testdata)
def test_timedistance_v0(a, b, expected):
diff = a - b
assert diff == expected
@pytest.mark.parametrize("a,b,expected", testdata, ids=["forward", "backward"])
def test_timedistance_v1(a, b, expected):
diff = a - b
assert diff == expected
def idfn(val):
if isinstance(val, (datetime,)):
# note this wouldn't show any hours/minutes/seconds
return val.strftime('%Y%m%d')
@pytest.mark.parametrize("a,b,expected", testdata, ids=idfn)
def test_timedistance_v2(a, b, expected):
diff = a - b
assert diff == expected
In ``test_timedistance_v0``, we let pytest generate the test IDs.
In ``test_timedistance_v1``, we specified ``ids`` as a list of strings which were
used as the test IDs. These are succinct, but can be a pain to maintain.
In ``test_timedistance_v2``, we specified ``ids`` as a function that can generate a
string representation to make part of the test ID. So our ``datetime`` values use the
label generated by ``idfn``, but because we didn't generate a label for ``timedelta``
objects, they are still using the default pytest representation::
$ py.test test_time.py --collect-only
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 6 items
<Module 'test_time.py'>
<Function 'test_timedistance_v0[a0-b0-expected0]'>
<Function 'test_timedistance_v0[a1-b1-expected1]'>
<Function 'test_timedistance_v1[forward]'>
<Function 'test_timedistance_v1[backward]'>
<Function 'test_timedistance_v2[20011212-20011211-expected0]'>
<Function 'test_timedistance_v2[20011211-20011212-expected1]'>
======= no tests ran in 0.12 seconds ========
A quick port of "testscenarios"
------------------------------------
.. _`test scenarios`: http://pypi.python.org/pypi/testscenarios/
Here is a quick port to run tests configured with `test scenarios`_,
an add-on from Robert Collins for the standard unittest framework. We
only have to work a bit to construct the correct arguments for pytest's
:py:func:`Metafunc.parametrize`::
# content of test_scenarios.py
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for scenario in metafunc.cls.scenarios:
idlist.append(scenario[0])
items = scenario[1].items()
argnames = [x[0] for x in items]
argvalues.append(([x[1] for x in items]))
metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")
scenario1 = ('basic', {'attribute': 'value'})
scenario2 = ('advanced', {'attribute': 'value2'})
class TestSampleWithScenarios:
scenarios = [scenario1, scenario2]
def test_demo1(self, attribute):
assert isinstance(attribute, str)
def test_demo2(self, attribute):
assert isinstance(attribute, str)
This is a fully self-contained example which you can run with::
$ py.test test_scenarios.py
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items
test_scenarios.py ....
======= 4 passed in 0.12 seconds ========
If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::
$ py.test --collect-only test_scenarios.py
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items
<Module 'test_scenarios.py'>
<Class 'TestSampleWithScenarios'>
<Instance '()'>
<Function 'test_demo1[basic]'>
<Function 'test_demo2[basic]'>
<Function 'test_demo1[advanced]'>
<Function 'test_demo2[advanced]'>
======= no tests ran in 0.12 seconds ========
Note that we told ``metafunc.parametrize()`` that our scenario values
should be considered class-scoped. With pytest-2.3 this leads to a
resource-based ordering.
Deferring the setup of parametrized resources
---------------------------------------------------
.. regendoc:wipe
The parametrization of test functions happens at collection
time. It is a good idea to set up expensive resources like DB
connections or subprocesses only when the actual test is run.
Here is a simple example of how you can achieve that. First,
the actual test requiring a ``db`` object::
# content of test_backends.py
import pytest
def test_db_initialized(db):
# a dummy test
if db.__class__.__name__ == "DB2":
pytest.fail("deliberately failing for demo purposes")
We can now add a test configuration that generates two invocations of
the ``test_db_initialized`` function and also implements a factory that
creates a database object for the actual test invocations::
# content of conftest.py
import pytest
def pytest_generate_tests(metafunc):
if 'db' in metafunc.fixturenames:
metafunc.parametrize("db", ['d1', 'd2'], indirect=True)
class DB1:
"one database object"
class DB2:
"alternative database object"
@pytest.fixture
def db(request):
if request.param == "d1":
return DB1()
elif request.param == "d2":
return DB2()
else:
raise ValueError("invalid internal test config")
Let's first see how it looks at collection time::
$ py.test test_backends.py --collect-only
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items
<Module 'test_backends.py'>
<Function 'test_db_initialized[d1]'>
<Function 'test_db_initialized[d2]'>
======= no tests ran in 0.12 seconds ========
And then when we run the test::
$ py.test -q test_backends.py
.F
======= FAILURES ========
_______ test_db_initialized[d2] ________
db = <conftest.DB2 object at 0xdeadbeef>
def test_db_initialized(db):
# a dummy test
if db.__class__.__name__ == "DB2":
> pytest.fail("deliberately failing for demo purposes")
E Failed: deliberately failing for demo purposes
test_backends.py:6: Failed
1 failed, 1 passed in 0.12 seconds
The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``db`` fixture function instantiated each of the DB values during the setup phase, while ``pytest_generate_tests`` generated two corresponding calls to ``test_db_initialized`` during the collection phase.
.. regendoc:wipe
Apply indirect on particular arguments
---------------------------------------------------
Very often parametrization uses more than one argument name. It is possible to apply the
``indirect`` parameter to particular arguments only. This is done by passing a list or tuple of
argument names to ``indirect``. In the example below there is a function ``test_indirect`` which uses
two fixtures: ``x`` and ``y``. Here we pass ``indirect`` a list containing the name of the
fixture ``x``. The indirect parameter will be applied to this argument only, and the value ``a``
will be passed to the respective fixture function::
# content of test_indirect_list.py
import pytest
@pytest.fixture(scope='function')
def x(request):
return request.param * 3
@pytest.fixture(scope='function')
def y(request):
return request.param * 2
@pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x'])
def test_indirect(x,y):
assert x == 'aaa'
assert y == 'b'
Collecting this test shows a single generated item; running it will be successful::
$ py.test test_indirect_list.py --collect-only
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 items
<Module 'test_indirect_list.py'>
<Function 'test_indirect[a-b]'>
======= no tests ran in 0.12 seconds ========
.. regendoc:wipe
Parametrizing test methods through per-class configuration
--------------------------------------------------------------
.. _`unittest parametrizer`: https://github.com/testing-cabal/unittest-ext/blob/master/params.py
Here is an example ``pytest_generate_tests`` function implementing a
parametrization scheme similar to Michael Foord's `unittest
parametrizer`_ but in a lot less code::
# content of ./test_parametrize.py
import pytest
def pytest_generate_tests(metafunc):
# called once per each test function
funcarglist = metafunc.cls.params[metafunc.function.__name__]
argnames = list(funcarglist[0])
metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
for funcargs in funcarglist])
class TestClass:
# a map specifying multiple argument sets for a test method
params = {
'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ],
'test_zerodivision': [dict(a=1, b=0), ],
}
def test_equals(self, a, b):
assert a == b
def test_zerodivision(self, a, b):
pytest.raises(ZeroDivisionError, "a/b")
Our test generator looks up a class-level definition which specifies which
argument sets to use for each test function. Let's run it::
$ py.test -q
F..
======= FAILURES ========
_______ TestClass.test_equals[1-2] ________
self = <test_parametrize.TestClass object at 0xdeadbeef>, a = 1, b = 2
def test_equals(self, a, b):
> assert a == b
E assert 1 == 2
test_parametrize.py:18: AssertionError
1 failed, 2 passed in 0.12 seconds
Indirect parametrization with multiple fixtures
--------------------------------------------------------------
Here is a stripped down real-life example of using parametrized
testing for testing serialization of objects between different python
interpreters. We define a ``test_basic_objects`` function which
is to be run with different sets of arguments for its three arguments:
* ``python1``: first python interpreter, run to pickle-dump an object to a file
* ``python2``: second interpreter, run to pickle-load an object from a file
* ``obj``: object to be dumped/loaded
.. literalinclude:: multipython.py
Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (3 interpreters times 3 interpreters times 3 objects to serialize/deserialize)::
. $ py.test -rs -q multipython.py
ssssssssssss...ssssssssssss
======= short test summary info ========
SKIP [12] $REGENDOC_TMPDIR/CWD/multipython.py:23: 'python3.3' not found
SKIP [12] $REGENDOC_TMPDIR/CWD/multipython.py:23: 'python2.6' not found
3 passed, 24 skipped in 0.12 seconds
Indirect parametrization of optional implementations/imports
--------------------------------------------------------------------
If you want to compare the outcomes of several implementations of a given
API, you can write test functions that receive the already imported implementations
and get skipped in case the implementation is not importable/available. Let's
say we have a "base" implementation and the others (possibly optimized ones)
need to provide similar results::
# content of conftest.py
import pytest
@pytest.fixture(scope="session")
def basemod(request):
return pytest.importorskip("base")
@pytest.fixture(scope="session", params=["opt1", "opt2"])
def optmod(request):
return pytest.importorskip(request.param)
And then a base implementation of a simple function::
# content of base.py
def func1():
return 1
And an optimized version::
# content of opt1.py
def func1():
return 1.0001
And finally a little test module::
# content of test_module.py
def test_func1(basemod, optmod):
assert round(basemod.func1(), 3) == round(optmod.func1(), 3)
If you run this with reporting for skips enabled::
$ py.test -rs test_module.py
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items
test_module.py .s
======= short test summary info ========
SKIP [1] $REGENDOC_TMPDIR/conftest.py:10: could not import 'opt2'
======= 1 passed, 1 skipped in 0.12 seconds ========
You'll see that we don't have an ``opt2`` module and thus the second test run
of our ``test_func1`` was skipped. A few notes:
- the fixture functions in the ``conftest.py`` file are "session-scoped" because we
don't need to import more than once
- if you have multiple test functions and a skipped import, you will see
the ``[1]`` count increasing in the report
- you can put :ref:`@pytest.mark.parametrize <@pytest.mark.parametrize>` style
parametrization on the test functions to parametrize input/output
values as well.

View file

@ -0,0 +1,16 @@
import sys
import pytest
py3 = sys.version_info[0] >= 3
class DummyCollector(pytest.collect.File):
def collect(self):
return []
def pytest_pycollect_makemodule(path, parent):
bn = path.basename
if ("py3" in bn and not py3) or ("py2" in bn and py3):
return DummyCollector(path, parent=parent)

View file

@ -0,0 +1,7 @@
def test_exception_syntax():
try:
0/0
except ZeroDivisionError, e:
pass

View file

@ -0,0 +1,7 @@
def test_exception_syntax():
try:
0/0
except ZeroDivisionError as e:
pass

View file

@ -0,0 +1,11 @@
# run this with $ py.test --collect-only test_collectonly.py
#
def test_function():
pass
class TestClass:
def test_method(self):
pass
def test_anothermethod(self):
pass

View file

@ -0,0 +1,192 @@
Changing standard (Python) test discovery
===============================================
Ignore paths during test collection
-----------------------------------
You can easily ignore certain test directories and modules during collection
by passing the ``--ignore=path`` option on the command line. ``pytest`` allows multiple
``--ignore`` options. Example::
tests/
├── example
│   ├── test_example_01.py
│   ├── test_example_02.py
│   └── test_example_03.py
├── foobar
│   ├── test_foobar_01.py
│   ├── test_foobar_02.py
│   └── test_foobar_03.py
└── hello
└── world
├── test_world_01.py
├── test_world_02.py
└── test_world_03.py
Now if you invoke ``pytest`` with ``--ignore=tests/foobar/test_foobar_03.py --ignore=tests/hello/``,
you will see that ``pytest`` only collects the test modules that do not match the specified patterns::
========= test session starts ==========
platform darwin -- Python 2.7.10, pytest-2.8.2, py-1.4.30, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 5 items
tests/example/test_example_01.py .
tests/example/test_example_02.py .
tests/example/test_example_03.py .
tests/foobar/test_foobar_01.py .
tests/foobar/test_foobar_02.py .
======= 5 passed in 0.02 seconds =======
Changing directory recursion
-----------------------------------------------------
You can set the :confval:`norecursedirs` option in an ini-file, for example your ``setup.cfg`` in the project root directory::
# content of setup.cfg
[pytest]
norecursedirs = .svn _build tmp*
This would tell ``pytest`` to not recurse into typical subversion or sphinx-build directories or into any ``tmp`` prefixed directory.
.. _`change naming conventions`:
Changing naming conventions
-----------------------------------------------------
You can configure different naming conventions by setting
the :confval:`python_files`, :confval:`python_classes` and
:confval:`python_functions` configuration options. Example::
# content of setup.cfg
# can also be defined in tox.ini or pytest.ini file
[pytest]
python_files=check_*.py
python_classes=Check
python_functions=*_check
This would make ``pytest`` look for tests in files that match the
``check_*.py`` glob-pattern, classes whose name starts with ``Check``, and
functions and methods that match ``*_check``. For example, if we have::
# content of check_myapp.py
class CheckMyApp:
def simple_check(self):
pass
def complex_check(self):
pass
then the test collection looks like this::
$ py.test --collect-only
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile: setup.cfg
collected 2 items
<Module 'check_myapp.py'>
<Class 'CheckMyApp'>
<Instance '()'>
<Function 'simple_check'>
<Function 'complex_check'>
======= no tests ran in 0.12 seconds ========
.. note::
the ``python_functions`` and ``python_classes`` options have no effect
for ``unittest.TestCase`` test discovery because pytest delegates
detection of test case methods to unittest code.
Interpreting cmdline arguments as Python packages
-----------------------------------------------------
You can use the ``--pyargs`` option to make ``pytest`` try
interpreting arguments as Python package names, deriving
their file system path and then running the test. For
example, if you have unittest2 installed you can type::
py.test --pyargs unittest2.test.test_skipping -q
which would run the respective test module. As with
other options, you can make this change permanent through
an ini-file and the :confval:`addopts` option::
# content of pytest.ini
[pytest]
addopts = --pyargs
Now a simple invocation of ``py.test NAME`` will check
if NAME exists as an importable package/module and otherwise
treat it as a filesystem path.
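For illustration (the module and path names here are hypothetical), the same
invocation then covers both cases::

    py.test mypkg.tests        # importable: resolved to its file system path
    py.test tests/test_foo.py  # not importable as given: treated as a path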
Finding out what is collected
-----------------------------------------------
You can always peek at the collection tree without running tests like this::
. $ py.test --collect-only pythoncollection.py
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 3 items
<Module 'CWD/pythoncollection.py'>
<Function 'test_function'>
<Class 'TestClass'>
<Instance '()'>
<Function 'test_method'>
<Function 'test_anothermethod'>
======= no tests ran in 0.12 seconds ========
customizing test collection to find all .py files
---------------------------------------------------------
.. regendoc:wipe
You can easily instruct ``pytest`` to discover tests from every Python file::
# content of pytest.ini
[pytest]
python_files = *.py
However, many projects will have a ``setup.py`` which they don't want to be imported. Moreover, there may be files that are only importable by a specific Python version.
For such cases you can dynamically define files to be ignored by listing
them in a ``conftest.py`` file::
# content of conftest.py
import sys
collect_ignore = ["setup.py"]
if sys.version_info[0] > 2:
collect_ignore.append("pkg/module_py2.py")
And then if you have a module file like this::
# content of pkg/module_py2.py
def test_only_on_python2():
try:
assert 0
except Exception, e:
pass
and a setup.py dummy file like this::
# content of setup.py
0/0 # will raise exception if imported
then a pytest run on Python 2 will find the one test and will leave out the
``setup.py`` file::
$ py.test --collect-only
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 0 items
======= no tests ran in 0.12 seconds ========
If you run with a Python 3 interpreter, the module added through the ``conftest.py`` file will not be considered for test collection.

View file

@@ -0,0 +1,598 @@
.. _`tbreportdemo`:
Demo of Python failure reports with pytest
==================================================
Here is a nice run of several tens of failures
and how ``pytest`` presents things (unfortunately
not showing the nice colors here in the HTML that you
get on the terminal - we are working on that):
.. code-block:: python
assertion $ py.test failure_demo.py
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR/assertion, inifile:
collected 42 items
failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
======= FAILURES ========
_______ test_generative[0] ________
param1 = 3, param2 = 6
def test_generative(param1, param2):
> assert param1 * 2 < param2
E assert (3 * 2) < 6
failure_demo.py:16: AssertionError
_______ TestFailing.test_simple ________
self = <failure_demo.TestFailing object at 0xdeadbeef>
def test_simple(self):
def f():
return 42
def g():
return 43
> assert f() == g()
E assert 42 == 43
E + where 42 = <function TestFailing.test_simple.<locals>.f at 0xdeadbeef>()
E + and 43 = <function TestFailing.test_simple.<locals>.g at 0xdeadbeef>()
failure_demo.py:29: AssertionError
_______ TestFailing.test_simple_multiline ________
self = <failure_demo.TestFailing object at 0xdeadbeef>
def test_simple_multiline(self):
otherfunc_multi(
42,
> 6*9)
failure_demo.py:34:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
a = 42, b = 54
def otherfunc_multi(a,b):
> assert (a ==
b)
E assert 42 == 54
failure_demo.py:12: AssertionError
_______ TestFailing.test_not ________
self = <failure_demo.TestFailing object at 0xdeadbeef>
def test_not(self):
def f():
return 42
> assert not f()
E assert not 42
E + where 42 = <function TestFailing.test_not.<locals>.f at 0xdeadbeef>()
failure_demo.py:39: AssertionError
_______ TestSpecialisedExplanations.test_eq_text ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_eq_text(self):
> assert 'spam' == 'eggs'
E assert 'spam' == 'eggs'
E - spam
E + eggs
failure_demo.py:43: AssertionError
_______ TestSpecialisedExplanations.test_eq_similar_text ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_eq_similar_text(self):
> assert 'foo 1 bar' == 'foo 2 bar'
E assert 'foo 1 bar' == 'foo 2 bar'
E - foo 1 bar
E ? ^
E + foo 2 bar
E ? ^
failure_demo.py:46: AssertionError
_______ TestSpecialisedExplanations.test_eq_multiline_text ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_eq_multiline_text(self):
> assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
E assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
E foo
E - spam
E + eggs
E bar
failure_demo.py:49: AssertionError
_______ TestSpecialisedExplanations.test_eq_long_text ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_eq_long_text(self):
a = '1'*100 + 'a' + '2'*100
b = '1'*100 + 'b' + '2'*100
> assert a == b
E assert '111111111111...2222222222222' == '1111111111111...2222222222222'
E Skipping 90 identical leading characters in diff, use -v to show
E Skipping 91 identical trailing characters in diff, use -v to show
E - 1111111111a222222222
E ? ^
E + 1111111111b222222222
E ? ^
failure_demo.py:54: AssertionError
_______ TestSpecialisedExplanations.test_eq_long_text_multiline ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_eq_long_text_multiline(self):
a = '1\n'*100 + 'a' + '2\n'*100
b = '1\n'*100 + 'b' + '2\n'*100
> assert a == b
E assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n'
E Skipping 190 identical leading characters in diff, use -v to show
E Skipping 191 identical trailing characters in diff, use -v to show
E 1
E 1
E 1
E 1
E 1
E - a2
E + b2
E 2
E 2
E 2
E 2
failure_demo.py:59: AssertionError
_______ TestSpecialisedExplanations.test_eq_list ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_eq_list(self):
> assert [0, 1, 2] == [0, 1, 3]
E assert [0, 1, 2] == [0, 1, 3]
E At index 2 diff: 2 != 3
E Use -v to get the full diff
failure_demo.py:62: AssertionError
_______ TestSpecialisedExplanations.test_eq_list_long ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_eq_list_long(self):
a = [0]*100 + [1] + [3]*100
b = [0]*100 + [2] + [3]*100
> assert a == b
E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...]
E At index 100 diff: 1 != 2
E Use -v to get the full diff
failure_demo.py:67: AssertionError
_______ TestSpecialisedExplanations.test_eq_dict ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_eq_dict(self):
> assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
E assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
E Omitting 1 identical items, use -v to show
E Differing items:
E {'b': 1} != {'b': 2}
E Left contains more items:
E {'c': 0}
E Right contains more items:
E {'d': 0}
E Use -v to get the full diff
failure_demo.py:70: AssertionError
_______ TestSpecialisedExplanations.test_eq_set ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_eq_set(self):
> assert set([0, 10, 11, 12]) == set([0, 20, 21])
E assert set([0, 10, 11, 12]) == set([0, 20, 21])
E Extra items in the left set:
E 10
E 11
E 12
E Extra items in the right set:
E 20
E 21
E Use -v to get the full diff
failure_demo.py:73: AssertionError
_______ TestSpecialisedExplanations.test_eq_longer_list ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_eq_longer_list(self):
> assert [1,2] == [1,2,3]
E assert [1, 2] == [1, 2, 3]
E Right contains more items, first extra item: 3
E Use -v to get the full diff
failure_demo.py:76: AssertionError
_______ TestSpecialisedExplanations.test_in_list ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_in_list(self):
> assert 1 in [0, 2, 3, 4, 5]
E assert 1 in [0, 2, 3, 4, 5]
failure_demo.py:79: AssertionError
_______ TestSpecialisedExplanations.test_not_in_text_multiline ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_not_in_text_multiline(self):
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
> assert 'foo' not in text
E assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail'
E 'foo' is contained here:
E some multiline
E text
E which
E includes foo
E ? +++
E and a
E tail
failure_demo.py:83: AssertionError
_______ TestSpecialisedExplanations.test_not_in_text_single ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_not_in_text_single(self):
text = 'single foo line'
> assert 'foo' not in text
E assert 'foo' not in 'single foo line'
E 'foo' is contained here:
E single foo line
E ? +++
failure_demo.py:87: AssertionError
_______ TestSpecialisedExplanations.test_not_in_text_single_long ________
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_not_in_text_single_long(self):
text = 'head ' * 50 + 'foo ' + 'tail ' * 20
> assert 'foo' not in text
E assert 'foo' not in 'head head head head hea...ail tail tail tail tail '
E 'foo' is contained here:
E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E ? +++
failure_demo.py:91: AssertionError
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______
self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>
def test_not_in_text_single_long_term(self):
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
> assert 'f'*70 not in text
E assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail '
E 'ffffffffffffffffff...fffffffffffffffffff' is contained here:
E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
failure_demo.py:95: AssertionError
_______ test_attribute ________
def test_attribute():
class Foo(object):
b = 1
i = Foo()
> assert i.b == 2
E assert 1 == 2
E + where 1 = <failure_demo.test_attribute.<locals>.Foo object at 0xdeadbeef>.b
failure_demo.py:102: AssertionError
_______ test_attribute_instance ________
def test_attribute_instance():
class Foo(object):
b = 1
> assert Foo().b == 2
E assert 1 == 2
E + where 1 = <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef>.b
E + where <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef> = <class 'failure_demo.test_attribute_instance.<locals>.Foo'>()
failure_demo.py:108: AssertionError
_______ test_attribute_failure ________
def test_attribute_failure():
class Foo(object):
def _get_b(self):
raise Exception('Failed to get attrib')
b = property(_get_b)
i = Foo()
> assert i.b == 2
failure_demo.py:117:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <failure_demo.test_attribute_failure.<locals>.Foo object at 0xdeadbeef>
def _get_b(self):
> raise Exception('Failed to get attrib')
E Exception: Failed to get attrib
failure_demo.py:114: Exception
_______ test_attribute_multiple ________
def test_attribute_multiple():
class Foo(object):
b = 1
class Bar(object):
b = 2
> assert Foo().b == Bar().b
E assert 1 == 2
E + where 1 = <failure_demo.test_attribute_multiple.<locals>.Foo object at 0xdeadbeef>.b
E + where <failure_demo.test_attribute_multiple.<locals>.Foo object at 0xdeadbeef> = <class 'failure_demo.test_attribute_multiple.<locals>.Foo'>()
E + and 2 = <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef>.b
E + where <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef> = <class 'failure_demo.test_attribute_multiple.<locals>.Bar'>()
failure_demo.py:125: AssertionError
_______ TestRaises.test_raises ________
self = <failure_demo.TestRaises object at 0xdeadbeef>
def test_raises(self):
s = 'qwe'
> raises(TypeError, "int(s)")
failure_demo.py:134:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
> int(s)
E ValueError: invalid literal for int() with base 10: 'qwe'
<0-codegen $PYTHON_PREFIX/lib/python3.4/site-packages/_pytest/python.py:1302>:1: ValueError
_______ TestRaises.test_raises_doesnt ________
self = <failure_demo.TestRaises object at 0xdeadbeef>
def test_raises_doesnt(self):
> raises(IOError, "int('3')")
E Failed: DID NOT RAISE <class 'OSError'>
failure_demo.py:137: Failed
_______ TestRaises.test_raise ________
self = <failure_demo.TestRaises object at 0xdeadbeef>
def test_raise(self):
> raise ValueError("demo error")
E ValueError: demo error
failure_demo.py:140: ValueError
_______ TestRaises.test_tupleerror ________
self = <failure_demo.TestRaises object at 0xdeadbeef>
def test_tupleerror(self):
> a,b = [1]
E ValueError: need more than 1 value to unpack
failure_demo.py:143: ValueError
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
self = <failure_demo.TestRaises object at 0xdeadbeef>
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
l = [1,2,3]
print ("l is %r" % l)
> a,b = l.pop()
E TypeError: 'int' object is not iterable
failure_demo.py:148: TypeError
--------------------------- Captured stdout call ---------------------------
l is [1, 2, 3]
_______ TestRaises.test_some_error ________
self = <failure_demo.TestRaises object at 0xdeadbeef>
def test_some_error(self):
> if namenotexi:
E NameError: name 'namenotexi' is not defined
failure_demo.py:151: NameError
_______ test_dynamic_compile_shows_nicely ________
def test_dynamic_compile_shows_nicely():
src = 'def foo():\n assert 1 == 0\n'
name = 'abc-123'
module = py.std.imp.new_module(name)
code = _pytest._code.compile(src, name, 'exec')
py.builtin.exec_(code, module.__dict__)
py.std.sys.modules[name] = module
> module.foo()
failure_demo.py:166:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
def foo():
> assert 1 == 0
E assert 1 == 0
<2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:163>:2: AssertionError
_______ TestMoreErrors.test_complex_error ________
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
def test_complex_error(self):
def f():
return 44
def g():
return 43
> somefunc(f(), g())
failure_demo.py:176:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
failure_demo.py:9: in somefunc
otherfunc(x,y)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
a = 44, b = 43
def otherfunc(a,b):
> assert a==b
E assert 44 == 43
failure_demo.py:6: AssertionError
_______ TestMoreErrors.test_z1_unpack_error ________
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
def test_z1_unpack_error(self):
l = []
> a,b = l
E ValueError: need more than 0 values to unpack
failure_demo.py:180: ValueError
_______ TestMoreErrors.test_z2_type_error ________
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
def test_z2_type_error(self):
l = 3
> a,b = l
E TypeError: 'int' object is not iterable
failure_demo.py:184: TypeError
_______ TestMoreErrors.test_startswith ________
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
def test_startswith(self):
s = "123"
g = "456"
> assert s.startswith(g)
E assert <built-in method startswith of str object at 0xdeadbeef>('456')
E + where <built-in method startswith of str object at 0xdeadbeef> = '123'.startswith
failure_demo.py:189: AssertionError
_______ TestMoreErrors.test_startswith_nested ________
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
def test_startswith_nested(self):
def f():
return "123"
def g():
return "456"
> assert f().startswith(g())
E assert <built-in method startswith of str object at 0xdeadbeef>('456')
E + where <built-in method startswith of str object at 0xdeadbeef> = '123'.startswith
E + where '123' = <function TestMoreErrors.test_startswith_nested.<locals>.f at 0xdeadbeef>()
E + and '456' = <function TestMoreErrors.test_startswith_nested.<locals>.g at 0xdeadbeef>()
failure_demo.py:196: AssertionError
_______ TestMoreErrors.test_global_func ________
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
def test_global_func(self):
> assert isinstance(globf(42), float)
E assert isinstance(43, float)
E + where 43 = globf(42)
failure_demo.py:199: AssertionError
_______ TestMoreErrors.test_instance ________
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
def test_instance(self):
self.x = 6*7
> assert self.x != 42
E assert 42 != 42
E + where 42 = <failure_demo.TestMoreErrors object at 0xdeadbeef>.x
failure_demo.py:203: AssertionError
_______ TestMoreErrors.test_compare ________
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
def test_compare(self):
> assert globf(10) < 5
E assert 11 < 5
E + where 11 = globf(10)
failure_demo.py:206: AssertionError
_______ TestMoreErrors.test_try_finally ________
self = <failure_demo.TestMoreErrors object at 0xdeadbeef>
def test_try_finally(self):
x = 1
try:
> assert x == 0
E assert 1 == 0
failure_demo.py:211: AssertionError
_______ TestCustomAssertMsg.test_single_line ________
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
def test_single_line(self):
class A:
a = 1
b = 2
> assert A.a == b, "A.a appears not to be b"
E AssertionError: A.a appears not to be b
E assert 1 == 2
E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_single_line.<locals>.A'>.a
failure_demo.py:222: AssertionError
_______ TestCustomAssertMsg.test_multiline ________
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
def test_multiline(self):
class A:
a = 1
b = 2
> assert A.a == b, "A.a appears not to be b\n" \
"or does not appear to be b\none of those"
E AssertionError: A.a appears not to be b
E or does not appear to be b
E one of those
E assert 1 == 2
E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_multiline.<locals>.A'>.a
failure_demo.py:228: AssertionError
_______ TestCustomAssertMsg.test_custom_repr ________
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
def test_custom_repr(self):
class JSON:
a = 1
def __repr__(self):
return "This is JSON\n{\n 'foo': 'bar'\n}"
a = JSON()
b = 2
> assert a.a == b, a
E AssertionError: This is JSON
E {
E 'foo': 'bar'
E }
E assert 1 == 2
E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a
failure_demo.py:238: AssertionError
======= 42 failed in 0.12 seconds ========

View file

@@ -0,0 +1,751 @@
.. highlightlang:: python
Basic patterns and examples
==========================================================
Pass different values to a test function, depending on command line options
----------------------------------------------------------------------------
.. regendoc:wipe
Suppose we want to write a test that depends on a command line option.
Here is a basic pattern to achieve this::
# content of test_sample.py
def test_answer(cmdopt):
if cmdopt == "type1":
print ("first")
elif cmdopt == "type2":
print ("second")
assert 0 # to see what was printed
For this to work we need to add a command line option and
provide the ``cmdopt`` through a :ref:`fixture function <fixture function>`::
# content of conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption("--cmdopt", action="store", default="type1",
help="my option: type1 or type2")
@pytest.fixture
def cmdopt(request):
return request.config.getoption("--cmdopt")
Let's run this without supplying our new option::
$ py.test -q test_sample.py
F
======= FAILURES ========
_______ test_answer ________
cmdopt = 'type1'
def test_answer(cmdopt):
if cmdopt == "type1":
print ("first")
elif cmdopt == "type2":
print ("second")
> assert 0 # to see what was printed
E assert 0
test_sample.py:6: AssertionError
--------------------------- Captured stdout call ---------------------------
first
1 failed in 0.12 seconds
And now with supplying a command line option::
$ py.test -q --cmdopt=type2
F
======= FAILURES ========
_______ test_answer ________
cmdopt = 'type2'
def test_answer(cmdopt):
if cmdopt == "type1":
print ("first")
elif cmdopt == "type2":
print ("second")
> assert 0 # to see what was printed
E assert 0
test_sample.py:6: AssertionError
--------------------------- Captured stdout call ---------------------------
second
1 failed in 0.12 seconds
You can see that the command line option arrived in our test. This
completes the basic pattern. However, one often wants to process
command line options outside of the test and pass in different or
more complex objects instead.
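As a minimal sketch of that idea (the ``Type1``/``Type2`` classes are
hypothetical), the fixture can map the raw option string onto a richer object
before any test sees it::

    # content of conftest.py
    import pytest

    class Type1:
        pass

    class Type2:
        pass

    def pytest_addoption(parser):
        parser.addoption("--cmdopt", action="store", default="type1",
            help="my option: type1 or type2")

    @pytest.fixture
    def cmdopt(request):
        # construct an object from the raw string; tests then receive
        # a ready-made instance instead of parsing the option themselves
        return {"type1": Type1, "type2": Type2}[
            request.config.getoption("--cmdopt")]()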
Dynamically adding command line options
--------------------------------------------------------------
.. regendoc:wipe
Through :confval:`addopts` you can statically add command line
options for your project. You can also dynamically modify
the command line arguments before they get processed::
# content of conftest.py
import sys
def pytest_cmdline_preparse(args):
if 'xdist' in sys.modules: # pytest-xdist plugin
import multiprocessing
            num = max(multiprocessing.cpu_count() // 2, 1)  # integer division: "-n" needs a whole number
args[:] = ["-n", str(num)] + args
If you have the :ref:`xdist plugin <xdist>` installed
you will now always perform test runs using a number
of subprocesses close to your number of CPU cores. Running in an empty
directory with the above conftest.py::
$ py.test
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 0 items
======= no tests ran in 0.12 seconds ========
.. _`excontrolskip`:
Control skipping of tests according to command line option
--------------------------------------------------------------
.. regendoc:wipe
Here is a ``conftest.py`` file adding a ``--runslow`` command
line option to control skipping of ``slow`` marked tests::
# content of conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true",
help="run slow tests")
We can now write a test module like this::
# content of test_module.py
import pytest
slow = pytest.mark.skipif(
not pytest.config.getoption("--runslow"),
reason="need --runslow option to run"
)
def test_func_fast():
pass
@slow
def test_func_slow():
pass
and when running it we will see a skipped "slow" test::
$ py.test -rs # "-rs" means report details on the little 's'
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items
test_module.py .s
======= short test summary info ========
SKIP [1] test_module.py:14: need --runslow option to run
======= 1 passed, 1 skipped in 0.12 seconds ========
Or run it including the ``slow`` marked test::
$ py.test --runslow
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items
test_module.py ..
======= 2 passed in 0.12 seconds ========
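Note that the module-level ``pytest.config`` lookup above happens at import
time. As an alternative sketch (a different technique than the one shown
above), you can mark tests with a plain ``@pytest.mark.slow`` and keep the
whole decision in ``conftest.py`` via a collection hook::

    # alternative content of conftest.py
    import pytest

    def pytest_addoption(parser):
        parser.addoption("--runslow", action="store_true",
            help="run slow tests")

    def pytest_collection_modifyitems(config, items):
        if config.getoption("--runslow"):
            return  # --runslow given: do not skip slow tests
        skip_slow = pytest.mark.skip(reason="need --runslow option to run")
        for item in items:
            if "slow" in item.keywords:
                item.add_marker(skip_slow)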
Writing well integrated assertion helpers
--------------------------------------------------
.. regendoc:wipe
If you have a test helper function called from a test you can
use the ``pytest.fail`` helper to fail a test with a certain message.
The test support function will not show up in the traceback if you
set the ``__tracebackhide__`` variable somewhere in the helper function.
Example::
# content of test_checkconfig.py
import pytest
def checkconfig(x):
__tracebackhide__ = True
if not hasattr(x, "config"):
pytest.fail("not configured: %s" %(x,))
def test_something():
checkconfig(42)
The ``__tracebackhide__`` setting influences how ``pytest`` shows
tracebacks: the ``checkconfig`` function will not be shown
unless the ``--fulltrace`` command line option is specified.
Let's run our little function::
$ py.test -q test_checkconfig.py
F
======= FAILURES ========
_______ test_something ________
def test_something():
> checkconfig(42)
E Failed: not configured: 42
test_checkconfig.py:8: Failed
1 failed in 0.12 seconds
Detect if running from within a pytest run
--------------------------------------------------------------
.. regendoc:wipe
Usually it is a bad idea to make application code
behave differently if called from a test. But if you
absolutely must find out if your application code is
running from a test you can do something like this::
# content of conftest.py
    import sys

    def pytest_configure(config):
        sys._called_from_test = True

    def pytest_unconfigure(config):
        del sys._called_from_test
and then check for the ``sys._called_from_test`` flag::
    if hasattr(sys, '_called_from_test'):
        # called from within a test run
        pass
    else:
        # called "normally"
        pass

accordingly in your application. It's also a good idea
to use your own application module rather than ``sys``
for handling the flag, as sketched below.
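A minimal sketch of that variant, assuming a hypothetical ``mymodule`` in
your application::

    # content of conftest.py
    def pytest_configure(config):
        import mymodule  # hypothetical application module
        mymodule._called_from_test = True

    def pytest_unconfigure(config):
        import mymodule
        mymodule._called_from_test = False

Application code can then check ``getattr(mymodule, "_called_from_test", False)``
without touching ``sys`` at all.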
Adding info to test report header
--------------------------------------------------------------
.. regendoc:wipe
It's easy to present extra information in a ``pytest`` run::
# content of conftest.py
def pytest_report_header(config):
return "project deps: mylib-1.1"
which will add the string to the test header accordingly::
$ py.test
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
project deps: mylib-1.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 0 items
======= no tests ran in 0.12 seconds ========
.. regendoc:wipe
You can also return a list of strings which will be considered as several
lines of information. You can of course also make the amount of reporting
information depend on e.g. the value of ``config.option.verbose`` so that
you present more information appropriately::
# content of conftest.py
def pytest_report_header(config):
if config.option.verbose > 0:
return ["info1: did you know that ...", "did you?"]
which will add info only when run with ``-v``::
$ py.test -v
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.4
cachedir: .cache
info1: did you know that ...
did you?
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 0 items
======= no tests ran in 0.12 seconds ========
and nothing when run plainly::
$ py.test
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 0 items
======= no tests ran in 0.12 seconds ========
profiling test duration
--------------------------
.. regendoc:wipe
.. versionadded:: 2.2
If you have a large, slow-running test suite you might want to find
out which tests are the slowest. Let's make an artificial test suite::
# content of test_some_are_slow.py
import time
def test_funcfast():
pass
def test_funcslow1():
time.sleep(0.1)
def test_funcslow2():
time.sleep(0.2)
Now we can profile which test functions execute the slowest::
$ py.test --durations=3
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 3 items
test_some_are_slow.py ...
======= slowest 3 test durations ========
0.20s call test_some_are_slow.py::test_funcslow2
0.10s call test_some_are_slow.py::test_funcslow1
0.00s setup test_some_are_slow.py::test_funcfast
======= 3 passed in 0.12 seconds ========
incremental testing - test steps
---------------------------------------------------
.. regendoc:wipe
Sometimes you may have a testing situation which consists of a series
of test steps. If one step fails it makes no sense to execute further
steps as they are all expected to fail anyway and their tracebacks
add no insight. Here is a simple ``conftest.py`` file that introduces
an ``incremental`` marker to be used on classes::
# content of conftest.py
import pytest
def pytest_runtest_makereport(item, call):
if "incremental" in item.keywords:
if call.excinfo is not None:
parent = item.parent
parent._previousfailed = item
def pytest_runtest_setup(item):
if "incremental" in item.keywords:
previousfailed = getattr(item.parent, "_previousfailed", None)
if previousfailed is not None:
pytest.xfail("previous test failed (%s)" %previousfailed.name)
These two hook implementations work together to abort incremental-marked
tests in a class. Here is a test module example::
# content of test_step.py
import pytest
@pytest.mark.incremental
class TestUserHandling:
def test_login(self):
pass
def test_modification(self):
assert 0
def test_deletion(self):
pass
def test_normal():
pass
If we run this::
$ py.test -rx
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items
test_step.py .Fx.
======= short test summary info ========
XFAIL test_step.py::TestUserHandling::()::test_deletion
reason: previous test failed (test_modification)
======= FAILURES ========
_______ TestUserHandling.test_modification ________
self = <test_step.TestUserHandling object at 0xdeadbeef>
def test_modification(self):
> assert 0
E assert 0
test_step.py:9: AssertionError
======= 1 failed, 2 passed, 1 xfailed in 0.12 seconds ========
We'll see that ``test_deletion`` was not executed because ``test_modification``
failed. It is reported as an "expected failure".
Package/Directory-level fixtures (setups)
-------------------------------------------------------
If you have nested test directories, you can have per-directory fixture scopes
by placing fixture functions in a ``conftest.py`` file in that directory.
You can use all types of fixtures including :ref:`autouse fixtures
<autouse fixtures>` which are the equivalent of xUnit's setup/teardown
concept. It's however recommended to have explicit fixture references in your
tests or test classes rather than relying on implicitly executing
setup/teardown functions, especially if they are far away from the actual tests.
Here is an example of making a ``db`` fixture available in a directory::
# content of a/conftest.py
import pytest
class DB:
pass
@pytest.fixture(scope="session")
def db():
return DB()
and then a test module in that directory::
# content of a/test_db.py
def test_a1(db):
assert 0, db # to show value
another test module::
# content of a/test_db2.py
def test_a2(db):
assert 0, db # to show value
and then a module in a sister directory which will not see
the ``db`` fixture::
# content of b/test_error.py
def test_root(db): # no db here, will error out
pass
We can run this::
$ py.test
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 7 items
test_step.py .Fx.
a/test_db.py F
a/test_db2.py F
b/test_error.py E
======= ERRORS ========
_______ ERROR at setup of test_root ________
file $REGENDOC_TMPDIR/b/test_error.py, line 1
def test_root(db): # no db here, will error out
fixture 'db' not found
available fixtures: record_xml_property, recwarn, cache, capsys, pytestconfig, tmpdir_factory, capfd, monkeypatch, tmpdir
use 'py.test --fixtures [testpath]' for help on them.
$REGENDOC_TMPDIR/b/test_error.py:1
======= FAILURES ========
_______ TestUserHandling.test_modification ________
self = <test_step.TestUserHandling object at 0xdeadbeef>
def test_modification(self):
> assert 0
E assert 0
test_step.py:9: AssertionError
_______ test_a1 ________
db = <conftest.DB object at 0xdeadbeef>
def test_a1(db):
> assert 0, db # to show value
E AssertionError: <conftest.DB object at 0xdeadbeef>
E assert 0
a/test_db.py:2: AssertionError
_______ test_a2 ________
db = <conftest.DB object at 0xdeadbeef>
def test_a2(db):
> assert 0, db # to show value
E AssertionError: <conftest.DB object at 0xdeadbeef>
E assert 0
a/test_db2.py:2: AssertionError
======= 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========
The two test modules in the ``a`` directory see the same ``db`` fixture instance
while the one test in the sister-directory ``b`` doesn't see it. We could of course
also define a ``db`` fixture in that sister directory's ``conftest.py`` file.
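For completeness, such a sister-directory fixture could look like this
minimal sketch, deliberately independent from the one in ``a/``::

    # content of b/conftest.py
    import pytest

    class DB:
        pass

    @pytest.fixture(scope="session")
    def db():
        # a separate instance, visible only to tests under ``b/``
        return DB()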
Note that each fixture is only instantiated if there is a test actually needing
it (unless you use "autouse" fixtures, which are always set up ahead of the
first test executing).
post-process test reports / failures
---------------------------------------
If you want to postprocess test reports and need access to the executing
environment you can implement a hook that gets called when the test
"report" object is about to be created. Here we write out all failing
test calls and also access a fixture (if it was used by the test) in
case you want to query/look at it during your post processing. In our
case we just write some information out to a ``failures`` file::
# content of conftest.py
import pytest
import os.path
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
# we only look at actual failing test calls, not setup/teardown
if rep.when == "call" and rep.failed:
mode = "a" if os.path.exists("failures") else "w"
with open("failures", mode) as f:
# let's also access a fixture for the fun of it
if "tmpdir" in item.fixturenames:
extra = " (%s)" % item.funcargs["tmpdir"]
else:
extra = ""
f.write(rep.nodeid + extra + "\n")
If you then have failing tests::
# content of test_module.py
def test_fail1(tmpdir):
assert 0
def test_fail2():
assert 0
and run them::
$ py.test test_module.py
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items
test_module.py FF
======= FAILURES ========
_______ test_fail1 ________
tmpdir = local('PYTEST_TMPDIR/test_fail10')
def test_fail1(tmpdir):
> assert 0
E assert 0
test_module.py:2: AssertionError
_______ test_fail2 ________
def test_fail2():
> assert 0
E assert 0
test_module.py:4: AssertionError
======= 2 failed in 0.12 seconds ========
you will have a "failures" file which contains the failing test ids::
$ cat failures
test_module.py::test_fail1 (PYTEST_TMPDIR/test_fail10)
test_module.py::test_fail2
Making test result information available in fixtures
-----------------------------------------------------------
.. regendoc:wipe
If you want to make test result reports available in fixture finalizers
here is a little example implemented via a local plugin::
# content of conftest.py
import pytest
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
# execute all other hooks to obtain the report object
outcome = yield
rep = outcome.get_result()
        # set a report attribute for each phase of a call, which can
        # be "setup", "call", "teardown"
setattr(item, "rep_" + rep.when, rep)
@pytest.fixture
def something(request):
def fin():
# request.node is an "item" because we use the default
# "function" scope
if request.node.rep_setup.failed:
print ("setting up a test failed!", request.node.nodeid)
elif request.node.rep_setup.passed:
if request.node.rep_call.failed:
print ("executing test failed", request.node.nodeid)
request.addfinalizer(fin)
If you then have failing tests::
# content of test_module.py
import pytest
@pytest.fixture
def other():
assert 0
def test_setup_fails(something, other):
pass
def test_call_fails(something):
assert 0
def test_fail2():
assert 0
and run it::
$ py.test -s test_module.py
======= test session starts ========
platform linux -- Python 3.4.0, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 3 items
test_module.py Esetting up a test failed! test_module.py::test_setup_fails
Fexecuting test failed test_module.py::test_call_fails
F
======= ERRORS ========
_______ ERROR at setup of test_setup_fails ________
@pytest.fixture
def other():
> assert 0
E assert 0
test_module.py:6: AssertionError
======= FAILURES ========
_______ test_call_fails ________
something = None
def test_call_fails(something):
> assert 0
E assert 0
test_module.py:12: AssertionError
_______ test_fail2 ________
def test_fail2():
> assert 0
E assert 0
test_module.py:15: AssertionError
======= 2 failed, 1 error in 0.12 seconds ========
You'll see that the fixture finalizers could use the precise reporting
information.
Integrating pytest runner and cx_freeze
-----------------------------------------------------------
If you freeze your application using a tool like
`cx_freeze <http://cx-freeze.readthedocs.org>`_ in order to distribute it
to your end-users, it is a good idea to also package your test runner and run
your tests using the frozen application.
This way packaging errors such as dependencies not being
included in the executable can be detected early, while also allowing you to
send test files to users so they can run them on their machines, which can be
invaluable for obtaining more information about hard-to-reproduce bugs.
Unfortunately ``cx_freeze`` can't discover ``pytest``'s internal modules
automatically because of its use of dynamic module loading, so you
must declare them explicitly using ``pytest.freeze_includes()``::
# contents of setup.py
from cx_Freeze import setup, Executable
import pytest
    setup(
        name="app_main",
        executables=[Executable("app_main.py")],
        options={"build_exe": {"includes": pytest.freeze_includes()}},
        # ... other options
    )
If you don't want to ship a different executable just in order to run your tests,
you can make your program check for a certain flag and pass control
over to ``pytest`` instead. For example::
# contents of app_main.py
import sys
if len(sys.argv) > 1 and sys.argv[1] == '--pytest':
import pytest
sys.exit(pytest.main(sys.argv[2:]))
else:
# normal application execution: at this point argv can be parsed
# by your argument-parsing library of choice as usual
...
This makes it convenient to execute your tests from within your frozen
application, using standard ``py.test`` command-line options::
./app_main --pytest --verbose --tb=long --junitxml=results.xml test-suite/

View file

@@ -0,0 +1,72 @@
A session-fixture which can look at all collected tests
----------------------------------------------------------------
A session-scoped fixture effectively has access to all
collected test items. Here is an example of a fixture
function which walks all collected tests, checks whether
their test class defines a ``callme`` method, and
calls it::
# content of conftest.py
import pytest
@pytest.fixture(scope="session", autouse=True)
def callattr_ahead_of_alltests(request):
print ("callattr_ahead_of_alltests called")
seen = set([None])
session = request.node
for item in session.items:
cls = item.getparent(pytest.Class)
if cls not in seen:
if hasattr(cls.obj, "callme"):
cls.obj.callme()
seen.add(cls)
Test classes may now define a ``callme`` method which
will be called ahead of running any tests::
# content of test_module.py
class TestHello:
@classmethod
def callme(cls):
print ("callme called!")
def test_method1(self):
print ("test_method1 called")
def test_method2(self):
print ("test_method1 called")
class TestOther:
@classmethod
def callme(cls):
print ("callme other called")
def test_other(self):
print ("test other")
# works with unittest as well ...
import unittest
class SomeTest(unittest.TestCase):
@classmethod
def callme(self):
print ("SomeTest callme called")
def test_unit1(self):
print ("test_unit1 method called")
If you run this without output capturing::
$ py.test -q -s test_module.py
callattr_ahead_of_alltests called
callme called!
callme other called
SomeTest callme called
test_method1 called
.test_method1 called
.test other
.test_unit1 method called
.
4 passed in 0.12 seconds

View file

@@ -0,0 +1,30 @@
import pytest
xfail = pytest.mark.xfail

@xfail  # unconditionally expected to fail
def test_hello():
    assert 0

@xfail(run=False)  # expected to fail, but don't even run it
def test_hello2():
    assert 0

@xfail("hasattr(os, 'sep')")  # condition string, evaluated by pytest
def test_hello3():
    assert 0

@xfail(reason="bug 110")  # document why the failure is expected
def test_hello4():
    assert 0

@xfail('pytest.__version__[0] != "17"')  # condition on the pytest version
def test_hello5():
    assert 0

def test_hello6():
    pytest.xfail("reason")  # imperatively xfail from within the test

@xfail(raises=IndexError)  # only an IndexError counts as expected
def test_hello7():
    x = []
    x[1] = 1

Some files were not shown because too many files have changed in this diff.