Update web-platform-tests to revision d011702f368b88b3bae86e7a8fd2ddd22e18b33c

This commit is contained in:
Ms2ger 2016-04-12 09:07:41 +02:00
parent f9608022ca
commit 299ad0f9d0
573 changed files with 38776 additions and 14942 deletions


@@ -15,3 +15,6 @@
[submodule "pytest"]
path = pytest
url = https://github.com/pytest-dev/pytest.git
[submodule "webdriver"]
path = webdriver
url = https://github.com/w3c/wdclient.git


@@ -9,4 +9,6 @@ sys.path.insert(0, os.path.join(repo_root, "tools", "six"))
sys.path.insert(0, os.path.join(repo_root, "tools", "html5lib"))
sys.path.insert(0, os.path.join(repo_root, "tools", "wptserve"))
sys.path.insert(0, os.path.join(repo_root, "tools", "pywebsocket", "src"))
sys.path.insert(0, os.path.join(repo_root, "tools", "py"))
sys.path.insert(0, os.path.join(repo_root, "tools", "pytest"))
sys.path.insert(0, os.path.join(repo_root, "tools", "webdriver"))


@@ -0,0 +1,29 @@
# Automatically generated by `hgimportsvn`
syntax:glob
.svn
.hgsvn
# These lines are suggested according to the svn:ignore property
# Feel free to enable them by uncommenting them
syntax:glob
*.pyc
*.pyo
*.swp
*.html
*.class
*.orig
*~
doc/_build
build/
dist/
*.egg-info
issue/
env/
3rdparty/
.tox
lib/
bin/
include/
src/


@@ -0,0 +1,67 @@
52c6d9e78777a5a34e813123997dfc614a1a4767 1.0.0b3
1c7aaa8c61f3b0945921a9acc7beb184201aed4b 1.0.0b4
1c7aaa8c61f3b0945921a9acc7beb184201aed4b 1.0.0b4
0000000000000000000000000000000000000000 1.0.0b4
0000000000000000000000000000000000000000 1.0.0b4
8cd6eb91eba313b012d6e568f37d844dc0751f2e 1.0.0b4
8cd6eb91eba313b012d6e568f37d844dc0751f2e 1.0.0b4
0000000000000000000000000000000000000000 1.0.0b4
2cc0507f117ffe721dff7ee026648cfce00ec92f 1.0.0b6
86f1e1b6e49bf5882a809f11edd1dbb08162cdad 1.0.0b8
86f1e1b6e49bf5882a809f11edd1dbb08162cdad 1.0.0b8
c63f35c266cbb26dad6b87b5e115d65685adf448 1.0.0b8
c63f35c266cbb26dad6b87b5e115d65685adf448 1.0.0b8
0eaa0fdf2ba0163cf534dc2eff4ba2e5fc66c261 1.0.0b8
e2a60653cb490aeed81bbbd83c070b99401c211c 1.0.0b9
5ea0cdf7854c3d4278d36eda94a2b68483a0e211 1.0.0
5ea0cdf7854c3d4278d36eda94a2b68483a0e211 1.0.0
7acde360d94b6a2690ce3d03ff39301da84c0a2b 1.0.0
6bd221981ac99103002c1cb94fede400d23a96a1 1.0.1
4816e8b80602a3fd3a0a120333ad85fbe7d8bab4 1.0.2
60c44bdbf093285dc69d5462d4dbb4acad325ca6 1.1.0
319187fcda66714c5eb1353492babeec3d3c826f 1.1.1
4fc5212f7626a56b9eb6437b5c673f56dd7eb942 1.2.0
c143a8c8840a1c68570890c8ac6165bbf92fd3c6 1.2.1
eafd3c256e8732dfb0a4d49d051b5b4339858926 1.3.0
d5eacf390af74553227122b85e20345d47b2f9e6 1.3.1
d5eacf390af74553227122b85e20345d47b2f9e6 1.3.1
8b8e7c25a13cf863f01b2dd955978285ae9daf6a 1.3.1
3bff44b188a7ec1af328d977b9d39b6757bb38df 1.3.2
c59d3fa8681a5b5966b8375b16fccd64a3a8dbeb 1.3.3
79ef6377705184c55633d456832eea318fedcf61 1.3.4
79ef6377705184c55633d456832eea318fedcf61 1.3.4
90fffd35373e9f125af233f78b19416f0938d841 1.3.4
5346ab41b059c95a48cbe1e8a7bae96ce6e0da27 1.4.0
1f3125cba7976538952be268f107c1d0c36c5ce8 1.4.1
04ab22db4ff737cf31e91d75a0f5d7077f324167 1.4.2
9950bf9d684a984d511795013421c89c5cf88bef 1.4.3
d9951e3bdbc765e73835ae13012f6a074d13d8bf 1.4.4
b827dd156a36753e32c7f3f15ce82d6fe9e356c8 1.4.6
f15726f9e5a67cc6221c499affa4840e9d591763 1.4.7
abfabd07a1d328f13c730e8a50d80d2e470afd3b 1.4.9
7f37ee0aff9be4b839d6759cfee336f60e8393a4 1.4.10
fe4593263efa10ea7ba014db6e3379e0b82368a2 1.4.11
f07af25a26786e4825b5170e17ad693245cb3426 1.4.12
d3730d84ba7eda92fd3469a3f63fd6d8cb22c975 1.4.13
12c1ae8e7c5345721e9ec9f8e27b1e36c07f74dc 1.4.14
12c1ae8e7c5345721e9ec9f8e27b1e36c07f74dc 1.4.14
0000000000000000000000000000000000000000 1.4.14
0000000000000000000000000000000000000000 1.4.14
1497e2efd0f8c73a0e3d529debf0c489e4cd6cab 1.4.14
e065014c1ce8ad110a381e9baaaa5d647ba7ac6b 1.4.15
e9e5b38f53dc35b35aa1f9ee9a9be9bbd2d2c3b1 1.4.16
c603503945f52b78522d96a423605cbc953236d3 1.4.17
c59201105a29801cc858eb9160b7a19791b91a35 1.4.18
284cc172e294d48edc840012e1451c32c3963d92 1.4.19
a3e0626aa0c5aecf271367dc77e476ab216ea3c8 1.4.20
5e48016c4a3af8e7358a1267d33d021e71765bed 1.4.21
01ae2cfcc61c4fcb3aa5031349adb5b467c31018 1.4.23
5ffd982f4dff60b588f309cd9bdc61036547282a 1.4.24
dc9ffbcaf1f7d72e96be3f68c11deebb7e7193c5 1.4.25
6de1a44bf75de7af4fcae947c235e9072bbdbb9a 1.4.26
7d650ba2657890a2253c8c4a83f170febebd90fa 1.4.27
7d650ba2657890a2253c8c4a83f170febebd90fa 1.4.27
1810003dec63dd1b506a23849861fffa5bc3ba13 1.4.27
ba08706f08ddea1b77a426f00dfe2bdc244345e8 1.4.28
4e8054ada63f3327bcf759ae7cd36c7c8652bc9b 1.4.29
366ab346610c6de8aaa7617e24011794b40236c6 1.4.30


@@ -0,0 +1,24 @@
Holger Krekel, holger at merlinux eu
Benjamin Peterson, benjamin at python org
Ronny Pfannschmidt, Ronny.Pfannschmidt at gmx de
Guido Wesdorp, johnny at johnnydebris net
Samuele Pedroni, pedronis at openend se
Carl Friedrich Bolz, cfbolz at gmx de
Armin Rigo, arigo at tunes org
Maciek Fijalkowski, fijal at genesilico pl
Brian Dorsey, briandorsey at gmail com
Floris Bruynooghe, flub at devork be
merlinux GmbH, Germany, office at merlinux eu
Contributors include::
Ross Lawley
Ralf Schmitt
Chris Lamb
Harald Armin Massa
Martijn Faassen
Ian Bicking
Jan Balster
Grig Gheorghiu
Bob Ippolito
Christian Tismer

File diff suppressed because it is too large


@@ -0,0 +1,19 @@
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -0,0 +1,9 @@
include CHANGELOG
include AUTHORS
include README.txt
include setup.py
include LICENSE
include conftest.py
include tox.ini
graft doc
graft testing


@@ -0,0 +1,21 @@
.. image:: https://drone.io/bitbucket.org/pytest-dev/py/status.png
:target: https://drone.io/bitbucket.org/pytest-dev/py/latest
.. image:: https://pypip.in/v/py/badge.png
:target: https://pypi.python.org/pypi/py
The py lib is a Python development support library featuring
the following tools and modules:
* py.path: uniform local and svn path objects
* py.apipkg: explicit API control and lazy-importing
* py.iniconfig: easy parsing of .ini files
* py.code: dynamic code generation and introspection
NOTE: prior to the 1.4 release this distribution used to
contain py.test which is now its own package, see http://pytest.org
For questions and more information please visit http://pylib.readthedocs.org
Bugs and issues: http://bitbucket.org/pytest-dev/py/issues/
Authors: Holger Krekel and others, 2004-2015
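As a purely illustrative aside (not part of the vendored README), the py.path module listed above can be used roughly like this, assuming the py package is importable::

    import py

    tmp = py.path.local.mkdtemp()            # fresh temporary directory
    target = tmp.join("notes", "hello.txt")  # path arithmetic, nothing on disk yet
    target.ensure(file=1)                    # create parent dirs and an empty file
    target.write("hello from py.path")       # write file content
    assert target.check(file=1)              # check() queries the filesystem
    print(tmp.listdir())                     # -> [local path of the 'notes' dir]
    tmp.remove()                             # recursive cleanup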


@@ -0,0 +1,75 @@
import py
import timeit
class Listdir:
numiter = 100000
numentries = 100
def setup(self):
tmpdir = py.path.local.make_numbered_dir(self.__class__.__name__)
for i in range(self.numentries):
tmpdir.join(str(i))
self.tmpdir = tmpdir
def run(self):
return self.tmpdir.listdir()
class Listdir_arg(Listdir):
numiter = 100000
numentries = 100
def run(self):
return self.tmpdir.listdir("47")
class Join_onearg(Listdir):
def run(self):
self.tmpdir.join("17")
self.tmpdir.join("18")
self.tmpdir.join("19")
class Join_multi(Listdir):
def run(self):
self.tmpdir.join("a", "b")
self.tmpdir.join("a", "b", "c")
self.tmpdir.join("a", "b", "c", "d")
class Check(Listdir):
def run(self):
self.tmpdir.check()
self.tmpdir.check()
self.tmpdir.check()
class CheckDir(Listdir):
def run(self):
self.tmpdir.check(dir=1)
self.tmpdir.check(dir=1)
assert not self.tmpdir.check(dir=0)
class CheckDir2(Listdir):
def run(self):
self.tmpdir.stat().isdir()
self.tmpdir.stat().isdir()
assert self.tmpdir.stat().isdir()
class CheckFile(Listdir):
def run(self):
self.tmpdir.check(file=1)
assert not self.tmpdir.check(file=1)
assert self.tmpdir.check(file=0)
if __name__ == "__main__":
import time
for cls in [Listdir, Listdir_arg,
Join_onearg, Join_multi,
Check, CheckDir, CheckDir2, CheckFile,]:
inst = cls()
inst.setup()
now = time.time()
for i in xrange(cls.numiter):
inst.run()
elapsed = time.time() - now
print "%s: %d loops took %.2f seconds, per call %.6f" %(
cls.__name__,
cls.numiter, elapsed, elapsed / cls.numiter)


@@ -0,0 +1,71 @@
import py
import sys
pytest_plugins = 'doctest pytester'.split()
collect_ignore = ['build', 'doc/_build']
import os, py
pid = os.getpid()
def pytest_addoption(parser):
group = parser.getgroup("pylib", "py lib testing options")
group.addoption('--runslowtests',
action="store_true", dest="runslowtests", default=False,
help=("run slow tests"))
def pytest_funcarg__sshhost(request):
val = request.config.getvalue("sshhost")
if val:
return val
py.test.skip("need --sshhost option")
def pytest_generate_tests(metafunc):
multi = getattr(metafunc.function, 'multi', None)
if multi is not None:
assert len(multi.kwargs) == 1
for name, l in multi.kwargs.items():
for val in l:
metafunc.addcall(funcargs={name: val})
elif 'anypython' in metafunc.funcargnames:
for name in ('python2.4', 'python2.5', 'python2.6',
'python2.7', 'python3.1', 'pypy-c', 'jython'):
metafunc.addcall(id=name, param=name)
# XXX copied from execnet's conftest.py - needs to be merged
winpymap = {
'python2.7': r'C:\Python27\python.exe',
'python2.6': r'C:\Python26\python.exe',
'python2.5': r'C:\Python25\python.exe',
'python2.4': r'C:\Python24\python.exe',
'python3.1': r'C:\Python31\python.exe',
}
def getexecutable(name, cache={}):
try:
return cache[name]
except KeyError:
executable = py.path.local.sysfind(name)
if executable:
if name == "jython":
import subprocess
popen = subprocess.Popen([str(executable), "--version"],
universal_newlines=True, stderr=subprocess.PIPE)
out, err = popen.communicate()
if not err or "2.5" not in err:
executable = None
cache[name] = executable
return executable
def pytest_funcarg__anypython(request):
name = request.param
executable = getexecutable(name)
if executable is None:
if sys.platform == "win32":
executable = winpymap.get(name, None)
if executable:
executable = py.path.local(executable)
if executable.check():
return executable
py.test.skip("no %s found" % (name,))
return executable


@@ -0,0 +1,133 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
-rm -rf $(BUILDDIR)/*
install: clean html
rsync -avz _build/html/ code:www-pylib/
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/py.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/py.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/py"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/py"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
make -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."


@@ -0,0 +1,18 @@
{% extends "!layout.html" %}
{% block footer %}
{{ super() }}
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-7597274-14']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
{% endblock %}


@@ -0,0 +1,7 @@
py lib 1.0.0: XXX
======================================================================
Welcome to the 1.0.0 py lib release - a library aiming to
support agile and test-driven python development on various levels.
XXX


@@ -0,0 +1,27 @@
py lib 0.9.2: bugfix release
=============================
Welcome to the 0.9.2 py lib and py.test release -
mainly fixing Windows issues, providing better
packaging and integration with setuptools.
Here is a quick summary of what the py lib provides:
* py.test: cross-project testing tool with many advanced features
* py.execnet: ad-hoc code distribution to SSH, Socket and local sub processes
* py.magic.greenlet: micro-threads on standard CPython ("stackless-light")
* py.path: path abstractions over local and subversion files
* rich documentation of py's exported API
* tested against Linux, Win32, OSX, works on python 2.3-2.6
See here for more information:
Pypi pages: http://pypi.python.org/pypi/py/
Download/Install: http://codespeak.net/py/0.9.2/download.html
Documentation/API: http://codespeak.net/py/0.9.2/index.html
best and have fun,
holger krekel


@@ -0,0 +1,63 @@
pylib 1.0.0 released: testing-with-python innovations continue
--------------------------------------------------------------------
Took a few betas but finally i uploaded a `1.0.0 py lib release`_,
featuring the mature and powerful py.test tool and "execnet-style"
*elastic* distributed programming. With the new release, there are
many new advanced automated testing features - here is a quick summary:
* funcargs_ - pythonic zero-boilerplate fixtures for Python test functions (a small sketch follows this list):
- totally separates test code, test configuration and test setup
- ideal for integration and functional tests
- allows for flexible and natural test parametrization schemes
* new `plugin architecture`_, allowing easy-to-write project-specific and cross-project single-file plugins. The most notable new external plugin is `oejskit`_ which naturally enables **running and reporting of javascript-unittests in real-life browsers**.
* many new features done in easy-to-improve `default plugins`_, highlights:
* xfail: mark tests as "expected to fail" and report separately.
* pastebin: automatically send tracebacks to pocoo paste service
* capture: flexibly capture stdout/stderr of subprocesses, per-test ...
* monkeypatch: safely monkeypatch modules/classes from within tests
* unittest: run and integrate traditional unittest.py tests
* figleaf: generate html coverage reports with the figleaf module
* resultlog: generate buildbot-friendly reporting output
* ...
* `distributed testing`_ and `elastic distributed execution`_:
- new unified "TX" URL scheme for specifying remote processes
- new distribution modes "--dist=each" and "--dist=load"
- new sync/async ways to handle 1:N communication
- improved documentation
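As a quick illustration of the funcargs mechanism referenced above (not part of the original announcement), a combined conftest.py/test sketch might look like this; FakeServer, myserver and test_ping are invented names::

    class FakeServer(object):
        def ping(self):
            return "pong"

    def pytest_funcarg__myserver(request):
        # the factory is looked up by name: a test asking for "myserver"
        # receives the object returned here, created per test by default
        server = FakeServer()
        request.addfinalizer(lambda: None)   # teardown code would go here
        return server

    def test_ping(myserver):
        assert myserver.ping() == "pong"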
The py lib continues to offer most of the functionality used by
the testing tool in `independent namespaces`_.
Some non-test related code, notably greenlets/co-routines and
api-generation now live as their own projects which simplifies the
installation procedure because no C-Extensions are required anymore.
The whole package should work well with Linux, Win32 and OSX, on Python
2.3, 2.4, 2.5 and 2.6. (Expect Python3 compatibility soon!)
For more info, see the py.test and py lib documentation:
http://pytest.org
http://pylib.org
have fun,
holger
.. _`independent namespaces`: http://pylib.org
.. _`funcargs`: http://codespeak.net/py/dist/test/funcargs.html
.. _`plugin architecture`: http://codespeak.net/py/dist/test/extend.html
.. _`default plugins`: http://codespeak.net/py/dist/test/plugin/index.html
.. _`distributed testing`: http://codespeak.net/py/dist/test/dist.html
.. _`elastic distributed execution`: http://codespeak.net/py/dist/execnet.html
.. _`1.0.0 py lib release`: http://pypi.python.org/pypi/py
.. _`oejskit`: http://codespeak.net/py/dist/test/plugin/oejskit.html


@@ -0,0 +1,48 @@
1.0.1: improved reporting, nose/unittest.py support, bug fixes
-----------------------------------------------------------------------
This is a bugfix release of pylib/py.test also coming with:
* improved documentation, improved navigation
* test failure reporting improvements
* support for directly running existing nose/unittest.py style tests
visit here for more info, including quickstart and tutorials:
http://pytest.org and http://pylib.org
Changelog 1.0.0 to 1.0.1
------------------------
* added a default 'pytest_nose' plugin which handles nose.SkipTest,
nose-style function/method/generator setup/teardown and
tries to report functions correctly.
* improved documentation, better navigation: see http://pytest.org
* added a "--help-config" option to show conftest.py / ENV-var names for
all longopt cmdline options, and some special conftest.py variables.
renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
* unicode fixes: capturing and unicode writes to sys.stdout
(through e.g. a print statement) now work within tests,
they are encoded as "utf8" by default, also terminalwriting
was adapted and somewhat unified between windows and linux
* fix issue #27: better reporting on non-collectable items given on commandline
(e.g. pyc files)
* fix issue #33: added --version flag (thanks Benjamin Peterson)
* fix issue #32: adding support for "incomplete" paths to wcpath.status()
* "Test" prefixed classes are *not* collected by default anymore if they
have an __init__ method
* monkeypatch setenv() now accepts a "prepend" parameter
* improved reporting of collection error tracebacks
* simplified multicall mechanism and plugin architecture,
renamed some internal methods and argnames


@@ -0,0 +1,5 @@
1.0.2: packaging fixes
-----------------------------------------------------------------------
this release is purely a release for fixing packaging issues.


@@ -0,0 +1,115 @@
py.test/pylib 1.1.0: Python3, Jython, advanced skipping, cleanups ...
--------------------------------------------------------------------------------
Features:
* compatible to Python3 (single py2/py3 source), `easy to install`_
* conditional skipping_: skip/xfail based on platform/dependencies
* generalized marking_: mark tests on a whole-class or whole-module basis
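A purely illustrative test module (not from this release) combining the skipping and marking features above; the condition string is evaluated by py.test in the 1.1.x style and the ``slow`` marker name is made up::

    import py

    @py.test.mark.skipif("sys.version_info < (2, 6)")
    def test_needs_next_builtin():
        # only runs where the next() builtin (new in 2.6) exists
        assert next(iter([1, 2, 3])) == 1

    @py.test.mark.xfail
    def test_known_broken():
        assert 0, "tracked as an expected failure"

    pytestmark = py.test.mark.slow    # whole-module marking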
Fixes:
* code reduction and "de-magification" (e.g. 23 KLOC -> 11 KLOC)
* distributed testing requires the now separately released execnet_ package
* funcarg-setup/caching, "same-name" test modules now cause an explicit error
* de-cluttered reporting options, --report for skipped/xfail details
Compatibilities
1.1.0 should allow running test code that already worked well with 1.0.2
plus some more due to improved unittest/nose compatibility.
More information: http://pytest.org
thanks and have fun,
holger (http://twitter.com/hpk42)
.. _execnet: http://codespeak.net/execnet
.. _`easy to install`: ../install.html
.. _marking: ../test/plugin/mark.html
.. _skipping: ../test/plugin/skipping.html
Changelog 1.0.2 -> 1.1.0
-----------------------------------------------------------------------
* remove py.rest tool and internal namespace - it was
never really advertised and can still be used with
the old release if needed. If there is interest
it could be revived into its own tool i guess.
* fix issue48 and issue59: raise an Error if the module
from an imported test file does not seem to come from
the filepath - avoids "same-name" confusion that has
been reported repeatedly
* merged Ronny's nose-compatibility hacks: now
nose-style setup_module() and setup() functions are
supported
* introduce generalized py.test.mark function marking
* reshuffle / refine command line grouping
* deprecate parser.addgroup in favour of getgroup which creates option group
* add --report command line option that allows to control showing of skipped/xfailed sections
* generalized skipping: a new way to mark python functions with skipif or xfail
at function, class and modules level based on platform or sys-module attributes.
* extend py.test.mark decorator to allow for positional args
* introduce and test "py.cleanup -d" to remove empty directories
* fix issue #59 - robustify unittest test collection
* make bpython/help interaction work by adding an __all__ attribute
to ApiModule, cleanup initpkg
* use MIT license for pylib, add some contributors
* remove py.execnet code and substitute all usages with 'execnet' proper
* fix issue50 - cached_setup now caches more to expectations
for test functions with multiple arguments.
* merge Jarko's fixes, issue #45 and #46
* add the ability to specify a path for py.lookup to search in
* fix a funcarg cached_setup bug probably only occurring
in distributed testing and "module" scope with teardown.
* many fixes and changes for making the code base python3 compatible,
many thanks to Benjamin Peterson for helping with this.
* consolidate builtins implementation to be compatible with >=2.3,
add helpers to ease keeping 2 and 3k compatible code
* deprecate py.compat.doctest|subprocess|textwrap|optparse
* deprecate py.magic.autopath, remove py/magic directory
* move pytest assertion handling to py/code and a pytest_assertion
plugin, add "--no-assert" option, deprecate py.magic namespaces
in favour of (less) py.code ones.
* consolidate and cleanup py/code classes and files
* cleanup py/misc, move tests to bin-for-dist
* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
* consolidate py.log implementation, remove old approach.
* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
text/unicode and byte-streams (uses underlying standard lib io.*
if available)
* make py.unittest_convert helper script available which converts "unittest.py"
style files into the simpler assert/direct-test-classes py.test/nosetests
style. The script was written by Laura Creighton.
* simplified internal localpath implementation


@@ -0,0 +1,48 @@
py.test/pylib 1.1.1: bugfix release, setuptools plugin registration
--------------------------------------------------------------------------------
This is a compatibility fixing release of pylib/py.test to work
better with previous 1.0.x test code bases. It also contains fixes
and changes to work with `execnet>=1.0.0`_ to provide distributed
testing and looponfailing testing modes. py-1.1.1 also introduces
a new mechanism for registering plugins via setuptools.
What is pylib/py.test?
-----------------------
py.test is an advanced automated testing tool working with
Python2, Python3 and Jython versions on all major operating
systems. It has an extensive plugin architecture and can run many
existing common Python test suites without modification. Moreover,
it offers some unique features not found in other
testing tools. See http://pytest.org for more info.
The pylib also contains a localpath and svnpath implementation
and some developer-oriented command line tools. See
http://pylib.org for more info.
thanks to all who helped and gave feedback,
have fun,
holger (http://twitter.com/hpk42)
.. _`execnet>=1.0.0`: http://codespeak.net/execnet
Changes between 1.1.1 and 1.1.0
=====================================
- introduce automatic plugin registration via 'pytest11'
entrypoints via setuptools' pkg_resources.iter_entry_points
- fix py.test dist-testing to work with execnet >= 1.0.0b4
- re-introduce py.test.cmdline.main() for better backward compatibility
- svn paths: fix a bug with path.check(versioned=True) for svn paths,
allow '%' in svn paths, make svnwc.update() default to interactive mode
like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
- refine distributed tarball to contain test and no pyc files
- try harder to have deprecation warnings for py.compat.* accesses
report a correct location
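For illustration only (not part of this announcement), a third-party plugin would register itself through the 'pytest11' entry point mentioned above with a setup.py fragment along these lines; the project and module names are invented::

    from setuptools import setup

    setup(
        name="pytest-example-plugin",
        version="0.1",
        py_modules=["pytest_example_plugin"],
        entry_points={
            "pytest11": [
                "example_plugin = pytest_example_plugin",
            ],
        },
    )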


@@ -0,0 +1,116 @@
py.test/pylib 1.2.0: junitxml, standalone test scripts, pluginization
--------------------------------------------------------------------------------
py.test is an advanced automated testing tool working with
Python2, Python3 and Jython versions on all major operating
systems. It has a simple plugin architecture and can run many
existing common Python test suites without modification. It offers
some unique features not found in other testing tools.
See http://pytest.org for more info.
py.test 1.2.0 brings many bug fixes and interesting new abilities:
* --junitxml=path will create an XML file for use with CI processing
* --genscript=path creates a standalone py.test-equivalent test-script
* --ignore=path prevents collection of anything below that path
* --confcutdir=path only lookup conftest.py test configs below that path
* a 'pytest_report_header' hook to add info to the terminal report header
* a 'pytestconfig' function argument gives direct access to option values
* 'pytest_generate_tests' can now be put into a class as well
* on CPython py.test additionally installs as "py.test-VERSION", on
Jython as py.test-jython and on PyPy as py.test-pypy-XYZ
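As a small illustrative sketch (not from the announcement itself), the 'pytest_report_header' hook listed above can be provided from a conftest.py roughly like this; the reported string is invented::

    import sys

    def pytest_report_header(config):
        # whatever is returned here is printed at the top of the test report
        return "example project, python %s" % sys.version.split()[0]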
Apart from many bug fixes 1.2.0 also has better pluginization:
Distributed testing and looponfailing testing now live in the
separately installable 'pytest-xdist' plugin. The same is true for
'pytest-figleaf' for doing coverage reporting. Those two plugins
can serve well now as blueprints for doing your own.
thanks to all who helped and gave feedback,
have fun,
holger krekel, January 2010
Changes between 1.2.0 and 1.1.1
=====================================
- moved dist/looponfailing from py.test core into a new
separately released pytest-xdist plugin.
- new junitxml plugin: --junitxml=path will generate a junit style xml file
which is processable e.g. by the Hudson CI system.
- new option: --genscript=path will generate a standalone py.test script
which will not need any libraries installed. thanks to Ralf Schmitt.
- new option: --ignore will prevent specified path from collection.
Can be specified multiple times.
- new option: --confcutdir=dir will make py.test only consider conftest
files that are relative to the specified dir.
- new funcarg: "pytestconfig" is the pytest config object for access
to command line args and can now be easily used in a test.
- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
disambiguate between Python3, python2.X, Jython and PyPy installed versions.
- new "pytestconfig" funcarg allows access to test config object
- new "pytest_report_header" hook can return additional lines
to be displayed at the header of a test run.
- (experimental) allow "py.test path::name1::name2::..." for pointing
to a test within a test collection directly. This might eventually
evolve as a full substitute to "-k" specifications.
- streamlined plugin loading: order is now as documented in
customize.html: setuptools, ENV, commandline, conftest.
also setuptools entry point names are turned to canonical names ("pytest_*")
- automatically skip tests that need 'capfd' but have no os.dup
- allow pytest_generate_tests to be defined in classes as well
- deprecate usage of 'disabled' attribute in favour of pytestmark
- deprecate definition of Directory, Module, Class and Function nodes
in conftest.py files. Use pytest collect hooks instead.
- collection/item node specific runtest/collect hooks are only called exactly
on matching conftest.py files, i.e. ones which are exactly below
the filesystem path of an item
- change: the first pytest_collect_directory hook to return something
will now prevent further hooks to be called.
- change: figleaf plugin now requires --figleaf to run. Also
change its long command line options to be a bit shorter (see py.test -h).
- change: pytest doctest plugin is now enabled by default and has a
new option --doctest-glob to set a pattern for file matches.
- change: remove internal py._* helper vars, only keep py._pydir
- robustify capturing to survive if custom pytest_runtest_setup
code failed and prevented the capturing setup code from running.
- make py.test.* helpers provided by default plugins visible early -
works transparently both for pydoc and for interactive sessions
which will regularly see e.g. py.test.mark and py.test.importorskip.
- simplify internal plugin manager machinery
- simplify internal collection tree by introducing a RootCollector node
- fix assert reinterpretation that sees a call containing "keyword=..."
- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
hooks on slaves during dist-testing, report module/session teardown
hooks correctly.
- fix issue65: properly handle dist-testing if no
execnet/py lib installed remotely.
- skip some install-tests if no execnet is available
- fix docs, fix internal bin/ script generation


@@ -0,0 +1,66 @@
py.test/pylib 1.2.1: little fixes and improvements
--------------------------------------------------------------------------------
py.test is an advanced automated testing tool working with
Python2, Python3 and Jython versions on all major operating
systems. It has a simple plugin architecture and can run many
existing common Python test suites without modification. It offers
some unique features not found in other testing tools.
See http://pytest.org for more info.
py.test 1.2.1 brings bug fixes and some new options and abilities triggered
by user feedback:
* --funcargs [testpath] will show available builtin- and project funcargs.
* display a short and concise traceback if funcarg lookup fails.
* early-load "conftest.py" files in non-dot first-level sub directories.
* --tb=line will print a single line for each failing test (issue67)
* py.cleanup has a number of new options and cleans up setup.py-related files
* fix issue78: always call python-level teardown functions even if the
according setup failed.
For more detailed information see the changelog below.
cheers and have fun,
holger
Changes between 1.2.1 and 1.2.0
=====================================
- refined usage and options for "py.cleanup"::
py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
py.cleanup -e .swp -e .cache # also remove files with these extensions
py.cleanup -s # remove "build" and "dist" directory next to setup.py files
py.cleanup -d # also remove empty directories
py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
py.cleanup -n # dry run, only show what would be removed
- add a new option "py.test --funcargs" which shows available funcargs
and their help strings (docstrings on their respective factory function)
for a given test path
- display a short and concise traceback if a funcarg lookup fails
- early-load "conftest.py" files in non-dot first-level sub directories.
allows to conveniently keep and access test-related options in a ``test``
subdir and still add command line options.
- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
- fix issue78: always call python-level teardown functions even if the
according setup failed. This includes refinements for calling setup_module/class functions
which will now only be called once instead of the previous behaviour where they'd be called
multiple times if they raise an exception (including a Skipped exception). Any exception
will be recorded and associated with all tests in the corresponding module/class scope.
- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
- fix pdb debugging to be in the correct frame on raises-related errors
- update apipkg.py to fix an issue where recursive imports might
unnecessarily break importing
- fix plugin links


@@ -0,0 +1,580 @@
py.test/pylib 1.3.0: new options, per-plugin hooks, fixes ...
===========================================================================
The 1.3.0 release introduces new options, bug fixes and improved compatibility
with Python3 and Jython-2.5.1 on Windows. If you already use py-1.2 chances
are you can use py-1.3.0. See the below CHANGELOG for more details and
http://pylib.org/install.html for installation instructions.
py.test is an advanced automated testing tool working with Python2,
Python3, Jython and PyPy versions on all major operating systems. It
offers a no-boilerplate testing approach and has inspired other testing
tools and enhancements in the standard Python library for more than five
years. It has a simple and extensive plugin architecture, configurable
reporting and provides unique ways to make it fit to your testing
process and needs.
See http://pytest.org for more info.
cheers and have fun,
holger krekel
Changes between 1.2.1 and 1.3.0
==================================================
- deprecate --report option in favour of a new shorter and easier to
remember -r option: it takes a string argument consisting of any
combination of 'xfsX' characters. They relate to the single chars
you see during the dotted progress printing and will print an extra line
per test at the end of the test run. This extra line indicates the exact
position or test ID that you directly paste to the py.test cmdline in order
to re-run a particular test.
- allow external plugins to register new hooks via the new
pytest_addhooks(pluginmanager) hook. The new release of
the pytest-xdist plugin for distributed and looponfailing
testing requires this feature.
- add a new pytest_ignore_collect(path, config) hook to allow projects and
plugins to define exclusion behaviour for their directory structure -
for example you may define in a conftest.py this method::
def pytest_ignore_collect(path):
return path.check(link=1)
to prevent even collection of any tests in symlinked dirs.
- new pytest_pycollect_makemodule(path, parent) hook for
allowing customization of the Module collection object for a
matching test module.
- extend and refine xfail mechanism::
@py.test.mark.xfail(run=False) do not run the decorated test
@py.test.mark.xfail(reason="...") prints the reason string in xfail summaries
specifying ``--runxfail`` on the command line ignores xfail markers to show
you the underlying traceback.
- expose (previously internal) commonly useful methods:
py.io.get_terminal_width() -> return terminal width
py.io.ansi_print(...) -> print colored/bold text on linux/win32
py.io.saferepr(obj) -> return limited representation string
- expose test outcome related exceptions as py.test.skip.Exception,
py.test.raises.Exception etc., useful mostly for plugins
doing special outcome interpretation/tweaking
- (issue85) fix junitxml plugin to handle tests with non-ascii output
- fix/refine python3 compatibility (thanks Benjamin Peterson)
- fixes for making the jython/win32 combination work, note however:
jython2.5.1/win32 does not provide a command line launcher, see
http://bugs.jython.org/issue1491 . See pylib install documentation
for how to work around.
- fixes for handling of unicode exception values and unprintable objects
- (issue87) fix unboundlocal error in assertionold code
- (issue86) improve documentation for looponfailing
- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
- ship distribute_setup.py version 0.6.10
- added links to the new capturelog and coverage plugins
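Purely as an illustration (not part of the changelog), the newly exposed py.io helpers listed above can be used like this, assuming the py package is importable::

    import py

    cols = py.io.get_terminal_width()            # best-effort terminal width
    print("terminal is %d columns wide" % cols)
    print(py.io.saferepr(list(range(10000))))    # representation capped in length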
Changes between 1.2.1 and 1.2.0
=====================================
- refined usage and options for "py.cleanup"::
py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
py.cleanup -e .swp -e .cache # also remove files with these extensions
py.cleanup -s # remove "build" and "dist" directory next to setup.py files
py.cleanup -d # also remove empty directories
py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
py.cleanup -n # dry run, only show what would be removed
- add a new option "py.test --funcargs" which shows available funcargs
and their help strings (docstrings on their respective factory function)
for a given test path
- display a short and concise traceback if a funcarg lookup fails
- early-load "conftest.py" files in non-dot first-level sub directories.
allows to conveniently keep and access test-related options in a ``test``
subdir and still add command line options.
- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
- fix issue78: always call python-level teardown functions even if the
according setup failed. This includes refinements for calling setup_module/class functions
which will now only be called once instead of the previous behaviour where they'd be called
multiple times if they raise an exception (including a Skipped exception). Any exception
will be recorded and associated with all tests in the corresponding module/class scope.
- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
- fix pdb debugging to be in the correct frame on raises-related errors
- update apipkg.py to fix an issue where recursive imports might
unnecessarily break importing
- fix plugin links
Changes between 1.2 and 1.1.1
=====================================
- moved dist/looponfailing from py.test core into a new
separately released pytest-xdist plugin.
- new junitxml plugin: --junitxml=path will generate a junit style xml file
which is processable e.g. by the Hudson CI system.
- new option: --genscript=path will generate a standalone py.test script
which will not need any libraries installed. thanks to Ralf Schmitt.
- new option: --ignore will prevent specified path from collection.
Can be specified multiple times.
- new option: --confcutdir=dir will make py.test only consider conftest
files that are relative to the specified dir.
- new funcarg: "pytestconfig" is the pytest config object for access
to command line args and can now be easily used in a test.
- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
disambiguate between Python3, python2.X, Jython and PyPy installed versions.
- new "pytestconfig" funcarg allows access to test config object
- new "pytest_report_header" hook can return additional lines
to be displayed at the header of a test run.
- (experimental) allow "py.test path::name1::name2::..." for pointing
to a test within a test collection directly. This might eventually
evolve as a full substitute to "-k" specifications.
- streamlined plugin loading: order is now as documented in
customize.html: setuptools, ENV, commandline, conftest.
also setuptools entry point names are turned to canonical names ("pytest_*")
- automatically skip tests that need 'capfd' but have no os.dup
- allow pytest_generate_tests to be defined in classes as well
- deprecate usage of 'disabled' attribute in favour of pytestmark
- deprecate definition of Directory, Module, Class and Function nodes
in conftest.py files. Use pytest collect hooks instead.
- collection/item node specific runtest/collect hooks are only called exactly
on matching conftest.py files, i.e. ones which are exactly below
the filesystem path of an item
- change: the first pytest_collect_directory hook to return something
will now prevent further hooks to be called.
- change: figleaf plugin now requires --figleaf to run. Also
change its long command line options to be a bit shorter (see py.test -h).
- change: pytest doctest plugin is now enabled by default and has a
new option --doctest-glob to set a pattern for file matches.
- change: remove internal py._* helper vars, only keep py._pydir
- robustify capturing to survive if custom pytest_runtest_setup
code failed and prevented the capturing setup code from running.
- make py.test.* helpers provided by default plugins visible early -
works transparently both for pydoc and for interactive sessions
which will regularly see e.g. py.test.mark and py.test.importorskip.
- simplify internal plugin manager machinery
- simplify internal collection tree by introducing a RootCollector node
- fix assert reinterpretation that sees a call containing "keyword=..."
- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
hooks on slaves during dist-testing, report module/session teardown
hooks correctly.
- fix issue65: properly handle dist-testing if no
execnet/py lib installed remotely.
- skip some install-tests if no execnet is available
- fix docs, fix internal bin/ script generation
Changes between 1.1.1 and 1.1.0
=====================================
- introduce automatic plugin registration via 'pytest11'
entrypoints via setuptools' pkg_resources.iter_entry_points
- fix py.test dist-testing to work with execnet >= 1.0.0b4
- re-introduce py.test.cmdline.main() for better backward compatibility
- svn paths: fix a bug with path.check(versioned=True) for svn paths,
allow '%' in svn paths, make svnwc.update() default to interactive mode
like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
- refine distributed tarball to contain test and no pyc files
- try harder to have deprecation warnings for py.compat.* accesses
report a correct location
Changes between 1.1.0 and 1.0.2
=====================================
* adjust and improve docs
* remove py.rest tool and internal namespace - it was
never really advertised and can still be used with
the old release if needed. If there is interest
it could be revived into its own tool i guess.
* fix issue48 and issue59: raise an Error if the module
from an imported test file does not seem to come from
the filepath - avoids "same-name" confusion that has
been reported repeatedly
* merged Ronny's nose-compatibility hacks: now
nose-style setup_module() and setup() functions are
supported
* introduce generalized py.test.mark function marking
* reshuffle / refine command line grouping
* deprecate parser.addgroup in favour of getgroup which creates option group
* add --report command line option that allows to control showing of skipped/xfailed sections
* generalized skipping: a new way to mark python functions with skipif or xfail
at function, class and modules level based on platform or sys-module attributes.
* extend py.test.mark decorator to allow for positional args
* introduce and test "py.cleanup -d" to remove empty directories
* fix issue #59 - robustify unittest test collection
* make bpython/help interaction work by adding an __all__ attribute
to ApiModule, cleanup initpkg
* use MIT license for pylib, add some contributors
* remove py.execnet code and substitute all usages with 'execnet' proper
* fix issue50 - cached_setup now caches more to expectations
for test functions with multiple arguments.
* merge Jarko's fixes, issue #45 and #46
* add the ability to specify a path for py.lookup to search in
* fix a funcarg cached_setup bug probably only occurring
in distributed testing and "module" scope with teardown.
* many fixes and changes for making the code base python3 compatible,
many thanks to Benjamin Peterson for helping with this.
* consolidate builtins implementation to be compatible with >=2.3,
add helpers to ease keeping 2 and 3k compatible code
* deprecate py.compat.doctest|subprocess|textwrap|optparse
* deprecate py.magic.autopath, remove py/magic directory
* move pytest assertion handling to py/code and a pytest_assertion
plugin, add "--no-assert" option, deprecate py.magic namespaces
in favour of (less) py.code ones.
* consolidate and cleanup py/code classes and files
* cleanup py/misc, move tests to bin-for-dist
* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg
* consolidate py.log implementation, remove old approach.
* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
text/unicode and byte-streams (uses underlying standard lib io.*
if available)
* make py.unittest_convert helper script available which converts "unittest.py"
style files into the simpler assert/direct-test-classes py.test/nosetests
style. The script was written by Laura Creighton.
* simplified internal localpath implementation
Changes between 1.0.1 and 1.0.2
=====================================
* fixing packaging issues, triggered by fedora redhat packaging,
also added doc, examples and contrib dirs to the tarball.
* added a documentation link to the new django plugin.
Changes between 1.0.0 and 1.0.1
=====================================
* added a 'pytest_nose' plugin which handles nose.SkipTest,
nose-style function/method/generator setup/teardown and
tries to report functions correctly.
* capturing of unicode writes or encoded strings to sys.stdout/err
work better, also terminalwriting was adapted and somewhat
unified between windows and linux.
* improved documentation layout and content a lot
* added a "--help-config" option to show conftest.py / ENV-var names for
all longopt cmdline options, and some special conftest.py variables.
renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
* fix issue #27: better reporting on non-collectable items given on commandline
(e.g. pyc files)
* fix issue #33: added --version flag (thanks Benjamin Peterson)
* fix issue #32: adding support for "incomplete" paths to wcpath.status()
* "Test" prefixed classes are *not* collected by default anymore if they
have an __init__ method
* monkeypatch setenv() now accepts a "prepend" parameter
* improved reporting of collection error tracebacks
* simplified multicall mechanism and plugin architecture,
renamed some internal methods and argnames
Changes between 1.0.0b9 and 1.0.0
=====================================
* more terse reporting: try to show filesystem paths relative to the current dir
* improve xfail output a bit
Changes between 1.0.0b8 and 1.0.0b9
=====================================
* cleanly handle and report final teardown of test setup
* fix svn-1.6 compat issue with py.path.svnwc().versioned()
(thanks Wouter Vanden Hove)
* setup/teardown or collection problems now show as ERRORs
or with big "E"'s in the progress lines. they are reported
and counted separately.
* dist-testing: properly handle test items that get locally
collected but cannot be collected on the remote side - often
due to platform/dependency reasons
* simplified py.test.mark API - see keyword plugin documentation
* integrate better with logging: capturing now by default captures
test functions and their immediate setup/teardown in a single stream
* capsys and capfd funcargs now have a readouterr() and a close() method
(underlyingly py.io.StdCapture/FD objects are used which grew a
readouterr() method as well to return snapshots of captured out/err)
* make assert-reinterpretation work better with comparisons not
returning bools (reported with numpy, thanks Maciej Fijalkowski)
* reworked per-test output capturing into the pytest_iocapture.py plugin
and thus removed capturing code from config object
* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
Changes between 1.0.0b7 and 1.0.0b8
=====================================
* pytest_unittest-plugin is now enabled by default
* introduced pytest_keyboardinterrupt hook and
refined pytest_sessionfinish hooked, added tests.
* workaround a buggy logging module interaction ("closing already closed
files"). Thanks to Sridhar Ratnakumar for triggering.
* if plugins use "py.test.importorskip" for importing
a dependency only a warning will be issued instead
of exiting the testing process.
* many improvements to docs:
- refined funcargs doc, use the term "factory" instead of "provider"
- added a new talk/tutorial doc page
- better download page
- better plugin docstrings
- added new plugins page and automatic doc generation script
* fixed teardown problem related to partially failing funcarg setups
(thanks MrTopf for reporting), "pytest_runtest_teardown" is now
always invoked even if the "pytest_runtest_setup" failed.
* tweaked doctest output for docstrings in py modules,
thanks Radomir.
Changes between 1.0.0b3 and 1.0.0b7
=============================================
* renamed py.test.xfail back to py.test.mark.xfail to avoid
two ways to decorate for xfail
* re-added py.test.mark decorator for setting keywords on functions
(it was actually documented so removing it was not nice)
* remove scope-argument from request.addfinalizer() because
request.cached_setup has the scope arg. TOOWTDI.
* perform setup finalization before reporting failures
* apply modified patches from Andreas Kloeckner to allow
test functions to have no func_code (#22) and to make
"-k" and function keywords work (#20)
* apply patch from Daniel Peolzleithner (issue #23)
* resolve issue #18, multiprocessing.Manager() and
redirection clash
* make __name__ == "__channelexec__" for remote_exec code
Changes between 1.0.0b1 and 1.0.0b3
=============================================
* plugin classes are removed: one now defines
hooks directly in conftest.py or global pytest_*.py
files.
* added new pytest_namespace(config) hook that allows
to inject helpers directly to the py.test.* namespace.
* documented and refined many hooks
* added new style of generative tests via
pytest_generate_tests hook that integrates
well with function arguments.
Changes between 0.9.2 and 1.0.0b1
=============================================
* introduced new "funcarg" setup method,
see doc/test/funcarg.txt
* introduced plugin architecture and many
new py.test plugins, see
doc/test/plugins.txt
* teardown_method is now guaranteed to get
called after a test method has run.
* new method: py.test.importorskip(mod,minversion)
will either import or call py.test.skip()
* completely revised internal py.test architecture
* new py.process.ForkedFunc object allowing to
fork execution of a function to a sub process
and getting a result back.
XXX lots of things missing here XXX
Changes between 0.9.1 and 0.9.2
===============================
* refined installation and metadata, created new setup.py,
now based on setuptools/ez_setup (thanks to Ralf Schmitt
for his support).
* improved the way of making py.* scripts available in
windows environments, they are now added to the
Scripts directory as ".cmd" files.
* py.path.svnwc.status() now is more complete and
uses xml output from the 'svn' command if available
(Guido Wesdorp)
* fix for py.path.svn* to work with svn 1.5
(Chris Lamb)
* fix path.relto(otherpath) method on windows to
use normcase for checking if a path is relative.
* py.test's traceback is better parseable from editors
(follows the filenames:LINENO: MSG convention)
(thanks to Osmo Salomaa)
* fix to javascript-generation, "py.test --runbrowser"
should work more reliably now
* removed previously accidentally added
py.test.broken and py.test.notimplemented helpers.
* there now is a py.__version__ attribute
Changes between 0.9.0 and 0.9.1
===============================
This is a fairly complete list of changes between 0.9 and 0.9.1, which can
serve as a reference for developers.
* allowing + signs in py.path.svn urls [39106]
* fixed support for Failed exceptions without excinfo in py.test [39340]
* added support for killing processes for Windows (as well as platforms that
support os.kill) in py.misc.killproc [39655]
* added setup/teardown for generative tests to py.test [40702]
* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
* fixed problem with calling .remove() on wcpaths of non-versioned files in
py.path [44248]
* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
* fail to run greenlet tests when pypy is available, but without stackless
[45294]
* small fixes in rsession tests [45295]
* fixed issue with 2.5 type representations in py.test [45483, 45484]
* made that internal reporting issues displaying is done atomically in py.test
[45518]
* made that non-existing files are ignored by the py.lookup script [45519]
* improved exception name creation in py.test [45535]
* made that less threads are used in execnet [merge in 45539]
* removed lock required for atomical reporting issue displaying in py.test
[45545]
* removed globals from execnet [45541, 45547]
* refactored cleanup mechanics, made that setDaemon is set to 1 to make atexit
get called in 2.5 (py.execnet) [45548]
* fixed bug in joining threads in py.execnet's servemain [45549]
* refactored py.test.rsession tests to not rely on exact output format anymore
[45646]
* using repr() on test outcome [45647]
* added 'Reason' classes for py.test.skip() [45648, 45649]
* killed some unnecessary sanity check in py.test.collect [45655]
* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
usable by Administrators [45901]
* added support for locking and non-recursive commits to py.path.svnwc [45994]
* locking files in py.execnet to prevent CPython from segfaulting [46010]
* added export() method to py.path.svnurl
* fixed -d -x in py.test [47277]
* fixed argument concatenation problem in py.path.svnwc [49423]
* restore py.test behaviour that it exits with code 1 when there are failures
[49974]
* don't fail on html files that don't have an accompanying .txt file [50606]
* fixed 'utestconvert.py < input' [50645]
* small fix for code indentation in py.code.source [50755]
* fix _docgen.py documentation building [51285]
* improved checks for source representation of code blocks in py.test [51292]
* added support for passing authentication to py.path.svn* objects [52000,
52001]
* removed sorted() call for py.apigen tests in favour of [].sort() to support
Python 2.3 [52481]


@@ -0,0 +1,104 @@
py.test/pylib 1.3.1: new py.test.xfail, --maxfail, better reporting
===========================================================================
The pylib/py.test 1.3.1 release brings:
- the new imperative ``py.test.xfail()`` helper in order to have a test or
setup function result in an "expected failure"
- a new option ``--maxfail=NUM`` to stop the test run after some failures
- markers/decorators are now applicable to test classes (>=Python2.6)
- improved reporting, shorter tracebacks in several cases
- some simplified internals, more compatibility with Jython and PyPy
- bug fixes and various refinements
See the CHANGELOG entry below for more details and
http://pylib.org/install.html for installation instructions.
If you used older versions of py.test you should be able to upgrade
to 1.3.1 without changes to your test source code.
py.test is an automated testing tool working with Python2,
Python3, Jython and PyPy versions on all major operating systems. It
offers a no-boilerplate testing approach and has inspired other testing
tools and enhancements in the standard Python library for more than five
years. It has a simple and extensive plugin architecture, configurable
reporting and provides unique ways to make it fit to your testing
process and needs.
See http://pytest.org for more info.
cheers and have fun,
holger krekel
Changes between 1.3.0 and 1.3.1
==================================================
New features
++++++++++++++++++
- issue91: introduce new py.test.xfail(reason) helper
to imperatively mark a test as expected to fail. Can
be used from within setup and test functions. This is
useful especially for parametrized tests when certain
configurations are expected-to-fail. In this case the
declarative approach with the @py.test.mark.xfail cannot
be used as it would mark all configurations as xfail (see the sketch after this list).
- issue102: introduce new --maxfail=NUM option to stop
test runs after NUM failures. This is a generalization
of the '-x' or '--exitfirst' option which is now equivalent
to '--maxfail=1'. Both '-x' and '--maxfail' will
now also print a line near the end indicating the Interruption.
- issue89: allow py.test.mark decorators to be used on classes
(class decorators were introduced with python2.6) and
also allow to have multiple markers applied at class/module level
by specifying a list.
- improve and refine letter reporting in the progress bar:
. pass
f failed test
s skipped tests (reminder: use for dependency/platform mismatch only)
x xfailed test (test that was expected to fail)
X xpassed test (test that was expected to fail but passed)
You can use any combination of 'fsxX' with the '-r' extended
reporting option. The xfail/xpass results will show up as
skipped tests in the junitxml output - which also fixes
issue99.
- make py.test.cmdline.main() return the exitstatus instead of raising
SystemExit and also allow it to be called multiple times. This of
course requires that your application and tests are properly torn
down and don't have global state.
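A minimal sketch of the imperative ``py.test.xfail()`` helper described in
the first item of this list; the platform check and the reason string are
purely illustrative::

    import sys
    import py

    def test_unicode_repr_width():
        if sys.platform == "win32":          # illustrative condition only
            py.test.xfail("expected to fail on win32 terminals")
        assert len(repr(u"\u20ac")) > 0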
Fixes / Maintenance
++++++++++++++++++++++
- improved traceback presentation:
- improved and unified reporting for "--tb=short" option
- Errors during test module imports are much shorter (using --tb=short style)
- raises shows shorter more relevant tracebacks
- --fulltrace now more systematically makes traces longer / inhibits cutting
- improve support for raises and other dynamically compiled code by
manipulating python's linecache.cache instead of the previous
rather hacky way of creating custom code objects. This makes
it work seamlessly on Jython and PyPy where it previously didn't.
- fix issue96: make capturing more resilient against Control-C
interruptions (involved somewhat substantial refactoring
to the underlying capturing functionality to avoid race
conditions).
- fix chaining of conditional skipif/xfail decorators - using multiple
@py.test.mark.skipif(condition) decorators now works as expected,
including specific reporting of which condition led to skipping.
- fix issue95: late-import zlib so that it's not required
for general py.test startup.
- fix issue94: make reporting more robust against bogus source code
(and internally be more careful when presenting unexpected byte sequences)

View file

@ -0,0 +1,720 @@
py.test/pylib 1.3.2: API and reporting refinements, many fixes
===========================================================================
The pylib/py.test 1.3.2 release brings many bug fixes and some new
features. It was refined for and tested against the recently released
Python2.7 and remains compatible with the usual armada of interpreters
(Python2.4 through to Python3.1.2, Jython and PyPy). Note that for using
distributed testing features you'll need to upgrade to the jointly released
pytest-xdist-1.4 because of some internal refactorings.
See http://pytest.org for general documentation and below for
a detailed CHANGELOG.
cheers & particular thanks to Benjamin Peterson, Ronny Pfannschmidt
and all issue and patch contributors,
holger krekel
Changes between 1.3.1 and 1.3.2
==================================================
New features
++++++++++++++++++
- fix issue103: introduce py.test.raises as context manager, examples::
    with py.test.raises(ZeroDivisionError):
        x = 0
        1 / x

    with py.test.raises(RuntimeError) as excinfo:
        call_something()

    # you may do extra checks on excinfo.value|type|traceback here
(thanks Ronny Pfannschmidt)
- Funcarg factories can now dynamically apply a marker to a
test invocation. This is for example useful if a factory
provides parameters to a test which are expected-to-fail::
    def pytest_funcarg__arg(request):
        request.applymarker(py.test.mark.xfail(reason="flaky config"))
        ...

    def test_function(arg):
        ...
- improved error reporting on collection and import errors. This makes
use of a more general mechanism, namely that for custom test item/collect
nodes ``node.repr_failure(excinfo)`` is now uniformly called so that you can
override it to return a string error representation of your choice
which is going to be reported as a (red) string.
- introduce '--junitprefix=STR' option to prepend a prefix
to all reports in the junitxml file.
Bug fixes / Maintenance
++++++++++++++++++++++++++
- make tests and the ``pytest_recwarn`` plugin in particular fully compatible
with Python2.7 (if you use the ``recwarn`` funcarg, warnings will be enabled so that
you can properly check for their existence in a cross-python manner).
- refine --pdb: ignore xfailed tests, unify its TB-reporting and
don't display failures again at the end.
- fix assertion interpretation with the ** operator (thanks Benjamin Peterson)
- fix issue105 assignment on the same line as a failing assertion (thanks Benjamin Peterson)
- fix issue104 proper escaping for test names in junitxml plugin (thanks anonymous)
- fix issue57 -f|--looponfail to work with xpassing tests (thanks Ronny)
- fix issue92 collectonly reporter and --pastebin (thanks Benjamin Peterson)
- fix py.code.compile(source) to generate unique filenames
- fix assertion re-interp problems on PyPy, by deferring code
compilation to the (overridable) Frame.eval class. (thanks Amaury Forgeot)
- fix py.path.local.pyimport() to work with directories
- streamline py.path.local.mkdtemp implementation and usage
- don't print empty lines when showing junitxml-filename
- add optional boolean ignore_errors parameter to py.path.local.remove
- fix terminal writing on win32/python2.4
- py.process.cmdexec() now tries harder to return properly encoded unicode objects
on all python versions
- install plain py.test/py.which scripts also for Jython, this helps to
get canonical script paths in virtualenv situations
- make path.bestrelpath(path) return ".", note that when calling
X.bestrelpath the assumption is that X is a directory.
- make initial conftest discovery ignore "--" prefixed arguments
- fix resultlog plugin when used in a multicpu/multihost xdist situation
(thanks Jakub Gustak)
- perform distributed testing related reporting in the xdist-plugin
rather than having dist-related code in the generic py.test
distribution
- fix homedir detection on Windows
- ship distribute_setup.py version 0.6.13
Changes between 1.3.0 and 1.3.1
==================================================
New features
++++++++++++++++++
- issue91: introduce new py.test.xfail(reason) helper
to imperatively mark a test as expected to fail. Can
be used from within setup and test functions. This is
useful especially for parametrized tests when certain
configurations are expected-to-fail. In this case the
declarative approach with the @py.test.mark.xfail cannot
be used as it would mark all configurations as xfail.
- issue102: introduce new --maxfail=NUM option to stop
test runs after NUM failures. This is a generalization
of the '-x' or '--exitfirst' option which is now equivalent
to '--maxfail=1'. Both '-x' and '--maxfail' will
now also print a line near the end indicating the Interruption.
- issue89: allow py.test.mark decorators to be used on classes
(class decorators were introduced with python2.6) and
also allow to have multiple markers applied at class/module level
by specifying a list.
- improve and refine letter reporting in the progress bar:
. pass
f failed test
s skipped tests (reminder: use for dependency/platform mismatch only)
x xfailed test (test that was expected to fail)
X xpassed test (test that was expected to fail but passed)
You can use any combination of 'fsxX' with the '-r' extended
reporting option. The xfail/xpass results will show up as
skipped tests in the junitxml output - which also fixes
issue99.
- make py.test.cmdline.main() return the exitstatus instead of raising
SystemExit and also allow it to be called multiple times. This of
course requires that your application and tests are properly torn
down and don't have global state.
Fixes / Maintenance
++++++++++++++++++++++
- improved traceback presentation:
- improved and unified reporting for "--tb=short" option
- Errors during test module imports are much shorter (using --tb=short style)
- raises shows shorter more relevant tracebacks
- --fulltrace now more systematically makes traces longer / inhibits cutting
- improve support for raises and other dynamically compiled code by
manipulating python's linecache.cache instead of the previous
rather hacky way of creating custom code objects. This makes
it work seamlessly on Jython and PyPy where it previously didn't.
- fix issue96: make capturing more resilient against Control-C
interruptions (involved somewhat substantial refactoring
to the underlying capturing functionality to avoid race
conditions).
- fix chaining of conditional skipif/xfail decorators - using multiple
@py.test.mark.skipif(condition) decorators now works as expected,
including specific reporting of which condition led to skipping.
- fix issue95: late-import zlib so that it's not required
for general py.test startup.
- fix issue94: make reporting more robust against bogus source code
(and internally be more careful when presenting unexpected byte sequences)
Changes between 1.2.1 and 1.3.0
==================================================
- deprecate --report option in favour of a new shorter and easier to
remember -r option: it takes a string argument consisting of any
combination of 'xfsX' characters. They relate to the single chars
you see during the dotted progress printing and will print an extra line
per test at the end of the test run. This extra line indicates the exact
position or test ID that you can directly paste to the py.test cmdline in order
to re-run a particular test.
- allow external plugins to register new hooks via the new
pytest_addhooks(pluginmanager) hook. The new release of
the pytest-xdist plugin for distributed and looponfailing
testing requires this feature.
- add a new pytest_ignore_collect(path, config) hook to allow projects and
plugins to define exclusion behaviour for their directory structure -
for example you may define in a conftest.py this method::
    def pytest_ignore_collect(path):
        return path.check(link=1)
to prevent even a collection try of any tests in symlinked dirs.
- new pytest_pycollect_makemodule(path, parent) hook for
allowing customization of the Module collection object for a
matching test module.
- extend and refine xfail mechanism:
``@py.test.mark.xfail(run=False)`` do not run the decorated test
``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
specifying ``--runxfail`` on the command line virtually ignores xfail markers (see the sketch after this list)
- expose (previously internal) commonly useful methods:
py.io.get_terminal_width() -> return terminal width
py.io.ansi_print(...) -> print colored/bold text on linux/win32
py.io.saferepr(obj) -> return limited representation string
- expose test outcome related exceptions as py.test.skip.Exception,
py.test.raises.Exception etc., useful mostly for plugins
doing special outcome interpretation/tweaking
- (issue85) fix junitxml plugin to handle tests with non-ascii output
- fix/refine python3 compatibility (thanks Benjamin Peterson)
- fixes for making the jython/win32 combination work, note however:
jython2.5.1/win32 does not provide a command line launcher, see
http://bugs.jython.org/issue1491 . See pylib install documentation
for how to work around.
- fixes for handling of unicode exception values and unprintable objects
- (issue87) fix unboundlocal error in assertionold code
- (issue86) improve documentation for looponfailing
- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
- ship distribute_setup.py version 0.6.10
- added links to the new capturelog and coverage plugins
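A small sketch of the refined xfail markers from the list above; the test
bodies and the reason string are made up for illustration::

    import py

    @py.test.mark.xfail(run=False)
    def test_crashes_interpreter():
        assert 0                  # never executed because of run=False

    @py.test.mark.xfail(reason="issue not fixed yet")
    def test_known_broken():
        assert 0                  # runs, shows up as 'x' in the progress line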
Changes between 1.2.1 and 1.2.0
=====================================
- refined usage and options for "py.cleanup"::
py.cleanup # remove "*.pyc" and "*$py.class" (jython) files
py.cleanup -e .swp -e .cache # also remove files with these extensions
py.cleanup -s # remove "build" and "dist" directory next to setup.py files
py.cleanup -d # also remove empty directories
py.cleanup -a # synonym for "-s -d -e 'pip-log.txt'"
py.cleanup -n # dry run, only show what would be removed
- add a new option "py.test --funcargs" which shows available funcargs
and their help strings (docstrings on their respective factory function)
for a given test path
- display a short and concise traceback if a funcarg lookup fails
- early-load "conftest.py" files in non-dot first-level sub directories.
allows to conveniently keep and access test-related options in a ``test``
subdir and still add command line options.
- fix issue67: new super-short traceback-printing option: "--tb=line" will print a single line for each failing (python) test indicating its filename, lineno and the failure value
- fix issue78: always call python-level teardown functions even if the
according setup failed. This includes refinements for calling setup_module/class functions
which will now only be called once instead of the previous behaviour where they'd be called
multiple times if they raise an exception (including a Skipped exception). Any exception
will be recorded and associated with all tests in the corresponding module/class scope.
- fix issue63: assume <40 columns to be a bogus terminal width, default to 80
- fix pdb debugging to be in the correct frame on raises-related errors
- update apipkg.py to fix an issue where recursive imports might
unnecessarily break importing
- fix plugin links
Changes between 1.2 and 1.1.1
=====================================
- moved dist/looponfailing from py.test core into a new
separately released pytest-xdist plugin.
- new junitxml plugin: --junitxml=path will generate a junit style xml file
which is processable e.g. by the Hudson CI system.
- new option: --genscript=path will generate a standalone py.test script
which will not need any libraries installed. thanks to Ralf Schmitt.
- new option: --ignore will prevent specified path from collection.
Can be specified multiple times.
- new option: --confcutdir=dir will make py.test only consider conftest
files that are relative to the specified dir.
- new funcarg: "pytestconfig" is the pytest config object for access
to command line args and can now be easily used in a test (see the sketch after this list).
- install 'py.test' and `py.which` with a ``-$VERSION`` suffix to
disambiguate between Python3, python2.X, Jython and PyPy installed versions.
- new "pytestconfig" funcarg allows access to test config object
- new "pytest_report_header" hook can return additional lines
to be displayed at the header of a test run.
- (experimental) allow "py.test path::name1::name2::..." for pointing
to a test within a test collection directly. This might eventually
evolve as a full substitute to "-k" specifications.
- streamlined plugin loading: order is now as documented in
customize.html: setuptools, ENV, commandline, conftest.
also setuptools entry point names are turned to canonical names ("pytest_*")
- automatically skip tests that need 'capfd' but have no os.dup
- allow pytest_generate_tests to be defined in classes as well
- deprecate usage of 'disabled' attribute in favour of pytestmark
- deprecate definition of Directory, Module, Class and Function nodes
in conftest.py files. Use pytest collect hooks instead.
- collection/item node specific runtest/collect hooks are only called exactly
on matching conftest.py files, i.e. ones which are exactly below
the filesystem path of an item
- change: the first pytest_collect_directory hook to return something
will now prevent further hooks from being called.
- change: figleaf plugin now requires --figleaf to run. Also
change its long command line options to be a bit shorter (see py.test -h).
- change: pytest doctest plugin is now enabled by default and has a
new option --doctest-glob to set a pattern for file matches.
- change: remove internal py._* helper vars, only keep py._pydir
- robustify capturing to survive if custom pytest_runtest_setup
code failed and prevented the capturing setup code from running.
- make py.test.* helpers provided by default plugins visible early -
works transparently both for pydoc and for interactive sessions
which will regularly see e.g. py.test.mark and py.test.importorskip.
- simplify internal plugin manager machinery
- simplify internal collection tree by introducing a RootCollector node
- fix assert reinterpretation that sees a call containing "keyword=..."
- fix issue66: invoke pytest_sessionstart and pytest_sessionfinish
hooks on slaves during dist-testing, report module/session teardown
hooks correctly.
- fix issue65: properly handle dist-testing if no
execnet/py lib installed remotely.
- skip some install-tests if no execnet is available
- fix docs, fix internal bin/ script generation
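A sketch of the ``pytestconfig`` funcarg mentioned in the list above,
assuming the config object's ``getvalue()`` accessor; the option name is
just an example::

    def test_honours_verbosity(pytestconfig):
        # read a parsed command line value from the config object
        assert pytestconfig.getvalue("verbose") >= 0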
Changes between 1.1.1 and 1.1.0
=====================================
- introduce automatic plugin registration via 'pytest11'
entry points, using setuptools' pkg_resources.iter_entry_points
- fix py.test dist-testing to work with execnet >= 1.0.0b4
- re-introduce py.test.cmdline.main() for better backward compatibility
- svn paths: fix a bug with path.check(versioned=True) for svn paths,
allow '%' in svn paths, make svnwc.update() default to interactive mode
like in 1.0.x and add svnwc.update(interactive=False) to inhibit interaction.
- refine distributed tarball to contain tests and no pyc files
- try harder to have deprecation warnings for py.compat.* accesses
report a correct location
Changes between 1.1.0 and 1.0.2
=====================================
* adjust and improve docs
* remove py.rest tool and internal namespace - it was
never really advertised and can still be used with
the old release if needed. If there is interest
it could be revived into its own tool, I guess.
* fix issue48 and issue59: raise an Error if the module
from an imported test file does not seem to come from
the filepath - avoids "same-name" confusion that has
been reported repeatedly
* merged Ronny's nose-compatibility hacks: now
nose-style setup_module() and setup() functions are
supported
* introduce generalized py.test.mark function marking
* reshuffle / refine command line grouping
* deprecate parser.addgroup in favour of getgroup which creates option group
* add --report command line option that allows to control showing of skipped/xfailed sections
* generalized skipping: a new way to mark python functions with skipif or xfail
at function, class and modules level based on platform or sys-module attributes.
* extend py.test.mark decorator to allow for positional args
* introduce and test "py.cleanup -d" to remove empty directories
* fix issue #59 - robustify unittest test collection
* make bpython/help interaction work by adding an __all__ attribute
to ApiModule, cleanup initpkg
* use MIT license for pylib, add some contributors
* remove py.execnet code and substitute all usages with 'execnet' proper
* fix issue50 - cached_setup now caches according to expectations
for test functions with multiple arguments.
* merge Jarko's fixes, issue #45 and #46
* add the ability to specify a path for py.lookup to search in
* fix a funcarg cached_setup bug probably only occurring
in distributed testing and "module" scope with teardown.
* many fixes and changes for making the code base python3 compatible,
many thanks to Benjamin Peterson for helping with this.
* consolidate builtins implementation to be compatible with >=2.3,
add helpers to ease keeping 2 and 3k compatible code
* deprecate py.compat.doctest|subprocess|textwrap|optparse
* deprecate py.magic.autopath, remove py/magic directory
* move pytest assertion handling to py/code and a pytest_assertion
plugin, add "--no-assert" option, deprecate py.magic namespaces
in favour of (less) py.code ones.
* consolidate and cleanup py/code classes and files
* cleanup py/misc, move tests to bin-for-dist
* introduce delattr/delitem/delenv methods to py.test's monkeypatch funcarg (see the sketch after this list)
* consolidate py.log implementation, remove old approach.
* introduce py.io.TextIO and py.io.BytesIO for distinguishing between
text/unicode and byte-streams (uses underlying standard lib io.*
if available)
* make py.unittest_convert helper script available which converts "unittest.py"
style files into the simpler assert/direct-test-classes py.test/nosetests
style. The script was written by Laura Creighton.
* simplified internal localpath implementation
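A short sketch of the new monkeypatch deletion helpers listed above; the
dictionary, the environment variable and the ``raising`` flag are
illustrative assumptions::

    import os

    def test_without_pager(monkeypatch):
        fake_settings = {"pager": "less"}
        monkeypatch.delitem(fake_settings, "pager")   # undone after the test
        monkeypatch.delenv("PAGER", raising=False)    # assumes a 'raising' flag
        assert "pager" not in fake_settings
        assert "PAGER" not in os.environ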
Changes between 1.0.1 and 1.0.2
=====================================
* fixing packaging issues, triggered by fedora redhat packaging,
also added doc, examples and contrib dirs to the tarball.
* added a documentation link to the new django plugin.
Changes between 1.0.0 and 1.0.1
=====================================
* added a 'pytest_nose' plugin which handles nose.SkipTest,
nose-style function/method/generator setup/teardown and
tries to report functions correctly.
* capturing of unicode writes or encoded strings to sys.stdout/err
works better; also terminal writing was adapted and somewhat
unified between windows and linux.
* improved documentation layout and content a lot
* added a "--help-config" option to show conftest.py / ENV-var names for
all longopt cmdline options, and some special conftest.py variables.
renamed 'conf_capture' conftest setting to 'option_capture' accordingly.
* fix issue #27: better reporting on non-collectable items given on commandline
(e.g. pyc files)
* fix issue #33: added --version flag (thanks Benjamin Peterson)
* fix issue #32: adding support for "incomplete" paths to wcpath.status()
* "Test" prefixed classes are *not* collected by default anymore if they
have an __init__ method
* monkeypatch setenv() now accepts a "prepend" parameter
* improved reporting of collection error tracebacks
* simplified multicall mechanism and plugin architecture,
renamed some internal methods and argnames
Changes between 1.0.0b9 and 1.0.0
=====================================
* more terse reporting: try to show filesystem paths relative to the current dir
* improve xfail output a bit
Changes between 1.0.0b8 and 1.0.0b9
=====================================
* cleanly handle and report final teardown of test setup
* fix svn-1.6 compat issue with py.path.svnwc().versioned()
(thanks Wouter Vanden Hove)
* setup/teardown or collection problems now show as ERRORs
or with big "E"'s in the progress lines. They are reported
and counted separately.
* dist-testing: properly handle test items that get locally
collected but cannot be collected on the remote side - often
due to platform/dependency reasons
* simplified py.test.mark API - see keyword plugin documentation
* integrate better with logging: capturing now by default captures
test functions and their immediate setup/teardown in a single stream
* capsys and capfd funcargs now have a readouterr() and a close() method
(underlyingly py.io.StdCapture/FD objects are used which grew a
readouterr() method as well to return snapshots of captured out/err); see the sketch after this list
* make assert-reinterpretation work better with comparisons not
returning bools (reported with numpy; thanks Maciej Fijalkowski)
* reworked per-test output capturing into the pytest_iocapture.py plugin
and thus removed capturing code from config object
* item.repr_failure(excinfo) instead of item.repr_failure(excinfo, outerr)
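A sketch of the ``capsys`` funcarg and its ``readouterr()`` snapshot method
mentioned above::

    def test_greeting(capsys):
        print("hello")
        out, err = capsys.readouterr()    # snapshot of captured stdout/stderr
        assert out == "hello\n"
        assert err == ""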
Changes between 1.0.0b7 and 1.0.0b8
=====================================
* pytest_unittest-plugin is now enabled by default
* introduced pytest_keyboardinterrupt hook and
refined the pytest_sessionfinish hook, added tests.
* workaround a buggy logging module interaction ("closing already closed
files"). Thanks to Sridhar Ratnakumar for triggering.
* if plugins use "py.test.importorskip" for importing
a dependency only a warning will be issued instead
of exiting the testing process.
* many improvements to docs:
- refined funcargs doc, use the term "factory" instead of "provider"
- added a new talk/tutorial doc page
- better download page
- better plugin docstrings
- added new plugins page and automatic doc generation script
* fixed teardown problem related to partially failing funcarg setups
(thanks MrTopf for reporting), "pytest_runtest_teardown" is now
always invoked even if the "pytest_runtest_setup" failed.
* tweaked doctest output for docstrings in py modules,
thanks Radomir.
Changes between 1.0.0b3 and 1.0.0b7
=============================================
* renamed py.test.xfail back to py.test.mark.xfail to avoid
two ways to decorate for xfail
* re-added py.test.mark decorator for setting keywords on functions
(it was actually documented so removing it was not nice)
* remove scope-argument from request.addfinalizer() because
request.cached_setup has the scope arg. TOOWTDI.
* perform setup finalization before reporting failures
* apply modified patches from Andreas Kloeckner to allow
test functions to have no func_code (#22) and to make
"-k" and function keywords work (#20)
* apply patch from Daniel Peolzleithner (issue #23)
* resolve issue #18, multiprocessing.Manager() and
redirection clash
* make __name__ == "__channelexec__" for remote_exec code
Changes between 1.0.0b1 and 1.0.0b3
=============================================
* plugin classes are removed: one now defines
hooks directly in conftest.py or global pytest_*.py
files.
* added new pytest_namespace(config) hook that allows
to inject helpers directly to the py.test.* namespace.
* documented and refined many hooks
* added new style of generative tests via
pytest_generate_tests hook that integrates
well with function arguments.
Changes between 0.9.2 and 1.0.0b1
=============================================
* introduced new "funcarg" setup method,
see doc/test/funcarg.txt
* introduced plugin architecture and many
new py.test plugins, see
doc/test/plugins.txt
* teardown_method is now guaranteed to get
called after a test method has run.
* new method: py.test.importorskip(mod,minversion)
will either import or call py.test.skip() (see the sketch after this list)
* completely revised internal py.test architecture
* new py.process.ForkedFunc object allowing to
fork execution of a function to a sub process
and getting a result back.
XXX lots of things missing here XXX
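A sketch of the ``py.test.importorskip`` helper introduced above; the module
name and the minimum version are illustrative::

    import py

    docutils = py.test.importorskip("docutils", minversion="0.3")

    def test_has_core_module():
        assert hasattr(docutils, "core")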
Changes between 0.9.1 and 0.9.2
===============================
* refined installation and metadata, created new setup.py,
now based on setuptools/ez_setup (thanks to Ralf Schmitt
for his support).
* improved the way of making py.* scripts available in
windows environments, they are now added to the
Scripts directory as ".cmd" files.
* py.path.svnwc.status() now is more complete and
uses xml output from the 'svn' command if available
(Guido Wesdorp)
* fix for py.path.svn* to work with svn 1.5
(Chris Lamb)
* fix path.relto(otherpath) method on windows to
use normcase for checking if a path is relative.
* py.test's traceback is better parseable from editors
(follows the filenames:LINENO: MSG convention)
(thanks to Osmo Salomaa)
* fix to javascript-generation, "py.test --runbrowser"
should work more reliably now
* removed previously accidentally added
py.test.broken and py.test.notimplemented helpers.
* there now is a py.__version__ attribute
Changes between 0.9.0 and 0.9.1
===============================
This is a fairly complete list of changes between 0.9 and 0.9.1, which can
serve as a reference for developers.
* allowing + signs in py.path.svn urls [39106]
* fixed support for Failed exceptions without excinfo in py.test [39340]
* added support for killing processes for Windows (as well as platforms that
support os.kill) in py.misc.killproc [39655]
* added setup/teardown for generative tests to py.test [40702]
* added detection of FAILED TO LOAD MODULE to py.test [40703, 40738, 40739]
* fixed problem with calling .remove() on wcpaths of non-versioned files in
py.path [44248]
* fixed some import and inheritance issues in py.test [41480, 44648, 44655]
* fail to run greenlet tests when pypy is available, but without stackless
[45294]
* small fixes in rsession tests [45295]
* fixed issue with 2.5 type representations in py.test [45483, 45484]
* made the display of internal reporting issues atomic in py.test
[45518]
* made the py.lookup script ignore non-existing files [45519]
* improved exception name creation in py.test [45535]
* reduced the number of threads used in execnet [merge in 45539]
* removed the lock required for atomic display of reporting issues in py.test
[45545]
* removed globals from execnet [45541, 45547]
* refactored cleanup mechanics; setDaemon is now set to 1 so that atexit
gets called on Python 2.5 (py.execnet) [45548]
* fixed bug in joining threads in py.execnet's servemain [45549]
* refactored py.test.rsession tests to not rely on exact output format anymore
[45646]
* using repr() on test outcome [45647]
* added 'Reason' classes for py.test.skip() [45648, 45649]
* killed some unnecessary sanity check in py.test.collect [45655]
* avoid using os.tmpfile() in py.io.fdcapture because on Windows it's only
usable by Administrators [45901]
* added support for locking and non-recursive commits to py.path.svnwc [45994]
* locking files in py.execnet to prevent CPython from segfaulting [46010]
* added export() method to py.path.svnurl
* fixed -d -x in py.test [47277]
* fixed argument concatenation problem in py.path.svnwc [49423]
* restore py.test behaviour that it exits with code 1 when there are failures
[49974]
* don't fail on html files that don't have an accompanying .txt file [50606]
* fixed 'utestconvert.py < input' [50645]
* small fix for code indentation in py.code.source [50755]
* fix _docgen.py documentation building [51285]
* improved checks for source representation of code blocks in py.test [51292]
* added support for passing authentication to py.path.svn* objects [52000,
52001]
* removed sorted() call for py.apigen tests in favour of [].sort() to support
Python 2.3 [52481]

View file

@ -0,0 +1,26 @@
py.test/pylib 1.3.3: windows and other fixes
===========================================================================
pylib/py.test 1.3.3 is a minor bugfix release featuring some improvements
and fixes. See changelog_ for full history.
have fun,
holger krekel
.. _changelog: ../changelog.html
Changes between 1.3.2 and 1.3.3
==================================================
- fix issue113: assertion representation problem with triple-quoted strings
(and possibly other cases)
- make conftest loading detect that a conftest file with the same
content was already loaded, avoids surprises in nested directory structures
which can be produced e.g. by Hudson. It probably removes the need to use
--confcutdir in most cases.
- fix terminal coloring for win32
(thanks Michael Foord for reporting)
- fix weirdness: make terminal width detection work on stdout instead of stdin
(thanks Armin Ronacher for reporting)
- remove trailing whitespace in all py/text distribution files

View file

@ -0,0 +1,22 @@
py.test/pylib 1.3.4: fixes and new native traceback option
===========================================================================
pylib/py.test 1.3.4 is a minor maintenance release mostly containing bug fixes
and a new "--tb=native" traceback option to show "normal" Python standard
tracebacks instead of the py.test enhanced tracebacks. See below for more
change info and http://pytest.org for more general information on features
and configuration of the testing tool.
Thanks to the issue reporters and generally to Ronny Pfannschmidt for help.
cheers,
holger krekel
Changes between 1.3.3 and 1.3.4
==================================================
- fix issue111: improve install documentation for windows
- fix issue119: fix custom collectability of __init__.py as a module
- fix issue116: --doctestmodules work with __init__.py files as well
- fix issue115: unify internal exception passthrough/catching/GeneratorExit
- fix issue118: new --tb=native for presenting cpython-standard exceptions

View file

@ -0,0 +1,47 @@
.. _`release-1.4.0`:
py-1.4.0: cross-python lib for path, code, io, ... manipulations
===========================================================================
"py" is a small library comprising APIs for filesystem and svn path
manipulations, dynamic code construction and introspection, a Py2/Py3
compatibility namespace ("py.builtin"), IO capturing, terminal colored printing
(on windows and linux), ini-file parsing and a lazy import mechanism.
It runs unmodified on all Python interpreters compatible with Python2.4 up
to Python 3.2. The general goal with "py" is to provide stable APIs
for some common tasks that are continuously tested against many Python
interpreters and thus also to help with transitions. Here are some docs:
http://pylib.org
NOTE: The prior py-1.3.X versions contained "py.test" which now comes
as its own separate "pytest" distribution and was just released
as "pytest-2.0.0", see here for the revamped docs:
http://pytest.org
And "py.cleanup|py.lookup|py.countloc" etc. helpers are now part of
the pycmd distribution, see http://pypi.python.org/pypi/pycmd
This makes "py-1.4.0" a simple library which does not install
any command line utilities anymore.
cheers,
holger
Changes between 1.3.4 and 1.4.0
-------------------------------------
- py.test was moved to a separate "pytest" package. What remains is
a stub hook which will proxy ``import py.test`` to ``pytest``.
- all command line tools ("py.cleanup/lookup/countloc/..." moved
to "pycmd" package)
- removed the old and deprecated "py.magic" namespace
- use apipkg-1.1 and make py.apipkg.initpkg|ApiModule available
- add py.iniconfig module for brain-dead easy ini-config file parsing
- introduce py.builtin.any()
- path objects have a .dirname attribute now (equivalent to
os.path.dirname(path))
- path.visit() accepts breadthfirst (bf) and sort options
- remove deprecated py.compat namespace
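A short sketch exercising the new ``.dirname`` attribute and the ``visit()``
options listed above; the paths are made up::

    import py

    p = py.path.local("/tmp/example/data.txt")      # hypothetical path
    assert p.dirname == "/tmp/example"

    for pyfile in py.path.local(".").visit(fil="*.py", bf=True, sort=True):
        print(pyfile)                               # breadth-first, sorted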

View file

@ -0,0 +1,47 @@
.. _`release-1.4.1`:
py-1.4.1: cross-python lib for fs path, code, io, ... manipulations
===========================================================================
This is a bug fix release of the "py" lib, see below for detailed changes.
The py lib is a small library comprising APIs for filesystem and svn path
manipulations, dynamic code construction and introspection, a Py2/Py3
compatibility namespace ("py.builtin"), IO capturing, terminal colored printing
(on windows and linux), ini-file parsing and a lazy import mechanism.
It runs unmodified on all Python interpreters compatible with Python2.4 up
to Python 3.2, PyPy and Jython. The general goal with "py" is to
provide stable APIs for some common tasks that are continuously tested
against many Python interpreters and thus also to help with transitions. Here
are some docs:
http://pylib.org
NOTE: The prior py-1.3.X versions contained "py.test" which since py-1.4.0
comes as its own separate "pytest" distribution, see:
http://pytest.org
Also, the "py.cleanup|py.lookup|py.countloc" helpers are now part of
the pycmd distribution, see http://pypi.python.org/pypi/pycmd
Changes between 1.4.0 and 1.4.1
==================================================
- fix issue1 - py.error.* classes to be pickleable
- fix issue2 - on windows32 use PATHEXT as the list of potential
extensions to find binaries with py.path.local.sysfind(commandname)
- fix (pytest-) issue10 and refine assertion reinterpretation
to avoid breaking if the __nonzero__ of an object fails
- fix (pytest-) issue17 where python3 does not like star-imports,
leading to misrepresentation of import-errors in test modules
- fix ``py.error.*`` attribute access on pypy
- allow path.samefile(arg) to succeed when arg is a relative filename
- fix (pytest-) issue20 path.samefile(relpath) works as expected now
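A sketch of the ``sysfind`` and ``samefile`` behaviour referred to above;
the command and file names are illustrative::

    import py

    # searches PATH (and, per the fix above, PATHEXT on windows)
    python_exe = py.path.local.sysfind("python")
    if python_exe is not None:
        print(python_exe)

    p = py.path.local("setup.py")
    if p.check():                       # only meaningful if the file exists
        assert p.samefile("setup.py")   # relative filename now accepted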

View file

@ -0,0 +1,16 @@
=============
Release notes
=============
Contents:
.. toctree::
:maxdepth: 2
.. include: release-1.1.0
.. include: release-1.0.2
release-1.0.1
release-1.0.0
release-0.9.2
release-0.9.0

View file

@ -0,0 +1,3 @@
.. _`changelog`:
.. include:: ../CHANGELOG

View file

@ -0,0 +1,150 @@
================================================================================
py.code: higher level python code and introspection objects
================================================================================
``py.code`` provides higher level APIs and objects for Code, Frame, Traceback,
ExceptionInfo and source code construction. The ``py.code`` library
tries to simplify accessing the code objects as well as creating them.
There is a small set of interfaces a user needs to deal with, all nicely
bundled together, and with a rich set of 'Pythonic' functionality.
Contents of the library
=======================
Every object in the ``py.code`` library wraps a Python object related
to code objects, source code, frames and tracebacks: the ``py.code.Code``
class wraps code objects, ``py.code.Source`` source snippets,
``py.code.Traceback`` exception tracebacks, ``py.code.Frame`` frame
objects (as found in e.g. tracebacks) and ``py.code.ExceptionInfo`` the
tuple provided by sys.exc_info() (containing exception and traceback
information when an exception occurs). Also in the library is a helper function
``py.code.compile()`` that provides the same functionality as Python's
built-in 'compile()' function, but returns a wrapped code object.
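A tiny sketch of ``py.code.compile()``, assuming it mirrors the signature of
the built-in ``compile()`` (including the ``mode`` argument)::

    >>> import py
    >>> co = py.code.compile("6 * 7", mode="eval")
    >>> eval(co)
    42
    >>> isinstance(py.code.Code(co), py.code.Code)
    True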
The wrappers
============
``py.code.Code``
-------------------
Code objects are instantiated with a code object or a callable as argument,
and provide functionality to compare themselves with other Code objects, get to
the source file or its contents, create new Code objects from scratch, etc.
A quick example::
>>> import py
>>> c = py.code.Code(py.path.local.read)
>>> c.path.basename
'common.py'
>>> isinstance(c.source(), py.code.Source)
True
>>> str(c.source()).split('\n')[0]
"def read(self, mode='r'):"
.. autoclass:: py.code.Code
:members:
:inherited-members:
``py.code.Source``
---------------------
Source objects wrap snippets of Python source code, providing a simple yet
powerful interface to read, deindent, slice, compare, compile and manipulate
them, things that are not so easy in core Python.
Example::
>>> s = py.code.Source("""\
...     def foo():
...         print "foo"
... """)
>>> str(s).startswith('def') # automatic de-indentation!
True
>>> s.isparseable()
True
>>> sub = s.getstatement(1) # get the statement starting at line 1
>>> str(sub).strip() # XXX why is the strip() required?!?
'print "foo"'
.. autoclass:: py.code.Source
:members:
``py.code.Traceback``
------------------------
Tracebacks are usually not very easy to examine: you need to access certain
somewhat hidden attributes of the traceback's items (resulting in expressions
such as 'fname = tb.tb_next.tb_frame.f_code.co_filename'). The Traceback
interface (and its TracebackItem children) tries to improve this.
Example::
>>> import sys
>>> try:
...     py.path.local(100) # illegal argument
... except:
...     exc, e, tb = sys.exc_info()
>>> t = py.code.Traceback(tb)
>>> first = t[1] # get the second entry (first is in this doc)
>>> first.path.basename # second is in py/path/local.py
'local.py'
>>> isinstance(first.statement, py.code.Source)
True
>>> str(first.statement).strip().startswith('raise ValueError')
True
.. autoclass:: py.code.Traceback
:members:
``py.code.Frame``
--------------------
Frame wrappers are used in ``py.code.Traceback`` items, and will usually not
directly be instantiated. They provide some nice methods to evaluate code
'inside' the frame (using the frame's local variables), get to the underlying
code (frames have a code attribute that points to a ``py.code.Code`` object)
and examine the arguments.
Example (using the 'first' TracebackItem instance created above)::
>>> frame = first.frame
>>> isinstance(frame.code, py.code.Code)
True
>>> isinstance(frame.eval('self'), py.path.local)
True
>>> [namevalue[0] for namevalue in frame.getargs()]
['cls', 'path']
.. autoclass:: py.code.Frame
:members:
``py.code.ExceptionInfo``
----------------------------
A wrapper around the tuple returned by sys.exc_info() (it will call sys.exc_info()
itself if the tuple is not provided as an argument); it provides some handy
attributes to easily access the traceback and exception string.
Example::
>>> import sys
>>> try:
...     foobar()
... except:
...     excinfo = py.code.ExceptionInfo()
>>> excinfo.typename
'NameError'
>>> isinstance(excinfo.traceback, py.code.Traceback)
True
>>> excinfo.exconly()
"NameError: name 'foobar' is not defined"
.. autoclass:: py.code.ExceptionInfo
:members:

View file

@ -0,0 +1,263 @@
# -*- coding: utf-8 -*-
#
# py documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 21 08:30:10 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'py'
copyright = u'2010, holger krekel et. al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
import py
release = py.__version__
version = ".".join(release.split(".")[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'py'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'py.tex', u'py Documentation',
u'holger krekel et. al.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'py', u'py Documentation',
[u'holger krekel et. al.'], 1)
]
autodoc_member_order = "bysource"
autodoc_default_flags = "inherited-members"
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'py'
epub_author = u'holger krekel et. al.'
epub_publisher = u'holger krekel et. al.'
epub_copyright = u'2010, holger krekel et. al.'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}

View file

@ -0,0 +1,18 @@
<html>
<head>
<meta http-equiv="refresh" content=" 1 ; URL=install.html" />
</head>
<body>
<script type="text/javascript">
var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
</script>
<script type="text/javascript">
try {
var pageTracker = _gat._getTracker("UA-7597274-3");
pageTracker._trackPageview();
} catch(err) {}</script>
</body>
</html>

View file

@ -0,0 +1,13 @@
from py.xml import html
paras = "First Para", "Second para"
doc = html.html(
    html.head(
        html.meta(name="Content-Type", value="text/html; charset=latin1")),
    html.body(
        [html.p(p) for p in paras]))

print unicode(doc).encode('latin1')

View file

@ -0,0 +1,23 @@
import py
html = py.xml.html
class my(html):
    "a custom style"

    class body(html.body):
        style = html.Style(font_size = "120%")

    class h2(html.h2):
        style = html.Style(background = "grey")

    class p(html.p):
        style = html.Style(font_weight="bold")

doc = my.html(
    my.head(),
    my.body(
        my.h2("hello world"),
        my.p("bold as bold can")
    )
)

print doc.unicode(indent=2)

View file

@ -0,0 +1,17 @@
import py
class ns(py.xml.Namespace):
    pass

doc = ns.books(
    ns.book(
        ns.author("May Day"),
        ns.title("python for java programmers"),),
    ns.book(
        ns.author("why", class_="somecssclass"),
        ns.title("Java for Python programmers"),),
    publisher="N.N",
)

print doc.unicode(indent=2).encode('utf8')

View file

@ -0,0 +1,172 @@
==================================
Frequently Asked Questions
==================================
.. contents::
:local:
:depth: 2
On naming, nosetests, licensing and magic
===========================================
Why the ``py`` naming? Why not ``pytest``?
----------------------------------------------------
This mostly has historical reasons - the aim is
to get away from the somewhat questionable 'py' name
at some point. These days (2010) the 'py' library
almost completely comprises APIs that are used
by the ``py.test`` tool. There also are some
other uses, e.g. of the ``py.path.local()`` and
other path implementations. So it requires some
work to factor them out and do the shift.
Why the ``py.test`` naming?
------------------------------------
Because of TAB-completion under Bash/shells. If you hit
``py.<TAB>`` you'll get a list of available development
tools that all share the ``py.`` prefix. Another motivation
was to unify the package ("py.test") and tool filename.
What's py.test's relation to ``nosetests``?
---------------------------------------------
py.test and nose_ share basic philosophy when it comes
to running Python tests. In fact,
with py.test-1.1.0 it is even easier to run many test suites
that currently work with ``nosetests``. nose_ was created
as a clone of ``py.test`` when py.test was in the ``0.8`` release
cycle so some of the newer features_ introduced with py.test-1.0
and py.test-1.1 have no counterpart in nose_.
.. _nose: http://somethingaboutorange.com/mrl/projects/nose/0.11.1/
.. _features: test/features.html
.. _apipkg: http://pypi.python.org/pypi/apipkg
What's this "magic" with py.test?
----------------------------------------
issues where people have used the term "magic" in the past:
* `py/__init__.py`_ uses the apipkg_ mechanism for lazy-importing
and full control on what API you get when importing "import py".
* when an ``assert`` statement fails, py.test re-interprets the expression
to show intermediate values if a test fails. If your expression
has side effects the intermediate values may not be the same, obfuscating
the initial error (this is also explained at the command line if it happens).
``py.test --no-assert`` turns off assert re-interpretation.
Sidenote: it is good practice to avoid asserts with side effects.
.. _`py namespaces`: index.html
.. _`py/__init__.py`: http://bitbucket.org/hpk42/py-trunk/src/trunk/py/__init__.py
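A hedged illustration of the side-effect pitfall described above; the list
and the test are made up::

    items = [1, 2, 3]

    def test_pop():
        # the assert pops an element; if the expression is re-evaluated while
        # building the failure report, it sees a different intermediate value
        assert items.pop() == 2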
Where does my ``py.test`` come/import from?
----------------------------------------------
You can issue::
py.test --version
which tells you both version and import location of the tool.
function arguments, parametrized tests and setup
====================================================
.. _funcargs: test/funcargs.html
Is using funcarg- versus xUnit-based setup a style question?
---------------------------------------------------------------
It depends. For simple applications, or for people experienced
with nose_ or unittest-style test setup, using `xUnit style setup`_
makes some sense. For larger test suites, parametrized testing
or setup of complex test resources using funcargs_ is recommended.
Moreover, funcargs are ideal for writing advanced test support
code (like e.g. the monkeypatch_, the tmpdir_ or capture_ funcargs)
because the support code can register setup/teardown functions
in a managed class/module/function scope.
.. _monkeypatch: test/plugin/monkeypatch.html
.. _tmpdir: test/plugin/tmpdir.html
.. _capture: test/plugin/capture.html
.. _`xUnit style setup`: test/xunit_setup.html
.. _`pytest_nose`: test/plugin/nose.html
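A sketch of the kind of support code meant above, using
``request.cached_setup`` to manage one resource per module; the resource
(a throwaway file) and its path are stand-ins::

    def pytest_funcarg__db(request):
        return request.cached_setup(
            setup=lambda: open("/tmp/example.db", "w"),   # stand-in resource
            teardown=lambda f: f.close(),
            scope="module")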
.. _`why pytest_pyfuncarg__ methods?`:
Why the ``pytest_funcarg__*`` name for funcarg factories?
---------------------------------------------------------------
When experimenting with funcargs an explicit registration mechanism
was considered. But lacking a good use case for this indirection and
flexibility we decided to go for `Convention over Configuration`_ and
allow the factory to be specified directly. Besides removing the need
for an indirection, it allows you to "grep" for ``pytest_funcarg__MYARG``
and safely find all factory functions for the ``MYARG`` function
argument. It helps to mitigate the de-coupling of function
argument usage and creation.
.. _`Convention over Configuration`: http://en.wikipedia.org/wiki/Convention_over_Configuration
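A minimal illustration of the convention: grepping for
``pytest_funcarg__myarg`` finds the factory for the ``myarg`` argument used
below (the value is made up)::

    def pytest_funcarg__myarg(request):
        return 42

    def test_uses_myarg(myarg):
        assert myarg == 42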
Can I yield multiple values from a factory function?
-----------------------------------------------------
There are two conceptual reasons why yielding from a factory function
is not possible:
* Calling factories for obtaining test function arguments
is part of setting up and running a test. At that
point it is not possible to add new test calls to
the test collection anymore.
* If multiple factories yielded values there would
be no natural place to determine the combination
policy - in real-world examples some combinations
often should not run.
Use the `pytest_generate_tests`_ hook to solve both issues
and implement the `parametrization scheme of your choice`_.
.. _`pytest_generate_tests`: test/funcargs.html#parametrizing-tests
.. _`parametrization scheme of your choice`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/
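For illustration, here is a small sketch of such a hook using the
``metafunc.addcall()`` API of this py.test generation (the ``numiter``
argument name is made up)::

    # conftest.py
    def pytest_generate_tests(metafunc):
        # runs at collection time: decide here which combinations to generate
        if "numiter" in metafunc.funcargnames:
            for i in range(3):
                metafunc.addcall(funcargs=dict(numiter=i))

    # test_example.py
    def test_double(numiter):
        assert numiter * 2 == numiter + numiter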
py.test interaction with other packages
===============================================
Issues with py.test, multiprocess and setuptools?
------------------------------------------------------------
On Windows the multiprocess package will instantiate subprocesses
by pickling and thus implicitly re-import a lot of local modules.
Unfortunately, setuptools-0.6.11 does not protect its generated command
line script with an ``if __name__=='__main__'`` guard. This leads to infinite
recursion when running a test that instantiates Processes.
There are these workarounds:
* `install Distribute`_ as a drop-in replacement for setuptools
and install py.test
* `directly use a checkout`_ which avoids all setuptools/Distribute
installation
If those options are not available to you, you may also manually
fix the script that is created by setuptools by inserting an
``if __name__ == '__main__'`` guard. Or you can create a "pytest.py"
script with this content and invoke it with your Python version::
import py
if __name__ == '__main__':
py.cmdline.pytest()
.. _`directly use a checkout`: install.html#directly-use-a-checkout
.. _`install distribute`: http://pypi.python.org/pypi/distribute#installation-instructions

Binary file not shown.


View file

@ -0,0 +1,43 @@
.. py documentation master file, created by
sphinx-quickstart on Thu Oct 21 08:30:10 2010.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to py's documentation!
=================================
see :ref:`CHANGELOG <changelog>` for latest changes.
.. note::
Since version 1.4, the testing tool "py.test" is part of its own `pytest distribution`_.
.. _`pytest distribution`: http://pytest.org
Contents:
.. toctree::
   :maxdepth: 2

   install
   path
   code
   io
   log
   xml
   misc
.. toctree::
:hidden:
announce/release-2.0.0
changelog
announce/*
Indices and tables
==================
* :ref:`genindex`
* :ref:`search`

View file

@ -0,0 +1,88 @@
.. _`py`:
.. _`index page`: http://pypi.python.org/pypi/py/
installation info in a nutshell
===================================================
**PyPI name**: py_
**Pythons**: CPython 2.6, 2.7, 3.3, 3.4, PyPy-2.3
**Operating systems**: Linux, Windows, OSX, Unix
**Requirements**: setuptools_ or Distribute_
**Installers**: ``easy_install`` and ``pip``
**hg repository**: https://bitbucket.org/hpk42/py
easy install or pip ``py``
-----------------------------
Both `Distribute`_ and setuptools_ provide the ``easy_install``
installation tool with which you can type into a command line window::
easy_install -U py
to install the latest release of the py lib. The ``-U`` switch
will trigger an upgrade if you already have an older version installed.
.. note::
As of version 1.4 py does not contain py.test anymore - you
need to install the new `pytest`_ distribution.
.. _pytest: http://pytest.org
Working from version control or a tarball
-----------------------------------------------
To follow development or start experiments, check out the
complete code and documentation source with mercurial_::
hg clone https://bitbucket.org/hpk42/py
Development takes place on the 'trunk' branch.
You can also go to the Python Package Index and
download and unpack a TAR file::
http://pypi.python.org/pypi/py/
activating a checkout with setuptools
--------------------------------------------
With a working `Distribute`_ or setuptools_ installation you can type::
python setup.py develop
in order to work inline with the tools and the lib of your checkout.
.. _`no-setuptools`:
.. _`directly use a checkout`:
.. _`setuptools`: http://pypi.python.org/pypi/setuptools
Mailing list and issue tracker
--------------------------------------
- `py-dev developers list`_ and `commit mailing list`_.
- #pylib on irc.freenode.net IRC channel for random questions.
- `bitbucket issue tracker`_: use it to report
  bugs or request features.
.. _`bitbucket issue tracker`: http://bitbucket.org/hpk42/py/issues/
.. _codespeak: http://codespeak.net/
.. _`py-dev`:
.. _`development mailing list`:
.. _`py-dev developers list`: http://codespeak.net/mailman/listinfo/py-dev
.. _`py-svn`:
.. _`commit mailing list`: http://codespeak.net/mailman/listinfo/py-svn
.. include:: links.inc

View file

@ -0,0 +1,59 @@
=======
py.io
=======
The 'py' lib provides helper classes for capturing IO during
execution of a program.
IO Capturing examples
===============================================
``py.io.StdCapture``
---------------------------
Basic Example::
>>> import py
>>> capture = py.io.StdCapture()
>>> print "hello"
>>> out,err = capture.reset()
>>> out.strip() == "hello"
True
For calling functions you may use a shortcut::
>>> import py
>>> def f(): print "hello"
>>> res, out, err = py.io.StdCapture.call(f)
>>> out.strip() == "hello"
True
``py.io.StdCaptureFD``
---------------------------
If you also want to capture writes to the stdout/stderr
filedescriptors you may invoke::
>>> import py, sys
>>> capture = py.io.StdCaptureFD(out=False, in_=False)
>>> sys.stderr.write("world")
>>> out,err = capture.reset()
>>> err
'world'
py.io object reference
============================
.. autoclass:: py.io.StdCaptureFD
:members:
:inherited-members:
.. autoclass:: py.io.StdCapture
:members:
:inherited-members:
.. autoclass:: py.io.TerminalWriter
:members:
:inherited-members:

View file

@ -0,0 +1,16 @@
.. _`skipping plugin`: plugin/skipping.html
.. _`funcargs mechanism`: funcargs.html
.. _`doctest.py`: http://docs.python.org/library/doctest.html
.. _`xUnit style setup`: xunit_setup.html
.. _`pytest_nose`: plugin/nose.html
.. _`reStructured Text`: http://docutils.sourceforge.net
.. _`Python debugger`: http://docs.python.org/lib/module-pdb.html
.. _nose: http://somethingaboutorange.com/mrl/projects/nose/
.. _pytest: http://pypi.python.org/pypi/pytest
.. _mercurial: http://mercurial.selenic.com/wiki/
.. _`setuptools`: http://pypi.python.org/pypi/setuptools
.. _`distribute`: http://pypi.python.org/pypi/distribute
.. _`pip`: http://pypi.python.org/pypi/pip
.. _`virtualenv`: http://pypi.python.org/pypi/virtualenv
.. _hudson: http://hudson-ci.org/

View file

@ -0,0 +1,208 @@
.. role:: code(literal)
.. role:: file(literal)
.. XXX figure out how the code literals should be dealt with in sphinx. There is probably something builtin.
========================================
py.log documentation and musings
========================================
Foreword
========
This document is an attempt to briefly state the actual specification of the
:code:`py.log` module. It was written by Francois Pinard and also contains
some ideas for enhancing the py.log facilities.
NOTE that :code:`py.log` is subject to refactorings; it may change with
the next release.
This document is meant to trigger or facilitate discussions. It shamelessly
steals from the `Agile Testing`__ comments, and from other sources as well,
without really trying to sort them out.
__ http://agiletesting.blogspot.com/2005/06/keyword-based-logging-with-py-library.html
Logging organisation
====================
The :code:`py.log` module aims at a niche comparable to the one of the
`logging module`__ found within the standard Python distribution, yet
with much simpler paradigms for configuration and usage.
__ http://www.python.org/doc/2.4.2/lib/module-logging.html
Holger Krekel, the main :code:`py` library developer, introduced
the idea of keyword-based logging and the idea of logging *producers* and
*consumers*. A log producer is an object used by the application code
to send messages to various log consumers. When you create a log
producer, you define a set of keywords that are then used to both route
the logging messages to consumers, and to prefix those messages.
In fact, each log producer has a few keywords associated with it for
identification purposes. These keywords form a tuple of strings, and
may be used to later retrieve a particular log producer.
A log producer may (or may not) be associated with a log consumer, meant
to handle log messages in particular ways. The log consumers can be
``STDOUT``, ``STDERR``, log files, syslog, the Windows Event Log, user
defined functions, etc. (Logging to syslog or to the Windows Event
Log is still only planned for the future.) A log producer never has more than
one consumer at a given time, but it is possible to dynamically switch
a producer to use another consumer. On the other hand, a single log
consumer may be associated with many producers.
Note that creating and associating a producer and a consumer is done
automatically when not otherwise overridden, so using :code:`py` logging
is quite comfortable even in the smallest programs. More typically,
the application programmer will likely design a hierarchy of producers,
and will select keywords appropriately for marking the hierarchy tree.
If a node of the hierarchical tree of producers has to be divided in
sub-trees, all producers in the sub-trees share, as a common prefix, the
keywords of the node being divided. In other words, we go further down
in the hierarchy of producers merely by adding keywords.
Using the py.log library
================================
To use the :code:`py.log` library, the user must import it into a Python
application, create at least one log producer and one log consumer, have
producers and consumers associated, and finally call the log producers
as needed, giving them log messages.
Importing
---------
Once the :code:`py` library is installed on your system, a mere::
import py
holds enough magic for lazily importing the various facilities of the
:code:`py` library when they are first needed. This is really how
:code:`py.log` is made available to the application. For example, after
the above ``import py``, one may directly write ``py.log.Producer(...)``
and everything should work fine, the user does not have to worry about
specifically importing more modules.
Creating a producer
-------------------
There are three ways for creating a log producer instance:
+ As soon as ``py.log`` is first evaluated within an application
program, a default log producer is created, and made available under
the name ``py.log.default``. The keyword ``default`` is associated
with that producer.
+ The ``py.log.Producer()`` constructor may be explicitly called
for creating a new instance of a log producer. That constructor
accepts, as an argument, the keywords that should be associated with
that producer. Keywords may be given either as a tuple of keyword
strings, or as a single space-separated string of keywords.
+ Whenever an attribute is *taken* out of a log producer instance for
  the first time, a new log producer is created. The keywords associated
  with that new producer are those of the initial producer instance,
  with the name of the attribute being taken appended to them.
The last point is especially useful, as it allows using log producers
without further declarations, merely creating them *on-the-fly*.
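To make the three possibilities concrete, here is a small sketch (the
``myapp`` and ``database`` keywords are made up for illustration)::

    import py

    py.log.default("hello")           # the pre-made default producer

    log = py.log.Producer("myapp")    # explicit construction
    log("application started")

    sublog = log.database             # taking an attribute creates a new
    sublog("connected")               # producer with the keywords
                                      # ("myapp", "database")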
Creating a consumer
-------------------
There are many ways for creating or denoting a log consumer:
+ A default consumer exists within the ``py.log`` facilities, which
has the effect of writing log messages on the Python standard output
stream. That consumer is associated at the very top of the producer
hierarchy, and as such, is called whenever no other consumer is
found.
+ The notation ``py.log.STDOUT`` accesses a log consumer which writes
log messages on the Python standard output stream.
+ The notation ``py.log.STDERR`` accesses a log consumer which writes
log messages on the Python standard error stream.
+ The ``py.log.File()`` constructor accepts, as argument, either a file
already opened in write mode or any similar file-like object, and
creates a log consumer able to write log messages onto that file.
+ The ``py.log.Path()`` constructor accepts a file name for its first
argument, and creates a log consumer able to write log messages into
that file. The constructor call accepts a few keyword parameters:
+ ``append``, which is ``False`` by default, may be used for
opening the file in append mode instead of write mode.
+ ``delayed_create``, which is ``False`` by default, may be used
  for opening the file at the latest possible time. Consequently,
  the file will not be created at all if it did not already exist
  and no log message ever gets written to it.
+ ``buffering``, which is 1 by default, is used when opening the
file. Buffering can be turned off by specifying a 0 value. The
buffer size may also be selected through this argument.
+ Any user defined function may be used as a log consumer. Such a
function should accept a single argument, which is the message to
write, and do whatever is deemed appropriate by the programmer.
When the need arises, this may be an especially useful and flexible
feature.
+ The special value ``None`` means no consumer at all. This acts just
  as if there were a consumer which silently discards all log
  messages sent to it.
Associating producers and consumers
-----------------------------------
Each log producer may have at most one log consumer associated with
it. A log producer gets associated with a log consumer through a
``py.log.setconsumer()`` call. That function accepts two arguments,
the first identifying a producer (a tuple of keyword strings or a single
space-separated string of keywords), the second specifying the precise
consumer to use for that producer. Until this function is called for a
producer, that producer does not have any explicit consumer associated
with it.
Now, the hierarchy of log producers establishes which consumer gets used
whenever a producer has no explicit consumer. When a log producer
has no consumer explicitly associated with it, it dynamically and
recursively inherits the consumer of its parent node, that is, the node
one step closer to the root of the hierarchy. In other words, the
rightmost keywords of that producer are dropped until another producer
is found which has an explicit consumer. A nice side-effect is that,
by explicitly associating a consumer with a producer, all consumer-less
producers which appear under that producer in the hierarchy tree
automatically *inherit* that consumer.
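The following sketch (keywords and file name made up) shows an explicit
association plus the inheritance rule in action::

    import py

    log = py.log.Producer("myapp")
    py.log.setconsumer("myapp", py.log.Path("myapp.log"))  # log to a file
    py.log.setconsumer("myapp debug", None)                # discard these

    log("goes to myapp.log")
    log.db("no explicit consumer: inherits the 'myapp' one")
    log.debug("silently discarded")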
Writing log messages
--------------------
All log producer instances are also callable, and it is by calling
them that log messages are generated. Each call to a producer object
produces the text for one log entry, which in turn is sent to the log
consumer for that producer.
The log entry displays, after a prefix identifying the log producer
being used, all arguments given in the call, converted to strings and
space-separated. (This is meant by design to be fairly similar to what
the ``print`` statement does in Python). The prefix itself is made up
of a colon-separated list of keywords associated with the producer, the
whole being set within square brackets.
Note that the consumer is responsible for adding the newline at the end
of the log entry. That final newline is not part of the text for the
log entry.
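Putting the last two paragraphs together, a call like the following would,
with the default consumer, produce a bracketed, colon-separated prefix in
front of the space-separated arguments (keywords made up)::

    log = py.log.Producer("myapp subsystem")
    log("nickel", 42)
    # writes a line like: [myapp:subsystem] nickel 42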
.. Other details
.. -------------
.. XXX: fill in details
.. + Should speak about pickle-ability of :code:`py.log`.
..
.. + What is :code:`log.get` (in :file:`logger.py`)?

View file

@ -0,0 +1,93 @@
====================================
Miscellaneous features of the py lib
====================================
Mapping the standard python library into py
===========================================
The ``py.std`` object allows lazy access to
standard library modules. For example, to get to the print-exception
functionality of the standard library you can write::
py.std.traceback.print_exc()
without having to do anything other than the usual ``import py``
at the beginning. You can access any other top-level standard
library module this way. This means that you will only trigger
imports of modules that are actually needed. Note that no attempt
is made to import submodules.
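For instance, the following sketch lazily pulls in the standard ``textwrap``
module on first access (any other top-level module works the same way)::

    import py

    print py.std.textwrap.fill("the py lib offers lazy access to the stdlib",
                               width=30)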
Support for interaction with system utilities/binaries
======================================================
Currently, the py lib offers two ways to interact with
system executables. ``py.process.cmdexec()`` invokes
the shell in order to execute a string. The other
one, ``py.path.local``'s ``sysexec()`` method, lets you
directly execute a binary.
Both approaches will raise an exception in case of a return
code other than 0 and otherwise return the stdout output
of the child process.
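As a small sketch of the error behaviour (the command is made up and expected
to fail), the raised exception can be caught as ``py.process.cmdexec.Error``,
which is also used in the SVN examples later in this document::

    import py

    try:
        out = py.process.cmdexec("ls some-nonexistent-directory")
    except py.process.cmdexec.Error, e:
        # nonzero return code: the exception describes what went wrong
        print "command failed:", e
    else:
        print out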
The shell based approach
------------------------
You can execute a command via your system shell
by doing something like::
out = py.process.cmdexec('ls -v')
However, the ``cmdexec`` approach has a few shortcomings:
- it relies on the underlying system shell
- it necessitates shell-escaping for expressing arguments
- it does not easily allow you to "fix" the binary you want to run
- it only allows executing binaries from the local
  filesystem
.. _sysexec:
local paths have ``sysexec``
----------------------------
In order to synchronously execute an executable file you
can use ``sysexec``::
binsvn.sysexec('ls', 'http://codespeak.net/svn')
where ``binsvn`` is a path that points to the ``svn`` commandline
binary. Note that this function does not offer any shell-escaping
so you have to pass in already separated arguments.
finding an executable local path
--------------------------------
Finding an executable differs between platforms.
Currently, the ``PATH`` environment variable based search on
unix platforms is supported::
py.path.local.sysfind('svn')
which returns the first path whose ``basename`` matches ``svn``.
In principle, `sysfind` deploys platform specific algorithms
to perform the search. On Windows, for example, it may look
at the registry (XXX).
To make the story complete, we allow passing in a second ``checker``
argument that is called for each found executable. For example, if
you have multiple binaries available you may want to select the
right version::
    def mysvn(p):
        """ check that the given svn binary has version 1.1. """
        line = p.execute('--version').readlines()[0]
        if line.find('version 1.1') != -1:
            return p
    binsvn = py.path.local.sysfind('svn', checker=mysvn)
Cross-Python Version compatibility helpers
=============================================
The ``py.builtin`` namespace provides a number of helpers for writing Python code compatible across Python interpreters, mainly Python 2 and Python 3. Type ``help(py.builtin)`` at a Python prompt for the selection of builtins.
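As a small sketch, here are two of the helpers that also appear in the
``py/__init__.py`` export map later in this diff (``print_`` and the
``text``/``_totext`` aliases)::

    import py

    # function-style print that works on Python 2 and Python 3
    py.builtin.print_("hello", "world", sep=", ")

    # "text" is unicode on Python 2 and str on Python 3
    assert isinstance(py.builtin._totext("hi"), py.builtin.text)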

View file

@ -0,0 +1,260 @@
=======
py.path
=======
The 'py' lib provides a uniform high-level API to deal with filesystems
and filesystem-like interfaces: ``py.path``. It aims to offer a central
object for fs-like object trees (reading from and writing to files, adding
files/directories, examining the types and structure, etc.), and out-of-the-box
provides a number of implementations of this API.
py.path.local - local file system path
===============================================
.. _`local`:
basic interactive example
-------------------------------------
The first and most obvious of the implementations is a wrapper around a local
filesystem. It's just a bit nicer in usage than the regular Python APIs, and
of course all the functionality is bundled together rather than spread over a
number of modules.
Example usage: here we use the ``py.test.ensuretemp()`` function to create
a ``py.path.local`` object for us (which wraps a directory):
.. sourcecode:: pycon
>>> import py
>>> temppath = py.test.ensuretemp('py.path_documentation')
>>> foopath = temppath.join('foo') # get child 'foo' (lazily)
>>> foopath.check() # check if child 'foo' exists
False
>>> foopath.write('bar') # write some data to it
>>> foopath.check()
True
>>> foopath.read()
'bar'
>>> foofile = foopath.open() # return a 'real' file object
>>> foofile.read(1)
'b'
reference documentation
---------------------------------
.. autoclass:: py._path.local.LocalPath
:members:
:inherited-members:
``py.path.svnurl`` and ``py.path.svnwc``
==================================================
Two other ``py.path`` implementations that the py lib provides wrap the
popular `Subversion`_ revision control system: the first (called 'svnurl')
by interfacing with a remote server, the second by wrapping a local checkout.
Both allow you to access relatively advanced features such as metadata and
versioning, and both do so in a more user-friendly manner than other existing
solutions.
Some example usage of ``py.path.svnurl``:
.. sourcecode:: pycon
.. >>> import py
.. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
>>> url = py.path.svnurl('http://codespeak.net/svn/py')
>>> info = url.info()
>>> info.kind
'dir'
>>> firstentry = url.log()[-1]
>>> import time
>>> time.strftime('%Y-%m-%d', time.gmtime(firstentry.date))
'2004-10-02'
Example usage of ``py.path.svnwc``:
.. sourcecode:: pycon
.. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
>>> temp = py.test.ensuretemp('py.path_documentation')
>>> wc = py.path.svnwc(temp.join('svnwc'))
>>> wc.checkout('http://codespeak.net/svn/py/dist/py/path/local')
>>> wc.join('local.py').check()
True
.. _`Subversion`: http://subversion.tigris.org/
svn path related API reference
-----------------------------------------
.. autoclass:: py._path.svnwc.SvnWCCommandPath
:members:
:inherited-members:
.. autoclass:: py._path.svnurl.SvnCommandPath
:members:
:inherited-members:
.. autoclass:: py._path.svnwc.SvnAuth
:members:
:inherited-members:
Common vs. specific API, Examples
========================================
All Path objects support a common set of operations, suitable
for many use cases and allowing you to transparently switch the
path object within an application (e.g. from "local" to "svnwc").
The common set includes functions such as `path.read()` to read all data
from a file, `path.write()` to write data, `path.listdir()` to get a list
of directory entries, `path.check()` to check if a node exists
and is of a particular type, `path.join()` to get
to a (grand)child, `path.visit()` to recursively walk through a node's
children, etc. Only things that are not common on 'normal' filesystems (yet),
such as handling metadata (e.g. the Subversion "properties"), require
using specific APIs.
Here is a quick 'cookbook' of small examples that will be useful 'in real
life'; it also presents parts of the 'common' API and shows some non-common
methods:
Searching `.txt` files
--------------------------------
Search for a particular string inside all files with a .txt extension in a
specific directory.
.. sourcecode:: pycon
>>> dirpath = temppath.ensure('testdir', dir=True)
>>> dirpath.join('textfile1.txt').write('foo bar baz')
>>> dirpath.join('textfile2.txt').write('frob bar spam eggs')
>>> subdir = dirpath.ensure('subdir', dir=True)
>>> subdir.join('textfile1.txt').write('foo baz')
>>> subdir.join('textfile2.txt').write('spam eggs spam foo bar spam')
>>> results = []
>>> for fpath in dirpath.visit('*.txt'):
... if 'bar' in fpath.read():
... results.append(fpath.basename)
>>> results.sort()
>>> results
['textfile1.txt', 'textfile2.txt', 'textfile2.txt']
Working with Paths
----------------------------
This example shows the ``py.path`` features for dealing with
filesystem paths. Note that the filesystem is never touched;
all operations are performed on a string level (so the paths
don't have to exist, either):
.. sourcecode:: pycon
>>> p1 = py.path.local('/foo/bar')
>>> p2 = p1.join('baz/qux')
>>> p2 == py.path.local('/foo/bar/baz/qux')
True
>>> sep = py.path.local.sep
>>> p2.relto(p1).replace(sep, '/') # os-specific path sep in the string
'baz/qux'
>>> p2.bestrelpath(p1).replace(sep, '/')
'../..'
>>> p2.join(p2.bestrelpath(p1)) == p1
True
>>> p3 = p1 / 'baz/qux' # the / operator allows joining, too
>>> p2 == p3
True
>>> p4 = p1 + ".py"
>>> p4.basename == "bar.py"
True
>>> p4.ext == ".py"
True
>>> p4.purebasename == "bar"
True
This should be possible on every implementation of ``py.path``, so
regardless of whether the implementation wraps a UNIX filesystem, a Windows
one, or a database or object tree, these functions should be available (each
with their own notion of path separators and dealing with conversions, etc.).
Checking path types
-------------------------------
Now we will show a bit about the powerful 'check()' method on paths, which
allows you to check whether a file exists, what type it is, etc.:
.. sourcecode:: pycon
>>> file1 = temppath.join('file1')
>>> file1.check() # does it exist?
False
>>> file1 = file1.ensure(file=True) # 'touch' the file
>>> file1.check()
True
>>> file1.check(dir=True) # is it a dir?
False
>>> file1.check(file=True) # or a file?
True
>>> file1.check(ext='.txt') # check the extension
False
>>> textfile = temppath.ensure('text.txt', file=True)
>>> textfile.check(ext='.txt')
True
>>> file1.check(basename='file1') # we can use all the path's properties here
True
Setting svn-properties
--------------------------------
As an example of 'uncommon' methods, we'll show how to read and write
properties in a ``py.path.svnwc`` instance:
.. sourcecode:: pycon
.. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
>>> wc.propget('foo')
''
>>> wc.propset('foo', 'bar')
>>> wc.propget('foo')
'bar'
>>> len(wc.status().prop_modified) # our own props
1
>>> msg = wc.revert() # roll back our changes
>>> len(wc.status().prop_modified)
0
SVN authentication
----------------------------
Some uncommon functionality can also be provided as extensions, such as SVN
authentication:
.. sourcecode:: pycon
.. >>> if not py.test.config.option.urlcheck: raise ValueError('skipchunk')
>>> auth = py.path.SvnAuth('anonymous', 'user', cache_auth=False,
... interactive=False)
>>> wc.auth = auth
>>> wc.update() # this should work
>>> path = wc.ensure('thisshouldnotexist.txt')
>>> try:
... path.commit('testing')
... except py.process.cmdexec.Error, e:
... pass
>>> 'authorization failed' in str(e)
True
Known problems / limitations
===================================
* The SVN path objects require the "svn" command line tool;
  there is currently no support for Python bindings.
  Parsing the svn output can lead to problems, particularly
  if you have a non-English locale setting.
* While the path objects basically work on Windows,
  no attention has been paid yet to making unicode paths
  work or to dealing with the famous "8.3" filename issues.

File diff suppressed because it is too large

View file

@ -0,0 +1,164 @@
====================================================
py.xml: simple pythonic xml/html file generation
====================================================
Motivation
==========
There are a plethora of frameworks and libraries to generate
xml and html trees. However, many of them are large, have a
steep learning curve and are often hard to debug. Not to
speak of the fact that they are frameworks to begin with.
.. _xist: http://www.livinglogic.de/Python/xist/index.html
a pythonic object model, please
================================
The py lib offers a pythonic way to generate xml/html, based on
ideas from xist_ which `uses python class objects`_ to build
xml trees. However, xist_'s implementation is somewhat heavy
because it has additional goals like transformations and
supporting many namespaces. But its basic idea is very easy.
.. _`uses python class objects`: http://www.livinglogic.de/Python/xist/Howto.html
generating arbitrary xml structures
-----------------------------------
With ``py.xml.Namespace`` you have the basis
to generate custom xml-fragments on the fly::
class ns(py.xml.Namespace):
"my custom xml namespace"
doc = ns.books(
ns.book(
ns.author("May Day"),
ns.title("python for java programmers"),),
ns.book(
ns.author("why"),
ns.title("Java for Python programmers"),),
publisher="N.N",
)
print doc.unicode(indent=2).encode('utf8')
will give you this representation::
<books publisher="N.N">
<book>
<author>May Day</author>
<title>python for java programmers</title></book>
<book>
<author>why</author>
<title>Java for Python programmers</title></book></books>
In a sentence: positional arguments are child-tags and
keyword-arguments are attributes.
On a side note, you'll see that the unicode-serializer
supports a nice indentation style which keeps your generated
html readable, basically through emulating python's white
space significance by putting closing-tags rightmost and
almost invisible at first glance :-)
basic example for generating html
---------------------------------
Consider this example::
from py.xml import html # html namespace
paras = "First Para", "Second para"
doc = html.html(
html.head(
html.meta(name="Content-Type", value="text/html; charset=latin1")),
html.body(
[html.p(p) for p in paras]))
print unicode(doc).encode('latin1')
Again, tags are objects which contain tags and have attributes.
More exactly, Tags inherit from the list type and thus can be
manipulated as list objects. They additionally support a default
way to represent themselves as a serialized unicode object.
If you happen to look at the py.xml implementation you'll
note that the tag/namespace implementation consumes some 50 lines
with another 50 lines for the unicode serialization code.
CSS-styling your html Tags
--------------------------
One aspect where many of the huge python xml/html generation
frameworks utterly fail is a clean and convenient integration
of CSS styling. Often, developers are left alone with keeping
CSS style definitions in sync with some style files
represented as strings (often in a separate .css file). Not
only is this hard to debug but the missing abstractions make
it hard to modify the styling of your tags or to choose custom
style representations (inline, html.head or external). Add the
browsers' usual tolerance of messiness and errors in style
references and welcome to hell, known as the domain of
developing web applications :-)
By contrast, consider this CSS styling example::
class my(html):
"my initial custom style"
class body(html.body):
style = html.Style(font_size = "120%")
class h2(html.h2):
style = html.Style(background = "grey")
class p(html.p):
style = html.Style(font_weight="bold")
doc = my.html(
my.head(),
my.body(
my.h2("hello world"),
my.p("bold as bold can")
)
)
print doc.unicode(indent=2)
This will give you a small 'n' mean self-contained
representation by default::
<html>
<head/>
<body style="font-size: 120%">
<h2 style="background: grey">hello world</h2>
<p style="font-weight: bold">bold as bold can</p></body></html>
Most importantly, note that the inline-styling is just an
implementation detail of the unicode serialization code.
You can easily modify the serialization to put your styling into the
``html.head`` or in a separate file and autogenerate CSS-class
names or ids.
Hey, you could even write tests that check you are using correct
styles suitable for specific browser requirements. Did I mention
that the ability to easily write tests for your generated
html and its serialization could help to develop _stable_ user
interfaces?
More to come ...
----------------
For now, I don't think we should strive to offer much more
than the above. However, it is probably not hard to offer
*partial serialization* to allow generating maybe hundreds of
complex html documents per second. Basically we would allow
putting callables both as Tag content and as values of
attributes. A slightly more advanced Serialization would then
produce a list of unicode objects intermingled with callables.
At HTTP-Request time the callables would get called to
complete the probably request-specific serialization of
your Tags. Hum, it's probably harder to explain this than to
actually code it :-)
.. _`py.test`: test/index.html

View file

@ -0,0 +1,150 @@
"""
py.test and pylib: rapid testing and development utils
this module uses apipkg.py for lazy-loading sub modules
and classes. The initpkg-dictionary below specifies
name->value mappings where value can be another namespace
dictionary or an import path.
(c) Holger Krekel and others, 2004-2014
"""
__version__ = '1.4.31'
from py import _apipkg
# so that py.error.* instances are picklable
import sys
sys.modules['py.error'] = _apipkg.AliasModule("py.error", "py._error", 'error')
_apipkg.initpkg(__name__, attr={'_apipkg': _apipkg}, exportdefs={
# access to all standard lib modules
'std': '._std:std',
# access to all posix errno's as classes
'error': '._error:error',
'_pydir' : '.__metainfo:pydir',
'version': 'py:__version__', # backward compatibility
# pytest-2.0 has a flat namespace, we use alias modules
# to keep old references compatible
'test' : 'pytest',
'test.collect' : 'pytest',
'test.cmdline' : 'pytest',
# hook into the top-level standard library
'process' : {
'__doc__' : '._process:__doc__',
'cmdexec' : '._process.cmdexec:cmdexec',
'kill' : '._process.killproc:kill',
'ForkedFunc' : '._process.forkedfunc:ForkedFunc',
},
'apipkg' : {
'initpkg' : '._apipkg:initpkg',
'ApiModule' : '._apipkg:ApiModule',
},
'iniconfig' : {
'IniConfig' : '._iniconfig:IniConfig',
'ParseError' : '._iniconfig:ParseError',
},
'path' : {
'__doc__' : '._path:__doc__',
'svnwc' : '._path.svnwc:SvnWCCommandPath',
'svnurl' : '._path.svnurl:SvnCommandPath',
'local' : '._path.local:LocalPath',
'SvnAuth' : '._path.svnwc:SvnAuth',
},
# python inspection/code-generation API
'code' : {
'__doc__' : '._code:__doc__',
'compile' : '._code.source:compile_',
'Source' : '._code.source:Source',
'Code' : '._code.code:Code',
'Frame' : '._code.code:Frame',
'ExceptionInfo' : '._code.code:ExceptionInfo',
'Traceback' : '._code.code:Traceback',
'getfslineno' : '._code.source:getfslineno',
'getrawcode' : '._code.code:getrawcode',
'patch_builtins' : '._code.code:patch_builtins',
'unpatch_builtins' : '._code.code:unpatch_builtins',
'_AssertionError' : '._code.assertion:AssertionError',
'_reinterpret_old' : '._code.assertion:reinterpret_old',
'_reinterpret' : '._code.assertion:reinterpret',
'_reprcompare' : '._code.assertion:_reprcompare',
'_format_explanation' : '._code.assertion:_format_explanation',
},
# backports and additions of builtins
'builtin' : {
'__doc__' : '._builtin:__doc__',
'enumerate' : '._builtin:enumerate',
'reversed' : '._builtin:reversed',
'sorted' : '._builtin:sorted',
'any' : '._builtin:any',
'all' : '._builtin:all',
'set' : '._builtin:set',
'frozenset' : '._builtin:frozenset',
'BaseException' : '._builtin:BaseException',
'GeneratorExit' : '._builtin:GeneratorExit',
'_sysex' : '._builtin:_sysex',
'print_' : '._builtin:print_',
'_reraise' : '._builtin:_reraise',
'_tryimport' : '._builtin:_tryimport',
'exec_' : '._builtin:exec_',
'_basestring' : '._builtin:_basestring',
'_totext' : '._builtin:_totext',
'_isbytes' : '._builtin:_isbytes',
'_istext' : '._builtin:_istext',
'_getimself' : '._builtin:_getimself',
'_getfuncdict' : '._builtin:_getfuncdict',
'_getcode' : '._builtin:_getcode',
'builtins' : '._builtin:builtins',
'execfile' : '._builtin:execfile',
'callable' : '._builtin:callable',
'bytes' : '._builtin:bytes',
'text' : '._builtin:text',
},
# input-output helping
'io' : {
'__doc__' : '._io:__doc__',
'dupfile' : '._io.capture:dupfile',
'TextIO' : '._io.capture:TextIO',
'BytesIO' : '._io.capture:BytesIO',
'FDCapture' : '._io.capture:FDCapture',
'StdCapture' : '._io.capture:StdCapture',
'StdCaptureFD' : '._io.capture:StdCaptureFD',
'TerminalWriter' : '._io.terminalwriter:TerminalWriter',
'ansi_print' : '._io.terminalwriter:ansi_print',
'get_terminal_width' : '._io.terminalwriter:get_terminal_width',
'saferepr' : '._io.saferepr:saferepr',
},
# small and mean xml/html generation
'xml' : {
'__doc__' : '._xmlgen:__doc__',
'html' : '._xmlgen:html',
'Tag' : '._xmlgen:Tag',
'raw' : '._xmlgen:raw',
'Namespace' : '._xmlgen:Namespace',
'escape' : '._xmlgen:escape',
},
'log' : {
# logging API ('producers' and 'consumers' connected via keywords)
'__doc__' : '._log:__doc__',
'_apiwarn' : '._log.warning:_apiwarn',
'Producer' : '._log.log:Producer',
'setconsumer' : '._log.log:setconsumer',
'_setstate' : '._log.log:setstate',
'_getstate' : '._log.log:getstate',
'Path' : '._log.log:Path',
'STDOUT' : '._log.log:STDOUT',
'STDERR' : '._log.log:STDERR',
'Syslog' : '._log.log:Syslog',
},
})

View file

@ -0,0 +1,2 @@
import py
pydir = py.path.local(py.__file__).dirpath()

View file

@ -0,0 +1,181 @@
"""
apipkg: control the exported namespace of a python package.
see http://pypi.python.org/pypi/apipkg
(c) holger krekel, 2009 - MIT license
"""
import os
import sys
from types import ModuleType
__version__ = '1.3.dev'
def _py_abspath(path):
"""
special version of abspath
that will leave paths from jython jars alone
"""
if path.startswith('__pyclasspath__'):
return path
else:
return os.path.abspath(path)
def initpkg(pkgname, exportdefs, attr=dict()):
""" initialize given package from the export definitions. """
oldmod = sys.modules.get(pkgname)
d = {}
f = getattr(oldmod, '__file__', None)
if f:
f = _py_abspath(f)
d['__file__'] = f
if hasattr(oldmod, '__version__'):
d['__version__'] = oldmod.__version__
if hasattr(oldmod, '__loader__'):
d['__loader__'] = oldmod.__loader__
if hasattr(oldmod, '__path__'):
d['__path__'] = [_py_abspath(p) for p in oldmod.__path__]
if '__doc__' not in exportdefs and getattr(oldmod, '__doc__', None):
d['__doc__'] = oldmod.__doc__
d.update(attr)
if hasattr(oldmod, "__dict__"):
oldmod.__dict__.update(d)
mod = ApiModule(pkgname, exportdefs, implprefix=pkgname, attr=d)
sys.modules[pkgname] = mod
def importobj(modpath, attrname):
module = __import__(modpath, None, None, ['__doc__'])
if not attrname:
return module
retval = module
names = attrname.split(".")
for x in names:
retval = getattr(retval, x)
return retval
class ApiModule(ModuleType):
def __docget(self):
try:
return self.__doc
except AttributeError:
if '__doc__' in self.__map__:
return self.__makeattr('__doc__')
def __docset(self, value):
self.__doc = value
__doc__ = property(__docget, __docset)
def __init__(self, name, importspec, implprefix=None, attr=None):
self.__name__ = name
self.__all__ = [x for x in importspec if x != '__onfirstaccess__']
self.__map__ = {}
self.__implprefix__ = implprefix or name
if attr:
for name, val in attr.items():
# print "setting", self.__name__, name, val
setattr(self, name, val)
for name, importspec in importspec.items():
if isinstance(importspec, dict):
subname = '%s.%s' % (self.__name__, name)
apimod = ApiModule(subname, importspec, implprefix)
sys.modules[subname] = apimod
setattr(self, name, apimod)
else:
parts = importspec.split(':')
modpath = parts.pop(0)
attrname = parts and parts[0] or ""
if modpath[0] == '.':
modpath = implprefix + modpath
if not attrname:
subname = '%s.%s' % (self.__name__, name)
apimod = AliasModule(subname, modpath)
sys.modules[subname] = apimod
if '.' not in name:
setattr(self, name, apimod)
else:
self.__map__[name] = (modpath, attrname)
def __repr__(self):
l = []
if hasattr(self, '__version__'):
l.append("version=" + repr(self.__version__))
if hasattr(self, '__file__'):
l.append('from ' + repr(self.__file__))
if l:
return '<ApiModule %r %s>' % (self.__name__, " ".join(l))
return '<ApiModule %r>' % (self.__name__,)
def __makeattr(self, name):
"""lazily compute value for name or raise AttributeError if unknown."""
# print "makeattr", self.__name__, name
target = None
if '__onfirstaccess__' in self.__map__:
target = self.__map__.pop('__onfirstaccess__')
importobj(*target)()
try:
modpath, attrname = self.__map__[name]
except KeyError:
if target is not None and name != '__onfirstaccess__':
# retry, onfirstaccess might have set attrs
return getattr(self, name)
raise AttributeError(name)
else:
result = importobj(modpath, attrname)
setattr(self, name, result)
try:
del self.__map__[name]
except KeyError:
pass # in a recursive-import situation a double-del can happen
return result
__getattr__ = __makeattr
def __dict__(self):
# force all the content of the module to be loaded when __dict__ is read
dictdescr = ModuleType.__dict__['__dict__']
dict = dictdescr.__get__(self)
if dict is not None:
hasattr(self, 'some')
for name in self.__all__:
try:
self.__makeattr(name)
except AttributeError:
pass
return dict
__dict__ = property(__dict__)
def AliasModule(modname, modpath, attrname=None):
mod = []
def getmod():
if not mod:
x = importobj(modpath, None)
if attrname is not None:
x = getattr(x, attrname)
mod.append(x)
return mod[0]
class AliasModule(ModuleType):
def __repr__(self):
x = modpath
if attrname:
x += "." + attrname
return '<AliasModule %r for %r>' % (modname, x)
def __getattribute__(self, name):
try:
return getattr(getmod(), name)
except ImportError:
return None
def __setattr__(self, name, value):
setattr(getmod(), name, value)
def __delattr__(self, name):
delattr(getmod(), name)
return AliasModule(str(modname))

View file

@ -0,0 +1,248 @@
import sys
try:
reversed = reversed
except NameError:
def reversed(sequence):
"""reversed(sequence) -> reverse iterator over values of the sequence
Return a reverse iterator
"""
if hasattr(sequence, '__reversed__'):
return sequence.__reversed__()
if not hasattr(sequence, '__getitem__'):
raise TypeError("argument to reversed() must be a sequence")
return reversed_iterator(sequence)
class reversed_iterator(object):
def __init__(self, seq):
self.seq = seq
self.remaining = len(seq)
def __iter__(self):
return self
def next(self):
i = self.remaining
if i > 0:
i -= 1
item = self.seq[i]
self.remaining = i
return item
raise StopIteration
def __length_hint__(self):
return self.remaining
try:
any = any
except NameError:
def any(iterable):
for x in iterable:
if x:
return True
return False
try:
all = all
except NameError:
def all(iterable):
for x in iterable:
if not x:
return False
return True
try:
sorted = sorted
except NameError:
builtin_cmp = cmp # need to use cmp as keyword arg
def sorted(iterable, cmp=None, key=None, reverse=0):
use_cmp = None
if key is not None:
if cmp is None:
def use_cmp(x, y):
return builtin_cmp(x[0], y[0])
else:
def use_cmp(x, y):
return cmp(x[0], y[0])
l = [(key(element), element) for element in iterable]
else:
if cmp is not None:
use_cmp = cmp
l = list(iterable)
if use_cmp is not None:
l.sort(use_cmp)
else:
l.sort()
if reverse:
l.reverse()
if key is not None:
return [element for (_, element) in l]
return l
try:
set, frozenset = set, frozenset
except NameError:
from sets import set, frozenset
# pass through
enumerate = enumerate
try:
BaseException = BaseException
except NameError:
BaseException = Exception
try:
GeneratorExit = GeneratorExit
except NameError:
class GeneratorExit(Exception):
""" This exception is never raised, it is there to make it possible to
write code compatible with CPython 2.5 even in lower CPython
versions."""
pass
GeneratorExit.__module__ = 'exceptions'
_sysex = (KeyboardInterrupt, SystemExit, MemoryError, GeneratorExit)
try:
callable = callable
except NameError:
def callable(obj):
return hasattr(obj, "__call__")
if sys.version_info >= (3, 0):
exec ("print_ = print ; exec_=exec")
import builtins
# some backward compatibility helpers
_basestring = str
def _totext(obj, encoding=None, errors=None):
if isinstance(obj, bytes):
if errors is None:
obj = obj.decode(encoding)
else:
obj = obj.decode(encoding, errors)
elif not isinstance(obj, str):
obj = str(obj)
return obj
def _isbytes(x):
return isinstance(x, bytes)
def _istext(x):
return isinstance(x, str)
text = str
bytes = bytes
def _getimself(function):
return getattr(function, '__self__', None)
def _getfuncdict(function):
return getattr(function, "__dict__", None)
def _getcode(function):
return getattr(function, "__code__", None)
def execfile(fn, globs=None, locs=None):
if globs is None:
back = sys._getframe(1)
globs = back.f_globals
locs = back.f_locals
del back
elif locs is None:
locs = globs
fp = open(fn, "r")
try:
source = fp.read()
finally:
fp.close()
co = compile(source, fn, "exec", dont_inherit=True)
exec_(co, globs, locs)
else:
import __builtin__ as builtins
_totext = unicode
_basestring = basestring
text = unicode
bytes = str
execfile = execfile
callable = callable
def _isbytes(x):
return isinstance(x, str)
def _istext(x):
return isinstance(x, unicode)
def _getimself(function):
return getattr(function, 'im_self', None)
def _getfuncdict(function):
return getattr(function, "__dict__", None)
def _getcode(function):
try:
return getattr(function, "__code__")
except AttributeError:
return getattr(function, "func_code", None)
def print_(*args, **kwargs):
""" minimal backport of py3k print statement. """
sep = ' '
if 'sep' in kwargs:
sep = kwargs.pop('sep')
end = '\n'
if 'end' in kwargs:
end = kwargs.pop('end')
file = 'file' in kwargs and kwargs.pop('file') or sys.stdout
if kwargs:
args = ", ".join([str(x) for x in kwargs])
raise TypeError("invalid keyword arguments: %s" % args)
at_start = True
for x in args:
if not at_start:
file.write(sep)
file.write(str(x))
at_start = False
file.write(end)
def exec_(obj, globals=None, locals=None):
""" minimal backport of py3k exec statement. """
__tracebackhide__ = True
if globals is None:
frame = sys._getframe(1)
globals = frame.f_globals
if locals is None:
locals = frame.f_locals
elif locals is None:
locals = globals
exec2(obj, globals, locals)
if sys.version_info >= (3, 0):
def _reraise(cls, val, tb):
__tracebackhide__ = True
assert hasattr(val, '__traceback__')
raise cls.with_traceback(val, tb)
else:
exec ("""
def _reraise(cls, val, tb):
__tracebackhide__ = True
raise cls, val, tb
def exec2(obj, globals, locals):
__tracebackhide__ = True
exec obj in globals, locals
""")
def _tryimport(*names):
""" return the first successfully imported module. """
assert names
for name in names:
try:
__import__(name)
except ImportError:
excinfo = sys.exc_info()
else:
return sys.modules[name]
_reraise(*excinfo)

View file

@ -0,0 +1 @@
""" python inspection/code generation API """

View file

@ -0,0 +1,339 @@
"""
Find intermediate evaluation results in assert statements through builtin AST.
This should replace _assertionold.py eventually.
"""
import sys
import ast
import py
from py._code.assertion import _format_explanation, BuiltinAssertionError
if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
# See http://bugs.jython.org/issue1497
_exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
"ListComp", "GeneratorExp", "Yield", "Compare", "Call",
"Repr", "Num", "Str", "Attribute", "Subscript", "Name",
"List", "Tuple")
_stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
"AugAssign", "Print", "For", "While", "If", "With", "Raise",
"TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
"Exec", "Global", "Expr", "Pass", "Break", "Continue")
_expr_nodes = set(getattr(ast, name) for name in _exprs)
_stmt_nodes = set(getattr(ast, name) for name in _stmts)
def _is_ast_expr(node):
return node.__class__ in _expr_nodes
def _is_ast_stmt(node):
return node.__class__ in _stmt_nodes
else:
def _is_ast_expr(node):
return isinstance(node, ast.expr)
def _is_ast_stmt(node):
return isinstance(node, ast.stmt)
class Failure(Exception):
"""Error found while interpreting AST."""
def __init__(self, explanation=""):
self.cause = sys.exc_info()
self.explanation = explanation
def interpret(source, frame, should_fail=False):
mod = ast.parse(source)
visitor = DebugInterpreter(frame)
try:
visitor.visit(mod)
except Failure:
failure = sys.exc_info()[1]
return getfailure(failure)
if should_fail:
return ("(assertion failed, but when it was re-run for "
"printing intermediate values, it did not fail. Suggestions: "
"compute assert expression before the assert or use --no-assert)")
def run(offending_line, frame=None):
if frame is None:
frame = py.code.Frame(sys._getframe(1))
return interpret(offending_line, frame)
def getfailure(failure):
explanation = _format_explanation(failure.explanation)
value = failure.cause[1]
if str(value):
lines = explanation.splitlines()
if not lines:
lines.append("")
lines[0] += " << %s" % (value,)
explanation = "\n".join(lines)
text = "%s: %s" % (failure.cause[0].__name__, explanation)
if text.startswith("AssertionError: assert "):
text = text[16:]
return text
operator_map = {
ast.BitOr : "|",
ast.BitXor : "^",
ast.BitAnd : "&",
ast.LShift : "<<",
ast.RShift : ">>",
ast.Add : "+",
ast.Sub : "-",
ast.Mult : "*",
ast.Div : "/",
ast.FloorDiv : "//",
ast.Mod : "%",
ast.Eq : "==",
ast.NotEq : "!=",
ast.Lt : "<",
ast.LtE : "<=",
ast.Gt : ">",
ast.GtE : ">=",
ast.Pow : "**",
ast.Is : "is",
ast.IsNot : "is not",
ast.In : "in",
ast.NotIn : "not in"
}
unary_map = {
ast.Not : "not %s",
ast.Invert : "~%s",
ast.USub : "-%s",
ast.UAdd : "+%s"
}
class DebugInterpreter(ast.NodeVisitor):
"""Interpret AST nodes to gleam useful debugging information. """
def __init__(self, frame):
self.frame = frame
def generic_visit(self, node):
# Fallback when we don't have a special implementation.
if _is_ast_expr(node):
mod = ast.Expression(node)
co = self._compile(mod)
try:
result = self.frame.eval(co)
except Exception:
raise Failure()
explanation = self.frame.repr(result)
return explanation, result
elif _is_ast_stmt(node):
mod = ast.Module([node])
co = self._compile(mod, "exec")
try:
self.frame.exec_(co)
except Exception:
raise Failure()
return None, None
else:
raise AssertionError("can't handle %s" %(node,))
def _compile(self, source, mode="eval"):
return compile(source, "<assertion interpretation>", mode)
def visit_Expr(self, expr):
return self.visit(expr.value)
def visit_Module(self, mod):
for stmt in mod.body:
self.visit(stmt)
def visit_Name(self, name):
explanation, result = self.generic_visit(name)
# See if the name is local.
source = "%r in locals() is not globals()" % (name.id,)
co = self._compile(source)
try:
local = self.frame.eval(co)
except Exception:
# have to assume it isn't
local = False
if not local:
return name.id, result
return explanation, result
def visit_Compare(self, comp):
left = comp.left
left_explanation, left_result = self.visit(left)
for op, next_op in zip(comp.ops, comp.comparators):
next_explanation, next_result = self.visit(next_op)
op_symbol = operator_map[op.__class__]
explanation = "%s %s %s" % (left_explanation, op_symbol,
next_explanation)
source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_left=left_result,
__exprinfo_right=next_result)
except Exception:
raise Failure(explanation)
try:
if not result:
break
except KeyboardInterrupt:
raise
except:
break
left_explanation, left_result = next_explanation, next_result
rcomp = py.code._reprcompare
if rcomp:
res = rcomp(op_symbol, left_result, next_result)
if res:
explanation = res
return explanation, result
def visit_BoolOp(self, boolop):
is_or = isinstance(boolop.op, ast.Or)
explanations = []
for operand in boolop.values:
explanation, result = self.visit(operand)
explanations.append(explanation)
if result == is_or:
break
name = is_or and " or " or " and "
explanation = "(" + name.join(explanations) + ")"
return explanation, result
def visit_UnaryOp(self, unary):
pattern = unary_map[unary.op.__class__]
operand_explanation, operand_result = self.visit(unary.operand)
explanation = pattern % (operand_explanation,)
co = self._compile(pattern % ("__exprinfo_expr",))
try:
result = self.frame.eval(co, __exprinfo_expr=operand_result)
except Exception:
raise Failure(explanation)
return explanation, result
def visit_BinOp(self, binop):
left_explanation, left_result = self.visit(binop.left)
right_explanation, right_result = self.visit(binop.right)
symbol = operator_map[binop.op.__class__]
explanation = "(%s %s %s)" % (left_explanation, symbol,
right_explanation)
source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_left=left_result,
__exprinfo_right=right_result)
except Exception:
raise Failure(explanation)
return explanation, result
def visit_Call(self, call):
func_explanation, func = self.visit(call.func)
arg_explanations = []
ns = {"__exprinfo_func" : func}
arguments = []
for arg in call.args:
arg_explanation, arg_result = self.visit(arg)
arg_name = "__exprinfo_%s" % (len(ns),)
ns[arg_name] = arg_result
arguments.append(arg_name)
arg_explanations.append(arg_explanation)
for keyword in call.keywords:
arg_explanation, arg_result = self.visit(keyword.value)
arg_name = "__exprinfo_%s" % (len(ns),)
ns[arg_name] = arg_result
keyword_source = "%s=%%s" % (keyword.arg)
arguments.append(keyword_source % (arg_name,))
arg_explanations.append(keyword_source % (arg_explanation,))
if call.starargs:
arg_explanation, arg_result = self.visit(call.starargs)
arg_name = "__exprinfo_star"
ns[arg_name] = arg_result
arguments.append("*%s" % (arg_name,))
arg_explanations.append("*%s" % (arg_explanation,))
if call.kwargs:
arg_explanation, arg_result = self.visit(call.kwargs)
arg_name = "__exprinfo_kwds"
ns[arg_name] = arg_result
arguments.append("**%s" % (arg_name,))
arg_explanations.append("**%s" % (arg_explanation,))
args_explained = ", ".join(arg_explanations)
explanation = "%s(%s)" % (func_explanation, args_explained)
args = ", ".join(arguments)
source = "__exprinfo_func(%s)" % (args,)
co = self._compile(source)
try:
result = self.frame.eval(co, **ns)
except Exception:
raise Failure(explanation)
pattern = "%s\n{%s = %s\n}"
rep = self.frame.repr(result)
explanation = pattern % (rep, rep, explanation)
return explanation, result
def _is_builtin_name(self, name):
pattern = "%r not in globals() and %r not in locals()"
source = pattern % (name.id, name.id)
co = self._compile(source)
try:
return self.frame.eval(co)
except Exception:
return False
def visit_Attribute(self, attr):
if not isinstance(attr.ctx, ast.Load):
return self.generic_visit(attr)
source_explanation, source_result = self.visit(attr.value)
explanation = "%s.%s" % (source_explanation, attr.attr)
source = "__exprinfo_expr.%s" % (attr.attr,)
co = self._compile(source)
try:
result = self.frame.eval(co, __exprinfo_expr=source_result)
except Exception:
raise Failure(explanation)
explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
self.frame.repr(result),
source_explanation, attr.attr)
# Check if the attr is from an instance.
source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
source = source % (attr.attr,)
co = self._compile(source)
try:
from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
except Exception:
from_instance = True
if from_instance:
rep = self.frame.repr(result)
pattern = "%s\n{%s = %s\n}"
explanation = pattern % (rep, rep, explanation)
return explanation, result
def visit_Assert(self, assrt):
test_explanation, test_result = self.visit(assrt.test)
if test_explanation.startswith("False\n{False =") and \
test_explanation.endswith("\n"):
test_explanation = test_explanation[15:-2]
explanation = "assert %s" % (test_explanation,)
if not test_result:
try:
raise BuiltinAssertionError
except Exception:
raise Failure(explanation)
return explanation, test_result
def visit_Assign(self, assign):
value_explanation, value_result = self.visit(assign.value)
explanation = "... = %s" % (value_explanation,)
name = ast.Name("__exprinfo_expr", ast.Load(),
lineno=assign.value.lineno,
col_offset=assign.value.col_offset)
new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
col_offset=assign.col_offset)
mod = ast.Module([new_assign])
co = self._compile(mod, "exec")
try:
self.frame.exec_(co, __exprinfo_expr=value_result)
except Exception:
raise Failure(explanation)
return explanation, value_result

View file

@ -0,0 +1,555 @@
import py
import sys, inspect
from compiler import parse, ast, pycodegen
from py._code.assertion import BuiltinAssertionError, _format_explanation
passthroughex = py.builtin._sysex
class Failure:
def __init__(self, node):
self.exc, self.value, self.tb = sys.exc_info()
self.node = node
class View(object):
"""View base class.
If C is a subclass of View, then C(x) creates a proxy object around
the object x. The actual class of the proxy is not C in general,
but a *subclass* of C determined by the rules below. To avoid confusion
we call view class the class of the proxy (a subclass of C, so of View)
and object class the class of x.
Attributes and methods not found in the proxy are automatically read on x.
Other operations like setting attributes are performed on the proxy, as
determined by its view class. The object x is available from the proxy
as its __obj__ attribute.
The view class selection is determined by the __view__ tuples and the
optional __viewkey__ method. By default, the selected view class is the
most specific subclass of C whose __view__ mentions the class of x.
If no such subclass is found, the search proceeds with the parent
object classes. For example, C(True) will first look for a subclass
of C with __view__ = (..., bool, ...) and only if it doesn't find any
look for one with __view__ = (..., int, ...), and then ..., object,...
If everything fails the class C itself is considered to be the default.
Alternatively, the view class selection can be driven by another aspect
of the object x, instead of the class of x, by overriding __viewkey__.
See last example at the end of this module.
"""
_viewcache = {}
__view__ = ()
def __new__(rootclass, obj, *args, **kwds):
self = object.__new__(rootclass)
self.__obj__ = obj
self.__rootclass__ = rootclass
key = self.__viewkey__()
try:
self.__class__ = self._viewcache[key]
except KeyError:
self.__class__ = self._selectsubclass(key)
return self
def __getattr__(self, attr):
# attributes not found in the normal hierarchy rooted on View
# are looked up in the object's real class
return getattr(self.__obj__, attr)
def __viewkey__(self):
return self.__obj__.__class__
def __matchkey__(self, key, subclasses):
if inspect.isclass(key):
keys = inspect.getmro(key)
else:
keys = [key]
for key in keys:
result = [C for C in subclasses if key in C.__view__]
if result:
return result
return []
def _selectsubclass(self, key):
subclasses = list(enumsubclasses(self.__rootclass__))
for C in subclasses:
if not isinstance(C.__view__, tuple):
C.__view__ = (C.__view__,)
choices = self.__matchkey__(key, subclasses)
if not choices:
return self.__rootclass__
elif len(choices) == 1:
return choices[0]
else:
# combine the multiple choices
return type('?', tuple(choices), {})
def __repr__(self):
return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
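# Illustrative sketch, not part of the upstream py sources: a minimal
# demonstration of the __view__-based class selection described in the View
# docstring above.  The names _ExampleRoot/_ExampleInt are made up for this
# example and the function is never called by the module itself.
def _example_view_selection():
    class _ExampleRoot(View):
        pass

    class _ExampleInt(_ExampleRoot):
        __view__ = int
        def double(self):
            return self.__obj__ * 2

    # ints match _ExampleInt via its __view__ entry ...
    assert isinstance(_ExampleRoot(3), _ExampleInt)
    assert _ExampleRoot(3).double() == 6
    # ... anything without a matching subclass falls back to the root class
    assert _ExampleRoot("text").__class__ is _ExampleRoot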
def enumsubclasses(cls):
for subcls in cls.__subclasses__():
for subsubclass in enumsubclasses(subcls):
yield subsubclass
yield cls
class Interpretable(View):
"""A parse tree node with a few extra methods."""
explanation = None
def is_builtin(self, frame):
return False
def eval(self, frame):
# fall-back for unknown expression nodes
try:
expr = ast.Expression(self.__obj__)
expr.filename = '<eval>'
self.__obj__.filename = '<eval>'
co = pycodegen.ExpressionCodeGenerator(expr).getCode()
result = frame.eval(co)
except passthroughex:
raise
except:
raise Failure(self)
self.result = result
self.explanation = self.explanation or frame.repr(self.result)
def run(self, frame):
# fall-back for unknown statement nodes
try:
expr = ast.Module(None, ast.Stmt([self.__obj__]))
expr.filename = '<run>'
co = pycodegen.ModuleCodeGenerator(expr).getCode()
frame.exec_(co)
except passthroughex:
raise
except:
raise Failure(self)
def nice_explanation(self):
return _format_explanation(self.explanation)
class Name(Interpretable):
__view__ = ast.Name
def is_local(self, frame):
source = '%r in locals() is not globals()' % self.name
try:
return frame.is_true(frame.eval(source))
except passthroughex:
raise
except:
return False
def is_global(self, frame):
source = '%r in globals()' % self.name
try:
return frame.is_true(frame.eval(source))
except passthroughex:
raise
except:
return False
def is_builtin(self, frame):
source = '%r not in locals() and %r not in globals()' % (
self.name, self.name)
try:
return frame.is_true(frame.eval(source))
except passthroughex:
raise
except:
return False
def eval(self, frame):
super(Name, self).eval(frame)
if not self.is_local(frame):
self.explanation = self.name
class Compare(Interpretable):
__view__ = ast.Compare
def eval(self, frame):
expr = Interpretable(self.expr)
expr.eval(frame)
for operation, expr2 in self.ops:
if hasattr(self, 'result'):
# shortcutting in chained expressions
if not frame.is_true(self.result):
break
expr2 = Interpretable(expr2)
expr2.eval(frame)
self.explanation = "%s %s %s" % (
expr.explanation, operation, expr2.explanation)
source = "__exprinfo_left %s __exprinfo_right" % operation
try:
self.result = frame.eval(source,
__exprinfo_left=expr.result,
__exprinfo_right=expr2.result)
except passthroughex:
raise
except:
raise Failure(self)
expr = expr2
class And(Interpretable):
__view__ = ast.And
def eval(self, frame):
explanations = []
for expr in self.nodes:
expr = Interpretable(expr)
expr.eval(frame)
explanations.append(expr.explanation)
self.result = expr.result
if not frame.is_true(expr.result):
break
self.explanation = '(' + ' and '.join(explanations) + ')'
class Or(Interpretable):
__view__ = ast.Or
def eval(self, frame):
explanations = []
for expr in self.nodes:
expr = Interpretable(expr)
expr.eval(frame)
explanations.append(expr.explanation)
self.result = expr.result
if frame.is_true(expr.result):
break
self.explanation = '(' + ' or '.join(explanations) + ')'
# == Unary operations ==
keepalive = []
for astclass, astpattern in {
ast.Not : 'not __exprinfo_expr',
ast.Invert : '(~__exprinfo_expr)',
}.items():
class UnaryArith(Interpretable):
__view__ = astclass
def eval(self, frame, astpattern=astpattern):
expr = Interpretable(self.expr)
expr.eval(frame)
self.explanation = astpattern.replace('__exprinfo_expr',
expr.explanation)
try:
self.result = frame.eval(astpattern,
__exprinfo_expr=expr.result)
except passthroughex:
raise
except:
raise Failure(self)
keepalive.append(UnaryArith)
# == Binary operations ==
for astclass, astpattern in {
ast.Add : '(__exprinfo_left + __exprinfo_right)',
ast.Sub : '(__exprinfo_left - __exprinfo_right)',
ast.Mul : '(__exprinfo_left * __exprinfo_right)',
ast.Div : '(__exprinfo_left / __exprinfo_right)',
ast.Mod : '(__exprinfo_left % __exprinfo_right)',
ast.Power : '(__exprinfo_left ** __exprinfo_right)',
}.items():
class BinaryArith(Interpretable):
__view__ = astclass
def eval(self, frame, astpattern=astpattern):
left = Interpretable(self.left)
left.eval(frame)
right = Interpretable(self.right)
right.eval(frame)
self.explanation = (astpattern
.replace('__exprinfo_left', left .explanation)
.replace('__exprinfo_right', right.explanation))
try:
self.result = frame.eval(astpattern,
__exprinfo_left=left.result,
__exprinfo_right=right.result)
except passthroughex:
raise
except:
raise Failure(self)
keepalive.append(BinaryArith)
class CallFunc(Interpretable):
__view__ = ast.CallFunc
def is_bool(self, frame):
source = 'isinstance(__exprinfo_value, bool)'
try:
return frame.is_true(frame.eval(source,
__exprinfo_value=self.result))
except passthroughex:
raise
except:
return False
def eval(self, frame):
node = Interpretable(self.node)
node.eval(frame)
explanations = []
vars = {'__exprinfo_fn': node.result}
source = '__exprinfo_fn('
for a in self.args:
if isinstance(a, ast.Keyword):
keyword = a.name
a = a.expr
else:
keyword = None
a = Interpretable(a)
a.eval(frame)
argname = '__exprinfo_%d' % len(vars)
vars[argname] = a.result
if keyword is None:
source += argname + ','
explanations.append(a.explanation)
else:
source += '%s=%s,' % (keyword, argname)
explanations.append('%s=%s' % (keyword, a.explanation))
if self.star_args:
star_args = Interpretable(self.star_args)
star_args.eval(frame)
argname = '__exprinfo_star'
vars[argname] = star_args.result
source += '*' + argname + ','
explanations.append('*' + star_args.explanation)
if self.dstar_args:
dstar_args = Interpretable(self.dstar_args)
dstar_args.eval(frame)
argname = '__exprinfo_kwds'
vars[argname] = dstar_args.result
source += '**' + argname + ','
explanations.append('**' + dstar_args.explanation)
self.explanation = "%s(%s)" % (
node.explanation, ', '.join(explanations))
if source.endswith(','):
source = source[:-1]
source += ')'
try:
self.result = frame.eval(source, **vars)
except passthroughex:
raise
except:
raise Failure(self)
if not node.is_builtin(frame) or not self.is_bool(frame):
r = frame.repr(self.result)
self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
class Getattr(Interpretable):
__view__ = ast.Getattr
def eval(self, frame):
expr = Interpretable(self.expr)
expr.eval(frame)
source = '__exprinfo_expr.%s' % self.attrname
try:
self.result = frame.eval(source, __exprinfo_expr=expr.result)
except passthroughex:
raise
except:
raise Failure(self)
self.explanation = '%s.%s' % (expr.explanation, self.attrname)
# if the attribute comes from the instance, its value is interesting
source = ('hasattr(__exprinfo_expr, "__dict__") and '
'%r in __exprinfo_expr.__dict__' % self.attrname)
try:
from_instance = frame.is_true(
frame.eval(source, __exprinfo_expr=expr.result))
except passthroughex:
raise
except:
from_instance = True
if from_instance:
r = frame.repr(self.result)
self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
# == Re-interpretation of full statements ==
class Assert(Interpretable):
__view__ = ast.Assert
def run(self, frame):
test = Interpretable(self.test)
test.eval(frame)
# simplify 'assert False where False = ...'
if (test.explanation.startswith('False\n{False = ') and
test.explanation.endswith('\n}')):
test.explanation = test.explanation[15:-2]
# print the result as 'assert <explanation>'
self.result = test.result
self.explanation = 'assert ' + test.explanation
if not frame.is_true(test.result):
try:
raise BuiltinAssertionError
except passthroughex:
raise
except:
raise Failure(self)
class Assign(Interpretable):
__view__ = ast.Assign
def run(self, frame):
expr = Interpretable(self.expr)
expr.eval(frame)
self.result = expr.result
self.explanation = '... = ' + expr.explanation
# fall-back-run the rest of the assignment
ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
mod = ast.Module(None, ast.Stmt([ass]))
mod.filename = '<run>'
co = pycodegen.ModuleCodeGenerator(mod).getCode()
try:
frame.exec_(co, __exprinfo_expr=expr.result)
except passthroughex:
raise
except:
raise Failure(self)
class Discard(Interpretable):
__view__ = ast.Discard
def run(self, frame):
expr = Interpretable(self.expr)
expr.eval(frame)
self.result = expr.result
self.explanation = expr.explanation
class Stmt(Interpretable):
__view__ = ast.Stmt
def run(self, frame):
for stmt in self.nodes:
stmt = Interpretable(stmt)
stmt.run(frame)
def report_failure(e):
explanation = e.node.nice_explanation()
if explanation:
explanation = ", in: " + explanation
else:
explanation = ""
sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
def check(s, frame=None):
if frame is None:
frame = sys._getframe(1)
frame = py.code.Frame(frame)
expr = parse(s, 'eval')
assert isinstance(expr, ast.Expression)
node = Interpretable(expr.node)
try:
node.eval(frame)
except passthroughex:
raise
except Failure:
e = sys.exc_info()[1]
report_failure(e)
else:
if not frame.is_true(node.result):
sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
###########################################################
# API / Entry points
# #########################################################
def interpret(source, frame, should_fail=False):
module = Interpretable(parse(source, 'exec').node)
#print "got module", module
if isinstance(frame, py.std.types.FrameType):
frame = py.code.Frame(frame)
try:
module.run(frame)
except Failure:
e = sys.exc_info()[1]
return getfailure(e)
except passthroughex:
raise
except:
import traceback
traceback.print_exc()
if should_fail:
return ("(assertion failed, but when it was re-run for "
"printing intermediate values, it did not fail. Suggestions: "
"compute assert expression before the assert or use --nomagic)")
else:
return None
def getmsg(excinfo):
if isinstance(excinfo, tuple):
excinfo = py.code.ExceptionInfo(excinfo)
#frame, line = gettbline(tb)
#frame = py.code.Frame(frame)
#return interpret(line, frame)
tb = excinfo.traceback[-1]
source = str(tb.statement).strip()
x = interpret(source, tb.frame, should_fail=True)
if not isinstance(x, str):
raise TypeError("interpret returned non-string %r" % (x,))
return x
def getfailure(e):
explanation = e.node.nice_explanation()
if str(e.value):
lines = explanation.split('\n')
lines[0] += " << %s" % (e.value,)
explanation = '\n'.join(lines)
text = "%s: %s" % (e.exc.__name__, explanation)
if text.startswith('AssertionError: assert '):
text = text[16:]
return text
def run(s, frame=None):
if frame is None:
frame = sys._getframe(1)
frame = py.code.Frame(frame)
module = Interpretable(parse(s, 'exec').node)
try:
module.run(frame)
except Failure:
e = sys.exc_info()[1]
report_failure(e)
if __name__ == '__main__':
# example:
def f():
return 5
def g():
return 3
def h(x):
return 'never'
check("f() * g() == 5")
check("not f()")
check("not (f() and g() or 0)")
check("f() == g()")
i = 4
check("i == f()")
check("len(f()) == 0")
check("isinstance(2+3+4, float)")
run("x = i")
check("x == 5")
run("assert not f(), 'oops'")
run("a, b, c = 1, 2")
run("a, b, c = f()")
check("max([f(),g()]) == 4")
check("'hello'[g()] == 'h'")
run("'guk%d' % h(f())")

View file

@ -0,0 +1,79 @@
# copied from python-2.7.3's traceback.py
# CHANGES:
# - some_str is replaced, trying to create unicode strings
#
import types
def format_exception_only(etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.last_type and sys.last_value. The return value is a list of
strings, each ending in a newline.
Normally, the list contains a single string; however, for
SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the list.
"""
# An instance should not have a meaningful value parameter, but
# sometimes does, particularly for string exceptions, such as
# >>> raise string1, string2 # deprecated
#
# Clear these out first because issubtype(string1, SyntaxError)
# would throw another exception and mask the original problem.
if (isinstance(etype, BaseException) or
isinstance(etype, types.InstanceType) or
etype is None or type(etype) is str):
return [_format_final_exc_line(etype, value)]
stype = etype.__name__
if not issubclass(etype, SyntaxError):
return [_format_final_exc_line(stype, value)]
# It was a syntax error; show exactly where the problem was found.
lines = []
try:
msg, (filename, lineno, offset, badline) = value.args
except Exception:
pass
else:
filename = filename or "<string>"
lines.append(' File "%s", line %d\n' % (filename, lineno))
if badline is not None:
lines.append(' %s\n' % badline.strip())
if offset is not None:
caretspace = badline.rstrip('\n')[:offset].lstrip()
# non-space whitespace (like tabs) must be kept for alignment
caretspace = ((c.isspace() and c or ' ') for c in caretspace)
# only three spaces to account for offset1 == pos 0
lines.append(' %s^\n' % ''.join(caretspace))
value = msg
lines.append(_format_final_exc_line(stype, value))
return lines
def _format_final_exc_line(etype, value):
"""Return a list of a single line -- normal case for format_exception_only"""
valuestr = _some_str(value)
if value is None or not valuestr:
line = "%s\n" % etype
else:
line = "%s: %s\n" % (etype, valuestr)
return line
def _some_str(value):
try:
return unicode(value)
except Exception:
try:
return str(value)
except Exception:
pass
return '<unprintable %s object>' % type(value).__name__
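# Illustrative sketch, not part of the upstream module: under Python 2 (the
# only place this file is imported) an ordinary exception is formatted as a
# single newline-terminated line.
def _example_format_exception_only():
    lines = format_exception_only(ValueError, ValueError("bad input"))
    assert lines == ["ValueError: bad input\n"]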

View file

@ -0,0 +1,94 @@
import sys
import py
BuiltinAssertionError = py.builtin.builtins.AssertionError
_reprcompare = None # if set, will be called by assert reinterp for comparison ops
def _format_explanation(explanation):
"""This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended to
cover nested explanations, see function and attribute explanations
for examples (.visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
raw_lines = (explanation or '').split('\n')
# escape newlines not followed by {, } and ~
lines = [raw_lines[0]]
for l in raw_lines[1:]:
if l.startswith('{') or l.startswith('}') or l.startswith('~'):
lines.append(l)
else:
lines[-1] += '\\n' + l
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith('{'):
if stackcnt[-1]:
s = 'and '
else:
s = 'where '
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(' +' + ' '*(len(stack)-1) + s + line[1:])
elif line.startswith('}'):
assert line.startswith('}')
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line.startswith('~')
result.append(' '*len(stack) + line[1:])
assert len(stack) == 1
return '\n'.join(result)
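# Illustrative sketch, not part of the upstream module: the "\n{ ... \n}"
# nesting documented above is rendered as an indented "where" continuation.
def _example_format_explanation():
    explanation = "False\n{False = f()\n}"
    assert _format_explanation(explanation) == "False\n + where False = f()"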
class AssertionError(BuiltinAssertionError):
def __init__(self, *args):
BuiltinAssertionError.__init__(self, *args)
if args:
try:
self.msg = str(args[0])
except py.builtin._sysex:
raise
except:
self.msg = "<[broken __repr__] %s at %0xd>" %(
args[0].__class__, id(args[0]))
else:
f = py.code.Frame(sys._getframe(1))
try:
source = f.code.fullsource
if source is not None:
try:
source = source.getstatement(f.lineno, assertion=True)
except IndexError:
source = None
else:
source = str(source.deindent()).strip()
except py.error.ENOENT:
source = None
# this can also occur during reinterpretation, when the
# co_filename is set to "<run>".
if source:
self.msg = reinterpret(source, f, should_fail=True)
else:
self.msg = "<could not determine information>"
if not self.args:
self.args = (self.msg,)
if sys.version_info > (3, 0):
AssertionError.__module__ = "builtins"
reinterpret_old = "old reinterpretation not available for py3"
else:
from py._code._assertionold import interpret as reinterpret_old
if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
from py._code._assertionnew import interpret as reinterpret
else:
reinterpret = reinterpret_old

View file

@ -0,0 +1,787 @@
import py
import sys
from inspect import CO_VARARGS, CO_VARKEYWORDS
builtin_repr = repr
reprlib = py.builtin._tryimport('repr', 'reprlib')
if sys.version_info[0] >= 3:
from traceback import format_exception_only
else:
from py._code._py2traceback import format_exception_only
class Code(object):
""" wrapper around Python code objects """
def __init__(self, rawcode):
if not hasattr(rawcode, "co_filename"):
rawcode = py.code.getrawcode(rawcode)
try:
self.filename = rawcode.co_filename
self.firstlineno = rawcode.co_firstlineno - 1
self.name = rawcode.co_name
except AttributeError:
raise TypeError("not a code object: %r" %(rawcode,))
self.raw = rawcode
def __eq__(self, other):
return self.raw == other.raw
def __ne__(self, other):
return not self == other
@property
def path(self):
""" return a path object pointing to source code (note that it
might not point to an actually existing file). """
p = py.path.local(self.raw.co_filename)
# maybe don't try this checking
if not p.check():
# XXX maybe try harder like the weird logic
# in the standard lib [linecache.updatecache] does?
p = self.raw.co_filename
return p
@property
def fullsource(self):
""" return a py.code.Source object for the full source file of the code
"""
from py._code import source
full, _ = source.findsource(self.raw)
return full
def source(self):
""" return a py.code.Source object for the code object's source only
"""
# return source only for that part of code
return py.code.Source(self.raw)
def getargs(self, var=False):
""" return a tuple with the argument names for the code object
if 'var' is set True also return the names of the variable and
keyword arguments when present
"""
# handy shortcut for getting args
raw = self.raw
argcount = raw.co_argcount
if var:
argcount += raw.co_flags & CO_VARARGS
argcount += raw.co_flags & CO_VARKEYWORDS
return raw.co_varnames[:argcount]
class Frame(object):
"""Wrapper around a Python frame holding f_locals and f_globals
in which expressions can be evaluated."""
def __init__(self, frame):
self.lineno = frame.f_lineno - 1
self.f_globals = frame.f_globals
self.f_locals = frame.f_locals
self.raw = frame
self.code = py.code.Code(frame.f_code)
@property
def statement(self):
""" statement this frame is at """
if self.code.fullsource is None:
return py.code.Source("")
return self.code.fullsource.getstatement(self.lineno)
def eval(self, code, **vars):
""" evaluate 'code' in the frame
'vars' are optional additional local variables
returns the result of the evaluation
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
return eval(code, self.f_globals, f_locals)
def exec_(self, code, **vars):
""" exec 'code' in the frame
'vars' are optional additional local variables
"""
f_locals = self.f_locals.copy()
f_locals.update(vars)
py.builtin.exec_(code, self.f_globals, f_locals )
def repr(self, object):
""" return a 'safe' (non-recursive, one-line) string repr for 'object'
"""
return py.io.saferepr(object)
def is_true(self, object):
return object
def getargs(self, var=False):
""" return a list of tuples (name, value) for all arguments
if 'var' is set True also include the variable and keyword
arguments when present
"""
retval = []
for arg in self.code.getargs(var):
try:
retval.append((arg, self.f_locals[arg]))
except KeyError:
pass # this can occur when using Psyco
return retval
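# Illustrative sketch, not part of the upstream module: Frame.eval() runs an
# expression against the wrapped frame's globals/locals, merged with any extra
# keyword variables passed in.
def _example_frame_eval():
    x = 41
    frame = Frame(sys._getframe())
    assert frame.eval("x + offset", offset=1) == 42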
class TracebackEntry(object):
""" a single entry in a traceback """
_repr_style = None
exprinfo = None
def __init__(self, rawentry):
self._rawentry = rawentry
self.lineno = rawentry.tb_lineno - 1
def set_repr_style(self, mode):
assert mode in ("short", "long")
self._repr_style = mode
@property
def frame(self):
return py.code.Frame(self._rawentry.tb_frame)
@property
def relline(self):
return self.lineno - self.frame.code.firstlineno
def __repr__(self):
return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
@property
def statement(self):
""" py.code.Source object for the current statement """
source = self.frame.code.fullsource
return source.getstatement(self.lineno)
@property
def path(self):
""" path to the source code """
return self.frame.code.path
def getlocals(self):
return self.frame.f_locals
locals = property(getlocals, None, None, "locals of underlying frame")
def reinterpret(self):
"""Reinterpret the failing statement and returns a detailed information
about what operations are performed."""
if self.exprinfo is None:
source = str(self.statement).strip()
x = py.code._reinterpret(source, self.frame, should_fail=True)
if not isinstance(x, str):
raise TypeError("interpret returned non-string %r" % (x,))
self.exprinfo = x
return self.exprinfo
def getfirstlinesource(self):
# on Jython this firstlineno can be -1 apparently
return max(self.frame.code.firstlineno, 0)
def getsource(self, astcache=None):
""" return failing source code. """
# we use the passed in astcache to not reparse asttrees
# within exception info printing
from py._code.source import getstatementrange_ast
source = self.frame.code.fullsource
if source is None:
return None
key = astnode = None
if astcache is not None:
key = self.frame.code.path
if key is not None:
astnode = astcache.get(key, None)
start = self.getfirstlinesource()
try:
astnode, _, end = getstatementrange_ast(self.lineno, source,
astnode=astnode)
except SyntaxError:
end = self.lineno + 1
else:
if key is not None:
astcache[key] = astnode
return source[start:end]
source = property(getsource)
def ishidden(self):
""" return True if the current frame has a var __tracebackhide__
resolving to True
mostly for internal use
"""
try:
return self.frame.f_locals['__tracebackhide__']
except KeyError:
try:
return self.frame.f_globals['__tracebackhide__']
except KeyError:
return False
def __str__(self):
try:
fn = str(self.path)
except py.error.Error:
fn = '???'
name = self.frame.code.name
try:
line = str(self.statement).lstrip()
except KeyboardInterrupt:
raise
except:
line = "???"
return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line)
def name(self):
return self.frame.code.raw.co_name
name = property(name, None, None, "co_name of underlying code")
class Traceback(list):
""" Traceback objects encapsulate and offer higher level
access to Traceback entries.
"""
Entry = TracebackEntry
def __init__(self, tb):
""" initialize from given python traceback object. """
if hasattr(tb, 'tb_next'):
def f(cur):
while cur is not None:
yield self.Entry(cur)
cur = cur.tb_next
list.__init__(self, f(tb))
else:
list.__init__(self, tb)
def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
""" return a Traceback instance wrapping part of this Traceback
by providing any combination of path, lineno and firstlineno, the
first frame to start the to-be-returned traceback is determined;
this allows cutting the first part of a Traceback instance e.g.
for formatting reasons (removing some uninteresting bits that deal
with handling of the exception/traceback)
"""
for x in self:
code = x.frame.code
codepath = code.path
if ((path is None or codepath == path) and
(excludepath is None or not hasattr(codepath, 'relto') or
not codepath.relto(excludepath)) and
(lineno is None or x.lineno == lineno) and
(firstlineno is None or x.frame.code.firstlineno == firstlineno)):
return Traceback(x._rawentry)
return self
def __getitem__(self, key):
val = super(Traceback, self).__getitem__(key)
if isinstance(key, type(slice(0))):
val = self.__class__(val)
return val
def filter(self, fn=lambda x: not x.ishidden()):
""" return a Traceback instance with certain items removed
fn is a function that gets a single argument, a TracebackItem
instance, and should return True when the item should be added
to the Traceback, False when not
by default this removes all the TracebackItems which are hidden
(see ishidden() above)
"""
return Traceback(filter(fn, self))
def getcrashentry(self):
""" return last non-hidden traceback entry that lead
to the exception of a traceback.
"""
for i in range(-1, -len(self)-1, -1):
entry = self[i]
if not entry.ishidden():
return entry
return self[-1]
def recursionindex(self):
""" return the index of the frame/TracebackItem where recursion
originates if appropriate, None if no recursion occurred
"""
cache = {}
for i, entry in enumerate(self):
# id for the code.raw is needed to work around
# the strange metaprogramming in the decorator lib from pypi
# which generates code objects that have hash/value equality
#XXX needs a test
key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
#print "checking for recursion at", key
l = cache.setdefault(key, [])
if l:
f = entry.frame
loc = f.f_locals
for otherloc in l:
if f.is_true(f.eval(co_equal,
__recursioncache_locals_1=loc,
__recursioncache_locals_2=otherloc)):
return i
l.append(entry.frame.f_locals)
return None
co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
'?', 'eval')
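# Illustrative sketch, not part of the upstream module: wrapping a raw
# traceback gives access to the crash entry and its code metadata.
def _example_traceback_usage():
    try:
        raise ValueError("boom")
    except ValueError:
        tb = Traceback(sys.exc_info()[2])
    entry = tb.getcrashentry()   # last non-hidden entry, here the raise site
    assert entry.frame.code.name == "_example_traceback_usage"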
class ExceptionInfo(object):
""" wraps sys.exc_info() objects and offers
help for navigating the traceback.
"""
_striptext = ''
def __init__(self, tup=None, exprinfo=None):
if tup is None:
tup = sys.exc_info()
if exprinfo is None and isinstance(tup[1], AssertionError):
exprinfo = getattr(tup[1], 'msg', None)
if exprinfo is None:
exprinfo = str(tup[1])
if exprinfo and exprinfo.startswith('assert '):
self._striptext = 'AssertionError: '
self._excinfo = tup
#: the exception class
self.type = tup[0]
#: the exception instance
self.value = tup[1]
#: the exception raw traceback
self.tb = tup[2]
#: the exception type name
self.typename = self.type.__name__
#: the exception traceback (py.code.Traceback instance)
self.traceback = py.code.Traceback(self.tb)
def __repr__(self):
return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
def exconly(self, tryshort=False):
""" return the exception as a string
when 'tryshort' resolves to True, and the exception is a
py.code._AssertionError, only the actual exception part of
the exception representation is returned (so 'AssertionError: ' is
removed from the beginning)
"""
lines = format_exception_only(self.type, self.value)
text = ''.join(lines)
text = text.rstrip()
if tryshort:
if text.startswith(self._striptext):
text = text[len(self._striptext):]
return text
def errisinstance(self, exc):
""" return True if the exception is an instance of exc """
return isinstance(self.value, exc)
def _getreprcrash(self):
exconly = self.exconly(tryshort=True)
entry = self.traceback.getcrashentry()
path, lineno = entry.frame.code.raw.co_filename, entry.lineno
return ReprFileLocation(path, lineno+1, exconly)
def getrepr(self, showlocals=False, style="long",
abspath=False, tbfilter=True, funcargs=False):
""" return str()able representation of this exception info.
showlocals: show locals per traceback entry
style: long|short|no|native traceback style
tbfilter: hide entries (where __tracebackhide__ is true)
in case of style==native, tbfilter and showlocals are ignored.
"""
if style == 'native':
return ReprExceptionInfo(ReprTracebackNative(
py.std.traceback.format_exception(
self.type,
self.value,
self.traceback[0]._rawentry,
)), self._getreprcrash())
fmt = FormattedExcinfo(showlocals=showlocals, style=style,
abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
return fmt.repr_excinfo(self)
def __str__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return str(loc)
def __unicode__(self):
entry = self.traceback[-1]
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
return unicode(loc)
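# Illustrative sketch, not part of the upstream module: ExceptionInfo() wraps
# the currently handled exception and exposes its type name and a short
# one-line rendering.
def _example_exceptioninfo():
    try:
        assert 0, "explanation"
    except AssertionError:
        excinfo = ExceptionInfo()
    assert excinfo.typename == "AssertionError"
    assert excinfo.exconly(tryshort=True).startswith("AssertionError")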
class FormattedExcinfo(object):
""" presenting information about failing Functions and Generators. """
# for traceback entries
flow_marker = ">"
fail_marker = "E"
def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
self.showlocals = showlocals
self.style = style
self.tbfilter = tbfilter
self.funcargs = funcargs
self.abspath = abspath
self.astcache = {}
def _getindent(self, source):
# figure out indent for given source
try:
s = str(source.getstatement(len(source)-1))
except KeyboardInterrupt:
raise
except:
try:
s = str(source[-1])
except KeyboardInterrupt:
raise
except:
return 0
return 4 + (len(s) - len(s.lstrip()))
def _getentrysource(self, entry):
source = entry.getsource(self.astcache)
if source is not None:
source = source.deindent()
return source
def _saferepr(self, obj):
return py.io.saferepr(obj)
def repr_args(self, entry):
if self.funcargs:
args = []
for argname, argvalue in entry.frame.getargs(var=True):
args.append((argname, self._saferepr(argvalue)))
return ReprFuncArgs(args)
def get_source(self, source, line_index=-1, excinfo=None, short=False):
""" return formatted and marked up source lines. """
lines = []
if source is None or line_index >= len(source.lines):
source = py.code.Source("???")
line_index = 0
if line_index < 0:
line_index += len(source)
space_prefix = " "
if short:
lines.append(space_prefix + source.lines[line_index].strip())
else:
for line in source.lines[:line_index]:
lines.append(space_prefix + line)
lines.append(self.flow_marker + " " + source.lines[line_index])
for line in source.lines[line_index+1:]:
lines.append(space_prefix + line)
if excinfo is not None:
indent = 4 if short else self._getindent(source)
lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
return lines
def get_exconly(self, excinfo, indent=4, markall=False):
lines = []
indent = " " * indent
# get the real exception information out
exlines = excinfo.exconly(tryshort=True).split('\n')
failindent = self.fail_marker + indent[1:]
for line in exlines:
lines.append(failindent + line)
if not markall:
failindent = indent
return lines
def repr_locals(self, locals):
if self.showlocals:
lines = []
keys = [loc for loc in locals if loc[0] != "@"]
keys.sort()
for name in keys:
value = locals[name]
if name == '__builtins__':
lines.append("__builtins__ = <builtins>")
else:
# This formatting could all be handled by the
# _repr() function, which is only reprlib.Repr in
# disguise, so is very configurable.
str_repr = self._saferepr(value)
#if len(str_repr) < 70 or not isinstance(value,
# (list, tuple, dict)):
lines.append("%-10s = %s" %(name, str_repr))
#else:
# self._line("%-10s =\\" % (name,))
# # XXX
# py.std.pprint.pprint(value, stream=self.excinfowriter)
return ReprLocals(lines)
def repr_traceback_entry(self, entry, excinfo=None):
source = self._getentrysource(entry)
if source is None:
source = py.code.Source("???")
line_index = 0
else:
# entry.getfirstlinesource() can be -1, should be 0 on jython
line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
lines = []
style = entry._repr_style
if style is None:
style = self.style
if style in ("short", "long"):
short = style == "short"
reprargs = self.repr_args(entry) if not short else None
s = self.get_source(source, line_index, excinfo, short=short)
lines.extend(s)
if short:
message = "in %s" %(entry.name)
else:
message = excinfo and excinfo.typename or ""
path = self._makepath(entry.path)
filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
localsrepr = None
if not short:
localsrepr = self.repr_locals(entry.locals)
return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
if excinfo:
lines.extend(self.get_exconly(excinfo, indent=4))
return ReprEntry(lines, None, None, None, style)
def _makepath(self, path):
if not self.abspath:
try:
np = py.path.local().bestrelpath(path)
except OSError:
return path
if len(np) < len(str(path)):
path = np
return path
def repr_traceback(self, excinfo):
traceback = excinfo.traceback
if self.tbfilter:
traceback = traceback.filter()
recursionindex = None
if excinfo.errisinstance(RuntimeError):
if "maximum recursion depth exceeded" in str(excinfo.value):
recursionindex = traceback.recursionindex()
last = traceback[-1]
entries = []
extraline = None
for index, entry in enumerate(traceback):
einfo = (last == entry) and excinfo or None
reprentry = self.repr_traceback_entry(entry, einfo)
entries.append(reprentry)
if index == recursionindex:
extraline = "!!! Recursion detected (same locals & position)"
break
return ReprTraceback(entries, extraline, style=self.style)
def repr_excinfo(self, excinfo):
reprtraceback = self.repr_traceback(excinfo)
reprcrash = excinfo._getreprcrash()
return ReprExceptionInfo(reprtraceback, reprcrash)
class TerminalRepr:
def __str__(self):
s = self.__unicode__()
if sys.version_info[0] < 3:
s = s.encode('utf-8')
return s
def __unicode__(self):
# FYI this is called from pytest-xdist's serialization of exception
# information.
io = py.io.TextIO()
tw = py.io.TerminalWriter(file=io)
self.toterminal(tw)
return io.getvalue().strip()
def __repr__(self):
return "<%s instance at %0x>" %(self.__class__, id(self))
class ReprExceptionInfo(TerminalRepr):
def __init__(self, reprtraceback, reprcrash):
self.reprtraceback = reprtraceback
self.reprcrash = reprcrash
self.sections = []
def addsection(self, name, content, sep="-"):
self.sections.append((name, content, sep))
def toterminal(self, tw):
self.reprtraceback.toterminal(tw)
for name, content, sep in self.sections:
tw.sep(sep, name)
tw.line(content)
class ReprTraceback(TerminalRepr):
entrysep = "_ "
def __init__(self, reprentries, extraline, style):
self.reprentries = reprentries
self.extraline = extraline
self.style = style
def toterminal(self, tw):
# the entries might have different styles
last_style = None
for i, entry in enumerate(self.reprentries):
if entry.style == "long":
tw.line("")
entry.toterminal(tw)
if i < len(self.reprentries) - 1:
next_entry = self.reprentries[i+1]
if entry.style == "long" or \
entry.style == "short" and next_entry.style == "long":
tw.sep(self.entrysep)
if self.extraline:
tw.line(self.extraline)
class ReprTracebackNative(ReprTraceback):
def __init__(self, tblines):
self.style = "native"
self.reprentries = [ReprEntryNative(tblines)]
self.extraline = None
class ReprEntryNative(TerminalRepr):
style = "native"
def __init__(self, tblines):
self.lines = tblines
def toterminal(self, tw):
tw.write("".join(self.lines))
class ReprEntry(TerminalRepr):
localssep = "_ "
def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
self.lines = lines
self.reprfuncargs = reprfuncargs
self.reprlocals = reprlocals
self.reprfileloc = filelocrepr
self.style = style
def toterminal(self, tw):
if self.style == "short":
self.reprfileloc.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
#tw.line("")
return
if self.reprfuncargs:
self.reprfuncargs.toterminal(tw)
for line in self.lines:
red = line.startswith("E ")
tw.line(line, bold=True, red=red)
if self.reprlocals:
#tw.sep(self.localssep, "Locals")
tw.line("")
self.reprlocals.toterminal(tw)
if self.reprfileloc:
if self.lines:
tw.line("")
self.reprfileloc.toterminal(tw)
def __str__(self):
return "%s\n%s\n%s" % ("\n".join(self.lines),
self.reprlocals,
self.reprfileloc)
class ReprFileLocation(TerminalRepr):
def __init__(self, path, lineno, message):
self.path = str(path)
self.lineno = lineno
self.message = message
def toterminal(self, tw):
# filename and lineno output for each entry,
# using an output format that most editors understand
msg = self.message
i = msg.find("\n")
if i != -1:
msg = msg[:i]
tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
class ReprLocals(TerminalRepr):
def __init__(self, lines):
self.lines = lines
def toterminal(self, tw):
for line in self.lines:
tw.line(line)
class ReprFuncArgs(TerminalRepr):
def __init__(self, args):
self.args = args
def toterminal(self, tw):
if self.args:
linesofar = ""
for name, value in self.args:
ns = "%s = %s" %(name, value)
if len(ns) + len(linesofar) + 2 > tw.fullwidth:
if linesofar:
tw.line(linesofar)
linesofar = ns
else:
if linesofar:
linesofar += ", " + ns
else:
linesofar = ns
if linesofar:
tw.line(linesofar)
tw.line("")
oldbuiltins = {}
def patch_builtins(assertion=True, compile=True):
""" put compile and AssertionError builtins to Python's builtins. """
if assertion:
from py._code import assertion
l = oldbuiltins.setdefault('AssertionError', [])
l.append(py.builtin.builtins.AssertionError)
py.builtin.builtins.AssertionError = assertion.AssertionError
if compile:
l = oldbuiltins.setdefault('compile', [])
l.append(py.builtin.builtins.compile)
py.builtin.builtins.compile = py.code.compile
def unpatch_builtins(assertion=True, compile=True):
""" remove compile and AssertionError builtins from Python builtins. """
if assertion:
py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
if compile:
py.builtin.builtins.compile = oldbuiltins['compile'].pop()
def getrawcode(obj, trycall=True):
""" return code object for given function. """
try:
return obj.__code__
except AttributeError:
obj = getattr(obj, 'im_func', obj)
obj = getattr(obj, 'func_code', obj)
obj = getattr(obj, 'f_code', obj)
obj = getattr(obj, '__code__', obj)
if trycall and not hasattr(obj, 'co_firstlineno'):
if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
x = getrawcode(obj.__call__, trycall=False)
if hasattr(x, 'co_firstlineno'):
return x
return obj

View file

@ -0,0 +1,419 @@
from __future__ import generators
from bisect import bisect_right
import sys
import inspect, tokenize
import py
from types import ModuleType
cpy_compile = compile
try:
import _ast
from _ast import PyCF_ONLY_AST as _AST_FLAG
except ImportError:
_AST_FLAG = 0
_ast = None
class Source(object):
""" a immutable object holding a source code fragment,
possibly deindenting it.
"""
_compilecounter = 0
def __init__(self, *parts, **kwargs):
self.lines = lines = []
de = kwargs.get('deindent', True)
rstrip = kwargs.get('rstrip', True)
for part in parts:
if not part:
partlines = []
if isinstance(part, Source):
partlines = part.lines
elif isinstance(part, (tuple, list)):
partlines = [x.rstrip("\n") for x in part]
elif isinstance(part, py.builtin._basestring):
partlines = part.split('\n')
if rstrip:
while partlines:
if partlines[-1].strip():
break
partlines.pop()
else:
partlines = getsource(part, deindent=de).lines
if de:
partlines = deindent(partlines)
lines.extend(partlines)
def __eq__(self, other):
try:
return self.lines == other.lines
except AttributeError:
if isinstance(other, str):
return str(self) == other
return False
def __getitem__(self, key):
if isinstance(key, int):
return self.lines[key]
else:
if key.step not in (None, 1):
raise IndexError("cannot slice a Source with a step")
return self.__getslice__(key.start, key.stop)
def __len__(self):
return len(self.lines)
def __getslice__(self, start, end):
newsource = Source()
newsource.lines = self.lines[start:end]
return newsource
def strip(self):
""" return new source object with trailing
and leading blank lines removed.
"""
start, end = 0, len(self)
while start < end and not self.lines[start].strip():
start += 1
while end > start and not self.lines[end-1].strip():
end -= 1
source = Source()
source.lines[:] = self.lines[start:end]
return source
def putaround(self, before='', after='', indent=' ' * 4):
""" return a copy of the source object with
'before' and 'after' wrapped around it.
"""
before = Source(before)
after = Source(after)
newsource = Source()
lines = [ (indent + line) for line in self.lines]
newsource.lines = before.lines + lines + after.lines
return newsource
def indent(self, indent=' ' * 4):
""" return a copy of the source object with
all lines indented by the given indent-string.
"""
newsource = Source()
newsource.lines = [(indent+line) for line in self.lines]
return newsource
def getstatement(self, lineno, assertion=False):
""" return Source statement which contains the
given linenumber (counted from 0).
"""
start, end = self.getstatementrange(lineno, assertion)
return self[start:end]
def getstatementrange(self, lineno, assertion=False):
""" return (start, end) tuple which spans the minimal
statement region which contains the given lineno.
"""
if not (0 <= lineno < len(self)):
raise IndexError("lineno out of range")
ast, start, end = getstatementrange_ast(lineno, self)
return start, end
def deindent(self, offset=None):
""" return a new source object deindented by offset.
If offset is None then guess an indentation offset from
the first non-blank line. Subsequent lines which have a
lower indentation offset will be copied verbatim as
they are assumed to be part of multilines.
"""
# XXX maybe use the tokenizer to properly handle multiline
# strings etc.pp?
newsource = Source()
newsource.lines[:] = deindent(self.lines, offset)
return newsource
def isparseable(self, deindent=True):
""" return True if source is parseable, heuristically
deindenting it by default.
"""
try:
import parser
except ImportError:
syntax_checker = lambda x: compile(x, 'asd', 'exec')
else:
syntax_checker = parser.suite
if deindent:
source = str(self.deindent())
else:
source = str(self)
try:
#compile(source+'\n', "x", "exec")
syntax_checker(source+'\n')
except KeyboardInterrupt:
raise
except Exception:
return False
else:
return True
def __str__(self):
return "\n".join(self.lines)
def compile(self, filename=None, mode='exec',
flag=generators.compiler_flag,
dont_inherit=0, _genframe=None):
""" return compiled code object. if filename is None
invent an artificial filename which displays
the source/line position of the caller frame.
"""
if not filename or py.path.local(filename).check(file=0):
if _genframe is None:
_genframe = sys._getframe(1) # the caller
fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
base = "<%d-codegen " % self._compilecounter
self.__class__._compilecounter += 1
if not filename:
filename = base + '%s:%d>' % (fn, lineno)
else:
filename = base + '%r %s:%d>' % (filename, fn, lineno)
source = "\n".join(self.lines) + '\n'
try:
co = cpy_compile(source, filename, mode, flag)
except SyntaxError:
ex = sys.exc_info()[1]
# re-represent syntax errors from parsing python strings
msglines = self.lines[:ex.lineno]
if ex.offset:
msglines.append(" "*ex.offset + '^')
msglines.append("(code was compiled probably from here: %s)" % filename)
newex = SyntaxError('\n'.join(msglines))
newex.offset = ex.offset
newex.lineno = ex.lineno
newex.text = ex.text
raise newex
else:
if flag & _AST_FLAG:
return co
lines = [(x + "\n") for x in self.lines]
if sys.version_info[0] >= 3:
# XXX py3's inspect.getsourcefile() checks for a module
# and a pep302 __loader__ ... we don't have a module
# at code compile-time so we need to fake it here
m = ModuleType("_pycodecompile_pseudo_module")
py.std.inspect.modulesbyfile[filename] = None
py.std.sys.modules[None] = m
m.__loader__ = 1
py.std.linecache.cache[filename] = (1, None, lines, filename)
return co
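# Illustrative sketch, not part of the upstream module: Source deindents its
# input on construction and getstatement() extracts the statement covering a
# given (0-based) line number.
def _example_source_usage():
    src = Source("    if x:\n        y = 1")
    assert src.lines == ["if x:", "    y = 1"]
    assert str(src.getstatement(1)) == "    y = 1"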
#
# public API shortcut functions
#
def compile_(source, filename=None, mode='exec', flags=
generators.compiler_flag, dont_inherit=0):
""" compile the given source to a raw code object,
and maintain an internal cache which allows later
retrieval of the source code for the code object
and any recursively created code objects.
"""
if _ast is not None and isinstance(source, _ast.AST):
# XXX should Source support having AST?
return cpy_compile(source, filename, mode, flags, dont_inherit)
_genframe = sys._getframe(1) # the caller
s = Source(source)
co = s.compile(filename, mode, flags, _genframe=_genframe)
return co
def getfslineno(obj):
""" Return source location (path, lineno) for the given object.
If the source cannot be determined return ("", -1)
"""
try:
code = py.code.Code(obj)
except TypeError:
try:
fn = (py.std.inspect.getsourcefile(obj) or
py.std.inspect.getfile(obj))
except TypeError:
return "", -1
fspath = fn and py.path.local(fn) or None
lineno = -1
if fspath:
try:
_, lineno = findsource(obj)
except IOError:
pass
else:
fspath = code.path
lineno = code.firstlineno
assert isinstance(lineno, int)
return fspath, lineno
#
# helper functions
#
def findsource(obj):
try:
sourcelines, lineno = py.std.inspect.findsource(obj)
except py.builtin._sysex:
raise
except:
return None, -1
source = Source()
source.lines = [line.rstrip() for line in sourcelines]
return source, lineno
def getsource(obj, **kwargs):
obj = py.code.getrawcode(obj)
try:
strsrc = inspect.getsource(obj)
except IndentationError:
strsrc = "\"Buggy python version consider upgrading, cannot get source\""
assert isinstance(strsrc, str)
return Source(strsrc, **kwargs)
def deindent(lines, offset=None):
if offset is None:
for line in lines:
line = line.expandtabs()
s = line.lstrip()
if s:
offset = len(line)-len(s)
break
else:
offset = 0
if offset == 0:
return list(lines)
newlines = []
def readline_generator(lines):
for line in lines:
yield line + '\n'
while True:
yield ''
it = readline_generator(lines)
try:
for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
if sline > len(lines):
break # End of input reached
if sline > len(newlines):
line = lines[sline - 1].expandtabs()
if line.lstrip() and line[:offset].isspace():
line = line[offset:] # Deindent
newlines.append(line)
for i in range(sline, eline):
# Don't deindent continuing lines of
# multiline tokens (i.e. multiline strings)
newlines.append(lines[i])
except (IndentationError, tokenize.TokenError):
pass
# Add any lines we didn't see. E.g. if an exception was raised.
newlines.extend(lines[len(newlines):])
return newlines
def get_statement_startend2(lineno, node):
import ast
# flatten all statements and except handlers into one lineno-list
# AST's line numbers start indexing at 1
l = []
for x in ast.walk(node):
if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler):
l.append(x.lineno - 1)
for name in "finalbody", "orelse":
val = getattr(x, name, None)
if val:
# treat the finally/orelse part as its own statement
l.append(val[0].lineno - 1 - 1)
l.sort()
insert_index = bisect_right(l, lineno)
start = l[insert_index - 1]
if insert_index >= len(l):
end = None
else:
end = l[insert_index]
return start, end
def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
if astnode is None:
content = str(source)
if sys.version_info < (2,7):
content += "\n"
try:
astnode = compile(content, "source", "exec", 1024) # 1024 for AST
except ValueError:
start, end = getstatementrange_old(lineno, source, assertion)
return None, start, end
start, end = get_statement_startend2(lineno, astnode)
# we need to correct the end:
# - ast-parsing strips comments
# - there might be empty lines
# - we might have lesser indented code blocks at the end
if end is None:
end = len(source.lines)
if end > start + 1:
# make sure we don't span differently indented code blocks
# by using the BlockFinder helper which inspect.getsource() itself uses
block_finder = inspect.BlockFinder()
# if we start with an indented line, put blockfinder to "started" mode
block_finder.started = source.lines[start][0].isspace()
it = ((x + "\n") for x in source.lines[start:end])
try:
for tok in tokenize.generate_tokens(lambda: next(it)):
block_finder.tokeneater(*tok)
except (inspect.EndOfBlock, IndentationError):
end = block_finder.last + start
except Exception:
pass
# the end might still point to a comment or empty line, correct it
while end:
line = source.lines[end - 1].lstrip()
if line.startswith("#") or not line:
end -= 1
else:
break
return astnode, start, end
def getstatementrange_old(lineno, source, assertion=False):
""" return (start, end) tuple which spans the minimal
statement region which contains the given lineno.
raise an IndexError if no such statementrange can be found.
"""
# XXX this logic is only used on python2.4 and below
# 1. find the start of the statement
from codeop import compile_command
for start in range(lineno, -1, -1):
if assertion:
line = source.lines[start]
# the following lines are not fully tested, change with care
if 'super' in line and 'self' in line and '__init__' in line:
raise IndexError("likely a subclass")
if "assert" not in line and "raise" not in line:
continue
trylines = source.lines[start:lineno+1]
# quick hack to prepare parsing an indented line with
# compile_command() (which errors on "return" outside defs)
trylines.insert(0, 'def xxx():')
trysource = '\n '.join(trylines)
# ^ space here
try:
compile_command(trysource)
except (SyntaxError, OverflowError, ValueError):
continue
# 2. find the end of the statement
for end in range(lineno+1, len(source)+1):
trysource = source[start:end]
if trysource.isparseable():
return start, end
raise SyntaxError("no valid source range around line %d " % (lineno,))

View file

@ -0,0 +1,88 @@
"""
create errno-specific classes for IO or os calls.
"""
import sys, os, errno
class Error(EnvironmentError):
def __repr__(self):
return "%s.%s %r: %s " %(self.__class__.__module__,
self.__class__.__name__,
self.__class__.__doc__,
" ".join(map(str, self.args)),
#repr(self.args)
)
def __str__(self):
s = "[%s]: %s" %(self.__class__.__doc__,
" ".join(map(str, self.args)),
)
return s
_winerrnomap = {
2: errno.ENOENT,
3: errno.ENOENT,
17: errno.EEXIST,
13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable
22: errno.ENOTDIR,
20: errno.ENOTDIR,
267: errno.ENOTDIR,
5: errno.EACCES, # anything better?
}
class ErrorMaker(object):
""" lazily provides Exception classes for each possible POSIX errno
(as defined per the 'errno' module). All such instances
subclass EnvironmentError.
"""
Error = Error
_errno2class = {}
def __getattr__(self, name):
if name[0] == "_":
raise AttributeError(name)
eno = getattr(errno, name)
cls = self._geterrnoclass(eno)
setattr(self, name, cls)
return cls
def _geterrnoclass(self, eno):
try:
return self._errno2class[eno]
except KeyError:
clsname = errno.errorcode.get(eno, "UnknownErrno%d" %(eno,))
errorcls = type(Error)(clsname, (Error,),
{'__module__':'py.error',
'__doc__': os.strerror(eno)})
self._errno2class[eno] = errorcls
return errorcls
def checked_call(self, func, *args, **kwargs):
""" call a function and raise an errno-exception if applicable. """
__tracebackhide__ = True
try:
return func(*args, **kwargs)
except self.Error:
raise
except (OSError, EnvironmentError):
cls, value, tb = sys.exc_info()
if not hasattr(value, 'errno'):
raise
__tracebackhide__ = False
errno = value.errno
try:
if not isinstance(value, WindowsError):
raise NameError
except NameError:
# we are not on Windows, or we got a proper OSError
cls = self._geterrnoclass(errno)
else:
try:
cls = self._geterrnoclass(_winerrnomap[errno])
except KeyError:
raise value
raise cls("%s%r" % (func.__name__, args))
__tracebackhide__ = True
error = ErrorMaker()
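# Illustrative sketch, not part of the upstream module: checked_call() turns
# the OSError raised by os.stat() on a missing path into the errno-specific
# class that attribute access on 'error' creates lazily.  The path below is
# made up and only needs to not exist.
def _example_checked_call():
    try:
        error.checked_call(os.stat, "/nonexistent/example/path")
    except error.ENOENT:
        pass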

View file

@ -0,0 +1,162 @@
""" brain-dead simple parser for ini-style files.
(C) Ronny Pfannschmidt, Holger Krekel -- MIT licensed
"""
__version__ = "0.2.dev2"
__all__ = ['IniConfig', 'ParseError']
COMMENTCHARS = "#;"
class ParseError(Exception):
def __init__(self, path, lineno, msg):
Exception.__init__(self, path, lineno, msg)
self.path = path
self.lineno = lineno
self.msg = msg
def __str__(self):
return "%s:%s: %s" %(self.path, self.lineno+1, self.msg)
class SectionWrapper(object):
def __init__(self, config, name):
self.config = config
self.name = name
def lineof(self, name):
return self.config.lineof(self.name, name)
def get(self, key, default=None, convert=str):
return self.config.get(self.name, key, convert=convert, default=default)
def __getitem__(self, key):
return self.config.sections[self.name][key]
def __iter__(self):
section = self.config.sections.get(self.name, [])
def lineof(key):
return self.config.lineof(self.name, key)
for name in sorted(section, key=lineof):
yield name
def items(self):
for name in self:
yield name, self[name]
class IniConfig(object):
def __init__(self, path, data=None):
self.path = str(path) # convenience
if data is None:
f = open(self.path)
try:
tokens = self._parse(iter(f))
finally:
f.close()
else:
tokens = self._parse(data.splitlines(True))
self._sources = {}
self.sections = {}
for lineno, section, name, value in tokens:
if section is None:
self._raise(lineno, 'no section header defined')
self._sources[section, name] = lineno
if name is None:
if section in self.sections:
self._raise(lineno, 'duplicate section %r'%(section, ))
self.sections[section] = {}
else:
if name in self.sections[section]:
self._raise(lineno, 'duplicate name %r'%(name, ))
self.sections[section][name] = value
def _raise(self, lineno, msg):
raise ParseError(self.path, lineno, msg)
def _parse(self, line_iter):
result = []
section = None
for lineno, line in enumerate(line_iter):
name, data = self._parseline(line, lineno)
# new value
if name is not None and data is not None:
result.append((lineno, section, name, data))
# new section
elif name is not None and data is None:
if not name:
self._raise(lineno, 'empty section name')
section = name
result.append((lineno, section, None, None))
# continuation
elif name is None and data is not None:
if not result:
self._raise(lineno, 'unexpected value continuation')
last = result.pop()
last_name, last_data = last[-2:]
if last_name is None:
self._raise(lineno, 'unexpected value continuation')
if last_data:
data = '%s\n%s' % (last_data, data)
result.append(last[:-1] + (data,))
return result
def _parseline(self, line, lineno):
# blank lines
if iscommentline(line):
line = ""
else:
line = line.rstrip()
if not line:
return None, None
# section
if line[0] == '[':
realline = line
for c in COMMENTCHARS:
line = line.split(c)[0].rstrip()
if line[-1] == "]":
return line[1:-1], None
return None, realline.strip()
# value
elif not line[0].isspace():
try:
name, value = line.split('=', 1)
if ":" in name:
raise ValueError()
except ValueError:
try:
name, value = line.split(":", 1)
except ValueError:
self._raise(lineno, 'unexpected line: %r' % line)
return name.strip(), value.strip()
# continuation
else:
return None, line.strip()
def lineof(self, section, name=None):
lineno = self._sources.get((section, name))
if lineno is not None:
return lineno + 1
def get(self, section, name, default=None, convert=str):
try:
return convert(self.sections[section][name])
except KeyError:
return default
def __getitem__(self, name):
if name not in self.sections:
raise KeyError(name)
return SectionWrapper(self, name)
def __iter__(self):
for name in sorted(self.sections, key=self.lineof):
yield SectionWrapper(self, name)
def __contains__(self, arg):
return arg in self.sections
def iscommentline(line):
c = line.lstrip()[:1]
return c in COMMENTCHARS
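# Illustrative sketch, not part of the upstream module: IniConfig can be fed
# the ini text directly via 'data'; the path argument ("example.ini" here) is
# only used in error messages.
def _example_iniconfig():
    config = IniConfig("example.ini", data="[server]\nport = 8080\n")
    assert config.get("server", "port", convert=int) == 8080
    assert config["server"]["port"] == "8080"
    assert config.lineof("server", "port") == 2   # 1-based line of the value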

View file

@ -0,0 +1 @@
""" input/output helping """

View file

@ -0,0 +1,371 @@
import os
import sys
import py
import tempfile
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
if sys.version_info < (3,0):
class TextIO(StringIO):
def write(self, data):
if not isinstance(data, unicode):
data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace')
StringIO.write(self, data)
else:
TextIO = StringIO
try:
from io import BytesIO
except ImportError:
class BytesIO(StringIO):
def write(self, data):
if isinstance(data, unicode):
raise TypeError("not a byte value: %r" %(data,))
StringIO.write(self, data)
patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
class FDCapture:
""" Capture IO to/from a given os-level filedescriptor. """
def __init__(self, targetfd, tmpfile=None, now=True, patchsys=False):
""" save targetfd descriptor, and open a new
temporary file there. If no tmpfile is
specified a tempfile.TemporaryFile() will be opened
in text mode.
"""
self.targetfd = targetfd
if tmpfile is None and targetfd != 0:
f = tempfile.TemporaryFile('wb+')
tmpfile = dupfile(f, encoding="UTF-8")
f.close()
self.tmpfile = tmpfile
self._savefd = os.dup(self.targetfd)
if patchsys:
self._oldsys = getattr(sys, patchsysdict[targetfd])
if now:
self.start()
def start(self):
try:
os.fstat(self._savefd)
except OSError:
raise ValueError("saved filedescriptor not valid, "
"did you call start() twice?")
if self.targetfd == 0 and not self.tmpfile:
fd = os.open(devnullpath, os.O_RDONLY)
os.dup2(fd, 0)
os.close(fd)
if hasattr(self, '_oldsys'):
setattr(sys, patchsysdict[self.targetfd], DontReadFromInput())
else:
os.dup2(self.tmpfile.fileno(), self.targetfd)
if hasattr(self, '_oldsys'):
setattr(sys, patchsysdict[self.targetfd], self.tmpfile)
def done(self):
""" unpatch and clean up, returns the self.tmpfile (file object)
"""
os.dup2(self._savefd, self.targetfd)
os.close(self._savefd)
if self.targetfd != 0:
self.tmpfile.seek(0)
if hasattr(self, '_oldsys'):
setattr(sys, patchsysdict[self.targetfd], self._oldsys)
return self.tmpfile
def writeorg(self, data):
""" write a string to the original file descriptor
"""
tempfp = tempfile.TemporaryFile()
try:
os.dup2(self._savefd, tempfp.fileno())
tempfp.write(data)
finally:
tempfp.close()
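# Usage sketch (hedged): FDCapture redirects an OS-level descriptor into a
# temporary file; done() restores the descriptor and hands back that file.
#   >>> cap = FDCapture(1)                  # capture fd 1 (stdout), started now
#   >>> os.write(1, "low level".encode())   # bypasses sys.stdout, still captured
#   >>> f = cap.done()                      # fd 1 restored, tmpfile rewound
#   >>> f.read()
#   'low level'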
def dupfile(f, mode=None, buffering=0, raising=False, encoding=None):
""" return a new open file object that's a duplicate of f
mode is duplicated if not given, 'buffering' controls
buffer size (defaulting to no buffering) and 'raising'
defines whether an exception is raised when an incompatible
file object is passed in (if raising is False, the file
object itself will be returned)
"""
try:
fd = f.fileno()
mode = mode or f.mode
except AttributeError:
if raising:
raise
return f
newfd = os.dup(fd)
if sys.version_info >= (3,0):
if encoding is not None:
mode = mode.replace("b", "")
buffering = True
return os.fdopen(newfd, mode, buffering, encoding, closefd=True)
else:
f = os.fdopen(newfd, mode, buffering)
if encoding is not None:
return EncodedFile(f, encoding)
return f
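# Usage sketch (hedged): dupfile() gives an independent file object on a
# duplicated descriptor, e.g. to keep writing to the real stdout while fd 1
# is temporarily redirected by FDCapture above:
#   >>> keep = dupfile(sys.stdout, encoding="utf-8")
#   >>> keep.write(u"still reaches the original descriptor\n")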
class EncodedFile(object):
def __init__(self, _stream, encoding):
self._stream = _stream
self.encoding = encoding
def write(self, obj):
if isinstance(obj, unicode):
obj = obj.encode(self.encoding)
elif isinstance(obj, str):
pass
else:
obj = str(obj)
self._stream.write(obj)
def writelines(self, linelist):
data = ''.join(linelist)
self.write(data)
def __getattr__(self, name):
return getattr(self._stream, name)
class Capture(object):
def call(cls, func, *args, **kwargs):
""" return a (res, out, err) tuple where
out and err hold the captured output and error text
produced during function execution.
call the given function with args/kwargs
and capture output/error during its execution.
"""
so = cls()
try:
res = func(*args, **kwargs)
finally:
out, err = so.reset()
return res, out, err
call = classmethod(call)
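# Usage sketch (hedged): call() works on any concrete subclass below
# (StdCapture / StdCaptureFD) and bundles result and captured streams:
#   >>> def greet():
#   ...     print("hi")
#   ...     return 42
#   >>> res, out, err = StdCapture.call(greet)
#   >>> res, out, err
#   (42, 'hi\n', '')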
def reset(self):
""" reset sys.stdout/stderr and return captured output as strings. """
if hasattr(self, '_reset'):
raise ValueError("was already reset")
self._reset = True
outfile, errfile = self.done(save=False)
out, err = "", ""
if outfile and not outfile.closed:
out = outfile.read()
outfile.close()
if errfile and errfile != outfile and not errfile.closed:
err = errfile.read()
errfile.close()
return out, err
def suspend(self):
""" return current snapshot captures, memorize tempfiles. """
outerr = self.readouterr()
outfile, errfile = self.done()
return outerr
class StdCaptureFD(Capture):
""" This class allows to capture writes to FD1 and FD2
and may connect a NULL file to FD0 (and prevent
reads from sys.stdin). If any of the 0,1,2 file descriptors
is invalid it will not be captured.
"""
def __init__(self, out=True, err=True, mixed=False,
in_=True, patchsys=True, now=True):
self._options = {
"out": out,
"err": err,
"mixed": mixed,
"in_": in_,
"patchsys": patchsys,
"now": now,
}
self._save()
if now:
self.startall()
def _save(self):
in_ = self._options['in_']
out = self._options['out']
err = self._options['err']
mixed = self._options['mixed']
patchsys = self._options['patchsys']
if in_:
try:
self.in_ = FDCapture(0, tmpfile=None, now=False,
patchsys=patchsys)
except OSError:
pass
if out:
tmpfile = None
if hasattr(out, 'write'):
tmpfile = out
try:
self.out = FDCapture(1, tmpfile=tmpfile,
now=False, patchsys=patchsys)
self._options['out'] = self.out.tmpfile
except OSError:
pass
if err:
if out and mixed:
tmpfile = self.out.tmpfile
elif hasattr(err, 'write'):
tmpfile = err
else:
tmpfile = None
try:
self.err = FDCapture(2, tmpfile=tmpfile,
now=False, patchsys=patchsys)
self._options['err'] = self.err.tmpfile
except OSError:
pass
def startall(self):
if hasattr(self, 'in_'):
self.in_.start()
if hasattr(self, 'out'):
self.out.start()
if hasattr(self, 'err'):
self.err.start()
def resume(self):
""" resume capturing with original temp files. """
self.startall()
def done(self, save=True):
""" return (outfile, errfile) and stop capturing. """
outfile = errfile = None
if hasattr(self, 'out') and not self.out.tmpfile.closed:
outfile = self.out.done()
if hasattr(self, 'err') and not self.err.tmpfile.closed:
errfile = self.err.done()
if hasattr(self, 'in_'):
tmpfile = self.in_.done()
if save:
self._save()
return outfile, errfile
def readouterr(self):
""" return snapshot value of stdout/stderr capturings. """
if hasattr(self, "out"):
out = self._readsnapshot(self.out.tmpfile)
else:
out = ""
if hasattr(self, "err"):
err = self._readsnapshot(self.err.tmpfile)
else:
err = ""
return [out, err]
def _readsnapshot(self, f):
f.seek(0)
res = f.read()
enc = getattr(f, "encoding", None)
if enc:
res = py.builtin._totext(res, enc, "replace")
f.truncate(0)
f.seek(0)
return res
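# Usage sketch (hedged): a typical StdCaptureFD life cycle -- readouterr()
# takes snapshots while capturing continues, reset() restores the descriptors
# and returns whatever was written since the last snapshot:
#   >>> cap = StdCaptureFD(in_=False)
#   >>> print("step 1")
#   >>> out, err = cap.readouterr()   # ('step 1\n', ''), still capturing
#   >>> print("step 2")
#   >>> out, err = cap.reset()        # ('step 2\n', ''), fds restored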
class StdCapture(Capture):
""" This class allows to capture writes to sys.stdout|stderr "in-memory"
and will raise errors on tries to read from sys.stdin. It only
modifies sys.stdout|stderr|stdin attributes and does not
touch underlying File Descriptors (use StdCaptureFD for that).
"""
def __init__(self, out=True, err=True, in_=True, mixed=False, now=True):
self._oldout = sys.stdout
self._olderr = sys.stderr
self._oldin = sys.stdin
if out and not hasattr(out, 'file'):
out = TextIO()
self.out = out
if err:
if mixed:
err = out
elif not hasattr(err, 'write'):
err = TextIO()
self.err = err
self.in_ = in_
if now:
self.startall()
def startall(self):
if self.out:
sys.stdout = self.out
if self.err:
sys.stderr = self.err
if self.in_:
sys.stdin = self.in_ = DontReadFromInput()
def done(self, save=True):
""" return (outfile, errfile) and stop capturing. """
outfile = errfile = None
if self.out and not self.out.closed:
sys.stdout = self._oldout
outfile = self.out
outfile.seek(0)
if self.err and not self.err.closed:
sys.stderr = self._olderr
errfile = self.err
errfile.seek(0)
if self.in_:
sys.stdin = self._oldin
return outfile, errfile
def resume(self):
""" resume capturing with original temp files. """
self.startall()
def readouterr(self):
""" return snapshot value of stdout/stderr capturings. """
out = err = ""
if self.out:
out = self.out.getvalue()
self.out.truncate(0)
self.out.seek(0)
if self.err:
err = self.err.getvalue()
self.err.truncate(0)
self.err.seek(0)
return out, err
class DontReadFromInput:
"""Temporary stub class. Ideally when stdin is accessed, the
capturing should be turned off, with possibly all data captured
so far sent to the screen. This should be configurable, though,
because in automated test runs it is better to crash than
hang indefinitely.
"""
def read(self, *args):
raise IOError("reading from stdin while output is captured")
readline = read
readlines = read
__iter__ = read
def fileno(self):
raise ValueError("redirected Stdin is pseudofile, has no fileno()")
def isatty(self):
return False
def close(self):
pass
try:
devnullpath = os.devnull
except AttributeError:
if os.name == 'nt':
devnullpath = 'NUL'
else:
devnullpath = '/dev/null'

View file

@@ -0,0 +1,71 @@
import py
import sys
builtin_repr = repr
reprlib = py.builtin._tryimport('repr', 'reprlib')
class SafeRepr(reprlib.Repr):
""" subclass of repr.Repr that limits the resulting size of repr()
and includes information on exceptions raised during the call.
"""
def repr(self, x):
return self._callhelper(reprlib.Repr.repr, self, x)
def repr_unicode(self, x, level):
# Strictly speaking wrong on narrow builds
def repr(u):
if "'" not in u:
return py.builtin._totext("'%s'") % u
elif '"' not in u:
return py.builtin._totext('"%s"') % u
else:
return py.builtin._totext("'%s'") % u.replace("'", r"\'")
s = repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
return self._callhelper(builtin_repr, x)
def _callhelper(self, call, x, *args):
try:
# Try the vanilla repr and make sure that the result is a string
s = call(x, *args)
except py.builtin._sysex:
raise
except:
cls, e, tb = sys.exc_info()
exc_name = getattr(cls, '__name__', 'unknown')
try:
exc_info = str(e)
except py.builtin._sysex:
raise
except:
exc_info = 'unknown'
return '<[%s("%s") raised in repr()] %s object at 0x%x>' % (
exc_name, exc_info, x.__class__.__name__, id(x))
else:
if len(s) > self.maxsize:
i = max(0, (self.maxsize-3)//2)
j = max(0, self.maxsize-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def saferepr(obj, maxsize=240):
""" return a size-limited safe repr-string for the given object.
Failing __repr__ functions of user instances will be represented
with a short exception info and 'saferepr' generally takes
care to never raise exceptions itself. This function is a wrapper
around the Repr/reprlib functionality of the standard 2.6 lib.
"""
# review exception handling
srepr = SafeRepr()
srepr.maxstring = maxsize
srepr.maxsize = maxsize
srepr.maxother = 160
return srepr.repr(obj)
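# Usage sketch (hedged): saferepr() truncates oversized reprs and never lets a
# broken __repr__ escape (the exact truncation point and object address vary):
#   >>> saferepr("x" * 1000, maxsize=30)     # roughly 30 chars with '...' inside
#   >>> class Boom(object):
#   ...     def __repr__(self): raise RuntimeError("nope")
#   >>> saferepr(Boom())
#   '<[RuntimeError("nope") raised in repr()] Boom object at 0x...>'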

View file

@@ -0,0 +1,348 @@
"""
Helper functions for writing to terminals and files.
"""
import sys, os
import py
py3k = sys.version_info[0] >= 3
from py.builtin import text, bytes
win32_and_ctypes = False
colorama = None
if sys.platform == "win32":
try:
import colorama
except ImportError:
try:
import ctypes
win32_and_ctypes = True
except ImportError:
pass
def _getdimensions():
import termios,fcntl,struct
call = fcntl.ioctl(1,termios.TIOCGWINSZ,"\000"*8)
height,width = struct.unpack( "hhhh", call ) [:2]
return height, width
def get_terminal_width():
height = width = 0
try:
height, width = _getdimensions()
except py.builtin._sysex:
raise
except:
# pass to fallback below
pass
if width == 0:
# FALLBACK:
# * some exception happened
# * or this is emacs terminal which reports (0,0)
width = int(os.environ.get('COLUMNS', 80))
# XXX the windows getdimensions may be bogus, let's sanify a bit
if width < 40:
width = 80
return width
terminal_width = get_terminal_width()
# XXX unify with _escaped func below
def ansi_print(text, esc, file=None, newline=True, flush=False):
if file is None:
file = sys.stderr
text = text.rstrip()
if esc and not isinstance(esc, tuple):
esc = (esc,)
if esc and sys.platform != "win32" and file.isatty():
text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
text +
'\x1b[0m') # ANSI color code "reset"
if newline:
text += '\n'
if esc and win32_and_ctypes and file.isatty():
if 1 in esc:
bold = True
esc = tuple([x for x in esc if x != 1])
else:
bold = False
esctable = {() : FOREGROUND_WHITE, # normal
(31,): FOREGROUND_RED, # red
(32,): FOREGROUND_GREEN, # green
(33,): FOREGROUND_GREEN|FOREGROUND_RED, # yellow
(34,): FOREGROUND_BLUE, # blue
(35,): FOREGROUND_BLUE|FOREGROUND_RED, # purple
(36,): FOREGROUND_BLUE|FOREGROUND_GREEN, # cyan
(37,): FOREGROUND_WHITE, # white
(39,): FOREGROUND_WHITE, # reset
}
attr = esctable.get(esc, FOREGROUND_WHITE)
if bold:
attr |= FOREGROUND_INTENSITY
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
if file is sys.stderr:
handle = GetStdHandle(STD_ERROR_HANDLE)
else:
handle = GetStdHandle(STD_OUTPUT_HANDLE)
oldcolors = GetConsoleInfo(handle).wAttributes
attr |= (oldcolors & 0x0f0)
SetConsoleTextAttribute(handle, attr)
while len(text) > 32768:
file.write(text[:32768])
text = text[32768:]
if text:
file.write(text)
SetConsoleTextAttribute(handle, oldcolors)
else:
file.write(text)
if flush:
file.flush()
def should_do_markup(file):
if os.environ.get('PY_COLORS') == '1':
return True
if os.environ.get('PY_COLORS') == '0':
return False
return hasattr(file, 'isatty') and file.isatty() \
and os.environ.get('TERM') != 'dumb' \
and not (sys.platform.startswith('java') and os._name == 'nt')
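# Behaviour sketch (hedged): PY_COLORS overrides tty detection in both
# directions, so CI environments can force or suppress ANSI markup:
#   >>> os.environ["PY_COLORS"] = "1"; should_do_markup(sys.stdout)
#   True
#   >>> os.environ["PY_COLORS"] = "0"; should_do_markup(sys.stdout)
#   False
# With PY_COLORS unset, markup additionally requires an isatty() stream and
# TERM != "dumb".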
class TerminalWriter(object):
_esctable = dict(black=30, red=31, green=32, yellow=33,
blue=34, purple=35, cyan=36, white=37,
Black=40, Red=41, Green=42, Yellow=43,
Blue=44, Purple=45, Cyan=46, White=47,
bold=1, light=2, blink=5, invert=7)
# XXX deprecate stringio argument
def __init__(self, file=None, stringio=False, encoding=None):
if file is None:
if stringio:
self.stringio = file = py.io.TextIO()
else:
file = py.std.sys.stdout
elif py.builtin.callable(file) and not (
hasattr(file, "write") and hasattr(file, "flush")):
file = WriteFile(file, encoding=encoding)
if hasattr(file, "isatty") and file.isatty() and colorama:
file = colorama.AnsiToWin32(file).stream
self.encoding = encoding or getattr(file, 'encoding', "utf-8")
self._file = file
self.fullwidth = get_terminal_width()
self.hasmarkup = should_do_markup(file)
self._lastlen = 0
def _escaped(self, text, esc):
if esc and self.hasmarkup:
text = (''.join(['\x1b[%sm' % cod for cod in esc]) +
text +'\x1b[0m')
return text
def markup(self, text, **kw):
esc = []
for name in kw:
if name not in self._esctable:
raise ValueError("unknown markup: %r" %(name,))
if kw[name]:
esc.append(self._esctable[name])
return self._escaped(text, tuple(esc))
def sep(self, sepchar, title=None, fullwidth=None, **kw):
if fullwidth is None:
fullwidth = self.fullwidth
# the goal is to have the line be as long as possible
# under the condition that len(line) <= fullwidth
if sys.platform == "win32":
# if we print in the last column on windows we are on a
# new line but there is no way to verify/neutralize this
# (we may not know the exact line width)
# so let's be defensive to avoid empty lines in the output
fullwidth -= 1
if title is not None:
# we want 2 + 2*len(fill) + len(title) <= fullwidth
# i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth
# 2*len(sepchar)*N <= fullwidth - len(title) - 2
# N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
N = (fullwidth - len(title) - 2) // (2*len(sepchar))
fill = sepchar * N
line = "%s %s %s" % (fill, title, fill)
else:
# we want len(sepchar)*N <= fullwidth
# i.e. N <= fullwidth // len(sepchar)
line = sepchar * (fullwidth // len(sepchar))
# in some situations there is room for an extra sepchar at the right,
# in particular if we consider that with a sepchar like "_ " the
# trailing space is not important at the end of the line
if len(line) + len(sepchar.rstrip()) <= fullwidth:
line += sepchar.rstrip()
self.line(line, **kw)
def write(self, msg, **kw):
if msg:
if not isinstance(msg, (bytes, text)):
msg = text(msg)
if self.hasmarkup and kw:
markupmsg = self.markup(msg, **kw)
else:
markupmsg = msg
write_out(self._file, markupmsg)
def line(self, s='', **kw):
self.write(s, **kw)
self._checkfill(s)
self.write('\n')
def reline(self, line, **kw):
if not self.hasmarkup:
raise ValueError("cannot use rewrite-line without terminal")
self.write(line, **kw)
self._checkfill(line)
self.write('\r')
self._lastlen = len(line)
def _checkfill(self, line):
diff2last = self._lastlen - len(line)
if diff2last > 0:
self.write(" " * diff2last)
class Win32ConsoleWriter(TerminalWriter):
def write(self, msg, **kw):
if msg:
if not isinstance(msg, (bytes, text)):
msg = text(msg)
oldcolors = None
if self.hasmarkup and kw:
handle = GetStdHandle(STD_OUTPUT_HANDLE)
oldcolors = GetConsoleInfo(handle).wAttributes
default_bg = oldcolors & 0x00F0
attr = default_bg
if kw.pop('bold', False):
attr |= FOREGROUND_INTENSITY
if kw.pop('red', False):
attr |= FOREGROUND_RED
elif kw.pop('blue', False):
attr |= FOREGROUND_BLUE
elif kw.pop('green', False):
attr |= FOREGROUND_GREEN
elif kw.pop('yellow', False):
attr |= FOREGROUND_GREEN|FOREGROUND_RED
else:
attr |= oldcolors & 0x0007
SetConsoleTextAttribute(handle, attr)
write_out(self._file, msg)
if oldcolors:
SetConsoleTextAttribute(handle, oldcolors)
class WriteFile(object):
def __init__(self, writemethod, encoding=None):
self.encoding = encoding
self._writemethod = writemethod
def write(self, data):
if self.encoding:
data = data.encode(self.encoding, "replace")
self._writemethod(data)
def flush(self):
return
if win32_and_ctypes:
TerminalWriter = Win32ConsoleWriter
import ctypes
from ctypes import wintypes
# ctypes access to the Windows console
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLACK = 0x0000 # black text
FOREGROUND_BLUE = 0x0001 # text color contains blue.
FOREGROUND_GREEN = 0x0002 # text color contains green.
FOREGROUND_RED = 0x0004 # text color contains red.
FOREGROUND_WHITE = 0x0007
FOREGROUND_INTENSITY = 0x0008 # text color is intensified.
BACKGROUND_BLACK = 0x0000 # background color black
BACKGROUND_BLUE = 0x0010 # background color contains blue.
BACKGROUND_GREEN = 0x0020 # background color contains green.
BACKGROUND_RED = 0x0040 # background color contains red.
BACKGROUND_WHITE = 0x0070
BACKGROUND_INTENSITY = 0x0080 # background color is intensified.
SHORT = ctypes.c_short
class COORD(ctypes.Structure):
_fields_ = [('X', SHORT),
('Y', SHORT)]
class SMALL_RECT(ctypes.Structure):
_fields_ = [('Left', SHORT),
('Top', SHORT),
('Right', SHORT),
('Bottom', SHORT)]
class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [('dwSize', COORD),
('dwCursorPosition', COORD),
('wAttributes', wintypes.WORD),
('srWindow', SMALL_RECT),
('dwMaximumWindowSize', COORD)]
_GetStdHandle = ctypes.windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [wintypes.DWORD]
_GetStdHandle.restype = wintypes.HANDLE
def GetStdHandle(kind):
return _GetStdHandle(kind)
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
SetConsoleTextAttribute.restype = wintypes.BOOL
_GetConsoleScreenBufferInfo = \
ctypes.windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
def GetConsoleInfo(handle):
info = CONSOLE_SCREEN_BUFFER_INFO()
_GetConsoleScreenBufferInfo(handle, ctypes.byref(info))
return info
def _getdimensions():
handle = GetStdHandle(STD_OUTPUT_HANDLE)
info = GetConsoleInfo(handle)
# Subtract one from the width, otherwise the cursor wraps
# and the ending \n causes an empty line to display.
return info.dwSize.Y, info.dwSize.X - 1
def write_out(fil, msg):
# XXX sometimes "msg" is of type bytes, sometimes text which
# complicates the situation. Should we try to enforce unicode?
try:
# on py27 and above writing out to sys.stdout with an encoding
# should usually work for unicode messages (if the encoding is
# capable of it)
fil.write(msg)
except UnicodeEncodeError:
# on py26 it might not work because stdout expects bytes
if fil.encoding:
try:
fil.write(msg.encode(fil.encoding))
except UnicodeEncodeError:
# it might still fail if the encoding is not capable
pass
else:
fil.flush()
return
# fallback: escape all unicode characters
msg = msg.encode("unicode-escape").decode("ascii")
fil.write(msg)
fil.flush()

View file

@@ -0,0 +1,2 @@
""" logging API ('producers' and 'consumers' connected via keywords) """

View file

@@ -0,0 +1,186 @@
"""
basic logging functionality based on a producer/consumer scheme.
XXX implement this API: (maybe put it into slogger.py?)
log = Logger(
info=py.log.STDOUT,
debug=py.log.STDOUT,
command=None)
log.info("hello", "world")
log.command("hello", "world")
log = Logger(info=Logger(something=...),
debug=py.log.STDOUT,
command=None)
"""
import py, sys
class Message(object):
def __init__(self, keywords, args):
self.keywords = keywords
self.args = args
def content(self):
return " ".join(map(str, self.args))
def prefix(self):
return "[%s] " % (":".join(self.keywords))
def __str__(self):
return self.prefix() + self.content()
class Producer(object):
""" (deprecated) Log producer API which sends messages to be logged
to a 'consumer' object, which then prints them to stdout,
stderr, files, etc. Used extensively by PyPy-1.1.
"""
Message = Message # to allow later customization
keywords2consumer = {}
def __init__(self, keywords, keywordmapper=None, **kw):
if hasattr(keywords, 'split'):
keywords = tuple(keywords.split())
self._keywords = keywords
if keywordmapper is None:
keywordmapper = default_keywordmapper
self._keywordmapper = keywordmapper
def __repr__(self):
return "<py.log.Producer %s>" % ":".join(self._keywords)
def __getattr__(self, name):
if '_' in name:
raise AttributeError(name)
producer = self.__class__(self._keywords + (name,))
setattr(self, name, producer)
return producer
def __call__(self, *args):
""" write a message to the appropriate consumer(s) """
func = self._keywordmapper.getconsumer(self._keywords)
if func is not None:
func(self.Message(self._keywords, args))
class KeywordMapper:
def __init__(self):
self.keywords2consumer = {}
def getstate(self):
return self.keywords2consumer.copy()
def setstate(self, state):
self.keywords2consumer.clear()
self.keywords2consumer.update(state)
def getconsumer(self, keywords):
""" return a consumer matching the given keywords.
tries to find the most suitable consumer by dropping keywords
from the end; the first consumer registered for a matching
keyword prefix is returned (falling back to the 'default' consumer)
"""
for i in range(len(keywords), 0, -1):
try:
return self.keywords2consumer[keywords[:i]]
except KeyError:
continue
return self.keywords2consumer.get('default', default_consumer)
def setconsumer(self, keywords, consumer):
""" set a consumer for a set of keywords. """
# normalize to tuples
if isinstance(keywords, str):
keywords = tuple(filter(None, keywords.split()))
elif hasattr(keywords, '_keywords'):
keywords = keywords._keywords
elif not isinstance(keywords, tuple):
raise TypeError("key %r is not a string or tuple" % (keywords,))
if consumer is not None and not py.builtin.callable(consumer):
if not hasattr(consumer, 'write'):
raise TypeError(
"%r should be None, callable or file-like" % (consumer,))
consumer = File(consumer)
self.keywords2consumer[keywords] = consumer
def default_consumer(msg):
""" the default consumer, prints the message to stdout (using 'print') """
sys.stderr.write(str(msg)+"\n")
default_keywordmapper = KeywordMapper()
def setconsumer(keywords, consumer):
default_keywordmapper.setconsumer(keywords, consumer)
def setstate(state):
default_keywordmapper.setstate(state)
def getstate():
return default_keywordmapper.getstate()
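# Usage sketch (hedged): producers grow a keyword per attribute access and the
# mapper routes each call to the most specific registered consumer:
#   >>> log = Producer("wpt")
#   >>> setconsumer("wpt debug", STDOUT)   # only "wpt debug ..." goes to stdout
#   >>> setconsumer("wpt", None)           # everything else under "wpt" is dropped
#   >>> log.debug("connected")             # prints "[wpt:debug] connected"
#   >>> log.info("ignored")                # no matching consumer -> silent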
#
# Consumers
#
class File(object):
""" log consumer wrapping a file(-like) object """
def __init__(self, f):
assert hasattr(f, 'write')
#assert isinstance(f, file) or not hasattr(f, 'open')
self._file = f
def __call__(self, msg):
""" write a message to the log """
self._file.write(str(msg) + "\n")
if hasattr(self._file, 'flush'):
self._file.flush()
class Path(object):
""" log consumer that opens and writes to a Path """
def __init__(self, filename, append=False,
delayed_create=False, buffering=False):
self._append = append
self._filename = str(filename)
self._buffering = buffering
if not delayed_create:
self._openfile()
def _openfile(self):
mode = self._append and 'a' or 'w'
f = open(self._filename, mode)
self._file = f
def __call__(self, msg):
""" write a message to the log """
if not hasattr(self, "_file"):
self._openfile()
self._file.write(str(msg) + "\n")
if not self._buffering:
self._file.flush()
def STDOUT(msg):
""" consumer that writes to sys.stdout """
sys.stdout.write(str(msg)+"\n")
def STDERR(msg):
""" consumer that writes to sys.stderr """
sys.stderr.write(str(msg)+"\n")
class Syslog:
""" consumer that writes to the syslog daemon """
def __init__(self, priority = None):
if priority is None:
priority = self.LOG_INFO
self.priority = priority
def __call__(self, msg):
""" write a message to the log """
py.std.syslog.syslog(self.priority, str(msg))
for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
_prio = "LOG_" + _prio
try:
setattr(Syslog, _prio, getattr(py.std.syslog, _prio))
except AttributeError:
pass

View file

@@ -0,0 +1,76 @@
import py, sys
class DeprecationWarning(DeprecationWarning):
def __init__(self, msg, path, lineno):
self.msg = msg
self.path = path
self.lineno = lineno
def __repr__(self):
return "%s:%d: %s" %(self.path, self.lineno+1, self.msg)
def __str__(self):
return self.msg
def _apiwarn(startversion, msg, stacklevel=2, function=None):
# below is mostly COPIED from python2.4/warnings.py's def warn()
# Get context information
if isinstance(stacklevel, str):
frame = sys._getframe(1)
level = 1
found = frame.f_code.co_filename.find(stacklevel) != -1
while frame:
co = frame.f_code
if co.co_filename.find(stacklevel) == -1:
if found:
stacklevel = level
break
else:
found = True
level += 1
frame = frame.f_back
else:
stacklevel = 1
msg = "%s (since version %s)" %(msg, startversion)
warn(msg, stacklevel=stacklevel+1, function=function)
def warn(msg, stacklevel=1, function=None):
if function is not None:
filename = py.std.inspect.getfile(function)
lineno = py.code.getrawcode(function).co_firstlineno
else:
try:
caller = sys._getframe(stacklevel)
except ValueError:
globals = sys.__dict__
lineno = 1
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__name__' in globals:
module = globals['__name__']
else:
module = "<string>"
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith(".pyc") or fnl.endswith(".pyo"):
filename = filename[:-1]
elif fnl.endswith("$py.class"):
filename = filename.replace('$py.class', '.py')
else:
if module == "__main__":
try:
filename = sys.argv[0]
except AttributeError:
# embedded interpreters don't have sys.argv, see bug #839151
filename = '__main__'
if not filename:
filename = module
path = py.path.local(filename)
warning = DeprecationWarning(msg, path, lineno)
py.std.warnings.warn_explicit(warning, category=Warning,
filename=str(warning.path),
lineno=warning.lineno,
registry=py.std.warnings.__dict__.setdefault(
"__warningsregistry__", {})
)
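# Usage sketch (hedged): _apiwarn() pins the DeprecationWarning on the caller
# (or on `function` when given), so deprecated py APIs point at user code:
#   >>> _apiwarn("1.4", "py.magic is deprecated", stacklevel=2)
# A string stacklevel walks up the stack until the filename no longer contains
# that substring, which is how py skips over its own frames.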

View file

@@ -0,0 +1 @@
""" unified file system api """

View file

@@ -0,0 +1,114 @@
"""
This module contains multithread-safe cache implementations.
All Caches have
getorbuild(key, builder)
delentry(key)
methods and allow configuration when instantiating the cache class.
"""
from time import time as gettime
class BasicCache(object):
def __init__(self, maxentries=128):
self.maxentries = maxentries
self.prunenum = int(maxentries - maxentries/8)
self._dict = {}
def clear(self):
self._dict.clear()
def _getentry(self, key):
return self._dict[key]
def _putentry(self, key, entry):
self._prunelowestweight()
self._dict[key] = entry
def delentry(self, key, raising=False):
try:
del self._dict[key]
except KeyError:
if raising:
raise
def getorbuild(self, key, builder):
try:
entry = self._getentry(key)
except KeyError:
entry = self._build(key, builder)
self._putentry(key, entry)
return entry.value
def _prunelowestweight(self):
""" prune out entries with lowest weight. """
numentries = len(self._dict)
if numentries >= self.maxentries:
# evict according to entry's weight
items = [(entry.weight, key)
for key, entry in self._dict.items()]
items.sort()
index = numentries - self.prunenum
if index > 0:
for weight, key in items[:index]:
# in MT situations the element might be gone
self.delentry(key, raising=False)
class BuildcostAccessCache(BasicCache):
""" A BuildTime/Access-counting cache implementation.
the weight of a value is computed as the product of
num-accesses-of-a-value * time-to-build-the-value
The values with the least such weights are evicted
if the cache maxentries threshold is exceeded.
For implementation flexibility more than one object
might be evicted at a time.
"""
# time function to use for measuring build-times
def _build(self, key, builder):
start = gettime()
val = builder()
end = gettime()
return WeightedCountingEntry(val, end-start)
class WeightedCountingEntry(object):
def __init__(self, value, oneweight):
self._value = value
self.weight = self._oneweight = oneweight
def value(self):
self.weight += self._oneweight
return self._value
value = property(value)
class AgingCache(BasicCache):
""" This cache prunes out cache entries that are too old.
"""
def __init__(self, maxentries=128, maxseconds=10.0):
super(AgingCache, self).__init__(maxentries)
self.maxseconds = maxseconds
def _getentry(self, key):
entry = self._dict[key]
if entry.isexpired():
self.delentry(key)
raise KeyError(key)
return entry
def _build(self, key, builder):
val = builder()
entry = AgingEntry(val, gettime() + self.maxseconds)
return entry
class AgingEntry(object):
def __init__(self, value, expirationtime):
self.value = value
self.weight = expirationtime
def isexpired(self):
t = gettime()
return t >= self.weight
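# Usage sketch (hedged): getorbuild() only runs the builder on a cache miss;
# BuildcostAccessCache weights entries by build time * access count, while
# AgingCache simply expires them after maxseconds (expensive_lookup is a
# hypothetical placeholder):
#   >>> cache = AgingCache(maxentries=64, maxseconds=5.0)
#   >>> cache.getorbuild("key", expensive_lookup)   # builds and stores
#   >>> cache.getorbuild("key", expensive_lookup)   # served from cache until expiry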

View file

@@ -0,0 +1,403 @@
"""
"""
import os, sys, posixpath
import py
# Moved from local.py.
iswin32 = sys.platform == "win32" or (getattr(os, '_name', False) == 'nt')
class Checkers:
_depend_on_existence = 'exists', 'link', 'dir', 'file'
def __init__(self, path):
self.path = path
def dir(self):
raise NotImplementedError
def file(self):
raise NotImplementedError
def dotfile(self):
return self.path.basename.startswith('.')
def ext(self, arg):
if not arg.startswith('.'):
arg = '.' + arg
return self.path.ext == arg
def exists(self):
raise NotImplementedError
def basename(self, arg):
return self.path.basename == arg
def basestarts(self, arg):
return self.path.basename.startswith(arg)
def relto(self, arg):
return self.path.relto(arg)
def fnmatch(self, arg):
return self.path.fnmatch(arg)
def endswith(self, arg):
return str(self.path).endswith(arg)
def _evaluate(self, kw):
for name, value in kw.items():
invert = False
meth = None
try:
meth = getattr(self, name)
except AttributeError:
if name[:3] == 'not':
invert = True
try:
meth = getattr(self, name[3:])
except AttributeError:
pass
if meth is None:
raise TypeError(
"no %r checker available for %r" % (name, self.path))
try:
if py.code.getrawcode(meth).co_argcount > 1:
if (not meth(value)) ^ invert:
return False
else:
if bool(value) ^ bool(meth()) ^ invert:
return False
except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
# EBUSY feels not entirely correct,
# but it's kind of necessary since ENOMEDIUM
# is not accessible in python
for name in self._depend_on_existence:
if name in kw:
if kw.get(name):
return False
name = 'not' + name
if name in kw:
if not kw.get(name):
return False
return True
class NeverRaised(Exception):
pass
class PathBase(object):
""" shared implementation for filesystem path objects."""
Checkers = Checkers
def __div__(self, other):
return self.join(str(other))
__truediv__ = __div__ # py3k
def basename(self):
""" basename part of path. """
return self._getbyspec('basename')[0]
basename = property(basename, None, None, basename.__doc__)
def dirname(self):
""" dirname part of path. """
return self._getbyspec('dirname')[0]
dirname = property(dirname, None, None, dirname.__doc__)
def purebasename(self):
""" pure base name of the path."""
return self._getbyspec('purebasename')[0]
purebasename = property(purebasename, None, None, purebasename.__doc__)
def ext(self):
""" extension of the path (including the '.')."""
return self._getbyspec('ext')[0]
ext = property(ext, None, None, ext.__doc__)
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
return self.new(basename='').join(*args, **kwargs)
def read_binary(self):
""" read and return a bytestring from reading the path. """
with self.open('rb') as f:
return f.read()
def read_text(self, encoding):
""" read and return a Unicode string from reading the path. """
with self.open("r", encoding=encoding) as f:
return f.read()
def read(self, mode='r'):
""" read and return a bytestring from reading the path. """
with self.open(mode) as f:
return f.read()
def readlines(self, cr=1):
""" read and return a list of lines from the path. if cr is False, the
newline will be removed from the end of each line. """
if not cr:
content = self.read('rU')
return content.split('\n')
else:
f = self.open('rU')
try:
return f.readlines()
finally:
f.close()
def load(self):
""" (deprecated) return object unpickled from self.read() """
f = self.open('rb')
try:
return py.error.checked_call(py.std.pickle.load, f)
finally:
f.close()
def move(self, target):
""" move this path to target. """
if target.relto(self):
raise py.error.EINVAL(target,
"cannot move path into a subdirectory of itself")
try:
self.rename(target)
except py.error.EXDEV: # invalid cross-device link
self.copy(target)
self.remove()
def __repr__(self):
""" return a string representation of this path. """
return repr(str(self))
def check(self, **kw):
""" check a path for existence and properties.
Without arguments, return True if the path exists, otherwise False.
valid checkers::
file=1 # is a file
file=0 # is not a file (may not even exist)
dir=1 # is a dir
link=1 # is a link
exists=1 # exists
You can specify multiple checker definitions, for example::
path.check(file=1, link=1) # a link pointing to a file
"""
if not kw:
kw = {'exists' : 1}
return self.Checkers(self)._evaluate(kw)
def fnmatch(self, pattern):
"""return true if the basename/fullname matches the glob-'pattern'.
valid pattern characters::
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
If the pattern contains a path-separator then the full path
is used for pattern matching and a '*' is prepended to the
pattern.
if the pattern doesn't contain a path-separator the pattern
is only matched against the basename.
"""
return FNMatcher(pattern)(self)
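# Usage sketch (hedged, posix separators): without a separator the pattern is
# matched against the basename only; with one, the full path is matched and a
# leading '*' is implied:
#   >>> p = py.path.local("/repo/tools/wptserve/setup.py")
#   >>> p.fnmatch("*.py")
#   True
#   >>> p.fnmatch("tools/*/setup.py")
#   True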
def relto(self, relpath):
""" return a string which is the relative part of the path
to the given 'relpath'.
"""
if not isinstance(relpath, (str, PathBase)):
raise TypeError("%r: not a string or path object" %(relpath,))
strrelpath = str(relpath)
if strrelpath and strrelpath[-1] != self.sep:
strrelpath += self.sep
#assert strrelpath[-1] == self.sep
#assert strrelpath[-2] != self.sep
strself = self.strpath
if sys.platform == "win32" or getattr(os, '_name', None) == 'nt':
if os.path.normcase(strself).startswith(
os.path.normcase(strrelpath)):
return strself[len(strrelpath):]
elif strself.startswith(strrelpath):
return strself[len(strrelpath):]
return ""
def ensure_dir(self, *args):
""" ensure the path joined with args is a directory. """
return self.ensure(*args, **{"dir": True})
def bestrelpath(self, dest):
""" return a string which is a relative path from self
(assumed to be a directory) to dest such that
self.join(bestrelpath) == dest; if no such
path can be determined, return dest.
"""
try:
if self == dest:
return os.curdir
base = self.common(dest)
if not base: # can be the case on windows
return str(dest)
self2base = self.relto(base)
reldest = dest.relto(base)
if self2base:
n = self2base.count(self.sep) + 1
else:
n = 0
l = [os.pardir] * n
if reldest:
l.append(reldest)
target = dest.sep.join(l)
return target
except AttributeError:
return str(dest)
def exists(self):
return self.check()
def isdir(self):
return self.check(dir=1)
def isfile(self):
return self.check(file=1)
def parts(self, reverse=False):
""" return a root-first list of all ancestor directories
plus the path itself.
"""
current = self
l = [self]
while 1:
last = current
current = current.dirpath()
if last == current:
break
l.append(current)
if not reverse:
l.reverse()
return l
def common(self, other):
""" return the common part shared with the other path
or None if there is no common part.
"""
last = None
for x, y in zip(self.parts(), other.parts()):
if x != y:
return last
last = x
return last
def __add__(self, other):
""" return new path object with 'other' added to the basename"""
return self.new(basename=self.basename+str(other))
def __cmp__(self, other):
""" return sort value (-1, 0, +1). """
try:
return cmp(self.strpath, other.strpath)
except AttributeError:
return cmp(str(self), str(other)) # self.path, other.path)
def __lt__(self, other):
try:
return self.strpath < other.strpath
except AttributeError:
return str(self) < str(other)
def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False):
""" yields all paths below the current one
fil is a filter (glob pattern or callable), if not matching the
path will not be yielded, defaulting to None (everything is
returned)
rec is a filter (glob pattern or callable) that controls whether
a node is descended, defaulting to None
ignore is an Exception class that is ignored when calling listdir()
on any of the paths (by default, all exceptions are reported)
bf if True will cause a breadthfirst search instead of the
default depthfirst. Default: False
sort if True will sort entries within each directory level.
"""
for x in Visitor(fil, rec, ignore, bf, sort).gen(self):
yield x
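# Usage sketch (hedged): fil filters what is yielded, rec decides which
# directories are descended into:
#   >>> root = py.path.local("tools")
#   >>> for p in root.visit(fil="*.py", rec=lambda d: d.basename != ".git"):
#   ...     print(p.relto(root))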
def _sortlist(self, res, sort):
if sort:
if hasattr(sort, '__call__'):
res.sort(sort)
else:
res.sort()
def samefile(self, other):
""" return True if other refers to the same stat object as self. """
return self.strpath == str(other)
class Visitor:
def __init__(self, fil, rec, ignore, bf, sort):
if isinstance(fil, str):
fil = FNMatcher(fil)
if isinstance(rec, str):
self.rec = FNMatcher(rec)
elif not hasattr(rec, '__call__') and rec:
self.rec = lambda path: True
else:
self.rec = rec
self.fil = fil
self.ignore = ignore
self.breadthfirst = bf
self.optsort = sort and sorted or (lambda x: x)
def gen(self, path):
try:
entries = path.listdir()
except self.ignore:
return
rec = self.rec
dirs = self.optsort([p for p in entries
if p.check(dir=1) and (rec is None or rec(p))])
if not self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
for p in self.optsort(entries):
if self.fil is None or self.fil(p):
yield p
if self.breadthfirst:
for subdir in dirs:
for p in self.gen(subdir):
yield p
class FNMatcher:
def __init__(self, pattern):
self.pattern = pattern
def __call__(self, path):
pattern = self.pattern
if (pattern.find(path.sep) == -1 and
iswin32 and
pattern.find(posixpath.sep) != -1):
# Running on Windows, the pattern has no Windows path separators,
# and the pattern has one or more Posix path separators. Replace
# the Posix path separators with the Windows path separator.
pattern = pattern.replace(posixpath.sep, path.sep)
if pattern.find(path.sep) == -1:
name = path.basename
else:
name = str(path) # path.strpath # XXX svn?
if not os.path.isabs(pattern):
pattern = '*' + path.sep + pattern
return py.std.fnmatch.fnmatch(name, pattern)

View file

@@ -0,0 +1,911 @@
"""
local path implementation.
"""
from __future__ import with_statement
from contextlib import contextmanager
import sys, os, re, atexit, io
import py
from py._path import common
from py._path.common import iswin32
from stat import S_ISLNK, S_ISDIR, S_ISREG
from os.path import abspath, normpath, isabs, exists, isdir, isfile, islink, dirname
if sys.version_info > (3,0):
def map_as_list(func, iter):
return list(map(func, iter))
else:
map_as_list = map
class Stat(object):
def __getattr__(self, name):
return getattr(self._osstatresult, "st_" + name)
def __init__(self, path, osstatresult):
self.path = path
self._osstatresult = osstatresult
@property
def owner(self):
if iswin32:
raise NotImplementedError("XXX win32")
import pwd
entry = py.error.checked_call(pwd.getpwuid, self.uid)
return entry[0]
@property
def group(self):
""" return group name of file. """
if iswin32:
raise NotImplementedError("XXX win32")
import grp
entry = py.error.checked_call(grp.getgrgid, self.gid)
return entry[0]
def isdir(self):
return S_ISDIR(self._osstatresult.st_mode)
def isfile(self):
return S_ISREG(self._osstatresult.st_mode)
def islink(self):
st = self.path.lstat()
return S_ISLNK(self._osstatresult.st_mode)
class PosixPath(common.PathBase):
def chown(self, user, group, rec=0):
""" change ownership to the given user and group.
user and group may be specified by a number or
by a name. if rec is True change ownership
recursively.
"""
uid = getuserid(user)
gid = getgroupid(group)
if rec:
for x in self.visit(rec=lambda x: x.check(link=0)):
if x.check(link=0):
py.error.checked_call(os.chown, str(x), uid, gid)
py.error.checked_call(os.chown, str(self), uid, gid)
def readlink(self):
""" return value of a symbolic link. """
return py.error.checked_call(os.readlink, self.strpath)
def mklinkto(self, oldname):
""" posix style hard link to another name. """
py.error.checked_call(os.link, str(oldname), str(self))
def mksymlinkto(self, value, absolute=1):
""" create a symbolic link with the given value (pointing to another name). """
if absolute:
py.error.checked_call(os.symlink, str(value), self.strpath)
else:
base = self.common(value)
# with posix local paths '/' is always a common base
relsource = self.__class__(value).relto(base)
reldest = self.relto(base)
n = reldest.count(self.sep)
target = self.sep.join(('..', )*n + (relsource, ))
py.error.checked_call(os.symlink, target, self.strpath)
def getuserid(user):
import pwd
if not isinstance(user, int):
user = pwd.getpwnam(user)[2]
return user
def getgroupid(group):
import grp
if not isinstance(group, int):
group = grp.getgrnam(group)[2]
return group
FSBase = not iswin32 and PosixPath or common.PathBase
class LocalPath(FSBase):
""" object oriented interface to os.path and other local filesystem
related information.
"""
class ImportMismatchError(ImportError):
""" raised on pyimport() if there is a mismatch of __file__'s"""
sep = os.sep
class Checkers(common.Checkers):
def _stat(self):
try:
return self._statcache
except AttributeError:
try:
self._statcache = self.path.stat()
except py.error.ELOOP:
self._statcache = self.path.lstat()
return self._statcache
def dir(self):
return S_ISDIR(self._stat().mode)
def file(self):
return S_ISREG(self._stat().mode)
def exists(self):
return self._stat()
def link(self):
st = self.path.lstat()
return S_ISLNK(st.mode)
def __init__(self, path=None, expanduser=False):
""" Initialize and return a local Path instance.
Path can be relative to the current directory.
If path is None it defaults to the current working directory.
If expanduser is True, tilde-expansion is performed.
Note that Path instances always carry an absolute path.
Note also that passing in a local path object will simply return
the exact same path object. Use new() to get a new copy.
"""
if path is None:
self.strpath = py.error.checked_call(os.getcwd)
elif isinstance(path, common.PathBase):
self.strpath = path.strpath
elif isinstance(path, py.builtin._basestring):
if expanduser:
path = os.path.expanduser(path)
self.strpath = abspath(path)
else:
raise ValueError("can only pass None, Path instances "
"or non-empty strings to LocalPath")
def __hash__(self):
return hash(self.strpath)
def __eq__(self, other):
s1 = self.strpath
s2 = getattr(other, "strpath", other)
if iswin32:
s1 = s1.lower()
try:
s2 = s2.lower()
except AttributeError:
return False
return s1 == s2
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
return self.strpath < getattr(other, "strpath", other)
def __gt__(self, other):
return self.strpath > getattr(other, "strpath", other)
def samefile(self, other):
""" return True if 'other' references the same file as 'self'.
"""
other = getattr(other, "strpath", other)
if not isabs(other):
other = abspath(other)
if self == other:
return True
if iswin32:
return False # there is no samefile
return py.error.checked_call(
os.path.samefile, self.strpath, other)
def remove(self, rec=1, ignore_errors=False):
""" remove a file or directory (or a directory tree if rec=1).
if ignore_errors is True, errors while removing directories will
be ignored.
"""
if self.check(dir=1, link=0):
if rec:
# force remove of readonly files on windows
if iswin32:
self.chmod(448, rec=1) # octal 0700
py.error.checked_call(py.std.shutil.rmtree, self.strpath,
ignore_errors=ignore_errors)
else:
py.error.checked_call(os.rmdir, self.strpath)
else:
if iswin32:
self.chmod(448) # octal 0700
py.error.checked_call(os.remove, self.strpath)
def computehash(self, hashtype="md5", chunksize=524288):
""" return hexdigest of hashvalue for this file. """
try:
try:
import hashlib as mod
except ImportError:
if hashtype == "sha1":
hashtype = "sha"
mod = __import__(hashtype)
hash = getattr(mod, hashtype)()
except (AttributeError, ImportError):
raise ValueError("Don't know how to compute %r hash" %(hashtype,))
f = self.open('rb')
try:
while 1:
buf = f.read(chunksize)
if not buf:
return hash.hexdigest()
hash.update(buf)
finally:
f.close()
def new(self, **kw):
""" create a modified version of this path.
the following keyword arguments modify various path parts::
a:/some/path/to/a/file.ext
xx drive
xxxxxxxxxxxxxxxxx dirname
xxxxxxxx basename
xxxx purebasename
xxx ext
"""
obj = object.__new__(self.__class__)
if not kw:
obj.strpath = self.strpath
return obj
drive, dirname, basename, purebasename,ext = self._getbyspec(
"drive,dirname,basename,purebasename,ext")
if 'basename' in kw:
if 'purebasename' in kw or 'ext' in kw:
raise ValueError("invalid specification %r" % kw)
else:
pb = kw.setdefault('purebasename', purebasename)
try:
ext = kw['ext']
except KeyError:
pass
else:
if ext and not ext.startswith('.'):
ext = '.' + ext
kw['basename'] = pb + ext
if ('dirname' in kw and not kw['dirname']):
kw['dirname'] = drive
else:
kw.setdefault('dirname', dirname)
kw.setdefault('sep', self.sep)
obj.strpath = normpath(
"%(dirname)s%(sep)s%(basename)s" % kw)
return obj
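# Usage sketch (hedged, posix paths): new() swaps individual parts as drawn in
# the docstring above:
#   >>> p = py.path.local("/tmp/some/path/to/a/file.ext")
#   >>> p.new(ext=".json").basename
#   'file.json'
#   >>> p.new(basename="other.txt")
#   local('/tmp/some/path/to/a/other.txt')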
def _getbyspec(self, spec):
""" see new for what 'spec' can be. """
res = []
parts = self.strpath.split(self.sep)
args = filter(None, spec.split(',') )
append = res.append
for name in args:
if name == 'drive':
append(parts[0])
elif name == 'dirname':
append(self.sep.join(parts[:-1]))
else:
basename = parts[-1]
if name == 'basename':
append(basename)
else:
i = basename.rfind('.')
if i == -1:
purebasename, ext = basename, ''
else:
purebasename, ext = basename[:i], basename[i:]
if name == 'purebasename':
append(purebasename)
elif name == 'ext':
append(ext)
else:
raise ValueError("invalid part specification %r" % name)
return res
def dirpath(self, *args, **kwargs):
""" return the directory path joined with any given path arguments. """
if not kwargs:
path = object.__new__(self.__class__)
path.strpath = dirname(self.strpath)
if args:
path = path.join(*args)
return path
return super(LocalPath, self).dirpath(*args, **kwargs)
def join(self, *args, **kwargs):
""" return a new path by appending all 'args' as path
components. if abs=1 is used restart from root if any
of the args is an absolute path.
"""
sep = self.sep
strargs = [getattr(arg, "strpath", arg) for arg in args]
strpath = self.strpath
if kwargs.get('abs'):
newargs = []
for arg in reversed(strargs):
if isabs(arg):
strpath = arg
strargs = newargs
break
newargs.insert(0, arg)
for arg in strargs:
arg = arg.strip(sep)
if iswin32:
# allow unix style paths even on windows.
arg = arg.strip('/')
arg = arg.replace('/', sep)
strpath = strpath + sep + arg
obj = object.__new__(self.__class__)
obj.strpath = normpath(strpath)
return obj
def open(self, mode='r', ensure=False, encoding=None):
""" return an opened file with the given mode.
If ensure is True, create parent directories if needed.
"""
if ensure:
self.dirpath().ensure(dir=1)
if encoding:
return py.error.checked_call(io.open, self.strpath, mode, encoding=encoding)
return py.error.checked_call(open, self.strpath, mode)
def _fastjoin(self, name):
child = object.__new__(self.__class__)
child.strpath = self.strpath + self.sep + name
return child
def islink(self):
return islink(self.strpath)
def check(self, **kw):
if not kw:
return exists(self.strpath)
if len(kw) == 1:
if "dir" in kw:
return not kw["dir"] ^ isdir(self.strpath)
if "file" in kw:
return not kw["file"] ^ isfile(self.strpath)
return super(LocalPath, self).check(**kw)
_patternchars = set("*?[" + os.path.sep)
def listdir(self, fil=None, sort=None):
""" list directory contents, possibly filter by the given fil func
and possibly sorted.
"""
if fil is None and sort is None:
names = py.error.checked_call(os.listdir, self.strpath)
return map_as_list(self._fastjoin, names)
if isinstance(fil, py.builtin._basestring):
if not self._patternchars.intersection(fil):
child = self._fastjoin(fil)
if exists(child.strpath):
return [child]
return []
fil = common.FNMatcher(fil)
names = py.error.checked_call(os.listdir, self.strpath)
res = []
for name in names:
child = self._fastjoin(name)
if fil is None or fil(child):
res.append(child)
self._sortlist(res, sort)
return res
def size(self):
""" return size of the underlying file object """
return self.stat().size
def mtime(self):
""" return last modification time of the path. """
return self.stat().mtime
def copy(self, target, mode=False):
""" copy path to target."""
if self.check(file=1):
if target.check(dir=1):
target = target.join(self.basename)
assert self!=target
copychunked(self, target)
if mode:
copymode(self.strpath, target.strpath)
else:
def rec(p):
return p.check(link=0)
for x in self.visit(rec=rec):
relpath = x.relto(self)
newx = target.join(relpath)
newx.dirpath().ensure(dir=1)
if x.check(link=1):
newx.mksymlinkto(x.readlink())
continue
elif x.check(file=1):
copychunked(x, newx)
elif x.check(dir=1):
newx.ensure(dir=1)
if mode:
copymode(x.strpath, newx.strpath)
def rename(self, target):
""" rename this path to target. """
target = getattr(target, "strpath", target)
return py.error.checked_call(os.rename, self.strpath, target)
def dump(self, obj, bin=1):
""" pickle object into path location"""
f = self.open('wb')
try:
py.error.checked_call(py.std.pickle.dump, obj, f, bin)
finally:
f.close()
def mkdir(self, *args):
""" create & return the directory joined with args. """
p = self.join(*args)
py.error.checked_call(os.mkdir, getattr(p, "strpath", p))
return p
def write_binary(self, data, ensure=False):
""" write binary data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('wb') as f:
f.write(data)
def write_text(self, data, encoding, ensure=False):
""" write text data into path using the specified encoding.
If ensure is True create missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
with self.open('w', encoding=encoding) as f:
f.write(data)
def write(self, data, mode='w', ensure=False):
""" write data into path. If ensure is True create
missing parent directories.
"""
if ensure:
self.dirpath().ensure(dir=1)
if 'b' in mode:
if not py.builtin._isbytes(data):
raise ValueError("can only process bytes")
else:
if not py.builtin._istext(data):
if not py.builtin._isbytes(data):
data = str(data)
else:
data = py.builtin._totext(data, sys.getdefaultencoding())
f = self.open(mode)
try:
f.write(data)
finally:
f.close()
def _ensuredirs(self):
parent = self.dirpath()
if parent == self:
return self
if parent.check(dir=0):
parent._ensuredirs()
if self.check(dir=0):
try:
self.mkdir()
except py.error.EEXIST:
# race condition: file/dir created by another thread/process.
# complain if it is not a dir
if self.check(dir=0):
raise
return self
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). if you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
p = self.join(*args)
if kwargs.get('dir', 0):
return p._ensuredirs()
else:
p.dirpath()._ensuredirs()
if not p.check(file=1):
p.open('w').close()
return p
def stat(self, raising=True):
""" Return an os.stat() tuple. """
if raising == True:
return Stat(self, py.error.checked_call(os.stat, self.strpath))
try:
return Stat(self, os.stat(self.strpath))
except KeyboardInterrupt:
raise
except Exception:
return None
def lstat(self):
""" Return an os.lstat() tuple. """
return Stat(self, py.error.checked_call(os.lstat, self.strpath))
def setmtime(self, mtime=None):
""" set modification time for the given path. if 'mtime' is None
(the default) then the file's mtime is set to current time.
Note that the resolution for 'mtime' is platform dependent.
"""
if mtime is None:
return py.error.checked_call(os.utime, self.strpath, mtime)
try:
return py.error.checked_call(os.utime, self.strpath, (-1, mtime))
except py.error.EINVAL:
return py.error.checked_call(os.utime, self.strpath, (self.atime(), mtime))
def chdir(self):
""" change directory to self and return old current directory """
try:
old = self.__class__()
except py.error.ENOENT:
old = None
py.error.checked_call(os.chdir, self.strpath)
return old
@contextmanager
def as_cwd(self):
""" return context manager which changes to current dir during the
managed "with" context. On __enter__ it returns the old dir.
"""
old = self.chdir()
try:
yield old
finally:
old.chdir()
def realpath(self):
""" return a new path which contains no symbolic links."""
return self.__class__(os.path.realpath(self.strpath))
def atime(self):
""" return last access time of the path. """
return self.stat().atime
def __repr__(self):
return 'local(%r)' % self.strpath
def __str__(self):
""" return string representation of the Path. """
return self.strpath
def chmod(self, mode, rec=0):
""" change permissions to the given mode. If mode is an
integer it directly encodes the os-specific modes.
if rec is True perform recursively.
"""
if not isinstance(mode, int):
raise TypeError("mode %r must be an integer" % (mode,))
if rec:
for x in self.visit(rec=rec):
py.error.checked_call(os.chmod, str(x), mode)
py.error.checked_call(os.chmod, self.strpath, mode)
def pypkgpath(self):
""" return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
Return None if a pkgpath can not be determined.
"""
pkgpath = None
for parent in self.parts(reverse=True):
if parent.isdir():
if not parent.join('__init__.py').exists():
break
if not isimportable(parent.basename):
break
pkgpath = parent
return pkgpath
def _ensuresyspath(self, ensuremode, path):
if ensuremode:
s = str(path)
if ensuremode == "append":
if s not in sys.path:
sys.path.append(s)
else:
if s != sys.path[0]:
sys.path.insert(0, s)
def pyimport(self, modname=None, ensuresyspath=True):
""" return path as an imported python module.
If modname is None, look for the containing package
and construct an according module name.
The module will be put/looked up in sys.modules.
if ensuresyspath is True then the root dir for importing
the file (taking __init__.py files into account) will
be prepended to sys.path if it isn't there already.
If ensuresyspath=="append" the root dir will be appended
if it isn't already contained in sys.path.
if ensuresyspath is False no modification of syspath happens.
"""
if not self.check():
raise py.error.ENOENT(self)
pkgpath = None
if modname is None:
pkgpath = self.pypkgpath()
if pkgpath is not None:
pkgroot = pkgpath.dirpath()
names = self.new(ext="").relto(pkgroot).split(self.sep)
if names[-1] == "__init__":
names.pop()
modname = ".".join(names)
else:
pkgroot = self.dirpath()
modname = self.purebasename
self._ensuresyspath(ensuresyspath, pkgroot)
__import__(modname)
mod = sys.modules[modname]
if self.basename == "__init__.py":
return mod # we don't check anything as we might
# be in a namespace package ... too icky to check
modfile = mod.__file__
if modfile[-4:] in ('.pyc', '.pyo'):
modfile = modfile[:-1]
elif modfile.endswith('$py.class'):
modfile = modfile[:-9] + '.py'
if modfile.endswith(os.path.sep + "__init__.py"):
if self.basename != "__init__.py":
modfile = modfile[:-12]
try:
issame = self.samefile(modfile)
except py.error.ENOENT:
issame = False
if not issame:
raise self.ImportMismatchError(modname, modfile, self)
return mod
else:
try:
return sys.modules[modname]
except KeyError:
# we have a custom modname, do a pseudo-import
mod = py.std.types.ModuleType(modname)
mod.__file__ = str(self)
sys.modules[modname] = mod
try:
py.builtin.execfile(str(self), mod.__dict__)
except:
del sys.modules[modname]
raise
return mod
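# Usage sketch (hedged, assumes the usual wptserve layout in this repository):
# pyimport() derives the dotted name from the enclosing package (pypkgpath
# walks up through __init__.py files) and raises ImportMismatchError when the
# imported module's __file__ does not match this path:
#   >>> mod = py.path.local("tools/wptserve/wptserve/handlers.py").pyimport()
#   >>> mod.__name__
#   'wptserve.handlers'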
def sysexec(self, *argv, **popen_opts):
""" return stdout text from executing a system child process,
where the 'self' path points to executable.
The process is directly invoked and not through a system shell.
"""
from subprocess import Popen, PIPE
argv = map_as_list(str, argv)
popen_opts['stdout'] = popen_opts['stderr'] = PIPE
proc = Popen([str(self)] + argv, **popen_opts)
stdout, stderr = proc.communicate()
ret = proc.wait()
if py.builtin._isbytes(stdout):
stdout = py.builtin._totext(stdout, sys.getdefaultencoding())
if ret != 0:
if py.builtin._isbytes(stderr):
stderr = py.builtin._totext(stderr, sys.getdefaultencoding())
raise py.process.cmdexec.Error(ret, ret, str(self),
stdout, stderr,)
return stdout
def sysfind(cls, name, checker=None, paths=None):
""" return a path object found by looking at the systems
underlying PATH specification. If the checker is not None
it will be invoked to filter matching paths. If a binary
cannot be found, None is returned
Note: This is probably not working on plain win32 systems
but may work on cygwin.
"""
if isabs(name):
p = py.path.local(name)
if p.check(file=1):
return p
else:
if paths is None:
if iswin32:
paths = py.std.os.environ['Path'].split(';')
if '' not in paths and '.' not in paths:
paths.append('.')
try:
systemroot = os.environ['SYSTEMROOT']
except KeyError:
pass
else:
paths = [re.sub('%SystemRoot%', systemroot, path)
for path in paths]
else:
paths = py.std.os.environ['PATH'].split(':')
tryadd = []
if iswin32:
tryadd += os.environ['PATHEXT'].split(os.pathsep)
tryadd.append("")
for x in paths:
for addext in tryadd:
p = py.path.local(x).join(name, abs=True) + addext
try:
if p.check(file=1):
if checker:
if not checker(p):
continue
return p
except py.error.EACCES:
pass
return None
sysfind = classmethod(sysfind)
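# Illustrative usage sketch (not part of the original source; assumes a
# POSIX-like system where "ls" is on PATH):
#
#   >>> py.path.local.sysfind("ls")                                    # doctest: +ELLIPSIS
#   local('.../ls')
#   >>> py.path.local.sysfind("ls", checker=lambda p: p.check(file=1)) is not None
#   True
#   >>> py.path.local.sysfind("surely-no-such-binary") is None
#   True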
def _gethomedir(cls):
try:
x = os.environ['HOME']
except KeyError:
try:
x = os.environ["HOMEDRIVE"] + os.environ['HOMEPATH']
except KeyError:
return None
return cls(x)
_gethomedir = classmethod(_gethomedir)
#"""
#special class constructors for local filesystem paths
#"""
def get_temproot(cls):
""" return the system's temporary directory
(where tempfiles are usually created in)
"""
return py.path.local(py.std.tempfile.gettempdir())
get_temproot = classmethod(get_temproot)
def mkdtemp(cls, rootdir=None):
""" return a Path object pointing to a fresh new temporary directory
(which we created ourselves).
"""
import tempfile
if rootdir is None:
rootdir = cls.get_temproot()
return cls(py.error.checked_call(tempfile.mkdtemp, dir=str(rootdir)))
mkdtemp = classmethod(mkdtemp)
def make_numbered_dir(cls, prefix='session-', rootdir=None, keep=3,
lock_timeout = 172800): # two days
""" return unique directory with a number greater than the current
maximum one. The number is assumed to start directly after prefix.
If keep is true, directories with a number less than (maxnum-keep)
will be removed.
"""
if rootdir is None:
rootdir = cls.get_temproot()
def parse_num(path):
""" parse the number out of a path (if it matches the prefix) """
bn = path.basename
if bn.startswith(prefix):
try:
return int(bn[len(prefix):])
except ValueError:
pass
# compute the maximum number currently in use with the
# prefix
lastmax = None
while True:
maxnum = -1
for path in rootdir.listdir():
num = parse_num(path)
if num is not None:
maxnum = max(maxnum, num)
# make the new directory
try:
udir = rootdir.mkdir(prefix + str(maxnum+1))
except py.error.EEXIST:
# race condition: another thread/process created the dir
# in the meantime. Try counting again
if lastmax == maxnum:
raise
lastmax = maxnum
continue
break
# put a .lock file in the new directory that will be removed at
# process exit
if lock_timeout:
lockfile = udir.join('.lock')
mypid = os.getpid()
if hasattr(lockfile, 'mksymlinkto'):
lockfile.mksymlinkto(str(mypid))
else:
lockfile.write(str(mypid))
def try_remove_lockfile():
# in a fork() situation, only the last process should
# remove the .lock, otherwise the other processes run the
# risk of seeing their temporary dir disappear. For now
# we remove the .lock in the parent only (i.e. we assume
# that the children finish before the parent).
if os.getpid() != mypid:
return
try:
lockfile.remove()
except py.error.Error:
pass
atexit.register(try_remove_lockfile)
# prune old directories
if keep:
for path in rootdir.listdir():
num = parse_num(path)
if num is not None and num <= (maxnum - keep):
lf = path.join('.lock')
try:
t1 = lf.lstat().mtime
t2 = lockfile.lstat().mtime
if not lock_timeout or abs(t2-t1) < lock_timeout:
continue # skip directories still locked
except py.error.Error:
pass # assume that it means that there is no 'lf'
try:
path.remove(rec=1)
except KeyboardInterrupt:
raise
except: # this might be py.error.Error, WindowsError ...
pass
# make link...
try:
username = os.environ['USER'] #linux, et al
except KeyError:
try:
username = os.environ['USERNAME'] #windows
except KeyError:
username = 'current'
src = str(udir)
dest = src[:src.rfind('-')] + '-' + username
try:
os.unlink(dest)
except OSError:
pass
try:
os.symlink(src, dest)
except (OSError, AttributeError, NotImplementedError):
pass
return udir
make_numbered_dir = classmethod(make_numbered_dir)
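# Illustrative usage sketch (not part of the original source): numbered
# per-run directories under a private root; lock_timeout=0 disables the
# .lock files so that pruning with keep=3 takes effect immediately.
#
#   >>> root = py.path.local.mkdtemp()
#   >>> for i in range(5):
#   ...     d = py.path.local.make_numbered_dir(prefix="run-", rootdir=root,
#   ...                                         keep=3, lock_timeout=0)
#   >>> d.basename                   # run-0 has been pruned, run-1..run-4 remain
#   'run-4'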
def copymode(src, dest):
py.std.shutil.copymode(src, dest)
def copychunked(src, dest):
chunksize = 524288 # half a meg of bytes
fsrc = src.open('rb')
try:
fdest = dest.open('wb')
try:
while 1:
buf = fsrc.read(chunksize)
if not buf:
break
fdest.write(buf)
finally:
fdest.close()
finally:
fsrc.close()
def isimportable(name):
if name and (name[0].isalpha() or name[0] == '_'):
name = name.replace("_", '')
return not name or name.isalnum()

View file

@ -0,0 +1,380 @@
"""
module defining a subversion path object based on the external
command 'svn'. This module aims to work with svn 1.3 and higher
but might also interact well with earlier versions.
"""
import os, sys, time, re
import py
from py import path, process
from py._path import common
from py._path import svnwc as svncommon
from py._path.cacheutil import BuildcostAccessCache, AgingCache
DEBUG=False
class SvnCommandPath(svncommon.SvnPathBase):
""" path implementation that offers access to (possibly remote) subversion
repositories. """
_lsrevcache = BuildcostAccessCache(maxentries=128)
_lsnorevcache = AgingCache(maxentries=1000, maxseconds=60.0)
def __new__(cls, path, rev=None, auth=None):
self = object.__new__(cls)
if isinstance(path, cls):
rev = path.rev
auth = path.auth
path = path.strpath
svncommon.checkbadchars(path)
path = path.rstrip('/')
self.strpath = path
self.rev = rev
self.auth = auth
return self
def __repr__(self):
if self.rev == -1:
return 'svnurl(%r)' % self.strpath
else:
return 'svnurl(%r, %r)' % (self.strpath, self.rev)
def _svnwithrev(self, cmd, *args):
""" execute an svn command, append our own url and revision """
if self.rev is None:
return self._svnwrite(cmd, *args)
else:
args = ['-r', self.rev] + list(args)
return self._svnwrite(cmd, *args)
def _svnwrite(self, cmd, *args):
""" execute an svn command, append our own url """
l = ['svn %s' % cmd]
args = ['"%s"' % self._escape(item) for item in args]
l.extend(args)
l.append('"%s"' % self._encodedurl())
# fixing the locale because we can't otherwise parse
string = " ".join(l)
if DEBUG:
print("execing %s" % string)
out = self._svncmdexecauth(string)
return out
def _svncmdexecauth(self, cmd):
""" execute an svn command 'as is' """
cmd = svncommon.fixlocale() + cmd
if self.auth is not None:
cmd += ' ' + self.auth.makecmdoptions()
return self._cmdexec(cmd)
def _cmdexec(self, cmd):
try:
out = process.cmdexec(cmd)
except py.process.cmdexec.Error:
e = sys.exc_info()[1]
if (e.err.find('File Exists') != -1 or
e.err.find('File already exists') != -1):
raise py.error.EEXIST(self)
raise
return out
def _svnpopenauth(self, cmd):
""" execute an svn command, return a pipe for reading stdin """
cmd = svncommon.fixlocale() + cmd
if self.auth is not None:
cmd += ' ' + self.auth.makecmdoptions()
return self._popen(cmd)
def _popen(self, cmd):
return os.popen(cmd)
def _encodedurl(self):
return self._escape(self.strpath)
def _norev_delentry(self, path):
auth = self.auth and self.auth.makecmdoptions() or None
self._lsnorevcache.delentry((str(path), auth))
def open(self, mode='r'):
""" return an opened file with the given mode. """
if mode not in ("r", "rU",):
raise ValueError("mode %r not supported" % (mode,))
assert self.check(file=1) # svn cat returns an empty file otherwise
if self.rev is None:
return self._svnpopenauth('svn cat "%s"' % (
self._escape(self.strpath), ))
else:
return self._svnpopenauth('svn cat -r %s "%s"' % (
self.rev, self._escape(self.strpath)))
def dirpath(self, *args, **kwargs):
""" return the directory path of the current path joined
with any given path arguments.
"""
l = self.strpath.split(self.sep)
if len(l) < 4:
raise py.error.EINVAL(self, "base is not valid")
elif len(l) == 4:
return self.join(*args, **kwargs)
else:
return self.new(basename='').join(*args, **kwargs)
# modifying methods (cache must be invalidated)
def mkdir(self, *args, **kwargs):
""" create & return the directory joined with args.
pass a 'msg' keyword argument to set the commit message.
"""
commit_msg = kwargs.get('msg', "mkdir by py lib invocation")
createpath = self.join(*args)
createpath._svnwrite('mkdir', '-m', commit_msg)
self._norev_delentry(createpath.dirpath())
return createpath
def copy(self, target, msg='copied by py lib invocation'):
""" copy path to target with checkin message msg."""
if getattr(target, 'rev', None) is not None:
raise py.error.EINVAL(target, "revisions are immutable")
self._svncmdexecauth('svn copy -m "%s" "%s" "%s"' %(msg,
self._escape(self), self._escape(target)))
self._norev_delentry(target.dirpath())
def rename(self, target, msg="renamed by py lib invocation"):
""" rename this path to target with checkin message msg. """
if getattr(self, 'rev', None) is not None:
raise py.error.EINVAL(self, "revisions are immutable")
self._svncmdexecauth('svn move -m "%s" --force "%s" "%s"' %(
msg, self._escape(self), self._escape(target)))
self._norev_delentry(self.dirpath())
self._norev_delentry(self)
def remove(self, rec=1, msg='removed by py lib invocation'):
""" remove a file or directory (or a directory tree if rec=1) with
checkin message msg."""
if self.rev is not None:
raise py.error.EINVAL(self, "revisions are immutable")
self._svncmdexecauth('svn rm -m "%s" "%s"' %(msg, self._escape(self)))
self._norev_delentry(self.dirpath())
def export(self, topath):
""" export to a local path
topath should not exist prior to calling this; returns a
py.path.local instance
"""
topath = py.path.local(topath)
args = ['"%s"' % (self._escape(self),),
'"%s"' % (self._escape(topath),)]
if self.rev is not None:
args = ['-r', str(self.rev)] + args
self._svncmdexecauth('svn export %s' % (' '.join(args),))
return topath
def ensure(self, *args, **kwargs):
""" ensure that an args-joined path exists (by default as
a file). If you specify a keyword argument 'dir=True'
then the path is forced to be a directory path.
"""
if getattr(self, 'rev', None) is not None:
raise py.error.EINVAL(self, "revisions are immutable")
target = self.join(*args)
dir = kwargs.get('dir', 0)
for x in target.parts(reverse=True):
if x.check():
break
else:
raise py.error.ENOENT(target, "has not any valid base!")
if x == target:
if not x.check(dir=dir):
raise dir and py.error.ENOTDIR(x) or py.error.EISDIR(x)
return x
tocreate = target.relto(x)
basename = tocreate.split(self.sep, 1)[0]
tempdir = py.path.local.mkdtemp()
try:
tempdir.ensure(tocreate, dir=dir)
cmd = 'svn import -m "%s" "%s" "%s"' % (
"ensure %s" % self._escape(tocreate),
self._escape(tempdir.join(basename)),
x.join(basename)._encodedurl())
self._svncmdexecauth(cmd)
self._norev_delentry(x)
finally:
tempdir.remove()
return target
# end of modifying methods
def _propget(self, name):
res = self._svnwithrev('propget', name)
return res[:-1] # strip trailing newline
def _proplist(self):
res = self._svnwithrev('proplist')
lines = res.split('\n')
lines = [x.strip() for x in lines[1:]]
return svncommon.PropListDict(self, lines)
def info(self):
""" return an Info structure with svn-provided information. """
parent = self.dirpath()
nameinfo_seq = parent._listdir_nameinfo()
bn = self.basename
for name, info in nameinfo_seq:
if name == bn:
return info
raise py.error.ENOENT(self)
def _listdir_nameinfo(self):
""" return sequence of name-info directory entries of self """
def builder():
try:
res = self._svnwithrev('ls', '-v')
except process.cmdexec.Error:
e = sys.exc_info()[1]
if e.err.find('non-existent in that revision') != -1:
raise py.error.ENOENT(self, e.err)
elif e.err.find("E200009:") != -1:
raise py.error.ENOENT(self, e.err)
elif e.err.find('File not found') != -1:
raise py.error.ENOENT(self, e.err)
elif e.err.find('not part of a repository')!=-1:
raise py.error.ENOENT(self, e.err)
elif e.err.find('Unable to open')!=-1:
raise py.error.ENOENT(self, e.err)
elif e.err.lower().find('method not allowed')!=-1:
raise py.error.EACCES(self, e.err)
raise py.error.Error(e.err)
lines = res.split('\n')
nameinfo_seq = []
for lsline in lines:
if lsline:
info = InfoSvnCommand(lsline)
if info._name != '.': # svn 1.5 produces '.' dirs,
nameinfo_seq.append((info._name, info))
nameinfo_seq.sort()
return nameinfo_seq
auth = self.auth and self.auth.makecmdoptions() or None
if self.rev is not None:
return self._lsrevcache.getorbuild((self.strpath, self.rev, auth),
builder)
else:
return self._lsnorevcache.getorbuild((self.strpath, auth),
builder)
def listdir(self, fil=None, sort=None):
""" list directory contents, possibly filter by the given fil func
and possibly sorted.
"""
if isinstance(fil, str):
fil = common.FNMatcher(fil)
nameinfo_seq = self._listdir_nameinfo()
if len(nameinfo_seq) == 1:
name, info = nameinfo_seq[0]
if name == self.basename and info.kind == 'file':
#if not self.check(dir=1):
raise py.error.ENOTDIR(self)
paths = [self.join(name) for (name, info) in nameinfo_seq]
if fil:
paths = [x for x in paths if fil(x)]
self._sortlist(paths, sort)
return paths
def log(self, rev_start=None, rev_end=1, verbose=False):
""" return a list of LogEntry instances for this path.
rev_start is the starting revision (defaulting to HEAD).
rev_end is the last revision (defaulting to 1, the first revision).
if verbose is True, then the LogEntry instances also know which files changed.
"""
assert self.check() #make it simpler for the pipe
rev_start = rev_start is None and "HEAD" or rev_start
rev_end = rev_end is None and "HEAD" or rev_end
if rev_start == "HEAD" and rev_end == 1:
rev_opt = ""
else:
rev_opt = "-r %s:%s" % (rev_start, rev_end)
verbose_opt = verbose and "-v" or ""
xmlpipe = self._svnpopenauth('svn log --xml %s %s "%s"' %
(rev_opt, verbose_opt, self.strpath))
from xml.dom import minidom
tree = minidom.parse(xmlpipe)
result = []
for logentry in filter(None, tree.firstChild.childNodes):
if logentry.nodeType == logentry.ELEMENT_NODE:
result.append(svncommon.LogEntry(logentry))
return result
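# Illustrative usage sketch (not part of the original source; the repository
# URL is hypothetical and the calls need the "svn" client plus network access,
# hence the SKIP markers):
#
#   >>> url = py.path.svnurl("https://example.org/svn/trunk")
#   >>> [x.basename for x in url.listdir()]           # doctest: +SKIP
#   >>> url.join("README").open().read()               # doctest: +SKIP
#   >>> url.log(verbose=True)[-1].author               # doctest: +SKIP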
#01234567890123456789012345678901234567890123467
# 2256 hpk 165 Nov 24 17:55 __init__.py
# XXX spotted by Guido, SVN 1.3.0 has different aligning, breaks the code!!!
# 1312 johnny 1627 May 05 14:32 test_decorators.py
#
class InfoSvnCommand:
# the '0?' part in the middle is an indication of whether the resource is
# locked, see 'svn help ls'
lspattern = re.compile(
r'^ *(?P<rev>\d+) +(?P<author>.+?) +(0? *(?P<size>\d+))? '
'*(?P<date>\w+ +\d{2} +[\d:]+) +(?P<file>.*)$')
def __init__(self, line):
# this is a typical line from 'svn ls http://...'
#_ 1127 jum 0 Jul 13 15:28 branch/
match = self.lspattern.match(line)
data = match.groupdict()
self._name = data['file']
if self._name[-1] == '/':
self._name = self._name[:-1]
self.kind = 'dir'
else:
self.kind = 'file'
#self.has_props = l.pop(0) == 'P'
self.created_rev = int(data['rev'])
self.last_author = data['author']
self.size = data['size'] and int(data['size']) or 0
self.mtime = parse_time_with_missing_year(data['date'])
self.time = self.mtime * 1000000
def __eq__(self, other):
return self.__dict__ == other.__dict__
#____________________________________________________
#
# helper functions
#____________________________________________________
def parse_time_with_missing_year(timestr):
""" analyze the time part from a single line of "svn ls -v"
the svn output doesn't show the year, which makes the 'timestr'
ambiguous.
"""
import calendar
t_now = time.gmtime()
tparts = timestr.split()
month = time.strptime(tparts.pop(0), '%b')[1]
day = time.strptime(tparts.pop(0), '%d')[2]
last = tparts.pop(0) # year or hour:minute
try:
if ":" in last:
raise ValueError()
year = time.strptime(last, '%Y')[0]
hour = minute = 0
except ValueError:
hour, minute = time.strptime(last, '%H:%M')[3:5]
year = t_now[0]
t_result = (year, month, day, hour, minute, 0,0,0,0)
if t_result > t_now:
year -= 1
t_result = (year, month, day, hour, minute, 0,0,0,0)
return calendar.timegm(t_result)
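# Illustrative sketch (not part of the original source): a stamp carrying an
# explicit year is taken verbatim, while "HH:MM" stamps are assumed to fall
# within the last twelve months.
#
#   >>> import time
#   >>> time.gmtime(parse_time_with_missing_year("Jul 13 2015"))[:3]
#   (2015, 7, 13)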
class PathEntry:
def __init__(self, ppart):
self.strpath = ppart.firstChild.nodeValue.encode('UTF-8')
self.action = ppart.getAttribute('action').encode('UTF-8')
if self.action == 'A':
self.copyfrom_path = ppart.getAttribute('copyfrom-path').encode('UTF-8')
if self.copyfrom_path:
self.copyfrom_rev = int(ppart.getAttribute('copyfrom-rev'))

File diff suppressed because it is too large

View file

@ -0,0 +1 @@
""" high-level sub-process handling """

View file

@ -0,0 +1,49 @@
import sys
import subprocess
import py
from subprocess import Popen, PIPE
def cmdexec(cmd):
""" return unicode output of executing 'cmd' in a separate process.
raise a cmdexec.Error exception if the command failed.
The exception will provide an 'err' attribute containing
the error-output from the command.
If the subprocess module does not provide proper unicode strings,
sys.getdefaultencoding() will be used for decoding; if that does not exist, 'UTF-8'.
"""
process = subprocess.Popen(cmd, shell=True,
universal_newlines=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
if sys.version_info[0] < 3: # on py3 we get unicode strings, on py2 not
try:
default_encoding = sys.getdefaultencoding() # jython may not have it
except AttributeError:
default_encoding = sys.stdout.encoding or 'UTF-8'
out = unicode(out, process.stdout.encoding or default_encoding)
err = unicode(err, process.stderr.encoding or default_encoding)
status = process.poll()
if status:
raise ExecutionFailed(status, status, cmd, out, err)
return out
class ExecutionFailed(py.error.Error):
def __init__(self, status, systemstatus, cmd, out, err):
Exception.__init__(self)
self.status = status
self.systemstatus = systemstatus
self.cmd = cmd
self.err = err
self.out = out
def __str__(self):
return "ExecutionFailed: %d %s\n%s" %(self.status, self.cmd, self.err)
# export the exception under the name 'py.process.cmdexec.Error'
cmdexec.Error = ExecutionFailed
try:
ExecutionFailed.__module__ = 'py.process.cmdexec'
ExecutionFailed.__name__ = 'Error'
except (AttributeError, TypeError):
pass
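# Illustrative sketch (not part of the original module); assumes a shell in
# which "echo" and "exit" behave as usual.
if __name__ == "__main__":
    assert cmdexec("echo hello").strip() == "hello"
    try:
        cmdexec("exit 3")
    except cmdexec.Error as exc:
        # non-zero status is reported through the exception
        assert exc.status == 3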

View file

@ -0,0 +1,120 @@
"""
ForkedFunc provides a way to run a function in a forked process
and get at its return value, stdout and stderr output as well
as signals and exit status.
"""
import py
import os
import sys
import marshal
def get_unbuffered_io(fd, filename):
f = open(str(filename), "w")
if fd != f.fileno():
os.dup2(f.fileno(), fd)
class AutoFlush:
def write(self, data):
f.write(data)
f.flush()
def __getattr__(self, name):
return getattr(f, name)
return AutoFlush()
class ForkedFunc:
EXITSTATUS_EXCEPTION = 3
def __init__(self, fun, args=None, kwargs=None, nice_level=0,
child_on_start=None, child_on_exit=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
self.fun = fun
self.args = args
self.kwargs = kwargs
self.tempdir = tempdir = py.path.local.mkdtemp()
self.RETVAL = tempdir.ensure('retval')
self.STDOUT = tempdir.ensure('stdout')
self.STDERR = tempdir.ensure('stderr')
pid = os.fork()
if pid: # in parent process
self.pid = pid
else: # in child process
self.pid = None
self._child(nice_level, child_on_start, child_on_exit)
def _child(self, nice_level, child_on_start, child_on_exit):
# right now we need to call a function, but first we need to
# map all IO that might happen
sys.stdout = stdout = get_unbuffered_io(1, self.STDOUT)
sys.stderr = stderr = get_unbuffered_io(2, self.STDERR)
retvalf = self.RETVAL.open("wb")
EXITSTATUS = 0
try:
if nice_level:
os.nice(nice_level)
try:
if child_on_start is not None:
child_on_start()
retval = self.fun(*self.args, **self.kwargs)
retvalf.write(marshal.dumps(retval))
if child_on_exit is not None:
child_on_exit()
except:
excinfo = py.code.ExceptionInfo()
stderr.write(str(excinfo._getreprcrash()))
EXITSTATUS = self.EXITSTATUS_EXCEPTION
finally:
stdout.close()
stderr.close()
retvalf.close()
os.close(1)
os.close(2)
os._exit(EXITSTATUS)
def waitfinish(self, waiter=os.waitpid):
pid, systemstatus = waiter(self.pid, 0)
if systemstatus:
if os.WIFSIGNALED(systemstatus):
exitstatus = os.WTERMSIG(systemstatus) + 128
else:
exitstatus = os.WEXITSTATUS(systemstatus)
else:
exitstatus = 0
signal = systemstatus & 0x7f
if not exitstatus and not signal:
retval = self.RETVAL.open('rb')
try:
retval_data = retval.read()
finally:
retval.close()
retval = marshal.loads(retval_data)
else:
retval = None
stdout = self.STDOUT.read()
stderr = self.STDERR.read()
self._removetemp()
return Result(exitstatus, signal, retval, stdout, stderr)
def _removetemp(self):
if self.tempdir.check():
self.tempdir.remove()
def __del__(self):
if self.pid is not None: # only clean up in main process
self._removetemp()
class Result(object):
def __init__(self, exitstatus, signal, retval, stdout, stderr):
self.exitstatus = exitstatus
self.signal = signal
self.retval = retval
self.out = stdout
self.err = stderr
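# Illustrative sketch (not part of the original module); POSIX only, since it
# relies on os.fork().
if __name__ == "__main__":
    def _demo(x):
        print("working on %r" % (x,))
        return x * 2
    result = ForkedFunc(_demo, args=[21]).waitfinish()
    assert result.exitstatus == 0 and result.signal == 0
    assert result.retval == 42            # marshalled back from the child
    assert "working on 21" in result.out  # captured child stdout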

View file

@ -0,0 +1,23 @@
import py
import os, sys
if sys.platform == "win32" or getattr(os, '_name', '') == 'nt':
try:
import ctypes
except ImportError:
def dokill(pid):
py.process.cmdexec("taskkill /F /PID %d" %(pid,))
else:
def dokill(pid):
PROCESS_TERMINATE = 1
handle = ctypes.windll.kernel32.OpenProcess(
PROCESS_TERMINATE, False, pid)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
ctypes.windll.kernel32.CloseHandle(handle)
else:
def dokill(pid):
os.kill(pid, 15)
def kill(pid):
""" kill process by id. """
dokill(pid)
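# Illustrative sketch (not part of the original module): terminate a
# long-running child process by pid.
if __name__ == "__main__":
    import subprocess
    child = subprocess.Popen([sys.executable, "-c",
                              "import time; time.sleep(60)"])
    kill(child.pid)
    child.wait()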

View file

@ -0,0 +1,18 @@
import sys
class Std(object):
""" makes top-level python modules available as an attribute,
importing them on first access.
"""
def __init__(self):
self.__dict__ = sys.modules
def __getattr__(self, name):
try:
m = __import__(name)
except ImportError:
raise AttributeError("py.std: could not import %s" % name)
return m
std = Std()
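# Illustrative sketch (not part of the original module): attribute access on
# the singleton lazily imports the stdlib module of the same name.
if __name__ == "__main__":
    assert std.textwrap.dedent("    x") == "x"
    try:
        std.surely_no_such_module
    except AttributeError:
        pass                              # unknown names raise AttributeError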

View file

@ -0,0 +1,253 @@
"""
module for generating and serializing xml and html structures
by using simple python objects.
(c) holger krekel, holger at merlinux eu. 2009
"""
import sys, re
if sys.version_info >= (3,0):
def u(s):
return s
def unicode(x, errors=None):
if hasattr(x, '__unicode__'):
return x.__unicode__()
return str(x)
else:
def u(s):
return unicode(s)
unicode = unicode
class NamespaceMetaclass(type):
def __getattr__(self, name):
if name[:1] == '_':
raise AttributeError(name)
if self == Namespace:
raise ValueError("Namespace class is abstract")
tagspec = self.__tagspec__
if tagspec is not None and name not in tagspec:
raise AttributeError(name)
classattr = {}
if self.__stickyname__:
classattr['xmlname'] = name
cls = type(name, (self.__tagclass__,), classattr)
setattr(self, name, cls)
return cls
class Tag(list):
class Attr(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def __init__(self, *args, **kwargs):
super(Tag, self).__init__(args)
self.attr = self.Attr(**kwargs)
def __unicode__(self):
return self.unicode(indent=0)
__str__ = __unicode__
def unicode(self, indent=2):
l = []
SimpleUnicodeVisitor(l.append, indent).visit(self)
return u("").join(l)
def __repr__(self):
name = self.__class__.__name__
return "<%r tag object %d>" % (name, id(self))
Namespace = NamespaceMetaclass('Namespace', (object, ), {
'__tagspec__': None,
'__tagclass__': Tag,
'__stickyname__': False,
})
class HtmlTag(Tag):
def unicode(self, indent=2):
l = []
HtmlVisitor(l.append, indent, shortempty=False).visit(self)
return u("").join(l)
# exported plain html namespace
class html(Namespace):
__tagclass__ = HtmlTag
__stickyname__ = True
__tagspec__ = dict([(x,1) for x in (
'a,abbr,acronym,address,applet,area,b,bdo,big,blink,'
'blockquote,body,br,button,caption,center,cite,code,col,'
'colgroup,comment,dd,del,dfn,dir,div,dl,dt,em,embed,'
'fieldset,font,form,frameset,h1,h2,h3,h4,h5,h6,head,html,'
'i,iframe,img,input,ins,kbd,label,legend,li,link,listing,'
'map,marquee,menu,meta,multicol,nobr,noembed,noframes,'
'noscript,object,ol,optgroup,option,p,pre,q,s,script,'
'select,small,span,strike,strong,style,sub,sup,table,'
'tbody,td,textarea,tfoot,th,thead,title,tr,tt,u,ul,xmp,'
'base,basefont,frame,hr,isindex,param,samp,var'
).split(',') if x])
class Style(object):
def __init__(self, **kw):
for x, y in kw.items():
x = x.replace('_', '-')
setattr(self, x, y)
class raw(object):
"""just a box that can contain a unicode string that will be
included directly in the output"""
def __init__(self, uniobj):
self.uniobj = uniobj
class SimpleUnicodeVisitor(object):
""" recursive visitor to write unicode. """
def __init__(self, write, indent=0, curindent=0, shortempty=True):
self.write = write
self.cache = {}
self.visited = {} # for detection of recursion
self.indent = indent
self.curindent = curindent
self.parents = []
self.shortempty = shortempty # short empty tags or not
def visit(self, node):
""" dispatcher on node's class/bases name. """
cls = node.__class__
try:
visitmethod = self.cache[cls]
except KeyError:
for subclass in cls.__mro__:
visitmethod = getattr(self, subclass.__name__, None)
if visitmethod is not None:
break
else:
visitmethod = self.__object
self.cache[cls] = visitmethod
visitmethod(node)
# the default fallback handler is marked private
# to avoid clashes with the tag name object
def __object(self, obj):
#self.write(obj)
self.write(escape(unicode(obj)))
def raw(self, obj):
self.write(obj.uniobj)
def list(self, obj):
assert id(obj) not in self.visited
self.visited[id(obj)] = 1
for elem in obj:
self.visit(elem)
def Tag(self, tag):
assert id(tag) not in self.visited
try:
tag.parent = self.parents[-1]
except IndexError:
tag.parent = None
self.visited[id(tag)] = 1
tagname = getattr(tag, 'xmlname', tag.__class__.__name__)
if self.curindent and not self._isinline(tagname):
self.write("\n" + u(' ') * self.curindent)
if tag:
self.curindent += self.indent
self.write(u('<%s%s>') % (tagname, self.attributes(tag)))
self.parents.append(tag)
for x in tag:
self.visit(x)
self.parents.pop()
self.write(u('</%s>') % tagname)
self.curindent -= self.indent
else:
nameattr = tagname+self.attributes(tag)
if self._issingleton(tagname):
self.write(u('<%s/>') % (nameattr,))
else:
self.write(u('<%s></%s>') % (nameattr, tagname))
def attributes(self, tag):
# serialize attributes
attrlist = dir(tag.attr)
attrlist.sort()
l = []
for name in attrlist:
res = self.repr_attribute(tag.attr, name)
if res is not None:
l.append(res)
l.extend(self.getstyle(tag))
return u("").join(l)
def repr_attribute(self, attrs, name):
if name[:2] != '__':
value = getattr(attrs, name)
if name.endswith('_'):
name = name[:-1]
if isinstance(value, raw):
insert = value.uniobj
else:
insert = escape(unicode(value))
return ' %s="%s"' % (name, insert)
def getstyle(self, tag):
""" return attribute list suitable for styling. """
try:
styledict = tag.style.__dict__
except AttributeError:
return []
else:
stylelist = [x+': ' + y for x,y in styledict.items()]
return [u(' style="%s"') % u('; ').join(stylelist)]
def _issingleton(self, tagname):
"""can (and will) be overridden in subclasses"""
return self.shortempty
def _isinline(self, tagname):
"""can (and will) be overridden in subclasses"""
return False
class HtmlVisitor(SimpleUnicodeVisitor):
single = dict([(x, 1) for x in
('br,img,area,param,col,hr,meta,link,base,'
'input,frame').split(',')])
inline = dict([(x, 1) for x in
('a abbr acronym b basefont bdo big br cite code dfn em font '
'i img input kbd label q s samp select small span strike '
'strong sub sup textarea tt u var'.split(' '))])
def repr_attribute(self, attrs, name):
if name == 'class_':
value = getattr(attrs, name)
if value is None:
return
return super(HtmlVisitor, self).repr_attribute(attrs, name)
def _issingleton(self, tagname):
return tagname in self.single
def _isinline(self, tagname):
return tagname in self.inline
class _escape:
def __init__(self):
self.escape = {
u('"') : u('&quot;'), u('<') : u('&lt;'), u('>') : u('&gt;'),
u('&') : u('&amp;'), u("'") : u('&apos;'),
}
self.charef_rex = re.compile(u("|").join(self.escape.keys()))
def _replacer(self, match):
return self.escape[match.group(0)]
def __call__(self, ustring):
""" xml-escape the given unicode string. """
try:
ustring = unicode(ustring)
except UnicodeDecodeError:
ustring = unicode(ustring, 'utf-8', errors='replace')
return self.charef_rex.sub(self._replacer, ustring)
escape = _escape()
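# Illustrative sketch (not part of the original module): build a small html
# fragment from nested tag objects; keyword arguments become attributes and a
# trailing underscore (as in "class_") is stripped on serialization.
if __name__ == "__main__":
    fragment = html.div(html.p("hello ", html.b("world")), class_="greeting")
    print(fragment.unicode(indent=0))
    # -> <div class="greeting"><p>hello <b>world</b></p></div>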

View file

@ -0,0 +1,10 @@
import sys
if __name__ == '__main__':
import pytest
sys.exit(pytest.main())
else:
import sys, pytest
sys.modules['py.test'] = pytest
# for more API entry points see the 'tests' definition
# in __init__.py

View file

@ -0,0 +1,5 @@
[wheel]
universal = 1
[devpi:upload]
formats=sdist.tgz,bdist_wheel

View file

@ -0,0 +1,38 @@
import os, sys
from setuptools import setup
def main():
setup(
name='py',
description='library with cross-python path, ini-parsing, io, code, log facilities',
long_description = open('README.txt').read(),
version='1.4.31',
url='http://pylib.readthedocs.org/',
license='MIT license',
platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
author='holger krekel, Ronny Pfannschmidt, Benjamin Peterson and others',
author_email='pytest-dev@python.org',
classifiers=['Development Status :: 6 - Mature',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Libraries',
'Topic :: Utilities',
'Programming Language :: Python',
'Programming Language :: Python :: 3'],
packages=['py',
'py._code',
'py._io',
'py._log',
'py._path',
'py._process',
],
zip_safe=False,
)
if __name__ == '__main__':
main()

View file

@ -0,0 +1,308 @@
import pytest, py
def exvalue():
return py.std.sys.exc_info()[1]
def f():
return 2
def test_assert():
try:
assert f() == 3
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_within_finally():
excinfo = py.test.raises(ZeroDivisionError, """
try:
1/0
finally:
i = 42
""")
s = excinfo.exconly()
assert py.std.re.search("division.+by zero", s) is not None
#def g():
# A.f()
#excinfo = getexcinfo(TypeError, g)
#msg = getmsg(excinfo)
#assert msg.find("must be called with A") != -1
def test_assert_multiline_1():
try:
assert (f() ==
3)
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_multiline_2():
try:
assert (f() == (4,
3)[-1])
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 ==')
def test_in():
try:
assert "hi" in [1, 2]
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 'hi' in")
def test_is():
try:
assert 1 is 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 is 2")
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_attrib():
class Foo(object):
b = 1
i = Foo()
try:
assert i.b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_attrib_inst():
class Foo(object):
b = 1
try:
assert Foo().b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
def test_len():
l = list(range(42))
try:
assert len(l) == 100
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 42 == 100")
assert "where 42 = len([" in s
def test_assert_keyword_arg():
def f(x=3):
return False
try:
assert f(x=5)
except AssertionError:
e = exvalue()
assert "x=5" in e.msg
# These tests should both fail, but should fail nicely...
class WeirdRepr:
def __repr__(self):
return '<WeirdRepr\nsecond line>'
def bug_test_assert_repr():
v = WeirdRepr()
try:
assert v == 1
except AssertionError:
e = exvalue()
assert e.msg.find('WeirdRepr') != -1
assert e.msg.find('second line') != -1
assert 0
def test_assert_non_string():
try:
assert 0, ['list']
except AssertionError:
e = exvalue()
assert e.msg.find("list") != -1
def test_assert_implicit_multiline():
try:
x = [1,2,3]
assert x != [1,
2, 3]
except AssertionError:
e = exvalue()
assert e.msg.find('assert [1, 2, 3] !=') != -1
def test_assert_with_brokenrepr_arg():
class BrokenRepr:
def __repr__(self): 0 / 0
e = AssertionError(BrokenRepr())
if e.msg.find("broken __repr__") == -1:
py.test.fail("broken __repr__ not handled correctly")
def test_multiple_statements_per_line():
try:
a = 1; assert a == 2
except AssertionError:
e = exvalue()
assert "assert 1 == 2" in e.msg
def test_power():
try:
assert 2**3 == 7
except AssertionError:
e = exvalue()
assert "assert (2 ** 3) == 7" in e.msg
class TestView:
def setup_class(cls):
cls.View = py.test.importorskip("py._code._assertionold").View
def test_class_dispatch(self):
### Use a custom class hierarchy with existing instances
class Picklable(self.View):
pass
class Simple(Picklable):
__view__ = object
def pickle(self):
return repr(self.__obj__)
class Seq(Picklable):
__view__ = list, tuple, dict
def pickle(self):
return ';'.join(
[Picklable(item).pickle() for item in self.__obj__])
class Dict(Seq):
__view__ = dict
def pickle(self):
return Seq.pickle(self) + '!' + Seq(self.values()).pickle()
assert Picklable(123).pickle() == '123'
assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4'
assert Picklable({1:2}).pickle() == '1!2'
def test_viewtype_class_hierarchy(self):
# Use a custom class hierarchy based on attributes of existing instances
class Operation:
"Existing class that I don't want to change."
def __init__(self, opname, *args):
self.opname = opname
self.args = args
existing = [Operation('+', 4, 5),
Operation('getitem', '', 'join'),
Operation('setattr', 'x', 'y', 3),
Operation('-', 12, 1)]
class PyOp(self.View):
def __viewkey__(self):
return self.opname
def generate(self):
return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args)))
class PyBinaryOp(PyOp):
__view__ = ('+', '-', '*', '/')
def generate(self):
return '%s %s %s' % (self.args[0], self.opname, self.args[1])
codelines = [PyOp(op).generate() for op in existing]
assert codelines == ["4 + 5", "getitem('', 'join')",
"setattr('x', 'y', 3)", "12 - 1"]
def test_underscore_api():
py.code._AssertionError
py.code._reinterpret_old # used by pypy
py.code._reinterpret
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_assert_customizable_reprcompare(monkeypatch):
util = pytest.importorskip("_pytest.assertion.util")
monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
try:
assert 3 == 4
except AssertionError:
e = exvalue()
s = str(e)
assert "hello" in s
def test_assert_long_source_1():
try:
assert len == [
(None, ['somet text', 'more text']),
]
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_long_source_2():
try:
assert(len == [
(None, ['somet text', 'more text']),
])
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_raise_alias(testdir):
testdir.makepyfile("""
import sys
EX = AssertionError
def test_hello():
raise EX("hello"
"multi"
"line")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello*",
"*raise EX*",
"*1 failed*",
])
@pytest.mark.skipif("sys.version_info < (2,5)")
def test_assert_raise_subclass():
class SomeEx(AssertionError):
def __init__(self, *args):
super(SomeEx, self).__init__()
try:
raise SomeEx("hello")
except AssertionError:
s = str(exvalue())
assert 're-run' not in s
assert 'could not determine' in s
def test_assert_raises_in_nonzero_of_object_pytest_issue10():
class A(object):
def __nonzero__(self):
raise ValueError(42)
def __lt__(self, other):
return A()
def __repr__(self):
return "<MY42 object>"
def myany(x):
return True
try:
assert not(myany(A() < 0))
except AssertionError:
e = exvalue()
s = str(e)
assert "<MY42 object> < 0" in s

View file

@ -0,0 +1,159 @@
import py
import sys
def test_ne():
code1 = py.code.Code(compile('foo = "bar"', '', 'exec'))
assert code1 == code1
code2 = py.code.Code(compile('foo = "baz"', '', 'exec'))
assert code2 != code1
def test_code_gives_back_name_for_not_existing_file():
name = 'abc-123'
co_code = compile("pass\n", name, 'exec')
assert co_code.co_filename == name
code = py.code.Code(co_code)
assert str(code.path) == name
assert code.fullsource is None
def test_code_with_class():
class A:
pass
py.test.raises(TypeError, "py.code.Code(A)")
if True:
def x():
pass
def test_code_fullsource():
code = py.code.Code(x)
full = code.fullsource
assert 'test_code_fullsource()' in str(full)
def test_code_source():
code = py.code.Code(x)
src = code.source()
expected = """def x():
pass"""
assert str(src) == expected
def test_frame_getsourcelineno_myself():
def func():
return sys._getframe(0)
f = func()
f = py.code.Frame(f)
source, lineno = f.code.fullsource, f.lineno
assert source[lineno].startswith(" return sys._getframe(0)")
def test_getstatement_empty_fullsource():
def func():
return sys._getframe(0)
f = func()
f = py.code.Frame(f)
prop = f.code.__class__.fullsource
try:
f.code.__class__.fullsource = None
assert f.statement == py.code.Source("")
finally:
f.code.__class__.fullsource = prop
def test_code_from_func():
co = py.code.Code(test_frame_getsourcelineno_myself)
assert co.firstlineno
assert co.path
def test_builtin_patch_unpatch(monkeypatch):
cpy_builtin = py.builtin.builtins
comp = cpy_builtin.compile
def mycompile(*args, **kwargs):
return comp(*args, **kwargs)
class Sub(AssertionError):
pass
monkeypatch.setattr(cpy_builtin, 'AssertionError', Sub)
monkeypatch.setattr(cpy_builtin, 'compile', mycompile)
py.code.patch_builtins()
assert cpy_builtin.AssertionError != Sub
assert cpy_builtin.compile != mycompile
py.code.unpatch_builtins()
assert cpy_builtin.AssertionError is Sub
assert cpy_builtin.compile == mycompile
def test_unicode_handling():
value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
def f():
raise Exception(value)
excinfo = py.test.raises(Exception, f)
s = str(excinfo)
if sys.version_info[0] < 3:
u = unicode(excinfo)
def test_code_getargs():
def f1(x):
pass
c1 = py.code.Code(f1)
assert c1.getargs(var=True) == ('x',)
def f2(x, *y):
pass
c2 = py.code.Code(f2)
assert c2.getargs(var=True) == ('x', 'y')
def f3(x, **z):
pass
c3 = py.code.Code(f3)
assert c3.getargs(var=True) == ('x', 'z')
def f4(x, *y, **z):
pass
c4 = py.code.Code(f4)
assert c4.getargs(var=True) == ('x', 'y', 'z')
def test_frame_getargs():
def f1(x):
return sys._getframe(0)
fr1 = py.code.Frame(f1('a'))
assert fr1.getargs(var=True) == [('x', 'a')]
def f2(x, *y):
return sys._getframe(0)
fr2 = py.code.Frame(f2('a', 'b', 'c'))
assert fr2.getargs(var=True) == [('x', 'a'), ('y', ('b', 'c'))]
def f3(x, **z):
return sys._getframe(0)
fr3 = py.code.Frame(f3('a', b='c'))
assert fr3.getargs(var=True) == [('x', 'a'), ('z', {'b': 'c'})]
def f4(x, *y, **z):
return sys._getframe(0)
fr4 = py.code.Frame(f4('a', 'b', c='d'))
assert fr4.getargs(var=True) == [('x', 'a'), ('y', ('b',)),
('z', {'c': 'd'})]
class TestExceptionInfo:
def test_bad_getsource(self):
try:
if False: pass
else: assert False
except AssertionError:
exci = py.code.ExceptionInfo()
assert exci.getrepr()
class TestTracebackEntry:
def test_getsource(self):
try:
if False: pass
else: assert False
except AssertionError:
exci = py.code.ExceptionInfo()
entry = exci.traceback[0]
source = entry.getsource()
assert len(source) == 4
assert 'else: assert False' in source[3]

View file

@ -0,0 +1,909 @@
# -*- coding: utf-8 -*-
import py
from py._code.code import FormattedExcinfo, ReprExceptionInfo
queue = py.builtin._tryimport('queue', 'Queue')
failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
from test_source import astonly
try:
import importlib
except ImportError:
invalidate_import_caches = None
else:
invalidate_import_caches = getattr(importlib, "invalidate_caches", None)
import pytest
pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))
class TWMock:
def __init__(self):
self.lines = []
def sep(self, sep, line=None):
self.lines.append((sep, line))
def line(self, line, **kw):
self.lines.append(line)
def markup(self, text, **kw):
return text
fullwidth = 80
def test_excinfo_simple():
try:
raise ValueError
except ValueError:
info = py.code.ExceptionInfo()
assert info.type == ValueError
def test_excinfo_getstatement():
def g():
raise ValueError
def f():
g()
try:
f()
except ValueError:
excinfo = py.code.ExceptionInfo()
linenumbers = [py.code.getrawcode(f).co_firstlineno-1+3,
py.code.getrawcode(f).co_firstlineno-1+1,
py.code.getrawcode(g).co_firstlineno-1+1,]
l = list(excinfo.traceback)
foundlinenumbers = [x.lineno for x in l]
assert foundlinenumbers == linenumbers
#for x in info:
# print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement)
#xxx
# testchain for getentries test below
def f():
#
raise ValueError
#
def g():
#
__tracebackhide__ = True
f()
#
def h():
#
g()
#
class TestTraceback_f_g_h:
def setup_method(self, method):
try:
h()
except ValueError:
self.excinfo = py.code.ExceptionInfo()
def test_traceback_entries(self):
tb = self.excinfo.traceback
entries = list(tb)
assert len(tb) == 4 # maybe fragile test
assert len(entries) == 4 # maybe fragile test
names = ['f', 'g', 'h']
for entry in entries:
try:
names.remove(entry.frame.code.name)
except ValueError:
pass
assert not names
def test_traceback_entry_getsource(self):
tb = self.excinfo.traceback
s = str(tb[-1].getsource() )
assert s.startswith("def f():")
assert s.endswith("raise ValueError")
@astonly
@failsonjython
def test_traceback_entry_getsource_in_construct(self):
source = py.code.Source("""\
def xyz():
try:
raise ValueError
except somenoname:
pass
xyz()
""")
try:
exec (source.compile())
except NameError:
tb = py.code.ExceptionInfo().traceback
print (tb[-1].getsource())
s = str(tb[-1].getsource())
assert s.startswith("def xyz():\n try:")
assert s.strip().endswith("except somenoname:")
def test_traceback_cut(self):
co = py.code.Code(f)
path, firstlineno = co.path, co.firstlineno
traceback = self.excinfo.traceback
newtraceback = traceback.cut(path=path, firstlineno=firstlineno)
assert len(newtraceback) == 1
newtraceback = traceback.cut(path=path, lineno=firstlineno+2)
assert len(newtraceback) == 1
def test_traceback_cut_excludepath(self, testdir):
p = testdir.makepyfile("def f(): raise ValueError")
excinfo = py.test.raises(ValueError, "p.pyimport().f()")
basedir = py.path.local(py.test.__file__).dirpath()
newtraceback = excinfo.traceback.cut(excludepath=basedir)
for x in newtraceback:
if hasattr(x, 'path'):
assert not py.path.local(x.path).relto(basedir)
assert newtraceback[-1].frame.code.path == p
def test_traceback_filter(self):
traceback = self.excinfo.traceback
ntraceback = traceback.filter()
assert len(ntraceback) == len(traceback) - 1
def test_traceback_recursion_index(self):
def f(n):
if n < 10:
n += 1
f(n)
excinfo = py.test.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex == 3
def test_traceback_only_specific_recursion_errors(self, monkeypatch):
def f(n):
if n == 0:
raise RuntimeError("hello")
f(n-1)
excinfo = pytest.raises(RuntimeError, f, 100)
monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex")
repr = excinfo.getrepr()
assert "RuntimeError: hello" in str(repr.reprcrash)
def test_traceback_no_recursion_index(self):
def do_stuff():
raise RuntimeError
def reraise_me():
import sys
exc, val, tb = sys.exc_info()
py.builtin._reraise(exc, val, tb)
def f(n):
try:
do_stuff()
except:
reraise_me()
excinfo = py.test.raises(RuntimeError, f, 8)
traceback = excinfo.traceback
recindex = traceback.recursionindex()
assert recindex is None
def test_traceback_messy_recursion(self):
#XXX: simplified locally testable version
decorator = py.test.importorskip('decorator').decorator
def log(f, *k, **kw):
print('%s %s' % (k, kw))
f(*k, **kw)
log = decorator(log)
def fail():
raise ValueError('')
fail = log(log(fail))
excinfo = py.test.raises(ValueError, fail)
assert excinfo.traceback.recursionindex() is None
def test_traceback_getcrashentry(self):
def i():
__tracebackhide__ = True
raise ValueError
def h():
i()
def g():
__tracebackhide__ = True
h()
def f():
g()
excinfo = py.test.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = py.code.Code(h)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 1
assert entry.frame.code.name == 'h'
def test_traceback_getcrashentry_empty(self):
def g():
__tracebackhide__ = True
raise ValueError
def f():
__tracebackhide__ = True
g()
excinfo = py.test.raises(ValueError, f)
tb = excinfo.traceback
entry = tb.getcrashentry()
co = py.code.Code(g)
assert entry.frame.code.path == co.path
assert entry.lineno == co.firstlineno + 2
assert entry.frame.code.name == 'g'
def hello(x):
x + 5
def test_tbentry_reinterpret():
try:
hello("hello")
except TypeError:
excinfo = py.code.ExceptionInfo()
tbentry = excinfo.traceback[-1]
msg = tbentry.reinterpret()
assert msg.startswith("TypeError: ('hello' + 5)")
def test_excinfo_exconly():
excinfo = py.test.raises(ValueError, h)
assert excinfo.exconly().startswith('ValueError')
excinfo = py.test.raises(ValueError,
"raise ValueError('hello\\nworld')")
msg = excinfo.exconly(tryshort=True)
assert msg.startswith('ValueError')
assert msg.endswith("world")
def test_excinfo_repr():
excinfo = py.test.raises(ValueError, h)
s = repr(excinfo)
assert s == "<ExceptionInfo ValueError tblen=4>"
def test_excinfo_str():
excinfo = py.test.raises(ValueError, h)
s = str(excinfo)
assert s.startswith(__file__[:-9]) # pyc file and $py.class
assert s.endswith("ValueError")
assert len(s.split(":")) >= 3 # on windows it's 4
def test_excinfo_errisinstance():
excinfo = py.test.raises(ValueError, h)
assert excinfo.errisinstance(ValueError)
def test_excinfo_no_sourcecode():
try:
exec ("raise ValueError()")
except ValueError:
excinfo = py.code.ExceptionInfo()
s = str(excinfo.traceback[-1])
if py.std.sys.version_info < (2,5):
assert s == " File '<string>':1 in ?\n ???\n"
else:
assert s == " File '<string>':1 in <module>\n ???\n"
def test_excinfo_no_python_sourcecode(tmpdir):
#XXX: simplified locally testable version
tmpdir.join('test.txt').write("{{ h()}}:")
jinja2 = py.test.importorskip('jinja2')
loader = jinja2.FileSystemLoader(str(tmpdir))
env = jinja2.Environment(loader=loader)
template = env.get_template('test.txt')
excinfo = py.test.raises(ValueError,
template.render, h=h)
for item in excinfo.traceback:
print(item) #XXX: for some reason jinja.Template.render is printed in full
item.source # shouldnt fail
if item.path.basename == 'test.txt':
assert str(item.source) == '{{ h()}}:'
def test_entrysource_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = py.code.ExceptionInfo()
entry = excinfo.traceback[-1]
source = entry.getsource()
assert source is not None
s = str(source).strip()
assert s.startswith("def get")
def test_codepath_Queue_example():
try:
queue.Queue().get(timeout=0.001)
except queue.Empty:
excinfo = py.code.ExceptionInfo()
entry = excinfo.traceback[-1]
path = entry.path
assert isinstance(path, py.path.local)
assert path.basename.lower() == "queue.py"
assert path.check()
class TestFormattedExcinfo:
def pytest_funcarg__importasmod(self, request):
def importasmod(source):
source = py.code.Source(source)
tmpdir = request.getfuncargvalue("tmpdir")
modpath = tmpdir.join("mod.py")
tmpdir.ensure("__init__.py")
modpath.write(source)
if invalidate_import_caches is not None:
invalidate_import_caches()
return modpath.pyimport()
return importasmod
def excinfo_from_exec(self, source):
source = py.code.Source(source).strip()
try:
exec (source.compile())
except KeyboardInterrupt:
raise
except:
return py.code.ExceptionInfo()
assert 0, "did not raise"
def test_repr_source(self):
pr = FormattedExcinfo()
source = py.code.Source("""
def f(x):
pass
""").strip()
pr.flow_marker = "|"
lines = pr.get_source(source, 0)
assert len(lines) == 2
assert lines[0] == "| def f(x):"
assert lines[1] == " pass"
def test_repr_source_excinfo(self):
""" check if indentation is right """
pr = FormattedExcinfo()
excinfo = self.excinfo_from_exec("""
def f():
assert 0
f()
""")
pr = FormattedExcinfo()
source = pr._getentrysource(excinfo.traceback[-1])
lines = pr.get_source(source, 1, excinfo)
assert lines == [
' def f():',
'> assert 0',
'E assert 0'
]
def test_repr_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("raise ValueError()", "", "exec")
try:
exec (co)
except ValueError:
excinfo = py.code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_many_line_source_not_existing(self):
pr = FormattedExcinfo()
co = compile("""
a = 1
raise ValueError()
""", "", "exec")
try:
exec (co)
except ValueError:
excinfo = py.code.ExceptionInfo()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
def test_repr_source_failing_fullsource(self):
pr = FormattedExcinfo()
class FakeCode(object):
class raw:
co_filename = '?'
path = '?'
firstlineno = 5
def fullsource(self):
return None
fullsource = property(fullsource)
class FakeFrame(object):
code = FakeCode()
f_locals = {}
f_globals = {}
class FakeTracebackEntry(py.code.Traceback.Entry):
def __init__(self, tb):
self.lineno = 5+3
@property
def frame(self):
return FakeFrame()
class Traceback(py.code.Traceback):
Entry = FakeTracebackEntry
class FakeExcinfo(py.code.ExceptionInfo):
typename = "Foo"
def __init__(self):
pass
def exconly(self, tryshort):
return "EXC"
def errisinstance(self, cls):
return False
excinfo = FakeExcinfo()
class FakeRawTB(object):
tb_next = None
tb = FakeRawTB()
excinfo.traceback = Traceback(tb)
fail = IOError()
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
fail = py.error.ENOENT
repr = pr.repr_excinfo(excinfo)
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
def test_repr_local(self):
p = FormattedExcinfo(showlocals=True)
loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}}
reprlocals = p.repr_locals(loc)
assert reprlocals.lines
assert reprlocals.lines[0] == '__builtins__ = <builtins>'
assert reprlocals.lines[1] == 'x = 3'
assert reprlocals.lines[2] == 'y = 5'
assert reprlocals.lines[3] == 'z = 7'
def test_repr_tracebackentry_lines(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1)
excinfo.traceback = excinfo.traceback.filter()
p = FormattedExcinfo()
reprtb = p.repr_traceback_entry(excinfo.traceback[-1])
# test as intermittent entry
lines = reprtb.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
# test as last entry
p = FormattedExcinfo(showlocals=True)
repr_entry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = repr_entry.lines
assert lines[0] == ' def func1():'
assert lines[1] == '> raise ValueError("hello\\nworld")'
assert lines[2] == 'E ValueError: hello'
assert lines[3] == 'E world'
assert not lines[4:]
loc = repr_entry.reprlocals is not None
loc = repr_entry.reprfileloc
assert loc.path == mod.__file__
assert loc.lineno == 3
#assert loc.message == "ValueError: hello"
def test_repr_tracebackentry_lines(self, importasmod):
mod = importasmod("""
def func1(m, x, y, z):
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120)
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('m', repr("m"*90))
assert reprfuncargs.args[1] == ('x', '5')
assert reprfuncargs.args[2] == ('y', '13')
assert reprfuncargs.args[3] == ('z', repr("z" * 120))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "m = " + repr('m' * 90)
assert tw.lines[1] == "x = 5, y = 13"
assert tw.lines[2] == "z = " + repr('z' * 120)
def test_repr_tracebackentry_lines_var_kw_args(self, importasmod):
mod = importasmod("""
def func1(x, *y, **z):
raise ValueError("hello\\nworld")
""")
excinfo = py.test.raises(ValueError, mod.func1, 'a', 'b', c='d')
excinfo.traceback = excinfo.traceback.filter()
entry = excinfo.traceback[-1]
p = FormattedExcinfo(funcargs=True)
reprfuncargs = p.repr_args(entry)
assert reprfuncargs.args[0] == ('x', repr('a'))
assert reprfuncargs.args[1] == ('y', repr(('b',)))
assert reprfuncargs.args[2] == ('z', repr({'c': 'd'}))
p = FormattedExcinfo(funcargs=True)
repr_entry = p.repr_traceback_entry(entry)
assert repr_entry.reprfuncargs.args == reprfuncargs.args
tw = TWMock()
repr_entry.toterminal(tw)
assert tw.lines[0] == "x = 'a', y = ('b',), z = {'c': 'd'}"
def test_repr_tracebackentry_short(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
basename = py.path.local(mod.__file__).basename
assert lines[0] == ' func1()'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 5
# test last entry
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprtb.lines
assert lines[0] == ' raise ValueError("hello")'
assert lines[1] == 'E ValueError: hello'
assert basename in str(reprtb.reprfileloc.path)
assert reprtb.reprfileloc.lineno == 3
def test_repr_tracebackentry_no(self, importasmod):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(style="no")
p.repr_traceback_entry(excinfo.traceback[-2])
p = FormattedExcinfo(style="no")
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[0] == 'E ValueError: hello'
assert not lines[1:]
def test_repr_traceback_tbfilter(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo(tbfilter=True)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
p = FormattedExcinfo(tbfilter=False)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 3
def test_traceback_short_no_source(self, importasmod, monkeypatch):
mod = importasmod("""
def func1():
raise ValueError("hello")
def entry():
func1()
""")
excinfo = py.test.raises(ValueError, mod.entry)
from py._code.code import Code
monkeypatch.setattr(Code, 'path', 'bogus')
excinfo.traceback[0].frame.code.path = "bogus"
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback_entry(excinfo.traceback[-2])
lines = reprtb.lines
last_p = FormattedExcinfo(style="short")
last_reprtb = last_p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
last_lines = last_reprtb.lines
monkeypatch.undo()
basename = py.path.local(mod.__file__).basename
assert lines[0] == ' func1()'
assert last_lines[0] == ' raise ValueError("hello")'
assert last_lines[1] == 'E ValueError: hello'
def test_repr_traceback_and_excinfo(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
for style in ("long", "short"):
p = FormattedExcinfo(style=style)
reprtb = p.repr_traceback(excinfo)
assert len(reprtb.reprentries) == 2
assert reprtb.style == style
assert not reprtb.extraline
repr = p.repr_excinfo(excinfo)
assert repr.reprtraceback
assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.message == "ValueError: 0"
def test_repr_traceback_with_invalid_cwd(self, importasmod, monkeypatch):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
p = FormattedExcinfo()
def raiseos():
raise OSError(2)
monkeypatch.setattr(py.std.os, 'getcwd', raiseos)
assert p._makepath(__file__) == __file__
reprtb = p.repr_traceback(excinfo)
def test_repr_excinfo_addouterr(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
repr.addsection("title", "content")
twmock = TWMock()
repr.toterminal(twmock)
assert twmock.lines[-1] == "content"
assert twmock.lines[-2] == ("-", "title")
def test_repr_excinfo_reprcrash(self, importasmod):
mod = importasmod("""
def entry():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.entry)
repr = excinfo.getrepr()
assert repr.reprcrash.path.endswith("mod.py")
assert repr.reprcrash.lineno == 3
assert repr.reprcrash.message == "ValueError"
assert str(repr.reprcrash).endswith("mod.py:3: ValueError")
def test_repr_traceback_recursion(self, importasmod):
mod = importasmod("""
def rec2(x):
return rec1(x+1)
def rec1(x):
return rec2(x-1)
def entry():
rec1(42)
""")
excinfo = py.test.raises(RuntimeError, mod.entry)
for style in ("short", "long", "no"):
p = FormattedExcinfo(style="short")
reprtb = p.repr_traceback(excinfo)
assert reprtb.extraline == "!!! Recursion detected (same locals & position)"
assert str(reprtb)
def test_tb_entry_AssertionError(self, importasmod):
# probably this test is a bit redundant
# as py/magic/testing/test_assertion.py
# already tests correctness of
# assertion-reinterpretation logic
mod = importasmod("""
def somefunc():
x = 1
assert x == 2
""")
excinfo = py.test.raises(AssertionError, mod.somefunc)
p = FormattedExcinfo()
reprentry = p.repr_traceback_entry(excinfo.traceback[-1], excinfo)
lines = reprentry.lines
assert lines[-1] == "E assert 1 == 2"
def test_reprexcinfo_getrepr(self, importasmod):
mod = importasmod("""
def f(x):
raise ValueError(x)
def entry():
f(0)
""")
excinfo = py.test.raises(ValueError, mod.entry)
for style in ("short", "long", "no"):
for showlocals in (True, False):
repr = excinfo.getrepr(style=style, showlocals=showlocals)
assert isinstance(repr, ReprExceptionInfo)
assert repr.reprtraceback.style == style
def test_reprexcinfo_unicode(self):
from py._code.code import TerminalRepr
class MyRepr(TerminalRepr):
def toterminal(self, tw):
tw.line(py.builtin._totext("я", "utf-8"))
x = py.builtin._totext(MyRepr())
assert x == py.builtin._totext("я", "utf-8")
def test_toterminal_long(self, importasmod):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == " def f():"
assert tw.lines[1] == "> g(3)"
assert tw.lines[2] == ""
assert tw.lines[3].endswith("mod.py:5: ")
assert tw.lines[4] == ("_ ", None)
assert tw.lines[5] == ""
assert tw.lines[6] == " def g(x):"
assert tw.lines[7] == "> raise ValueError(x)"
assert tw.lines[8] == "E ValueError: 3"
assert tw.lines[9] == ""
assert tw.lines[10].endswith("mod.py:3: ValueError")
def test_toterminal_long_missing_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tmpdir.join('mod.py').remove()
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
def test_toterminal_long_incomplete_source(self, importasmod, tmpdir):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tmpdir.join('mod.py').write('asdf')
excinfo.traceback = excinfo.traceback.filter()
repr = excinfo.getrepr()
tw = TWMock()
repr.toterminal(tw)
assert tw.lines[0] == ""
tw.lines.pop(0)
assert tw.lines[0] == "> ???"
assert tw.lines[1] == ""
assert tw.lines[2].endswith("mod.py:5: ")
assert tw.lines[3] == ("_ ", None)
assert tw.lines[4] == ""
assert tw.lines[5] == "> ???"
assert tw.lines[6] == "E ValueError: 3"
assert tw.lines[7] == ""
assert tw.lines[8].endswith("mod.py:3: ValueError")
def test_toterminal_long_filenames(self, importasmod):
mod = importasmod("""
def f():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.f)
tw = TWMock()
path = py.path.local(mod.__file__)
old = path.dirpath().chdir()
try:
repr = excinfo.getrepr(abspath=False)
repr.toterminal(tw)
line = tw.lines[-1]
x = py.path.local().bestrelpath(path)
if len(x) < len(str(path)):
assert line == "mod.py:3: ValueError"
repr = excinfo.getrepr(abspath=True)
repr.toterminal(tw)
line = tw.lines[-1]
assert line == "%s:3: ValueError" %(path,)
finally:
old.chdir()
@py.test.mark.multi(reproptions=[
{'style': style, 'showlocals': showlocals,
'funcargs': funcargs, 'tbfilter': tbfilter
} for style in ("long", "short", "no")
for showlocals in (True, False)
for tbfilter in (True, False)
for funcargs in (True, False)])
def test_format_excinfo(self, importasmod, reproptions):
mod = importasmod("""
def g(x):
raise ValueError(x)
def f():
g(3)
""")
excinfo = py.test.raises(ValueError, mod.f)
tw = py.io.TerminalWriter(stringio=True)
repr = excinfo.getrepr(**reproptions)
repr.toterminal(tw)
assert tw.stringio.getvalue()
def test_native_style(self):
excinfo = self.excinfo_from_exec("""
assert 0
""")
repr = excinfo.getrepr(style='native')
assert "assert 0" in str(repr.reprcrash)
s = str(repr)
assert s.startswith('Traceback (most recent call last):\n File')
assert s.endswith('\nAssertionError: assert 0')
assert 'exec (source.compile())' in s
# python 2.4 fails to get the source line for the assert
if py.std.sys.version_info >= (2, 5):
assert s.count('assert 0') == 2
def test_traceback_repr_style(self, importasmod):
mod = importasmod("""
def f():
g()
def g():
h()
def h():
i()
def i():
raise ValueError()
""")
excinfo = py.test.raises(ValueError, mod.f)
excinfo.traceback = excinfo.traceback.filter()
excinfo.traceback[1].set_repr_style("short")
excinfo.traceback[2].set_repr_style("short")
r = excinfo.getrepr(style="long")
tw = TWMock()
r.toterminal(tw)
for line in tw.lines: print (line)
assert tw.lines[0] == ""
assert tw.lines[1] == " def f():"
assert tw.lines[2] == "> g()"
assert tw.lines[3] == ""
assert tw.lines[4].endswith("mod.py:3: ")
assert tw.lines[5] == ("_ ", None)
assert tw.lines[6].endswith("in g")
assert tw.lines[7] == " h()"
assert tw.lines[8].endswith("in h")
assert tw.lines[9] == " i()"
assert tw.lines[10] == ("_ ", None)
assert tw.lines[11] == ""
assert tw.lines[12] == " def i():"
assert tw.lines[13] == "> raise ValueError()"
assert tw.lines[14] == "E ValueError"
assert tw.lines[15] == ""
assert tw.lines[16].endswith("mod.py:9: ValueError")

View file

@ -0,0 +1,651 @@
from py.code import Source
import py
import sys
from py._code.source import _ast
if _ast is not None:
astonly = py.test.mark.nothing
else:
astonly = py.test.mark.xfail("True", reason="only works with AST-compile")
failsonjython = py.test.mark.xfail("sys.platform.startswith('java')")
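# astonly marks tests that only make sense with AST-based compilation; failsonjython marks known Jython failures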
def test_source_str_function():
x = Source("3")
assert str(x) == "3"
x = Source(" 3")
assert str(x) == "3"
x = Source("""
3
""", rstrip=False)
assert str(x) == "\n3\n "
x = Source("""
3
""", rstrip=True)
assert str(x) == "\n3"
def test_unicode():
try:
unicode
except NameError:
return
x = Source(unicode("4"))
assert str(x) == "4"
co = py.code.compile(unicode('u"\xc3\xa5"', 'utf8'), mode='eval')
val = eval(co)
assert isinstance(val, unicode)
def test_source_from_function():
source = py.code.Source(test_source_str_function)
assert str(source).startswith('def test_source_str_function():')
def test_source_from_method():
class TestClass:
def test_method(self):
pass
source = py.code.Source(TestClass().test_method)
assert source.lines == ["def test_method(self):",
" pass"]
def test_source_from_lines():
lines = ["a \n", "b\n", "c"]
source = py.code.Source(lines)
assert source.lines == ['a ', 'b', 'c']
def test_source_from_inner_function():
def f():
pass
source = py.code.Source(f, deindent=False)
assert str(source).startswith(' def f():')
source = py.code.Source(f)
assert str(source).startswith('def f():')
def test_source_putaround_simple():
source = Source("raise ValueError")
source = source.putaround(
"try:", """\
except ValueError:
x = 42
else:
x = 23""")
assert str(source)=="""\
try:
raise ValueError
except ValueError:
x = 42
else:
x = 23"""
def test_source_putaround():
source = Source()
source = source.putaround("""
if 1:
x=1
""")
assert str(source).strip() == "if 1:\n x=1"
def test_source_strips():
source = Source("")
assert source == Source()
assert str(source) == ''
assert source.strip() == source
def test_source_strip_multiline():
source = Source()
source.lines = ["", " hello", " "]
source2 = source.strip()
assert source2.lines == [" hello"]
def test_syntaxerror_rerepresentation():
ex = py.test.raises(SyntaxError, py.code.compile, 'xyz xyz')
assert ex.value.lineno == 1
assert ex.value.offset in (4,7) # XXX pypy/jython versus cpython?
assert ex.value.text.strip(), 'x x'
def test_isparseable():
assert Source("hello").isparseable()
assert Source("if 1:\n pass").isparseable()
assert Source(" \nif 1:\n pass").isparseable()
assert not Source("if 1:\n").isparseable()
assert not Source(" \nif 1:\npass").isparseable()
assert not Source(chr(0)).isparseable()
class TestAccesses:
source = Source("""\
def f(x):
pass
def g(x):
pass
""")
def test_getrange(self):
x = self.source[0:2]
assert x.isparseable()
assert len(x.lines) == 2
assert str(x) == "def f(x):\n pass"
def test_getline(self):
x = self.source[0]
assert x == "def f(x):"
def test_len(self):
assert len(self.source) == 4
def test_iter(self):
l = [x for x in self.source]
assert len(l) == 4
class TestSourceParsingAndCompiling:
source = Source("""\
def f(x):
assert (x ==
3 +
4)
""").strip()
def test_compile(self):
co = py.code.compile("x=3")
d = {}
exec (co, d)
assert d['x'] == 3
def test_compile_and_getsource_simple(self):
co = py.code.compile("x=3")
exec (co)
source = py.code.Source(co)
assert str(source) == "x=3"
def test_compile_and_getsource_through_same_function(self):
def gensource(source):
return py.code.compile(source)
co1 = gensource("""
def f():
raise KeyError()
""")
co2 = gensource("""
def f():
raise ValueError()
""")
source1 = py.std.inspect.getsource(co1)
assert 'KeyError' in source1
source2 = py.std.inspect.getsource(co2)
assert 'ValueError' in source2
def test_getstatement(self):
#print str(self.source)
ass = str(self.source[1:])
for i in range(1, 4):
#print "trying start in line %r" % self.source[i]
s = self.source.getstatement(i)
#x = s.deindent()
assert str(s) == ass
def test_getstatementrange_triple_quoted(self):
#print str(self.source)
source = Source("""hello('''
''')""")
s = source.getstatement(0)
assert s == str(source)
s = source.getstatement(1)
assert s == str(source)
@astonly
def test_getstatementrange_within_constructs(self):
source = Source("""\
try:
try:
raise ValueError
except SomeThing:
pass
finally:
42
""")
assert len(source) == 7
# check all lineno's that could occur in a traceback
#assert source.getstatementrange(0) == (0, 7)
#assert source.getstatementrange(1) == (1, 5)
assert source.getstatementrange(2) == (2, 3)
assert source.getstatementrange(3) == (3, 4)
assert source.getstatementrange(4) == (4, 5)
#assert source.getstatementrange(5) == (0, 7)
assert source.getstatementrange(6) == (6, 7)
def test_getstatementrange_bug(self):
source = Source("""\
try:
x = (
y +
z)
except:
pass
""")
assert len(source) == 6
assert source.getstatementrange(2) == (1, 4)
def test_getstatementrange_bug2(self):
source = Source("""\
assert (
33
==
[
X(3,
b=1, c=2
),
]
)
""")
assert len(source) == 9
assert source.getstatementrange(5) == (0, 9)
def test_getstatementrange_ast_issue58(self):
source = Source("""\
def test_some():
for a in [a for a in
CAUSE_ERROR]: pass
x = 3
""")
assert getstatement(2, source).lines == source.lines[2:3]
assert getstatement(3, source).lines == source.lines[3:4]
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_getstatementrange_out_of_bounds_py3(self):
source = Source("if xxx:\n from .collections import something")
r = source.getstatementrange(1)
assert r == (1,2)
def test_getstatementrange_with_syntaxerror_issue7(self):
source = Source(":")
py.test.raises(SyntaxError, lambda: source.getstatementrange(0))
@py.test.mark.skipif("sys.version_info < (2,6)")
def test_compile_to_ast(self):
import ast
source = Source("x = 4")
mod = source.compile(flag=ast.PyCF_ONLY_AST)
assert isinstance(mod, ast.Module)
compile(mod, "<filename>", "exec")
def test_compile_and_getsource(self):
co = self.source.compile()
py.builtin.exec_(co, globals())
f(7)
excinfo = py.test.raises(AssertionError, "f(6)")
frame = excinfo.traceback[-1].frame
stmt = frame.code.fullsource.getstatement(frame.lineno)
#print "block", str(block)
assert str(stmt).strip().startswith('assert')
def test_compilefuncs_and_path_sanity(self):
def check(comp, name):
co = comp(self.source, name)
if not name:
expected = "codegen %s:%d>" %(mypath, mylineno+2+1)
else:
expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+1)
fn = co.co_filename
assert fn.endswith(expected)
mycode = py.code.Code(self.test_compilefuncs_and_path_sanity)
mylineno = mycode.firstlineno
mypath = mycode.path
for comp in py.code.compile, py.code.Source.compile:
for name in '', None, 'my':
yield check, comp, name
def test_offsetless_synerr(self):
py.test.raises(SyntaxError, py.code.compile, "lambda a,a: 0", mode='eval')
def test_getstartingblock_singleline():
class A:
def __init__(self, *args):
frame = sys._getframe(1)
self.source = py.code.Frame(frame).statement
x = A('x', 'y')
l = [i for i in x.source.lines if i.strip()]
assert len(l) == 1
def test_getstartingblock_multiline():
class A:
def __init__(self, *args):
frame = sys._getframe(1)
self.source = py.code.Frame(frame).statement
x = A('x',
'y' \
,
'z')
l = [i for i in x.source.lines if i.strip()]
assert len(l) == 4
def test_getline_finally():
def c(): pass
excinfo = py.test.raises(TypeError, """
teardown = None
try:
c(1)
finally:
if teardown:
teardown()
""")
source = excinfo.traceback[-1].statement
assert str(source).strip() == 'c(1)'
def test_getfuncsource_dynamic():
source = """
def f():
raise ValueError
def g(): pass
"""
co = py.code.compile(source)
py.builtin.exec_(co, globals())
assert str(py.code.Source(f)).strip() == 'def f():\n raise ValueError'
assert str(py.code.Source(g)).strip() == 'def g(): pass'
def test_getfuncsource_with_multine_string():
def f():
c = '''while True:
pass
'''
assert str(py.code.Source(f)).strip() == "def f():\n c = '''while True:\n pass\n'''"
def test_deindent():
from py._code.source import deindent as deindent
assert deindent(['\tfoo', '\tbar', ]) == ['foo', 'bar']
def f():
c = '''while True:
pass
'''
import inspect
lines = deindent(inspect.getsource(f).splitlines())
assert lines == ["def f():", " c = '''while True:", " pass", "'''"]
source = """
def f():
def g():
pass
"""
lines = deindent(source.splitlines())
assert lines == ['', 'def f():', ' def g():', ' pass', ' ']
@py.test.mark.xfail("sys.version_info[:3] < (2,7,0) or "
"((3,0) <= sys.version_info[:2] < (3,2))")
def test_source_of_class_at_eof_without_newline(tmpdir):
# this test fails because the implicit inspect.getsource(A) below
# does not return the "x = 1" last line.
source = py.code.Source('''
class A(object):
def method(self):
x = 1
''')
path = tmpdir.join("a.py")
path.write(source)
s2 = py.code.Source(tmpdir.join("a.py").pyimport().A)
assert str(source).strip() == str(s2).strip()
if True:
def x():
pass
def test_getsource_fallback():
from py._code.source import getsource
expected = """def x():
pass"""
src = getsource(x)
assert src == expected
def test_idem_compile_and_getsource():
from py._code.source import getsource
expected = "def x(): pass"
co = py.code.compile(expected)
src = getsource(co)
assert src == expected
def test_findsource_fallback():
from py._code.source import findsource
src, lineno = findsource(x)
assert 'test_findsource_simple' in str(src)
assert src[lineno] == ' def x():'
def test_findsource():
from py._code.source import findsource
co = py.code.compile("""if 1:
def x():
pass
""")
src, lineno = findsource(co)
assert 'if 1:' in str(src)
d = {}
eval(co, d)
src, lineno = findsource(d['x'])
assert 'if 1:' in str(src)
assert src[lineno] == " def x():"
def test_getfslineno():
from py.code import getfslineno
def f(x):
pass
fspath, lineno = getfslineno(f)
assert fspath.basename == "test_source.py"
assert lineno == py.code.getrawcode(f).co_firstlineno-1 # see findsource
class A(object):
pass
fspath, lineno = getfslineno(A)
_, A_lineno = py.std.inspect.findsource(A)
assert fspath.basename == "test_source.py"
assert lineno == A_lineno
assert getfslineno(3) == ("", -1)
class B:
pass
B.__name__ = "B2"
assert getfslineno(B)[1] == -1
def test_code_of_object_instance_with_call():
class A:
pass
py.test.raises(TypeError, lambda: py.code.Source(A()))
class WithCall:
def __call__(self):
pass
code = py.code.Code(WithCall())
assert 'pass' in str(code.source())
class Hello(object):
def __call__(self):
pass
py.test.raises(TypeError, lambda: py.code.Code(Hello))
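# helper for the statement-range tests below: wrap `source` in a py.code.Source (without deindenting)
# and return the slice covering the statement that contains `lineno`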
def getstatement(lineno, source):
from py._code.source import getstatementrange_ast
source = py.code.Source(source, deindent=False)
ast, start, end = getstatementrange_ast(lineno, source)
return source[start:end]
def test_oneline():
source = getstatement(0, "raise ValueError")
assert str(source) == "raise ValueError"
def test_comment_and_no_newline_at_end():
from py._code.source import getstatementrange_ast
source = Source(['def test_basic_complex():',
' assert 1 == 2',
'# vim: filetype=pyopencl:fdm=marker'])
ast, start, end = getstatementrange_ast(1, source)
assert end == 2
def test_oneline_and_comment():
source = getstatement(0, "raise ValueError\n#hello")
assert str(source) == "raise ValueError"
def test_comments():
source = '''def test():
"comment 1"
x = 1
# comment 2
# comment 3
assert False
"""
comment 4
"""
'''
for line in range(2,6):
assert str(getstatement(line, source)) == ' x = 1'
for line in range(6,10):
assert str(getstatement(line, source)) == ' assert False'
assert str(getstatement(10, source)) == '"""'
def test_comment_in_statement():
source = '''test(foo=1,
# comment 1
bar=2)
'''
for line in range(1,3):
assert str(getstatement(line, source)) == \
'test(foo=1,\n # comment 1\n bar=2)'
def test_single_line_else():
source = getstatement(1, "if False: 2\nelse: 3")
assert str(source) == "else: 3"
def test_single_line_finally():
source = getstatement(1, "try: 1\nfinally: 3")
assert str(source) == "finally: 3"
def test_issue55():
source = ('def round_trip(dinp):\n assert 1 == dinp\n'
'def test_rt():\n round_trip("""\n""")\n')
s = getstatement(3, source)
assert str(s) == ' round_trip("""\n""")'
def XXXtest_multiline():
source = getstatement(0, """\
raise ValueError(
23
)
x = 3
""")
assert str(source) == "raise ValueError(\n 23\n)"
class TestTry:
pytestmark = astonly
source = """\
try:
raise ValueError
except Something:
raise IndexError(1)
else:
raise KeyError()
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_except_line(self):
source = getstatement(2, self.source)
assert str(source) == "except Something:"
def test_except_body(self):
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
def test_else(self):
source = getstatement(5, self.source)
assert str(source) == " raise KeyError()"
class TestTryFinally:
source = """\
try:
raise ValueError
finally:
raise IndexError(1)
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_finally(self):
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
class TestIf:
pytestmark = astonly
source = """\
if 1:
y = 3
elif False:
y = 5
else:
y = 7
"""
def test_body(self):
source = getstatement(1, self.source)
assert str(source) == " y = 3"
def test_elif_clause(self):
source = getstatement(2, self.source)
assert str(source) == "elif False:"
def test_elif(self):
source = getstatement(3, self.source)
assert str(source) == " y = 5"
def test_else(self):
source = getstatement(5, self.source)
assert str(source) == " y = 7"
def test_semicolon():
s = """\
hello ; pytest.skip()
"""
source = getstatement(0, s)
assert str(source) == s.strip()
def test_def_online():
s = """\
def func(): raise ValueError(42)
def something():
pass
"""
source = getstatement(0, s)
assert str(source) == "def func(): raise ValueError(42)"
def XXX_test_expression_multiline():
source = """\
something
'''
'''"""
result = getstatement(1, source)
assert str(result) == "'''\n'''"

View file

@ -0,0 +1,3 @@
pytest_plugins = "pytester",

View file

@ -0,0 +1 @@
#

View file

@ -0,0 +1,501 @@
from __future__ import with_statement
import os, sys
import py
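# skip marker for platforms that lack os.dup; the FD-level capture tests below rely on it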
needsdup = py.test.mark.skipif("not hasattr(os, 'dup')")
from py.builtin import print_
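# tobytes/totext normalize between str and bytes so the capture tests run on both Python 2 and 3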
if sys.version_info >= (3,0):
def tobytes(obj):
if isinstance(obj, str):
obj = obj.encode('UTF-8')
assert isinstance(obj, bytes)
return obj
def totext(obj):
if isinstance(obj, bytes):
obj = str(obj, 'UTF-8')
assert isinstance(obj, str)
return obj
else:
def tobytes(obj):
if isinstance(obj, unicode):
obj = obj.encode('UTF-8')
assert isinstance(obj, str)
return obj
def totext(obj):
if isinstance(obj, str):
obj = unicode(obj, 'UTF-8')
assert isinstance(obj, unicode)
return obj
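# write text to a raw file descriptor, encoding it to bytes first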
def oswritebytes(fd, obj):
os.write(fd, tobytes(obj))
class TestTextIO:
def test_text(self):
f = py.io.TextIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = py.io.TextIO()
if sys.version_info >= (3,0):
f.write("\u00f6")
py.test.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))")
else:
f.write(unicode("\u00f6", 'UTF-8'))
f.write("hello") # bytes
s = f.getvalue()
f.close()
assert isinstance(s, unicode)
def test_bytes_io():
f = py.io.BytesIO()
f.write(tobytes("hello"))
py.test.raises(TypeError, "f.write(totext('hello'))")
s = f.getvalue()
assert s == tobytes("hello")
def test_dontreadfrominput():
from py._io.capture import DontReadFromInput
f = DontReadFromInput()
assert not f.isatty()
py.test.raises(IOError, f.read)
py.test.raises(IOError, f.readlines)
py.test.raises(IOError, iter, f)
py.test.raises(ValueError, f.fileno)
f.close() # just for completeness
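# old-style funcarg factory: hands each test a fresh binary temp file and closes it on teardown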
def pytest_funcarg__tmpfile(request):
testdir = request.getfuncargvalue("testdir")
f = testdir.makepyfile("").open('wb+')
request.addfinalizer(f.close)
return f
@needsdup
def test_dupfile(tmpfile):
flist = []
for i in range(5):
nf = py.io.dupfile(tmpfile, encoding="utf-8")
assert nf != tmpfile
assert nf.fileno() != tmpfile.fileno()
assert nf not in flist
print_(i, end="", file=nf)
flist.append(nf)
for i in range(5):
f = flist[i]
f.close()
tmpfile.seek(0)
s = tmpfile.read()
assert "01234" in repr(s)
tmpfile.close()
def test_dupfile_no_mode():
"""
dupfile should trap an AttributeError and return f if no mode is supplied.
"""
class SomeFileWrapper(object):
"An object with a fileno method but no mode attribute"
def fileno(self):
return 1
tmpfile = SomeFileWrapper()
assert py.io.dupfile(tmpfile) is tmpfile
with py.test.raises(AttributeError):
py.io.dupfile(tmpfile, raising=True)
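# run `func` and compare lsof output before and after to check that no file descriptors leaked
# (skipped if lsof cannot be run)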
def lsof_check(func):
pid = os.getpid()
try:
out = py.process.cmdexec("lsof -p %d" % pid)
except py.process.cmdexec.Error:
py.test.skip("could not run 'lsof'")
func()
out2 = py.process.cmdexec("lsof -p %d" % pid)
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture:
pytestmark = needsdup
def test_not_now(self, tmpfile):
fd = tmpfile.fileno()
cap = py.io.FDCapture(fd, now=False)
data = tobytes("hello")
os.write(fd, data)
f = cap.done()
s = f.read()
assert not s
cap = py.io.FDCapture(fd, now=False)
cap.start()
os.write(fd, data)
f = cap.done()
s = f.read()
assert s == "hello"
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = py.io.FDCapture(fd)
data = tobytes("hello")
os.write(fd, data)
f = cap.done()
s = f.read()
assert s == "hello"
f.close()
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, tmpfile):
lsof_check(lambda: self.test_simple_many(tmpfile))
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = py.io.FDCapture(fd)
f = cap.done()
py.test.raises(ValueError, cap.start)
f.close()
def test_stderr(self):
cap = py.io.FDCapture(2, patchsys=True)
print_("hello", file=sys.stderr)
f = cap.done()
s = f.read()
assert s == "hello\n"
def test_stdin(self, tmpfile):
tmpfile.write(tobytes("3"))
tmpfile.seek(0)
cap = py.io.FDCapture(0, tmpfile=tmpfile)
# check with os.read() directly instead of raw_input(), because
# sys.stdin itself may be redirected (as py.test now does by default)
x = os.read(0, 100).strip()
f = cap.done()
assert x == tobytes("3")
def test_writeorg(self, tmpfile):
data1, data2 = tobytes("foo"), tobytes("bar")
try:
cap = py.io.FDCapture(tmpfile.fileno())
tmpfile.write(data1)
cap.writeorg(data2)
finally:
tmpfile.close()
f = cap.done()
scap = f.read()
assert scap == totext(data1)
stmp = open(tmpfile.name, 'rb').read()
assert stmp == data2
class TestStdCapture:
def getcapture(self, **kw):
return py.io.StdCapture(**kw)
def test_capturing_done_simple(self):
cap = self.getcapture()
sys.stdout.write("hello")
sys.stderr.write("world")
outfile, errfile = cap.done()
s = outfile.read()
assert s == "hello"
s = errfile.read()
assert s == "world"
def test_capturing_reset_simple(self):
cap = self.getcapture()
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.reset()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
cap = self.getcapture()
try:
print ("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
finally:
out, err = cap.reset()
assert err == "error2"
def test_capturing_readouterr_unicode(self):
cap = self.getcapture()
print ("hx\xc4\x85\xc4\x87")
out, err = cap.readouterr()
assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8")
@py.test.mark.skipif('sys.version_info >= (3,)',
reason='text output different for bytes on python3')
def test_capturing_readouterr_decode_error_handling(self):
cap = self.getcapture()
# triggered an internal error in pytest
print('\xa6')
out, err = cap.readouterr()
assert out == py.builtin._totext('\ufffd\n', 'unicode-escape')
def test_capturing_mixed(self):
cap = self.getcapture(mixed=True)
sys.stdout.write("hello ")
sys.stderr.write("world")
sys.stdout.write(".")
out, err = cap.reset()
assert out.strip() == "hello world."
assert not err
def test_reset_twice_error(self):
cap = self.getcapture()
print ("hello")
out, err = cap.reset()
py.test.raises(ValueError, cap.reset)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
cap = self.getcapture()
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = py.io.TextIO()
sys.stderr = py.io.TextIO()
print ("not seen")
sys.stderr.write("not seen\n")
out, err = cap.reset()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
cap1 = self.getcapture()
print ("cap1")
cap2 = self.getcapture()
print ("cap2")
out2, err2 = cap2.reset()
out1, err1 = cap1.reset()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
cap = self.getcapture(out=True, err=False)
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.reset()
assert out == "hello"
assert not err
def test_just_err_capture(self):
cap = self.getcapture(out=False, err=True)
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.reset()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
cap = self.getcapture(in_=True)
newstdin = sys.stdin
out, err = cap.reset()
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print ("XXX this test may well hang instead of crashing")
print ("XXX which indicates an error in the underlying capturing")
print ("XXX mechanisms")
cap = self.getcapture()
py.test.raises(IOError, "sys.stdin.read()")
out, err = cap.reset()
def test_suspend_resume(self):
cap = self.getcapture(out=True, err=False, in_=False)
try:
print ("hello")
sys.stderr.write("error\n")
out, err = cap.suspend()
assert out == "hello\n"
assert not err
print ("in between")
sys.stderr.write("in between\n")
cap.resume()
print ("after")
sys.stderr.write("error_after\n")
finally:
out, err = cap.reset()
assert out == "after\n"
assert not err
class TestStdCaptureNotNow(TestStdCapture):
def getcapture(self, **kw):
kw['now'] = False
cap = py.io.StdCapture(**kw)
cap.startall()
return cap
class TestStdCaptureFD(TestStdCapture):
pytestmark = needsdup
def getcapture(self, **kw):
return py.io.StdCaptureFD(**kw)
def test_intermingling(self):
cap = self.getcapture()
oswritebytes(1, "1")
sys.stdout.write(str(2))
sys.stdout.flush()
oswritebytes(1, "3")
oswritebytes(2, "a")
sys.stderr.write("b")
sys.stderr.flush()
oswritebytes(2, "c")
out, err = cap.reset()
assert out == "123"
assert err == "abc"
def test_callcapture(self):
def func(x, y):
print (x)
py.std.sys.stderr.write(str(y))
return 42
res, out, err = py.io.StdCaptureFD.call(func, 3, y=4)
assert res == 42
assert out.startswith("3")
assert err.startswith("4")
def test_many(self, capfd):
def f():
for i in range(10):
cap = py.io.StdCaptureFD()
cap.reset()
lsof_check(f)
class TestStdCaptureFDNotNow(TestStdCaptureFD):
pytestmark = needsdup
def getcapture(self, **kw):
kw['now'] = False
cap = py.io.StdCaptureFD(**kw)
cap.startall()
return cap
@needsdup
def test_stdcapture_fd_tmpfile(tmpfile):
capfd = py.io.StdCaptureFD(out=tmpfile)
os.write(1, "hello".encode("ascii"))
os.write(2, "world".encode("ascii"))
outf, errf = capfd.done()
assert outf == tmpfile
class TestStdCaptureFDinvalidFD:
pytestmark = needsdup
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile("""
import py, os
def test_stdout():
os.close(1)
cap = py.io.StdCaptureFD(out=True, err=False, in_=False)
cap.done()
def test_stderr():
os.close(2)
cap = py.io.StdCaptureFD(out=False, err=True, in_=False)
cap.done()
def test_stdin():
os.close(0)
cap = py.io.StdCaptureFD(out=False, err=False, in_=True)
cap.done()
""")
result = testdir.runpytest("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()['passed'] == 3
def test_capture_not_started_but_reset():
capsys = py.io.StdCapture(now=False)
capsys.done()
capsys.done()
capsys.reset()
@needsdup
def test_capture_no_sys():
capsys = py.io.StdCapture()
try:
cap = py.io.StdCaptureFD(patchsys=False)
sys.stdout.write("hello")
sys.stderr.write("world")
oswritebytes(1, "1")
oswritebytes(2, "2")
out, err = cap.reset()
assert out == "1"
assert err == "2"
finally:
capsys.reset()
@needsdup
def test_callcapture_nofd():
def func(x, y):
oswritebytes(1, "hello")
oswritebytes(2, "hello")
print (x)
sys.stderr.write(str(y))
return 42
capfd = py.io.StdCaptureFD(patchsys=False)
try:
res, out, err = py.io.StdCapture.call(func, 3, y=4)
finally:
capfd.reset()
assert res == 42
assert out.startswith("3")
assert err.startswith("4")
@needsdup
@py.test.mark.multi(use=[True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = py.io.StdCaptureFD(out=False, err=tmpfile, now=False)
cap.startall()
capfile = cap.err.tmpfile
cap.suspend()
cap.resume()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
@py.test.mark.multi(method=['StdCapture', 'StdCaptureFD'])
def test_capturing_and_logging_fundamentals(testdir, method):
if method == "StdCaptureFD" and not hasattr(os, 'dup'):
py.test.skip("need os.dup")
# here we check a fundamental interaction between capturing and the logging module
p = testdir.makepyfile("""
import sys, os
import py, logging
cap = py.io.%s(out=False, in_=False)
logging.warn("hello1")
outerr = cap.suspend()
print ("suspend, captured %%s" %%(outerr,))
logging.warn("hello2")
cap.resume()
logging.warn("hello3")
outerr = cap.suspend()
print ("suspend2, captured %%s" %% (outerr,))
""" % (method,))
result = testdir.runpython(p)
result.stdout.fnmatch_lines([
"suspend, captured*hello1*",
"suspend2, captured*hello2*WARNING:root:hello3*",
])
assert "atexit" not in result.stderr.str()

View file

@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
from __future__ import generators
import py
import sys
saferepr = py.io.saferepr
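# py.io.saferepr: a repr() variant that truncates long output and keeps working even when an object's __repr__ raises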
class TestSafeRepr:
def test_simple_repr(self):
assert saferepr(1) == '1'
assert saferepr(None) == 'None'
def test_maxsize(self):
s = saferepr('x'*50, maxsize=25)
assert len(s) == 25
expected = repr('x'*10 + '...' + 'x'*10)
assert s == expected
def test_maxsize_error_on_instance(self):
class A:
def __repr__(self):
raise ValueError('...')
s = saferepr(('*'*50, A()), maxsize=25)
assert len(s) == 25
assert s[0] == '(' and s[-1] == ')'
def test_exceptions(self):
class BrokenRepr:
def __init__(self, ex):
self.ex = ex
foo = 0
def __repr__(self):
raise self.ex
class BrokenReprException(Exception):
__str__ = None
__repr__ = None
assert 'Exception' in saferepr(BrokenRepr(Exception("broken")))
s = saferepr(BrokenReprException("really broken"))
assert 'TypeError' in s
if py.std.sys.version_info < (2,6):
assert 'unknown' in saferepr(BrokenRepr("string"))
else:
assert 'TypeError' in saferepr(BrokenRepr("string"))
s2 = saferepr(BrokenRepr(BrokenReprException('omg even worse')))
assert 'NameError' not in s2
assert 'unknown' in s2
def test_big_repr(self):
from py._io.saferepr import SafeRepr
assert len(saferepr(range(1000))) <= \
len('[' + SafeRepr().maxlist * "1000" + ']')
def test_repr_on_newstyle(self):
class Function(object):
def __repr__(self):
return "<%s>" %(self.name)
try:
s = saferepr(Function())
except Exception:
py.test.fail("saferepr failed for newstyle class")
def test_unicode(self):
val = py.builtin._totext('£€', 'utf-8')
reprval = py.builtin._totext("'£€'", 'utf-8')
assert saferepr(val) == reprval
def test_unicode_handling():
value = py.builtin._totext('\xc4\x85\xc4\x87\n', 'utf-8').encode('utf8')
def f():
raise Exception(value)
excinfo = py.test.raises(Exception, f)
s = str(excinfo)
if sys.version_info[0] < 3:
u = unicode(excinfo)

View file

@ -0,0 +1,271 @@
import py
import os, sys
from py._io import terminalwriter
import codecs
import pytest
def test_get_terminal_width():
x = py.io.get_terminal_width
assert x == terminalwriter.get_terminal_width
def test_getdimensions(monkeypatch):
fcntl = py.test.importorskip("fcntl")
import struct
l = []
monkeypatch.setattr(fcntl, 'ioctl', lambda *args: l.append(args))
try:
terminalwriter._getdimensions()
except (TypeError, struct.error):
pass
assert len(l) == 1
assert l[0][0] == 1
def test_terminal_width_COLUMNS(monkeypatch):
""" Dummy test for get_terminal_width
"""
fcntl = py.test.importorskip("fcntl")
monkeypatch.setattr(fcntl, 'ioctl', lambda *args: int('x'))
monkeypatch.setenv('COLUMNS', '42')
assert terminalwriter.get_terminal_width() == 42
monkeypatch.delenv('COLUMNS', raising=False)
def test_terminalwriter_defaultwidth_80(monkeypatch):
monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: 0/0)
monkeypatch.delenv('COLUMNS', raising=False)
tw = py.io.TerminalWriter()
assert tw.fullwidth == 80
def test_terminalwriter_getdimensions_bogus(monkeypatch):
monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: (10,10))
monkeypatch.delenv('COLUMNS', raising=False)
tw = py.io.TerminalWriter()
assert tw.fullwidth == 80
def test_terminalwriter_getdimensions_emacs(monkeypatch):
# emacs terminal returns (0,0) but sets COLUMNS properly
monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: (0,0))
monkeypatch.setenv('COLUMNS', '42')
tw = py.io.TerminalWriter()
assert tw.fullwidth == 42
def test_terminalwriter_computes_width(monkeypatch):
monkeypatch.setattr(terminalwriter, 'get_terminal_width', lambda: 42)
tw = py.io.TerminalWriter()
assert tw.fullwidth == 42
def test_terminalwriter_default_instantiation():
tw = py.io.TerminalWriter(stringio=True)
assert hasattr(tw, 'stringio')
def test_terminalwriter_dumb_term_no_markup(monkeypatch):
monkeypatch.setattr(os, 'environ', {'TERM': 'dumb', 'PATH': ''})
class MyFile:
closed = False
def isatty(self):
return True
monkeypatch.setattr(sys, 'stdout', MyFile())
try:
assert sys.stdout.isatty()
tw = py.io.TerminalWriter()
assert not tw.hasmarkup
finally:
monkeypatch.undo()
def test_terminalwriter_file_unicode(tmpdir):
f = py.std.codecs.open(str(tmpdir.join("xyz")), "wb", "utf8")
tw = py.io.TerminalWriter(file=f)
assert tw.encoding == "utf8"
def test_unicode_encoding():
msg = py.builtin._totext('b\u00f6y', 'utf8')
for encoding in 'utf8', 'latin1':
l = []
tw = py.io.TerminalWriter(l.append, encoding=encoding)
tw.line(msg)
assert l[0].strip() == msg.encode(encoding)
@pytest.mark.parametrize("encoding", ["ascii"])
def test_unicode_on_file_with_ascii_encoding(tmpdir, monkeypatch, encoding):
msg = py.builtin._totext('hell\xf6', "latin1")
#pytest.raises(UnicodeEncodeError, lambda: bytes(msg))
f = py.std.codecs.open(str(tmpdir.join("x")), "w", encoding)
tw = py.io.TerminalWriter(f)
tw.line(msg)
f.close()
s = tmpdir.join("x").open("rb").read().strip()
assert encoding == "ascii"
assert s == msg.encode("unicode-escape")
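# TerminalWriter's full-width separator lines come out one character shorter on win32; the flag below adjusts the expected lengths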
win32 = int(sys.platform == "win32")
class TestTerminalWriter:
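# the `tw` funcarg is built three ways (real file, stringio, plain callable); getlines()/getvalue() give uniform access to the output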
def pytest_generate_tests(self, metafunc):
if "tw" in metafunc.funcargnames:
metafunc.addcall(id="path", param="path")
metafunc.addcall(id="stringio", param="stringio")
metafunc.addcall(id="callable", param="callable")
def pytest_funcarg__tw(self, request):
if request.param == "path":
tmpdir = request.getfuncargvalue("tmpdir")
p = tmpdir.join("tmpfile")
f = codecs.open(str(p), 'w+', encoding='utf8')
tw = py.io.TerminalWriter(f)
def getlines():
tw._file.flush()
return codecs.open(str(p), 'r',
encoding='utf8').readlines()
elif request.param == "stringio":
tw = py.io.TerminalWriter(stringio=True)
def getlines():
tw.stringio.seek(0)
return tw.stringio.readlines()
elif request.param == "callable":
writes = []
tw = py.io.TerminalWriter(writes.append)
def getlines():
io = py.io.TextIO()
io.write("".join(writes))
io.seek(0)
return io.readlines()
tw.getlines = getlines
tw.getvalue = lambda: "".join(getlines())
return tw
def test_line(self, tw):
tw.line("hello")
l = tw.getlines()
assert len(l) == 1
assert l[0] == "hello\n"
def test_line_unicode(self, tw):
for encoding in 'utf8', 'latin1':
tw._encoding = encoding
msg = py.builtin._totext('b\u00f6y', 'utf8')
tw.line(msg)
l = tw.getlines()
assert l[0] == msg + "\n"
def test_sep_no_title(self, tw):
tw.sep("-", fullwidth=60)
l = tw.getlines()
assert len(l) == 1
assert l[0] == "-" * (60-win32) + "\n"
def test_sep_with_title(self, tw):
tw.sep("-", "hello", fullwidth=60)
l = tw.getlines()
assert len(l) == 1
assert l[0] == "-" * 26 + " hello " + "-" * (27-win32) + "\n"
@py.test.mark.skipif("sys.platform == 'win32'")
def test__escaped(self, tw):
text2 = tw._escaped("hello", (31))
assert text2.find("hello") != -1
@py.test.mark.skipif("sys.platform == 'win32'")
def test_markup(self, tw):
for bold in (True, False):
for color in ("red", "green"):
text2 = tw.markup("hello", **{color: True, 'bold': bold})
assert text2.find("hello") != -1
py.test.raises(ValueError, "tw.markup('x', wronkw=3)")
py.test.raises(ValueError, "tw.markup('x', wronkw=0)")
def test_line_write_markup(self, tw):
tw.hasmarkup = True
tw.line("x", bold=True)
tw.write("x\n", red=True)
l = tw.getlines()
if sys.platform != "win32":
assert len(l[0]) >= 2, l
assert len(l[1]) >= 2, l
def test_attr_fullwidth(self, tw):
tw.sep("-", "hello", fullwidth=70)
tw.fullwidth = 70
tw.sep("-", "hello")
l = tw.getlines()
assert len(l[0]) == len(l[1])
def test_reline(self, tw):
tw.line("hello")
tw.hasmarkup = False
pytest.raises(ValueError, lambda: tw.reline("x"))
tw.hasmarkup = True
tw.reline("0 1 2")
tw.getlines()
l = tw.getvalue().split("\n")
assert len(l) == 2
tw.reline("0 1 3")
l = tw.getvalue().split("\n")
assert len(l) == 2
assert l[1].endswith("0 1 3\r")
tw.line("so")
l = tw.getvalue().split("\n")
assert len(l) == 3
assert l[-1] == ""
assert l[1] == ("0 1 2\r0 1 3\rso ")
assert l[0] == "hello"
def test_terminal_with_callable_write_and_flush():
l = set()
class fil:
flush = lambda self: l.add("1")
write = lambda self, x: l.add("1")
__call__ = lambda self, x: l.add("2")
tw = py.io.TerminalWriter(fil())
tw.line("hello")
assert l == set(["1"])
del fil.flush
l.clear()
tw = py.io.TerminalWriter(fil())
tw.line("hello")
assert l == set(["2"])
@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
def test_attr_hasmarkup():
tw = py.io.TerminalWriter(stringio=True)
assert not tw.hasmarkup
tw.hasmarkup = True
tw.line("hello", bold=True)
s = tw.stringio.getvalue()
assert len(s) > len("hello\n")
assert '\x1b[1m' in s
assert '\x1b[0m' in s
@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
def test_ansi_print():
# we have no easy way to construct a file that
# represents a terminal
f = py.io.TextIO()
f.isatty = lambda: True
py.io.ansi_print("hello", 0x32, file=f)
text2 = f.getvalue()
assert text2.find("hello") != -1
assert len(text2) >= len("hello\n")
assert '\x1b[50m' in text2
assert '\x1b[0m' in text2
def test_should_do_markup_PY_COLORS_eq_1(monkeypatch):
monkeypatch.setitem(os.environ, 'PY_COLORS', '1')
tw = py.io.TerminalWriter(stringio=True)
assert tw.hasmarkup
tw.line("hello", bold=True)
s = tw.stringio.getvalue()
assert len(s) > len("hello\n")
assert '\x1b[1m' in s
assert '\x1b[0m' in s
def test_should_do_markup_PY_COLORS_eq_0(monkeypatch):
monkeypatch.setitem(os.environ, 'PY_COLORS', '0')
f = py.io.TextIO()
f.isatty = lambda: True
tw = py.io.TerminalWriter(file=f)
assert not tw.hasmarkup
tw.line("hello", bold=True)
s = f.getvalue()
assert s == "hello\n"

View file

@ -0,0 +1,190 @@
import py
import sys
from py._log.log import default_keywordmapper
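# StdCapture.call runs a function while capturing stdout/stderr and returns (result, out, err)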
callcapture = py.io.StdCapture.call
def setup_module(mod):
mod._oldstate = default_keywordmapper.getstate()
def teardown_module(mod):
default_keywordmapper.setstate(mod._oldstate)
class TestLogProducer:
def setup_method(self, meth):
default_keywordmapper.setstate(_oldstate)
def test_getstate_setstate(self):
state = py.log._getstate()
py.log.setconsumer("hello", [].append)
state2 = py.log._getstate()
assert state2 != state
py.log._setstate(state)
state3 = py.log._getstate()
assert state3 == state
def test_producer_repr(self):
d = py.log.Producer("default")
assert repr(d).find('default') != -1
def test_produce_one_keyword(self):
l = []
py.log.setconsumer('s1', l.append)
py.log.Producer('s1')("hello world")
assert len(l) == 1
msg = l[0]
assert msg.content().startswith('hello world')
assert msg.prefix() == '[s1] '
assert str(msg) == "[s1] hello world"
def test_producer_class(self):
p = py.log.Producer('x1')
l = []
py.log.setconsumer(p._keywords, l.append)
p("hello")
assert len(l) == 1
assert len(l[0].keywords) == 1
assert 'x1' == l[0].keywords[0]
def test_producer_caching(self):
p = py.log.Producer('x1')
x2 = p.x2
assert x2 is p.x2
class TestLogConsumer:
def setup_method(self, meth):
default_keywordmapper.setstate(_oldstate)
def test_log_none(self):
log = py.log.Producer("XXX")
l = []
py.log.setconsumer('XXX', l.append)
log("1")
assert l
l[:] = []
py.log.setconsumer('XXX', None)
log("2")
assert not l
def test_log_default_stderr(self):
res, out, err = callcapture(py.log.Producer("default"), "hello")
assert err.strip() == "[default] hello"
def test_simple_consumer_match(self):
l = []
py.log.setconsumer("x1", l.append)
p = py.log.Producer("x1 x2")
p("hello")
assert l
assert l[0].content() == "hello"
def test_simple_consumer_match_2(self):
l = []
p = py.log.Producer("x1 x2")
py.log.setconsumer(p._keywords, l.append)
p("42")
assert l
assert l[0].content() == "42"
def test_no_auto_producer(self):
p = py.log.Producer('x')
py.test.raises(AttributeError, "p._x")
py.test.raises(AttributeError, "p.x_y")
def test_setconsumer_with_producer(self):
l = []
p = py.log.Producer("hello")
py.log.setconsumer(p, l.append)
p("world")
assert str(l[0]) == "[hello] world"
def test_multi_consumer(self):
l = []
py.log.setconsumer("x1", l.append)
py.log.setconsumer("x1 x2", None)
p = py.log.Producer("x1 x2")
p("hello")
assert not l
py.log.Producer("x1")("hello")
assert l
assert l[0].content() == "hello"
def test_log_stderr(self):
py.log.setconsumer("xyz", py.log.STDOUT)
res, out, err = callcapture(py.log.Producer("xyz"), "hello")
assert not err
assert out.strip() == '[xyz] hello'
def test_log_file(self, tmpdir):
customlog = tmpdir.join('log.out')
py.log.setconsumer("default", open(str(customlog), 'w', 1))
py.log.Producer("default")("hello world #1")
assert customlog.readlines() == ['[default] hello world #1\n']
py.log.setconsumer("default", py.log.Path(customlog, buffering=False))
py.log.Producer("default")("hello world #2")
res = customlog.readlines()
assert res == ['[default] hello world #2\n'] # no append by default!
def test_log_file_append_mode(self, tmpdir):
logfilefn = tmpdir.join('log_append.out')
# pass append=True so a new consumer keeps the existing file contents instead of truncating them
py.log.setconsumer("default", py.log.Path(logfilefn, append=True,
buffering=0))
assert logfilefn.check()
py.log.Producer("default")("hello world #1")
lines = logfilefn.readlines()
assert lines == ['[default] hello world #1\n']
py.log.setconsumer("default", py.log.Path(logfilefn, append=True,
buffering=0))
py.log.Producer("default")("hello world #1")
lines = logfilefn.readlines()
assert lines == ['[default] hello world #1\n',
'[default] hello world #1\n']
def test_log_file_delayed_create(self, tmpdir):
logfilefn = tmpdir.join('log_create.out')
py.log.setconsumer("default", py.log.Path(logfilefn,
delayed_create=True, buffering=0))
assert not logfilefn.check()
py.log.Producer("default")("hello world #1")
lines = logfilefn.readlines()
assert lines == ['[default] hello world #1\n']
def test_keyword_based_log_files(self, tmpdir):
logfiles = []
keywords = 'k1 k2 k3'.split()
for key in keywords:
path = tmpdir.join(key)
py.log.setconsumer(key, py.log.Path(path, buffering=0))
py.log.Producer('k1')('1')
py.log.Producer('k2')('2')
py.log.Producer('k3')('3')
for key in keywords:
path = tmpdir.join(key)
assert path.read().strip() == '[%s] %s' % (key, key[-1])
# disabled for now; the syslog log file can usually be read only by root
# I manually inspected /var/log/messages and the entries were there
def no_test_log_syslog(self):
py.log.setconsumer("default", py.log.Syslog())
py.log.default("hello world #1")
# disabled for now until I figure out how to read entries in the
# Event Logs on Windows
# I manually inspected the Application Log and the entries were there
def no_test_log_winevent(self):
py.log.setconsumer("default", py.log.WinEvent())
py.log.default("hello world #1")
# disabled for now until I figure out how to properly pass the parameters
def no_test_log_email(self):
py.log.setconsumer("default", py.log.Email(mailhost="gheorghiu.net",
fromaddr="grig",
toaddrs="grig",
subject = "py.log email"))
py.log.default("hello world #1")

View file

@ -0,0 +1,76 @@
import pytest
import py
mypath = py.path.local(__file__).new(ext=".py")
@pytest.mark.xfail
def test_forwarding_to_warnings_module():
pytest.deprecated_call(py.log._apiwarn, "1.3", "..")
def test_apiwarn_functional(recwarn):
capture = py.io.StdCapture()
py.log._apiwarn("x.y.z", "something", stacklevel=1)
out, err = capture.reset()
py.builtin.print_("out", out)
py.builtin.print_("err", err)
assert err.find("x.y.z") != -1
lno = py.code.getrawcode(test_apiwarn_functional).co_firstlineno + 2
exp = "%s:%s" % (mypath, lno)
assert err.find(exp) != -1
def test_stacklevel(recwarn):
def f():
py.log._apiwarn("x", "some", stacklevel=2)
# 3
# 4
capture = py.io.StdCapture()
f()
out, err = capture.reset()
lno = py.code.getrawcode(test_stacklevel).co_firstlineno + 6
warning = str(err)
assert warning.find(":%s" % lno) != -1
def test_stacklevel_initpkg_with_resolve(testdir, recwarn):
testdir.makepyfile(modabc="""
import py
def f():
py.log._apiwarn("x", "some", stacklevel="apipkg123")
""")
testdir.makepyfile(apipkg123="""
def __getattr__():
import modabc
modabc.f()
""")
p = testdir.makepyfile("""
import apipkg123
apipkg123.__getattr__()
""")
capture = py.io.StdCapture()
p.pyimport()
out, err = capture.reset()
warning = str(err)
loc = 'test_stacklevel_initpkg_with_resolve.py:2'
assert warning.find(loc) != -1
def test_stacklevel_initpkg_no_resolve(recwarn):
def f():
py.log._apiwarn("x", "some", stacklevel="apipkg")
capture = py.io.StdCapture()
f()
out, err = capture.reset()
lno = py.code.getrawcode(test_stacklevel_initpkg_no_resolve).co_firstlineno + 2
warning = str(err)
assert warning.find(":%s" % lno) != -1
def test_function(recwarn):
capture = py.io.StdCapture()
py.log._apiwarn("x.y.z", "something", function=test_function)
out, err = capture.reset()
py.builtin.print_("out", out)
py.builtin.print_("err", err)
assert err.find("x.y.z") != -1
lno = py.code.getrawcode(test_function).co_firstlineno
exp = "%s:%s" % (mypath, lno)
assert err.find(exp) != -1

View file

@ -0,0 +1,470 @@
import py
import sys
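# shared path-API tests; concrete test modules (local, svn, ...) are expected to provide the `path1` fixture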
class CommonFSTests(object):
def test_constructor_equality(self, path1):
p = path1.__class__(path1)
assert p == path1
def test_eq_nonstring(self, path1):
p1 = path1.join('sampledir')
p2 = path1.join('sampledir')
assert p1 == p2
def test_new_identical(self, path1):
assert path1 == path1.new()
def test_join(self, path1):
p = path1.join('sampledir')
strp = str(p)
assert strp.endswith('sampledir')
assert strp.startswith(str(path1))
def test_join_normalized(self, path1):
newpath = path1.join(path1.sep+'sampledir')
strp = str(newpath)
assert strp.endswith('sampledir')
assert strp.startswith(str(path1))
newpath = path1.join((path1.sep*2) + 'sampledir')
strp = str(newpath)
assert strp.endswith('sampledir')
assert strp.startswith(str(path1))
def test_join_noargs(self, path1):
newpath = path1.join()
assert path1 == newpath
def test_add_something(self, path1):
p = path1.join('sample')
p = p + 'dir'
assert p.check()
assert p.exists()
assert p.isdir()
assert not p.isfile()
def test_parts(self, path1):
newpath = path1.join('sampledir', 'otherfile')
par = newpath.parts()[-3:]
assert par == [path1, path1.join('sampledir'), newpath]
revpar = newpath.parts(reverse=True)[:3]
assert revpar == [newpath, path1.join('sampledir'), path1]
def test_common(self, path1):
other = path1.join('sampledir')
x = other.common(path1)
assert x == path1
#def test_parents_nonexisting_file(self, path1):
# newpath = path1 / 'dirnoexist' / 'nonexisting file'
# par = list(newpath.parents())
# assert par[:2] == [path1 / 'dirnoexist', path1]
def test_basename_checks(self, path1):
newpath = path1.join('sampledir')
assert newpath.check(basename='sampledir')
assert newpath.check(notbasename='xyz')
assert newpath.basename == 'sampledir'
def test_basename(self, path1):
newpath = path1.join('sampledir')
assert newpath.check(basename='sampledir')
assert newpath.basename, 'sampledir'
def test_dirname(self, path1):
newpath = path1.join('sampledir')
assert newpath.dirname == str(path1)
def test_dirpath(self, path1):
newpath = path1.join('sampledir')
assert newpath.dirpath() == path1
def test_dirpath_with_args(self, path1):
newpath = path1.join('sampledir')
assert newpath.dirpath('x') == path1.join('x')
def test_newbasename(self, path1):
newpath = path1.join('samplefile')
newbase = newpath.new(basename="samplefile2")
assert newbase.basename == "samplefile2"
assert newbase.dirpath() == newpath.dirpath()
def test_not_exists(self, path1):
assert not path1.join('does_not_exist').check()
assert path1.join('does_not_exist').check(exists=0)
def test_exists(self, path1):
assert path1.join("samplefile").check()
assert path1.join("samplefile").check(exists=1)
assert path1.join("samplefile").exists()
assert path1.join("samplefile").isfile()
assert not path1.join("samplefile").isdir()
def test_dir(self, path1):
#print repr(path1.join("sampledir"))
assert path1.join("sampledir").check(dir=1)
assert path1.join('samplefile').check(notdir=1)
assert not path1.join("samplefile").check(dir=1)
assert path1.join("samplefile").exists()
assert not path1.join("samplefile").isdir()
assert path1.join("samplefile").isfile()
def test_fnmatch_file(self, path1):
assert path1.join("samplefile").check(fnmatch='s*e')
assert path1.join("samplefile").fnmatch('s*e')
assert not path1.join("samplefile").fnmatch('s*x')
assert not path1.join("samplefile").check(fnmatch='s*x')
#def test_fnmatch_dir(self, path1):
# pattern = path1.sep.join(['s*file'])
# sfile = path1.join("samplefile")
# assert sfile.check(fnmatch=pattern)
def test_relto(self, path1):
l=path1.join("sampledir", "otherfile")
assert l.relto(path1) == l.sep.join(["sampledir", "otherfile"])
assert l.check(relto=path1)
assert path1.check(notrelto=l)
assert not path1.check(relto=l)
def test_bestrelpath(self, path1):
curdir = path1
sep = curdir.sep
s = curdir.bestrelpath(curdir)
assert s == "."
s = curdir.bestrelpath(curdir.join("hello", "world"))
assert s == "hello" + sep + "world"
s = curdir.bestrelpath(curdir.dirpath().join("sister"))
assert s == ".." + sep + "sister"
assert curdir.bestrelpath(curdir.dirpath()) == ".."
assert curdir.bestrelpath("hello") == "hello"
def test_relto_not_relative(self, path1):
l1=path1.join("bcde")
l2=path1.join("b")
assert not l1.relto(l2)
assert not l2.relto(l1)
@py.test.mark.xfail("sys.platform.startswith('java')")
def test_listdir(self, path1):
l = path1.listdir()
assert path1.join('sampledir') in l
assert path1.join('samplefile') in l
py.test.raises(py.error.ENOTDIR,
"path1.join('samplefile').listdir()")
def test_listdir_fnmatchstring(self, path1):
l = path1.listdir('s*dir')
assert len(l)
assert l[0], path1.join('sampledir')
def test_listdir_filter(self, path1):
l = path1.listdir(lambda x: x.check(dir=1))
assert path1.join('sampledir') in l
assert not path1.join('samplefile') in l
def test_listdir_sorted(self, path1):
l = path1.listdir(lambda x: x.check(basestarts="sample"), sort=True)
assert path1.join('sampledir') == l[0]
assert path1.join('samplefile') == l[1]
assert path1.join('samplepickle') == l[2]
def test_visit_nofilter(self, path1):
l = []
for i in path1.visit():
l.append(i.relto(path1))
assert "sampledir" in l
assert path1.sep.join(["sampledir", "otherfile"]) in l
def test_visit_norecurse(self, path1):
l = []
for i in path1.visit(None, lambda x: x.basename != "sampledir"):
l.append(i.relto(path1))
assert "sampledir" in l
assert not path1.sep.join(["sampledir", "otherfile"]) in l
def test_visit_filterfunc_is_string(self, path1):
l = []
for i in path1.visit('*dir'):
l.append(i.relto(path1))
assert len(l), 2
assert "sampledir" in l
assert "otherdir" in l
@py.test.mark.xfail("sys.platform.startswith('java')")
def test_visit_ignore(self, path1):
p = path1.join('nonexisting')
assert list(p.visit(ignore=py.error.ENOENT)) == []
def test_visit_endswith(self, path1):
l = []
for i in path1.visit(lambda x: x.check(endswith="file")):
l.append(i.relto(path1))
assert path1.sep.join(["sampledir", "otherfile"]) in l
assert "samplefile" in l
def test_endswith(self, path1):
assert path1.check(notendswith='.py')
x = path1.join('samplefile')
assert x.check(endswith='file')
def test_cmp(self, path1):
path1 = path1.join('samplefile')
path2 = path1.join('samplefile2')
assert (path1 < path2) == ('samplefile' < 'samplefile2')
assert not (path1 < path1)
def test_simple_read(self, path1):
x = path1.join('samplefile').read('r')
assert x == 'samplefile\n'
def test_join_div_operator(self, path1):
newpath = path1 / '/sampledir' / '/test//'
newpath2 = path1.join('sampledir', 'test')
assert newpath == newpath2
def test_ext(self, path1):
newpath = path1.join('sampledir.ext')
assert newpath.ext == '.ext'
newpath = path1.join('sampledir')
assert not newpath.ext
def test_purebasename(self, path1):
newpath = path1.join('samplefile.py')
assert newpath.purebasename == 'samplefile'
def test_multiple_parts(self, path1):
newpath = path1.join('samplefile.py')
dirname, purebasename, basename, ext = newpath._getbyspec(
'dirname,purebasename,basename,ext')
assert str(path1).endswith(dirname) # be careful with win32 'drive'
assert purebasename == 'samplefile'
assert basename == 'samplefile.py'
assert ext == '.py'
def test_dotted_name_ext(self, path1):
newpath = path1.join('a.b.c')
ext = newpath.ext
assert ext == '.c'
assert newpath.ext == '.c'
def test_newext(self, path1):
newpath = path1.join('samplefile.py')
newext = newpath.new(ext='.txt')
assert newext.basename == "samplefile.txt"
assert newext.purebasename == "samplefile"
def test_readlines(self, path1):
fn = path1.join('samplefile')
contents = fn.readlines()
assert contents == ['samplefile\n']
def test_readlines_nocr(self, path1):
fn = path1.join('samplefile')
contents = fn.readlines(cr=0)
assert contents == ['samplefile', '']
def test_file(self, path1):
assert path1.join('samplefile').check(file=1)
def test_not_file(self, path1):
assert not path1.join("sampledir").check(file=1)
assert path1.join("sampledir").check(file=0)
def test_non_existent(self, path1):
assert path1.join("sampledir.nothere").check(dir=0)
assert path1.join("sampledir.nothere").check(file=0)
assert path1.join("sampledir.nothere").check(notfile=1)
assert path1.join("sampledir.nothere").check(notdir=1)
assert path1.join("sampledir.nothere").check(notexists=1)
assert not path1.join("sampledir.nothere").check(notfile=0)
# pattern = path1.sep.join(['s*file'])
# sfile = path1.join("samplefile")
# assert sfile.check(fnmatch=pattern)
def test_size(self, path1):
url = path1.join("samplefile")
assert url.size() > len("samplefile")
def test_mtime(self, path1):
url = path1.join("samplefile")
assert url.mtime() > 0
def test_relto_wrong_type(self, path1):
py.test.raises(TypeError, "path1.relto(42)")
def test_load(self, path1):
p = path1.join('samplepickle')
obj = p.load()
assert type(obj) is dict
assert obj.get('answer',None) == 42
def test_visit_filesonly(self, path1):
l = []
for i in path1.visit(lambda x: x.check(file=1)):
l.append(i.relto(path1))
assert not "sampledir" in l
assert path1.sep.join(["sampledir", "otherfile"]) in l
def test_visit_nodotfiles(self, path1):
l = []
for i in path1.visit(lambda x: x.check(dotfile=0)):
l.append(i.relto(path1))
assert "sampledir" in l
assert path1.sep.join(["sampledir", "otherfile"]) in l
assert not ".dotfile" in l
def test_visit_breadthfirst(self, path1):
l = []
for i in path1.visit(bf=True):
l.append(i.relto(path1))
for i, p in enumerate(l):
if path1.sep in p:
for j in range(i, len(l)):
assert path1.sep in l[j]
break
else:
py.test.fail("huh")
def test_visit_sort(self, path1):
l = []
for i in path1.visit(bf=True, sort=True):
l.append(i.relto(path1))
for i, p in enumerate(l):
if path1.sep in p:
break
assert l[:i] == sorted(l[:i])
assert l[i:] == sorted(l[i:])
    def test_endswith_pickle(self, path1):
def chk(p):
return p.check(endswith="pickle")
assert not chk(path1)
assert not chk(path1.join('samplefile'))
assert chk(path1.join('somepickle'))
def test_copy_file(self, path1):
otherdir = path1.join('otherdir')
initpy = otherdir.join('__init__.py')
copied = otherdir.join('copied')
initpy.copy(copied)
try:
assert copied.check()
s1 = initpy.read()
s2 = copied.read()
assert s1 == s2
finally:
if copied.check():
copied.remove()
def test_copy_dir(self, path1):
otherdir = path1.join('otherdir')
copied = path1.join('newdir')
try:
otherdir.copy(copied)
assert copied.check(dir=1)
assert copied.join('__init__.py').check(file=1)
s1 = otherdir.join('__init__.py').read()
s2 = copied.join('__init__.py').read()
assert s1 == s2
finally:
if copied.check(dir=1):
copied.remove(rec=1)
def test_remove_file(self, path1):
d = path1.ensure('todeleted')
assert d.check()
d.remove()
assert not d.check()
def test_remove_dir_recursive_by_default(self, path1):
d = path1.ensure('to', 'be', 'deleted')
assert d.check()
p = path1.join('to')
p.remove()
assert not p.check()
def test_ensure_dir(self, path1):
b = path1.ensure_dir("001", "002")
assert b.basename == "002"
assert b.isdir()
def test_mkdir_and_remove(self, path1):
tmpdir = path1
py.test.raises(py.error.EEXIST, tmpdir.mkdir, 'sampledir')
new = tmpdir.join('mktest1')
new.mkdir()
assert new.check(dir=1)
new.remove()
new = tmpdir.mkdir('mktest')
assert new.check(dir=1)
new.remove()
assert tmpdir.join('mktest') == new
def test_move_file(self, path1):
p = path1.join('samplefile')
newp = p.dirpath('moved_samplefile')
p.move(newp)
try:
assert newp.check(file=1)
assert not p.check()
finally:
dp = newp.dirpath()
if hasattr(dp, 'revert'):
dp.revert()
else:
newp.move(p)
assert p.check()
def test_move_dir(self, path1):
source = path1.join('sampledir')
dest = path1.join('moveddir')
source.move(dest)
assert dest.check(dir=1)
assert dest.join('otherfile').check(file=1)
assert not source.join('sampledir').check()
def setuptestfs(path):
if path.join('samplefile').check():
return
#print "setting up test fs for", repr(path)
samplefile = path.ensure('samplefile')
samplefile.write('samplefile\n')
execfile = path.ensure('execfile')
execfile.write('x=42')
execfilepy = path.ensure('execfile.py')
execfilepy.write('x=42')
d = {1:2, 'hello': 'world', 'answer': 42}
path.ensure('samplepickle').dump(d)
sampledir = path.ensure('sampledir', dir=1)
sampledir.ensure('otherfile')
otherdir = path.ensure('otherdir', dir=1)
otherdir.ensure('__init__.py')
module_a = otherdir.ensure('a.py')
if sys.version_info >= (2,6):
module_a.write('from .b import stuff as result\n')
else:
module_a.write('from b import stuff as result\n')
module_b = otherdir.ensure('b.py')
module_b.write('stuff="got it"\n')
module_c = otherdir.ensure('c.py')
module_c.write('''import py;
import otherdir.a
value = otherdir.a.result
''')
module_d = otherdir.ensure('d.py')
module_d.write('''import py;
from otherdir import a
value2 = a.result
''')
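# --- illustrative usage sketch; not part of the vendored test file above ---
# A minimal, hedged example of how setuptestfs() and the py.path.local API
# exercised by the tests above fit together.  The scratch directory, the
# __main__ guard and the cleanup call are assumptions made for illustration
# only, and the snippet relies on this module's own imports (py, sys).
if __name__ == '__main__':
    scratch = py.path.local.mkdtemp()           # hypothetical scratch directory
    try:
        setuptestfs(scratch)                    # build the sample tree defined above
        assert scratch.join('samplefile').check(file=1)
        assert scratch.join('sampledir', 'otherfile').check(file=1)
        names = [p.relto(scratch) for p in scratch.visit(lambda x: x.check(file=1))]
        assert 'samplefile' in names
    finally:
        scratch.remove(rec=1)                   # discard the scratch tree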

View file

@ -0,0 +1,80 @@
import py
import sys
from py._path import svnwc as svncommon
svnbin = py.path.local.sysfind('svn')
repodump = py.path.local(__file__).dirpath('repotest.dump')
from py.builtin import print_
def pytest_funcarg__repowc1(request):
if svnbin is None:
py.test.skip("svn binary not found")
tmpdir = request.getfuncargvalue("tmpdir")
repo, repourl, wc = request.cached_setup(
setup=lambda: getrepowc(tmpdir, "path1repo", "path1wc"),
scope="module",
)
for x in ('test_remove', 'test_move', 'test_status_deleted'):
if request.function.__name__.startswith(x):
#print >>sys.stderr, ("saving repo", repo, "for", request.function)
_savedrepowc = save_repowc(repo, wc)
request.addfinalizer(lambda: restore_repowc(_savedrepowc))
return repo, repourl, wc
def pytest_funcarg__repowc2(request):
tmpdir = request.getfuncargvalue("tmpdir")
name = request.function.__name__
repo, url, wc = getrepowc(tmpdir, "%s-repo-2" % name, "%s-wc-2" % name)
return repo, url, wc
def getsvnbin():
if svnbin is None:
py.test.skip("svn binary not found")
return svnbin
# make a wc directory out of a given root url
# cache previously obtained wcs!
#
def getrepowc(tmpdir, reponame='basetestrepo', wcname='wc'):
repo = tmpdir.mkdir(reponame)
wcdir = tmpdir.mkdir(wcname)
repo.ensure(dir=1)
py.process.cmdexec('svnadmin create "%s"' %
svncommon._escape_helper(repo))
py.process.cmdexec('svnadmin load -q "%s" <"%s"' %
(svncommon._escape_helper(repo), repodump))
print_("created svn repository", repo)
wcdir.ensure(dir=1)
wc = py.path.svnwc(wcdir)
if py.std.sys.platform == 'win32':
repourl = "file://" + '/' + str(repo).replace('\\', '/')
else:
repourl = "file://%s" % repo
wc.checkout(repourl)
print_("checked out new repo into", wc)
return (repo, repourl, wc)
def save_repowc(repo, wc):
assert not str(repo).startswith("file://"), repo
assert repo.check()
savedrepo = repo.dirpath(repo.basename+".1")
savedwc = wc.dirpath(wc.basename+".1")
repo.copy(savedrepo)
wc.localpath.copy(savedwc.localpath)
return savedrepo, savedwc
def restore_repowc(obj):
savedrepo, savedwc = obj
#print >>sys.stderr, ("restoring", savedrepo)
repo = savedrepo.new(basename=savedrepo.basename[:-2])
assert repo.check()
wc = savedwc.new(basename=savedwc.basename[:-2])
assert wc.check()
wc.localpath.remove()
repo.remove()
savedrepo.move(repo)
savedwc.localpath.move(wc.localpath)
py.path.svnurl._lsnorevcache.clear()
py.path.svnurl._lsrevcache.clear()
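# --- illustrative usage sketch; not part of the vendored helper file above ---
# A hedged example of how the repowc1 funcarg defined above is consumed:
# old-style pytest passes the (repo, repourl, wc) tuple returned by
# pytest_funcarg__repowc1 to any test whose argument is named repowc1.
# The test name and the specific assertions are assumptions for illustration.
def test_checkout_contains_dump_content(repowc1):
    repo, repourl, wc = repowc1
    assert wc.join('samplefile').check(file=1)               # added in rev 1 of repotest.dump
    assert wc.join('otherdir', '__init__.py').check(file=1)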

View file

@ -0,0 +1,228 @@
SVN-fs-dump-format-version: 2
UUID: 876a30f4-1eed-0310-aeb7-ae314d1e5934
Revision-number: 0
Prop-content-length: 56
Content-length: 56
K 8
svn:date
V 27
2005-01-07T23:55:31.755989Z
PROPS-END
Revision-number: 1
Prop-content-length: 118
Content-length: 118
K 7
svn:log
V 20
testrepo setup rev 1
K 10
svn:author
V 3
hpk
K 8
svn:date
V 27
2005-01-07T23:55:37.815386Z
PROPS-END
Node-path: execfile
Node-kind: file
Node-action: add
Prop-content-length: 10
Text-content-length: 4
Text-content-md5: d4b5bc61e16310f08c5d11866eba0a22
Content-length: 14
PROPS-END
x=42
Node-path: otherdir
Node-kind: dir
Node-action: add
Prop-content-length: 10
Content-length: 10
PROPS-END
Node-path: otherdir/__init__.py
Node-kind: file
Node-action: add
Prop-content-length: 10
Text-content-length: 0
Text-content-md5: d41d8cd98f00b204e9800998ecf8427e
Content-length: 10
PROPS-END
Node-path: otherdir/a.py
Node-kind: file
Node-action: add
Prop-content-length: 10
Text-content-length: 30
Text-content-md5: 247c7daeb2ee5dcab0aba7bd12bad665
Content-length: 40
PROPS-END
from b import stuff as result
Node-path: otherdir/b.py
Node-kind: file
Node-action: add
Prop-content-length: 10
Text-content-length: 15
Text-content-md5: c1b13503469a7711306d03a4b0721bc6
Content-length: 25
PROPS-END
stuff="got it"
Node-path: otherdir/c.py
Node-kind: file
Node-action: add
Prop-content-length: 10
Text-content-length: 75
Text-content-md5: 250cdb6b5df68536152c681f48297569
Content-length: 85
PROPS-END
import py; py.magic.autopath()
import otherdir.a
value = otherdir.a.result
Node-path: otherdir/d.py
Node-kind: file
Node-action: add
Prop-content-length: 10
Text-content-length: 72
Text-content-md5: 940c9c621e7b198e081459642c37f5a7
Content-length: 82
PROPS-END
import py; py.magic.autopath()
from otherdir import a
value2 = a.result
Node-path: sampledir
Node-kind: dir
Node-action: add
Prop-content-length: 10
Content-length: 10
PROPS-END
Node-path: sampledir/otherfile
Node-kind: file
Node-action: add
Prop-content-length: 10
Text-content-length: 0
Text-content-md5: d41d8cd98f00b204e9800998ecf8427e
Content-length: 10
PROPS-END
Node-path: samplefile
Node-kind: file
Node-action: add
Prop-content-length: 40
Text-content-length: 11
Text-content-md5: 9225ac28b32156979ab6482b8bb5fb8c
Content-length: 51
K 13
svn:eol-style
V 6
native
PROPS-END
samplefile
Node-path: samplepickle
Node-kind: file
Node-action: add
Prop-content-length: 10
Text-content-length: 56
Text-content-md5: 719d85c1329a33134bb98f56b756c545
Content-length: 66
PROPS-END
(dp1
S'answer'
p2
I42
sI1
I2
sS'hello'
p3
S'world'
p4
s.
Revision-number: 2
Prop-content-length: 108
Content-length: 108
K 7
svn:log
V 10
second rev
K 10
svn:author
V 3
hpk
K 8
svn:date
V 27
2005-01-07T23:55:39.223202Z
PROPS-END
Node-path: anotherfile
Node-kind: file
Node-action: add
Prop-content-length: 10
Text-content-length: 5
Text-content-md5: 5d41402abc4b2a76b9719d911017c592
Content-length: 15
PROPS-END
hello
Revision-number: 3
Prop-content-length: 106
Content-length: 106
K 7
svn:log
V 9
third rev
K 10
svn:author
V 3
hpk
K 8
svn:date
V 27
2005-01-07T23:55:41.556642Z
PROPS-END
Node-path: anotherfile
Node-kind: file
Node-action: change
Text-content-length: 5
Text-content-md5: 7d793037a0760186574b0282f2f435e7
Content-length: 5
world

View file

@ -0,0 +1,31 @@
import sys
import py
from py._path import svnwc as svncommon
from common import CommonFSTests
class CommonSvnTests(CommonFSTests):
def test_propget(self, path1):
url = path1.join("samplefile")
value = url.propget('svn:eol-style')
assert value == 'native'
def test_proplist(self, path1):
url = path1.join("samplefile")
res = url.proplist()
assert res['svn:eol-style'] == 'native'
def test_info(self, path1):
url = path1.join("samplefile")
res = url.info()
assert res.size > len("samplefile") and res.created_rev >= 0
def test_log_simple(self, path1):
url = path1.join("samplefile")
logentries = url.log()
for logentry in logentries:
assert logentry.rev == 1
assert hasattr(logentry, 'author')
assert hasattr(logentry, 'date')
#cache.repositories.put(svnrepourl, 1200, 0)
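# --- illustrative sketch; not part of the vendored file above ---
# A hedged example of how CommonSvnTests is meant to be reused: a concrete
# test class supplies the path1 funcarg, here backed by the working copy from
# the repowc1 helper shown earlier.  The class name and the funcarg wiring are
# assumptions for illustration only.
class TestCommonSvnWC(CommonSvnTests):
    def pytest_funcarg__path1(self, request):
        repo, repourl, wc = request.getfuncargvalue("repowc1")
        return wc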

View file

@ -0,0 +1,84 @@
import py
from py._path import cacheutil
class BasicCacheAPITest:
cache = None
def test_getorbuild(self):
val = self.cache.getorbuild(-42, lambda: 42)
assert val == 42
val = self.cache.getorbuild(-42, lambda: 23)
assert val == 42
def test_cache_get_key_error(self):
py.test.raises(KeyError, "self.cache._getentry(-23)")
def test_delentry_non_raising(self):
val = self.cache.getorbuild(100, lambda: 100)
self.cache.delentry(100)
py.test.raises(KeyError, "self.cache._getentry(100)")
def test_delentry_raising(self):
val = self.cache.getorbuild(100, lambda: 100)
self.cache.delentry(100)
py.test.raises(KeyError, "self.cache.delentry(100, raising=True)")
def test_clear(self):
self.cache.clear()
class TestBuildcostAccess(BasicCacheAPITest):
cache = cacheutil.BuildcostAccessCache(maxentries=128)
def test_cache_works_somewhat_simple(self, monkeypatch):
cache = cacheutil.BuildcostAccessCache()
        # the default gettime used by BuildcostAccessCache.build can
        # result in time()-time() == 0, which makes the test below
        # fail randomly.  Let's rather use incrementing
        # numbers instead.
l = [0]
def counter():
l[0] = l[0] + 1
return l[0]
monkeypatch.setattr(cacheutil, 'gettime', counter)
for x in range(cache.maxentries):
y = cache.getorbuild(x, lambda: x)
assert x == y
for x in range(cache.maxentries):
assert cache.getorbuild(x, None) == x
halfentries = int(cache.maxentries / 2)
for x in range(halfentries):
assert cache.getorbuild(x, None) == x
assert cache.getorbuild(x, None) == x
# evict one entry
val = cache.getorbuild(-1, lambda: 42)
assert val == 42
# check that recently used ones are still there
# and are not build again
for x in range(halfentries):
assert cache.getorbuild(x, None) == x
assert cache.getorbuild(-1, None) == 42
class TestAging(BasicCacheAPITest):
maxsecs = 0.10
cache = cacheutil.AgingCache(maxentries=128, maxseconds=maxsecs)
def test_cache_eviction(self):
self.cache.getorbuild(17, lambda: 17)
endtime = py.std.time.time() + self.maxsecs * 10
while py.std.time.time() < endtime:
try:
self.cache._getentry(17)
except KeyError:
break
py.std.time.sleep(self.maxsecs*0.3)
else:
py.test.fail("waiting for cache eviction failed")
def test_prune_lowestweight():
maxsecs = 0.05
cache = cacheutil.AgingCache(maxentries=10, maxseconds=maxsecs)
for x in range(cache.maxentries):
cache.getorbuild(x, lambda: x)
py.std.time.sleep(maxsecs*1.1)
cache.getorbuild(cache.maxentries+1, lambda: 42)
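# --- illustrative usage sketch; not part of the vendored test file above ---
# A hedged recap of the cacheutil API exercised above: getorbuild() returns the
# cached value for a known key and otherwise calls the builder and stores its
# result, while delentry() evicts a single key.  The key and values below are
# arbitrary illustration data.
if __name__ == '__main__':
    demo = cacheutil.BuildcostAccessCache(maxentries=4)
    assert demo.getorbuild('key', lambda: 'built') == 'built'    # builder runs, result cached
    assert demo.getorbuild('key', lambda: 'other') == 'built'    # cache hit, builder ignored
    demo.delentry('key')                                         # evict; next call rebuilds
    assert demo.getorbuild('key', lambda: 'other') == 'other'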

Some files were not shown because too many files have changed in this diff.