Update: remove venv

Signed-off-by: Nicolas Lamirault <nicolas.lamirault@gmail.com>
pull/342/head
Nicolas Lamirault 5 years ago
parent 5100e2fc42
commit 459b0258f2
GPG Key ID: 65D6BB0526B7051F

@ -1,230 +0,0 @@
<#
.Synopsis
Activate a Python virtual environment for the current Powershell session.
.Description
Pushes the python executable for a virtual environment to the front of the
$Env:PATH environment variable and sets the prompt to signify that you are
in a Python virtual environment. Makes use of the command line switches as
well as the `pyvenv.cfg` file values present in the virtual environment.
.Parameter VenvDir
Path to the directory that contains the virtual environment to activate. The
default value for this is the parent of the directory that the Activate.ps1
script is located within.
.Parameter Prompt
The prompt prefix to display when this virtual environment is activated. By
default, this prompt is the name of the virtual environment folder (VenvDir)
surrounded by parentheses and followed by a single space (i.e. '(.venv) ').
.Example
Activate.ps1
Activates the Python virtual environment that contains the Activate.ps1 script.
.Example
Activate.ps1 -Verbose
Activates the Python virtual environment that contains the Activate.ps1 script,
and shows extra information about the activation as it executes.
.Example
Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv
Activates the Python virtual environment located in the specified location.
.Example
Activate.ps1 -Prompt "MyPython"
Activates the Python virtual environment that contains the Activate.ps1 script,
and prefixes the current prompt with the specified string (surrounded in
parentheses) while the virtual environment is active.
#>
Param(
[Parameter(Mandatory = $false)]
[String]
$VenvDir,
[Parameter(Mandatory = $false)]
[String]
$Prompt
)
<# Function declarations --------------------------------------------------- #>
<#
.Synopsis
Remove all shell session elements added by the Activate script, including
removing the virtual environment's Python executable from the beginning of
the PATH variable.
.Parameter NonDestructive
If present, do not remove this function from the global namespace for the
session.
#>
function global:deactivate ([switch]$NonDestructive) {
# Revert to original values
# The prior prompt:
if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) {
Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt
Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT
}
# The prior PYTHONHOME:
if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) {
Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME
Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME
}
# The prior PATH:
if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) {
Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH
Remove-Item -Path Env:_OLD_VIRTUAL_PATH
}
# Just remove the VIRTUAL_ENV altogether:
if (Test-Path -Path Env:VIRTUAL_ENV) {
Remove-Item -Path env:VIRTUAL_ENV
}
# Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether:
if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) {
Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force
}
# Leave deactivate function in the global namespace if requested:
if (-not $NonDestructive) {
Remove-Item -Path function:deactivate
}
}
<#
.Description
Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the
given folder, and returns them in a map.
For each line in the pyvenv.cfg file, if that line can be parsed into exactly
two strings separated by `=` (with any amount of whitespace surrounding the =)
then it is considered a `key = value` line. The left hand string is the key,
the right hand is the value.
If the value starts with a `'` or a `"` then the first and last characters are
stripped from the value before being captured.
.Parameter ConfigDir
Path to the directory that contains the `pyvenv.cfg` file.
#>
function Get-PyVenvConfig(
[String]
$ConfigDir
) {
Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg"
# Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue).
$pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue
# An empty map will be returned if no config file is found.
$pyvenvConfig = @{ }
if ($pyvenvConfigPath) {
Write-Verbose "File exists, parse `key = value` lines"
$pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath
$pyvenvConfigContent | ForEach-Object {
$keyval = $PSItem -split "\s*=\s*", 2
if ($keyval[0] -and $keyval[1]) {
$val = $keyval[1]
# Remove extraneous quotations around a string value.
if ("'""".Contains($val.Substring(0,1))) {
$val = $val.Substring(1, $val.Length - 2)
}
$pyvenvConfig[$keyval[0]] = $val
Write-Verbose "Adding Key: '$($keyval[0])'='$val'"
}
}
}
return $pyvenvConfig
}
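For comparison, the same `key = value` rule (split on the first `=`, trim surrounding whitespace, strip one pair of enclosing quotes) can be sketched in Python. This is only an illustration of the parsing described above, not part of the venv tooling; the helper name is made up.
from pathlib import Path

def read_pyvenv_cfg(config_dir):
    """Illustrative re-implementation of the pyvenv.cfg parsing described above."""
    cfg = {}
    path = Path(config_dir) / "pyvenv.cfg"
    if not path.exists():
        return cfg  # an empty map when no config file is found
    for line in path.read_text().splitlines():
        key, sep, val = (part.strip() for part in line.partition("="))
        if sep and key and val:
            if val[:1] in ("'", '"'):
                val = val[1:-1]  # drop the enclosing quote characters
            cfg[key] = val
    return cfg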
<# Begin Activate script --------------------------------------------------- #>
# Determine the containing directory of this script
$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition
$VenvExecDir = Get-Item -Path $VenvExecPath
Write-Verbose "Activation script is located in path: '$VenvExecPath'"
Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)'"
Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)'"
# Set values required in priority: CmdLine, ConfigFile, Default
# First, get the location of the virtual environment, it might not be
# VenvExecDir if specified on the command line.
if ($VenvDir) {
Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values"
} else {
Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir."
$VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/")
Write-Verbose "VenvDir=$VenvDir"
}
# Next, read the `pyvenv.cfg` file to determine any required value such
# as `prompt`.
$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir
# Next, set the prompt from the command line, or the config file, or
# just use the name of the virtual environment folder.
if ($Prompt) {
Write-Verbose "Prompt specified as argument, using '$Prompt'"
} else {
Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value"
if ($pyvenvCfg -and $pyvenvCfg['prompt']) {
Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'"
$Prompt = $pyvenvCfg['prompt'];
}
else {
Write-Verbose " Setting prompt based on the parent directory's name. (This is the directory name passed to the venv module when creating the virtual environment.)"
Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'"
$Prompt = Split-Path -Path $venvDir -Leaf
}
}
Write-Verbose "Prompt = '$Prompt'"
Write-Verbose "VenvDir='$VenvDir'"
# Deactivate any currently active virtual environment, but leave the
# deactivate function in place.
deactivate -nondestructive
# Now set the environment variable VIRTUAL_ENV, used by many tools to determine
# that there is an activated venv.
$env:VIRTUAL_ENV = $VenvDir
if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) {
Write-Verbose "Setting prompt to '$Prompt'"
# Set the prompt to include the env name
# Make sure _OLD_VIRTUAL_PROMPT is global
function global:_OLD_VIRTUAL_PROMPT { "" }
Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT
New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt
function global:prompt {
Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) "
_OLD_VIRTUAL_PROMPT
}
}
# Clear PYTHONHOME
if (Test-Path -Path Env:PYTHONHOME) {
Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME
Remove-Item -Path Env:PYTHONHOME
}
# Add the venv to the PATH
Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH
$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH"

@ -1,76 +0,0 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
deactivate () {
# reset old environment variables
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
PATH="${_OLD_VIRTUAL_PATH:-}"
export PATH
unset _OLD_VIRTUAL_PATH
fi
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
export PYTHONHOME
unset _OLD_VIRTUAL_PYTHONHOME
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
hash -r
fi
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
PS1="${_OLD_VIRTUAL_PS1:-}"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
if [ ! "${1:-}" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate
fi
}
# unset irrelevant variables
deactivate nondestructive
VIRTUAL_ENV="/home/nicolas/Projects/diagrams/venv"
export VIRTUAL_ENV
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH
# unset PYTHONHOME if set
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
# could use `if (set -u; : $PYTHONHOME) ;` in bash
if [ -n "${PYTHONHOME:-}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
unset PYTHONHOME
fi
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
_OLD_VIRTUAL_PS1="${PS1:-}"
if [ "x(venv) " != x ] ; then
PS1="(venv) ${PS1:-}"
else
if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
# special case for Aspen magic directories
# see http://www.zetadev.com/software/aspen/
PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
else
PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
fi
fi
export PS1
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
hash -r
fi

@ -1,37 +0,0 @@
# This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
# Unset irrelevant variables.
deactivate nondestructive
setenv VIRTUAL_ENV "/home/nicolas/Projects/diagrams/venv"
set _OLD_VIRTUAL_PATH="$PATH"
setenv PATH "$VIRTUAL_ENV/bin:$PATH"
set _OLD_VIRTUAL_PROMPT="$prompt"
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
if ("venv" != "") then
set env_name = "venv"
else
if (`basename "$VIRTUAL_ENV"` == "__") then
# special case for Aspen magic directories
# see http://www.zetadev.com/software/aspen/
set env_name = `basename \`dirname "$VIRTUAL_ENV"\``
else
set env_name = `basename "$VIRTUAL_ENV"`
endif
endif
set prompt = "[$env_name] $prompt"
unset env_name
endif
alias pydoc python -m pydoc
rehash

@ -1,75 +0,0 @@
# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org)
# you cannot run it directly
function deactivate -d "Exit virtualenv and return to normal shell environment"
# reset old environment variables
if test -n "$_OLD_VIRTUAL_PATH"
set -gx PATH $_OLD_VIRTUAL_PATH
set -e _OLD_VIRTUAL_PATH
end
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME
set -e _OLD_VIRTUAL_PYTHONHOME
end
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
functions -e fish_prompt
set -e _OLD_FISH_PROMPT_OVERRIDE
functions -c _old_fish_prompt fish_prompt
functions -e _old_fish_prompt
end
set -e VIRTUAL_ENV
if test "$argv[1]" != "nondestructive"
# Self destruct!
functions -e deactivate
end
end
# unset irrelevant variables
deactivate nondestructive
set -gx VIRTUAL_ENV "/home/nicolas/Projects/diagrams/venv"
set -gx _OLD_VIRTUAL_PATH $PATH
set -gx PATH "$VIRTUAL_ENV/bin" $PATH
# unset PYTHONHOME if set
if set -q PYTHONHOME
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
set -e PYTHONHOME
end
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
# fish uses a function instead of an env var to generate the prompt.
# save the current fish_prompt function as the function _old_fish_prompt
functions -c fish_prompt _old_fish_prompt
# with the original prompt function renamed, we can override with our own.
function fish_prompt
# Save the return status of the last command
set -l old_status $status
# Prompt override?
if test -n "(venv) "
printf "%s%s" "(venv) " (set_color normal)
else
# ...Otherwise, prepend env
set -l _checkbase (basename "$VIRTUAL_ENV")
if test $_checkbase = "__"
# special case for Aspen magic directories
# see http://www.zetadev.com/software/aspen/
printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal)
else
printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal)
end
end
# Restore the return status of the previous command.
echo "exit $old_status" | .
_old_fish_prompt
end
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
end

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from black import patched_main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(patched_main())
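Each of the console-script wrappers in this and the following hunks follows the same pattern: normalize sys.argv[0] by stripping a possible -script.py, -script.pyw or .exe suffix (added by setuptools launchers on Windows), then exit with the package's entry point. A standalone sketch of what that substitution does, using made-up paths:
import re

for argv0 in ("/venv/bin/black", r"C:\venv\Scripts\black-script.py", r"C:\venv\Scripts\black.exe"):
    # strip the launcher suffix so tools inspecting argv[0] see the plain command name
    print(re.sub(r'(-script\.pyw?|\.exe)?$', '', argv0))
# /venv/bin/black
# C:\venv\Scripts\black
# C:\venv\Scripts\black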

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from blackd import patched_main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(patched_main())

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_epylint
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_epylint())

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from isort.main import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pytest import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pylint
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pylint())

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pyreverse())

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pytest import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())

@ -1 +0,0 @@
python3

@ -1 +0,0 @@
/usr/bin/python3

@ -1,10 +0,0 @@
#!/home/nicolas/Projects/diagrams/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_symilar())

@ -1,39 +0,0 @@
Copyright © 2014 by the Pallets team.
Some rights reserved.
Redistribution and use in source and binary forms of the software as
well as documentation, with or without modification, are permitted
provided that the following conditions are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
- Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
----
Click uses parts of optparse written by Gregory P. Ward and maintained
by the Python Software Foundation. This is limited to code in parser.py.
Copyright © 2001-2006 Gregory P. Ward. All rights reserved.
Copyright © 2002-2006 Python Software Foundation. All rights reserved.

@ -1,121 +0,0 @@
Metadata-Version: 2.1
Name: Click
Version: 7.0
Summary: Composable command line interface toolkit
Home-page: https://palletsprojects.com/p/click/
Author: Armin Ronacher
Author-email: armin.ronacher@active-4.com
Maintainer: Pallets Team
Maintainer-email: contact@palletsprojects.com
License: BSD
Project-URL: Documentation, https://click.palletsprojects.com/
Project-URL: Code, https://github.com/pallets/click
Project-URL: Issue tracker, https://github.com/pallets/click/issues
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
\$ click\_
==========
Click is a Python package for creating beautiful command line interfaces
in a composable way with as little code as necessary. It's the "Command
Line Interface Creation Kit". It's highly configurable but comes with
sensible defaults out of the box.
It aims to make the process of writing command line tools quick and fun
while also preventing any frustration caused by the inability to
implement an intended CLI API.
Click in three points:
- Arbitrary nesting of commands
- Automatic help page generation
- Supports lazy loading of subcommands at runtime
Installing
----------
Install and update using `pip`_:
.. code-block:: text
$ pip install click
Click supports Python 3.4 and newer, Python 2.7, and PyPy.
.. _pip: https://pip.pypa.io/en/stable/quickstart/
A Simple Example
----------------
What does it look like? Here is an example of a simple Click program:
.. code-block:: python
import click

@click.command()
@click.option("--count", default=1, help="Number of greetings.")
@click.option("--name", prompt="Your name",
              help="The person to greet.")
def hello(count, name):
    """Simple program that greets NAME for a total of COUNT times."""
    for _ in range(count):
        click.echo("Hello, %s!" % name)

if __name__ == '__main__':
    hello()
And what it looks like when run:
.. code-block:: text
$ python hello.py --count=3
Your name: Click
Hello, Click!
Hello, Click!
Hello, Click!
Donate
------
The Pallets organization develops and supports Click and other popular
packages. In order to grow the community of contributors and users, and
allow the maintainers to devote more time to the projects, `please
donate today`_.
.. _please donate today: https://palletsprojects.com/donate
Links
-----
* Website: https://palletsprojects.com/p/click/
* Documentation: https://click.palletsprojects.com/
* License: `BSD <https://github.com/pallets/click/blob/master/LICENSE.rst>`_
* Releases: https://pypi.org/project/click/
* Code: https://github.com/pallets/click
* Issue tracker: https://github.com/pallets/click/issues
* Test status:
* Linux, Mac: https://travis-ci.org/pallets/click
* Windows: https://ci.appveyor.com/project/pallets/click
* Test coverage: https://codecov.io/gh/pallets/click

@ -1,40 +0,0 @@
Click-7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
Click-7.0.dist-info/LICENSE.txt,sha256=4hIxn676T0Wcisk3_chVcECjyrivKTZsoqSNI5AlIlw,1876
Click-7.0.dist-info/METADATA,sha256=-r8jeke3Zer4diRvT1MjFZuiJ6yTT_qFP39svLqdaLI,3516
Click-7.0.dist-info/RECORD,,
Click-7.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110
Click-7.0.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6
click/__init__.py,sha256=HjGThQ7tef9kkwCV371TBnrf0SAi6fKfU_jtEnbYTvQ,2789
click/__pycache__/__init__.cpython-38.pyc,,
click/__pycache__/_bashcomplete.cpython-38.pyc,,
click/__pycache__/_compat.cpython-38.pyc,,
click/__pycache__/_termui_impl.cpython-38.pyc,,
click/__pycache__/_textwrap.cpython-38.pyc,,
click/__pycache__/_unicodefun.cpython-38.pyc,,
click/__pycache__/_winconsole.cpython-38.pyc,,
click/__pycache__/core.cpython-38.pyc,,
click/__pycache__/decorators.cpython-38.pyc,,
click/__pycache__/exceptions.cpython-38.pyc,,
click/__pycache__/formatting.cpython-38.pyc,,
click/__pycache__/globals.cpython-38.pyc,,
click/__pycache__/parser.cpython-38.pyc,,
click/__pycache__/termui.cpython-38.pyc,,
click/__pycache__/testing.cpython-38.pyc,,
click/__pycache__/types.cpython-38.pyc,,
click/__pycache__/utils.cpython-38.pyc,,
click/_bashcomplete.py,sha256=iaNUmtxag0YPfxba3TDYCNietiTMQIrvhRLj-H8okFU,11014
click/_compat.py,sha256=vYmvoj4opPxo-c-2GMQQjYT_r_QkOKybkfGoeVrt0dA,23399
click/_termui_impl.py,sha256=xHmLtOJhKUCVD6168yucJ9fknUJPAMs0eUTPgVUO-GQ,19611
click/_textwrap.py,sha256=gwS4m7bdQiJnzaDG8osFcRb-5vn4t4l2qSCy-5csCEc,1198
click/_unicodefun.py,sha256=QHy2_5jYlX-36O-JVrTHNnHOqg8tquUR0HmQFev7Ics,4364
click/_winconsole.py,sha256=PPWVak8Iikm_gAPsxMrzwsVFCvHgaW3jPaDWZ1JBl3U,8965
click/core.py,sha256=q8FLcDZsagBGSRe5Y9Hi_FGvAeZvusNfoO5EkhkSQ8Y,75305
click/decorators.py,sha256=idKt6duLUUfAFftrHoREi8MJSd39XW36pUVHthdglwk,11226
click/exceptions.py,sha256=CNpAjBAE7qjaV4WChxQeak95e5yUOau8AsvT-8m6wss,7663
click/formatting.py,sha256=eh-cypTUAhpI3HD-K4ZpR3vCiURIO62xXvKkR3tNUTM,8889
click/globals.py,sha256=oQkou3ZQ5DgrbVM6BwIBirwiqozbjfirzsLGAlLRRdg,1514
click/parser.py,sha256=m-nGZz4VwprM42_qtFlWFGo7yRJQxkBlRcZodoH593Y,15510
click/termui.py,sha256=o_ZXB2jyvL2Rce7P_bFGq452iyBq9ykJyRApIPMCZO0,23207
click/testing.py,sha256=aYGqY_iWLu2p4k7lkuJ6t3fqpf6aPGqTsyLzNY_ngKg,13062
click/types.py,sha256=2Q929p-aBP_ZYuMFJqJR-Ipucofv3fmDc5JzBDPmzJU,23287
click/utils.py,sha256=6-D0WkAxvv9FkgHXSHwDIv0l9Gdx9Mm6Z5vuKNLIfZI,15763

@ -1,6 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.31.1)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

@ -1,28 +0,0 @@
Copyright 2007 Pallets
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -1,106 +0,0 @@
Metadata-Version: 2.1
Name: Jinja2
Version: 2.11.1
Summary: A very fast and expressive template engine.
Home-page: https://palletsprojects.com/p/jinja/
Author: Armin Ronacher
Author-email: armin.ronacher@active-4.com
Maintainer: Pallets
Maintainer-email: contact@palletsprojects.com
License: BSD-3-Clause
Project-URL: Documentation, https://jinja.palletsprojects.com/
Project-URL: Code, https://github.com/pallets/jinja
Project-URL: Issue tracker, https://github.com/pallets/jinja/issues
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Markup :: HTML
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
Description-Content-Type: text/x-rst
Requires-Dist: MarkupSafe (>=0.23)
Provides-Extra: i18n
Requires-Dist: Babel (>=0.8) ; extra == 'i18n'
Jinja
=====
Jinja is a fast, expressive, extensible templating engine. Special
placeholders in the template allow writing code similar to Python
syntax. Then the template is passed data to render the final document.
It includes:
- Template inheritance and inclusion.
- Define and import macros within templates.
- HTML templates can use autoescaping to prevent XSS from untrusted
user input.
- A sandboxed environment can safely render untrusted templates.
- AsyncIO support for generating templates and calling async
functions.
- I18N support with Babel.
- Templates are compiled to optimized Python code just-in-time and
cached, or can be compiled ahead-of-time.
- Exceptions point to the correct line in templates to make debugging
easier.
- Extensible filters, tests, functions, and even syntax.
Jinja's philosophy is that while application logic belongs in Python if
possible, it shouldn't make the template designer's job difficult by
restricting functionality too much.
Installing
----------
Install and update using `pip`_:
.. code-block:: text
$ pip install -U Jinja2
.. _pip: https://pip.pypa.io/en/stable/quickstart/
In A Nutshell
-------------
.. code-block:: jinja
{% extends "base.html" %}
{% block title %}Members{% endblock %}
{% block content %}
<ul>
{% for user in users %}
<li><a href="{{ user.url }}">{{ user.username }}</a></li>
{% endfor %}
</ul>
{% endblock %}
Links
-----
- Website: https://palletsprojects.com/p/jinja/
- Documentation: https://jinja.palletsprojects.com/
- Releases: https://pypi.org/project/Jinja2/
- Code: https://github.com/pallets/jinja
- Issue tracker: https://github.com/pallets/jinja/issues
- Test status: https://dev.azure.com/pallets/jinja/_build
- Official chat: https://discord.gg/t6rrQZH

@ -1,61 +0,0 @@
Jinja2-2.11.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
Jinja2-2.11.1.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
Jinja2-2.11.1.dist-info/METADATA,sha256=7e9_tz7RirTbxIeiHTSq3e5g6ddCjoym3o5vdlRLuxU,3535
Jinja2-2.11.1.dist-info/RECORD,,
Jinja2-2.11.1.dist-info/WHEEL,sha256=hq9T7ntHzQqUTLUmJ2UVhPL-W4tJi3Yb2Lh5lMfs2mk,110
Jinja2-2.11.1.dist-info/entry_points.txt,sha256=Qy_DkVo6Xj_zzOtmErrATe8lHZhOqdjpt3e4JJAGyi8,61
Jinja2-2.11.1.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
jinja2/__init__.py,sha256=Nq1rzGErXYjIQnqc1pDCJht5LmInBRIZkeL2qkrYEyI,1549
jinja2/__pycache__/__init__.cpython-38.pyc,,
jinja2/__pycache__/_compat.cpython-38.pyc,,
jinja2/__pycache__/_identifier.cpython-38.pyc,,
jinja2/__pycache__/asyncfilters.cpython-38.pyc,,
jinja2/__pycache__/asyncsupport.cpython-38.pyc,,
jinja2/__pycache__/bccache.cpython-38.pyc,,
jinja2/__pycache__/compiler.cpython-38.pyc,,
jinja2/__pycache__/constants.cpython-38.pyc,,
jinja2/__pycache__/debug.cpython-38.pyc,,
jinja2/__pycache__/defaults.cpython-38.pyc,,
jinja2/__pycache__/environment.cpython-38.pyc,,
jinja2/__pycache__/exceptions.cpython-38.pyc,,
jinja2/__pycache__/ext.cpython-38.pyc,,
jinja2/__pycache__/filters.cpython-38.pyc,,
jinja2/__pycache__/idtracking.cpython-38.pyc,,
jinja2/__pycache__/lexer.cpython-38.pyc,,
jinja2/__pycache__/loaders.cpython-38.pyc,,
jinja2/__pycache__/meta.cpython-38.pyc,,
jinja2/__pycache__/nativetypes.cpython-38.pyc,,
jinja2/__pycache__/nodes.cpython-38.pyc,,
jinja2/__pycache__/optimizer.cpython-38.pyc,,
jinja2/__pycache__/parser.cpython-38.pyc,,
jinja2/__pycache__/runtime.cpython-38.pyc,,
jinja2/__pycache__/sandbox.cpython-38.pyc,,
jinja2/__pycache__/tests.cpython-38.pyc,,
jinja2/__pycache__/utils.cpython-38.pyc,,
jinja2/__pycache__/visitor.cpython-38.pyc,,
jinja2/_compat.py,sha256=B6Se8HjnXVpzz9-vfHejn-DV2NjaVK-Iewupc5kKlu8,3191
jinja2/_identifier.py,sha256=EdgGJKi7O1yvr4yFlvqPNEqV6M1qHyQr8Gt8GmVTKVM,1775
jinja2/asyncfilters.py,sha256=8uwjG1zgHTv3K4nEvsj4HXWkK4NsOlfx7-CcnCULDWw,4185
jinja2/asyncsupport.py,sha256=ZBFsDLuq3Gtji3Ia87lcyuDbqaHZJRdtShZcqwpFnSQ,7209
jinja2/bccache.py,sha256=3Pmp4jo65M9FQuIxdxoDBbEDFwe4acDMQf77nEJfrHA,12139
jinja2/compiler.py,sha256=xCNpF7-xAduODbGKSVEyzU7XZGeLWHZr1cwcZTQob30,66236
jinja2/constants.py,sha256=RR1sTzNzUmKco6aZicw4JpQpJGCuPuqm1h1YmCNUEFY,1458
jinja2/debug.py,sha256=UmsW6OxNmbIGvIkwytOyM1NsZB6xJvl_nSz3VgNETUk,8597
jinja2/defaults.py,sha256=85B6YUUCyWPSdrSeVhcqFVuu_bHUAQXeey--FIwSeVQ,1126
jinja2/environment.py,sha256=XqCM_GmncAXPm--CxpRPVF6uV_sPKb0Q0jVa7Znry04,50605
jinja2/exceptions.py,sha256=VjNLawcmf2ODffqVMCQK1cRmvFaUfQWF4u8ouP3QPcE,5425
jinja2/ext.py,sha256=AtwL5O5enT_L3HR9-oBvhGyUTdGoyaqG_ICtnR_EVd4,26441
jinja2/filters.py,sha256=4xEq1qfJ7burpHW5GyL6bkGomp0W47jOXg-HG5aLP-Y,41401
jinja2/idtracking.py,sha256=J3O4VHsrbf3wzwiBc7Cro26kHb6_5kbULeIOzocchIU,9211
jinja2/lexer.py,sha256=VeGdW_t82Le4H-jLy-hX6UeosLf7ApUq2kuUos8YF4Y,29942
jinja2/loaders.py,sha256=UUy5ud3lNtGtnn8iorlF9o1FJ6UqZZKMxd0VGnnqMHI,20350
jinja2/meta.py,sha256=QjyYhfNRD3QCXjBJpiPl9KgkEkGXJbAkCUq4-Ur10EQ,4131
jinja2/nativetypes.py,sha256=Arb2_3IuM386vWZbGPY7DmxryrXg3WzXAEnaHJNdWa0,3576
jinja2/nodes.py,sha256=YwErhE9plVWeoxTQPtMwl10wovsyBRY4x9eAVgtP6zg,31071
jinja2/optimizer.py,sha256=gQLlMYzvQhluhzmAIFA1tXS0cwgWYOjprN-gTRcHVsc,1457
jinja2/parser.py,sha256=fcfdqePNTNyvosIvczbytVA332qpsURvYnCGcjDHSkA,35660
jinja2/runtime.py,sha256=94chnK20a1m1t5AaLWeuiTq6L3g3GLs6AxVPfbNXIHE,30582
jinja2/sandbox.py,sha256=knayyUvXsZ-F0mk15mO2-ehK9gsw04UhB8td-iUOtLc,17127
jinja2/tests.py,sha256=iO_Y-9Vo60zrVe1lMpSl5sKHqAxe2leZHC08OoZ8K24,4799
jinja2/utils.py,sha256=26B9HI2lVWaHY8iOnQTJzAcCL4PYOLiA3V79dm3oOSE,22456
jinja2/visitor.py,sha256=DUHupl0a4PGp7nxRtZFttUzAi1ccxzqc2hzetPYUz8U,3240

@ -1,6 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.34.1)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

@ -1,3 +0,0 @@
[babel.extractors]
jinja2 = jinja2.ext:babel_extract [i18n]

@ -1,28 +0,0 @@
Copyright 2010 Pallets
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -1,105 +0,0 @@
Metadata-Version: 2.1
Name: MarkupSafe
Version: 1.1.1
Summary: Safely add untrusted strings to HTML/XML markup.
Home-page: https://palletsprojects.com/p/markupsafe/
Author: Armin Ronacher
Author-email: armin.ronacher@active-4.com
Maintainer: The Pallets Team
Maintainer-email: contact@palletsprojects.com
License: BSD-3-Clause
Project-URL: Documentation, https://markupsafe.palletsprojects.com/
Project-URL: Code, https://github.com/pallets/markupsafe
Project-URL: Issue tracker, https://github.com/pallets/markupsafe/issues
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Text Processing :: Markup :: HTML
Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*
Description-Content-Type: text/x-rst
MarkupSafe
==========
MarkupSafe implements a text object that escapes characters so it is
safe to use in HTML and XML. Characters that have special meanings are
replaced so that they display as the actual characters. This mitigates
injection attacks, meaning untrusted user input can safely be displayed
on a page.
Installing
----------
Install and update using `pip`_:
.. code-block:: text
pip install -U MarkupSafe
.. _pip: https://pip.pypa.io/en/stable/quickstart/
Examples
--------
.. code-block:: pycon
>>> from markupsafe import Markup, escape
>>> # escape replaces special characters and wraps in Markup
>>> escape('<script>alert(document.cookie);</script>')
Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
>>> # wrap in Markup to mark text "safe" and prevent escaping
>>> Markup('<strong>Hello</strong>')
Markup('<strong>Hello</strong>')
>>> escape(Markup('<strong>Hello</strong>'))
Markup('<strong>Hello</strong>')
>>> # Markup is a text subclass (str on Python 3, unicode on Python 2)
>>> # methods and operators escape their arguments
>>> template = Markup("Hello <em>%s</em>")
>>> template % '"World"'
Markup('Hello <em>&#34;World&#34;</em>')
Donate
------
The Pallets organization develops and supports MarkupSafe and other
libraries that use it. In order to grow the community of contributors
and users, and allow the maintainers to devote more time to the
projects, `please donate today`_.
.. _please donate today: https://palletsprojects.com/donate
Links
-----
* Website: https://palletsprojects.com/p/markupsafe/
* Documentation: https://markupsafe.palletsprojects.com/
* License: `BSD-3-Clause <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_
* Releases: https://pypi.org/project/MarkupSafe/
* Code: https://github.com/pallets/markupsafe
* Issue tracker: https://github.com/pallets/markupsafe/issues
* Test status:
* Linux, Mac: https://travis-ci.org/pallets/markupsafe
* Windows: https://ci.appveyor.com/project/pallets/markupsafe
* Test coverage: https://codecov.io/gh/pallets/markupsafe
* Official chat: https://discord.gg/t6rrQZH

@ -1,16 +0,0 @@
MarkupSafe-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
MarkupSafe-1.1.1.dist-info/LICENSE.txt,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
MarkupSafe-1.1.1.dist-info/METADATA,sha256=IFCP4hCNGjXJgMoSvdjPiKDLAMUTTWoxKXQsQvmyMNU,3653
MarkupSafe-1.1.1.dist-info/RECORD,,
MarkupSafe-1.1.1.dist-info/WHEEL,sha256=VEyGcIFAmk_1KbI6gaZGw_mMiT-pdGweASQLX-DzYaY,108
MarkupSafe-1.1.1.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
markupsafe/__init__.py,sha256=oTblO5f9KFM-pvnq9bB0HgElnqkJyqHnFN1Nx2NIvnY,10126
markupsafe/__pycache__/__init__.cpython-38.pyc,,
markupsafe/__pycache__/_compat.cpython-38.pyc,,
markupsafe/__pycache__/_constants.cpython-38.pyc,,
markupsafe/__pycache__/_native.cpython-38.pyc,,
markupsafe/_compat.py,sha256=uEW1ybxEjfxIiuTbRRaJpHsPFf4yQUMMKaPgYEC5XbU,558
markupsafe/_constants.py,sha256=zo2ajfScG-l1Sb_52EP3MlDCqO7Y1BVHUXXKRsVDRNk,4690
markupsafe/_native.py,sha256=d-8S_zzYt2y512xYcuSxq0NeG2DUUvG80wVdTn-4KI8,1873
markupsafe/_speedups.c,sha256=k0fzEIK3CP6MmMqeY0ob43TP90mVN0DTyn7BAl3RqSg,9884
markupsafe/_speedups.cpython-38-x86_64-linux-gnu.so,sha256=SbJwN321Xn7OPYGv5a6Ghzga75uT8RHQUGkoQUASF-o,48016

@ -1,5 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.31.1)
Root-Is-Purelib: false
Tag: cp38-cp38-manylinux1_x86_64

@ -1,8 +0,0 @@
__all__ = ["__version__"]
try:
    from ._version import version as __version__
except ImportError:
    # broken installation, we don't even try
    # unknown only works because we do poor man's version compare
    __version__ = "unknown"

@ -1,109 +0,0 @@
"""allow bash-completion for argparse with argcomplete if installed
needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
to find the magic string, so _ARGCOMPLETE env. var is never set, and
this does not need special code).
Function try_argcomplete(parser) should be called directly before
the call to ArgumentParser.parse_args().
The filescompleter is what you normally would use on the positional
arguments specification, in order to get "dirname/" after "dirn<TAB>"
instead of the default "dirname ":
optparser.add_argument(Config._file_or_dir, nargs='*'
).completer=filescompleter
Other, application specific, completers should go in the file
doing the add_argument calls as they need to be specified as .completer
attributes as well. (If argcomplete is not installed, the function the
attribute points to will not be used).
SPEEDUP
=======
The generic argcomplete script for bash-completion
(/etc/bash_completion.d/python-argcomplete.sh )
uses a python program to determine startup script generated by pip.
You can speed up completion somewhat by changing this script to include
# PYTHON_ARGCOMPLETE_OK
so that the python-argcomplete-check-easy-install-script does not
need to be called to find the entry point of the code and see if that is
marked with PYTHON_ARGCOMPLETE_OK
INSTALL/DEBUGGING
=================
To include this support in another application that has setup.py generated
scripts:
- add the line:
# PYTHON_ARGCOMPLETE_OK
near the top of the main python entry point
- include in the file calling parse_args():
from _argcomplete import try_argcomplete, filescompleter
, call try_argcomplete just before parse_args(), and optionally add
filescompleter to the positional arguments' add_argument()
If things do not work right away:
- switch on argcomplete debugging with (also helpful when doing custom
completers):
export _ARC_DEBUG=1
- run:
python-argcomplete-check-easy-install-script $(which appname)
echo $?
will echo 0 if the magic line has been found, 1 if not
- sometimes it helps to find early on errors using:
_ARGCOMPLETE=1 _ARC_DEBUG=1 appname
which should throw a KeyError: 'COMPLINE' (which is properly set by the
global argcomplete script).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from glob import glob
class FastFilesCompleter(object):
    "Fast file completer class"

    def __init__(self, directories=True):
        self.directories = directories

    def __call__(self, prefix, **kwargs):
        """only called on non option completions"""
        if os.path.sep in prefix[1:]:
            prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
        else:
            prefix_dir = 0
        completion = []
        globbed = []
        if "*" not in prefix and "?" not in prefix:
            # we are on unix, otherwise no bash
            if not prefix or prefix[-1] == os.path.sep:
                globbed.extend(glob(prefix + ".*"))
            prefix += "*"
        globbed.extend(glob(prefix))
        for x in sorted(globbed):
            if os.path.isdir(x):
                x += "/"
            # append stripping the prefix (like bash, not like compgen)
            completion.append(x[prefix_dir:])
        return completion


if os.environ.get("_ARGCOMPLETE"):
    try:
        import argcomplete.completers
    except ImportError:
        sys.exit(-1)
    filescompleter = FastFilesCompleter()

    def try_argcomplete(parser):
        argcomplete.autocomplete(parser, always_complete_options=False)

else:
    def try_argcomplete(parser):
        pass

    filescompleter = None
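The wiring the module docstring above describes would look roughly like this in a script that calls parse_args(); this is a hedged sketch only (the import path and the argument name are assumptions, not taken from this repository):
import argparse

from _pytest._argcomplete import filescompleter, try_argcomplete

parser = argparse.ArgumentParser()
arg = parser.add_argument("paths", nargs="*")
if filescompleter is not None:
    arg.completer = filescompleter  # complete "dirname/" rather than "dirname "
try_argcomplete(parser)  # no-op when argcomplete is not installed
args = parser.parse_args()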

@ -1,14 +0,0 @@
""" python inspection/code generation API """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .code import Code # noqa
from .code import ExceptionInfo # noqa
from .code import filter_traceback # noqa
from .code import Frame # noqa
from .code import getrawcode # noqa
from .code import Traceback # noqa
from .source import compile_ as compile # noqa
from .source import getfslineno # noqa
from .source import Source # noqa

@ -1,94 +0,0 @@
# copied from python-2.7.3's traceback.py
# CHANGES:
# - some_str is replaced, trying to create unicode strings
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import types
from six import text_type
def format_exception_only(etype, value):
"""Format the exception part of a traceback.
The arguments are the exception type and value such as given by
sys.last_type and sys.last_value. The return value is a list of
strings, each ending in a newline.
Normally, the list contains a single string; however, for
SyntaxError exceptions, it contains several lines that (when
printed) display detailed information about where the syntax
error occurred.
The message indicating which exception occurred is always the last
string in the list.
"""
# An instance should not have a meaningful value parameter, but
# sometimes does, particularly for string exceptions, such as
# >>> raise string1, string2 # deprecated
#
# Clear these out first because issubtype(string1, SyntaxError)
# would throw another exception and mask the original problem.
if (
isinstance(etype, BaseException)
or isinstance(etype, types.InstanceType)
or etype is None
or type(etype) is str
):
return [_format_final_exc_line(etype, value)]
stype = etype.__name__
if not issubclass(etype, SyntaxError):
return [_format_final_exc_line(stype, value)]
# It was a syntax error; show exactly where the problem was found.
lines = []
try:
msg, (filename, lineno, offset, badline) = value.args
except Exception:
pass
else:
filename = filename or "<string>"
lines.append(' File "{}", line {}\n'.format(filename, lineno))
if badline is not None:
if isinstance(badline, bytes): # python 2 only
badline = badline.decode("utf-8", "replace")
lines.append(" {}\n".format(badline.strip()))
if offset is not None:
caretspace = badline.rstrip("\n")[:offset].lstrip()
# non-space whitespace (likes tabs) must be kept for alignment
caretspace = ((c.isspace() and c or " ") for c in caretspace)
# only three spaces to account for offset1 == pos 0
lines.append(" {}^\n".format("".join(caretspace)))
value = msg
lines.append(_format_final_exc_line(stype, value))
return lines
def _format_final_exc_line(etype, value):
"""Return a list of a single line -- normal case for format_exception_only"""
valuestr = _some_str(value)
if value is None or not valuestr:
line = "{}\n".format(etype)
else:
line = "{}: {}\n".format(etype, valuestr)
return line
def _some_str(value):
try:
return text_type(value)
except Exception:
try:
return bytes(value).decode("UTF-8", "replace")
except Exception:
pass
return "<unprintable {} object>".format(type(value).__name__)
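For reference, the standard-library function this module was copied from produces the same kind of output; a minimal, self-contained illustration:
import traceback

try:
    int("not a number")
except ValueError as exc:
    # a list of strings, each ending in a newline; here a single line
    print("".join(traceback.format_exception_only(type(exc), exc)), end="")
# ValueError: invalid literal for int() with base 10: 'not a number'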

File diff suppressed because it is too large.

@ -1,328 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import inspect
import linecache
import sys
import textwrap
import tokenize
import warnings
from ast import PyCF_ONLY_AST as _AST_FLAG
from bisect import bisect_right
import py
import six
class Source(object):
""" an immutable object holding a source code fragment,
possibly deindenting it.
"""
_compilecounter = 0
def __init__(self, *parts, **kwargs):
self.lines = lines = []
de = kwargs.get("deindent", True)
for part in parts:
if not part:
partlines = []
elif isinstance(part, Source):
partlines = part.lines
elif isinstance(part, (tuple, list)):
partlines = [x.rstrip("\n") for x in part]
elif isinstance(part, six.string_types):
partlines = part.split("\n")
else:
partlines = getsource(part, deindent=de).lines
if de:
partlines = deindent(partlines)
lines.extend(partlines)
def __eq__(self, other):
try:
return self.lines == other.lines
except AttributeError:
if isinstance(other, str):
return str(self) == other
return False
__hash__ = None
def __getitem__(self, key):
if isinstance(key, int):
return self.lines[key]
else:
if key.step not in (None, 1):
raise IndexError("cannot slice a Source with a step")
newsource = Source()
newsource.lines = self.lines[key.start : key.stop]
return newsource
def __len__(self):
return len(self.lines)
def strip(self):
""" return new source object with trailing
and leading blank lines removed.
"""
start, end = 0, len(self)
while start < end and not self.lines[start].strip():
start += 1
while end > start and not self.lines[end - 1].strip():
end -= 1
source = Source()
source.lines[:] = self.lines[start:end]
return source
def putaround(self, before="", after="", indent=" " * 4):
""" return a copy of the source object with
'before' and 'after' wrapped around it.
"""
before = Source(before)
after = Source(after)
newsource = Source()
lines = [(indent + line) for line in self.lines]
newsource.lines = before.lines + lines + after.lines
return newsource
def indent(self, indent=" " * 4):
""" return a copy of the source object with
all lines indented by the given indent-string.
"""
newsource = Source()
newsource.lines = [(indent + line) for line in self.lines]
return newsource
def getstatement(self, lineno):
""" return Source statement which contains the
given linenumber (counted from 0).
"""
start, end = self.getstatementrange(lineno)
return self[start:end]
def getstatementrange(self, lineno):
""" return (start, end) tuple which spans the minimal
statement region which containing the given lineno.
"""
if not (0 <= lineno < len(self)):
raise IndexError("lineno out of range")
ast, start, end = getstatementrange_ast(lineno, self)
return start, end
def deindent(self):
"""return a new source object deindented."""
newsource = Source()
newsource.lines[:] = deindent(self.lines)
return newsource
def isparseable(self, deindent=True):
""" return True if source is parseable, heuristically
deindenting it by default.
"""
from parser import suite as syntax_checker
if deindent:
source = str(self.deindent())
else:
source = str(self)
try:
# compile(source+'\n', "x", "exec")
syntax_checker(source + "\n")
except KeyboardInterrupt:
raise
except Exception:
return False
else:
return True
def __str__(self):
return "\n".join(self.lines)
def compile(
self, filename=None, mode="exec", flag=0, dont_inherit=0, _genframe=None
):
""" return compiled code object. if filename is None
invent an artificial filename which displays
the source/line position of the caller frame.
"""
if not filename or py.path.local(filename).check(file=0):
if _genframe is None:
_genframe = sys._getframe(1) # the caller
fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno
base = "<%d-codegen " % self._compilecounter
self.__class__._compilecounter += 1
if not filename:
filename = base + "%s:%d>" % (fn, lineno)
else:
filename = base + "%r %s:%d>" % (filename, fn, lineno)
source = "\n".join(self.lines) + "\n"
try:
co = compile(source, filename, mode, flag)
except SyntaxError:
ex = sys.exc_info()[1]
# re-represent syntax errors from parsing python strings
msglines = self.lines[: ex.lineno]
if ex.offset:
msglines.append(" " * ex.offset + "^")
msglines.append("(code was compiled probably from here: %s)" % filename)
newex = SyntaxError("\n".join(msglines))
newex.offset = ex.offset
newex.lineno = ex.lineno
newex.text = ex.text
raise newex
else:
if flag & _AST_FLAG:
return co
lines = [(x + "\n") for x in self.lines]
linecache.cache[filename] = (1, None, lines, filename)
return co
#
# public API shortcut functions
#
def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0):
""" compile the given source to a raw code object,
and maintain an internal cache which allows later
retrieval of the source code for the code object
and any recursively created code objects.
"""
if isinstance(source, ast.AST):
# XXX should Source support having AST?
return compile(source, filename, mode, flags, dont_inherit)
_genframe = sys._getframe(1) # the caller
s = Source(source)
co = s.compile(filename, mode, flags, _genframe=_genframe)
return co
def getfslineno(obj):
""" Return source location (path, lineno) for the given object.
If the source cannot be determined return ("", -1)
"""
from .code import Code
try:
code = Code(obj)
except TypeError:
try:
fn = inspect.getsourcefile(obj) or inspect.getfile(obj)
except TypeError:
return "", -1
fspath = fn and py.path.local(fn) or None
lineno = -1
if fspath:
try:
_, lineno = findsource(obj)
except IOError:
pass
else:
fspath = code.path
lineno = code.firstlineno
assert isinstance(lineno, int)
return fspath, lineno
#
# helper functions
#
def findsource(obj):
try:
sourcelines, lineno = inspect.findsource(obj)
except py.builtin._sysex:
raise
except: # noqa
return None, -1
source = Source()
source.lines = [line.rstrip() for line in sourcelines]
return source, lineno
def getsource(obj, **kwargs):
from .code import getrawcode
obj = getrawcode(obj)
try:
strsrc = inspect.getsource(obj)
except IndentationError:
strsrc = '"Buggy python version consider upgrading, cannot get source"'
assert isinstance(strsrc, str)
return Source(strsrc, **kwargs)
def deindent(lines):
return textwrap.dedent("\n".join(lines)).splitlines()
def get_statement_startend2(lineno, node):
import ast
# flatten all statements and except handlers into one lineno-list
# AST's line numbers start indexing at 1
values = []
for x in ast.walk(node):
if isinstance(x, (ast.stmt, ast.ExceptHandler)):
values.append(x.lineno - 1)
for name in ("finalbody", "orelse"):
val = getattr(x, name, None)
if val:
# treat the finally/orelse part as its own statement
values.append(val[0].lineno - 1 - 1)
values.sort()
insert_index = bisect_right(values, lineno)
start = values[insert_index - 1]
if insert_index >= len(values):
end = None
else:
end = values[insert_index]
return start, end
def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
if astnode is None:
content = str(source)
# See #4260:
# don't produce duplicate warnings when compiling source to find ast
with warnings.catch_warnings():
warnings.simplefilter("ignore")
astnode = compile(content, "source", "exec", _AST_FLAG)
start, end = get_statement_startend2(lineno, astnode)
# we need to correct the end:
# - ast-parsing strips comments
# - there might be empty lines
# - we might have lesser indented code blocks at the end
if end is None:
end = len(source.lines)
if end > start + 1:
# make sure we don't span differently indented code blocks
# by using the BlockFinder helper that inspect.getsource() itself uses
block_finder = inspect.BlockFinder()
# if we start with an indented line, put blockfinder to "started" mode
block_finder.started = source.lines[start][0].isspace()
it = ((x + "\n") for x in source.lines[start:end])
try:
for tok in tokenize.generate_tokens(lambda: next(it)):
block_finder.tokeneater(*tok)
except (inspect.EndOfBlock, IndentationError):
end = block_finder.last + start
except Exception:
pass
# the end might still point to a comment or empty line, correct it
while end:
line = source.lines[end - 1].lstrip()
if line.startswith("#") or not line:
end -= 1
else:
break
return astnode, start, end
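A rough usage sketch of the Source helper defined above; the import path assumes pytest's private _pytest._code package (which re-exports Source, as its __init__.py shows earlier in this diff) and may differ between versions:
from _pytest._code import Source

src = Source("\ndef f():\n    return 1\n\n")
print(str(src.strip()))          # leading/trailing blank lines removed
print(str(src.indent("    ")))   # every line indented by four spaces
print(len(src))                  # number of lines held by the fragment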

@ -1,4 +0,0 @@
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
version = '3.10.1'

@ -1,155 +0,0 @@
"""
support for presenting detailed information in failing assertions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from _pytest.assertion import rewrite
from _pytest.assertion import truncate
from _pytest.assertion import util
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption(
"--assert",
action="store",
dest="assertmode",
choices=("rewrite", "plain"),
default="rewrite",
metavar="MODE",
help="""Control assertion debugging tools. 'plain'
performs no assertion debugging. 'rewrite'
(the default) rewrites assert statements in
test modules on import to provide assert
expression information.""",
)
def register_assert_rewrite(*names):
"""Register one or more module names to be rewritten on import.
This function will make sure that this module or all modules inside
the package will get their assert statements rewritten.
Thus you should make sure to call this before the module is
actually imported, usually in your __init__.py if you are a plugin
using a package.
:raise TypeError: if the given module names are not strings.
"""
for name in names:
if not isinstance(name, str):
msg = "expected module names as *args, got {0} instead"
raise TypeError(msg.format(repr(names)))
for hook in sys.meta_path:
if isinstance(hook, rewrite.AssertionRewritingHook):
importhook = hook
break
else:
importhook = DummyRewriteHook()
importhook.mark_rewrite(*names)
class DummyRewriteHook(object):
"""A no-op import hook for when rewriting is disabled."""
def mark_rewrite(self, *names):
pass
class AssertionState(object):
"""State for the assertion plugin."""
def __init__(self, config, mode):
self.mode = mode
self.trace = config.trace.root.get("assertion")
self.hook = None
def install_importhook(config):
"""Try to install the rewrite hook, raise SystemError if it fails."""
# Jython has an AST bug that makes the assertion rewriting hook malfunction.
if sys.platform.startswith("java"):
raise SystemError("rewrite not supported")
config._assertstate = AssertionState(config, "rewrite")
config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config)
sys.meta_path.insert(0, hook)
config._assertstate.trace("installed rewrite import hook")
def undo():
hook = config._assertstate.hook
if hook is not None and hook in sys.meta_path:
sys.meta_path.remove(hook)
config.add_cleanup(undo)
return hook
def pytest_collection(session):
# this hook is only called when test modules are collected
# so for example not in the master process of pytest-xdist
# (which does not collect test modules)
assertstate = getattr(session.config, "_assertstate", None)
if assertstate:
if assertstate.hook is not None:
assertstate.hook.set_session(session)
def pytest_runtest_setup(item):
"""Setup the pytest_assertrepr_compare hook
The newinterpret and rewrite modules will use util._reprcompare if
it exists to use custom reporting via the
pytest_assertrepr_compare hook. This sets up this custom
comparison for the test.
"""
def callbinrepr(op, left, right):
"""Call the pytest_assertrepr_compare hook and prepare the result
This uses the first result from the hook and then ensures the
following:
* Overly verbose explanations are truncated unless configured otherwise
(eg. if running in verbose mode).
* Embedded newlines are escaped to help util.format_explanation()
later.
* If the rewrite mode is used embedded %-characters are replaced
to protect later % formatting.
The result can be formatted by util.format_explanation() for
pretty printing.
"""
hook_result = item.ihook.pytest_assertrepr_compare(
config=item.config, op=op, left=left, right=right
)
for new_expl in hook_result:
if new_expl:
new_expl = truncate.truncate_if_required(new_expl, item)
new_expl = [line.replace("\n", "\\n") for line in new_expl]
res = six.text_type("\n~").join(new_expl)
if item.config.getvalue("assertmode") == "rewrite":
res = res.replace("%", "%%")
return res
util._reprcompare = callbinrepr
def pytest_runtest_teardown(item):
util._reprcompare = None
def pytest_sessionfinish(session):
assertstate = getattr(session.config, "_assertstate", None)
if assertstate:
if assertstate.hook is not None:
assertstate.hook.set_session(None)
# Expose this plugin's implementation for the pytest_assertrepr_compare hook
pytest_assertrepr_compare = util.assertrepr_compare
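A brief usage sketch for the register_assert_rewrite API documented above; the package and module names are hypothetical. It is called from a plugin package's __init__.py before the submodule is imported:
import pytest
pytest.register_assert_rewrite("myplugin.helpers")
from myplugin import helpers  # noqa: E402  -- imported after registering the rewrite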

@ -1,102 +0,0 @@
"""
Utilities for truncating assertion output.
Current default behaviour is to truncate assertion explanations at
~8 terminal lines, unless running in "-vv" mode or running on CI.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
DEFAULT_MAX_LINES = 8
DEFAULT_MAX_CHARS = 8 * 80
USAGE_MSG = "use '-vv' to show"
def truncate_if_required(explanation, item, max_length=None):
"""
Truncate this assertion explanation if the given test item is eligible.
"""
if _should_truncate_item(item):
return _truncate_explanation(explanation)
return explanation
def _should_truncate_item(item):
"""
Whether or not this test item is eligible for truncation.
"""
verbose = item.config.option.verbose
return verbose < 2 and not _running_on_ci()
def _running_on_ci():
"""Check if we're currently running on a CI system."""
env_vars = ["CI", "BUILD_NUMBER"]
return any(var in os.environ for var in env_vars)
def _truncate_explanation(input_lines, max_lines=None, max_chars=None):
"""
Truncate given list of strings that makes up the assertion explanation.
Truncates to either 8 lines, or 640 characters - whichever the input reaches
first. The remaining lines will be replaced by a usage message.
"""
if max_lines is None:
max_lines = DEFAULT_MAX_LINES
if max_chars is None:
max_chars = DEFAULT_MAX_CHARS
# Check if truncation required
input_char_count = len("".join(input_lines))
if len(input_lines) <= max_lines and input_char_count <= max_chars:
return input_lines
# Truncate first to max_lines, and then truncate to max_chars if max_chars
# is exceeded.
truncated_explanation = input_lines[:max_lines]
truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars)
# Add ellipsis to final line
truncated_explanation[-1] = truncated_explanation[-1] + "..."
# Append useful message to explanation
truncated_line_count = len(input_lines) - len(truncated_explanation)
truncated_line_count += 1 # Account for the part-truncated final line
msg = "...Full output truncated"
if truncated_line_count == 1:
msg += " ({} line hidden)".format(truncated_line_count)
else:
msg += " ({} lines hidden)".format(truncated_line_count)
msg += ", {}".format(USAGE_MSG)
truncated_explanation.extend([six.text_type(""), six.text_type(msg)])
return truncated_explanation
def _truncate_by_char_count(input_lines, max_chars):
# Check if truncation required
if len("".join(input_lines)) <= max_chars:
return input_lines
# Find point at which input length exceeds total allowed length
iterated_char_count = 0
for iterated_index, input_line in enumerate(input_lines):
if iterated_char_count + len(input_line) > max_chars:
break
iterated_char_count += len(input_line)
# Create truncated explanation with modified final line
truncated_result = input_lines[:iterated_index]
final_line = input_lines[iterated_index]
if final_line:
final_line_truncate_point = max_chars - iterated_char_count
final_line = final_line[:final_line_truncate_point]
truncated_result.append(final_line)
return truncated_result
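A rough illustration of the defaults above (DEFAULT_MAX_LINES = 8, DEFAULT_MAX_CHARS = 640), not an exact replay of _truncate_explanation: a hypothetical 20-line explanation that stays under the character limit keeps its first 8 lines, marks the last kept line with "...", and reports the hidden remainder.
lines = ["line %d" % i for i in range(20)]
kept = lines[:8]                     # DEFAULT_MAX_LINES
hidden = len(lines) - len(kept) + 1  # +1 for the part-truncated final line
kept[-1] += "..."
kept += ["", "...Full output truncated (%d lines hidden), use '-vv' to show" % hidden]
assert hidden == 13 and len(kept) == 10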

@ -1,334 +0,0 @@
"""Utilities for assertion debugging"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pprint
import py
import six
import _pytest._code
from ..compat import Sequence
# The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the
# DebugInterpreter.
_reprcompare = None
# the re-encoding is needed for python2 repr
# with non-ascii characters (see issue 877 and 1379)
def ecu(s):
if isinstance(s, bytes):
return s.decode("UTF-8", "replace")
else:
return s
def format_explanation(explanation):
"""This formats an explanation
Normally all embedded newlines are escaped, however there are
three exceptions: \n{, \n} and \n~. The first two are intended to
cover nested explanations; see function and attribute explanations
for examples (visit_Call(), visit_Attribute()). The last one is
for when one explanation needs to span multiple lines, e.g. when
displaying diffs.
"""
explanation = ecu(explanation)
lines = _split_explanation(explanation)
result = _format_lines(lines)
return u"\n".join(result)
def _split_explanation(explanation):
"""Return a list of individual lines in the explanation
This will return a list of lines split on '\n{', '\n}' and '\n~'.
Any other newlines will be escaped and appear in the line as the
literal '\n' characters.
"""
raw_lines = (explanation or u"").split("\n")
lines = [raw_lines[0]]
for values in raw_lines[1:]:
if values and values[0] in ["{", "}", "~", ">"]:
lines.append(values)
else:
lines[-1] += "\\n" + values
return lines
def _format_lines(lines):
"""Format the individual lines
This will replace the '{', '}' and '~' characters of our mini
formatting language with the proper 'where ...', 'and ...' and ' +
...' text, taking care of indentation along the way.
Return a list of formatted lines.
"""
result = lines[:1]
stack = [0]
stackcnt = [0]
for line in lines[1:]:
if line.startswith("{"):
if stackcnt[-1]:
s = u"and "
else:
s = u"where "
stack.append(len(result))
stackcnt[-1] += 1
stackcnt.append(0)
result.append(u" +" + u" " * (len(stack) - 1) + s + line[1:])
elif line.startswith("}"):
stack.pop()
stackcnt.pop()
result[stack[-1]] += line[1:]
else:
assert line[0] in ["~", ">"]
stack[-1] += 1
indent = len(stack) if line.startswith("~") else len(stack) - 1
result.append(u" " * indent + line[1:])
assert len(stack) == 1
return result
# Provide basestring in python3
try:
basestring = basestring
except NameError:
basestring = str
def assertrepr_compare(config, op, left, right):
"""Return specialised explanations for some operators/operands"""
width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
left_repr = py.io.saferepr(left, maxsize=int(width // 2))
right_repr = py.io.saferepr(right, maxsize=width - len(left_repr))
summary = u"%s %s %s" % (ecu(left_repr), op, ecu(right_repr))
def issequence(x):
return isinstance(x, Sequence) and not isinstance(x, basestring)
def istext(x):
return isinstance(x, basestring)
def isdict(x):
return isinstance(x, dict)
def isset(x):
return isinstance(x, (set, frozenset))
def isiterable(obj):
try:
iter(obj)
return not istext(obj)
except TypeError:
return False
verbose = config.getoption("verbose")
explanation = None
try:
if op == "==":
if istext(left) and istext(right):
explanation = _diff_text(left, right, verbose)
else:
if issequence(left) and issequence(right):
explanation = _compare_eq_sequence(left, right, verbose)
elif isset(left) and isset(right):
explanation = _compare_eq_set(left, right, verbose)
elif isdict(left) and isdict(right):
explanation = _compare_eq_dict(left, right, verbose)
if isiterable(left) and isiterable(right):
expl = _compare_eq_iterable(left, right, verbose)
if explanation is not None:
explanation.extend(expl)
else:
explanation = expl
elif op == "not in":
if istext(left) and istext(right):
explanation = _notin_text(left, right, verbose)
except Exception:
explanation = [
u"(pytest_assertion plugin: representation of details failed. "
u"Probably an object has a faulty __repr__.)",
six.text_type(_pytest._code.ExceptionInfo()),
]
if not explanation:
return None
return [summary] + explanation
def _diff_text(left, right, verbose=False):
"""Return the explanation for the diff between text or bytes
Unless --verbose is used this will skip leading and trailing
characters which are identical to keep the diff minimal.
If the input are bytes they will be safely converted to text.
"""
from difflib import ndiff
explanation = []
def escape_for_readable_diff(binary_text):
"""
Ensures that the internal string is always valid unicode, converting any bytes safely to valid unicode.
This is done using repr() which then needs post-processing to fix the encompassing quotes and un-escape
newlines and carriage returns (#429).
"""
r = six.text_type(repr(binary_text)[1:-1])
r = r.replace(r"\n", "\n")
r = r.replace(r"\r", "\r")
return r
if isinstance(left, bytes):
left = escape_for_readable_diff(left)
if isinstance(right, bytes):
right = escape_for_readable_diff(right)
if not verbose:
i = 0 # just in case left or right has zero length
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
break
if i > 42:
i -= 10 # Provide some context
explanation = [
u"Skipping %s identical leading characters in diff, use -v to show" % i
]
left = left[i:]
right = right[i:]
if len(left) == len(right):
for i in range(len(left)):
if left[-i] != right[-i]:
break
if i > 42:
i -= 10 # Provide some context
explanation += [
u"Skipping {} identical trailing "
u"characters in diff, use -v to show".format(i)
]
left = left[:-i]
right = right[:-i]
keepends = True
if left.isspace() or right.isspace():
left = repr(str(left))
right = repr(str(right))
explanation += [u"Strings contain only whitespace, escaping them using repr()"]
explanation += [
line.strip("\n")
for line in ndiff(left.splitlines(keepends), right.splitlines(keepends))
]
return explanation
def _compare_eq_iterable(left, right, verbose=False):
if not verbose:
return [u"Use -v to get the full diff"]
# dynamic import to speedup pytest
import difflib
try:
left_formatting = pprint.pformat(left).splitlines()
right_formatting = pprint.pformat(right).splitlines()
explanation = [u"Full diff:"]
except Exception:
# hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
# sorted() on a list would raise. See issue #718.
# As a workaround, the full diff is generated by using the repr() string of each item of each container.
left_formatting = sorted(repr(x) for x in left)
right_formatting = sorted(repr(x) for x in right)
explanation = [u"Full diff (fallback to calling repr on each item):"]
explanation.extend(
line.strip() for line in difflib.ndiff(left_formatting, right_formatting)
)
return explanation
def _compare_eq_sequence(left, right, verbose=False):
explanation = []
for i in range(min(len(left), len(right))):
if left[i] != right[i]:
explanation += [u"At index %s diff: %r != %r" % (i, left[i], right[i])]
break
if len(left) > len(right):
explanation += [
u"Left contains more items, first extra item: %s"
% py.io.saferepr(left[len(right)])
]
elif len(left) < len(right):
explanation += [
u"Right contains more items, first extra item: %s"
% py.io.saferepr(right[len(left)])
]
return explanation
def _compare_eq_set(left, right, verbose=False):
explanation = []
diff_left = left - right
diff_right = right - left
if diff_left:
explanation.append(u"Extra items in the left set:")
for item in diff_left:
explanation.append(py.io.saferepr(item))
if diff_right:
explanation.append(u"Extra items in the right set:")
for item in diff_right:
explanation.append(py.io.saferepr(item))
return explanation
def _compare_eq_dict(left, right, verbose=False):
explanation = []
common = set(left).intersection(set(right))
same = {k: left[k] for k in common if left[k] == right[k]}
if same and verbose < 2:
explanation += [u"Omitting %s identical items, use -vv to show" % len(same)]
elif same:
explanation += [u"Common items:"]
explanation += pprint.pformat(same).splitlines()
diff = {k for k in common if left[k] != right[k]}
if diff:
explanation += [u"Differing items:"]
for k in diff:
explanation += [
py.io.saferepr({k: left[k]}) + " != " + py.io.saferepr({k: right[k]})
]
extra_left = set(left) - set(right)
if extra_left:
explanation.append(u"Left contains more items:")
explanation.extend(
pprint.pformat({k: left[k] for k in extra_left}).splitlines()
)
extra_right = set(right) - set(left)
if extra_right:
explanation.append(u"Right contains more items:")
explanation.extend(
pprint.pformat({k: right[k] for k in extra_right}).splitlines()
)
return explanation
def _notin_text(term, text, verbose=False):
index = text.find(term)
head = text[:index]
tail = text[index + len(term) :]
correct_text = head + tail
diff = _diff_text(correct_text, text, verbose)
newdiff = [u"%s is contained here:" % py.io.saferepr(term, maxsize=42)]
for line in diff:
if line.startswith(u"Skipping"):
continue
if line.startswith(u"- "):
continue
if line.startswith(u"+ "):
newdiff.append(u" " + line[2:])
else:
newdiff.append(line)
return newdiff
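A small sketch of the \n{ / \n} mini-language handled by format_explanation above, assuming the module is importable as _pytest.assertion.util; the explanation string is a hypothetical example of the nested "where" clause the rewriter emits:
from _pytest.assertion.util import format_explanation
expl = u"assert 1 == 2\n{1 = f()\n}"
assert format_explanation(expl) == u"assert 1 == 2\n + where 1 = f()"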

@ -1,367 +0,0 @@
"""
merged implementation of the cache provider
the name "cache" was deliberately not chosen, to ensure that pluggy
automatically ignores the external pytest-cache package
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from collections import OrderedDict
import attr
import py
import six
import pytest
from .compat import _PY2 as PY2
from .pathlib import Path
from .pathlib import resolve_from_str
from .pathlib import rmtree
README_CONTENT = u"""\
# pytest cache directory #
This directory contains data from the pytest's cache plugin,
which provides the `--lf` and `--ff` options, as well as the `cache` fixture.
**Do not** commit this to version control.
See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information.
"""
@attr.s
class Cache(object):
_cachedir = attr.ib(repr=False)
_config = attr.ib(repr=False)
@classmethod
def for_config(cls, config):
cachedir = cls.cache_dir_from_config(config)
if config.getoption("cacheclear") and cachedir.exists():
rmtree(cachedir, force=True)
cachedir.mkdir()
return cls(cachedir, config)
@staticmethod
def cache_dir_from_config(config):
return resolve_from_str(config.getini("cache_dir"), config.rootdir)
def warn(self, fmt, **args):
from _pytest.warnings import _issue_config_warning
from _pytest.warning_types import PytestWarning
_issue_config_warning(
PytestWarning(fmt.format(**args) if args else fmt), self._config
)
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files, e.g. to store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
name = Path(name)
if len(name.parts) > 1:
raise ValueError("name is not allowed to contain path separators")
res = self._cachedir.joinpath("d", name)
res.mkdir(exist_ok=True, parents=True)
return py.path.local(res)
def _getvaluepath(self, key):
return self._cachedir.joinpath("v", Path(key))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
try:
with path.open("r") as f:
return json.load(f)
except (ValueError, IOError, OSError):
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e. g. lists of dictionaries.
"""
path = self._getvaluepath(key)
try:
path.parent.mkdir(exist_ok=True, parents=True)
except (IOError, OSError):
self.warn("could not create cache path {path}", path=path)
return
try:
f = path.open("wb" if PY2 else "w")
except (IOError, OSError):
self.warn("cache could not write path {path}", path=path)
else:
with f:
json.dump(value, f, indent=2, sort_keys=True)
self._ensure_supporting_files()
def _ensure_supporting_files(self):
"""Create supporting files in the cache dir that are not really part of the cache."""
if self._cachedir.is_dir():
readme_path = self._cachedir / "README.md"
if not readme_path.is_file():
readme_path.write_text(README_CONTENT)
msg = u"# created by pytest automatically, do not change\n*"
self._cachedir.joinpath(".gitignore").write_text(msg, encoding="UTF-8")
class LFPlugin(object):
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = "lf", "failedfirst"
self.active = any(config.getoption(key) for key in active_keys)
self.lastfailed = config.cache.get("cache/lastfailed", {})
self._previously_failed_count = None
self._no_failures_behavior = self.config.getoption("last_failed_no_failures")
def pytest_report_collectionfinish(self):
if self.active and self.config.getoption("verbose") >= 0:
if not self._previously_failed_count:
return None
noun = "failure" if self._previously_failed_count == 1 else "failures"
suffix = " first" if self.config.getoption("failedfirst") else ""
mode = "rerun previous {count} {noun}{suffix}".format(
count=self._previously_failed_count, suffix=suffix, noun=noun
)
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if (report.when == "call" and report.passed) or report.skipped:
self.lastfailed.pop(report.nodeid, None)
elif report.failed:
self.lastfailed[report.nodeid] = True
def pytest_collectreport(self, report):
passed = report.outcome in ("passed", "skipped")
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update((item.nodeid, True) for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
if self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
self._previously_failed_count = len(previously_failed)
if not previously_failed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
return
if self.config.getoption("lf"):
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
else:
items[:] = previously_failed + previously_passed
elif self._no_failures_behavior == "none":
config.hook.pytest_deselected(items=items)
items[:] = []
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
saved_lastfailed = config.cache.get("cache/lastfailed", {})
if saved_lastfailed != self.lastfailed:
config.cache.set("cache/lastfailed", self.lastfailed)
class NFPlugin(object):
""" Plugin which implements the --nf (run new-first) option """
def __init__(self, config):
self.config = config
self.active = config.option.newfirst
self.cached_nodeids = config.cache.get("cache/nodeids", [])
def pytest_collection_modifyitems(self, session, config, items):
if self.active:
new_items = OrderedDict()
other_items = OrderedDict()
for item in items:
if item.nodeid not in self.cached_nodeids:
new_items[item.nodeid] = item
else:
other_items[item.nodeid] = item
items[:] = self._get_increasing_order(
six.itervalues(new_items)
) + self._get_increasing_order(six.itervalues(other_items))
self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]
def _get_increasing_order(self, items):
return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)
def pytest_sessionfinish(self, session):
config = self.config
if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
return
config.cache.set("cache/nodeids", self.cached_nodeids)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--lf",
"--last-failed",
action="store_true",
dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)",
)
group.addoption(
"--ff",
"--failed-first",
action="store_true",
dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown",
)
group.addoption(
"--nf",
"--new-first",
action="store_true",
dest="newfirst",
help="run tests from new files first, then the rest of the tests "
"sorted by file mtime",
)
group.addoption(
"--cache-show",
action="store_true",
dest="cacheshow",
help="show cache contents, don't perform collection or tests",
)
group.addoption(
"--cache-clear",
action="store_true",
dest="cacheclear",
help="remove all cache contents at start of test run.",
)
parser.addini("cache_dir", default=".pytest_cache", help="cache directory path.")
group.addoption(
"--lfnf",
"--last-failed-no-failures",
action="store",
dest="last_failed_no_failures",
choices=("all", "none"),
default="all",
help="change the behavior when no test failed in the last run or no "
"information about the last failures was found in the cache",
)
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache.for_config(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
config.pluginmanager.register(NFPlugin(config), "nfplugin")
@pytest.fixture
def cache(request):
"""
Return a cache object that can persist state between testing sessions.
cache.get(key, default)
cache.set(key, value)
Keys must be a ``/`` separated value, where the first part is usually the
name of your plugin or application to avoid clashes with other cache users.
Values can be any object handled by the json stdlib module.
"""
return request.config.cache
def pytest_report_header(config):
"""Display cachedir with --cache-show and if non-default."""
if config.option.verbose or config.getini("cache_dir") != ".pytest_cache":
cachedir = config.cache._cachedir
# TODO: evaluate generating upward relative paths
# starting with .., ../.. if sensible
try:
displaypath = cachedir.relative_to(config.rootdir)
except ValueError:
displaypath = cachedir
return "cachedir: {}".format(displaypath)
def cacheshow(config, session):
from pprint import pformat
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.is_dir():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir / "v"
tw.sep("-", "cache values")
for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()):
key = valpath.relative_to(vdir)
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, will be ignored" % key)
else:
tw.line("%s contains:" % key)
for line in pformat(val).splitlines():
tw.line(" " + line)
ddir = basedir / "d"
if ddir.is_dir():
contents = sorted(ddir.rglob("*"))
tw.sep("-", "cache directories")
for p in contents:
# if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.is_file():
key = p.relative_to(basedir)
tw.line("{} is a file of length {:d}".format(key, p.stat().st_size))
return 0
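A usage sketch for the cache fixture documented above; the key name "example/counter" is hypothetical. Keys are "/"-separated and values must be JSON-serializable:
def test_counts_runs(cache):
    runs = cache.get("example/counter", 0)
    cache.set("example/counter", runs + 1)
    assert runs >= 0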

@ -1,786 +0,0 @@
"""
per-test stdout/stderr capturing mechanism.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import io
import os
import sys
from io import UnsupportedOperation
from tempfile import TemporaryFile
import six
import pytest
from _pytest.compat import CaptureIO
patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"}
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
"--capture",
action="store",
default="fd" if hasattr(os, "dup") else "sys",
metavar="method",
choices=["fd", "sys", "no"],
help="per-test capturing method: one of fd|sys|no.",
)
group._addoption(
"-s",
action="store_const",
const="no",
dest="capture",
help="shortcut for --capture=no.",
)
@pytest.hookimpl(hookwrapper=True)
def pytest_load_initial_conftests(early_config, parser, args):
ns = early_config.known_args_namespace
if ns.capture == "fd":
_py36_windowsconsoleio_workaround(sys.stdout)
_colorama_workaround()
_readline_workaround()
pluginmanager = early_config.pluginmanager
capman = CaptureManager(ns.capture)
pluginmanager.register(capman, "capturemanager")
# make sure that capturemanager is properly reset at final shutdown
early_config.add_cleanup(capman.stop_global_capturing)
# make sure logging does not raise exceptions at the end
def silence_logging_at_shutdown():
if "logging" in sys.modules:
sys.modules["logging"].raiseExceptions = False
early_config.add_cleanup(silence_logging_at_shutdown)
# finally trigger conftest loading but while capturing (issue93)
capman.start_global_capturing()
outcome = yield
capman.suspend_global_capture()
if outcome.excinfo is not None:
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
class CaptureManager(object):
"""
Capture plugin: ensures that the appropriate capture method is enabled/disabled during collection and each
test phase (setup, call, teardown). After each of those points, the captured output is obtained and
attached to the collection/runtest report.
There are two levels of capture:
* global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled
during collection and each test phase.
* fixture: when a test function or one of its fixtures depends on the ``capsys`` or ``capfd`` fixtures. In this
case special handling is needed to ensure the fixtures take precedence over the global capture.
"""
def __init__(self, method):
self._method = method
self._global_capturing = None
self._current_item = None
def _getcapture(self, method):
if method == "fd":
return MultiCapture(out=True, err=True, Capture=FDCapture)
elif method == "sys":
return MultiCapture(out=True, err=True, Capture=SysCapture)
elif method == "no":
return MultiCapture(out=False, err=False, in_=False)
else:
raise ValueError("unknown capturing method: %r" % method)
# Global capturing control
def is_globally_capturing(self):
return self._method != "no"
def start_global_capturing(self):
assert self._global_capturing is None
self._global_capturing = self._getcapture(self._method)
self._global_capturing.start_capturing()
def stop_global_capturing(self):
if self._global_capturing is not None:
self._global_capturing.pop_outerr_to_orig()
self._global_capturing.stop_capturing()
self._global_capturing = None
def resume_global_capture(self):
self._global_capturing.resume_capturing()
def suspend_global_capture(self, in_=False):
cap = getattr(self, "_global_capturing", None)
if cap is not None:
cap.suspend_capturing(in_=in_)
def read_global_capture(self):
return self._global_capturing.readouterr()
# Fixture Control (it's just forwarding, think about removing this later)
def activate_fixture(self, item):
"""If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over
the global capture.
"""
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture._start()
def deactivate_fixture(self, item):
"""Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any."""
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture.close()
def suspend_fixture(self, item):
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture._suspend()
def resume_fixture(self, item):
fixture = getattr(item, "_capture_fixture", None)
if fixture is not None:
fixture._resume()
# Helper context managers
@contextlib.contextmanager
def global_and_fixture_disabled(self):
"""Context manager to temporarily disables global and current fixture capturing."""
# Need to undo local capsys-et-al if exists before disabling global capture
self.suspend_fixture(self._current_item)
self.suspend_global_capture(in_=False)
try:
yield
finally:
self.resume_global_capture()
self.resume_fixture(self._current_item)
@contextlib.contextmanager
def item_capture(self, when, item):
self.resume_global_capture()
self.activate_fixture(item)
try:
yield
finally:
self.deactivate_fixture(item)
self.suspend_global_capture(in_=False)
out, err = self.read_global_capture()
item.add_report_section(when, "stdout", out)
item.add_report_section(when, "stderr", err)
# Hooks
@pytest.hookimpl(hookwrapper=True)
def pytest_make_collect_report(self, collector):
if isinstance(collector, pytest.File):
self.resume_global_capture()
outcome = yield
self.suspend_global_capture()
out, err = self.read_global_capture()
rep = outcome.get_result()
if out:
rep.sections.append(("Captured stdout", out))
if err:
rep.sections.append(("Captured stderr", err))
else:
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(self, item):
self._current_item = item
yield
self._current_item = None
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item):
with self.item_capture("setup", item):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
with self.item_capture("call", item):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_teardown(self, item):
with self.item_capture("teardown", item):
yield
@pytest.hookimpl(tryfirst=True)
def pytest_keyboard_interrupt(self, excinfo):
self.stop_global_capturing()
@pytest.hookimpl(tryfirst=True)
def pytest_internalerror(self, excinfo):
self.stop_global_capturing()
capture_fixtures = {"capfd", "capfdbinary", "capsys", "capsysbinary"}
def _ensure_only_one_capture_fixture(request, name):
fixtures = set(request.fixturenames) & capture_fixtures - {name}
if fixtures:
fixtures = sorted(fixtures)
fixtures = fixtures[0] if len(fixtures) == 1 else fixtures
raise request.raiseerror(
"cannot use {} and {} at the same time".format(fixtures, name)
)
@pytest.fixture
def capsys(request):
"""Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make
captured output available via ``capsys.readouterr()`` method calls
which return a ``(out, err)`` namedtuple. ``out`` and ``err`` will be ``text``
objects.
"""
_ensure_only_one_capture_fixture(request, "capsys")
with _install_capture_fixture_on_item(request, SysCapture) as fixture:
yield fixture
@pytest.fixture
def capsysbinary(request):
"""Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make
captured output available via ``capsys.readouterr()`` method calls
which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes``
objects.
"""
_ensure_only_one_capture_fixture(request, "capsysbinary")
# Currently, the implementation uses the python3 specific `.buffer`
# property of CaptureIO.
if sys.version_info < (3,):
raise request.raiseerror("capsysbinary is only supported on python 3")
with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture:
yield fixture
@pytest.fixture
def capfd(request):
"""Enable capturing of writes to file descriptors ``1`` and ``2`` and make
captured output available via ``capfd.readouterr()`` method calls
which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text``
objects.
"""
_ensure_only_one_capture_fixture(request, "capfd")
if not hasattr(os, "dup"):
pytest.skip(
"capfd fixture needs os.dup function which is not available in this system"
)
with _install_capture_fixture_on_item(request, FDCapture) as fixture:
yield fixture
@pytest.fixture
def capfdbinary(request):
"""Enable capturing of write to file descriptors 1 and 2 and make
captured output available via ``capfdbinary.readouterr`` method calls
which return a ``(out, err)`` tuple. ``out`` and ``err`` will be
``bytes`` objects.
"""
_ensure_only_one_capture_fixture(request, "capfdbinary")
if not hasattr(os, "dup"):
pytest.skip(
"capfdbinary fixture needs os.dup function which is not available in this system"
)
with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture:
yield fixture
@contextlib.contextmanager
def _install_capture_fixture_on_item(request, capture_class):
"""
Context manager which creates a ``CaptureFixture`` instance and "installs" it on
the item/node of the given request. Used by ``capsys`` and ``capfd``.
The CaptureFixture is added as an attribute of the item because it needs to be accessed
by ``CaptureManager`` during its ``pytest_runtest_*`` hooks.
"""
request.node._capture_fixture = fixture = CaptureFixture(capture_class, request)
capmanager = request.config.pluginmanager.getplugin("capturemanager")
# need to activate this fixture right away in case it is being used by another fixture (setup phase)
# if this fixture is being used only by a test function (call phase), then we wouldn't need this
# activation, but it doesn't hurt
capmanager.activate_fixture(request.node)
yield fixture
fixture.close()
del request.node._capture_fixture
class CaptureFixture(object):
"""
Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary`
fixtures.
"""
def __init__(self, captureclass, request):
self.captureclass = captureclass
self.request = request
self._capture = None
self._captured_out = self.captureclass.EMPTY_BUFFER
self._captured_err = self.captureclass.EMPTY_BUFFER
def _start(self):
# Start if not started yet
if getattr(self, "_capture", None) is None:
self._capture = MultiCapture(
out=True, err=True, in_=False, Capture=self.captureclass
)
self._capture.start_capturing()
def close(self):
if self._capture is not None:
out, err = self._capture.pop_outerr_to_orig()
self._captured_out += out
self._captured_err += err
self._capture.stop_capturing()
self._capture = None
def readouterr(self):
"""Read and return the captured output so far, resetting the internal buffer.
:return: captured content as a namedtuple with ``out`` and ``err`` string attributes
"""
captured_out, captured_err = self._captured_out, self._captured_err
if self._capture is not None:
out, err = self._capture.readouterr()
captured_out += out
captured_err += err
self._captured_out = self.captureclass.EMPTY_BUFFER
self._captured_err = self.captureclass.EMPTY_BUFFER
return CaptureResult(captured_out, captured_err)
def _suspend(self):
"""Suspends this fixture's own capturing temporarily."""
self._capture.suspend_capturing()
def _resume(self):
"""Resumes this fixture's own capturing temporarily."""
self._capture.resume_capturing()
@contextlib.contextmanager
def disabled(self):
"""Temporarily disables capture while inside the 'with' block."""
capmanager = self.request.config.pluginmanager.getplugin("capturemanager")
with capmanager.global_and_fixture_disabled():
yield
def safe_text_dupfile(f, mode, default_encoding="UTF8"):
""" return an open text file object that's a duplicate of f on the
FD-level if possible.
"""
encoding = getattr(f, "encoding", None)
try:
fd = f.fileno()
except Exception:
if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"):
# we seem to have a text stream, let's just use it
return f
else:
newfd = os.dup(fd)
if "b" not in mode:
mode += "b"
f = os.fdopen(newfd, mode, 0) # no buffering
return EncodedFile(f, encoding or default_encoding)
class EncodedFile(object):
errors = "strict" # possibly needed by py3 code (issue555)
def __init__(self, buffer, encoding):
self.buffer = buffer
self.encoding = encoding
def write(self, obj):
if isinstance(obj, six.text_type):
obj = obj.encode(self.encoding, "replace")
self.buffer.write(obj)
def writelines(self, linelist):
data = "".join(linelist)
self.write(data)
@property
def name(self):
"""Ensure that file.name is a string."""
return repr(self.buffer)
def __getattr__(self, name):
return getattr(object.__getattribute__(self, "buffer"), name)
CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"])
class MultiCapture(object):
out = err = in_ = None
def __init__(self, out=True, err=True, in_=True, Capture=None):
if in_:
self.in_ = Capture(0)
if out:
self.out = Capture(1)
if err:
self.err = Capture(2)
def start_capturing(self):
if self.in_:
self.in_.start()
if self.out:
self.out.start()
if self.err:
self.err.start()
def pop_outerr_to_orig(self):
""" pop current snapshot out/err capture and flush to orig streams. """
out, err = self.readouterr()
if out:
self.out.writeorg(out)
if err:
self.err.writeorg(err)
return out, err
def suspend_capturing(self, in_=False):
if self.out:
self.out.suspend()
if self.err:
self.err.suspend()
if in_ and self.in_:
self.in_.suspend()
self._in_suspended = True
def resume_capturing(self):
if self.out:
self.out.resume()
if self.err:
self.err.resume()
if hasattr(self, "_in_suspended"):
self.in_.resume()
del self._in_suspended
def stop_capturing(self):
""" stop capturing and reset capturing streams """
if hasattr(self, "_reset"):
raise ValueError("was already stopped")
self._reset = True
if self.out:
self.out.done()
if self.err:
self.err.done()
if self.in_:
self.in_.done()
def readouterr(self):
""" return snapshot unicode value of stdout/stderr capturings. """
return CaptureResult(
self.out.snap() if self.out is not None else "",
self.err.snap() if self.err is not None else "",
)
class NoCapture(object):
EMPTY_BUFFER = None
__init__ = start = done = suspend = resume = lambda *args: None
class FDCaptureBinary(object):
"""Capture IO to/from a given os-level filedescriptor.
snap() produces `bytes`
"""
EMPTY_BUFFER = b""
def __init__(self, targetfd, tmpfile=None):
self.targetfd = targetfd
try:
self.targetfd_save = os.dup(self.targetfd)
except OSError:
self.start = lambda: None
self.done = lambda: None
else:
if targetfd == 0:
assert not tmpfile, "cannot set tmpfile with stdin"
tmpfile = open(os.devnull, "r")
self.syscapture = SysCapture(targetfd)
else:
if tmpfile is None:
f = TemporaryFile()
with f:
tmpfile = safe_text_dupfile(f, mode="wb+")
if targetfd in patchsysdict:
self.syscapture = SysCapture(targetfd, tmpfile)
else:
self.syscapture = NoCapture()
self.tmpfile = tmpfile
self.tmpfile_fd = tmpfile.fileno()
def __repr__(self):
return "<FDCapture %s oldfd=%s>" % (self.targetfd, self.targetfd_save)
def start(self):
""" Start capturing on targetfd using memorized tmpfile. """
try:
os.fstat(self.targetfd_save)
except (AttributeError, OSError):
raise ValueError("saved filedescriptor not valid anymore")
os.dup2(self.tmpfile_fd, self.targetfd)
self.syscapture.start()
def snap(self):
self.tmpfile.seek(0)
res = self.tmpfile.read()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
def done(self):
""" stop capturing, restore streams, return original capture file,
seeked to position zero. """
targetfd_save = self.__dict__.pop("targetfd_save")
os.dup2(targetfd_save, self.targetfd)
os.close(targetfd_save)
self.syscapture.done()
_attempt_to_close_capture_file(self.tmpfile)
def suspend(self):
self.syscapture.suspend()
os.dup2(self.targetfd_save, self.targetfd)
def resume(self):
self.syscapture.resume()
os.dup2(self.tmpfile_fd, self.targetfd)
def writeorg(self, data):
""" write to original file descriptor. """
if isinstance(data, six.text_type):
data = data.encode("utf8") # XXX use encoding of original stream
os.write(self.targetfd_save, data)
class FDCapture(FDCaptureBinary):
"""Capture IO to/from a given os-level filedescriptor.
snap() produces text
"""
EMPTY_BUFFER = str()
def snap(self):
res = FDCaptureBinary.snap(self)
enc = getattr(self.tmpfile, "encoding", None)
if enc and isinstance(res, bytes):
res = six.text_type(res, enc, "replace")
return res
class SysCapture(object):
EMPTY_BUFFER = str()
def __init__(self, fd, tmpfile=None):
name = patchsysdict[fd]
self._old = getattr(sys, name)
self.name = name
if tmpfile is None:
if name == "stdin":
tmpfile = DontReadFromInput()
else:
tmpfile = CaptureIO()
self.tmpfile = tmpfile
def start(self):
setattr(sys, self.name, self.tmpfile)
def snap(self):
res = self.tmpfile.getvalue()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
def done(self):
setattr(sys, self.name, self._old)
del self._old
_attempt_to_close_capture_file(self.tmpfile)
def suspend(self):
setattr(sys, self.name, self._old)
def resume(self):
setattr(sys, self.name, self.tmpfile)
def writeorg(self, data):
self._old.write(data)
self._old.flush()
class SysCaptureBinary(SysCapture):
EMPTY_BUFFER = b""
def snap(self):
res = self.tmpfile.buffer.getvalue()
self.tmpfile.seek(0)
self.tmpfile.truncate()
return res
class DontReadFromInput(six.Iterator):
"""Temporary stub class. Ideally when stdin is accessed, the
capturing should be turned off, with possibly all data captured
so far sent to the screen. This should be configurable, though,
because in automated test runs it is better to crash than
hang indefinitely.
"""
encoding = None
def read(self, *args):
raise IOError("reading from stdin while output is captured")
readline = read
readlines = read
__next__ = read
def __iter__(self):
return self
def fileno(self):
raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()")
def isatty(self):
return False
def close(self):
pass
@property
def buffer(self):
if sys.version_info >= (3, 0):
return self
else:
raise AttributeError("redirected stdin has no attribute buffer")
def _colorama_workaround():
"""
Ensure colorama is imported so that it attaches to the correct stdio
handles on Windows.
colorama uses the terminal at import time. So if something does the
first import of colorama while I/O capture is active, colorama will
fail in various ways.
"""
if not sys.platform.startswith("win32"):
return
try:
import colorama # noqa
except ImportError:
pass
def _readline_workaround():
"""
Ensure readline is imported so that it attaches to the correct stdio
handles on Windows.
Pdb uses readline support where available--when not running from the Python
prompt, the readline module is not imported until running the pdb REPL. If
running pytest with the --pdb option this means the readline module is not
imported until after I/O capture has been started.
This is a problem for pyreadline, which is often used to implement readline
support on Windows, as it does not attach to the correct handles for stdout
and/or stdin if they have been redirected by the FDCapture mechanism. This
workaround ensures that readline is imported before I/O capture is setup so
that it can attach to the actual stdin/out for the console.
See https://github.com/pytest-dev/pytest/pull/1281
"""
if not sys.platform.startswith("win32"):
return
try:
import readline # noqa
except ImportError:
pass
def _py36_windowsconsoleio_workaround(stream):
"""
Python 3.6 implemented unicode console handling for Windows. This works
by reading/writing to the raw console handle using
``{Read,Write}ConsoleW``.
The problem is that we are going to ``dup2`` over the stdio file
descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the
handles used by Python to write to the console. Though there is still some
weirdness and the console handle seems to only be closed randomly and not
on the first call to ``CloseHandle``, or maybe it gets reopened with the
same handle value when we suspend capturing.
The workaround in this case will reopen stdio with a different fd which
also means a different handle by replicating the logic in
"Py_lifecycle.c:initstdio/create_stdio".
:param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given
here as parameter for unittesting purposes.
See https://github.com/pytest-dev/py/issues/103
"""
if not sys.platform.startswith("win32") or sys.version_info[:2] < (3, 6):
return
# bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666)
if not hasattr(stream, "buffer"):
return
buffered = hasattr(stream.buffer, "raw")
raw_stdout = stream.buffer.raw if buffered else stream.buffer
if not isinstance(raw_stdout, io._WindowsConsoleIO):
return
def _reopen_stdio(f, mode):
if not buffered and mode[0] == "w":
buffering = 0
else:
buffering = -1
return io.TextIOWrapper(
open(os.dup(f.fileno()), mode, buffering),
f.encoding,
f.errors,
f.newlines,
f.line_buffering,
)
sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, "rb")
sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, "wb")
sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, "wb")
def _attempt_to_close_capture_file(f):
"""Suppress IOError when closing the temporary file used for capturing streams in py27 (#2370)"""
if six.PY2:
try:
f.close()
except IOError:
pass
else:
f.close()
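A usage sketch for the capsys fixture documented above: readouterr() returns a (out, err) namedtuple of text, and disabled() temporarily bypasses capturing. The test itself is a hypothetical example:
import sys
def test_greeting(capsys):
    print("hello")
    sys.stderr.write("oops\n")
    captured = capsys.readouterr()
    assert captured.out == "hello\n"
    assert captured.err == "oops\n"
    with capsys.disabled():
        print("this goes straight to the real stdout")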

@ -1,443 +0,0 @@
"""
python version compatibility code
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import functools
import inspect
import re
import sys
from contextlib import contextmanager
import py
import six
from six import text_type
import _pytest
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
_PY3 = sys.version_info > (3, 0)
_PY2 = not _PY3
if _PY3:
from inspect import signature, Parameter as Parameter
else:
from funcsigs import signature, Parameter as Parameter
NoneType = type(None)
NOTSET = object()
PY35 = sys.version_info[:2] >= (3, 5)
PY36 = sys.version_info[:2] >= (3, 6)
MODULE_NOT_FOUND_ERROR = "ModuleNotFoundError" if PY36 else "ImportError"
if _PY3:
from collections.abc import MutableMapping as MappingMixin
from collections.abc import Mapping, Sequence
else:
# those raise DeprecationWarnings in Python >=3.7
from collections import MutableMapping as MappingMixin # noqa
from collections import Mapping, Sequence # noqa
if sys.version_info >= (3, 4):
from importlib.util import spec_from_file_location
else:
def spec_from_file_location(*_, **__):
return None
def _format_args(func):
return str(signature(func))
isfunction = inspect.isfunction
isclass = inspect.isclass
# used to work around a python2 exception info leak
exc_clear = getattr(sys, "exc_clear", lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(""))
def is_generator(func):
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def iscoroutinefunction(func):
"""Return True if func is a decorated coroutine function.
Note: copied and modified from Python 3.5's builtin coroutines.py to avoid importing asyncio directly,
which in turn also initializes the "logging" module as a side effect (see issue #8).
"""
return getattr(func, "_is_coroutine", False) or (
hasattr(inspect, "iscoroutinefunction") and inspect.iscoroutinefunction(func)
)
def getlocation(function, curdir):
function = get_real_func(function)
fn = py.path.local(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" % (fn, lineno + 1)
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_modules = [sys.modules.get("mock"), sys.modules.get("unittest.mock")]
if any(mock_modules):
sentinels = [m.DEFAULT for m in mock_modules if m is not None]
return len(
[p for p in patchings if not p.attribute_name and p.new in sentinels]
)
return len(patchings)
def getfuncargnames(function, is_method=False, cls=None):
"""Returns the names of a function's mandatory arguments.
This should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
The is_method and cls arguments indicate that the function should
be treated as a bound method even though it's not, unless (in the
case of cls) the function is a static method.
@RonnyPfannschmidt: This function should be refactored when we
revisit fixtures. The fixture mechanism should ask the node for
the fixture names, and not try to obtain directly from the
function object well after collection has occurred.
"""
# The parameters attribute of a Signature object contains an
# ordered mapping of parameter names to Parameter instances. This
# creates a tuple of the names of the parameters that don't have
# defaults.
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail(
"Could not determine arguments of {!r}: {}".format(function, e),
pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
p.kind is Parameter.POSITIONAL_OR_KEYWORD
or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, remove the first
# parameter name.
if is_method or (
cls and not isinstance(cls.__dict__.get(function.__name__, None), staticmethod)
):
arg_names = arg_names[1:]
# Remove any names that will be replaced with mocks.
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
@contextmanager
def dummy_context_manager():
"""Context manager that does nothing, useful in situations where you might need an actual context manager or not
depending on some condition. Using this allows keeping the same code."""
yield
def get_default_arg_names(function):
# Note: this code intentionally mirrors the code at the beginning of getfuncargnames,
# to get the arguments which were excluded from its result because they had default values
return tuple(
p.name
for p in signature(function).parameters.values()
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
and p.default is not Parameter.empty
)
if _PY3:
STRING_TYPES = bytes, str
UNICODE_TYPES = six.text_type
if PY35:
def _bytes_to_ascii(val):
return val.decode("ascii", "backslashreplace")
else:
def _bytes_to_ascii(val):
if val:
# source: http://goo.gl/bGsnwC
encoded_bytes, _ = codecs.escape_encode(val)
return encoded_bytes.decode("ascii")
else:
# empty bytes crashes codecs.escape_encode (#1087)
return ""
def ascii_escaped(val):
"""If val is pure ascii, returns it as a str(). Otherwise, escapes
bytes objects into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
'4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'
note:
the obvious "v.decode('unicode-escape')" will return
valid utf-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a utf-8 string.
"""
if isinstance(val, bytes):
return _bytes_to_ascii(val)
else:
return val.encode("unicode_escape").decode("ascii")
else:
STRING_TYPES = six.string_types
UNICODE_TYPES = six.text_type
def ascii_escaped(val):
"""In py2 bytes and str are the same type, so return if it's a bytes
object, return it unchanged if it is a full ascii string,
otherwise escape it into its binary form.
If it's a unicode string, change the unicode characters into
unicode escapes.
"""
if isinstance(val, bytes):
try:
return val.encode("ascii")
except UnicodeDecodeError:
return val.encode("string-escape")
else:
return val.encode("unicode-escape")
class _PytestWrapper(object):
"""Dummy wrapper around a function object for internal use only.
Used to correctly unwrap the underlying function object
when we are creating fixtures, because we wrap the function object ourselves with a decorator
to issue warnings when the fixture function is called directly.
"""
def __init__(self, obj):
self.obj = obj
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
start_obj = obj
for i in range(100):
# __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
# to trigger a warning if it gets called directly instead of by pytest: we don't
# want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
new_obj = getattr(obj, "__pytest_wrapped__", None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, "__wrapped__", None)
if new_obj is None:
break
obj = new_obj
else:
raise ValueError(
("could not find real function of {start}\nstopped at {current}").format(
start=py.io.saferepr(start_obj), current=py.io.saferepr(obj)
)
)
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def get_real_method(obj, holder):
"""
Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time
returning a bound method to ``holder`` if the original object was a bound method.
"""
try:
is_method = hasattr(obj, "__func__")
obj = get_real_func(obj)
except Exception:
return obj
if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
obj = obj.__get__(holder)
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, "place_as"):
obj = obj.place_as
fslineno = _pytest._code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException
instead of Exception (for more details check #2707)
"""
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default
def safe_isclass(obj):
"""Ignore any exception via isinstance on Python 3."""
try:
return isclass(obj)
except Exception:
return False
def _is_unittest_unexpected_success_a_failure():
"""Return if the test suite should fail if an @expectedFailure unittest test PASSES.
From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
Changed in version 3.4: Returns False if there were any
unexpectedSuccesses from tests marked with the expectedFailure() decorator.
"""
return sys.version_info >= (3, 4)
if _PY3:
def safe_str(v):
"""returns v as string"""
return str(v)
else:
def safe_str(v):
"""returns v as string, converting to ascii if necessary"""
try:
return str(v)
except UnicodeError:
if not isinstance(v, text_type):
v = text_type(v)
errors = "replace"
return v.encode("utf-8", errors)
COLLECT_FAKEMODULE_ATTRIBUTES = (
"Collector",
"Module",
"Generator",
"Function",
"Instance",
"Session",
"Item",
"Class",
"File",
"_fillfuncargs",
)
def _setup_collect_fakemodule():
from types import ModuleType
import pytest
pytest.collect = ModuleType("pytest.collect")
pytest.collect.__all__ = [] # used for setns
for attr in COLLECT_FAKEMODULE_ATTRIBUTES:
setattr(pytest.collect, attr, getattr(pytest, attr))
if _PY2:
# Without this the test_dupfile_on_textio will fail, otherwise CaptureIO could directly inherit from StringIO.
from py.io import TextIO
class CaptureIO(TextIO):
@property
def encoding(self):
return getattr(self, "_encoding", "UTF-8")
else:
import io
class CaptureIO(io.TextIOWrapper):
def __init__(self):
super(CaptureIO, self).__init__(
io.BytesIO(), encoding="UTF-8", newline="", write_through=True
)
def getvalue(self):
return self.buffer.getvalue().decode("UTF-8")
class FuncargnamesCompatAttr(object):
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
if six.PY2:
def lru_cache(*_, **__):
def dec(fn):
return fn
return dec
else:
from functools import lru_cache # noqa: F401
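The unwrapping that get_real_func performs above relies on attributes the standard library already sets; a minimal Python 3 sketch of that contract, using only functools (the function names below are invented for illustration):

import functools
def fixture_func():
    """The function a user actually wrote."""
    return 42
@functools.wraps(fixture_func)
def wrapper(*args, **kwargs):
    # functools.wraps copies metadata and sets __wrapped__, the attribute the
    # loop in get_real_func follows until it reaches the real function.
    return fixture_func(*args, **kwargs)
assert wrapper.__wrapped__ is fixture_func
# functools.partial exposes the underlying callable as .func, which is the
# final unwrapping step performed above.
assert functools.partial(fixture_func).func is fixture_func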

File diff suppressed because it is too large

@ -1,409 +0,0 @@
import argparse
import sys as _sys
import warnings
from gettext import gettext as _
import py
import six
from ..main import EXIT_USAGEERROR
FILE_OR_DIR = "file_or_dir"
class Parser(object):
""" Parser for command line arguments and ini-file values.
:ivar extra_info: dict of generic param -> value to display in case
there's an error processing the command line arguments.
"""
def __init__(self, usage=None, processopt=None):
self._anonymous = OptionGroup("custom options", parser=self)
self._groups = []
self._processopt = processopt
self._usage = usage
self._inidict = {}
self._ininames = []
self.extra_info = {}
def processoption(self, option):
if self._processopt:
if option.dest:
self._processopt(option)
def getgroup(self, name, description="", after=None):
""" get (or create) a named option Group.
:name: name of the option group.
:description: long description for --help output.
:after: name of other group, used for ordering --help output.
The returned group object has an ``addoption`` method with the same
signature as :py:func:`parser.addoption
<_pytest.config.Parser.addoption>` but will be shown in the
respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
return group
group = OptionGroup(name, description, parser=self)
i = 0
for i, grp in enumerate(self._groups):
if grp.name == after:
break
self._groups.insert(i + 1, group)
return group
def addoption(self, *opts, **attrs):
""" register a command line option.
:opts: option names, can be short or long options.
:attrs: same attributes which the ``add_option()`` function of the
`argparse library
<http://docs.python.org/2/library/argparse.html>`_
accepts.
After command line parsing options are available on the pytest config
object via ``config.option.NAME`` where ``NAME`` is usually set
by passing a ``dest`` attribute, for example
``addoption("--long", dest="NAME", ...)``.
"""
self._anonymous.addoption(*opts, **attrs)
def parse(self, args, namespace=None):
from _pytest._argcomplete import try_argcomplete
self.optparser = self._getparser()
try_argcomplete(self.optparser)
args = [str(x) if isinstance(x, py.path.local) else x for x in args]
return self.optparser.parse_args(args, namespace=namespace)
def _getparser(self):
from _pytest._argcomplete import filescompleter
optparser = MyOptionParser(self, self.extra_info)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
arggroup = optparser.add_argument_group(desc)
for option in group.options:
n = option.names()
a = option.attrs()
arggroup.add_argument(*n, **a)
# bash like autocompletion for dirs (appending '/')
optparser.add_argument(FILE_OR_DIR, nargs="*").completer = filescompleter
return optparser
def parse_setoption(self, args, option, namespace=None):
parsedoption = self.parse(args, namespace=namespace)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
return getattr(parsedoption, FILE_OR_DIR)
def parse_known_args(self, args, namespace=None):
"""parses and returns a namespace object with known arguments at this
point.
"""
return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
def parse_known_and_unknown_args(self, args, namespace=None):
"""parses and returns a namespace object with known arguments, and
the remaining arguments unknown at this point.
"""
optparser = self._getparser()
args = [str(x) if isinstance(x, py.path.local) else x for x in args]
return optparser.parse_known_args(args, namespace=namespace)
def addini(self, name, help, type=None, default=None):
""" register an ini-file option.
:name: name of the ini-variable
:type: type of the variable, can be ``pathlist``, ``args``, ``linelist``
or ``bool``.
:default: default value if no ini-file option exists but is queried.
The value of ini-variables can be retrieved via a call to
:py:func:`config.getini(name) <_pytest.config.Config.getini>`.
"""
assert type in (None, "pathlist", "args", "linelist", "bool")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
class ArgumentError(Exception):
"""
Raised if an Argument instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class Argument(object):
"""class that mimics the necessary behaviour of optparse.Option
it's currently a least-effort implementation,
ignoring choices and integer prefixes
https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
"""
_typ_map = {"int": int, "string": str, "float": float, "complex": complex}
def __init__(self, *names, **attrs):
"""store parms in private vars for use in add_argument"""
self._attrs = attrs
self._short_opts = []
self._long_opts = []
self.dest = attrs.get("dest")
if "%default" in (attrs.get("help") or ""):
warnings.warn(
'pytest now uses argparse. "%default" should be'
' changed to "%(default)s" ',
DeprecationWarning,
stacklevel=3,
)
try:
typ = attrs["type"]
except KeyError:
pass
else:
# this might raise a KeyError as well; we don't want to catch that
if isinstance(typ, six.string_types):
if typ == "choice":
warnings.warn(
"`type` argument to addoption() is the string %r."
" For choices this is optional and can be omitted, "
" but when supplied should be a type (for example `str` or `int`)."
" (options: %s)" % (typ, names),
DeprecationWarning,
stacklevel=4,
)
# argparse expects a type here; take it from
# the type of the first element
attrs["type"] = type(attrs["choices"][0])
else:
warnings.warn(
"`type` argument to addoption() is the string %r, "
" but when supplied should be a type (for example `str` or `int`)."
" (options: %s)" % (typ, names),
DeprecationWarning,
stacklevel=4,
)
attrs["type"] = Argument._typ_map[typ]
# used in test_parseopt -> test_parse_defaultgetter
self.type = attrs["type"]
else:
self.type = typ
try:
# attribute existence is tested in Config._processopt
self.default = attrs["default"]
except KeyError:
pass
self._set_opt_strings(names)
if not self.dest:
if self._long_opts:
self.dest = self._long_opts[0][2:].replace("-", "_")
else:
try:
self.dest = self._short_opts[0][1:]
except IndexError:
raise ArgumentError("need a long or short option", self)
def names(self):
return self._short_opts + self._long_opts
def attrs(self):
# update any attributes set by processopt
attrs = "default dest help".split()
if self.dest:
attrs.append(self.dest)
for attr in attrs:
try:
self._attrs[attr] = getattr(self, attr)
except AttributeError:
pass
if self._attrs.get("help"):
a = self._attrs["help"]
a = a.replace("%default", "%(default)s")
# a = a.replace('%prog', '%(prog)s')
self._attrs["help"] = a
return self._attrs
def _set_opt_strings(self, opts):
"""directly from optparse
might not be necessary as this is passed to argparse later on"""
for opt in opts:
if len(opt) < 2:
raise ArgumentError(
"invalid option string %r: "
"must be at least two characters long" % opt,
self,
)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise ArgumentError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self,
)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise ArgumentError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self,
)
self._long_opts.append(opt)
def __repr__(self):
args = []
if self._short_opts:
args += ["_short_opts: " + repr(self._short_opts)]
if self._long_opts:
args += ["_long_opts: " + repr(self._long_opts)]
args += ["dest: " + repr(self.dest)]
if hasattr(self, "type"):
args += ["type: " + repr(self.type)]
if hasattr(self, "default"):
args += ["default: " + repr(self.default)]
return "Argument({})".format(", ".join(args))
class OptionGroup(object):
def __init__(self, name, description="", parser=None):
self.name = name
self.description = description
self.options = []
self.parser = parser
def addoption(self, *optnames, **attrs):
""" add an option to this group.
if a shortened version of a long option is specified it will
be suppressed in the help. addoption('--twowords', '--two-words')
results in help showing '--two-words' only, but --twowords gets
accepted **and** the automatic destination is in args.twowords
"""
conflict = set(optnames).intersection(
name for opt in self.options for name in opt.names()
)
if conflict:
raise ValueError("option names %s already added" % conflict)
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=False)
def _addoption(self, *optnames, **attrs):
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=True)
def _addoption_instance(self, option, shortupper=False):
if not shortupper:
for opt in option._short_opts:
if opt[0] == "-" and opt[1].islower():
raise ValueError("lowercase shortoptions reserved")
if self.parser:
self.parser.processoption(option)
self.options.append(option)
class MyOptionParser(argparse.ArgumentParser):
def __init__(self, parser, extra_info=None):
if not extra_info:
extra_info = {}
self._parser = parser
argparse.ArgumentParser.__init__(
self,
usage=parser._usage,
add_help=False,
formatter_class=DropShorterLongHelpFormatter,
)
# extra_info is a dict of (param -> value) to display if there's
# a usage error to provide more contextual information to the user
self.extra_info = extra_info
def error(self, message):
"""error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
Overrides the method in parent class to change exit code"""
self.print_usage(_sys.stderr)
args = {"prog": self.prog, "message": message}
self.exit(EXIT_USAGEERROR, _("%(prog)s: error: %(message)s\n") % args)
def parse_args(self, args=None, namespace=None):
"""allow splitting of positional arguments"""
args, argv = self.parse_known_args(args, namespace)
if argv:
for arg in argv:
if arg and arg[0] == "-":
lines = ["unrecognized arguments: %s" % (" ".join(argv))]
for k, v in sorted(self.extra_info.items()):
lines.append(" %s: %s" % (k, v))
self.error("\n".join(lines))
getattr(args, FILE_OR_DIR).extend(argv)
return args
class DropShorterLongHelpFormatter(argparse.HelpFormatter):
"""shorten help for long options that differ only in extra hyphens
- collapse **long** options that are the same except for extra hyphens
- special action attribute map_long_option allows suppressing additional
long options
- shortcut if there are only two options and one of them is a short one
- cache result on action object as this is called at least 2 times
"""
def _format_action_invocation(self, action):
orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
if orgstr and orgstr[0] != "-": # only optional arguments
return orgstr
res = getattr(action, "_formatted_action_invocation", None)
if res:
return res
options = orgstr.split(", ")
if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
# a shortcut for '-h, --help' or '--abc', '-a'
action._formatted_action_invocation = orgstr
return orgstr
return_list = []
option_map = getattr(action, "map_long_option", {})
if option_map is None:
option_map = {}
short_long = {}
for option in options:
if len(option) == 2 or option[2] == " ":
continue
if not option.startswith("--"):
raise ArgumentError(
'long optional argument without "--": [%s]' % (option), self
)
xxoption = option[2:]
if xxoption.split()[0] not in option_map:
shortened = xxoption.replace("-", "")
if shortened not in short_long or len(short_long[shortened]) < len(
xxoption
):
short_long[shortened] = xxoption
# now short_long has been filled out to the longest with dashes
# **and** we keep the right option ordering from add_argument
for option in options:
if len(option) == 2 or option[2] == " ":
return_list.append(option)
if option[2:] == short_long.get(option.replace("-", "")):
return_list.append(option.replace(" ", "=", 1))
action._formatted_action_invocation = ", ".join(return_list)
return action._formatted_action_invocation
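A minimal sketch of how this Parser surface is typically reached from a conftest.py, via pytest's documented pytest_addoption hook; the option and ini names below are invented for illustration:

# conftest.py
def pytest_addoption(parser):
    group = parser.getgroup("demo", "demo plugin options")
    group.addoption(
        "--demo-flag",
        action="store_true",
        default=False,
        dest="demo_flag",
        help="enable demo behaviour",
    )
    parser.addini("demo_timeout", "timeout used by the demo plugin", default="10")
def pytest_configure(config):
    if config.getoption("demo_flag"):
        print("demo enabled, timeout=%s" % config.getini("demo_timeout"))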

@ -1,9 +0,0 @@
class UsageError(Exception):
""" error in pytest usage or invocation"""
class PrintHelp(Exception):
"""Raised when pytest should print it's help to skip the rest of the
argument parsing and validation."""
pass

@ -1,148 +0,0 @@
import os
import py
from .exceptions import UsageError
def exists(path, ignore=EnvironmentError):
try:
return path.check()
except ignore:
return False
def getcfg(args, config=None):
"""
Search the list of arguments for a valid ini-file for pytest,
and return a tuple of (rootdir, inifile, cfg-dict).
note: config is optional and used only to issue warnings explicitly (#2891).
"""
from _pytest.deprecated import CFG_PYTEST_SECTION
inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"]
args = [x for x in args if not str(x).startswith("-")]
if not args:
args = [py.path.local()]
for arg in args:
arg = py.path.local(arg)
for base in arg.parts(reverse=True):
for inibasename in inibasenames:
p = base.join(inibasename)
if exists(p):
iniconfig = py.iniconfig.IniConfig(p)
if "pytest" in iniconfig.sections:
if inibasename == "setup.cfg" and config is not None:
from _pytest.warnings import _issue_config_warning
from _pytest.warning_types import RemovedInPytest4Warning
_issue_config_warning(
RemovedInPytest4Warning(
CFG_PYTEST_SECTION.format(filename=inibasename)
),
config=config,
)
return base, p, iniconfig["pytest"]
if (
inibasename == "setup.cfg"
and "tool:pytest" in iniconfig.sections
):
return base, p, iniconfig["tool:pytest"]
elif inibasename == "pytest.ini":
# allowed to be empty
return base, p, {}
return None, None, None
def get_common_ancestor(paths):
common_ancestor = None
for path in paths:
if not path.exists():
continue
if common_ancestor is None:
common_ancestor = path
else:
if path.relto(common_ancestor) or path == common_ancestor:
continue
elif common_ancestor.relto(path):
common_ancestor = path
else:
shared = path.common(common_ancestor)
if shared is not None:
common_ancestor = shared
if common_ancestor is None:
common_ancestor = py.path.local()
elif common_ancestor.isfile():
common_ancestor = common_ancestor.dirpath()
return common_ancestor
def get_dirs_from_args(args):
def is_option(x):
return str(x).startswith("-")
def get_file_part_from_node_id(x):
return str(x).split("::")[0]
def get_dir_from_path(path):
if path.isdir():
return path
return py.path.local(path.dirname)
# These look like paths but may not exist
possible_paths = (
py.path.local(get_file_part_from_node_id(arg))
for arg in args
if not is_option(arg)
)
return [get_dir_from_path(path) for path in possible_paths if path.exists()]
def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None):
dirs = get_dirs_from_args(args)
if inifile:
iniconfig = py.iniconfig.IniConfig(inifile)
is_cfg_file = str(inifile).endswith(".cfg")
sections = ["tool:pytest", "pytest"] if is_cfg_file else ["pytest"]
for section in sections:
try:
inicfg = iniconfig[section]
if is_cfg_file and section == "pytest" and config is not None:
from _pytest.deprecated import CFG_PYTEST_SECTION
from _pytest.warnings import _issue_config_warning
# TODO: [pytest] section in *.cfg files is deprecated. Need refactoring once
# the deprecation expires.
_issue_config_warning(
CFG_PYTEST_SECTION.format(filename=str(inifile)), config
)
break
except KeyError:
inicfg = None
rootdir = get_common_ancestor(dirs)
else:
ancestor = get_common_ancestor(dirs)
rootdir, inifile, inicfg = getcfg([ancestor], config=config)
if rootdir is None:
for rootdir in ancestor.parts(reverse=True):
if rootdir.join("setup.py").exists():
break
else:
rootdir, inifile, inicfg = getcfg(dirs, config=config)
if rootdir is None:
rootdir = get_common_ancestor([py.path.local(), ancestor])
is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/"
if is_fs_root:
rootdir = ancestor
if rootdir_cmd_arg:
rootdir_abs_path = py.path.local(os.path.expandvars(rootdir_cmd_arg))
if not os.path.isdir(str(rootdir_abs_path)):
raise UsageError(
"Directory '{}' not found. Check your '--rootdir' option.".format(
rootdir_abs_path
)
)
rootdir = rootdir_abs_path
return rootdir, inifile, inicfg or {}
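A minimal sketch of calling getcfg above directly; the import path is an assumption for this pytest version, and in normal use determine_setup is invoked by pytest itself during startup:

import py
from _pytest.config.findpaths import getcfg  # import path assumed
# Walks upward from each argument looking for pytest.ini, tox.ini or setup.cfg
# with a [pytest]/[tool:pytest] section, as implemented above.
rootdir, inifile, inicfg = getcfg([py.path.local(".")])
print(rootdir, inifile, inicfg)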

@ -1,237 +0,0 @@
""" interactive debugging with PDB, the Python Debugger. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pdb
import sys
from doctest import UnexpectedException
from _pytest import outcomes
from _pytest.config import hookimpl
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
"--pdb",
dest="usepdb",
action="store_true",
help="start the interactive Python debugger on errors or KeyboardInterrupt.",
)
group._addoption(
"--pdbcls",
dest="usepdb_cls",
metavar="modulename:classname",
help="start a custom interactive Python debugger on errors. "
"For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
)
group._addoption(
"--trace",
dest="trace",
action="store_true",
help="Immediately break when running each test.",
)
def pytest_configure(config):
if config.getvalue("usepdb_cls"):
modname, classname = config.getvalue("usepdb_cls").split(":")
__import__(modname)
pdb_cls = getattr(sys.modules[modname], classname)
else:
pdb_cls = pdb.Pdb
if config.getvalue("trace"):
config.pluginmanager.register(PdbTrace(), "pdbtrace")
if config.getvalue("usepdb"):
config.pluginmanager.register(PdbInvoke(), "pdbinvoke")
pytestPDB._saved.append(
(pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config, pytestPDB._pdb_cls)
)
pdb.set_trace = pytestPDB.set_trace
pytestPDB._pluginmanager = config.pluginmanager
pytestPDB._config = config
pytestPDB._pdb_cls = pdb_cls
# NOTE: not using pytest_unconfigure, since it might get called although
# pytest_configure was not (if another plugin raises UsageError).
def fin():
(
pdb.set_trace,
pytestPDB._pluginmanager,
pytestPDB._config,
pytestPDB._pdb_cls,
) = pytestPDB._saved.pop()
config._cleanup.append(fin)
class pytestPDB(object):
""" Pseudo PDB that defers to the real pdb. """
_pluginmanager = None
_config = None
_pdb_cls = pdb.Pdb
_saved = []
@classmethod
def set_trace(cls, set_break=True):
""" invoke PDB set_trace debugging, dropping any IO capturing. """
import _pytest.config
frame = sys._getframe().f_back
if cls._pluginmanager is not None:
capman = cls._pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
tw = _pytest.config.create_terminal_writer(cls._config)
tw.line()
if capman and capman.is_globally_capturing():
tw.sep(">", "PDB set_trace (IO-capturing turned off)")
else:
tw.sep(">", "PDB set_trace")
class _PdbWrapper(cls._pdb_cls, object):
_pytest_capman = capman
_continued = False
def do_continue(self, arg):
ret = super(_PdbWrapper, self).do_continue(arg)
if self._pytest_capman:
tw = _pytest.config.create_terminal_writer(cls._config)
tw.line()
if self._pytest_capman.is_globally_capturing():
tw.sep(">", "PDB continue (IO-capturing resumed)")
else:
tw.sep(">", "PDB continue")
self._pytest_capman.resume_global_capture()
cls._pluginmanager.hook.pytest_leave_pdb(
config=cls._config, pdb=self
)
self._continued = True
return ret
do_c = do_cont = do_continue
def setup(self, f, tb):
"""Suspend on setup().
Needed after do_continue resumed, when entering another
breakpoint again.
"""
ret = super(_PdbWrapper, self).setup(f, tb)
if not ret and self._continued:
# pdb.setup() returns True if the command wants to exit
# from the interaction: do not suspend capturing then.
if self._pytest_capman:
self._pytest_capman.suspend_global_capture(in_=True)
return ret
_pdb = _PdbWrapper()
cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb)
else:
_pdb = cls._pdb_cls()
if set_break:
_pdb.set_trace(frame)
class PdbInvoke(object):
def pytest_exception_interact(self, node, call, report):
capman = node.config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stdout.write(err)
_enter_pdb(node, call.excinfo, report)
def pytest_internalerror(self, excrepr, excinfo):
tb = _postmortem_traceback(excinfo)
post_mortem(tb)
class PdbTrace(object):
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(self, pyfuncitem):
_test_pytest_function(pyfuncitem)
yield
def _test_pytest_function(pyfuncitem):
pytestPDB.set_trace(set_break=False)
testfunction = pyfuncitem.obj
pyfuncitem.obj = pdb.runcall
if pyfuncitem._isyieldedfunction():
arg_list = list(pyfuncitem._args)
arg_list.insert(0, testfunction)
pyfuncitem._args = tuple(arg_list)
else:
if "func" in pyfuncitem._fixtureinfo.argnames:
raise ValueError("--trace can't be used with a fixture named func!")
pyfuncitem.funcargs["func"] = testfunction
new_list = list(pyfuncitem._fixtureinfo.argnames)
new_list.append("func")
pyfuncitem._fixtureinfo.argnames = tuple(new_list)
def _enter_pdb(node, excinfo, rep):
# XXX we re-use the TerminalReporter's terminalwriter
# because this seems to avoid some encoding related troubles
# for not completely clear reasons.
tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
tw.line()
showcapture = node.config.option.showcapture
for sectionname, content in (
("stdout", rep.capstdout),
("stderr", rep.capstderr),
("log", rep.caplog),
):
if showcapture in (sectionname, "all") and content:
tw.sep(">", "captured " + sectionname)
if content[-1:] == "\n":
content = content[:-1]
tw.line(content)
tw.sep(">", "traceback")
rep.toterminal(tw)
tw.sep(">", "entering PDB")
tb = _postmortem_traceback(excinfo)
rep._pdbshown = True
if post_mortem(tb):
outcomes.exit("Quitting debugger")
return rep
def _postmortem_traceback(excinfo):
if isinstance(excinfo.value, UnexpectedException):
# A doctest.UnexpectedException is not useful for post_mortem.
# Use the underlying exception instead:
return excinfo.value.exc_info[2]
else:
return excinfo._excinfo[2]
def _find_last_non_hidden_frame(stack):
i = max(0, len(stack) - 1)
while i and stack[i][0].f_locals.get("__tracebackhide__", False):
i -= 1
return i
def post_mortem(t):
class Pdb(pytestPDB._pdb_cls):
def get_stack(self, f, t):
stack, i = pdb.Pdb.get_stack(self, f, t)
if f is None:
i = _find_last_non_hidden_frame(stack)
return stack, i
p = Pdb()
p.reset()
p.interaction(None, t)
return p.quitting
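pytest_configure above splits the --pdbcls value on ':', imports the module and looks the class up by name, so any pdb.Pdb-compatible class can be plugged in; a minimal sketch of such a class (module and class names are invented):

# mydebugger.py -- selected with: pytest --pdb --pdbcls=mydebugger:QuietPdb
import pdb
class QuietPdb(pdb.Pdb):
    """Plain pdb.Pdb subclass with a custom prompt."""
    def __init__(self, *args, **kwargs):
        pdb.Pdb.__init__(self, *args, **kwargs)
        self.prompt = "(quiet-pdb) "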

@ -1,123 +0,0 @@
"""
This module contains deprecation messages and bits of code used elsewhere in the codebase
that are planned to be removed in the next pytest release.
Keeping it in a central location makes it easy to track what is deprecated and should
be removed when the time comes.
All constants defined in this module should be either PytestWarning instances or UnformattedWarning instances,
the latter in case of warnings which need to format their messages.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from _pytest.warning_types import PytestDeprecationWarning
from _pytest.warning_types import RemovedInPytest4Warning
from _pytest.warning_types import UnformattedWarning
MAIN_STR_ARGS = RemovedInPytest4Warning(
"passing a string to pytest.main() is deprecated, "
"pass a list of arguments instead."
)
YIELD_TESTS = RemovedInPytest4Warning(
"yield tests are deprecated, and scheduled to be removed in pytest 4.0"
)
CACHED_SETUP = RemovedInPytest4Warning(
"cached_setup is deprecated and will be removed in a future release. "
"Use standard fixture functions instead."
)
COMPAT_PROPERTY = UnformattedWarning(
RemovedInPytest4Warning,
"usage of {owner}.{name} is deprecated, please use pytest.{name} instead",
)
CUSTOM_CLASS = UnformattedWarning(
RemovedInPytest4Warning,
'use of special named "{name}" objects in collectors of type "{type_name}" to '
"customize the created nodes is deprecated. "
"Use pytest_pycollect_makeitem(...) to create custom "
"collection nodes instead.",
)
FUNCARG_PREFIX = UnformattedWarning(
RemovedInPytest4Warning,
'{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated '
"and scheduled to be removed in pytest 4.0. "
"Please remove the prefix and use the @pytest.fixture decorator instead.",
)
FIXTURE_FUNCTION_CALL = UnformattedWarning(
RemovedInPytest4Warning,
'Fixture "{name}" called directly. Fixtures are not meant to be called directly, '
"are created automatically when test functions request them as parameters. "
"See https://docs.pytest.org/en/latest/fixture.html for more information.",
)
FIXTURE_NAMED_REQUEST = PytestDeprecationWarning(
"'request' is a reserved name for fixtures and will raise an error in future versions"
)
CFG_PYTEST_SECTION = UnformattedWarning(
RemovedInPytest4Warning,
"[pytest] section in {filename} files is deprecated, use [tool:pytest] instead.",
)
GETFUNCARGVALUE = RemovedInPytest4Warning(
"getfuncargvalue is deprecated, use getfixturevalue"
)
RESULT_LOG = RemovedInPytest4Warning(
"--result-log is deprecated and scheduled for removal in pytest 4.0.\n"
"See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information."
)
MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning(
"MarkInfo objects are deprecated as they contain merged marks which are hard to deal with correctly.\n"
"Please use node.get_closest_marker(name) or node.iter_markers(name).\n"
"Docs: https://docs.pytest.org/en/latest/mark.html#updating-code"
)
MARK_PARAMETERSET_UNPACKING = RemovedInPytest4Warning(
"Applying marks directly to parameters is deprecated,"
" please use pytest.param(..., marks=...) instead.\n"
"For more details, see: https://docs.pytest.org/en/latest/parametrize.html"
)
NODE_WARN = RemovedInPytest4Warning(
"Node.warn(code, message) form has been deprecated, use Node.warn(warning_instance) instead."
)
RECORD_XML_PROPERTY = RemovedInPytest4Warning(
'Fixture renamed from "record_xml_property" to "record_property" as user '
"properties are now available to all reporters.\n"
'"record_xml_property" is now deprecated.'
)
COLLECTOR_MAKEITEM = RemovedInPytest4Warning(
"pycollector makeitem was removed as it is an accidentially leaked internal api"
)
METAFUNC_ADD_CALL = RemovedInPytest4Warning(
"Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0.\n"
"Please use Metafunc.parametrize instead."
)
PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = RemovedInPytest4Warning(
"Defining pytest_plugins in a non-top-level conftest is deprecated, "
"because it affects the entire directory tree in a non-explicit way.\n"
"Please move it to the top level conftest file instead."
)
PYTEST_NAMESPACE = RemovedInPytest4Warning(
"pytest_namespace is deprecated and will be removed soon"
)
PYTEST_ENSURETEMP = RemovedInPytest4Warning(
"pytest/tmpdir_factory.ensuretemp is deprecated, \n"
"please use the tmp_path fixture or tmp_path_factory.mktemp"
)
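Most of these constants are ready-made warning instances, so call sites can pass them straight to warnings.warn; a minimal sketch of that pattern (the wrapper function below is invented, the real call sites live elsewhere in pytest):

import warnings
from _pytest.deprecated import GETFUNCARGVALUE
def getfuncargvalue(request, name):
    # warnings.warn accepts a Warning instance directly; the category is taken
    # from the instance's class (RemovedInPytest4Warning here).
    warnings.warn(GETFUNCARGVALUE, stacklevel=2)
    return request.getfixturevalue(name)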

@ -1,516 +0,0 @@
""" discover and run doctests in modules and test files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import platform
import sys
import traceback
import pytest
from _pytest._code.code import ExceptionInfo
from _pytest._code.code import ReprFileLocation
from _pytest._code.code import TerminalRepr
from _pytest.fixtures import FixtureRequest
DOCTEST_REPORT_CHOICE_NONE = "none"
DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
DOCTEST_REPORT_CHOICE_NDIFF = "ndiff"
DOCTEST_REPORT_CHOICE_UDIFF = "udiff"
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure"
DOCTEST_REPORT_CHOICES = (
DOCTEST_REPORT_CHOICE_NONE,
DOCTEST_REPORT_CHOICE_CDIFF,
DOCTEST_REPORT_CHOICE_NDIFF,
DOCTEST_REPORT_CHOICE_UDIFF,
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
)
# Lazy definition of runner class
RUNNER_CLASS = None
def pytest_addoption(parser):
parser.addini(
"doctest_optionflags",
"option flags for doctests",
type="args",
default=["ELLIPSIS"],
)
parser.addini(
"doctest_encoding", "encoding used for doctest files", default="utf-8"
)
group = parser.getgroup("collect")
group.addoption(
"--doctest-modules",
action="store_true",
default=False,
help="run doctests in all .py modules",
dest="doctestmodules",
)
group.addoption(
"--doctest-report",
type=str.lower,
default="udiff",
help="choose another output format for diffs on doctest failure",
choices=DOCTEST_REPORT_CHOICES,
dest="doctestreport",
)
group.addoption(
"--doctest-glob",
action="append",
default=[],
metavar="pat",
help="doctests file matching pattern, default: test*.txt",
dest="doctestglob",
)
group.addoption(
"--doctest-ignore-import-errors",
action="store_true",
default=False,
help="ignore doctest ImportErrors",
dest="doctest_ignore_import_errors",
)
group.addoption(
"--doctest-continue-on-failure",
action="store_true",
default=False,
help="for a given doctest, continue to run after the first failure",
dest="doctest_continue_on_failure",
)
def pytest_collect_file(path, parent):
config = parent.config
if path.ext == ".py":
if config.option.doctestmodules and not _is_setup_py(config, path, parent):
return DoctestModule(path, parent)
elif _is_doctest(config, path, parent):
return DoctestTextfile(path, parent)
def _is_setup_py(config, path, parent):
if path.basename != "setup.py":
return False
contents = path.read()
return "setuptools" in contents or "distutils" in contents
def _is_doctest(config, path, parent):
if path.ext in (".txt", ".rst") and parent.session.isinitpath(path):
return True
globs = config.getoption("doctestglob") or ["test*.txt"]
for glob in globs:
if path.check(fnmatch=glob):
return True
return False
class ReprFailDoctest(TerminalRepr):
def __init__(self, reprlocation_lines):
# List of (reprlocation, lines) tuples
self.reprlocation_lines = reprlocation_lines
def toterminal(self, tw):
for reprlocation, lines in self.reprlocation_lines:
for line in lines:
tw.line(line)
reprlocation.toterminal(tw)
class MultipleDoctestFailures(Exception):
def __init__(self, failures):
super(MultipleDoctestFailures, self).__init__()
self.failures = failures
def _init_runner_class():
import doctest
class PytestDoctestRunner(doctest.DebugRunner):
"""
Runner to collect failures. Note that the out variable in this case is
a list instead of a stdout-like object
"""
def __init__(
self, checker=None, verbose=None, optionflags=0, continue_on_failure=True
):
doctest.DebugRunner.__init__(
self, checker=checker, verbose=verbose, optionflags=optionflags
)
self.continue_on_failure = continue_on_failure
def report_failure(self, out, test, example, got):
failure = doctest.DocTestFailure(test, example, got)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
def report_unexpected_exception(self, out, test, example, exc_info):
failure = doctest.UnexpectedException(test, example, exc_info)
if self.continue_on_failure:
out.append(failure)
else:
raise failure
return PytestDoctestRunner
def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True):
# We need this in order to do a lazy import on doctest
global RUNNER_CLASS
if RUNNER_CLASS is None:
RUNNER_CLASS = _init_runner_class()
return RUNNER_CLASS(
checker=checker,
verbose=verbose,
optionflags=optionflags,
continue_on_failure=continue_on_failure,
)
class DoctestItem(pytest.Item):
def __init__(self, name, parent, runner=None, dtest=None):
super(DoctestItem, self).__init__(name, parent)
self.runner = runner
self.dtest = dtest
self.obj = None
self.fixture_request = None
def setup(self):
if self.dtest is not None:
self.fixture_request = _setup_fixtures(self)
globs = dict(getfixture=self.fixture_request.getfixturevalue)
for name, value in self.fixture_request.getfixturevalue(
"doctest_namespace"
).items():
globs[name] = value
self.dtest.globs.update(globs)
def runtest(self):
_check_all_skipped(self.dtest)
self._disable_output_capturing_for_darwin()
failures = []
self.runner.run(self.dtest, out=failures)
if failures:
raise MultipleDoctestFailures(failures)
def _disable_output_capturing_for_darwin(self):
"""
Disable output capturing. Otherwise, stdout is lost to doctest (#985)
"""
if platform.system() != "Darwin":
return
capman = self.config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture(in_=True)
out, err = capman.read_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
def repr_failure(self, excinfo):
import doctest
failures = None
if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)):
failures = [excinfo.value]
elif excinfo.errisinstance(MultipleDoctestFailures):
failures = excinfo.value.failures
if failures is not None:
reprlocation_lines = []
for failure in failures:
example = failure.example
test = failure.test
filename = test.filename
if test.lineno is None:
lineno = None
else:
lineno = test.lineno + example.lineno + 1
message = type(failure).__name__
reprlocation = ReprFileLocation(filename, lineno, message)
checker = _get_checker()
report_choice = _get_report_choice(
self.config.getoption("doctestreport")
)
if lineno is not None:
lines = failure.test.docstring.splitlines(False)
# add line numbers to the left of the error message
lines = [
"%03d %s" % (i + test.lineno + 1, x)
for (i, x) in enumerate(lines)
]
# trim docstring error lines to 10
lines = lines[max(example.lineno - 9, 0) : example.lineno + 1]
else:
lines = [
"EXAMPLE LOCATION UNKNOWN, not showing all tests of that example"
]
indent = ">>>"
for line in example.source.splitlines():
lines.append("??? %s %s" % (indent, line))
indent = "..."
if isinstance(failure, doctest.DocTestFailure):
lines += checker.output_difference(
example, failure.got, report_choice
).split("\n")
else:
inner_excinfo = ExceptionInfo(failure.exc_info)
lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
lines += traceback.format_exception(*failure.exc_info)
reprlocation_lines.append((reprlocation, lines))
return ReprFailDoctest(reprlocation_lines)
else:
return super(DoctestItem, self).repr_failure(excinfo)
def reportinfo(self):
return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name
def _get_flag_lookup():
import doctest
return dict(
DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
ELLIPSIS=doctest.ELLIPSIS,
IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
ALLOW_UNICODE=_get_allow_unicode_flag(),
ALLOW_BYTES=_get_allow_bytes_flag(),
)
def get_optionflags(parent):
optionflags_str = parent.config.getini("doctest_optionflags")
flag_lookup_table = _get_flag_lookup()
flag_acc = 0
for flag in optionflags_str:
flag_acc |= flag_lookup_table[flag]
return flag_acc
def _get_continue_on_failure(config):
continue_on_failure = config.getvalue("doctest_continue_on_failure")
if continue_on_failure:
# We need to turn off this if we use pdb since we should stop at
# the first failure
if config.getvalue("usepdb"):
continue_on_failure = False
return continue_on_failure
class DoctestTextfile(pytest.Module):
obj = None
def collect(self):
import doctest
# inspired by doctest.testfile; ideally we would use it directly,
# but it doesn't support passing a custom checker
encoding = self.config.getini("doctest_encoding")
text = self.fspath.read_text(encoding)
filename = str(self.fspath)
name = self.fspath.basename
globs = {"__name__": "__main__"}
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=0,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
_fix_spoof_python2(runner, encoding)
parser = doctest.DocTestParser()
test = parser.get_doctest(text, globs, name, filename, 0)
if test.examples:
yield DoctestItem(test.name, self, runner, test)
def _check_all_skipped(test):
"""raises pytest.skip() if all examples in the given DocTest have the SKIP
option set.
"""
import doctest
all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
if all_skipped:
pytest.skip("all tests skipped by +SKIP option")
class DoctestModule(pytest.Module):
def collect(self):
import doctest
if self.fspath.basename == "conftest.py":
module = self.config.pluginmanager._importconftest(self.fspath)
else:
try:
module = self.fspath.pyimport()
except ImportError:
if self.config.getvalue("doctest_ignore_import_errors"):
pytest.skip("unable to import module %r" % self.fspath)
else:
raise
# uses internal doctest module parsing mechanism
finder = doctest.DocTestFinder()
optionflags = get_optionflags(self)
runner = _get_runner(
verbose=0,
optionflags=optionflags,
checker=_get_checker(),
continue_on_failure=_get_continue_on_failure(self.config),
)
for test in finder.find(module, module.__name__):
if test.examples: # skip empty doctests
yield DoctestItem(test.name, self, runner, test)
def _setup_fixtures(doctest_item):
"""
Used by DoctestTextfile and DoctestItem to setup fixture information.
"""
def func():
pass
doctest_item.funcargs = {}
fm = doctest_item.session._fixturemanager
doctest_item._fixtureinfo = fm.getfixtureinfo(
node=doctest_item, func=func, cls=None, funcargs=False
)
fixture_request = FixtureRequest(doctest_item)
fixture_request._fillfixtures()
return fixture_request
def _get_checker():
"""
Returns a doctest.OutputChecker subclass that takes into account the
ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
to strip b'' prefixes.
Useful when the same doctest should run in Python 2 and Python 3.
An inner class is used to avoid importing "doctest" at the module
level.
"""
if hasattr(_get_checker, "LiteralsOutputChecker"):
return _get_checker.LiteralsOutputChecker()
import doctest
import re
class LiteralsOutputChecker(doctest.OutputChecker):
"""
Copied from doctest_nose_plugin.py from the nltk project:
https://github.com/nltk/nltk
Further extended to also support byte literals.
"""
_unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
_bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
def check_output(self, want, got, optionflags):
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
if res:
return True
allow_unicode = optionflags & _get_allow_unicode_flag()
allow_bytes = optionflags & _get_allow_bytes_flag()
if not allow_unicode and not allow_bytes:
return False
else: # pragma: no cover
def remove_prefixes(regex, txt):
return re.sub(regex, r"\1\2", txt)
if allow_unicode:
want = remove_prefixes(self._unicode_literal_re, want)
got = remove_prefixes(self._unicode_literal_re, got)
if allow_bytes:
want = remove_prefixes(self._bytes_literal_re, want)
got = remove_prefixes(self._bytes_literal_re, got)
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
return res
_get_checker.LiteralsOutputChecker = LiteralsOutputChecker
return _get_checker.LiteralsOutputChecker()
def _get_allow_unicode_flag():
"""
Registers and returns the ALLOW_UNICODE flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_UNICODE")
def _get_allow_bytes_flag():
"""
Registers and returns the ALLOW_BYTES flag.
"""
import doctest
return doctest.register_optionflag("ALLOW_BYTES")
def _get_report_choice(key):
"""
This function returns the actual `doctest` module flag value; we want to do it as late as possible to avoid
importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests.
"""
import doctest
return {
DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
DOCTEST_REPORT_CHOICE_NONE: 0,
}[key]
def _fix_spoof_python2(runner, encoding):
"""
Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This
should patch only doctests for text files because they don't have a way to declare their
encoding. Doctests in docstrings from Python modules don't have the same problem given that
Python already decoded the strings.
This fixes the problem related in issue #2434.
"""
from _pytest.compat import _PY2
if not _PY2:
return
from doctest import _SpoofOut
class UnicodeSpoof(_SpoofOut):
def getvalue(self):
result = _SpoofOut.getvalue(self)
if encoding and isinstance(result, bytes):
result = result.decode(encoding)
return result
runner._fakeout = UnicodeSpoof()
@pytest.fixture(scope="session")
def doctest_namespace():
"""
Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.
"""
return dict()
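The doctest_namespace fixture defined above is the documented way to pre-populate doctest globals; a minimal conftest.py sketch (the injected name is just an example):

# conftest.py
import math
import pytest
@pytest.fixture(autouse=True)
def add_math(doctest_namespace):
    # every collected doctest can now use `math` without importing it
    doctest_namespace["math"] = math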

File diff suppressed because it is too large

@ -1,47 +0,0 @@
"""
Provides a function to report all internal pytest modules for use with freezing tools.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def freeze_includes():
"""
Returns a list of module names used by pytest that should be
included by cx_freeze.
"""
import py
import _pytest
result = list(_iter_all_modules(py))
result += list(_iter_all_modules(_pytest))
return result
def _iter_all_modules(package, prefix=""):
"""
Iterates over the names of all modules that can be found in the given
package, recursively.
Example:
_iter_all_modules(_pytest) ->
['_pytest.assertion.newinterpret',
'_pytest.capture',
'_pytest.core',
...
]
"""
import os
import pkgutil
if type(package) is not str:
path, prefix = package.__path__[0], package.__name__ + "."
else:
path = package
for _, name, is_package in pkgutil.iter_modules([path]):
if is_package:
for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."):
yield prefix + m
else:
yield prefix + name
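A minimal sketch of feeding freeze_includes() to cx_Freeze, following pytest's documented freezing recipe (the project name and script are made up):

# setup.py for a frozen application that also bundles pytest
from cx_Freeze import setup, Executable
import pytest
setup(
    name="app_main",
    options={"build_exe": {"includes": pytest.freeze_includes()}},
    executables=[Executable("app_main.py")],
)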

@ -1,217 +0,0 @@
""" version info, help messages, tracing configuration. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from argparse import Action
import py
import pytest
from _pytest.config import PrintHelp
class HelpAction(Action):
"""This is an argparse Action that will raise an exception in
order to skip the rest of the argument parsing when --help is passed.
This prevents argparse from quitting due to missing required arguments
when any are defined, for example by ``pytest_addoption``.
This is similar to the way that the builtin argparse --help option is
implemented by raising SystemExit.
"""
def __init__(self, option_strings, dest=None, default=False, help=None):
super(HelpAction, self).__init__(
option_strings=option_strings,
dest=dest,
const=True,
default=default,
nargs=0,
help=help,
)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, self.const)
# We should only skip the rest of the parsing after preparse is done
if getattr(parser._parser, "after_preparse", False):
raise PrintHelp
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption(
"--version",
action="store_true",
help="display pytest lib version and import information.",
)
group._addoption(
"-h",
"--help",
action=HelpAction,
dest="help",
help="show help message and configuration info",
)
group._addoption(
"-p",
action="append",
dest="plugins",
default=[],
metavar="name",
help="early-load given plugin (multi-allowed). "
"To avoid loading of plugins, use the `no:` prefix, e.g. "
"`no:doctest`.",
)
group.addoption(
"--traceconfig",
"--trace-config",
action="store_true",
default=False,
help="trace considerations of conftest.py files.",
)
group.addoption(
"--debug",
action="store_true",
dest="debug",
default=False,
help="store internal tracing debug information in 'pytestdebug.log'.",
)
group._addoption(
"-o",
"--override-ini",
dest="override_ini",
action="append",
help='override ini option with "option=value" style, e.g. `-o xfail_strict=True -o cache_dir=cache`.',
)
@pytest.hookimpl(hookwrapper=True)
def pytest_cmdline_parse():
outcome = yield
config = outcome.get_result()
if config.option.debug:
path = os.path.abspath("pytestdebug.log")
debugfile = open(path, "w")
debugfile.write(
"versions pytest-%s, py-%s, "
"python-%s\ncwd=%s\nargs=%s\n\n"
% (
pytest.__version__,
py.__version__,
".".join(map(str, sys.version_info)),
os.getcwd(),
config._origargs,
)
)
config.trace.root.setwriter(debugfile.write)
undo_tracing = config.pluginmanager.enable_tracing()
sys.stderr.write("writing pytestdebug information to %s\n" % path)
def unset_tracing():
debugfile.close()
sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name)
config.trace.root.setwriter(None)
undo_tracing()
config.add_cleanup(unset_tracing)
def pytest_cmdline_main(config):
if config.option.version:
p = py.path.local(pytest.__file__)
sys.stderr.write(
"This is pytest version %s, imported from %s\n" % (pytest.__version__, p)
)
plugininfo = getpluginversioninfo(config)
if plugininfo:
for line in plugininfo:
sys.stderr.write(line + "\n")
return 0
elif config.option.help:
config._do_configure()
showhelp(config)
config._ensure_unconfigure()
return 0
def showhelp(config):
reporter = config.pluginmanager.get_plugin("terminalreporter")
tw = reporter._tw
tw.write(config._parser.optparser.format_help())
tw.line()
tw.line()
tw.line(
"[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:"
)
tw.line()
for name in config._parser._ininames:
help, type, default = config._parser._inidict[name]
if type is None:
type = "string"
spec = "%s (%s)" % (name, type)
line = " %-24s %s" % (spec, help)
tw.line(line[: tw.fullwidth])
tw.line()
tw.line("environment variables:")
vars = [
("PYTEST_ADDOPTS", "extra command line options"),
("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"),
("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"),
]
for name, help in vars:
tw.line(" %-24s %s" % (name, help))
tw.line()
tw.line()
tw.line("to see available markers type: pytest --markers")
tw.line("to see available fixtures type: pytest --fixtures")
tw.line(
"(shown according to specified file_or_dir or current dir "
"if not specified; fixtures with leading '_' are only shown "
"with the '-v' option"
)
for warningreport in reporter.stats.get("warnings", []):
tw.line("warning : " + warningreport.message, red=True)
return
conftest_options = [("pytest_plugins", "list of plugin names to load")]
def getpluginversioninfo(config):
lines = []
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
lines.append("setuptools registered plugins:")
for plugin, dist in plugininfo:
loc = getattr(plugin, "__file__", repr(plugin))
content = "%s-%s at %s" % (dist.project_name, dist.version, loc)
lines.append(" " + content)
return lines
def pytest_report_header(config):
lines = []
if config.option.debug or config.option.traceconfig:
lines.append("using: pytest-%s pylib-%s" % (pytest.__version__, py.__version__))
verinfo = getpluginversioninfo(config)
if verinfo:
lines.extend(verinfo)
if config.option.traceconfig:
lines.append("active plugins:")
items = config.pluginmanager.list_name_plugin()
for name, plugin in items:
if hasattr(plugin, "__file__"):
r = plugin.__file__
else:
r = repr(plugin)
lines.append(" %-20s: %s" % (name, r))
return lines
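pytest_report_header above is the same hook a project can implement in its own conftest.py to add header lines; a minimal sketch (the reported values are invented):

# conftest.py
import sys
def pytest_report_header(config):
    lines = ["project: demo-suite", "python: %s" % sys.version.split()[0]]
    if config.getoption("verbose") > 0:
        lines.append("extra detail shown because -v was used")
    return lines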

@ -1,623 +0,0 @@
""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
from pluggy import HookspecMarker
from .deprecated import PYTEST_NAMESPACE
hookspec = HookspecMarker("pytest")
# -------------------------------------------------------------------------
# Initialization hooks called for every plugin
# -------------------------------------------------------------------------
@hookspec(historic=True)
def pytest_addhooks(pluginmanager):
"""called at plugin registration time to allow adding new hooks via a call to
``pluginmanager.add_hookspecs(module_or_class, prefix)``.
:param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager
.. note::
This hook is incompatible with ``hookwrapper=True``.
"""
@hookspec(historic=True, warn_on_impl=PYTEST_NAMESPACE)
def pytest_namespace():
"""
return dict of name->object to be made globally available in
the pytest namespace.
This hook is called at plugin registration time.
.. note::
This hook is incompatible with ``hookwrapper=True``.
.. warning::
This hook has been **deprecated** and will be removed in pytest 4.0.
Plugins whose users depend on the current namespace functionality should prepare to migrate to a
namespace they actually own.
To support the migration it's suggested to trigger ``DeprecationWarnings`` for objects they put into the
pytest namespace.
A stopgap measure to avoid the warning is to monkeypatch the ``pytest`` module, but just as the
``pytest_namespace`` hook this should be seen as a temporary measure to be removed in future versions after
an appropriate transition period.
"""
@hookspec(historic=True)
def pytest_plugin_registered(plugin, manager):
""" a new pytest plugin got registered.
:param plugin: the plugin module or instance
:param _pytest.config.PytestPluginManager manager: pytest plugin manager
.. note::
This hook is incompatible with ``hookwrapper=True``.
"""
@hookspec(historic=True)
def pytest_addoption(parser):
"""register argparse-style options and ini-style config values,
called once at the beginning of a test run.
.. note::
This function should be implemented only in plugins or ``conftest.py``
files situated at the tests root directory due to how pytest
:ref:`discovers plugins during startup <pluginorder>`.
:arg _pytest.config.Parser parser: To add command line options, call
:py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.
To add ini-file values call :py:func:`parser.addini(...)
<_pytest.config.Parser.addini>`.
Options can later be accessed through the
:py:class:`config <_pytest.config.Config>` object, respectively:
- :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to
retrieve the value of a command line option.
- :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve
a value read from an ini-style file.
The config object is passed around on many internal objects via the ``.config``
attribute or can be retrieved as the ``pytestconfig`` fixture.
.. note::
This hook is incompatible with ``hookwrapper=True``.
"""
@hookspec(historic=True)
def pytest_configure(config):
"""
Allows plugins and conftest files to perform initial configuration.
This hook is called for every plugin and initial conftest file
after command line options have been parsed.
After that, the hook is called for other conftest files as they are
imported.
.. note::
This hook is incompatible with ``hookwrapper=True``.
:arg _pytest.config.Config config: pytest config object
"""
# -------------------------------------------------------------------------
# Bootstrapping hooks called for plugins registered early enough:
# internal and 3rd party plugins.
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_cmdline_parse(pluginmanager, args):
"""return initialized config object, parsing the specified args.
Stops at first non-None result, see :ref:`firstresult`
.. note::
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
:param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager
:param list[str] args: list of arguments passed on the command line
"""
def pytest_cmdline_preparse(config, args):
"""(**Deprecated**) modify command line arguments before option parsing.
This hook is considered deprecated and will be removed in a future pytest version. Consider
using :func:`pytest_load_initial_conftests` instead.
.. note::
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
:param _pytest.config.Config config: pytest config object
:param list[str] args: list of arguments passed on the command line
"""
@hookspec(firstresult=True)
def pytest_cmdline_main(config):
""" called for performing the main command line action. The default
implementation will invoke the configure hooks and runtest_mainloop.
.. note::
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
Stops at first non-None result, see :ref:`firstresult`
:param _pytest.config.Config config: pytest config object
"""
def pytest_load_initial_conftests(early_config, parser, args):
""" implements the loading of initial conftest files ahead
of command line option parsing.
.. note::
This hook will not be called for ``conftest.py`` files, only for setuptools plugins.
:param _pytest.config.Config early_config: pytest config object
:param list[str] args: list of arguments passed on the command line
:param _pytest.config.Parser parser: to add command line options
"""
# -------------------------------------------------------------------------
# collection hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_collection(session):
"""Perform the collection protocol for the given session.
Stops at first non-None result, see :ref:`firstresult`.
:param _pytest.main.Session session: the pytest session object
"""
def pytest_collection_modifyitems(session, config, items):
""" called after collection has been performed, may filter or re-order
the items in-place.
:param _pytest.main.Session session: the pytest session object
:param _pytest.config.Config config: pytest config object
:param List[_pytest.nodes.Item] items: list of item objects
"""
def pytest_collection_finish(session):
""" called after collection has been performed and modified.
:param _pytest.main.Session session: the pytest session object
"""
@hookspec(firstresult=True)
def pytest_ignore_collect(path, config):
""" return True to prevent considering this path for collection.
This hook is consulted for all files and directories prior to calling
more specific hooks.
Stops at first non-None result, see :ref:`firstresult`
:param str path: the path to analyze
:param _pytest.config.Config config: pytest config object
"""
@hookspec(firstresult=True)
def pytest_collect_directory(path, parent):
""" called before traversing a directory for collection files.
Stops at first non-None result, see :ref:`firstresult`
:param str path: the path to analyze
"""
def pytest_collect_file(path, parent):
""" return collection Node or None for the given path. Any new node
needs to have the specified ``parent`` as a parent.
:param str path: the path to collect
"""
# logging hooks for collection
def pytest_collectstart(collector):
""" collector starts collecting. """
def pytest_itemcollected(item):
""" we just collected a test item. """
def pytest_collectreport(report):
""" collector finished collecting. """
def pytest_deselected(items):
""" called for test items deselected by keyword. """
@hookspec(firstresult=True)
def pytest_make_collect_report(collector):
""" perform ``collector.collect()`` and return a CollectReport.
Stops at first non-None result, see :ref:`firstresult` """
# -------------------------------------------------------------------------
# Python test function related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_pycollect_makemodule(path, parent):
""" return a Module collector or None for the given path.
This hook will be called for each matching test module path.
The pytest_collect_file hook needs to be used if you want to
create test modules for files that do not match as a test module.
Stops at first non-None result, see :ref:`firstresult` """
@hookspec(firstresult=True)
def pytest_pycollect_makeitem(collector, name, obj):
""" return custom item/collector for a python object in a module, or None.
Stops at first non-None result, see :ref:`firstresult` """
@hookspec(firstresult=True)
def pytest_pyfunc_call(pyfuncitem):
""" call underlying test function.
Stops at first non-None result, see :ref:`firstresult` """
def pytest_generate_tests(metafunc):
""" generate (multiple) parametrized calls to a test function."""
@hookspec(firstresult=True)
def pytest_make_parametrize_id(config, val, argname):
"""Return a user-friendly string representation of the given ``val`` that will be used
by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``.
The parameter name is available as ``argname``, if required.
Stops at first non-None result, see :ref:`firstresult`
:param _pytest.config.Config config: pytest config object
:param val: the parametrized value
:param str argname: the automatic parameter name produced by pytest
"""
# -------------------------------------------------------------------------
# generic runtest related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_runtestloop(session):
""" called for performing the main runtest loop
(after collection finished).
Stops at first non-None result, see :ref:`firstresult`
:param _pytest.main.Session session: the pytest session object
"""
def pytest_itemstart(item, node):
"""(**Deprecated**) use pytest_runtest_logstart. """
@hookspec(firstresult=True)
def pytest_runtest_protocol(item, nextitem):
""" implements the runtest_setup/call/teardown protocol for
the given test item, including capturing exceptions and calling
reporting hooks.
:arg item: test item for which the runtest protocol is performed.
:arg nextitem: the scheduled-to-be-next test item (or None if this
is the end my friend). This argument is passed on to
:py:func:`pytest_runtest_teardown`.
:return boolean: True if no further hook implementations should be invoked.
Stops at first non-None result, see :ref:`firstresult` """
def pytest_runtest_logstart(nodeid, location):
""" signal the start of running a single test item.
This hook will be called **before** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and
:func:`pytest_runtest_teardown` hooks.
:param str nodeid: full id of the item
:param location: a triple of ``(filename, linenum, testname)``
"""
def pytest_runtest_logfinish(nodeid, location):
""" signal the complete finish of running a single test item.
This hook will be called **after** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and
:func:`pytest_runtest_teardown` hooks.
:param str nodeid: full id of the item
:param location: a triple of ``(filename, linenum, testname)``
"""
def pytest_runtest_setup(item):
""" called before ``pytest_runtest_call(item)``. """
def pytest_runtest_call(item):
""" called to execute the test ``item``. """
def pytest_runtest_teardown(item, nextitem):
""" called after ``pytest_runtest_call``.
:arg nextitem: the scheduled-to-be-next test item (None if no further
test item is scheduled). This argument can be used to
perform exact teardowns, i.e. calling just enough finalizers
so that nextitem only needs to call setup-functions.
"""
@hookspec(firstresult=True)
def pytest_runtest_makereport(item, call):
""" return a :py:class:`_pytest.runner.TestReport` object
for the given :py:class:`pytest.Item <_pytest.main.Item>` and
:py:class:`_pytest.runner.CallInfo`.
Stops at first non-None result, see :ref:`firstresult` """
def pytest_runtest_logreport(report):
""" process a test setup/call/teardown report relating to
the respective phase of executing a test. """
# -------------------------------------------------------------------------
# Fixture related hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_fixture_setup(fixturedef, request):
""" performs fixture setup execution.
:return: The return value of the call to the fixture function
Stops at first non-None result, see :ref:`firstresult`
.. note::
If the fixture function returns None, other implementations of
this hook function will continue to be called, according to the
behavior of the :ref:`firstresult` option.
"""
def pytest_fixture_post_finalizer(fixturedef, request):
""" called after fixture teardown, but before the cache is cleared so
the fixture result cache ``fixturedef.cached_result`` can
still be accessed."""
# -------------------------------------------------------------------------
# test session related hooks
# -------------------------------------------------------------------------
def pytest_sessionstart(session):
""" called after the ``Session`` object has been created and before performing collection
and entering the run test loop.
:param _pytest.main.Session session: the pytest session object
"""
def pytest_sessionfinish(session, exitstatus):
""" called after whole test run finished, right before returning the exit status to the system.
:param _pytest.main.Session session: the pytest session object
:param int exitstatus: the status which pytest will return to the system
"""
def pytest_unconfigure(config):
""" called before test process is exited.
:param _pytest.config.Config config: pytest config object
"""
# -------------------------------------------------------------------------
# hooks for customizing the assert methods
# -------------------------------------------------------------------------
def pytest_assertrepr_compare(config, op, left, right):
"""return explanation for comparisons in failing assert expressions.
Return None for no custom explanation, otherwise return a list
of strings. The strings will be joined by newlines but any newlines
*in* a string will be escaped. Note that all but the first line will
be indented slightly; the intention is for the first line to be a summary.
:param _pytest.config.Config config: pytest config object
"""
# -------------------------------------------------------------------------
# hooks for influencing reporting (invoked from _pytest_terminal)
# -------------------------------------------------------------------------
def pytest_report_header(config, startdir):
""" return a string or list of strings to be displayed as header info for terminal reporting.
:param _pytest.config.Config config: pytest config object
:param startdir: py.path object with the starting dir
.. note::
This function should be implemented only in plugins or ``conftest.py``
files situated at the tests root directory due to how pytest
:ref:`discovers plugins during startup <pluginorder>`.
"""
def pytest_report_collectionfinish(config, startdir, items):
"""
.. versionadded:: 3.2
return a string or list of strings to be displayed after collection has finished successfully.
These strings will be displayed after the standard "collected X items" message.
:param _pytest.config.Config config: pytest config object
:param startdir: py.path object with the starting dir
:param items: list of pytest items that are going to be executed; this list should not be modified.
"""
@hookspec(firstresult=True)
def pytest_report_teststatus(report):
""" return result-category, shortletter and verbose word for reporting.
Stops at first non-None result, see :ref:`firstresult` """
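# Illustrative sketch (hypothetical plugin code): report a custom short
# letter and verbose word for passed call reports, implicitly returning None
# for everything else so the default handling applies::
#
#     def pytest_report_teststatus(report):
#         if report.when == "call" and report.passed:
#             return report.outcome, "P", "PASSED"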
def pytest_terminal_summary(terminalreporter, exitstatus):
"""Add a section to terminal summary reporting.
:param _pytest.terminal.TerminalReporter terminalreporter: the internal terminal reporter object
:param int exitstatus: the exit status that will be reported back to the OS
.. versionadded:: 3.5
The ``config`` parameter.
"""
@hookspec(historic=True)
def pytest_logwarning(message, code, nodeid, fslocation):
"""
.. deprecated:: 3.8
This hook will stop working in a future release.
pytest no longer triggers this hook, but the
terminal writer still implements it to display warnings issued by
:meth:`_pytest.config.Config.warn` and :meth:`_pytest.nodes.Node.warn`. Calling those functions will be
an error in future releases.
process a warning specified by a message, a code string,
a nodeid and fslocation (both of which may be None
if the warning is not tied to a particular node/location).
.. note::
This hook is incompatible with ``hookwrapper=True``.
"""
@hookspec(historic=True)
def pytest_warning_captured(warning_message, when, item):
"""
Process a warning captured by the internal pytest warnings plugin.
:param warnings.WarningMessage warning_message:
The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains
the same attributes as the parameters of :py:func:`warnings.showwarning`.
:param str when:
Indicates when the warning was captured. Possible values:
* ``"config"``: during pytest configuration/initialization stage.
* ``"collect"``: during test collection.
* ``"runtest"``: during test execution.
:param pytest.Item|None item:
**DEPRECATED**: This parameter is incompatible with ``pytest-xdist``, and will always receive ``None``
in a future release.
The item being executed if ``when`` is ``"runtest"``, otherwise ``None``.
"""
# -------------------------------------------------------------------------
# doctest hooks
# -------------------------------------------------------------------------
@hookspec(firstresult=True)
def pytest_doctest_prepare_content(content):
""" return processed content for a given doctest
Stops at first non-None result, see :ref:`firstresult` """
# -------------------------------------------------------------------------
# error handling and internal debugging hooks
# -------------------------------------------------------------------------
def pytest_internalerror(excrepr, excinfo):
""" called for internal errors. """
def pytest_keyboard_interrupt(excinfo):
""" called for keyboard interrupt. """
def pytest_exception_interact(node, call, report):
"""called when an exception was raised which can potentially be
interactively handled.
This hook is only called if an exception was raised
that is not an internal exception like ``skip.Exception``.
"""
def pytest_enter_pdb(config, pdb):
""" called upon pdb.set_trace(), can be used by plugins to take special
action just before the python debugger enters interactive mode.
:param _pytest.config.Config config: pytest config object
:param pdb.Pdb pdb: Pdb instance
"""
def pytest_leave_pdb(config, pdb):
""" called when leaving pdb (e.g. with continue after pdb.set_trace()).
Can be used by plugins to take special action just after the python
debugger leaves interactive mode.
:param _pytest.config.Config config: pytest config object
:param pdb.Pdb pdb: Pdb instance
"""
@@ -1,569 +0,0 @@
"""
report test results in JUnit-XML format,
for use with Jenkins and build integration servers.
Based on initial code from Ross Lawley.
Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import re
import sys
import time
import py
import pytest
from _pytest import nodes
from _pytest.config import filename_arg
# Python 2.X and 3.X compatibility
if sys.version_info[0] < 3:
from codecs import open
else:
unichr = chr
unicode = str
long = int
class Junit(py.xml.Namespace):
pass
# We need to get the subset of the invalid unicode ranges according to
# XML 1.0 which are valid in this python build. Hence we calculate
# this dynamically instead of hardcoding it. The spec range of valid
# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
# | [#x10000-#x10FFFF]
_legal_chars = (0x09, 0x0A, 0x0D)
_legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF))
_legal_xml_re = [
unicode("%s-%s") % (unichr(low), unichr(high))
for (low, high) in _legal_ranges
if low < sys.maxunicode
]
_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
illegal_xml_re = re.compile(unicode("[^%s]") % unicode("").join(_legal_xml_re))
del _legal_chars
del _legal_ranges
del _legal_xml_re
_py_ext_re = re.compile(r"\.py$")
def bin_xml_escape(arg):
def repl(matchobj):
i = ord(matchobj.group())
if i <= 0xFF:
return unicode("#x%02X") % i
else:
return unicode("#x%04X") % i
return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
class _NodeReporter(object):
def __init__(self, nodeid, xml):
self.id = nodeid
self.xml = xml
self.add_stats = self.xml.add_stats
self.duration = 0
self.properties = []
self.nodes = []
self.testcase = None
self.attrs = {}
def append(self, node):
self.xml.add_stats(type(node).__name__)
self.nodes.append(node)
def add_property(self, name, value):
self.properties.append((str(name), bin_xml_escape(value)))
def add_attribute(self, name, value):
self.attrs[str(name)] = bin_xml_escape(value)
def make_properties_node(self):
"""Return a Junit node containing custom properties, if any.
"""
if self.properties:
return Junit.properties(
[
Junit.property(name=name, value=value)
for name, value in self.properties
]
)
return ""
def record_testreport(self, testreport):
assert not self.testcase
names = mangle_test_address(testreport.nodeid)
existing_attrs = self.attrs
classnames = names[:-1]
if self.xml.prefix:
classnames.insert(0, self.xml.prefix)
attrs = {
"classname": ".".join(classnames),
"name": bin_xml_escape(names[-1]),
"file": testreport.location[0],
}
if testreport.location[1] is not None:
attrs["line"] = testreport.location[1]
if hasattr(testreport, "url"):
attrs["url"] = testreport.url
self.attrs = attrs
self.attrs.update(existing_attrs) # restore any user-defined attributes
def to_xml(self):
testcase = Junit.testcase(time=self.duration, **self.attrs)
testcase.append(self.make_properties_node())
for node in self.nodes:
testcase.append(node)
return testcase
def _add_simple(self, kind, message, data=None):
data = bin_xml_escape(data)
node = kind(data, message=message)
self.append(node)
def write_captured_output(self, report):
content_out = report.capstdout
content_log = report.caplog
content_err = report.capstderr
if content_log or content_out:
if content_log and self.xml.logging == "system-out":
if content_out:
# syncing stdout and the log-output is not done yet. It's
# probably not worth the effort. Therefore, first the captured
# stdout is shown and then the captured logs.
content = "\n".join(
[
" Captured Stdout ".center(80, "-"),
content_out,
"",
" Captured Log ".center(80, "-"),
content_log,
]
)
else:
content = content_log
else:
content = content_out
if content:
tag = getattr(Junit, "system-out")
self.append(tag(bin_xml_escape(content)))
if content_log or content_err:
if content_log and self.xml.logging == "system-err":
if content_err:
content = "\n".join(
[
" Captured Stderr ".center(80, "-"),
content_err,
"",
" Captured Log ".center(80, "-"),
content_log,
]
)
else:
content = content_log
else:
content = content_err
if content:
tag = getattr(Junit, "system-err")
self.append(tag(bin_xml_escape(content)))
def append_pass(self, report):
self.add_stats("passed")
def append_failure(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
if hasattr(report, "wasxfail"):
self._add_simple(Junit.skipped, "xfail-marked test passes unexpectedly")
else:
if hasattr(report.longrepr, "reprcrash"):
message = report.longrepr.reprcrash.message
elif isinstance(report.longrepr, (unicode, str)):
message = report.longrepr
else:
message = str(report.longrepr)
message = bin_xml_escape(message)
fail = Junit.failure(message=message)
fail.append(bin_xml_escape(report.longrepr))
self.append(fail)
def append_collect_error(self, report):
# msg = str(report.longrepr.reprtraceback.extraline)
self.append(
Junit.error(bin_xml_escape(report.longrepr), message="collection failure")
)
def append_collect_skipped(self, report):
self._add_simple(Junit.skipped, "collection skipped", report.longrepr)
def append_error(self, report):
if getattr(report, "when", None) == "teardown":
msg = "test teardown failure"
else:
msg = "test setup failure"
self._add_simple(Junit.error, msg, report.longrepr)
def append_skipped(self, report):
if hasattr(report, "wasxfail"):
self._add_simple(Junit.skipped, "expected test failure", report.wasxfail)
else:
filename, lineno, skipreason = report.longrepr
if skipreason.startswith("Skipped: "):
skipreason = skipreason[9:]
details = "%s:%s: %s" % (filename, lineno, skipreason)
self.append(
Junit.skipped(
bin_xml_escape(details),
type="pytest.skip",
message=bin_xml_escape(skipreason),
)
)
self.write_captured_output(report)
def finalize(self):
data = self.to_xml().unicode(indent=0)
self.__dict__.clear()
self.to_xml = lambda: py.xml.raw(data)
@pytest.fixture
def record_property(request):
"""Add an extra properties the calling test.
User properties become part of the test report and are available to the
configured reporters, like JUnit XML.
The fixture is callable with ``(name, value)``, with value being automatically
xml-encoded.
Example::
def test_function(record_property):
record_property("example_key", 1)
"""
def append_property(name, value):
request.node.user_properties.append((name, value))
return append_property
@pytest.fixture
def record_xml_property(record_property, request):
"""(Deprecated) use record_property."""
from _pytest import deprecated
request.node.warn(deprecated.RECORD_XML_PROPERTY)
return record_property
@pytest.fixture
def record_xml_attribute(request):
"""Add extra xml attributes to the tag for the calling test.
The fixture is callable with ``(name, value)``, with value being
automatically xml-encoded
"""
from _pytest.warning_types import PytestWarning
request.node.warn(PytestWarning("record_xml_attribute is an experimental feature"))
xml = getattr(request.config, "_xml", None)
if xml is not None:
node_reporter = xml.node_reporter(request.node.nodeid)
return node_reporter.add_attribute
else:
def add_attr_noop(name, value):
pass
return add_attr_noop
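# Illustrative usage sketch for the fixture above (test code, not part of
# this module)::
#
#     def test_function(record_xml_attribute):
#         record_xml_attribute("assertions", "REQ-1234")
#         assert True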
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group.addoption(
"--junitxml",
"--junit-xml",
action="store",
dest="xmlpath",
metavar="path",
type=functools.partial(filename_arg, optname="--junitxml"),
default=None,
help="create junit-xml style report file at given path.",
)
group.addoption(
"--junitprefix",
"--junit-prefix",
action="store",
metavar="str",
default=None,
help="prepend prefix to classnames in junit-xml output",
)
parser.addini(
"junit_suite_name", "Test suite name for JUnit report", default="pytest"
)
parser.addini(
"junit_logging",
"Write captured log messages to JUnit report: "
"one of no|system-out|system-err",
default="no",
) # choices=['no', 'stdout', 'stderr'])
def pytest_configure(config):
xmlpath = config.option.xmlpath
# prevent opening xmllog on slave nodes (xdist)
if xmlpath and not hasattr(config, "slaveinput"):
config._xml = LogXML(
xmlpath,
config.option.junitprefix,
config.getini("junit_suite_name"),
config.getini("junit_logging"),
)
config.pluginmanager.register(config._xml)
def pytest_unconfigure(config):
xml = getattr(config, "_xml", None)
if xml:
del config._xml
config.pluginmanager.unregister(xml)
def mangle_test_address(address):
path, possible_open_bracket, params = address.partition("[")
names = path.split("::")
try:
names.remove("()")
except ValueError:
pass
# convert file path to dotted path
names[0] = names[0].replace(nodes.SEP, ".")
names[0] = _py_ext_re.sub("", names[0])
# put any params back
names[-1] += possible_open_bracket + params
return names
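# Worked example of the transformation above (derived from the code, for
# illustration only):
#   "tests/test_sample.py::TestGroup::test_answer[1]"
#   -> ["tests.test_sample", "TestGroup", "test_answer[1]"]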
class LogXML(object):
def __init__(self, logfile, prefix, suite_name="pytest", logging="no"):
logfile = os.path.expanduser(os.path.expandvars(logfile))
self.logfile = os.path.normpath(os.path.abspath(logfile))
self.prefix = prefix
self.suite_name = suite_name
self.logging = logging
self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0)
self.node_reporters = {} # nodeid -> _NodeReporter
self.node_reporters_ordered = []
self.global_properties = []
# List of reports that failed on call but teardown is pending.
self.open_reports = []
self.cnt_double_fail_tests = 0
def finalize(self, report):
nodeid = getattr(report, "nodeid", report)
# local hack to handle xdist report order
slavenode = getattr(report, "node", None)
reporter = self.node_reporters.pop((nodeid, slavenode))
if reporter is not None:
reporter.finalize()
def node_reporter(self, report):
nodeid = getattr(report, "nodeid", report)
# local hack to handle xdist report order
slavenode = getattr(report, "node", None)
key = nodeid, slavenode
if key in self.node_reporters:
# TODO: breaks for --dist=each
return self.node_reporters[key]
reporter = _NodeReporter(nodeid, self)
self.node_reporters[key] = reporter
self.node_reporters_ordered.append(reporter)
return reporter
def add_stats(self, key):
if key in self.stats:
self.stats[key] += 1
def _opentestcase(self, report):
reporter = self.node_reporter(report)
reporter.record_testreport(report)
return reporter
def pytest_runtest_logreport(self, report):
"""handle a setup/call/teardown report, generating the appropriate
xml tags as necessary.
note: due to plugins like xdist, this hook may be called in interlaced
order with reports from other nodes. for example:
usual call order:
-> setup node1
-> call node1
-> teardown node1
-> setup node2
-> call node2
-> teardown node2
possible call order in xdist:
-> setup node1
-> call node1
-> setup node2
-> call node2
-> teardown node2
-> teardown node1
"""
close_report = None
if report.passed:
if report.when == "call": # ignore setup/teardown
reporter = self._opentestcase(report)
reporter.append_pass(report)
elif report.failed:
if report.when == "teardown":
# The following vars are needed when xdist plugin is used
report_wid = getattr(report, "worker_id", None)
report_ii = getattr(report, "item_index", None)
close_report = next(
(
rep
for rep in self.open_reports
if (
rep.nodeid == report.nodeid
and getattr(rep, "item_index", None) == report_ii
and getattr(rep, "worker_id", None) == report_wid
)
),
None,
)
if close_report:
# We need to open a new testcase in case we have a failure in
# call and an error in teardown, in order to follow the junit
# schema
self.finalize(close_report)
self.cnt_double_fail_tests += 1
reporter = self._opentestcase(report)
if report.when == "call":
reporter.append_failure(report)
self.open_reports.append(report)
else:
reporter.append_error(report)
elif report.skipped:
reporter = self._opentestcase(report)
reporter.append_skipped(report)
self.update_testcase_duration(report)
if report.when == "teardown":
reporter = self._opentestcase(report)
reporter.write_captured_output(report)
for propname, propvalue in report.user_properties:
reporter.add_property(propname, propvalue)
self.finalize(report)
report_wid = getattr(report, "worker_id", None)
report_ii = getattr(report, "item_index", None)
close_report = next(
(
rep
for rep in self.open_reports
if (
rep.nodeid == report.nodeid
and getattr(rep, "item_index", None) == report_ii
and getattr(rep, "worker_id", None) == report_wid
)
),
None,
)
if close_report:
self.open_reports.remove(close_report)
def update_testcase_duration(self, report):
"""accumulates total duration for nodeid from given report and updates
the Junit.testcase with the new total if already created.
"""
reporter = self.node_reporter(report)
reporter.duration += getattr(report, "duration", 0.0)
def pytest_collectreport(self, report):
if not report.passed:
reporter = self._opentestcase(report)
if report.failed:
reporter.append_collect_error(report)
else:
reporter.append_collect_skipped(report)
def pytest_internalerror(self, excrepr):
reporter = self.node_reporter("internal")
reporter.attrs.update(classname="pytest", name="internal")
reporter._add_simple(Junit.error, "internal error", excrepr)
def pytest_sessionstart(self):
self.suite_start_time = time.time()
def pytest_sessionfinish(self):
dirname = os.path.dirname(os.path.abspath(self.logfile))
if not os.path.isdir(dirname):
os.makedirs(dirname)
logfile = open(self.logfile, "w", encoding="utf-8")
suite_stop_time = time.time()
suite_time_delta = suite_stop_time - self.suite_start_time
numtests = (
self.stats["passed"]
+ self.stats["failure"]
+ self.stats["skipped"]
+ self.stats["error"]
- self.cnt_double_fail_tests
)
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
logfile.write(
Junit.testsuite(
self._get_global_properties_node(),
[x.to_xml() for x in self.node_reporters_ordered],
name=self.suite_name,
errors=self.stats["error"],
failures=self.stats["failure"],
skips=self.stats["skipped"],
tests=numtests,
time="%.3f" % suite_time_delta,
).unicode(indent=0)
)
logfile.close()
def pytest_terminal_summary(self, terminalreporter):
terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile))
def add_global_property(self, name, value):
self.global_properties.append((str(name), bin_xml_escape(value)))
def _get_global_properties_node(self):
"""Return a Junit node containing custom properties, if any.
"""
if self.global_properties:
return Junit.properties(
[
Junit.property(name=name, value=value)
for name, value in self.global_properties
]
)
return ""
@@ -1,625 +0,0 @@
""" Access and control log capturing. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import re
from contextlib import contextmanager
import py
import six
import pytest
from _pytest.compat import dummy_context_manager
from _pytest.config import create_terminal_writer
DEFAULT_LOG_FORMAT = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s"
DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
class ColoredLevelFormatter(logging.Formatter):
"""
Colorize the %(levelname)..s part of the log format passed to __init__.
"""
LOGLEVEL_COLOROPTS = {
logging.CRITICAL: {"red"},
logging.ERROR: {"red", "bold"},
logging.WARNING: {"yellow"},
logging.WARN: {"yellow"},
logging.INFO: {"green"},
logging.DEBUG: {"purple"},
logging.NOTSET: set(),
}
LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-]?\d*s)")
def __init__(self, terminalwriter, *args, **kwargs):
super(ColoredLevelFormatter, self).__init__(*args, **kwargs)
if six.PY2:
self._original_fmt = self._fmt
else:
self._original_fmt = self._style._fmt
self._level_to_fmt_mapping = {}
levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt)
if not levelname_fmt_match:
return
levelname_fmt = levelname_fmt_match.group()
for level, color_opts in self.LOGLEVEL_COLOROPTS.items():
formatted_levelname = levelname_fmt % {
"levelname": logging.getLevelName(level)
}
# add ANSI escape sequences around the formatted levelname
color_kwargs = {name: True for name in color_opts}
colorized_formatted_levelname = terminalwriter.markup(
formatted_levelname, **color_kwargs
)
self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub(
colorized_formatted_levelname, self._fmt
)
def format(self, record):
fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt)
if six.PY2:
self._fmt = fmt
else:
self._style._fmt = fmt
return super(ColoredLevelFormatter, self).format(record)
def get_option_ini(config, *names):
for name in names:
ret = config.getoption(name) # 'default' arg won't work as expected
if ret is None:
ret = config.getini(name)
if ret:
return ret
def pytest_addoption(parser):
"""Add options to control log capturing."""
group = parser.getgroup("logging")
def add_option_ini(option, dest, default=None, type=None, **kwargs):
parser.addini(
dest, default=default, type=type, help="default value for " + option
)
group.addoption(option, dest=dest, **kwargs)
add_option_ini(
"--no-print-logs",
dest="log_print",
action="store_const",
const=False,
default=True,
type="bool",
help="disable printing caught logs on failed tests.",
)
add_option_ini(
"--log-level",
dest="log_level",
default=None,
help="logging level used by the logging module",
)
add_option_ini(
"--log-format",
dest="log_format",
default=DEFAULT_LOG_FORMAT,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-date-format",
dest="log_date_format",
default=DEFAULT_LOG_DATE_FORMAT,
help="log date format as used by the logging module.",
)
parser.addini(
"log_cli",
default=False,
type="bool",
help='enable log display during test run (also known as "live logging").',
)
add_option_ini(
"--log-cli-level", dest="log_cli_level", default=None, help="cli logging level."
)
add_option_ini(
"--log-cli-format",
dest="log_cli_format",
default=None,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-cli-date-format",
dest="log_cli_date_format",
default=None,
help="log date format as used by the logging module.",
)
add_option_ini(
"--log-file",
dest="log_file",
default=None,
help="path to a file when logging will be written to.",
)
add_option_ini(
"--log-file-level",
dest="log_file_level",
default=None,
help="log file logging level.",
)
add_option_ini(
"--log-file-format",
dest="log_file_format",
default=DEFAULT_LOG_FORMAT,
help="log format as used by the logging module.",
)
add_option_ini(
"--log-file-date-format",
dest="log_file_date_format",
default=DEFAULT_LOG_DATE_FORMAT,
help="log date format as used by the logging module.",
)
@contextmanager
def catching_logs(handler, formatter=None, level=None):
"""Context manager that prepares the whole logging machinery properly."""
root_logger = logging.getLogger()
if formatter is not None:
handler.setFormatter(formatter)
if level is not None:
handler.setLevel(level)
# Adding the same handler twice would confuse the logging system.
# Just don't do that.
add_new_handler = handler not in root_logger.handlers
if add_new_handler:
root_logger.addHandler(handler)
if level is not None:
orig_level = root_logger.level
root_logger.setLevel(min(orig_level, level))
try:
yield handler
finally:
if level is not None:
root_logger.setLevel(orig_level)
if add_new_handler:
root_logger.removeHandler(handler)
class LogCaptureHandler(logging.StreamHandler):
"""A logging handler that stores log records and the log text."""
def __init__(self):
"""Creates a new log handler."""
logging.StreamHandler.__init__(self, py.io.TextIO())
self.records = []
def emit(self, record):
"""Keep the log records in a list in addition to the log text."""
self.records.append(record)
logging.StreamHandler.emit(self, record)
def reset(self):
self.records = []
self.stream = py.io.TextIO()
class LogCaptureFixture(object):
"""Provides access and control of log capturing."""
def __init__(self, item):
"""Creates a new funcarg."""
self._item = item
# dict of log name -> log level
self._initial_log_levels = {} # type: Dict[str, int]
def _finalize(self):
"""Finalizes the fixture.
This restores the log levels changed by :meth:`set_level`.
"""
# restore log levels
for logger_name, level in self._initial_log_levels.items():
logger = logging.getLogger(logger_name)
logger.setLevel(level)
@property
def handler(self):
"""
:rtype: LogCaptureHandler
"""
return self._item.catch_log_handler
def get_records(self, when):
"""
Get the logging records for one of the possible test phases.
:param str when:
Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown".
:rtype: List[logging.LogRecord]
:return: the list of captured records at the given stage
.. versionadded:: 3.4
"""
handler = self._item.catch_log_handlers.get(when)
if handler:
return handler.records
else:
return []
@property
def text(self):
"""Returns the log text."""
return self.handler.stream.getvalue()
@property
def records(self):
"""Returns the list of log records."""
return self.handler.records
@property
def record_tuples(self):
"""Returns a list of a stripped down version of log records intended
for use in assertion comparison.
The format of the tuple is:
(logger_name, log_level, message)
"""
return [(r.name, r.levelno, r.getMessage()) for r in self.records]
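# Illustrative usage sketch (test code): compare captured records in a
# format-insensitive way::
#
#     import logging
#     logging.getLogger().info("boo %s", "arg")
#     assert caplog.record_tuples == [("root", logging.INFO, "boo arg")]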
@property
def messages(self):
"""Returns a list of format-interpolated log messages.
Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list
are all interpolated.
Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with
levels, timestamps, etc, making exact comparisons more reliable.
Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments
to the logging functions) is not included, as this is added by the formatter in the handler.
.. versionadded:: 3.7
"""
return [r.getMessage() for r in self.records]
def clear(self):
"""Reset the list of log records and the captured log text."""
self.handler.reset()
def set_level(self, level, logger=None):
"""Sets the level for capturing of logs. The level will be restored to its previous value at the end of
the test.
:param int level: the level.
:param str logger: the logger to update the level for. If not given, the root logger level is updated.
.. versionchanged:: 3.4
The levels of the loggers changed by this function will be restored to their initial values at the
end of the test.
"""
logger_name = logger
logger = logging.getLogger(logger_name)
# save the original log-level to restore it during teardown
self._initial_log_levels.setdefault(logger_name, logger.level)
logger.setLevel(level)
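# Illustrative usage sketch (test code; "myapp.db" is an assumed logger
# name)::
#
#     import logging
#
#     def test_db_queries(caplog):
#         caplog.set_level(logging.DEBUG, logger="myapp.db")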
@contextmanager
def at_level(self, level, logger=None):
"""Context manager that sets the level for capturing of logs. After the end of the 'with' statement the
level is restored to its original value.
:param int level: the level.
:param str logger: the logger to update the level for. If not given, the root logger level is updated.
"""
logger = logging.getLogger(logger)
orig_level = logger.level
logger.setLevel(level)
try:
yield
finally:
logger.setLevel(orig_level)
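# Illustrative usage sketch (test code; ``run_noisy_step`` is an assumed
# helper): raise the capture level only inside the ``with`` block::
#
#     import logging
#
#     def test_noisy_step(caplog):
#         with caplog.at_level(logging.INFO):
#             run_noisy_step()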
@pytest.fixture
def caplog(request):
"""Access and control log capturing.
Captured logs are available through the following properties/methods::
* caplog.text -> string containing formatted log output
* caplog.records -> list of logging.LogRecord instances
* caplog.record_tuples -> list of (logger_name, level, message) tuples
* caplog.clear() -> clear captured records and formatted log output string
"""
result = LogCaptureFixture(request.node)
yield result
result._finalize()
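# Illustrative usage sketch for the fixture above (test code)::
#
#     import logging
#
#     def test_warning_logged(caplog):
#         logging.getLogger(__name__).warning("disk almost full")
#         assert "disk almost full" in caplog.text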
def get_actual_log_level(config, *setting_names):
"""Return the actual logging level."""
for setting_name in setting_names:
log_level = config.getoption(setting_name)
if log_level is None:
log_level = config.getini(setting_name)
if log_level:
break
else:
return
if isinstance(log_level, six.string_types):
log_level = log_level.upper()
try:
return int(getattr(logging, log_level, log_level))
except ValueError:
# Python logging does not recognise this as a logging level
raise pytest.UsageError(
"'{}' is not recognized as a logging level name for "
"'{}'. Please consider passing the "
"logging level num instead.".format(log_level, setting_name)
)
def pytest_configure(config):
config.pluginmanager.register(LoggingPlugin(config), "logging-plugin")
class LoggingPlugin(object):
"""Attaches to the logging module and captures log messages for each test.
"""
def __init__(self, config):
"""Creates a new plugin to capture log messages.
The formatter can be safely shared across all handlers so
create a single one for the entire test session here.
"""
self._config = config
# enable verbose output automatically if live logging is enabled
if self._log_cli_enabled() and not config.getoption("verbose"):
# sanity check: terminal reporter should not have been loaded at this point
assert self._config.pluginmanager.get_plugin("terminalreporter") is None
config.option.verbose = 1
self.print_logs = get_option_ini(config, "log_print")
self.formatter = logging.Formatter(
get_option_ini(config, "log_format"),
get_option_ini(config, "log_date_format"),
)
self.log_level = get_actual_log_level(config, "log_level")
log_file = get_option_ini(config, "log_file")
if log_file:
self.log_file_level = get_actual_log_level(config, "log_file_level")
log_file_format = get_option_ini(config, "log_file_format", "log_format")
log_file_date_format = get_option_ini(
config, "log_file_date_format", "log_date_format"
)
# Each pytest runtests session will write to a clean logfile
self.log_file_handler = logging.FileHandler(
log_file, mode="w", encoding="UTF-8"
)
log_file_formatter = logging.Formatter(
log_file_format, datefmt=log_file_date_format
)
self.log_file_handler.setFormatter(log_file_formatter)
else:
self.log_file_handler = None
self.log_cli_handler = None
def _log_cli_enabled(self):
"""Return True if log_cli should be considered enabled, either explicitly
or because --log-cli-level was given in the command-line.
"""
return self._config.getoption(
"--log-cli-level"
) is not None or self._config.getini("log_cli")
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection(self):
# This has to be called before the first log message is logged,
# so we can access the terminal reporter plugin.
self._setup_cli_logging()
with self.live_logs_context():
if self.log_cli_handler:
self.log_cli_handler.set_when("collection")
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
else:
yield
@contextmanager
def _runtest_for(self, item, when):
"""Implements the internals of pytest_runtest_xxx() hook."""
with catching_logs(
LogCaptureHandler(), formatter=self.formatter, level=self.log_level
) as log_handler:
if self.log_cli_handler:
self.log_cli_handler.set_when(when)
if item is None:
yield # run the test
return
if not hasattr(item, "catch_log_handlers"):
item.catch_log_handlers = {}
item.catch_log_handlers[when] = log_handler
item.catch_log_handler = log_handler
try:
yield # run test
finally:
if when == "teardown":
del item.catch_log_handler
del item.catch_log_handlers
if self.print_logs:
# Add a captured log section to the report.
log = log_handler.stream.getvalue().strip()
item.add_report_section(when, "log", log)
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_setup(self, item):
with self._runtest_for(item, "setup"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
with self._runtest_for(item, "call"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_teardown(self, item):
with self._runtest_for(item, "teardown"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_logstart(self):
if self.log_cli_handler:
self.log_cli_handler.reset()
with self._runtest_for(None, "start"):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_logfinish(self):
with self._runtest_for(None, "finish"):
yield
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_sessionfinish(self):
with self.live_logs_context():
if self.log_cli_handler:
self.log_cli_handler.set_when("sessionfinish")
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
else:
yield
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_sessionstart(self):
self._setup_cli_logging()
with self.live_logs_context():
if self.log_cli_handler:
self.log_cli_handler.set_when("sessionstart")
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield
else:
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_runtestloop(self, session):
"""Runs all collected test items."""
with self.live_logs_context():
if self.log_file_handler is not None:
with catching_logs(self.log_file_handler, level=self.log_file_level):
yield # run all the tests
else:
yield # run all the tests
def _setup_cli_logging(self):
"""Sets up the handler and logger for the Live Logs feature, if enabled."""
terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter")
if self._log_cli_enabled() and terminal_reporter is not None:
capture_manager = self._config.pluginmanager.get_plugin("capturemanager")
log_cli_handler = _LiveLoggingStreamHandler(
terminal_reporter, capture_manager
)
log_cli_format = get_option_ini(
self._config, "log_cli_format", "log_format"
)
log_cli_date_format = get_option_ini(
self._config, "log_cli_date_format", "log_date_format"
)
if (
self._config.option.color != "no"
and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format)
):
log_cli_formatter = ColoredLevelFormatter(
create_terminal_writer(self._config),
log_cli_format,
datefmt=log_cli_date_format,
)
else:
log_cli_formatter = logging.Formatter(
log_cli_format, datefmt=log_cli_date_format
)
log_cli_level = get_actual_log_level(
self._config, "log_cli_level", "log_level"
)
self.log_cli_handler = log_cli_handler
self.live_logs_context = lambda: catching_logs(
log_cli_handler, formatter=log_cli_formatter, level=log_cli_level
)
else:
self.live_logs_context = lambda: dummy_context_manager()
# Note that the lambda for the live_logs_context is needed because
# live_logs_context can otherwise not be entered multiple times due
# to limitations of contextlib.contextmanager
class _LiveLoggingStreamHandler(logging.StreamHandler):
"""
Custom StreamHandler used by the live logging feature: it will write a newline before the first log message
in each test.
During live logging we must also explicitly disable stdout/stderr capturing, otherwise it will get captured
and won't appear in the terminal.
"""
def __init__(self, terminal_reporter, capture_manager):
"""
:param _pytest.terminal.TerminalReporter terminal_reporter:
:param _pytest.capture.CaptureManager capture_manager:
"""
logging.StreamHandler.__init__(self, stream=terminal_reporter)
self.capture_manager = capture_manager
self.reset()
self.set_when(None)
self._test_outcome_written = False
def reset(self):
"""Reset the handler; should be called before the start of each test"""
self._first_record_emitted = False
def set_when(self, when):
"""Prepares for the given test phase (setup/call/teardown)"""
self._when = when
self._section_name_shown = False
if when == "start":
self._test_outcome_written = False
def emit(self, record):
ctx_manager = (
self.capture_manager.global_and_fixture_disabled()
if self.capture_manager
else dummy_context_manager()
)
with ctx_manager:
if not self._first_record_emitted:
self.stream.write("\n")
self._first_record_emitted = True
elif self._when in ("teardown", "finish"):
if not self._test_outcome_written:
self._test_outcome_written = True
self.stream.write("\n")
if not self._section_name_shown and self._when:
self.stream.section("live log " + self._when, sep="-", bold=True)
self._section_name_shown = True
logging.StreamHandler.emit(self, record)
@@ -1,171 +0,0 @@
""" generic mechanism for marking and selecting python functions. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .legacy import matchkeyword
from .legacy import matchmark
from .structures import EMPTY_PARAMETERSET_OPTION
from .structures import get_empty_parameterset_mark
from .structures import Mark
from .structures import MARK_GEN
from .structures import MarkDecorator
from .structures import MarkGenerator
from .structures import MarkInfo
from .structures import ParameterSet
from .structures import transfer_markers
from _pytest.config import UsageError
__all__ = [
"Mark",
"MarkInfo",
"MarkDecorator",
"MarkGenerator",
"transfer_markers",
"get_empty_parameterset_mark",
]
def param(*values, **kw):
"""Specify a parameter in `pytest.mark.parametrize`_ calls or
:ref:`parametrized fixtures <fixture-parametrize-marks>`.
.. code-block:: python
@pytest.mark.parametrize("test_input,expected", [
("3+5", 8),
pytest.param("6*9", 42, marks=pytest.mark.xfail),
])
def test_eval(test_input, expected):
assert eval(test_input) == expected
:param values: variable args of the values of the parameter set, in order.
:keyword marks: a single mark or a list of marks to be applied to this parameter set.
:keyword str id: the id to attribute to this parameter set.
"""
return ParameterSet.param(*values, **kw)
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption(
"-k",
action="store",
dest="keyword",
default="",
metavar="EXPRESSION",
help="only run tests which match the given substring expression. "
"An expression is a python evaluatable expression "
"where all names are substring-matched against test names "
"and their parent classes. Example: -k 'test_method or test_"
"other' matches all test functions and classes whose name "
"contains 'test_method' or 'test_other', while -k 'not test_method' "
"matches those that don't contain 'test_method' in their names. "
"Additionally keywords are matched to classes and functions "
"containing extra names in their 'extra_keyword_matches' set, "
"as well as functions which have names assigned directly to them.",
)
group._addoption(
"-m",
action="store",
dest="markexpr",
default="",
metavar="MARKEXPR",
help="only run tests matching given mark expression. "
"example: -m 'mark1 and not mark2'.",
)
group.addoption(
"--markers",
action="store_true",
help="show markers (builtin, plugin and per-project ones).",
)
parser.addini("markers", "markers for test functions", "linelist")
parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets")
def pytest_cmdline_main(config):
import _pytest.config
if config.option.markers:
config._do_configure()
tw = _pytest.config.create_terminal_writer(config)
for line in config.getini("markers"):
parts = line.split(":", 1)
name = parts[0]
rest = parts[1] if len(parts) == 2 else ""
tw.write("@pytest.mark.%s:" % name, bold=True)
tw.line(rest)
tw.line()
config._ensure_unconfigure()
return 0
pytest_cmdline_main.tryfirst = True
def deselect_by_keyword(items, config):
keywordexpr = config.option.keyword.lstrip()
if keywordexpr.startswith("-"):
keywordexpr = "not " + keywordexpr[1:]
selectuntil = False
if keywordexpr[-1:] == ":":
selectuntil = True
keywordexpr = keywordexpr[:-1]
remaining = []
deselected = []
for colitem in items:
if keywordexpr and not matchkeyword(colitem, keywordexpr):
deselected.append(colitem)
else:
if selectuntil:
keywordexpr = None
remaining.append(colitem)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
def deselect_by_mark(items, config):
matchexpr = config.option.markexpr
if not matchexpr:
return
remaining = []
deselected = []
for item in items:
if matchmark(item, matchexpr):
remaining.append(item)
else:
deselected.append(item)
if deselected:
config.hook.pytest_deselected(items=deselected)
items[:] = remaining
def pytest_collection_modifyitems(items, config):
deselect_by_keyword(items, config)
deselect_by_mark(items, config)
def pytest_configure(config):
config._old_mark_config = MARK_GEN._config
if config.option.strict:
MARK_GEN._config = config
empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION)
if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""):
raise UsageError(
"{!s} must be one of skip, xfail or fail_at_collect"
" but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset)
)
def pytest_unconfigure(config):
MARK_GEN._config = getattr(config, "_old_mark_config", None)
@@ -1,125 +0,0 @@
import os
import platform
import sys
import traceback
import six
from ..outcomes import fail
from ..outcomes import TEST_OUTCOME
def cached_eval(config, expr, d):
if not hasattr(config, "_evalcache"):
config._evalcache = {}
try:
return config._evalcache[expr]
except KeyError:
import _pytest._code
exprcode = _pytest._code.compile(expr, mode="eval")
config._evalcache[expr] = x = eval(exprcode, d)
return x
class MarkEvaluator(object):
def __init__(self, item, name):
self.item = item
self._marks = None
self._mark = None
self._mark_name = name
def __bool__(self):
# don't cache here to prevent staleness
return bool(self._get_marks())
__nonzero__ = __bool__
def wasvalid(self):
return not hasattr(self, "exc")
def _get_marks(self):
return list(self.item.iter_markers(name=self._mark_name))
def invalidraise(self, exc):
raises = self.get("raises")
if not raises:
return
return not isinstance(exc, raises)
def istrue(self):
try:
return self._istrue()
except TEST_OUTCOME:
self.exc = sys.exc_info()
if isinstance(self.exc[1], SyntaxError):
msg = [" " * (self.exc[1].offset + 4) + "^"]
msg.append("SyntaxError: invalid syntax")
else:
msg = traceback.format_exception_only(*self.exc[:2])
fail(
"Error evaluating %r expression\n"
" %s\n"
"%s" % (self._mark_name, self.expr, "\n".join(msg)),
pytrace=False,
)
def _getglobals(self):
d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config}
if hasattr(self.item, "obj"):
d.update(self.item.obj.__globals__)
return d
def _istrue(self):
if hasattr(self, "result"):
return self.result
self._marks = self._get_marks()
if self._marks:
self.result = False
for mark in self._marks:
self._mark = mark
if "condition" in mark.kwargs:
args = (mark.kwargs["condition"],)
else:
args = mark.args
for expr in args:
self.expr = expr
if isinstance(expr, six.string_types):
d = self._getglobals()
result = cached_eval(self.item.config, expr, d)
else:
if "reason" not in mark.kwargs:
# XXX better be checked at collection time
msg = (
"you need to specify reason=STRING "
"when using booleans as conditions."
)
fail(msg)
result = bool(expr)
if result:
self.result = True
self.reason = mark.kwargs.get("reason", None)
self.expr = expr
return self.result
if not args:
self.result = True
self.reason = mark.kwargs.get("reason", None)
return self.result
return False
def get(self, attr, default=None):
if self._mark is None:
return default
return self._mark.kwargs.get(attr, default)
def getexplanation(self):
expl = getattr(self, "reason", None) or self.get("reason", None)
if not expl:
if not hasattr(self, "expr"):
return ""
else:
return "condition: " + str(self.expr)
return expl
@@ -1,101 +0,0 @@
"""
this is a place where we put datastructures used by legacy apis
we hope to remove
"""
import keyword
import attr
from _pytest.config import UsageError
@attr.s
class MarkMapping(object):
"""Provides a local mapping for markers where item access
resolves to True if the marker is present. """
own_mark_names = attr.ib()
@classmethod
def from_item(cls, item):
mark_names = {mark.name for mark in item.iter_markers()}
return cls(mark_names)
def __getitem__(self, name):
return name in self.own_mark_names
class KeywordMapping(object):
"""Provides a local mapping for keywords.
Given a list of names, map any substring of one of these names to True.
"""
def __init__(self, names):
self._names = names
@classmethod
def from_item(cls, item):
mapped_names = set()
# Add the names of the current item and any parent items
import pytest
for item in item.listchain():
if not isinstance(item, pytest.Instance):
mapped_names.add(item.name)
# Add the names added as extra keywords to current or parent items
for name in item.listextrakeywords():
mapped_names.add(name)
# Add the names attached to the current function through direct assignment
if hasattr(item, "function"):
for name in item.function.__dict__:
mapped_names.add(name)
return cls(mapped_names)
def __getitem__(self, subname):
for name in self._names:
if subname in name:
return True
return False
python_keywords_allowed_list = ["or", "and", "not"]
def matchmark(colitem, markexpr):
"""Tries to match on any marker names, attached to the given colitem."""
try:
return eval(markexpr, {}, MarkMapping.from_item(colitem))
except SyntaxError as e:
raise SyntaxError(str(e) + "\nMarker expression must be valid Python!")
def matchkeyword(colitem, keywordexpr):
"""Tries to match given keyword expression to given collector item.
Will match on the name of colitem, including the names of its parents.
Only matches names of items which are either a :class:`Class` or a
:class:`Function`.
Additionally, matches on names in the 'extra_keyword_matches' set of
any item, as well as names directly assigned to test functions.
"""
mapping = KeywordMapping.from_item(colitem)
if " " not in keywordexpr:
# special case to allow for simple "-k pass" and "-k 1.3"
return mapping[keywordexpr]
elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]:
return not mapping[keywordexpr[4:]]
for kwd in keywordexpr.split():
if keyword.iskeyword(kwd) and kwd not in python_keywords_allowed_list:
raise UsageError(
"Python keyword '{}' not accepted in expressions passed to '-k'".format(
kwd
)
)
try:
return eval(keywordexpr, {}, mapping)
except SyntaxError:
raise UsageError("Wrong expression passed to '-k': {}".format(keywordexpr))
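# Worked example (illustration only): with ``-k "http and not ssl"`` an item
# named ``test_http_get`` matches, because "http" is a substring of one of
# its mapped names while "ssl" is not, so the expression evaluates to True.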
@@ -1,471 +0,0 @@
import inspect
import warnings
from collections import namedtuple
from functools import reduce
from operator import attrgetter
import attr
from six.moves import map
from ..compat import getfslineno
from ..compat import MappingMixin
from ..compat import NOTSET
from ..deprecated import MARK_INFO_ATTRIBUTE
from ..deprecated import MARK_PARAMETERSET_UNPACKING
from _pytest.outcomes import fail
EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark"
def alias(name, warning=None):
getter = attrgetter(name)
def warned(self):
warnings.warn(warning, stacklevel=2)
return getter(self)
return property(getter if warning is None else warned, doc="alias for " + name)
def istestfunc(func):
return (
hasattr(func, "__call__")
and getattr(func, "__name__", "<lambda>") != "<lambda>"
)
def get_empty_parameterset_mark(config, argnames, func):
from ..nodes import Collector
requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION)
if requested_mark in ("", None, "skip"):
mark = MARK_GEN.skip
elif requested_mark == "xfail":
mark = MARK_GEN.xfail(run=False)
elif requested_mark == "fail_at_collect":
f_name = func.__name__
_, lineno = getfslineno(func)
raise Collector.CollectError(
"Empty parameter set in '%s' at line %d" % (f_name, lineno)
)
else:
raise LookupError(requested_mark)
fs, lineno = getfslineno(func)
reason = "got empty parameter set %r, function %s at %s:%d" % (
argnames,
func.__name__,
fs,
lineno,
)
return mark(reason=reason)
class ParameterSet(namedtuple("ParameterSet", "values, marks, id")):
@classmethod
def param(cls, *values, **kw):
marks = kw.pop("marks", ())
if isinstance(marks, MarkDecorator):
marks = (marks,)
else:
assert isinstance(marks, (tuple, list, set))
def param_extract_id(id=None):
return id
id_ = param_extract_id(**kw)
return cls(values, marks, id_)
@classmethod
def extract_from(cls, parameterset, belonging_definition, legacy_force_tuple=False):
"""
:param parameterset:
a legacy style parameterset that may or may not be a tuple,
and may or may not be wrapped into a mess of mark objects
:param legacy_force_tuple:
enforce tuple wrapping so single argument tuple values
don't get decomposed and break tests
:param belonging_definition: the item that we will be extracting the parameters from.
"""
if isinstance(parameterset, cls):
return parameterset
if not isinstance(parameterset, MarkDecorator) and legacy_force_tuple:
return cls.param(parameterset)
newmarks = []
argval = parameterset
while isinstance(argval, MarkDecorator):
newmarks.append(
MarkDecorator(Mark(argval.markname, argval.args[:-1], argval.kwargs))
)
argval = argval.args[-1]
assert not isinstance(argval, ParameterSet)
if legacy_force_tuple:
argval = (argval,)
if newmarks and belonging_definition is not None:
belonging_definition.warn(MARK_PARAMETERSET_UNPACKING)
return cls(argval, marks=newmarks, id=None)
@classmethod
def _for_parametrize(cls, argnames, argvalues, func, config, function_definition):
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
force_tuple = len(argnames) == 1
else:
force_tuple = False
parameters = [
ParameterSet.extract_from(
x,
legacy_force_tuple=force_tuple,
belonging_definition=function_definition,
)
for x in argvalues
]
del argvalues
if parameters:
# check all parameter sets have the correct number of values
for param in parameters:
if len(param.values) != len(argnames):
raise ValueError(
'In "parametrize" the number of values ({}) must be '
"equal to the number of names ({})".format(
param.values, argnames
)
)
else:
# empty parameter set (likely computed at runtime): create a single
# parameter set with NOTSET values, with the "empty parameter set" mark applied to it
mark = get_empty_parameterset_mark(config, argnames, func)
parameters.append(
ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None)
)
return argnames, parameters
@attr.s(frozen=True)
class Mark(object):
#: name of the mark
name = attr.ib(type=str)
#: positional arguments of the mark decorator
args = attr.ib() # type: List[object]
#: keyword arguments of the mark decorator
kwargs = attr.ib() # type: Dict[str, object]
def combined_with(self, other):
"""
:param other: the mark to combine with
:type other: Mark
:rtype: Mark
combines by appending args and merging the mappings
"""
assert self.name == other.name
return Mark(
self.name, self.args + other.args, dict(self.kwargs, **other.kwargs)
)
@attr.s
class MarkDecorator(object):
""" A decorator for test functions and test classes. When applied
it will create :class:`MarkInfo` objects which may be
:ref:`retrieved by hooks as item keywords <excontrolskip>`.
MarkDecorator instances are often created like this::
mark1 = pytest.mark.NAME # simple MarkDecorator
mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator
and can then be applied as decorators to test functions::
@mark2
def test_function():
pass
When a MarkDecorator instance is called it does the following:
1. If called with a single class as its only positional argument and no
additional keyword arguments, it attaches itself to the class so it
gets applied automatically to all test cases found in that class.
2. If called with a single function as its only positional argument and
no additional keyword arguments, it attaches a MarkInfo object to the
function, containing all the arguments already stored internally in
the MarkDecorator.
3. When called in any other case, it performs a 'fake construction' call,
i.e. it returns a new MarkDecorator instance with the original
MarkDecorator's content updated with the arguments passed to this
call.
Note: The rules above prevent MarkDecorator objects from storing only a
single function or class reference as their positional argument with no
additional keyword or positional arguments.
"""
mark = attr.ib(validator=attr.validators.instance_of(Mark))
name = alias("mark.name")
args = alias("mark.args")
kwargs = alias("mark.kwargs")
@property
def markname(self):
return self.name # for backward-compat (2.4.1 had this attr)
def __eq__(self, other):
return self.mark == other.mark if isinstance(other, MarkDecorator) else False
def __repr__(self):
return "<MarkDecorator %r>" % (self.mark,)
def with_args(self, *args, **kwargs):
""" return a MarkDecorator with extra arguments added
unlike calling the MarkDecorator, this can be used even if the sole argument is a callable/class
:return: MarkDecorator
"""
mark = Mark(self.name, args, kwargs)
return self.__class__(self.mark.combined_with(mark))
def __call__(self, *args, **kwargs):
""" if passed a single callable argument: decorate it with mark info.
otherwise add *args/**kwargs in-place to mark information. """
if args and not kwargs:
func = args[0]
is_class = inspect.isclass(func)
if len(args) == 1 and (istestfunc(func) or is_class):
if is_class:
store_mark(func, self.mark)
else:
store_legacy_markinfo(func, self.mark)
store_mark(func, self.mark)
return func
return self.with_args(*args, **kwargs)
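# Illustrative sketch (not part of this module): the calling modes documented
# above, i.e. decorating a function directly versus the 'fake construction'
# call that returns a new, parametrized decorator. Assumes pytest is
# installed; the marker name "slow" and its arguments are hypothetical.
import pytest

@pytest.mark.slow                       # single callable argument: decorates directly
def test_plain():
    pass

@pytest.mark.slow("db", retries=3)      # other arguments: returns a new MarkDecorator
def test_with_args():
    pass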
def get_unpacked_marks(obj):
"""
obtain the unpacked marks that are stored on an object
"""
mark_list = getattr(obj, "pytestmark", [])
if not isinstance(mark_list, list):
mark_list = [mark_list]
return normalize_mark_list(mark_list)
def normalize_mark_list(mark_list):
"""
normalizes marker decorating helpers to mark objects
:type mark_list: List[Union[Mark, MarkDecorator]]
:rtype: List[Mark]
"""
return [getattr(mark, "mark", mark) for mark in mark_list] # unpack MarkDecorator
def store_mark(obj, mark):
"""store a Mark on an object
this is used to implement the Mark declarations/decorators correctly
"""
assert isinstance(mark, Mark), mark
# always reassign name to avoid updating pytestmark
# in a reference that was only borrowed
obj.pytestmark = get_unpacked_marks(obj) + [mark]
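# Illustrative sketch (not part of this module): store_mark() and
# get_unpacked_marks() above operate on the ``pytestmark`` attribute, which is
# also how module- and class-level marks are declared by hand. Assumes pytest
# is installed; the marker names are hypothetical.
import pytest

pytestmark = pytest.mark.integration        # module-level mark

class TestAPI(object):
    pytestmark = [pytest.mark.slow]         # class-level marks as a list

    def test_endpoint(self):
        pass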
def store_legacy_markinfo(func, mark):
"""create the legacy MarkInfo objects and put them onto the function
"""
if not isinstance(mark, Mark):
raise TypeError("got {mark!r} instead of a Mark".format(mark=mark))
holder = getattr(func, mark.name, None)
if holder is None:
holder = MarkInfo.for_mark(mark)
setattr(func, mark.name, holder)
elif isinstance(holder, MarkInfo):
holder.add_mark(mark)
def transfer_markers(funcobj, cls, mod):
"""
this function transfers class level markers and module level markers
into function level markinfo objects
this is the main reason why marks are so broken
the resolution will involve phasing out function level MarkInfo objects
"""
for obj in (cls, mod):
for mark in get_unpacked_marks(obj):
if not _marked(funcobj, mark):
store_legacy_markinfo(funcobj, mark)
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
This can happen if marker is applied to class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, getattr(mark, "combined", mark).name)
except AttributeError:
return False
return any(mark == info.combined for info in func_mark)
@attr.s(repr=False)
class MarkInfo(object):
""" Marking object created by :class:`MarkDecorator` instances. """
_marks = attr.ib(converter=list)
@_marks.validator
def validate_marks(self, attribute, value):
for item in value:
if not isinstance(item, Mark):
raise ValueError(
"MarkInfo expects Mark instances, got {!r} ({!r})".format(
item, type(item)
)
)
combined = attr.ib(
repr=False,
default=attr.Factory(
lambda self: reduce(Mark.combined_with, self._marks), takes_self=True
),
)
name = alias("combined.name", warning=MARK_INFO_ATTRIBUTE)
args = alias("combined.args", warning=MARK_INFO_ATTRIBUTE)
kwargs = alias("combined.kwargs", warning=MARK_INFO_ATTRIBUTE)
@classmethod
def for_mark(cls, mark):
return cls([mark])
def __repr__(self):
return "<MarkInfo {!r}>".format(self.combined)
def add_mark(self, mark):
""" add a MarkInfo with the given args and kwargs. """
self._marks.append(mark)
self.combined = self.combined.combined_with(mark)
def __iter__(self):
""" yield MarkInfo objects each relating to a marking-call. """
return map(MarkInfo.for_mark, self._marks)
class MarkGenerator(object):
""" Factory for :class:`MarkDecorator` objects - exposed as
a ``pytest.mark`` singleton instance. Example::
import pytest
@pytest.mark.slowtest
def test_function():
pass
will set a 'slowtest' :class:`MarkInfo` object
on the ``test_function`` object. """
_config = None
def __getattr__(self, name):
if name[0] == "_":
raise AttributeError("Marker name must NOT start with underscore")
if self._config is not None:
self._check(name)
return MarkDecorator(Mark(name, (), {}))
def _check(self, name):
try:
if name in self._markers:
return
except AttributeError:
pass
self._markers = values = set()
for line in self._config.getini("markers"):
marker = line.split(":", 1)[0]
marker = marker.rstrip()
x = marker.split("(", 1)[0]
values.add(x)
if name not in self._markers:
fail("{!r} not a registered marker".format(name), pytrace=False)
MARK_GEN = MarkGenerator()
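# Illustrative sketch (not part of this module): _check() above compares the
# requested marker name against the ``markers`` ini option, so custom marks
# are registered in a hypothetical pytest.ini like:
#
#   [pytest]
#   markers =
#       slow: marks tests as slow
#       integration: end-to-end tests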
class NodeKeywords(MappingMixin):
def __init__(self, node):
self.node = node
self.parent = node.parent
self._markers = {node.name: True}
def __getitem__(self, key):
try:
return self._markers[key]
except KeyError:
if self.parent is None:
raise
return self.parent.keywords[key]
def __setitem__(self, key, value):
self._markers[key] = value
def __delitem__(self, key):
raise ValueError("cannot delete key in keywords dict")
def __iter__(self):
seen = self._seen()
return iter(seen)
def _seen(self):
seen = set(self._markers)
if self.parent is not None:
seen.update(self.parent.keywords)
return seen
def __len__(self):
return len(self._seen())
def __repr__(self):
return "<NodeKeywords for node %s>" % (self.node,)
@attr.s(cmp=False, hash=False)
class NodeMarkers(object):
"""
internal structure for storing marks belonging to a node
.. warning::
unstable api
"""
own_markers = attr.ib(default=attr.Factory(list))
def update(self, add_markers):
"""update the own markers
"""
self.own_markers.extend(add_markers)
def find(self, name):
"""
find markers in own nodes or parent nodes
needs a better place
"""
for mark in self.own_markers:
if mark.name == name:
yield mark
def __iter__(self):
return iter(self.own_markers)

@ -1,308 +0,0 @@
""" monkeypatching and mocking functionality. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import warnings
from contextlib import contextmanager
import six
import pytest
from _pytest.fixtures import fixture
RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")
@fixture
def monkeypatch():
"""The returned ``monkeypatch`` fixture provides these
helper methods to modify objects, dictionaries or os.environ::
monkeypatch.setattr(obj, name, value, raising=True)
monkeypatch.delattr(obj, name, raising=True)
monkeypatch.setitem(mapping, name, value)
monkeypatch.delitem(obj, name, raising=True)
monkeypatch.setenv(name, value, prepend=False)
monkeypatch.delenv(name, raising=True)
monkeypatch.syspath_prepend(path)
monkeypatch.chdir(path)
All modifications will be undone after the requesting
test function or fixture has finished. The ``raising``
parameter determines if a KeyError or AttributeError
will be raised if the set/deletion operation has no target.
"""
mpatch = MonkeyPatch()
yield mpatch
mpatch.undo()
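# Illustrative sketch (not part of this module): typical use of the fixture
# defined above; every change is reverted by mpatch.undo() at teardown.
# Assumes pytest is installed; the patched values are hypothetical.
import os

def test_fake_home(monkeypatch):
    monkeypatch.setenv("HOME", "/tmp/fake-home")
    monkeypatch.setattr(os, "getcwd", lambda: "/")
    assert os.environ["HOME"] == "/tmp/fake-home"
    assert os.getcwd() == "/"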
def resolve(name):
# simplified from zope.dottedname
parts = name.split(".")
used = parts.pop(0)
found = __import__(used)
for part in parts:
used += "." + part
try:
found = getattr(found, part)
except AttributeError:
pass
else:
continue
# we use explicit un-nesting of the handling block in order
# to avoid nested exceptions on python 3
try:
__import__(used)
except ImportError as ex:
# str is used for py2 vs py3
expected = str(ex).split()[-1]
if expected == used:
raise
else:
raise ImportError("import error in %s: %s" % (used, ex))
found = annotated_getattr(found, part, used)
return found
def annotated_getattr(obj, name, ann):
try:
obj = getattr(obj, name)
except AttributeError:
raise AttributeError(
"%r object at %s has no attribute %r" % (type(obj).__name__, ann, name)
)
return obj
def derive_importpath(import_path, raising):
if not isinstance(import_path, six.string_types) or "." not in import_path:
raise TypeError("must be absolute import path string, not %r" % (import_path,))
module, attr = import_path.rsplit(".", 1)
target = resolve(module)
if raising:
annotated_getattr(target, attr, ann=module)
return attr, target
class Notset(object):
def __repr__(self):
return "<notset>"
notset = Notset()
class MonkeyPatch(object):
""" Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.
"""
def __init__(self):
self._setattr = []
self._setitem = []
self._cwd = None
self._savesyspath = None
@contextmanager
def context(self):
"""
Context manager that returns a new :class:`MonkeyPatch` object which
undoes any patching done inside the ``with`` block upon exit:
.. code-block:: python
import functools
def test_partial(monkeypatch):
with monkeypatch.context() as m:
m.setattr(functools, "partial", 3)
Useful in situations where it is desired to undo some patches before the test ends,
such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
of this see `#3290 <https://github.com/pytest-dev/pytest/issues/3290>`_).
"""
m = MonkeyPatch()
try:
yield m
finally:
m.undo()
def setattr(self, target, name, value=notset, raising=True):
""" Set attribute value on target, memorizing the old value.
By default raise AttributeError if the attribute did not exist.
For convenience you can specify a string as ``target`` which
will be interpreted as a dotted import path, with the last part
being the attribute name. Example:
``monkeypatch.setattr("os.getcwd", lambda: "/")``
would set the ``getcwd`` function of the ``os`` module.
The ``raising`` value determines if the setattr should fail
if the attribute is not already present (defaults to True
which means it will raise).
"""
__tracebackhide__ = True
import inspect
if value is notset:
if not isinstance(target, six.string_types):
raise TypeError(
"use setattr(target, name, value) or "
"setattr(target, value) with target being a dotted "
"import string"
)
value = name
name, target = derive_importpath(target, raising)
oldval = getattr(target, name, notset)
if raising and oldval is notset:
raise AttributeError("%r has no attribute %r" % (target, name))
# avoid class descriptors like staticmethod/classmethod
if inspect.isclass(target):
oldval = target.__dict__.get(name, notset)
self._setattr.append((target, name, oldval))
setattr(target, name, value)
def delattr(self, target, name=notset, raising=True):
""" Delete attribute ``name`` from ``target``, by default raise
AttributeError if the attribute did not previously exist.
If no ``name`` is specified and ``target`` is a string
it will be interpreted as a dotted import path with the
last part being the attribute name.
If ``raising`` is set to False, no exception will be raised if the
attribute is missing.
"""
__tracebackhide__ = True
if name is notset:
if not isinstance(target, six.string_types):
raise TypeError(
"use delattr(target, name) or "
"delattr(target) with target being a dotted "
"import string"
)
name, target = derive_importpath(target, raising)
if not hasattr(target, name):
if raising:
raise AttributeError(name)
else:
self._setattr.append((target, name, getattr(target, name, notset)))
delattr(target, name)
def setitem(self, dic, name, value):
""" Set dictionary entry ``name`` to value. """
self._setitem.append((dic, name, dic.get(name, notset)))
dic[name] = value
def delitem(self, dic, name, raising=True):
""" Delete ``name`` from dict. Raise KeyError if it doesn't exist.
If ``raising`` is set to False, no exception will be raised if the
key is missing.
"""
if name not in dic:
if raising:
raise KeyError(name)
else:
self._setitem.append((dic, name, dic.get(name, notset)))
del dic[name]
def _warn_if_env_name_is_not_str(self, name):
"""On Python 2, warn if the given environment variable name is not a native str (#4056)"""
if six.PY2 and not isinstance(name, str):
warnings.warn(
pytest.PytestWarning(
"Environment variable name {!r} should be str".format(name)
)
)
def setenv(self, name, value, prepend=None):
""" Set environment variable ``name`` to ``value``. If ``prepend``
is a character, read the current environment variable value
and prepend the ``value`` adjoined with the ``prepend`` character."""
if not isinstance(value, str):
warnings.warn(
pytest.PytestWarning(
"Value of environment variable {name} type should be str, but got "
"{value!r} (type: {type}); converted to str implicitly".format(
name=name, value=value, type=type(value).__name__
)
),
stacklevel=2,
)
value = str(value)
if prepend and name in os.environ:
value = value + prepend + os.environ[name]
self._warn_if_env_name_is_not_str(name)
self.setitem(os.environ, name, value)
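# Illustrative sketch (not part of this module): prepending to a PATH-like
# variable with the ``prepend`` separator handled above, e.g.
#   monkeypatch.setenv("PATH", "/opt/tool/bin", prepend=os.pathsep)
# leaves os.environ["PATH"] as "/opt/tool/bin" + os.pathsep + <previous PATH>.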
def delenv(self, name, raising=True):
""" Delete ``name`` from the environment. Raise KeyError if it does
not exist.
If ``raising`` is set to False, no exception will be raised if the
environment variable is missing.
"""
self._warn_if_env_name_is_not_str(name)
self.delitem(os.environ, name, raising=raising)
def syspath_prepend(self, path):
""" Prepend ``path`` to ``sys.path`` list of import locations. """
if self._savesyspath is None:
self._savesyspath = sys.path[:]
sys.path.insert(0, str(path))
def chdir(self, path):
""" Change the current working directory to the specified path.
Path can be a string or a py.path.local object.
"""
if self._cwd is None:
self._cwd = os.getcwd()
if hasattr(path, "chdir"):
path.chdir()
else:
os.chdir(path)
def undo(self):
""" Undo previous changes. This call consumes the
undo stack. Calling it a second time has no effect unless
you do more monkeypatching after the undo call.
There is generally no need to call `undo()`, since it is
called automatically during tear-down.
Note that the same `monkeypatch` fixture is used across a
single test function invocation. If `monkeypatch` is used both by
the test function itself and one of the test fixtures,
calling `undo()` will undo all of the changes made in
both functions.
"""
for obj, name, value in reversed(self._setattr):
if value is not notset:
setattr(obj, name, value)
else:
delattr(obj, name)
self._setattr[:] = []
for dictionary, name, value in reversed(self._setitem):
if value is notset:
try:
del dictionary[name]
except KeyError:
pass # was already deleted, so we have the desired state
else:
dictionary[name] = value
self._setitem[:] = []
if self._savesyspath is not None:
sys.path[:] = self._savesyspath
self._savesyspath = None
if self._cwd is not None:
os.chdir(self._cwd)
self._cwd = None

@ -1,535 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import warnings
import attr
import py
import six
import _pytest._code
from _pytest.compat import getfslineno
from _pytest.mark.structures import MarkInfo
from _pytest.mark.structures import NodeKeywords
from _pytest.outcomes import fail
SEP = "/"
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
def _splitnode(nodeid):
"""Split a nodeid into constituent 'parts'.
Node IDs are strings, and can be things like:
''
'testing/code'
'testing/code/test_excinfo.py'
'testing/code/test_excinfo.py::TestFormattedExcinfo::()'
Return values are lists e.g.
[]
['testing', 'code']
['testing', 'code', 'test_excinfo.py']
['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()']
"""
if nodeid == "":
# If there is no root node at all, return an empty list so the caller's logic can remain sane
return []
parts = nodeid.split(SEP)
# Replace single last element 'test_foo.py::Bar::()' with multiple elements 'test_foo.py', 'Bar', '()'
parts[-1:] = parts[-1].split("::")
return parts
def ischildnode(baseid, nodeid):
"""Return True if the nodeid is a child node of the baseid.
E.g. 'foo/bar::Baz::()' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp'
"""
base_parts = _splitnode(baseid)
node_parts = _splitnode(nodeid)
if len(node_parts) < len(base_parts):
return False
return node_parts[: len(base_parts)] == base_parts
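# Illustrative sketch (not part of this module): how node ids decompose and
# how the child relation behaves, using the two helpers above.
#   _splitnode("a/b/test_x.py::TestY::()")
#       -> ['a', 'b', 'test_x.py', 'TestY', '()']
#   ischildnode("a/b", "a/b/test_x.py::TestY")   -> True
#   ischildnode("a/c", "a/b/test_x.py::TestY")   -> False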
@attr.s
class _CompatProperty(object):
name = attr.ib()
def __get__(self, obj, owner):
if obj is None:
return self
from _pytest.deprecated import COMPAT_PROPERTY
warnings.warn(
COMPAT_PROPERTY.format(name=self.name, owner=owner.__name__), stacklevel=2
)
return getattr(__import__("pytest"), self.name)
class Node(object):
""" base class for Collector and Item the test collection tree.
Collector subclasses have children, Items are terminal nodes."""
def __init__(
self, name, parent=None, config=None, session=None, fspath=None, nodeid=None
):
#: a unique name within the scope of the parent node
self.name = name
#: the parent collector node.
self.parent = parent
#: the pytest config object
self.config = config or parent.config
#: the session this node is part of
self.session = session or parent.session
#: filesystem path where this node was collected from (can be None)
self.fspath = fspath or getattr(parent, "fspath", None)
#: keywords/markers collected from all scopes
self.keywords = NodeKeywords(self)
#: the marker objects belonging to this node
self.own_markers = []
#: allow adding of extra keywords to use for matching
self.extra_keyword_matches = set()
# used for storing artificial fixturedefs for direct parametrization
self._name2pseudofixturedef = {}
if nodeid is not None:
self._nodeid = nodeid
else:
assert parent is not None
self._nodeid = self.parent.nodeid + "::" + self.name
@property
def ihook(self):
""" fspath sensitive hook proxy used to call pytest hooks"""
return self.session.gethookproxy(self.fspath)
Module = _CompatProperty("Module")
Class = _CompatProperty("Class")
Instance = _CompatProperty("Instance")
Function = _CompatProperty("Function")
File = _CompatProperty("File")
Item = _CompatProperty("Item")
def _getcustomclass(self, name):
maybe_compatprop = getattr(type(self), name)
if isinstance(maybe_compatprop, _CompatProperty):
return getattr(__import__("pytest"), name)
else:
from _pytest.deprecated import CUSTOM_CLASS
cls = getattr(self, name)
self.warn(CUSTOM_CLASS.format(name=name, type_name=type(self).__name__))
return cls
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, getattr(self, "name", None))
def warn(self, _code_or_warning=None, message=None, code=None):
"""Issue a warning for this item.
Warnings will be displayed after the test session, unless explicitly suppressed.
This can be called in two forms:
**Warning instance**
This was introduced in pytest 3.8 and uses the standard warning mechanism to issue warnings.
.. code-block:: python
node.warn(PytestWarning("some message"))
The warning instance must be a subclass of :class:`pytest.PytestWarning`.
**code/message (deprecated)**
This form was used in pytest prior to 3.8 and is considered deprecated. Using this form will emit another
warning about the deprecation:
.. code-block:: python
node.warn("CI", "some message")
:param Union[Warning,str] _code_or_warning:
warning instance or warning code (legacy). This parameter receives an underscore for backward
compatibility with the legacy code/message form, and will be replaced by something
more usual when the legacy form is removed.
:param Union[str,None] message: message to display when called in the legacy form.
:param str code: code for the warning, in legacy form when using keyword arguments.
:return:
"""
if message is None:
if _code_or_warning is None:
raise ValueError("code_or_warning must be given")
self._std_warn(_code_or_warning)
else:
if _code_or_warning and code:
raise ValueError(
"code_or_warning and code cannot both be passed to this function"
)
code = _code_or_warning or code
self._legacy_warn(code, message)
def _legacy_warn(self, code, message):
"""
.. deprecated:: 3.8
Use :meth:`Node.std_warn <_pytest.nodes.Node.std_warn>` instead.
Generate a warning with the given code and message for this item.
"""
from _pytest.deprecated import NODE_WARN
self._std_warn(NODE_WARN)
assert isinstance(code, str)
fslocation = get_fslocation_from_item(self)
self.ihook.pytest_logwarning.call_historic(
kwargs=dict(
code=code, message=message, nodeid=self.nodeid, fslocation=fslocation
)
)
def _std_warn(self, warning):
"""Issue a warning for this item.
Warnings will be displayed after the test session, unless explicitly suppressed
:param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning.
:raise ValueError: if ``warning`` instance is not a subclass of PytestWarning.
"""
from _pytest.warning_types import PytestWarning
if not isinstance(warning, PytestWarning):
raise ValueError(
"warning must be an instance of PytestWarning or subclass, got {!r}".format(
warning
)
)
path, lineno = get_fslocation_from_item(self)
warnings.warn_explicit(
warning,
category=None,
filename=str(path),
lineno=lineno + 1 if lineno is not None else None,
)
# methods for ordering nodes
@property
def nodeid(self):
""" a ::-separated string denoting its collection tree address. """
return self._nodeid
def __hash__(self):
return hash(self.nodeid)
def setup(self):
pass
def teardown(self):
pass
def listchain(self):
""" return list of all parent collectors up to self,
starting from root of collection tree. """
chain = []
item = self
while item is not None:
chain.append(item)
item = item.parent
chain.reverse()
return chain
def add_marker(self, marker, append=True):
"""dynamically add a marker object to the node.
:type marker: ``str`` or ``pytest.mark.*`` object
:param marker:
``append=True`` whether to append the marker,
if ``False`` insert at position ``0``.
"""
from _pytest.mark import MarkDecorator, MARK_GEN
if isinstance(marker, six.string_types):
marker = getattr(MARK_GEN, marker)
elif not isinstance(marker, MarkDecorator):
raise ValueError("is not a string or pytest.mark.* Marker")
self.keywords[marker.name] = marker
if append:
self.own_markers.append(marker.mark)
else:
self.own_markers.insert(0, marker.mark)
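# Illustrative sketch (not part of this module): add_marker() above applied
# dynamically from a hypothetical conftest.py hook. Assumes pytest is
# installed; the selection rule is made up.
import pytest

def pytest_collection_modifyitems(items):
    for item in items:
        if "slow" in item.nodeid:
            item.add_marker(pytest.mark.skip(reason="slow tests disabled"))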
def iter_markers(self, name=None):
"""
:param name: if given, filter the results by the name attribute
iterate over all markers of the node
"""
return (x[1] for x in self.iter_markers_with_node(name=name))
def iter_markers_with_node(self, name=None):
"""
:param name: if given, filter the results by the name attribute
iterate over all markers of the node
returns sequence of tuples (node, mark)
"""
for node in reversed(self.listchain()):
for mark in node.own_markers:
if name is None or getattr(mark, "name", None) == name:
yield node, mark
def get_closest_marker(self, name, default=None):
"""return the first marker matching the name, from closest (for example function) to farther level (for example
module level).
:param default: fallback return value if no marker was found
:param name: name to filter by
"""
return next(self.iter_markers(name=name), default)
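# Illustrative sketch (not part of this module): reading marks through the
# iteration API above from a fixture. Assumes pytest is installed; the
# "timeout" marker is hypothetical.
import pytest

@pytest.fixture
def timeout(request):
    marker = request.node.get_closest_marker("timeout")
    return marker.args[0] if marker else 10

@pytest.mark.timeout(30)
def test_slow_thing(timeout):
    assert timeout == 30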
def get_marker(self, name):
""" get a marker object from this node or None if
the node doesn't have a marker with that name.
.. deprecated:: 3.6
This function has been deprecated in favor of
:meth:`Node.get_closest_marker <_pytest.nodes.Node.get_closest_marker>` and
:meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>`, see :ref:`update marker code`
for more details.
"""
markers = list(self.iter_markers(name=name))
if markers:
return MarkInfo(markers)
def listextrakeywords(self):
""" Return a set of all extra keywords in self and any parents."""
extra_keywords = set()
for item in self.listchain():
extra_keywords.update(item.extra_keyword_matches)
return extra_keywords
def listnames(self):
return [x.name for x in self.listchain()]
def addfinalizer(self, fin):
""" register a function to be called when this node is finalized.
This method can only be called when this node is active
in a setup chain, for example during self.setup().
"""
self.session._setupstate.addfinalizer(fin, self)
def getparent(self, cls):
""" get the next parent node (including ourself)
which is an instance of the given class"""
current = self
while current and not isinstance(current, cls):
current = current.parent
return current
def _prunetraceback(self, excinfo):
pass
def _repr_failure_py(self, excinfo, style=None):
if excinfo.errisinstance(fail.Exception):
if not excinfo.value.pytrace:
return six.text_type(excinfo.value)
fm = self.session._fixturemanager
if excinfo.errisinstance(fm.FixtureLookupError):
return excinfo.value.formatrepr()
tbfilter = True
if self.config.option.fulltrace:
style = "long"
else:
tb = _pytest._code.Traceback([excinfo.traceback[-1]])
self._prunetraceback(excinfo)
if len(excinfo.traceback) == 0:
excinfo.traceback = tb
tbfilter = False # prunetraceback already does it
if style == "auto":
style = "long"
# XXX should excinfo.getrepr record all data and toterminal() process it?
if style is None:
if self.config.option.tbstyle == "short":
style = "short"
else:
style = "long"
if self.config.option.verbose > 1:
truncate_locals = False
else:
truncate_locals = True
try:
os.getcwd()
abspath = False
except OSError:
abspath = True
return excinfo.getrepr(
funcargs=True,
abspath=abspath,
showlocals=self.config.option.showlocals,
style=style,
tbfilter=tbfilter,
truncate_locals=truncate_locals,
)
repr_failure = _repr_failure_py
def get_fslocation_from_item(item):
"""Tries to extract the actual location from an item, depending on available attributes:
* "fslocation": a pair (path, lineno)
* "obj": a Python object that the item wraps.
* "fspath": just a path
:rtype: a tuple of (str|LocalPath, int) with filename and line number.
"""
result = getattr(item, "location", None)
if result is not None:
return result[:2]
obj = getattr(item, "obj", None)
if obj is not None:
return getfslineno(obj)
return getattr(item, "fspath", "unknown location"), -1
class Collector(Node):
""" Collector instances create children through collect()
and thus iteratively build a tree.
"""
class CollectError(Exception):
""" an error during collection, contains a custom message. """
def collect(self):
""" returns a list of children (items and collectors)
for this collection node.
"""
raise NotImplementedError("abstract")
def repr_failure(self, excinfo):
""" represent a collection failure. """
if excinfo.errisinstance(self.CollectError):
exc = excinfo.value
return str(exc.args[0])
return self._repr_failure_py(excinfo, style="short")
def _prunetraceback(self, excinfo):
if hasattr(self, "fspath"):
traceback = excinfo.traceback
ntraceback = traceback.cut(path=self.fspath)
if ntraceback == traceback:
ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
excinfo.traceback = ntraceback.filter()
def _check_initialpaths_for_relpath(session, fspath):
for initial_path in session._initialpaths:
if fspath.common(initial_path) == initial_path:
return fspath.relto(initial_path)
class FSCollector(Collector):
def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None):
fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
name = fspath.basename
if parent is not None:
rel = fspath.relto(parent.fspath)
if rel:
name = rel
name = name.replace(os.sep, SEP)
self.fspath = fspath
session = session or parent.session
if nodeid is None:
nodeid = self.fspath.relto(session.config.rootdir)
if not nodeid:
nodeid = _check_initialpaths_for_relpath(session, fspath)
if nodeid and os.sep != SEP:
nodeid = nodeid.replace(os.sep, SEP)
super(FSCollector, self).__init__(
name, parent, config, session, nodeid=nodeid, fspath=fspath
)
class File(FSCollector):
""" base class for collecting tests from a file. """
class Item(Node):
""" a basic test invocation item. Note that for a single function
there might be multiple test invocation items.
"""
nextitem = None
def __init__(self, name, parent=None, config=None, session=None, nodeid=None):
super(Item, self).__init__(name, parent, config, session, nodeid=nodeid)
self._report_sections = []
#: user properties is a list of tuples (name, value) that holds user
#: defined properties for this test.
self.user_properties = []
def add_report_section(self, when, key, content):
"""
Adds a new report section, similar to what's done internally to add stdout and
stderr captured output::
item.add_report_section("call", "stdout", "report section contents")
:param str when:
One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``.
:param str key:
Name of the section, can be customized at will. Pytest uses ``"stdout"`` and
``"stderr"`` internally.
:param str content:
The full contents as a string.
"""
if content:
self._report_sections.append((when, key, content))
def reportinfo(self):
return self.fspath, None, ""
@property
def location(self):
try:
return self._location
except AttributeError:
location = self.reportinfo()
# bestrelpath is a quite slow function
cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
try:
fspath = cache[location[0]]
except KeyError:
fspath = self.session.fspath.bestrelpath(location[0])
cache[location[0]] = fspath
location = (fspath, location[1], str(location[2]))
self._location = location
return location

@ -1,76 +0,0 @@
""" run test suites written for nose. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from _pytest import python
from _pytest import runner
from _pytest import unittest
from _pytest.config import hookimpl
def get_skip_exceptions():
skip_classes = set()
for module_name in ("unittest", "unittest2", "nose"):
mod = sys.modules.get(module_name)
if hasattr(mod, "SkipTest"):
skip_classes.add(mod.SkipTest)
return tuple(skip_classes)
def pytest_runtest_makereport(item, call):
if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
# let's substitute the excinfo with a pytest.skip one
call2 = call.__class__(lambda: runner.skip(str(call.excinfo.value)), call.when)
call.excinfo = call2.excinfo
@hookimpl(trylast=True)
def pytest_runtest_setup(item):
if is_potential_nosetest(item):
if isinstance(item.parent, python.Generator):
gen = item.parent
if not hasattr(gen, "_nosegensetup"):
call_optional(gen.obj, "setup")
if isinstance(gen.parent, python.Instance):
call_optional(gen.parent.obj, "setup")
gen._nosegensetup = True
if not call_optional(item.obj, "setup"):
# call module level setup if there is no object level one
call_optional(item.parent.obj, "setup")
# XXX this implies we only call teardown when setup worked
item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
def teardown_nose(item):
if is_potential_nosetest(item):
if not call_optional(item.obj, "teardown"):
call_optional(item.parent.obj, "teardown")
# if hasattr(item.parent, '_nosegensetup'):
# #call_optional(item._nosegensetup, 'teardown')
# del item.parent._nosegensetup
def pytest_make_collect_report(collector):
if isinstance(collector, python.Generator):
call_optional(collector.obj, "setup")
def is_potential_nosetest(item):
# extra check needed since we do not do nose style setup/teardown
# on direct unittest style classes
return isinstance(item, python.Function) and not isinstance(
item, unittest.TestCaseFunction
)
def call_optional(obj, name):
method = getattr(obj, name, None)
isfixture = hasattr(method, "_pytestfixturefunction")
if method is not None and not isfixture and callable(method):
# If there are any problems, allow the exception to raise rather than
# silently ignoring them
method()
return True

@ -1,182 +0,0 @@
"""
exception classes and constants handling test outcomes
as well as functions creating them
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
class OutcomeException(BaseException):
""" OutcomeException and its subclass instances indicate and
contain info about test and collection outcomes.
"""
def __init__(self, msg=None, pytrace=True):
BaseException.__init__(self, msg)
self.msg = msg
self.pytrace = pytrace
def __repr__(self):
if self.msg:
val = self.msg
if isinstance(val, bytes):
val = val.decode("UTF-8", errors="replace")
return val
return "<%s instance>" % (self.__class__.__name__,)
__str__ = __repr__
TEST_OUTCOME = (OutcomeException, Exception)
class Skipped(OutcomeException):
# XXX hackish: on 3k we fake to live in the builtins
# in order to have Skipped exception printing shorter/nicer
__module__ = "builtins"
def __init__(self, msg=None, pytrace=True, allow_module_level=False):
OutcomeException.__init__(self, msg=msg, pytrace=pytrace)
self.allow_module_level = allow_module_level
class Failed(OutcomeException):
""" raised from an explicit call to pytest.fail() """
__module__ = "builtins"
class Exit(KeyboardInterrupt):
""" raised for immediate program exits (no tracebacks/summaries)"""
def __init__(self, msg="unknown reason", returncode=None):
self.msg = msg
self.returncode = returncode
KeyboardInterrupt.__init__(self, msg)
# exposed helper methods
def exit(msg, returncode=None):
"""
Exit testing process as if KeyboardInterrupt was triggered.
:param str msg: message to display upon exit.
:param int returncode: return code to be used when exiting pytest.
"""
__tracebackhide__ = True
raise Exit(msg, returncode)
exit.Exception = Exit
def skip(msg="", **kwargs):
"""
Skip an executing test with the given message.
This function should be called only during testing (setup, call or teardown) or
during collection by using the ``allow_module_level`` flag.
:kwarg bool allow_module_level: allows this function to be called at
module level, skipping the rest of the module. Defaults to False.
.. note::
It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be
skipped under certain conditions like mismatching platforms or
dependencies.
"""
__tracebackhide__ = True
allow_module_level = kwargs.pop("allow_module_level", False)
if kwargs:
keys = [k for k in kwargs.keys()]
raise TypeError("unexpected keyword arguments: {}".format(keys))
raise Skipped(msg=msg, allow_module_level=allow_module_level)
skip.Exception = Skipped
def fail(msg="", pytrace=True):
"""
Explicitly fail an executing test with the given message.
:param str msg: the message to show the user as reason for the failure.
:param bool pytrace: if false the msg represents the full failure information and no
python traceback will be reported.
"""
__tracebackhide__ = True
raise Failed(msg=msg, pytrace=pytrace)
fail.Exception = Failed
class XFailed(fail.Exception):
""" raised from an explicit call to pytest.xfail() """
def xfail(reason=""):
"""
Imperatively xfail an executing test or setup functions with the given reason.
This function should be called only during testing (setup, call or teardown).
.. note::
It is better to use the :ref:`pytest.mark.xfail ref` marker when possible to declare a test to be
xfailed under certain conditions like known bugs or missing features.
"""
__tracebackhide__ = True
raise XFailed(reason)
xfail.Exception = XFailed
def importorskip(modname, minversion=None):
""" return imported module if it has at least "minversion" as its
__version__ attribute. If no minversion is specified, a skip
is only triggered if the module cannot be imported.
"""
import warnings
__tracebackhide__ = True
compile(modname, "", "eval") # to catch syntaxerrors
should_skip = False
with warnings.catch_warnings():
# make sure to ignore ImportWarnings that might happen because
# of existing directories with the same name we're trying to
# import but without a __init__.py file
warnings.simplefilter("ignore")
try:
__import__(modname)
except ImportError:
# Do not raise chained exception here(#1485)
should_skip = True
if should_skip:
raise Skipped("could not import %r" % (modname,), allow_module_level=True)
mod = sys.modules[modname]
if minversion is None:
return mod
verattr = getattr(mod, "__version__", None)
if minversion is not None:
try:
from pkg_resources import parse_version as pv
except ImportError:
raise Skipped(
"we have a required version for %r but can not import "
"pkg_resources to parse version strings." % (modname,),
allow_module_level=True,
)
if verattr is None or pv(verattr) < pv(minversion):
raise Skipped(
"module %r has __version__ %r, required is: %r"
% (modname, verattr, minversion),
allow_module_level=True,
)
return mod
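# Illustrative sketch (not part of this module): the outcome helpers above in
# use. Assumes pytest is installed; the module name and minimum version are
# hypothetical.
import pytest

numpy = pytest.importorskip("numpy", minversion="1.13")

def test_uses_numpy():
    assert numpy.asarray([1, 2, 3]).sum() == 6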

@ -1,113 +0,0 @@
""" submit failure or test session information to a pastebin service. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tempfile
import six
import pytest
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group._addoption(
"--pastebin",
metavar="mode",
action="store",
dest="pastebin",
default=None,
choices=["failed", "all"],
help="send failed|all info to bpaste.net pastebin service.",
)
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
if config.option.pastebin == "all":
tr = config.pluginmanager.getplugin("terminalreporter")
# if no terminal reporter plugin is present, nothing we can do here;
# this can happen when this function executes in a slave node
# when using pytest-xdist, for example
if tr is not None:
# pastebin file will be utf-8 encoded binary file
config._pastebinfile = tempfile.TemporaryFile("w+b")
oldwrite = tr._tw.write
def tee_write(s, **kwargs):
oldwrite(s, **kwargs)
if isinstance(s, six.text_type):
s = s.encode("utf-8")
config._pastebinfile.write(s)
tr._tw.write = tee_write
def pytest_unconfigure(config):
if hasattr(config, "_pastebinfile"):
# get terminal contents and delete file
config._pastebinfile.seek(0)
sessionlog = config._pastebinfile.read()
config._pastebinfile.close()
del config._pastebinfile
# undo our patching in the terminal reporter
tr = config.pluginmanager.getplugin("terminalreporter")
del tr._tw.__dict__["write"]
# write summary
tr.write_sep("=", "Sending information to Paste Service")
pastebinurl = create_new_paste(sessionlog)
tr.write_line("pastebin session-log: %s\n" % pastebinurl)
def create_new_paste(contents):
"""
Creates a new paste using bpaste.net service.
:contents: paste contents as utf-8 encoded bytes
:returns: url to the pasted contents
"""
import re
if sys.version_info < (3, 0):
from urllib import urlopen, urlencode
else:
from urllib.request import urlopen
from urllib.parse import urlencode
params = {
"code": contents,
"lexer": "python3" if sys.version_info[0] == 3 else "python",
"expiry": "1week",
}
url = "https://bpaste.net"
response = urlopen(url, data=urlencode(params).encode("ascii")).read()
m = re.search(r'href="/raw/(\w+)"', response.decode("utf-8"))
if m:
return "%s/show/%s" % (url, m.group(1))
else:
return "bad response: " + response
def pytest_terminal_summary(terminalreporter):
import _pytest.config
if terminalreporter.config.option.pastebin != "failed":
return
tr = terminalreporter
if "failed" in tr.stats:
terminalreporter.write_sep("=", "Sending information to Paste Service")
for rep in terminalreporter.stats.get("failed"):
try:
msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
except AttributeError:
msg = tr._getfailureheadline(rep)
tw = _pytest.config.create_terminal_writer(
terminalreporter.config, stringio=True
)
rep.toterminal(tw)
s = tw.stringio.getvalue()
assert len(s)
pastebinurl = create_new_paste(s)
tr.write_line("%s --> %s" % (msg, pastebinurl))

@ -1,320 +0,0 @@
import atexit
import errno
import fnmatch
import itertools
import operator
import os
import shutil
import sys
import uuid
from functools import reduce
from os.path import expanduser
from os.path import expandvars
from os.path import isabs
from os.path import sep
from posixpath import sep as posix_sep
import six
from six.moves import map
from .compat import PY36
if PY36:
from pathlib import Path, PurePath
else:
from pathlib2 import Path, PurePath
__all__ = ["Path", "PurePath"]
LOCK_TIMEOUT = 60 * 60 * 3
get_lock_path = operator.methodcaller("joinpath", ".lock")
def ensure_reset_dir(path):
"""
ensures the given path is an empty directory
"""
if path.exists():
rmtree(path, force=True)
path.mkdir()
def rmtree(path, force=False):
if force:
# NOTE: ignore_errors might leave dead folders around.
# Python needs a rm -rf as a followup.
shutil.rmtree(str(path), ignore_errors=True)
else:
shutil.rmtree(str(path))
def find_prefixed(root, prefix):
"""finds all elements in root that begin with the prefix, case insensitive"""
l_prefix = prefix.lower()
for x in root.iterdir():
if x.name.lower().startswith(l_prefix):
yield x
def extract_suffixes(iter, prefix):
"""
:param iter: iterator over path names
:param prefix: expected prefix of the path names
:returns: the parts of the paths following the prefix
"""
p_len = len(prefix)
for p in iter:
yield p.name[p_len:]
def find_suffixes(root, prefix):
"""combines find_prefixes and extract_suffixes
"""
return extract_suffixes(find_prefixed(root, prefix), prefix)
def parse_num(maybe_num):
"""parses number path suffixes, returns -1 on error"""
try:
return int(maybe_num)
except ValueError:
return -1
if six.PY2:
def _max(iterable, default):
"""needed due to python2.7 lacking the default argument for max"""
return reduce(max, iterable, default)
else:
_max = max
def _force_symlink(root, target, link_to):
"""helper to create the current symlink
it's full of race conditions that are reasonably ok to ignore
for the context of best effort linking to the latest testrun
the presumption being that in case of much parallelism
the inaccuracy is going to be acceptable
"""
current_symlink = root.joinpath(target)
try:
current_symlink.unlink()
except OSError:
pass
try:
current_symlink.symlink_to(link_to)
except Exception:
pass
def make_numbered_dir(root, prefix):
"""create a directory with an increased number as suffix for the given prefix"""
for i in range(10):
# try up to 10 times to create the folder
max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1)
new_number = max_existing + 1
new_path = root.joinpath("{}{}".format(prefix, new_number))
try:
new_path.mkdir()
except Exception:
pass
else:
_force_symlink(root, prefix + "current", new_path)
return new_path
else:
raise EnvironmentError(
"could not create numbered dir with prefix "
"{prefix} in {root} after 10 tries".format(prefix=prefix, root=root)
)
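# Illustrative sketch (not part of this module): make_numbered_dir() picks the
# next free numeric suffix and repoints the "<prefix>current" symlink at it,
# e.g. for a hypothetical root:
#   make_numbered_dir(Path("/tmp/pytest-of-user"), "pytest-")
# creates /tmp/pytest-of-user/pytest-0, then pytest-1, ... on later calls,
# with /tmp/pytest-of-user/pytest-current pointing at the newest one.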
def create_cleanup_lock(p):
"""crates a lock to prevent premature folder cleanup"""
lock_path = get_lock_path(p)
try:
fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
except OSError as e:
if e.errno == errno.EEXIST:
six.raise_from(
EnvironmentError("cannot create lockfile in {path}".format(path=p)), e
)
else:
raise
else:
pid = os.getpid()
spid = str(pid)
if not isinstance(spid, bytes):
spid = spid.encode("ascii")
os.write(fd, spid)
os.close(fd)
if not lock_path.is_file():
raise EnvironmentError("lock path got renamed after successful creation")
return lock_path
def register_cleanup_lock_removal(lock_path, register=atexit.register):
"""registers a cleanup function for removing a lock, by default on atexit"""
pid = os.getpid()
def cleanup_on_exit(lock_path=lock_path, original_pid=pid):
current_pid = os.getpid()
if current_pid != original_pid:
# fork
return
try:
lock_path.unlink()
except (OSError, IOError):
pass
return register(cleanup_on_exit)
def maybe_delete_a_numbered_dir(path):
"""removes a numbered directory if its lock can be obtained and it does not seem to be in use"""
lock_path = None
try:
lock_path = create_cleanup_lock(path)
parent = path.parent
garbage = parent.joinpath("garbage-{}".format(uuid.uuid4()))
path.rename(garbage)
rmtree(garbage, force=True)
except (OSError, EnvironmentError):
# known races:
# * other process did a cleanup at the same time
# * deletable folder was found
# * process cwd (Windows)
return
finally:
# if we created the lock, ensure we remove it even if we failed
# to properly remove the numbered dir
if lock_path is not None:
try:
lock_path.unlink()
except (OSError, IOError):
pass
def ensure_deletable(path, consider_lock_dead_if_created_before):
"""checks if a lock exists and breaks it if its considered dead"""
if path.is_symlink():
return False
lock = get_lock_path(path)
if not lock.exists():
return True
try:
lock_time = lock.stat().st_mtime
except Exception:
return False
else:
if lock_time < consider_lock_dead_if_created_before:
lock.unlink()
return True
else:
return False
def try_cleanup(path, consider_lock_dead_if_created_before):
"""tries to cleanup a folder if we can ensure it's deletable"""
if ensure_deletable(path, consider_lock_dead_if_created_before):
maybe_delete_a_numbered_dir(path)
def cleanup_candidates(root, prefix, keep):
"""lists candidates for numbered directories to be removed - follows py.path"""
max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1)
max_delete = max_existing - keep
paths = find_prefixed(root, prefix)
paths, paths2 = itertools.tee(paths)
numbers = map(parse_num, extract_suffixes(paths2, prefix))
for path, number in zip(paths, numbers):
if number <= max_delete:
yield path
def cleanup_numbered_dir(root, prefix, keep, consider_lock_dead_if_created_before):
"""cleanup for lock driven numbered directories"""
for path in cleanup_candidates(root, prefix, keep):
try_cleanup(path, consider_lock_dead_if_created_before)
for path in root.glob("garbage-*"):
try_cleanup(path, consider_lock_dead_if_created_before)
def make_numbered_dir_with_cleanup(root, prefix, keep, lock_timeout):
"""creates a numbered dir with a cleanup lock and removes old ones"""
e = None
for i in range(10):
try:
p = make_numbered_dir(root, prefix)
lock_path = create_cleanup_lock(p)
register_cleanup_lock_removal(lock_path)
except Exception as exc:
e = exc
else:
consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout
cleanup_numbered_dir(
root=root,
prefix=prefix,
keep=keep,
consider_lock_dead_if_created_before=consider_lock_dead_if_created_before,
)
return p
assert e is not None
raise e
def resolve_from_str(input, root):
assert not isinstance(input, Path), "would break on py2"
root = Path(root)
input = expanduser(input)
input = expandvars(input)
if isabs(input):
return Path(input)
else:
return root.joinpath(input)
def fnmatch_ex(pattern, path):
"""FNMatcher port from py.path.common which works with PurePath() instances.
The difference between this algorithm and PurePath.match() is that the latter matches "**" glob expressions
for each part of the path, while this algorithm uses the whole path instead.
For example:
"tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" with this algorithm, but not with
PurePath.match().
This algorithm was ported to keep backward-compatibility with existing settings which assume paths match according
to this logic.
References:
* https://bugs.python.org/issue29249
* https://bugs.python.org/issue34731
"""
path = PurePath(path)
iswin32 = sys.platform.startswith("win")
if iswin32 and sep not in pattern and posix_sep in pattern:
# Running on Windows, the pattern has no Windows path separators,
# and the pattern has one or more Posix path separators. Replace
# the Posix path separators with the Windows path separator.
pattern = pattern.replace(posix_sep, sep)
if sep not in pattern:
name = path.name
else:
name = six.text_type(path)
return fnmatch.fnmatch(name, pattern)
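# Illustrative sketch (not part of this module): the difference described in
# the docstring above, shown on the same example path and pattern.
#   fnmatch_ex("tests/**/doc/test*.py", "tests/foo/bar/doc/test_foo.py") -> True
#   PurePath("tests/foo/bar/doc/test_foo.py").match("tests/**/doc/test*.py") -> False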
def parts(s):
parts = s.split(sep)
return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))}

File diff suppressed because it is too large

File diff suppressed because it is too large

@ -1,721 +0,0 @@
import math
import pprint
import sys
from decimal import Decimal
from numbers import Number
import six
from more_itertools.more import always_iterable
from six.moves import filterfalse
from six.moves import zip
import _pytest._code
from _pytest.compat import isclass
from _pytest.compat import Mapping
from _pytest.compat import Sequence
from _pytest.compat import STRING_TYPES
from _pytest.outcomes import fail
BASE_TYPE = (type, STRING_TYPES)
def _cmp_raises_type_error(self, other):
"""__cmp__ implementation which raises TypeError. Used
by Approx base classes to implement only == and != and raise a
TypeError for other comparisons.
Needed in Python 2 only; in Python 3 it is enough not to implement the
other comparison operators at all.
"""
__tracebackhide__ = True
raise TypeError(
"Comparison operators other than == and != not supported by approx objects"
)
def _non_numeric_type_error(value, at):
at_str = " at {}".format(at) if at else ""
return TypeError(
"cannot make approximate comparisons to non-numeric values: {!r} {}".format(
value, at_str
)
)
# builtin pytest.approx helper
class ApproxBase(object):
"""
Provide shared utilities for making approximate comparisons between numbers
or sequences of numbers.
"""
# Tell numpy to use our `__eq__` operator instead of its own.
__array_ufunc__ = None
__array_priority__ = 100
def __init__(self, expected, rel=None, abs=None, nan_ok=False):
__tracebackhide__ = True
self.expected = expected
self.abs = abs
self.rel = rel
self.nan_ok = nan_ok
self._check_type()
def __repr__(self):
raise NotImplementedError
def __eq__(self, actual):
return all(
a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual)
)
__hash__ = None
def __ne__(self, actual):
return not (actual == self)
if sys.version_info[0] == 2:
__cmp__ = _cmp_raises_type_error
def _approx_scalar(self, x):
return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
def _yield_comparisons(self, actual):
"""
Yield all the pairs of numbers to be compared. This is used to
implement the `__eq__` method.
"""
raise NotImplementedError
def _check_type(self):
"""
Raise a TypeError if the expected value is not a valid type.
"""
# This is only a concern if the expected value is a sequence. In every
# other case, the approx() function ensures that the expected value has
# a numeric type. For this reason, the default is to do nothing. The
# classes that deal with sequences should reimplement this method to
# raise if there are any non-numeric elements in the sequence.
pass
def _recursive_list_map(f, x):
if isinstance(x, list):
return list(_recursive_list_map(f, xi) for xi in x)
else:
return f(x)
class ApproxNumpy(ApproxBase):
"""
Perform approximate comparisons where the expected value is a numpy array.
"""
def __repr__(self):
list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist())
return "approx({!r})".format(list_scalars)
if sys.version_info[0] == 2:
__cmp__ = _cmp_raises_type_error
def __eq__(self, actual):
import numpy as np
# self.expected is supposed to always be an array here
if not np.isscalar(actual):
try:
actual = np.asarray(actual)
except: # noqa
raise TypeError("cannot compare '{}' to numpy.ndarray".format(actual))
if not np.isscalar(actual) and actual.shape != self.expected.shape:
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
import numpy as np
# `actual` can either be a numpy array or a scalar, it is treated in
# `__eq__` before being passed to `ApproxBase.__eq__`, which is the
# only method that calls this one.
if np.isscalar(actual):
for i in np.ndindex(self.expected.shape):
yield actual, np.asscalar(self.expected[i])
else:
for i in np.ndindex(self.expected.shape):
yield np.asscalar(actual[i]), np.asscalar(self.expected[i])
class ApproxMapping(ApproxBase):
"""
Perform approximate comparisons where the expected value is a mapping with
numeric values (the keys can be anything).
"""
def __repr__(self):
return "approx({!r})".format(
{k: self._approx_scalar(v) for k, v in self.expected.items()}
)
def __eq__(self, actual):
if set(actual.keys()) != set(self.expected.keys()):
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
for k in self.expected.keys():
yield actual[k], self.expected[k]
def _check_type(self):
__tracebackhide__ = True
for key, value in self.expected.items():
if isinstance(value, type(self.expected)):
msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}"
raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))
elif not isinstance(value, Number):
raise _non_numeric_type_error(self.expected, at="key={!r}".format(key))
class ApproxSequence(ApproxBase):
"""
Perform approximate comparisons where the expected value is a sequence of
numbers.
"""
def __repr__(self):
seq_type = type(self.expected)
if seq_type not in (tuple, list, set):
seq_type = list
return "approx({!r})".format(
seq_type(self._approx_scalar(x) for x in self.expected)
)
def __eq__(self, actual):
if len(actual) != len(self.expected):
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
return zip(actual, self.expected)
def _check_type(self):
__tracebackhide__ = True
for index, x in enumerate(self.expected):
if isinstance(x, type(self.expected)):
msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}"
raise TypeError(msg.format(x, index, pprint.pformat(self.expected)))
elif not isinstance(x, Number):
raise _non_numeric_type_error(
self.expected, at="index {}".format(index)
)
class ApproxScalar(ApproxBase):
"""
Perform approximate comparisons where the expected value is a single number.
"""
DEFAULT_ABSOLUTE_TOLERANCE = 1e-12
DEFAULT_RELATIVE_TOLERANCE = 1e-6
def __repr__(self):
"""
Return a string communicating both the expected value and the tolerance
for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode
plus/minus symbol if this is python3 (it's too hard to get right for
python2).
"""
if isinstance(self.expected, complex):
return str(self.expected)
# Infinities aren't compared using tolerances, so don't show a
# tolerance.
if math.isinf(self.expected):
return str(self.expected)
# If a sensible tolerance can't be calculated, self.tolerance will
# raise a ValueError. In this case, display '???'.
try:
vetted_tolerance = "{:.1e}".format(self.tolerance)
except ValueError:
vetted_tolerance = "???"
if sys.version_info[0] == 2:
return "{} +- {}".format(self.expected, vetted_tolerance)
else:
return u"{} \u00b1 {}".format(self.expected, vetted_tolerance)
def __eq__(self, actual):
"""
Return true if the given value is equal to the expected value within
the pre-specified tolerance.
"""
if _is_numpy_array(actual):
# Call ``__eq__()`` manually to prevent infinite-recursion with
# numpy<1.13. See #3748.
return all(self.__eq__(a) for a in actual.flat)
# Short-circuit exact equality.
if actual == self.expected:
return True
# Allow the user to control whether NaNs are considered equal to each
# other or not. The abs() calls are for compatibility with complex
# numbers.
if math.isnan(abs(self.expected)):
return self.nan_ok and math.isnan(abs(actual))
# Infinity shouldn't be approximately equal to anything but itself, but
# if there's a relative tolerance, it will be infinite and infinity
# will seem approximately equal to everything. The equal-to-itself
# case would have been short circuited above, so here we can just
# return false if the expected value is infinite. The abs() call is
# for compatibility with complex numbers.
if math.isinf(abs(self.expected)):
return False
# Return true if the two numbers are within the tolerance.
return abs(self.expected - actual) <= self.tolerance
__hash__ = None
@property
def tolerance(self):
"""
Return the tolerance for the comparison. This could be either an
absolute tolerance or a relative tolerance, depending on what the user
specified or which would be larger.
"""
def set_default(x, default):
return x if x is not None else default
# Figure out what the absolute tolerance should be. ``self.abs`` is
# either None or a value specified by the user.
absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)
if absolute_tolerance < 0:
raise ValueError(
"absolute tolerance can't be negative: {}".format(absolute_tolerance)
)
if math.isnan(absolute_tolerance):
raise ValueError("absolute tolerance can't be NaN.")
# If the user specified an absolute tolerance but not a relative one,
# just return the absolute tolerance.
if self.rel is None:
if self.abs is not None:
return absolute_tolerance
# Figure out what the relative tolerance should be. ``self.rel`` is
# either None or a value specified by the user. This is done after
# we've made sure the user didn't ask for an absolute tolerance only,
# because we don't want to raise errors about the relative tolerance if
# we aren't even going to use it.
relative_tolerance = set_default(
self.rel, self.DEFAULT_RELATIVE_TOLERANCE
) * abs(self.expected)
if relative_tolerance < 0:
raise ValueError(
"relative tolerance can't be negative: {}".format(absolute_tolerance)
)
if math.isnan(relative_tolerance):
raise ValueError("relative tolerance can't be NaN.")
# Return the larger of the relative and absolute tolerances.
return max(relative_tolerance, absolute_tolerance)
class ApproxDecimal(ApproxScalar):
"""
Perform approximate comparisons where the expected value is a decimal.
"""
DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12")
DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6")
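# Illustrative sketch (added for this excerpt, not part of the original
# module): how ``ApproxScalar.tolerance`` above combines the two defaults.
# With rel=1e-6 and abs=1e-12 the relative term dominates for most non-zero
# expected values, while for expected=0.0 only the absolute term applies.
def _example_effective_tolerance(expected, rel=1e-6, abs_tol=1e-12):
    # Mirrors the property above: the larger of the relative tolerance
    # (rel * |expected|) and the absolute tolerance wins.
    return max(rel * abs(expected), abs_tol)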
def approx(expected, rel=None, abs=None, nan_ok=False):
"""
Assert that two numbers (or two sets of numbers) are equal to each other
within some tolerance.
Due to the `intricacies of floating-point arithmetic`__, numbers that we
would intuitively expect to be equal are not always so::
>>> 0.1 + 0.2 == 0.3
False
__ https://docs.python.org/3/tutorial/floatingpoint.html
This problem is commonly encountered when writing tests, e.g. when making
sure that floating-point values are what you expect them to be. One way to
deal with this problem is to assert that two floating-point numbers are
equal to within some appropriate tolerance::
>>> abs((0.1 + 0.2) - 0.3) < 1e-6
True
However, comparisons like this are tedious to write and difficult to
understand. Furthermore, absolute comparisons like the one above are
usually discouraged because there's no tolerance that works well for all
situations. ``1e-6`` is good for numbers around ``1``, but too small for
very big numbers and too big for very small ones. It's better to express
the tolerance as a fraction of the expected value, but relative comparisons
like that are even more difficult to write correctly and concisely.
The ``approx`` class performs floating-point comparisons using a syntax
that's as intuitive as possible::
>>> from pytest import approx
>>> 0.1 + 0.2 == approx(0.3)
True
The same syntax also works for sequences of numbers::
>>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
True
Dictionary *values*::
>>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
True
``numpy`` arrays::
>>> import numpy as np # doctest: +SKIP
>>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP
True
And for a ``numpy`` array against a scalar::
>>> import numpy as np # doctest: +SKIP
>>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP
True
By default, ``approx`` considers numbers within a relative tolerance of
``1e-6`` (i.e. one part in a million) of its expected value to be equal.
This treatment would lead to surprising results if the expected value was
``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
To handle this case less surprisingly, ``approx`` also considers numbers
within an absolute tolerance of ``1e-12`` of its expected value to be
equal. Infinity and NaN are special cases. Infinity is only considered
equal to itself, regardless of the relative tolerance. NaN is not
considered equal to anything by default, but you can make it be equal to
itself by setting the ``nan_ok`` argument to True. (This is meant to
facilitate comparing arrays that use NaN to mean "no data".)
Both the relative and absolute tolerances can be changed by passing
arguments to the ``approx`` constructor::
>>> 1.0001 == approx(1)
False
>>> 1.0001 == approx(1, rel=1e-3)
True
>>> 1.0001 == approx(1, abs=1e-3)
True
If you specify ``abs`` but not ``rel``, the comparison will not consider
the relative tolerance at all. In other words, two numbers that are within
the default relative tolerance of ``1e-6`` will still be considered unequal
if they exceed the specified absolute tolerance. If you specify both
``abs`` and ``rel``, the numbers will be considered equal if either
tolerance is met::
>>> 1 + 1e-8 == approx(1)
True
>>> 1 + 1e-8 == approx(1, abs=1e-12)
False
>>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
True
If you're thinking about using ``approx``, then you might want to know how
it compares to other good ways of comparing floating-point numbers. All of
these algorithms are based on relative and absolute tolerances and should
agree for the most part, but they do have meaningful differences:
- ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
tolerance is met. Because the relative tolerance is calculated w.r.t.
both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
``b`` is a "reference value"). You have to specify an absolute tolerance
if you want to compare to ``0.0`` because there is no tolerance by
default. Only available in python>=3.5. `More information...`__
__ https://docs.python.org/3/library/math.html#math.isclose
- ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
between ``a`` and ``b`` is less than the sum of the relative tolerance
w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
is only calculated w.r.t. ``b``, this test is asymmetric and you can
think of ``b`` as the reference value. Support for comparing sequences
is provided by ``numpy.allclose``. `More information...`__
__ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
- ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
are within an absolute tolerance of ``1e-7``. No relative tolerance is
considered and the absolute tolerance cannot be changed, so this function
is not appropriate for very large or very small numbers. Also, it's only
available in subclasses of ``unittest.TestCase`` and it's ugly because it
doesn't follow PEP8. `More information...`__
__ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
- ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
Because the relative tolerance is only calculated w.r.t. ``b``, this test
is asymmetric and you can think of ``b`` as the reference value. In the
special case that you explicitly specify an absolute tolerance but not a
relative tolerance, only the absolute tolerance is considered.
.. warning::
.. versionchanged:: 3.2
In order to avoid inconsistent behavior, ``TypeError`` is
raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
The example below illustrates the problem::
assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10)
assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10)
In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used for
the comparison. This is because the call hierarchy of rich comparisons
follows a fixed behavior. `More information...`__
__ https://docs.python.org/3/reference/datamodel.html#object.__ge__
"""
# Delegate the comparison to a class that knows how to deal with the type
# of the expected value (e.g. int, float, list, dict, numpy.array, etc).
#
# The primary responsibility of these classes is to implement ``__eq__()``
# and ``__repr__()``. The former is used to actually check if some
# "actual" value is equivalent to the given expected value within the
# allowed tolerance. The latter is used to show the user the expected
# value and tolerance, in the case that a test failed.
#
# The actual logic for making approximate comparisons can be found in
# ApproxScalar, which is used to compare individual numbers. All of the
# other Approx classes eventually delegate to this class. The ApproxBase
# class provides some convenient methods and overloads, but isn't really
# essential.
__tracebackhide__ = True
if isinstance(expected, Decimal):
cls = ApproxDecimal
elif isinstance(expected, Number):
cls = ApproxScalar
elif isinstance(expected, Mapping):
cls = ApproxMapping
elif isinstance(expected, Sequence) and not isinstance(expected, STRING_TYPES):
cls = ApproxSequence
elif _is_numpy_array(expected):
cls = ApproxNumpy
else:
raise _non_numeric_type_error(expected, at=None)
return cls(expected, rel, abs, nan_ok)
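# Illustrative sketch (added for this excerpt, not part of the original
# module): the asymmetry described in the docstring above. The relative
# tolerance is computed w.r.t. the expected (wrapped) value, so swapping
# the operands can change the outcome.
def _example_approx_asymmetry():
    # tolerance is rel * |expected|: 0.5 * 1.0 = 0.5 is too small for a
    # difference of 1.0, but 0.5 * 2.0 = 1.0 is just large enough.
    assert not (2.0 == approx(1.0, rel=0.5))
    assert 1.0 == approx(2.0, rel=0.5)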
def _is_numpy_array(obj):
"""
Return true if the given object is a numpy array. Make a special effort to
avoid importing numpy unless it's really necessary.
"""
import sys
np = sys.modules.get("numpy")
if np is not None:
return isinstance(obj, np.ndarray)
return False
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
r"""
Assert that a code block/function call raises ``expected_exception``
and raise a failure exception otherwise.
:arg message: if specified, provides a custom failure message if the
exception is not raised
:arg match: if specified, asserts that the exception matches a text or regex
This helper produces an ``ExceptionInfo()`` object (see below).
You may use this function as a context manager::
>>> with raises(ZeroDivisionError):
... 1/0
.. versionchanged:: 2.10
In the context manager form you may use the keyword argument
``message`` to specify a custom failure message::
>>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"):
... pass
Traceback (most recent call last):
...
Failed: Expecting ZeroDivisionError
.. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised *must* be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager, will
not be executed. For example::
>>> value = 15
>>> with raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
... assert exc_info.type == ValueError # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
>>> with raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
...
>>> assert exc_info.type == ValueError
Since version ``3.1`` you can use the keyword argument ``match`` to assert that the
exception matches a text or regex::
>>> with raises(ValueError, match='must be 0 or None'):
... raise ValueError("value must be 0 or None")
>>> with raises(ValueError, match=r'must be \d+$'):
... raise ValueError("value must be 42")
**Legacy forms**
The forms below are fully supported but are discouraged for new code because the
context manager form is regarded as more readable and less error-prone.
It is possible to specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
It is also possible to pass a string to be evaluated at runtime::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
The string will be evaluated using the same ``locals()`` and ``globals()``
at the moment of the ``raises`` call.
.. currentmodule:: _pytest._code
Consult the API of ``excinfo`` objects: :class:`ExceptionInfo`.
.. note::
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
for exc in filterfalse(isclass, always_iterable(expected_exception, BASE_TYPE)):
msg = (
"exceptions must be old-style classes or"
" derived from BaseException, not %s"
)
raise TypeError(msg % type(exc))
message = "DID NOT RAISE {}".format(expected_exception)
match_expr = None
if not args:
if "message" in kwargs:
message = kwargs.pop("message")
if "match" in kwargs:
match_expr = kwargs.pop("match")
if kwargs:
msg = "Unexpected keyword arguments passed to pytest.raises: "
msg += ", ".join(kwargs.keys())
raise TypeError(msg)
return RaisesContext(expected_exception, message, match_expr)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
# print "raises frame scope: %r" % frame.f_locals
try:
code = _pytest._code.Source(code).compile()
six.exec_(code, frame.f_globals, loc)
# XXX didn't mean f_globals == f_locals something special?
# this is destroyed here ...
except expected_exception:
return _pytest._code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return _pytest._code.ExceptionInfo()
fail(message)
raises.Exception = fail.Exception
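# Usage sketch (added for this excerpt, not part of the original module):
# the context-manager form returns an ``ExceptionInfo`` whose ``type``,
# ``value`` and ``traceback`` can be inspected after the block, as the
# docstring above describes. The names below are illustrative only.
def _example_raises_usage():
    with raises(ValueError, match="bad value") as exc_info:
        raise ValueError("bad value: 42")
    assert exc_info.type is ValueError
    assert "42" in str(exc_info.value)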
class RaisesContext(object):
def __init__(self, expected_exception, message, match_expr):
self.expected_exception = expected_exception
self.message = message
self.match_expr = match_expr
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
fail(self.message)
self.excinfo.__init__(tp)
suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
if sys.version_info[0] == 2 and suppress_exception:
sys.exc_clear()
if self.match_expr and suppress_exception:
self.excinfo.match(self.match_expr)
return suppress_exception

@ -1,241 +0,0 @@
""" recording warnings during test function execution. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import re
import sys
import warnings
import six
import _pytest._code
from _pytest.fixtures import yield_fixture
from _pytest.outcomes import fail
@yield_fixture
def recwarn():
"""Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions.
See http://docs.python.org/library/warnings.html for information
on warning categories.
"""
wrec = WarningsRecorder()
with wrec:
warnings.simplefilter("default")
yield wrec
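# Usage sketch (added for this excerpt, not part of the original plugin):
# a test that requests the ``recwarn`` fixture receives the recorder yielded
# above and can inspect the warnings emitted inside the test body. The
# function name is illustrative only; pytest injects the fixture itself.
def _example_recwarn_usage(recwarn):
    warnings.warn("use the new API", DeprecationWarning)
    assert len(recwarn) == 1
    assert recwarn.pop(DeprecationWarning).category is DeprecationWarning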
def deprecated_call(func=None, *args, **kwargs):
"""context manager that can be used to ensure a block of code triggers a
``DeprecationWarning`` or ``PendingDeprecationWarning``::
>>> import warnings
>>> def api_call_v2():
... warnings.warn('use v3 of this api', DeprecationWarning)
... return 200
>>> with deprecated_call():
... assert api_call_v2() == 200
``deprecated_call`` can also be used by passing a function and ``*args`` and ``**kwargs``,
in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warning
types above.
"""
__tracebackhide__ = True
if func is not None:
args = (func,) + args
return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs)
def warns(expected_warning, *args, **kwargs):
r"""Assert that code raises a particular class of warning.
Specifically, the parameter ``expected_warning`` can be a warning class or
sequence of warning classes, and the code inside the ``with`` block must issue a warning of that class or
classes.
This helper produces a list of :class:`warnings.WarningMessage` objects,
one for each warning raised.
This function can be used as a context manager, or any of the other ways
``pytest.raises`` can be used::
>>> with warns(RuntimeWarning):
... warnings.warn("my warning", RuntimeWarning)
In the context manager form you may use the keyword argument ``match`` to assert
that the exception matches a text or regex::
>>> with warns(UserWarning, match='must be 0 or None'):
... warnings.warn("value must be 0 or None", UserWarning)
>>> with warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("value must be 42", UserWarning)
>>> with warns(UserWarning, match=r'must be \d+$'):
... warnings.warn("this is not here", UserWarning)
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted...
"""
__tracebackhide__ = True
match_expr = None
if not args:
if "match" in kwargs:
match_expr = kwargs.pop("match")
return WarningsChecker(expected_warning, match_expr=match_expr)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
with WarningsChecker(expected_warning, match_expr=match_expr):
code = _pytest._code.Source(code).compile()
six.exec_(code, frame.f_globals, loc)
else:
func = args[0]
with WarningsChecker(expected_warning, match_expr=match_expr):
return func(*args[1:], **kwargs)
class WarningsRecorder(warnings.catch_warnings):
"""A context manager to record raised warnings.
Adapted from `warnings.catch_warnings`.
"""
def __init__(self):
super(WarningsRecorder, self).__init__(record=True)
self._entered = False
self._list = []
@property
def list(self):
"""The list of recorded warnings."""
return self._list
def __getitem__(self, i):
"""Get a recorded warning by index."""
return self._list[i]
def __iter__(self):
"""Iterate through the recorded warnings."""
return iter(self._list)
def __len__(self):
"""The number of recorded warnings."""
return len(self._list)
def pop(self, cls=Warning):
"""Pop the first recorded warning, raise exception if not exists."""
for i, w in enumerate(self._list):
if issubclass(w.category, cls):
return self._list.pop(i)
__tracebackhide__ = True
raise AssertionError("%r not found in warning list" % cls)
def clear(self):
"""Clear the list of recorded warnings."""
self._list[:] = []
def __enter__(self):
if self._entered:
__tracebackhide__ = True
raise RuntimeError("Cannot enter %r twice" % self)
self._list = super(WarningsRecorder, self).__enter__()
warnings.simplefilter("always")
# python3 keeps track of a "filter version", when the filters are
# updated previously seen warnings can be re-warned. python2 has no
# concept of this so we must reset the warnings registry manually.
# trivial patching of `warnings.warn` seems to be enough somehow?
if six.PY2:
def warn(message, category=None, stacklevel=1):
# duplicate the stdlib logic due to
# bad handling in the C version of warnings
if isinstance(message, Warning):
category = message.__class__
# Check category argument
if category is None:
category = UserWarning
assert issubclass(category, Warning)
# emulate resetting the warn registry
f_globals = sys._getframe(stacklevel).f_globals
if "__warningregistry__" in f_globals:
orig = f_globals["__warningregistry__"]
f_globals["__warningregistry__"] = None
try:
return self._saved_warn(message, category, stacklevel + 1)
finally:
f_globals["__warningregistry__"] = orig
else:
return self._saved_warn(message, category, stacklevel + 1)
warnings.warn, self._saved_warn = warn, warnings.warn
return self
def __exit__(self, *exc_info):
if not self._entered:
__tracebackhide__ = True
raise RuntimeError("Cannot exit %r without entering first" % self)
# see above where `self._saved_warn` is assigned
if six.PY2:
warnings.warn = self._saved_warn
super(WarningsRecorder, self).__exit__(*exc_info)
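# Usage sketch (added for this excerpt, not part of the original plugin):
# ``WarningsRecorder`` can also be used directly as a context manager,
# outside of the ``recwarn`` fixture.
def _example_warnings_recorder():
    with WarningsRecorder() as rec:
        warnings.warn("something deprecated", DeprecationWarning)
    assert len(rec) == 1
    assert rec[0].category is DeprecationWarning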
class WarningsChecker(WarningsRecorder):
def __init__(self, expected_warning=None, match_expr=None):
super(WarningsChecker, self).__init__()
msg = "exceptions must be old-style classes or derived from Warning, not %s"
if isinstance(expected_warning, tuple):
for exc in expected_warning:
if not inspect.isclass(exc):
raise TypeError(msg % type(exc))
elif inspect.isclass(expected_warning):
expected_warning = (expected_warning,)
elif expected_warning is not None:
raise TypeError(msg % type(expected_warning))
self.expected_warning = expected_warning
self.match_expr = match_expr
def __exit__(self, *exc_info):
super(WarningsChecker, self).__exit__(*exc_info)
__tracebackhide__ = True
# only check if we're not currently handling an exception
if all(a is None for a in exc_info):
if self.expected_warning is not None:
if not any(issubclass(r.category, self.expected_warning) for r in self):
__tracebackhide__ = True
fail(
"DID NOT WARN. No warnings of type {} was emitted. "
"The list of emitted warnings is: {}.".format(
self.expected_warning, [each.message for each in self]
)
)
elif self.match_expr is not None:
for r in self:
if issubclass(r.category, self.expected_warning):
if re.compile(self.match_expr).search(str(r.message)):
break
else:
fail(
"DID NOT WARN. No warnings of type {} matching"
" ('{}') was emitted. The list of emitted warnings"
" is: {}.".format(
self.expected_warning,
self.match_expr,
[each.message for each in self],
)
)

@ -1,197 +0,0 @@
import py
from _pytest._code.code import TerminalRepr
def getslaveinfoline(node):
try:
return node._slaveinfocache
except AttributeError:
d = node.slaveinfo
ver = "%s.%s.%s" % d["version_info"][:3]
node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
d["id"],
d["sysplatform"],
ver,
d["executable"],
)
return s
class BaseReport(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def toterminal(self, out):
if hasattr(self, "node"):
out.line(getslaveinfoline(self.node))
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, "toterminal"):
longrepr.toterminal(out)
else:
try:
out.line(longrepr)
except UnicodeEncodeError:
out.line("<unprintable longrepr>")
def get_sections(self, prefix):
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
@property
def longreprtext(self):
"""
Read-only property that returns the full string representation
of ``longrepr``.
.. versionadded:: 3.0
"""
tw = py.io.TerminalWriter(stringio=True)
tw.hasmarkup = False
self.toterminal(tw)
exc = tw.stringio.getvalue()
return exc.strip()
@property
def caplog(self):
"""Return captured log lines, if log capturing is enabled
.. versionadded:: 3.5
"""
return "\n".join(
content for (prefix, content) in self.get_sections("Captured log")
)
@property
def capstdout(self):
"""Return captured text from stdout, if capturing is enabled
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stdout")
)
@property
def capstderr(self):
"""Return captured text from stderr, if capturing is enabled
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stderr")
)
passed = property(lambda x: x.outcome == "passed")
failed = property(lambda x: x.outcome == "failed")
skipped = property(lambda x: x.outcome == "skipped")
@property
def fspath(self):
return self.nodeid.split("::")[0]
class TestReport(BaseReport):
""" Basic test report object (also used for setup and teardown calls if
they fail).
"""
def __init__(
self,
nodeid,
location,
keywords,
outcome,
longrepr,
when,
sections=(),
duration=0,
user_properties=None,
**extra
):
#: normalized collection node id
self.nodeid = nodeid
#: a (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
self.location = location
#: a name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords = keywords
#: test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: one of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when = when
#: user properties is a list of tuples (name, value) that holds user
#: defined properties of the test
self.user_properties = list(user_properties or [])
#: list of pairs ``(str, str)`` of extra information which needs to
#: be marshallable. Used by pytest to add captured text
#: from ``stdout`` and ``stderr``, but may be used by other plugins
#: to add arbitrary information to reports.
self.sections = list(sections)
#: time it took to run just the test
self.duration = duration
self.__dict__.update(extra)
def __repr__(self):
return "<TestReport %r when=%r outcome=%r>" % (
self.nodeid,
self.when,
self.outcome,
)
class TeardownErrorReport(BaseReport):
outcome = "failed"
when = "teardown"
def __init__(self, longrepr, **extra):
self.longrepr = longrepr
self.sections = []
self.__dict__.update(extra)
class CollectReport(BaseReport):
def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
self.nodeid = nodeid
self.outcome = outcome
self.longrepr = longrepr
self.result = result or []
self.sections = list(sections)
self.__dict__.update(extra)
@property
def location(self):
return (self.fspath, None, self.fspath)
def __repr__(self):
return "<CollectReport %r lenresult=%s outcome=%r>" % (
self.nodeid,
len(self.result),
self.outcome,
)
class CollectErrorRepr(TerminalRepr):
def __init__(self, msg):
self.longrepr = msg
def toterminal(self, out):
out.line(self.longrepr, red=True)

@ -1,123 +0,0 @@
""" log machine-parseable test session result information in a plain
text file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import py
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "resultlog plugin options")
group.addoption(
"--resultlog",
"--result-log",
action="store",
metavar="path",
default=None,
help="DEPRECATED path for machine-readable result log.",
)
def pytest_configure(config):
resultlog = config.option.resultlog
# prevent opening resultlog on slave nodes (xdist)
if resultlog and not hasattr(config, "slaveinput"):
dirname = os.path.dirname(os.path.abspath(resultlog))
if not os.path.isdir(dirname):
os.makedirs(dirname)
logfile = open(resultlog, "w", 1) # line buffered
config._resultlog = ResultLog(config, logfile)
config.pluginmanager.register(config._resultlog)
from _pytest.deprecated import RESULT_LOG
from _pytest.warnings import _issue_config_warning
_issue_config_warning(RESULT_LOG, config)
def pytest_unconfigure(config):
resultlog = getattr(config, "_resultlog", None)
if resultlog:
resultlog.logfile.close()
del config._resultlog
config.pluginmanager.unregister(resultlog)
def generic_path(item):
chain = item.listchain()
gpath = [chain[0].name]
fspath = chain[0].fspath
fspart = False
for node in chain[1:]:
newfspath = node.fspath
if newfspath == fspath:
if fspart:
gpath.append(":")
fspart = False
else:
gpath.append(".")
else:
gpath.append("/")
fspart = True
name = node.name
if name[0] in "([":
gpath.pop()
gpath.append(name)
fspath = newfspath
return "".join(gpath)
class ResultLog(object):
def __init__(self, config, logfile):
self.config = config
self.logfile = logfile # preferably line buffered
def write_log_entry(self, testpath, lettercode, longrepr):
print("%s %s" % (lettercode, testpath), file=self.logfile)
for line in longrepr.splitlines():
print(" %s" % line, file=self.logfile)
def log_outcome(self, report, lettercode, longrepr):
testpath = getattr(report, "nodeid", None)
if testpath is None:
testpath = report.fspath
self.write_log_entry(testpath, lettercode, longrepr)
def pytest_runtest_logreport(self, report):
if report.when != "call" and report.passed:
return
res = self.config.hook.pytest_report_teststatus(report=report)
code = res[1]
if code == "x":
longrepr = str(report.longrepr)
elif code == "X":
longrepr = ""
elif report.passed:
longrepr = ""
elif report.failed:
longrepr = str(report.longrepr)
elif report.skipped:
longrepr = str(report.longrepr[2])
self.log_outcome(report, code, longrepr)
def pytest_collectreport(self, report):
if not report.passed:
if report.failed:
code = "F"
longrepr = str(report.longrepr)
else:
assert report.skipped
code = "S"
longrepr = "%s:%d: %s" % report.longrepr
self.log_outcome(report, code, longrepr)
def pytest_internalerror(self, excrepr):
reprcrash = getattr(excrepr, "reprcrash", None)
path = getattr(reprcrash, "path", None)
if path is None:
path = "cwd:%s" % py.path.local()
self.write_log_entry(path, "!", str(excrepr))

@ -1,394 +0,0 @@
""" basic collect and runtest protocol implementations """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import bdb
import os
import sys
from time import time
import six
from .reports import CollectErrorRepr
from .reports import CollectReport
from .reports import TestReport
from _pytest._code.code import ExceptionInfo
from _pytest.outcomes import skip
from _pytest.outcomes import Skipped
from _pytest.outcomes import TEST_OUTCOME
#
# pytest plugin hooks
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group.addoption(
"--durations",
action="store",
type=int,
default=None,
metavar="N",
help="show N slowest setup/test durations (N=0 for all).",
)
def pytest_terminal_summary(terminalreporter):
durations = terminalreporter.config.option.durations
verbose = terminalreporter.config.getvalue("verbose")
if durations is None:
return
tr = terminalreporter
dlist = []
for replist in tr.stats.values():
for rep in replist:
if hasattr(rep, "duration"):
dlist.append(rep)
if not dlist:
return
dlist.sort(key=lambda x: x.duration)
dlist.reverse()
if not durations:
tr.write_sep("=", "slowest test durations")
else:
tr.write_sep("=", "slowest %s test durations" % durations)
dlist = dlist[:durations]
for rep in dlist:
if verbose < 2 and rep.duration < 0.005:
tr.write_line("")
tr.write_line("(0.00 durations hidden. Use -vv to show these durations.)")
break
nodeid = rep.nodeid.replace("::()::", "::")
tr.write_line("%02.2fs %-8s %s" % (rep.duration, rep.when, nodeid))
def pytest_sessionstart(session):
session._setupstate = SetupState()
def pytest_sessionfinish(session):
session._setupstate.teardown_all()
def pytest_runtest_protocol(item, nextitem):
item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location)
runtestprotocol(item, nextitem=nextitem)
item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location)
return True
def runtestprotocol(item, log=True, nextitem=None):
hasrequest = hasattr(item, "_request")
if hasrequest and not item._request:
item._initrequest()
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
if item.config.option.setupshow:
show_test_item(item)
if not item.config.option.setuponly:
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log, nextitem=nextitem))
# after all teardown hooks have been called
# want funcargs and request info to go away
if hasrequest:
item._request = False
item.funcargs = None
return reports
def show_test_item(item):
"""Show test function, parameters and the fixtures of the test item."""
tw = item.config.get_terminal_writer()
tw.line()
tw.write(" " * 8)
tw.write(item._nodeid)
used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
if used_fixtures:
tw.write(" (fixtures used: {})".format(", ".join(used_fixtures)))
def pytest_runtest_setup(item):
_update_current_test_var(item, "setup")
item.session._setupstate.prepare(item)
def pytest_runtest_call(item):
_update_current_test_var(item, "call")
sys.last_type, sys.last_value, sys.last_traceback = (None, None, None)
try:
item.runtest()
except Exception:
# Store trace info to allow postmortem debugging
type, value, tb = sys.exc_info()
tb = tb.tb_next # Skip *this* frame
sys.last_type = type
sys.last_value = value
sys.last_traceback = tb
del type, value, tb # Get rid of these in this frame
raise
def pytest_runtest_teardown(item, nextitem):
_update_current_test_var(item, "teardown")
item.session._setupstate.teardown_exact(item, nextitem)
_update_current_test_var(item, None)
def _update_current_test_var(item, when):
"""
Update PYTEST_CURRENT_TEST to reflect the current item and stage.
If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment.
"""
var_name = "PYTEST_CURRENT_TEST"
if when:
value = "{} ({})".format(item.nodeid, when)
# don't allow null bytes on environment variables (see #2644, #2957)
value = value.replace("\x00", "(null)")
os.environ[var_name] = value
else:
os.environ.pop(var_name)
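# Illustrative sketch (added for this excerpt, not part of the original
# module): while a test is running, other code in the same process (for
# example a signal handler or a monitoring thread) can read the variable
# that ``_update_current_test_var`` maintains.
def _example_read_current_test():
    # Returns something like "test_module.py::test_name (call)", or None
    # when no test is currently running.
    return os.environ.get("PYTEST_CURRENT_TEST")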
def pytest_report_teststatus(report):
if report.when in ("setup", "teardown"):
if report.failed:
# category, shortletter, verbose-word
return "error", "E", "ERROR"
elif report.skipped:
return "skipped", "s", "SKIPPED"
else:
return "", "", ""
#
# Implementation
def call_and_report(item, when, log=True, **kwds):
call = call_runtest_hook(item, when, **kwds)
hook = item.ihook
report = hook.pytest_runtest_makereport(item=item, call=call)
if log:
hook.pytest_runtest_logreport(report=report)
if check_interactive_exception(call, report):
hook.pytest_exception_interact(node=item, call=call, report=report)
return report
def check_interactive_exception(call, report):
return call.excinfo and not (
hasattr(report, "wasxfail")
or call.excinfo.errisinstance(skip.Exception)
or call.excinfo.errisinstance(bdb.BdbQuit)
)
def call_runtest_hook(item, when, **kwds):
hookname = "pytest_runtest_" + when
ihook = getattr(item.ihook, hookname)
return CallInfo(
lambda: ihook(item=item, **kwds),
when=when,
treat_keyboard_interrupt_as_exception=item.config.getvalue("usepdb"),
)
class CallInfo(object):
""" Result/Exception info a function invocation. """
#: None or ExceptionInfo object.
excinfo = None
def __init__(self, func, when, treat_keyboard_interrupt_as_exception=False):
#: context of invocation: one of "setup", "call",
#: "teardown", "memocollect"
self.when = when
self.start = time()
try:
self.result = func()
except KeyboardInterrupt:
if treat_keyboard_interrupt_as_exception:
self.excinfo = ExceptionInfo()
else:
self.stop = time()
raise
except: # noqa
self.excinfo = ExceptionInfo()
self.stop = time()
def __repr__(self):
if self.excinfo:
status = "exception: %s" % str(self.excinfo.value)
else:
status = "result: %r" % (self.result,)
return "<CallInfo when=%r %s>" % (self.when, status)
def pytest_runtest_makereport(item, call):
when = call.when
duration = call.stop - call.start
keywords = {x: 1 for x in item.keywords}
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome = "passed"
longrepr = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif excinfo.errisinstance(skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else: # exception in setup or teardown
longrepr = item._repr_failure_py(
excinfo, style=item.config.option.tbstyle
)
for rwhen, key, content in item._report_sections:
sections.append(("Captured %s %s" % (key, rwhen), content))
return TestReport(
item.nodeid,
item.location,
keywords,
outcome,
longrepr,
when,
sections,
duration,
user_properties=item.user_properties,
)
def pytest_make_collect_report(collector):
call = CallInfo(lambda: list(collector.collect()), "collect")
longrepr = None
if not call.excinfo:
outcome = "passed"
else:
from _pytest import nose
skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
if call.excinfo.errisinstance(skip_exceptions):
outcome = "skipped"
r = collector._repr_failure_py(call.excinfo, "line").reprcrash
longrepr = (str(r.path), r.lineno, r.message)
else:
outcome = "failed"
errorinfo = collector.repr_failure(call.excinfo)
if not hasattr(errorinfo, "toterminal"):
errorinfo = CollectErrorRepr(errorinfo)
longrepr = errorinfo
rep = CollectReport(
collector.nodeid, outcome, longrepr, getattr(call, "result", None)
)
rep.call = call # see collect_one_node
return rep
class SetupState(object):
""" shared state for setting up/tearing down test items or collectors. """
def __init__(self):
self.stack = []
self._finalizers = {}
def addfinalizer(self, finalizer, colitem):
""" attach a finalizer to the given colitem.
if colitem is None, this will add a finalizer that
is called at the end of teardown_all().
"""
assert colitem and not isinstance(colitem, tuple)
assert callable(finalizer)
# assert colitem in self.stack # some unit tests don't setup stack :/
self._finalizers.setdefault(colitem, []).append(finalizer)
def _pop_and_teardown(self):
colitem = self.stack.pop()
self._teardown_with_finalization(colitem)
def _callfinalizers(self, colitem):
finalizers = self._finalizers.pop(colitem, None)
exc = None
while finalizers:
fin = finalizers.pop()
try:
fin()
except TEST_OUTCOME:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = sys.exc_info()
if exc:
six.reraise(*exc)
def _teardown_with_finalization(self, colitem):
self._callfinalizers(colitem)
if hasattr(colitem, "teardown"):
colitem.teardown()
for colitem in self._finalizers:
assert (
colitem is None or colitem in self.stack or isinstance(colitem, tuple)
)
def teardown_all(self):
while self.stack:
self._pop_and_teardown()
for key in list(self._finalizers):
self._teardown_with_finalization(key)
assert not self._finalizers
def teardown_exact(self, item, nextitem):
needed_collectors = nextitem and nextitem.listchain() or []
self._teardown_towards(needed_collectors)
def _teardown_towards(self, needed_collectors):
exc = None
while self.stack:
if self.stack == needed_collectors[: len(self.stack)]:
break
try:
self._pop_and_teardown()
except TEST_OUTCOME:
# XXX Only first exception will be seen by user,
# ideally all should be reported.
if exc is None:
exc = sys.exc_info()
if exc:
six.reraise(*exc)
def prepare(self, colitem):
""" setup objects along the collector chain to the test-method
and teardown previously setup objects."""
needed_collectors = colitem.listchain()
self._teardown_towards(needed_collectors)
# check if the last collection node has raised an error
for col in self.stack:
if hasattr(col, "_prepare_exc"):
six.reraise(*col._prepare_exc)
for col in needed_collectors[len(self.stack) :]:
self.stack.append(col)
try:
col.setup()
except TEST_OUTCOME:
col._prepare_exc = sys.exc_info()
raise
def collect_one_node(collector):
ihook = collector.ihook
ihook.pytest_collectstart(collector=collector)
rep = ihook.pytest_make_collect_report(collector=collector)
call = rep.__dict__.pop("call", None)
if call and check_interactive_exception(call, rep):
ihook.pytest_exception_interact(node=collector, call=call, report=rep)
return rep

@ -1,88 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import pytest
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption(
"--setuponly",
"--setup-only",
action="store_true",
help="only setup fixtures, do not execute tests.",
)
group.addoption(
"--setupshow",
"--setup-show",
action="store_true",
help="show setup of fixtures while executing tests.",
)
@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef, request):
yield
config = request.config
if config.option.setupshow:
if hasattr(request, "param"):
# Save the fixture parameter so ._show_fixture_action() can
# display it now and during the teardown (in .finish()).
if fixturedef.ids:
if callable(fixturedef.ids):
fixturedef.cached_param = fixturedef.ids(request.param)
else:
fixturedef.cached_param = fixturedef.ids[request.param_index]
else:
fixturedef.cached_param = request.param
_show_fixture_action(fixturedef, "SETUP")
def pytest_fixture_post_finalizer(fixturedef):
if hasattr(fixturedef, "cached_result"):
config = fixturedef._fixturemanager.config
if config.option.setupshow:
_show_fixture_action(fixturedef, "TEARDOWN")
if hasattr(fixturedef, "cached_param"):
del fixturedef.cached_param
def _show_fixture_action(fixturedef, msg):
config = fixturedef._fixturemanager.config
capman = config.pluginmanager.getplugin("capturemanager")
if capman:
capman.suspend_global_capture()
out, err = capman.read_global_capture()
tw = config.get_terminal_writer()
tw.line()
tw.write(" " * 2 * fixturedef.scopenum)
tw.write(
"{step} {scope} {fixture}".format(
step=msg.ljust(8), # align the output to TEARDOWN
scope=fixturedef.scope[0].upper(),
fixture=fixturedef.argname,
)
)
if msg == "SETUP":
deps = sorted(arg for arg in fixturedef.argnames if arg != "request")
if deps:
tw.write(" (fixtures used: {})".format(", ".join(deps)))
if hasattr(fixturedef, "cached_param"):
tw.write("[{}]".format(fixturedef.cached_param))
if capman:
capman.resume_global_capture()
sys.stdout.write(out)
sys.stderr.write(err)
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config):
if config.option.setuponly:
config.option.setupshow = True

@ -1,31 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
def pytest_addoption(parser):
group = parser.getgroup("debugconfig")
group.addoption(
"--setupplan",
"--setup-plan",
action="store_true",
help="show what fixtures and tests would be executed but "
"don't execute anything.",
)
@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(fixturedef, request):
# Will return a dummy fixture if the setupplan option is provided.
if request.config.option.setupplan:
fixturedef.cached_result = (None, None, None)
return fixturedef.cached_result
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config):
if config.option.setupplan:
config.option.setuponly = True
config.option.setupshow = True

@ -1,298 +0,0 @@
""" support for skip/xfail functions and markers. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from _pytest.config import hookimpl
from _pytest.mark.evaluate import MarkEvaluator
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--runxfail",
action="store_true",
dest="runxfail",
default=False,
help="run tests even if they are marked xfail",
)
parser.addini(
"xfail_strict",
"default for the strict parameter of xfail "
"markers when not given explicitly (default: False)",
default=False,
type="bool",
)
def pytest_configure(config):
if config.option.runxfail:
# yay a hack
import pytest
old = pytest.xfail
config._cleanup.append(lambda: setattr(pytest, "xfail", old))
def nop(*args, **kwargs):
pass
nop.Exception = xfail.Exception
setattr(pytest, "xfail", nop)
config.addinivalue_line(
"markers",
"skip(reason=None): skip the given test function with an optional reason. "
'Example: skip(reason="no way of currently testing this") skips the '
"test.",
)
config.addinivalue_line(
"markers",
"skipif(condition): skip the given test function if eval(condition) "
"results in a True value. Evaluation happens within the "
"module global context. Example: skipif('sys.platform == \"win32\"') "
"skips the test if we are on the win32 platform. see "
"https://docs.pytest.org/en/latest/skipping.html",
)
config.addinivalue_line(
"markers",
"xfail(condition, reason=None, run=True, raises=None, strict=False): "
"mark the test function as an expected failure if eval(condition) "
"has a True value. Optionally specify a reason for better reporting "
"and run=False if you don't even want to execute the test function. "
"If only specific exception(s) are expected, you can list them in "
"raises, and if the test fails in other ways, it will be reported as "
"a true failure. See https://docs.pytest.org/en/latest/skipping.html",
)
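# Usage sketch (added for this excerpt, not part of the original plugin):
# the markers registered above are applied to test functions roughly like
# this. The test name and reasons are illustrative only.
def _example_marker_usage():
    import sys
    import pytest

    @pytest.mark.skipif(sys.platform == "win32", reason="POSIX-only behaviour")
    @pytest.mark.xfail(reason="illustrative expected failure", strict=False)
    def test_example():
        assert True

    return test_example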
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
# Check if skip or skipif are specified as pytest marks
item._skipped_by_mark = False
eval_skipif = MarkEvaluator(item, "skipif")
if eval_skipif.istrue():
item._skipped_by_mark = True
skip(eval_skipif.getexplanation())
for skip_info in item.iter_markers(name="skip"):
item._skipped_by_mark = True
if "reason" in skip_info.kwargs:
skip(skip_info.kwargs["reason"])
elif skip_info.args:
skip(skip_info.args[0])
else:
skip("unconditional skip")
item._evalxfail = MarkEvaluator(item, "xfail")
check_xfail_no_run(item)
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
check_xfail_no_run(pyfuncitem)
outcome = yield
passed = outcome.excinfo is None
if passed:
check_strict_xfail(pyfuncitem)
def check_xfail_no_run(item):
"""check xfail(run=False)"""
if not item.config.option.runxfail:
evalxfail = item._evalxfail
if evalxfail.istrue():
if not evalxfail.get("run", True):
xfail("[NOTRUN] " + evalxfail.getexplanation())
def check_strict_xfail(pyfuncitem):
"""check xfail(strict=True) for the given PASSING test"""
evalxfail = pyfuncitem._evalxfail
if evalxfail.istrue():
strict_default = pyfuncitem.config.getini("xfail_strict")
is_strict_xfail = evalxfail.get("strict", strict_default)
if is_strict_xfail:
del pyfuncitem._evalxfail
explanation = evalxfail.getexplanation()
fail("[XPASS(strict)] " + explanation, pytrace=False)
@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
outcome = yield
rep = outcome.get_result()
evalxfail = getattr(item, "_evalxfail", None)
# unittest special case, see setting of _unexpectedsuccess
if hasattr(item, "_unexpectedsuccess") and rep.when == "call":
from _pytest.compat import _is_unittest_unexpected_success_a_failure
if item._unexpectedsuccess:
rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
else:
rep.longrepr = "Unexpected success"
if _is_unittest_unexpected_success_a_failure():
rep.outcome = "failed"
else:
rep.outcome = "passed"
rep.wasxfail = rep.longrepr
elif item.config.option.runxfail:
pass # don't interfere
elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
rep.wasxfail = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
if call.excinfo:
if evalxfail.invalidraise(call.excinfo.value):
rep.outcome = "failed"
else:
rep.outcome = "skipped"
rep.wasxfail = evalxfail.getexplanation()
elif call.when == "call":
strict_default = item.config.getini("xfail_strict")
is_strict_xfail = evalxfail.get("strict", strict_default)
explanation = evalxfail.getexplanation()
if is_strict_xfail:
rep.outcome = "failed"
rep.longrepr = "[XPASS(strict)] {}".format(explanation)
else:
rep.outcome = "passed"
rep.wasxfail = explanation
elif (
getattr(item, "_skipped_by_mark", False)
and rep.skipped
and type(rep.longrepr) is tuple
):
# skipped by mark.skipif; change the location of the failure
# to point to the item definition, otherwise it will display
# the location of where the skip exception was raised within pytest
filename, line, reason = rep.longrepr
filename, line = item.location[:2]
rep.longrepr = filename, line, reason
# called by terminalreporter progress reporting
def pytest_report_teststatus(report):
if hasattr(report, "wasxfail"):
if report.skipped:
return "xfailed", "x", "xfail"
elif report.passed:
return "xpassed", "X", ("XPASS", {"yellow": True})
# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
tr = terminalreporter
if not tr.reportchars:
# for name in "xfailed skipped failed xpassed":
# if not tr.stats.get(name, 0):
# tr.write_line("HINT: use '-r' option to see extra "
# "summary info about tests")
# break
return
lines = []
for char in tr.reportchars:
action = REPORTCHAR_ACTIONS.get(char, lambda tr, lines: None)
action(terminalreporter, lines)
if lines:
tr._tw.sep("=", "short test summary info")
for line in lines:
tr._tw.line(line)
def show_simple(terminalreporter, lines, stat, format):
failed = terminalreporter.stats.get(stat)
if failed:
for rep in failed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
lines.append(format % (pos,))
def show_xfailed(terminalreporter, lines):
xfailed = terminalreporter.stats.get("xfailed")
if xfailed:
for rep in xfailed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
reason = rep.wasxfail
lines.append("XFAIL %s" % (pos,))
if reason:
lines.append(" " + str(reason))
def show_xpassed(terminalreporter, lines):
xpassed = terminalreporter.stats.get("xpassed")
if xpassed:
for rep in xpassed:
pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
reason = rep.wasxfail
lines.append("XPASS %s %s" % (pos, reason))
def folded_skips(skipped):
d = {}
for event in skipped:
key = event.longrepr
assert len(key) == 3, (event, key)
keywords = getattr(event, "keywords", {})
# folding reports with global pytestmark variable
# this is a workaround, because for now we cannot identify the scope of a skip marker
# TODO: revisit once the scope of marks is fixed
when = getattr(event, "when", None)
if when == "setup" and "skip" in keywords and "pytestmark" not in keywords:
key = (key[0], None, key[2])
d.setdefault(key, []).append(event)
values = []
for key, events in d.items():
values.append((len(events),) + key)
return values
def show_skipped(terminalreporter, lines):
tr = terminalreporter
skipped = tr.stats.get("skipped", [])
if skipped:
# if not tr.hasopt('skipped'):
# tr.write_line(
# "%d skipped tests, specify -rs for more info" %
# len(skipped))
# return
fskips = folded_skips(skipped)
if fskips:
# tr.write_sep("_", "skipped test summary")
for num, fspath, lineno, reason in fskips:
if reason.startswith("Skipped: "):
reason = reason[9:]
if lineno is not None:
lines.append(
"SKIP [%d] %s:%d: %s" % (num, fspath, lineno + 1, reason)
)
else:
lines.append("SKIP [%d] %s: %s" % (num, fspath, reason))
def shower(stat, format):
def show_(terminalreporter, lines):
return show_simple(terminalreporter, lines, stat, format)
return show_
REPORTCHAR_ACTIONS = {
"x": show_xfailed,
"X": show_xpassed,
"f": shower("failed", "FAIL %s"),
"F": shower("failed", "FAIL %s"),
"s": show_skipped,
"S": show_skipped,
"p": shower("passed", "PASSED %s"),
"E": shower("error", "ERROR %s"),
}

@ -1,102 +0,0 @@
import pytest
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
"--sw",
"--stepwise",
action="store_true",
dest="stepwise",
help="exit on test fail and continue from last failing test next time",
)
group.addoption(
"--stepwise-skip",
action="store_true",
dest="stepwise_skip",
help="ignore the first failing test but stop on the next failing test",
)
@pytest.hookimpl
def pytest_configure(config):
config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin")
class StepwisePlugin:
def __init__(self, config):
self.config = config
self.active = config.getvalue("stepwise")
self.session = None
if self.active:
self.lastfailed = config.cache.get("cache/stepwise", None)
self.skip = config.getvalue("stepwise_skip")
def pytest_sessionstart(self, session):
self.session = session
def pytest_collection_modifyitems(self, session, config, items):
if not self.active or not self.lastfailed:
return
already_passed = []
found = False
# Make a list of all tests that have been run before the last failing one.
for item in items:
if item.nodeid == self.lastfailed:
found = True
break
else:
already_passed.append(item)
# If the previously failed test was not found among the test items,
# do not skip any tests.
if not found:
already_passed = []
for item in already_passed:
items.remove(item)
config.hook.pytest_deselected(items=already_passed)
def pytest_collectreport(self, report):
if self.active and report.failed:
self.session.shouldstop = (
"Error when collecting test, stopping test execution."
)
def pytest_runtest_logreport(self, report):
# Skip this hook if plugin is not active or the test is xfailed.
if not self.active or "xfail" in report.keywords:
return
if report.failed:
if self.skip:
# Remove test from the failed ones (if it exists) and unset the skip option
# to make sure the following tests will not be skipped.
if report.nodeid == self.lastfailed:
self.lastfailed = None
self.skip = False
else:
# Mark test as the last failing and interrupt the test session.
self.lastfailed = report.nodeid
self.session.shouldstop = (
"Test failed, continuing from this test next run."
)
else:
# If the test was actually run and did pass.
if report.when == "call":
# Remove test from the failed ones, if exists.
if report.nodeid == self.lastfailed:
self.lastfailed = None
def pytest_sessionfinish(self, session):
if self.active:
self.config.cache.set("cache/stepwise", self.lastfailed)
else:
# Clear the list of failing tests if the plugin is not active.
self.config.cache.set("cache/stepwise", [])

@ -1,893 +0,0 @@
""" terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import itertools
import platform
import sys
import time
import attr
import pluggy
import py
import six
from more_itertools import collapse
import pytest
from _pytest import nodes
from _pytest.main import EXIT_INTERRUPTED
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.main import EXIT_OK
from _pytest.main import EXIT_TESTSFAILED
from _pytest.main import EXIT_USAGEERROR
class MoreQuietAction(argparse.Action):
"""
A modified copy of the argparse count action which counts down and updates
the legacy ``quiet`` attribute at the same time. Used to unify verbosity
handling.
"""
def __init__(self, option_strings, dest, default=None, required=False, help=None):
super(MoreQuietAction, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=0,
default=default,
required=required,
help=help,
)
def __call__(self, parser, namespace, values, option_string=None):
new_count = getattr(namespace, self.dest, 0) - 1
setattr(namespace, self.dest, new_count)
# todo Deprecate config.quiet
namespace.quiet = getattr(namespace, "quiet", 0) + 1
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption(
"-v",
"--verbose",
action="count",
default=0,
dest="verbose",
help="increase verbosity.",
)
group._addoption(
"-q",
"--quiet",
action=MoreQuietAction,
default=0,
dest="verbose",
help="decrease verbosity.",
)
group._addoption(
"--verbosity", dest="verbose", type=int, default=0, help="set verbosity"
)
group._addoption(
"-r",
action="store",
dest="reportchars",
default="",
metavar="chars",
help="show extra test summary info as specified by chars (f)ailed, "
"(E)error, (s)skipped, (x)failed, (X)passed, "
"(p)passed, (P)passed with output, (a)all except pP. "
"Warnings are displayed at all times except when "
"--disable-warnings is set",
)
group._addoption(
"--disable-warnings",
"--disable-pytest-warnings",
default=False,
dest="disable_warnings",
action="store_true",
help="disable warnings summary",
)
group._addoption(
"-l",
"--showlocals",
action="store_true",
dest="showlocals",
default=False,
help="show locals in tracebacks (disabled by default).",
)
group._addoption(
"--tb",
metavar="style",
action="store",
dest="tbstyle",
default="auto",
choices=["auto", "long", "short", "no", "line", "native"],
help="traceback print mode (auto/long/short/line/native/no).",
)
group._addoption(
"--show-capture",
action="store",
dest="showcapture",
choices=["no", "stdout", "stderr", "log", "all"],
default="all",
help="Controls how captured stdout/stderr/log is shown on failed tests. "
"Default is 'all'.",
)
group._addoption(
"--fulltrace",
"--full-trace",
action="store_true",
default=False,
help="don't cut any tracebacks (default is to cut).",
)
group._addoption(
"--color",
metavar="color",
action="store",
dest="color",
default="auto",
choices=["yes", "no", "auto"],
help="color terminal output (yes/no/auto).",
)
parser.addini(
"console_output_style",
help="console output: classic or with additional progress information (classic|progress).",
default="progress",
)
def pytest_configure(config):
reporter = TerminalReporter(config, sys.stdout)
config.pluginmanager.register(reporter, "terminalreporter")
if config.option.debug or config.option.traceconfig:
def mywriter(tags, args):
msg = " ".join(map(str, args))
reporter.write_line("[traceconfig] " + msg)
config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config):
reportopts = ""
reportchars = config.option.reportchars
if not config.option.disable_warnings and "w" not in reportchars:
reportchars += "w"
elif config.option.disable_warnings and "w" in reportchars:
reportchars = reportchars.replace("w", "")
if reportchars:
for char in reportchars:
if char not in reportopts and char != "a":
reportopts += char
elif char == "a":
reportopts = "fEsxXw"
return reportopts
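# Illustrative sketch (added for this excerpt, not part of the original
# module): how the expansion above behaves. ``_FakeOptions``/``_FakeConfig``
# are made-up stand-ins for the real config object.
def _example_getreportopt():
    class _FakeOptions(object):
        reportchars = "a"
        disable_warnings = False

    class _FakeConfig(object):
        pass

    cfg = _FakeConfig()
    cfg.option = _FakeOptions()
    # "a" expands to failed, errors, skipped, xfailed, xpassed and warnings.
    assert getreportopt(cfg) == "fEsxXw"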
def pytest_report_teststatus(report):
if report.passed:
letter = "."
elif report.skipped:
letter = "s"
elif report.failed:
letter = "F"
if report.when != "call":
letter = "f"
return report.outcome, letter, report.outcome.upper()
@attr.s
class WarningReport(object):
"""
Simple structure to hold warnings information captured by ``pytest_logwarning`` and ``pytest_warning_captured``.
:ivar str message: user friendly message about the warning
:ivar str|None nodeid: node id that generated the warning (see ``get_location``).
:ivar tuple|py.path.local fslocation:
file system location of the source of the warning (see ``get_location``).
:ivar bool legacy: if this warning report was generated from the deprecated ``pytest_logwarning`` hook.
"""
message = attr.ib()
nodeid = attr.ib(default=None)
fslocation = attr.ib(default=None)
legacy = attr.ib(default=False)
def get_location(self, config):
"""
Returns the most user-friendly information available about the location
of a warning, or None.
"""
if self.nodeid:
return self.nodeid
if self.fslocation:
if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
filename, linenum = self.fslocation[:2]
relpath = py.path.local(filename).relto(config.invocation_dir)
if not relpath:
relpath = str(filename)
return "%s:%s" % (relpath, linenum)
else:
return str(self.fslocation)
return None
class TerminalReporter(object):
def __init__(self, config, file=None):
import _pytest.config
self.config = config
self.verbosity = self.config.option.verbose
self.showheader = self.verbosity >= 0
self.showfspath = self.verbosity >= 0
self.showlongtestinfo = self.verbosity > 0
self._numcollected = 0
self._session = None
self.stats = {}
self.startdir = py.path.local()
if file is None:
file = sys.stdout
self._tw = _pytest.config.create_terminal_writer(config, file)
# self.writer will be deprecated in pytest-3.4
self.writer = self._tw
self._screen_width = self._tw.fullwidth
self.currentfspath = None
self.reportchars = getreportopt(config)
self.hasmarkup = self._tw.hasmarkup
self.isatty = file.isatty()
self._progress_nodeids_reported = set()
self._show_progress_info = self._determine_show_progress_info()
self._collect_report_last_write = None
def _determine_show_progress_info(self):
"""Return True if we should display progress information based on the current config"""
# do not show progress if we are not capturing output (#3038)
if self.config.getoption("capture") == "no":
return False
# do not show progress if we are showing fixture setup/teardown
if self.config.getoption("setupshow"):
return False
return self.config.getini("console_output_style") in ("progress", "count")
def hasopt(self, char):
char = {"xfailed": "x", "skipped": "s"}.get(char, char)
return char in self.reportchars
def write_fspath_result(self, nodeid, res, **markup):
fspath = self.config.rootdir.join(nodeid.split("::")[0])
if fspath != self.currentfspath:
if self.currentfspath is not None and self._show_progress_info:
self._write_progress_information_filling_space()
self.currentfspath = fspath
fspath = self.startdir.bestrelpath(fspath)
self._tw.line()
self._tw.write(fspath + " ")
self._tw.write(res, **markup)
def write_ensure_prefix(self, prefix, extra="", **kwargs):
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self):
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write(self, content, **markup):
self._tw.write(content, **markup)
def write_line(self, line, **markup):
if not isinstance(line, six.text_type):
line = six.text_type(line, errors="replace")
self.ensure_newline()
self._tw.line(line, **markup)
def rewrite(self, line, **markup):
"""
Rewinds the terminal cursor to the beginning and writes the given line.
:kwarg erase: if True, will also add spaces until the full terminal width to ensure
previous lines are properly erased.
The rest of the keyword arguments are markup instructions.
"""
erase = markup.pop("erase", False)
if erase:
fill_count = self._tw.fullwidth - len(line) - 1
fill = " " * fill_count
else:
fill = ""
line = str(line)
self._tw.write("\r" + line + fill, **markup)
def write_sep(self, sep, title=None, **markup):
self.ensure_newline()
self._tw.sep(sep, title, **markup)
def section(self, title, sep="=", **kw):
self._tw.sep(sep, title, **kw)
def line(self, msg, **kw):
self._tw.line(msg, **kw)
def pytest_internalerror(self, excrepr):
for line in six.text_type(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
return 1
def pytest_logwarning(self, fslocation, message, nodeid):
warnings = self.stats.setdefault("warnings", [])
warning = WarningReport(
fslocation=fslocation, message=message, nodeid=nodeid, legacy=True
)
warnings.append(warning)
def pytest_warning_captured(self, warning_message, item):
# from _pytest.nodes import get_fslocation_from_item
from _pytest.warnings import warning_record_to_str
warnings = self.stats.setdefault("warnings", [])
fslocation = warning_message.filename, warning_message.lineno
message = warning_record_to_str(warning_message)
nodeid = item.nodeid if item is not None else ""
warning_report = WarningReport(
fslocation=fslocation, message=message, nodeid=nodeid
)
warnings.append(warning_report)
def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
msg = "PLUGIN registered: %s" % (plugin,)
# XXX this event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line
self.write_line(msg)
def pytest_deselected(self, items):
self.stats.setdefault("deselected", []).extend(items)
def pytest_runtest_logstart(self, nodeid, location):
# ensure that the path is printed before the
# 1st test of a module starts running
if self.showlongtestinfo:
line = self._locationline(nodeid, *location)
self.write_ensure_prefix(line, "")
elif self.showfspath:
fsid = nodeid.split("::")[0]
self.write_fspath_result(fsid, "")
def pytest_runtest_logreport(self, report):
rep = report
res = self.config.hook.pytest_report_teststatus(report=rep)
category, letter, word = res
if isinstance(word, tuple):
word, markup = word
else:
markup = None
self.stats.setdefault(category, []).append(rep)
self._tests_ran = True
if not letter and not word:
# probably passed setup/teardown
return
running_xdist = hasattr(rep, "node")
if markup is None:
if rep.passed:
markup = {"green": True}
elif rep.failed:
markup = {"red": True}
elif rep.skipped:
markup = {"yellow": True}
else:
markup = {}
if self.verbosity <= 0:
if not running_xdist and self.showfspath:
self.write_fspath_result(rep.nodeid, letter, **markup)
else:
self._tw.write(letter, **markup)
else:
self._progress_nodeids_reported.add(rep.nodeid)
line = self._locationline(rep.nodeid, *rep.location)
if not running_xdist:
self.write_ensure_prefix(line, word, **markup)
if self._show_progress_info:
self._write_progress_information_filling_space()
else:
self.ensure_newline()
self._tw.write("[%s]" % rep.node.gateway.id)
if self._show_progress_info:
self._tw.write(
self._get_progress_information_message() + " ", cyan=True
)
else:
self._tw.write(" ")
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
def pytest_runtest_logfinish(self, nodeid):
if self.config.getini("console_output_style") == "count":
num_tests = self._session.testscollected
progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests)))
else:
progress_length = len(" [100%]")
if self.verbosity <= 0 and self._show_progress_info:
self._progress_nodeids_reported.add(nodeid)
last_item = (
len(self._progress_nodeids_reported) == self._session.testscollected
)
if last_item:
self._write_progress_information_filling_space()
else:
w = self._width_of_current_line
past_edge = w + progress_length + 1 >= self._screen_width
if past_edge:
msg = self._get_progress_information_message()
self._tw.write(msg + "\n", cyan=True)
def _get_progress_information_message(self):
if self.config.getoption("capture") == "no":
return ""
collected = self._session.testscollected
if self.config.getini("console_output_style") == "count":
if collected:
progress = self._progress_nodeids_reported
counter_format = "{{:{}d}}".format(len(str(collected)))
format_string = " [{}/{{}}]".format(counter_format)
return format_string.format(len(progress), collected)
return " [ {} / {} ]".format(collected, collected)
else:
if collected:
progress = len(self._progress_nodeids_reported) * 100 // collected
return " [{:3d}%]".format(progress)
return " [100%]"
def _write_progress_information_filling_space(self):
msg = self._get_progress_information_message()
w = self._width_of_current_line
fill = self._tw.fullwidth - w - 1
self.write(msg.rjust(fill), cyan=True)
@property
def _width_of_current_line(self):
"""Return the width of current line, using the superior implementation of py-1.6 when available"""
try:
return self._tw.width_of_current_line
except AttributeError:
# py < 1.6.0
return self._tw.chars_on_current_line
def pytest_collection(self):
if self.isatty:
if self.config.option.verbose >= 0:
self.write("collecting ... ", bold=True)
self._collect_report_last_write = time.time()
elif self.config.option.verbose >= 1:
self.write("collecting ... ", bold=True)
def pytest_collectreport(self, report):
if report.failed:
self.stats.setdefault("error", []).append(report)
elif report.skipped:
self.stats.setdefault("skipped", []).append(report)
items = [x for x in report.result if isinstance(x, pytest.Item)]
self._numcollected += len(items)
if self.isatty:
self.report_collect()
def report_collect(self, final=False):
if self.config.option.verbose < 0:
return
if not final:
# Only write "collecting" report every 0.5s.
t = time.time()
if (
self._collect_report_last_write is not None
and self._collect_report_last_write > t - 0.5
):
return
self._collect_report_last_write = t
errors = len(self.stats.get("error", []))
skipped = len(self.stats.get("skipped", []))
deselected = len(self.stats.get("deselected", []))
if final:
line = "collected "
else:
line = "collecting "
line += (
str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
)
if errors:
line += " / %d errors" % errors
if deselected:
line += " / %d deselected" % deselected
if skipped:
line += " / %d skipped" % skipped
if self.isatty:
self.rewrite(line, bold=True, erase=True)
if final:
self.write("\n")
else:
self.write_line(line)
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(self):
self.report_collect(True)
@pytest.hookimpl(trylast=True)
def pytest_sessionstart(self, session):
self._session = session
self._sessionstarttime = time.time()
if not self.showheader:
return
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
msg = "platform %s -- Python %s" % (sys.platform, verinfo)
if hasattr(sys, "pypy_version_info"):
verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
msg += ", pytest-%s, py-%s, pluggy-%s" % (
pytest.__version__,
py.__version__,
pluggy.__version__,
)
if (
self.verbosity > 0
or self.config.option.debug
or getattr(self.config.option, "pastebin", None)
):
msg += " -- " + str(sys.executable)
self.write_line(msg)
lines = self.config.hook.pytest_report_header(
config=self.config, startdir=self.startdir
)
self._write_report_lines_from_hooks(lines)
def _write_report_lines_from_hooks(self, lines):
lines.reverse()
for line in collapse(lines):
self.write_line(line)
def pytest_report_header(self, config):
inifile = ""
if config.inifile:
inifile = " " + config.rootdir.bestrelpath(config.inifile)
lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)]
plugininfo = config.pluginmanager.list_plugin_distinfo()
if plugininfo:
lines.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
return lines
def pytest_collection_finish(self, session):
if self.config.option.collectonly:
self._printcollecteditems(session.items)
if self.stats.get("failed"):
self._tw.sep("!", "collection failures")
for rep in self.stats.get("failed"):
rep.toterminal(self._tw)
return 1
return 0
lines = self.config.hook.pytest_report_collectionfinish(
config=self.config, startdir=self.startdir, items=session.items
)
self._write_report_lines_from_hooks(lines)
def _printcollecteditems(self, items):
# to print out items and their parent collectors
# we take care to leave out Instances aka ()
# because later versions are going to get rid of them anyway
if self.config.option.verbose < 0:
if self.config.option.verbose < -1:
counts = {}
for item in items:
name = item.nodeid.split("::", 1)[0]
counts[name] = counts.get(name, 0) + 1
for name, count in sorted(counts.items()):
self._tw.line("%s: %d" % (name, count))
else:
for item in items:
nodeid = item.nodeid
nodeid = nodeid.replace("::()::", "::")
self._tw.line(nodeid)
return
stack = []
indent = ""
for item in items:
needed_collectors = item.listchain()[1:] # strip root node
while stack:
if stack == needed_collectors[: len(stack)]:
break
stack.pop()
for col in needed_collectors[len(stack) :]:
stack.append(col)
# if col.name == "()":
# continue
indent = (len(stack) - 1) * " "
self._tw.line("%s%s" % (indent, col))
@pytest.hookimpl(hookwrapper=True)
def pytest_sessionfinish(self, exitstatus):
outcome = yield
outcome.get_result()
self._tw.line("")
summary_exit_codes = (
EXIT_OK,
EXIT_TESTSFAILED,
EXIT_INTERRUPTED,
EXIT_USAGEERROR,
EXIT_NOTESTSCOLLECTED,
)
if exitstatus in summary_exit_codes:
self.config.hook.pytest_terminal_summary(
terminalreporter=self, exitstatus=exitstatus
)
if exitstatus == EXIT_INTERRUPTED:
self._report_keyboardinterrupt()
del self._keyboardinterrupt_memo
self.summary_stats()
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(self):
self.summary_errors()
self.summary_failures()
yield
self.summary_warnings()
self.summary_passes()
def pytest_keyboard_interrupt(self, excinfo):
self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self):
if hasattr(self, "_keyboardinterrupt_memo"):
self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self):
excrepr = self._keyboardinterrupt_memo
msg = excrepr.reprcrash.message
self.write_sep("!", msg)
if "KeyboardInterrupt" in msg:
if self.config.option.fulltrace:
excrepr.toterminal(self._tw)
else:
excrepr.reprcrash.toterminal(self._tw)
self._tw.line(
"(to show a full traceback on KeyboardInterrupt use --fulltrace)",
yellow=True,
)
def _locationline(self, nodeid, fspath, lineno, domain):
def mkrel(nodeid):
line = self.config.cwd_relative_nodeid(nodeid)
if domain and line.endswith(domain):
line = line[: -len(domain)]
values = domain.split("[")
values[0] = values[0].replace(".", "::") # don't replace '.' in params
line += "[".join(values)
return line
# collect_fspath comes from testid which has a "/"-normalized path
if fspath:
res = mkrel(nodeid).replace("::()", "") # parens-normalization
if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
"\\", nodes.SEP
):
res += " <- " + self.startdir.bestrelpath(fspath)
else:
res = "[location]"
return res + " "
def _getfailureheadline(self, rep):
if hasattr(rep, "location"):
fspath, lineno, domain = rep.location
return domain
else:
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# summaries for sessionfinish
#
def getreports(self, name):
values = []
for x in self.stats.get(name, []):
if not hasattr(x, "_pdbshown"):
values.append(x)
return values
def summary_warnings(self):
if self.hasopt("w"):
all_warnings = self.stats.get("warnings")
if not all_warnings:
return
grouped = itertools.groupby(
all_warnings, key=lambda wr: wr.get_location(self.config)
)
self.write_sep("=", "warnings summary", yellow=True, bold=False)
for location, warning_records in grouped:
# legacy warnings show their location explicitly, while standard warnings look better without
# it because the location is already formatted into the message
warning_records = list(warning_records)
if location:
self._tw.line(str(location))
for w in warning_records:
if location:
lines = w.message.splitlines()
indented = "\n".join(" " + x for x in lines)
message = indented.rstrip()
else:
message = w.message.rstrip()
self._tw.line(message)
self._tw.line()
self._tw.line("-- Docs: https://docs.pytest.org/en/latest/warnings.html")
def summary_passes(self):
if self.config.option.tbstyle != "no":
if self.hasopt("P"):
reports = self.getreports("passed")
if not reports:
return
self.write_sep("=", "PASSES")
for rep in reports:
if rep.sections:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg)
self._outrep_summary(rep)
def print_teardown_sections(self, rep):
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
if "teardown" in secname:
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_failures(self):
if self.config.option.tbstyle != "no":
reports = self.getreports("failed")
if not reports:
return
self.write_sep("=", "FAILURES")
for rep in reports:
if self.config.option.tbstyle == "line":
line = self._getcrashline(rep)
self.write_line(line)
else:
msg = self._getfailureheadline(rep)
markup = {"red": True, "bold": True}
self.write_sep("_", msg, **markup)
self._outrep_summary(rep)
for report in self.getreports(""):
if report.nodeid == rep.nodeid and report.when == "teardown":
self.print_teardown_sections(report)
def summary_errors(self):
if self.config.option.tbstyle != "no":
reports = self.getreports("error")
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats["error"]:
msg = self._getfailureheadline(rep)
if not hasattr(rep, "when"):
# collect
msg = "ERROR collecting " + msg
elif rep.when == "setup":
msg = "ERROR at setup of " + msg
elif rep.when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg)
self._outrep_summary(rep)
def _outrep_summary(self, rep):
rep.toterminal(self._tw)
showcapture = self.config.option.showcapture
if showcapture == "no":
return
for secname, content in rep.sections:
if showcapture != "all" and showcapture not in secname:
continue
self._tw.sep("-", secname)
if content[-1:] == "\n":
content = content[:-1]
self._tw.line(content)
def summary_stats(self):
session_duration = time.time() - self._sessionstarttime
(line, color) = build_summary_stats_line(self.stats)
msg = "%s in %.2f seconds" % (line, session_duration)
markup = {color: True, "bold": True}
if self.verbosity >= 0:
self.write_sep("=", msg, **markup)
if self.verbosity == -1:
self.write_line(msg, **markup)
def repr_pythonversion(v=None):
if v is None:
v = sys.version_info
try:
return "%s.%s.%s-%s-%s" % v
except (TypeError, ValueError):
return str(v)
def build_summary_stats_line(stats):
keys = ("failed passed skipped deselected xfailed xpassed warnings error").split()
unknown_key_seen = False
for key in stats.keys():
if key not in keys:
if key: # setup/teardown reports have an empty key, ignore them
keys.append(key)
unknown_key_seen = True
parts = []
for key in keys:
val = stats.get(key, None)
if val:
parts.append("%d %s" % (len(val), key))
if parts:
line = ", ".join(parts)
else:
line = "no tests ran"
if "failed" in stats or "error" in stats:
color = "red"
elif "warnings" in stats or unknown_key_seen:
color = "yellow"
elif "passed" in stats:
color = "green"
else:
color = "yellow"
return (line, color)
def _plugin_nameversions(plugininfo):
values = []
for plugin, dist in plugininfo:
# gets us name and version!
name = "{dist.project_name}-{dist.version}".format(dist=dist)
# questionable convenience, but it keeps things short
if name.startswith("pytest-"):
name = name[7:]
# we decided to print python package names
# they can have more than one plugin
if name not in values:
values.append(name)
return values

@ -1,187 +0,0 @@
""" support for providing temporary directories to test functions. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import tempfile
import warnings
import attr
import py
import pytest
from .pathlib import ensure_reset_dir
from .pathlib import LOCK_TIMEOUT
from .pathlib import make_numbered_dir
from .pathlib import make_numbered_dir_with_cleanup
from .pathlib import Path
from _pytest.monkeypatch import MonkeyPatch
@attr.s
class TempPathFactory(object):
"""Factory for temporary directories under the common base temp directory.
The base directory can be configured using the ``--basetemp`` option."""
_given_basetemp = attr.ib()
_trace = attr.ib()
_basetemp = attr.ib(default=None)
@classmethod
def from_config(cls, config):
"""
:param config: a pytest configuration
"""
return cls(
given_basetemp=config.option.basetemp, trace=config.trace.get("tmpdir")
)
def mktemp(self, basename, numbered=True):
"""makes a temporary directory managed by the factory"""
if not numbered:
p = self.getbasetemp().joinpath(basename)
p.mkdir()
else:
p = make_numbered_dir(root=self.getbasetemp(), prefix=basename)
self._trace("mktemp", p)
return p
def getbasetemp(self):
""" return base temporary directory. """
if self._basetemp is None:
if self._given_basetemp is not None:
basetemp = Path(self._given_basetemp)
ensure_reset_dir(basetemp)
else:
from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT")
temproot = Path(from_env or tempfile.gettempdir())
user = get_user() or "unknown"
# use a sub-directory in the temproot to speed-up
# make_numbered_dir() call
rootdir = temproot.joinpath("pytest-of-{}".format(user))
rootdir.mkdir(exist_ok=True)
basetemp = make_numbered_dir_with_cleanup(
prefix="pytest-", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT
)
assert basetemp is not None
self._basetemp = t = basetemp
self._trace("new basetemp", t)
return t
else:
return self._basetemp
@attr.s
class TempdirFactory(object):
"""
backward compatibility wrapper that implements
:class:``py.path.local`` for :class:``TempPathFactory``
"""
_tmppath_factory = attr.ib()
def ensuretemp(self, string, dir=1):
""" (deprecated) return temporary directory path with
the given string as the trailing part. It is usually
better to use the 'tmpdir' function argument which
provides a unique-per-test-invocation directory
that is guaranteed to be empty.
"""
# py.log._apiwarn(">1.1", "use tmpdir function argument")
from .deprecated import PYTEST_ENSURETEMP
warnings.warn(PYTEST_ENSURETEMP, stacklevel=2)
return self.getbasetemp().ensure(string, dir=dir)
def mktemp(self, basename, numbered=True):
"""Create a subdirectory of the base temporary directory and return it.
If ``numbered``, ensure the directory is unique by adding a numeric
suffix greater than any existing one.
"""
return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve())
def getbasetemp(self):
"""backward compat wrapper for ``_tmppath_factory.getbasetemp``"""
return py.path.local(self._tmppath_factory.getbasetemp().resolve())
def get_user():
"""Return the current user name, or None if getuser() does not work
in the current environment (see #1010).
"""
import getpass
try:
return getpass.getuser()
except (ImportError, KeyError):
return None
def pytest_configure(config):
"""Create a TempdirFactory and attach it to the config object.
This is to comply with existing plugins which expect the handler to be
available at pytest_configure time, but ideally should be moved entirely
to the tmpdir_factory session fixture.
"""
mp = MonkeyPatch()
tmppath_handler = TempPathFactory.from_config(config)
t = TempdirFactory(tmppath_handler)
config._cleanup.append(mp.undo)
mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False)
mp.setattr(config, "_tmpdirhandler", t, raising=False)
mp.setattr(pytest, "ensuretemp", t.ensuretemp, raising=False)
@pytest.fixture(scope="session")
def tmpdir_factory(request):
"""Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session.
"""
return request.config._tmpdirhandler
@pytest.fixture(scope="session")
def tmp_path_factory(request):
"""Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session.
"""
return request.config._tmp_path_factory
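# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical session-scoped fixture built on tmp_path_factory, plus a test
# consuming it; the fixture name, file name, and test name are assumptions for
# this example. mktemp() returns a fresh, numbered pathlib.Path under the base
# temp directory.
@pytest.fixture(scope="session")
def _example_shared_datadir(tmp_path_factory):
    d = tmp_path_factory.mktemp("shared-data")
    d.joinpath("config.txt").write_text(u"answer=42")
    return d
def _example_test_reads_shared_config(_example_shared_datadir):
    assert _example_shared_datadir.joinpath("config.txt").read_text() == u"answer=42"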
def _mk_tmp(request, factory):
name = request.node.name
name = re.sub(r"[\W]", "_", name)
MAXVAL = 30
name = name[:MAXVAL]
return factory.mktemp(name, numbered=True)
@pytest.fixture
def tmpdir(request, tmpdir_factory):
"""Return a temporary directory path object
which is unique to each test function invocation,
created as a sub directory of the base temporary
directory. The returned object is a `py.path.local`_
path object.
.. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html
"""
return _mk_tmp(request, tmpdir_factory)
@pytest.fixture
def tmp_path(request, tmp_path_factory):
"""Return a temporary directory path object
which is unique to each test function invocation,
created as a sub directory of the base temporary
directory. The returned object is a :class:`pathlib.Path`
object.
.. note::
in python < 3.6 this is a pathlib2.Path
"""
return _mk_tmp(request, tmp_path_factory)
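# --- Illustrative sketch (not part of the original module) -------------------
# Hypothetical tests showing the difference between the two function-scoped
# fixtures above: tmpdir yields a py.path.local, tmp_path a pathlib.Path, but
# both point at a unique per-test directory under the base temp directory.
def _example_test_with_tmpdir(tmpdir):
    f = tmpdir.join("hello.txt")  # py.path.local API
    f.write("content")
    assert f.read() == "content"
def _example_test_with_tmp_path(tmp_path):
    f = tmp_path / "hello.txt"  # pathlib.Path API
    f.write_text(u"content")
    assert f.read_text() == u"content"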

@ -1,259 +0,0 @@
""" discovery and running of std-library "unittest" style tests. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import traceback
import _pytest._code
from _pytest.compat import getimfunc
from _pytest.config import hookimpl
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.python import Class
from _pytest.python import Function
from _pytest.python import Module
from _pytest.python import transfer_markers
def pytest_pycollect_makeitem(collector, name, obj):
# has unittest been imported and is obj a subclass of its TestCase?
try:
if not issubclass(obj, sys.modules["unittest"].TestCase):
return
except Exception:
return
# yes, so let's collect it
return UnitTestCase(name, parent=collector)
class UnitTestCase(Class):
# marker for fixturemanager.getfixtureinfo()
# to declare that our children do not support funcargs
nofuncargs = True
def setup(self):
cls = self.obj
if getattr(cls, "__unittest_skip__", False):
return # skipped
setup = getattr(cls, "setUpClass", None)
if setup is not None:
setup()
teardown = getattr(cls, "tearDownClass", None)
if teardown is not None:
self.addfinalizer(teardown)
super(UnitTestCase, self).setup()
def collect(self):
from unittest import TestLoader
cls = self.obj
if not getattr(cls, "__test__", True):
return
self.session._fixturemanager.parsefactories(self, unittest=True)
loader = TestLoader()
module = self.getparent(Module).obj
foundsomething = False
for name in loader.getTestCaseNames(self.obj):
x = getattr(self.obj, name)
if not getattr(x, "__test__", True):
continue
funcobj = getimfunc(x)
transfer_markers(funcobj, cls, module)
yield TestCaseFunction(name, parent=self, callobj=funcobj)
foundsomething = True
if not foundsomething:
runtest = getattr(self.obj, "runTest", None)
if runtest is not None:
ut = sys.modules.get("twisted.trial.unittest", None)
if ut is None or runtest != ut.TestCase.runTest:
yield TestCaseFunction("runTest", parent=self)
class TestCaseFunction(Function):
nofuncargs = True
_excinfo = None
_testcase = None
def setup(self):
self._testcase = self.parent.obj(self.name)
self._fix_unittest_skip_decorator()
self._obj = getattr(self._testcase, self.name)
if hasattr(self._testcase, "setup_method"):
self._testcase.setup_method(self._obj)
if hasattr(self, "_request"):
self._request._fillfixtures()
def _fix_unittest_skip_decorator(self):
"""
The @unittest.skip decorator calls functools.wraps(self._testcase)
The call to functools.wraps() fails unless self._testcase
has a __name__ attribute. This is usually automatically supplied
if the test is a function or method, but we need to add it manually
here.
See issue #1169
"""
if sys.version_info[0] == 2:
setattr(self._testcase, "__name__", self.name)
def teardown(self):
if hasattr(self._testcase, "teardown_method"):
self._testcase.teardown_method(self._obj)
# Allow garbage collection on TestCase instance attributes.
self._testcase = None
self._obj = None
def startTest(self, testcase):
pass
def _addexcinfo(self, rawexcinfo):
# unwrap potential exception info (see twisted trial support below)
rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo)
try:
excinfo = _pytest._code.ExceptionInfo(rawexcinfo)
except TypeError:
try:
try:
values = traceback.format_exception(*rawexcinfo)
values.insert(
0,
"NOTE: Incompatible Exception Representation, "
"displaying natively:\n\n",
)
fail("".join(values), pytrace=False)
except (fail.Exception, KeyboardInterrupt):
raise
except: # noqa
fail(
"ERROR: Unknown Incompatible Exception "
"representation:\n%r" % (rawexcinfo,),
pytrace=False,
)
except KeyboardInterrupt:
raise
except fail.Exception:
excinfo = _pytest._code.ExceptionInfo()
self.__dict__.setdefault("_excinfo", []).append(excinfo)
def addError(self, testcase, rawexcinfo):
self._addexcinfo(rawexcinfo)
def addFailure(self, testcase, rawexcinfo):
self._addexcinfo(rawexcinfo)
def addSkip(self, testcase, reason):
try:
skip(reason)
except skip.Exception:
self._skipped_by_mark = True
self._addexcinfo(sys.exc_info())
def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
try:
xfail(str(reason))
except xfail.Exception:
self._addexcinfo(sys.exc_info())
def addUnexpectedSuccess(self, testcase, reason=""):
self._unexpectedsuccess = reason
def addSuccess(self, testcase):
pass
def stopTest(self, testcase):
pass
def _handle_skip(self):
# implements the skipping machinery (see #2137)
# analogous to Python's Lib/unittest/case.py:run
testMethod = getattr(self._testcase, self._testcase._testMethodName)
if getattr(self._testcase.__class__, "__unittest_skip__", False) or getattr(
testMethod, "__unittest_skip__", False
):
# If the class or method was skipped.
skip_why = getattr(
self._testcase.__class__, "__unittest_skip_why__", ""
) or getattr(testMethod, "__unittest_skip_why__", "")
try: # PY3, unittest2 on PY2
self._testcase._addSkip(self, self._testcase, skip_why)
except TypeError: # PY2
if sys.version_info[0] != 2:
raise
self._testcase._addSkip(self, skip_why)
return True
return False
def runtest(self):
if self.config.pluginmanager.get_plugin("pdbinvoke") is None:
self._testcase(result=self)
else:
# disables tearDown and cleanups for post mortem debugging (see #1890)
if self._handle_skip():
return
self._testcase.debug()
def _prunetraceback(self, excinfo):
Function._prunetraceback(self, excinfo)
traceback = excinfo.traceback.filter(
lambda x: not x.frame.f_globals.get("__unittest")
)
if traceback:
excinfo.traceback = traceback
@hookimpl(tryfirst=True)
def pytest_runtest_makereport(item, call):
if isinstance(item, TestCaseFunction):
if item._excinfo:
call.excinfo = item._excinfo.pop(0)
try:
del call.result
except AttributeError:
pass
# twisted trial support
@hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item):
if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules:
ut = sys.modules["twisted.python.failure"]
Failure__init__ = ut.Failure.__init__
check_testcase_implements_trial_reporter()
def excstore(
self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None
):
if exc_value is None:
self._rawexcinfo = sys.exc_info()
else:
if exc_type is None:
exc_type = type(exc_value)
self._rawexcinfo = (exc_type, exc_value, exc_tb)
try:
Failure__init__(
self, exc_value, exc_type, exc_tb, captureVars=captureVars
)
except TypeError:
Failure__init__(self, exc_value, exc_type, exc_tb)
ut.Failure.__init__ = excstore
yield
ut.Failure.__init__ = Failure__init__
else:
yield
def check_testcase_implements_trial_reporter(done=[]):
if done:
return
from zope.interface import classImplements
from twisted.trial.itrial import IReporter
classImplements(TestCaseFunction, IReporter)
done.append(1)

@ -1,60 +0,0 @@
import attr
class PytestWarning(UserWarning):
"""
Bases: :class:`UserWarning`.
Base class for all warnings emitted by pytest.
"""
class PytestDeprecationWarning(PytestWarning, DeprecationWarning):
"""
Bases: :class:`pytest.PytestWarning`, :class:`DeprecationWarning`.
Warning class for features that will be removed in a future version.
"""
class RemovedInPytest4Warning(PytestDeprecationWarning):
"""
Bases: :class:`pytest.PytestDeprecationWarning`.
Warning class for features scheduled to be removed in pytest 4.0.
"""
class PytestExperimentalApiWarning(PytestWarning, FutureWarning):
"""
Bases: :class:`pytest.PytestWarning`, :class:`FutureWarning`.
Warning category used to denote experiments in pytest. Use sparingly as the API might change or even be
removed completely in a future version.
"""
@classmethod
def simple(cls, apiname):
return cls(
"{apiname} is an experimental api that may change over time".format(
apiname=apiname
)
)
@attr.s
class UnformattedWarning(object):
"""Used to hold warnings that need to format their message at runtime, as opposed to a direct message.
Using this class avoids having to keep all the warning types and messages in this module, and helps prevent misuse.
"""
category = attr.ib()
template = attr.ib()
def format(self, **kwargs):
"""Returns an instance of the warning category, formatted with given kwargs"""
return self.category(self.template.format(**kwargs))
PYTESTER_COPY_EXAMPLE = PytestExperimentalApiWarning.simple("testdir.copy_example")
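# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical example of declaring a deferred-format warning with
# UnformattedWarning and emitting it; the warning name and message template are
# assumptions made for this example.
import warnings as _warnings
_EXAMPLE_DEPRECATION = UnformattedWarning(
    PytestDeprecationWarning,
    "{name} is deprecated, use {replacement} instead",
)
def _example_emit_warning():
    # format() builds a concrete PytestDeprecationWarning from the template.
    _warnings.warn(
        _EXAMPLE_DEPRECATION.format(name="old_api", replacement="new_api"),
        stacklevel=2,
    )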

@ -1,173 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import warnings
from contextlib import contextmanager
import pytest
from _pytest import compat
def _setoption(wmod, arg):
"""
Copy of the warnings._setoption function, but does not escape arguments.
"""
parts = arg.split(":")
if len(parts) > 5:
raise wmod._OptionError("too many fields (max 5): %r" % (arg,))
while len(parts) < 5:
parts.append("")
action, message, category, module, lineno = [s.strip() for s in parts]
action = wmod._getaction(action)
category = wmod._getcategory(category)
if lineno:
try:
lineno = int(lineno)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise wmod._OptionError("invalid lineno %r" % (lineno,))
else:
lineno = 0
wmod.filterwarnings(action, message, category, module, lineno)
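# --- Illustrative sketch (not part of the original module) -------------------
# The same "action:message:category:module:lineno" syntax parsed by
# _setoption() above, applied through the stdlib warnings module; the message
# text is an assumption made for this example. Trailing fields may be omitted.
def _example_setoption_usage():
    _setoption(warnings, "ignore:old API is deprecated:DeprecationWarning")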
def pytest_addoption(parser):
group = parser.getgroup("pytest-warnings")
group.addoption(
"-W",
"--pythonwarnings",
action="append",
help="set which warnings to report, see -W option of python itself.",
)
parser.addini(
"filterwarnings",
type="linelist",
help="Each line specifies a pattern for "
"warnings.filterwarnings. "
"Processed after -W and --pythonwarnings.",
)
def pytest_configure(config):
config.addinivalue_line(
"markers",
"filterwarnings(warning): add a warning filter to the given test. "
"see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings ",
)
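# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical test using the filterwarnings marker registered above to
# silence one specific DeprecationWarning; it accepts the same filter syntax
# as the filterwarnings ini option.
@pytest.mark.filterwarnings("ignore:old API is deprecated:DeprecationWarning")
def _example_test_calls_deprecated_api():
    warnings.warn("old API is deprecated", DeprecationWarning)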
@contextmanager
def catch_warnings_for_item(config, ihook, when, item):
"""
Context manager that catches warnings generated in the contained execution block.
``item`` can be None if we are not in the context of an item execution.
Each warning captured triggers the ``pytest_warning_captured`` hook.
"""
cmdline_filters = config.getoption("pythonwarnings") or []
inifilters = config.getini("filterwarnings")
with warnings.catch_warnings(record=True) as log:
if not sys.warnoptions:
# if user is not explicitly configuring warning filters, show deprecation warnings by default (#2908)
warnings.filterwarnings("always", category=DeprecationWarning)
warnings.filterwarnings("always", category=PendingDeprecationWarning)
# filters should have this precedence: mark, cmdline options, ini
# filters should be applied in the inverse order of precedence
for arg in inifilters:
_setoption(warnings, arg)
for arg in cmdline_filters:
warnings._setoption(arg)
if item is not None:
for mark in item.iter_markers(name="filterwarnings"):
for arg in mark.args:
_setoption(warnings, arg)
yield
for warning_message in log:
ihook.pytest_warning_captured.call_historic(
kwargs=dict(warning_message=warning_message, when=when, item=item)
)
def warning_record_to_str(warning_message):
"""Convert a warnings.WarningMessage to a string, taking in account a lot of unicode shenaningans in Python 2.
When Python 2 support is dropped this function can be greatly simplified.
"""
warn_msg = warning_message.message
unicode_warning = False
if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args):
new_args = []
for m in warn_msg.args:
new_args.append(
compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m
)
unicode_warning = list(warn_msg.args) != new_args
warn_msg.args = new_args
msg = warnings.formatwarning(
warn_msg,
warning_message.category,
warning_message.filename,
warning_message.lineno,
warning_message.line,
)
if unicode_warning:
warnings.warn(
"Warning is using unicode non convertible to ascii, "
"converting to a safe representation:\n {!r}".format(compat.safe_str(msg)),
UnicodeWarning,
)
return msg
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_protocol(item):
with catch_warnings_for_item(
config=item.config, ihook=item.ihook, when="runtest", item=item
):
yield
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_collection(session):
config = session.config
with catch_warnings_for_item(
config=config, ihook=config.hook, when="collect", item=None
):
yield
@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(terminalreporter):
config = terminalreporter.config
with catch_warnings_for_item(
config=config, ihook=config.hook, when="config", item=None
):
yield
def _issue_config_warning(warning, config):
"""
This function should be used instead of calling ``warnings.warn`` directly when we are in the "configure" stage:
at this point the actual options might not have been set, so we manually trigger the pytest_warning_captured
hook so we can display these warnings in the terminal. This is a hack until we can sort out #2891.
:param warning: the warning instance.
:param config:
"""
with warnings.catch_warnings(record=True) as records:
warnings.simplefilter("always", type(warning))
warnings.warn(warning, stacklevel=2)
config.hook.pytest_warning_captured.call_historic(
kwargs=dict(warning_message=records[0], when="config", item=None)
)
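# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical plugin hook routing a config-time warning through
# _issue_config_warning so it reaches pytest_warning_captured; the warning text
# and hook body are assumptions made for this example.
def _example_plugin_configure(config):
    from _pytest.warning_types import PytestWarning
    _issue_config_warning(
        PytestWarning("example plugin is running in compatibility mode"), config
    )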

@ -1,227 +0,0 @@
.. image:: https://secure.travis-ci.org/ActiveState/appdirs.png
:target: http://travis-ci.org/ActiveState/appdirs
the problem
===========
What directory should your app use for storing user data? If running on Mac OS X, you
should use::
~/Library/Application Support/<AppName>
If on Windows (at least English Win XP) that should be::
C:\Documents and Settings\<User>\Application Data\Local Settings\<AppAuthor>\<AppName>
or possibly::
C:\Documents and Settings\<User>\Application Data\<AppAuthor>\<AppName>
for `roaming profiles <http://bit.ly/9yl3b6>`_ but that is another story.
On Linux (and other Unices) the dir, according to the `XDG
spec <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_, is::
~/.local/share/<AppName>
``appdirs`` to the rescue
=========================
This kind of thing is what the ``appdirs`` module is for. ``appdirs`` will
help you choose an appropriate:
- user data dir (``user_data_dir``)
- user config dir (``user_config_dir``)
- user cache dir (``user_cache_dir``)
- site data dir (``site_data_dir``)
- site config dir (``site_config_dir``)
- user log dir (``user_log_dir``)
and also:
- is a single module so other Python packages can include their own private copy
- is slightly opinionated on the directory names used. Look for "OPINION" in
documentation and code for when an opinion is being applied.
some example output
===================
On Mac OS X::
>>> from appdirs import *
>>> appname = "SuperApp"
>>> appauthor = "Acme"
>>> user_data_dir(appname, appauthor)
'/Users/trentm/Library/Application Support/SuperApp'
>>> site_data_dir(appname, appauthor)
'/Library/Application Support/SuperApp'
>>> user_cache_dir(appname, appauthor)
'/Users/trentm/Library/Caches/SuperApp'
>>> user_log_dir(appname, appauthor)
'/Users/trentm/Library/Logs/SuperApp'
On Windows 7::
>>> from appdirs import *
>>> appname = "SuperApp"
>>> appauthor = "Acme"
>>> user_data_dir(appname, appauthor)
'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp'
>>> user_data_dir(appname, appauthor, roaming=True)
'C:\\Users\\trentm\\AppData\\Roaming\\Acme\\SuperApp'
>>> user_cache_dir(appname, appauthor)
'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Cache'
>>> user_log_dir(appname, appauthor)
'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Logs'
On Linux::
>>> from appdirs import *
>>> appname = "SuperApp"
>>> appauthor = "Acme"
>>> user_data_dir(appname, appauthor)
'/home/trentm/.local/share/SuperApp'
>>> site_data_dir(appname, appauthor)
'/usr/local/share/SuperApp'
>>> site_data_dir(appname, appauthor, multipath=True)
'/usr/local/share/SuperApp:/usr/share/SuperApp'
>>> user_cache_dir(appname, appauthor)
'/home/trentm/.cache/SuperApp'
>>> user_log_dir(appname, appauthor)
'/home/trentm/.cache/SuperApp/log'
>>> user_config_dir(appname)
'/home/trentm/.config/SuperApp'
>>> site_config_dir(appname)
'/etc/xdg/SuperApp'
>>> os.environ['XDG_CONFIG_DIRS'] = '/etc:/usr/local/etc'
>>> site_config_dir(appname, multipath=True)
'/etc/SuperApp:/usr/local/etc/SuperApp'
``AppDirs`` for convenience
===========================
::
>>> from appdirs import AppDirs
>>> dirs = AppDirs("SuperApp", "Acme")
>>> dirs.user_data_dir
'/Users/trentm/Library/Application Support/SuperApp'
>>> dirs.site_data_dir
'/Library/Application Support/SuperApp'
>>> dirs.user_cache_dir
'/Users/trentm/Library/Caches/SuperApp'
>>> dirs.user_log_dir
'/Users/trentm/Library/Logs/SuperApp'
Per-version isolation
=====================
If you have multiple versions of your app in use that you want to be
able to run side-by-side, then you may want version-isolation for these
dirs::
>>> from appdirs import AppDirs
>>> dirs = AppDirs("SuperApp", "Acme", version="1.0")
>>> dirs.user_data_dir
'/Users/trentm/Library/Application Support/SuperApp/1.0'
>>> dirs.site_data_dir
'/Library/Application Support/SuperApp/1.0'
>>> dirs.user_cache_dir
'/Users/trentm/Library/Caches/SuperApp/1.0'
>>> dirs.user_log_dir
'/Users/trentm/Library/Logs/SuperApp/1.0'
appdirs Changelog
=================
appdirs 1.4.3
-------------
- [PR #76] Python 3.6 invalid escape sequence deprecation fixes
- Fix for Python 3.6 support
appdirs 1.4.2
-------------
- [PR #84] Allow installing without setuptools
- [PR #86] Fix string delimiters in setup.py description
- Add Python 3.6 support
appdirs 1.4.1
-------------
- [issue #38] Fix _winreg import on Windows Py3
- [issue #55] Make appname optional
appdirs 1.4.0
-------------
- [PR #42] AppAuthor is now optional on Windows
- [issue 41] Support Jython on Windows, Mac, and Unix-like platforms. Windows
support requires `JNA <https://github.com/twall/jna>`_.
- [PR #44] Fix incorrect behaviour of the site_config_dir method
appdirs 1.3.0
-------------
- [Unix, issue 16] Conform to XDG standard, instead of breaking it for
everybody
- [Unix] Removes gratuitous case mangling, since \*nix-es are
usually case sensitive, so mangling is not wise
- [Unix] Fixes the utterly wrong behaviour in ``site_data_dir``, return result
based on XDG_DATA_DIRS and make room for respecting the standard which
specifies XDG_DATA_DIRS is a multiple-value variable
- [Issue 6] Add ``*_config_dir`` which are distinct on nix-es, according to
XDG specs; on Windows and Mac return the corresponding ``*_data_dir``
appdirs 1.2.0
-------------
- [Unix] Put ``user_log_dir`` under the *cache* dir on Unix. Seems to be more
typical.
- [issue 9] Make ``unicode`` work on py3k.
appdirs 1.1.0
-------------
- [issue 4] Add ``AppDirs.user_log_dir``.
- [Unix, issue 2, issue 7] appdirs now conforms to `XDG base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
- [Mac, issue 5] Fix ``site_data_dir()`` on Mac.
- [Mac] Drop use of 'Carbon' module in favour of hardcoded paths; supports
Python3 now.
- [Windows] Append "Cache" to ``user_cache_dir`` on Windows by default. Use
``opinion=False`` option to disable this.
- Add ``appdirs.AppDirs`` convenience class. Usage:
>>> dirs = AppDirs("SuperApp", "Acme", version="1.0")
>>> dirs.user_data_dir
'/Users/trentm/Library/Application Support/SuperApp/1.0'
- [Windows] Cherry-pick Komodo's change to downgrade paths to the Windows short
paths if there are high bit chars.
- [Linux] Change default ``user_cache_dir()`` on Linux to be singular, e.g.
"~/.superapp/cache".
- [Windows] Add ``roaming`` option to ``user_data_dir()`` (for use on Windows only)
and change the default ``user_data_dir`` behaviour to use a *non*-roaming
profile dir (``CSIDL_LOCAL_APPDATA`` instead of ``CSIDL_APPDATA``). Why? Because
a large roaming profile can cause login speed issues. The "only syncs on
logout" behaviour can cause surprises in appdata info.
appdirs 1.0.1 (never released)
------------------------------
Started this changelog 27 July 2010. Before that this module originated in the
`Komodo <http://www.activestate.com/komodo>`_ product as ``applib.py`` and then
as `applib/location.py
<http://github.com/ActiveState/applib/blob/master/applib/location.py>`_ (used by
`PyPM <http://code.activestate.com/pypm/>`_ in `ActivePython
<http://www.activestate.com/activepython>`_). This is basically a fork of
applib.py 1.0.1 and applib/location.py 1.0.1.

@ -1,254 +0,0 @@
Metadata-Version: 2.0
Name: appdirs
Version: 1.4.3
Summary: A small Python module for determining appropriate platform-specific dirs, e.g. a "user data dir".
Home-page: http://github.com/ActiveState/appdirs
Author: Trent Mick; Sridhar Ratnakumar; Jeff Rouse
Author-email: trentm@gmail.com; github@srid.name; jr@its.to
License: MIT
Keywords: application directory log cache user
Platform: UNKNOWN
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.2
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Topic :: Software Development :: Libraries :: Python Modules
.. image:: https://secure.travis-ci.org/ActiveState/appdirs.png
:target: http://travis-ci.org/ActiveState/appdirs
the problem
===========
What directory should your app use for storing user data? If running on Mac OS X, you
should use::
~/Library/Application Support/<AppName>
If on Windows (at least English Win XP) that should be::
C:\Documents and Settings\<User>\Application Data\Local Settings\<AppAuthor>\<AppName>
or possibly::
C:\Documents and Settings\<User>\Application Data\<AppAuthor>\<AppName>
for `roaming profiles <http://bit.ly/9yl3b6>`_ but that is another story.
On Linux (and other Unices) the dir, according to the `XDG
spec <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_, is::
~/.local/share/<AppName>
``appdirs`` to the rescue
=========================
This kind of thing is what the ``appdirs`` module is for. ``appdirs`` will
help you choose an appropriate:
- user data dir (``user_data_dir``)
- user config dir (``user_config_dir``)
- user cache dir (``user_cache_dir``)
- site data dir (``site_data_dir``)
- site config dir (``site_config_dir``)
- user log dir (``user_log_dir``)
and also:
- is a single module so other Python packages can include their own private copy
- is slightly opinionated on the directory names used. Look for "OPINION" in
documentation and code for when an opinion is being applied.
some example output
===================
On Mac OS X::
>>> from appdirs import *
>>> appname = "SuperApp"
>>> appauthor = "Acme"
>>> user_data_dir(appname, appauthor)
'/Users/trentm/Library/Application Support/SuperApp'
>>> site_data_dir(appname, appauthor)
'/Library/Application Support/SuperApp'
>>> user_cache_dir(appname, appauthor)
'/Users/trentm/Library/Caches/SuperApp'
>>> user_log_dir(appname, appauthor)
'/Users/trentm/Library/Logs/SuperApp'
On Windows 7::
>>> from appdirs import *
>>> appname = "SuperApp"
>>> appauthor = "Acme"
>>> user_data_dir(appname, appauthor)
'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp'
>>> user_data_dir(appname, appauthor, roaming=True)
'C:\\Users\\trentm\\AppData\\Roaming\\Acme\\SuperApp'
>>> user_cache_dir(appname, appauthor)
'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Cache'
>>> user_log_dir(appname, appauthor)
'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Logs'
On Linux::
>>> from appdirs import *
>>> appname = "SuperApp"
>>> appauthor = "Acme"
>>> user_data_dir(appname, appauthor)
'/home/trentm/.local/share/SuperApp'
>>> site_data_dir(appname, appauthor)
'/usr/local/share/SuperApp'
>>> site_data_dir(appname, appauthor, multipath=True)
'/usr/local/share/SuperApp:/usr/share/SuperApp'
>>> user_cache_dir(appname, appauthor)
'/home/trentm/.cache/SuperApp'
>>> user_log_dir(appname, appauthor)
'/home/trentm/.cache/SuperApp/log'
>>> user_config_dir(appname)
'/home/trentm/.config/SuperApp'
>>> site_config_dir(appname)
'/etc/xdg/SuperApp'
>>> os.environ['XDG_CONFIG_DIRS'] = '/etc:/usr/local/etc'
>>> site_config_dir(appname, multipath=True)
'/etc/SuperApp:/usr/local/etc/SuperApp'
``AppDirs`` for convenience
===========================
::
>>> from appdirs import AppDirs
>>> dirs = AppDirs("SuperApp", "Acme")
>>> dirs.user_data_dir
'/Users/trentm/Library/Application Support/SuperApp'
>>> dirs.site_data_dir
'/Library/Application Support/SuperApp'
>>> dirs.user_cache_dir
'/Users/trentm/Library/Caches/SuperApp'
>>> dirs.user_log_dir
'/Users/trentm/Library/Logs/SuperApp'
Per-version isolation
=====================
If you have multiple versions of your app in use that you want to be
able to run side-by-side, then you may want version-isolation for these
dirs::
>>> from appdirs import AppDirs
>>> dirs = AppDirs("SuperApp", "Acme", version="1.0")
>>> dirs.user_data_dir
'/Users/trentm/Library/Application Support/SuperApp/1.0'
>>> dirs.site_data_dir
'/Library/Application Support/SuperApp/1.0'
>>> dirs.user_cache_dir
'/Users/trentm/Library/Caches/SuperApp/1.0'
>>> dirs.user_log_dir
'/Users/trentm/Library/Logs/SuperApp/1.0'
appdirs Changelog
=================
appdirs 1.4.3
-------------
- [PR #76] Python 3.6 invalid escape sequence deprecation fixes
- Fix for Python 3.6 support
appdirs 1.4.2
-------------
- [PR #84] Allow installing without setuptools
- [PR #86] Fix string delimiters in setup.py description
- Add Python 3.6 support
appdirs 1.4.1
-------------
- [issue #38] Fix _winreg import on Windows Py3
- [issue #55] Make appname optional
appdirs 1.4.0
-------------
- [PR #42] AppAuthor is now optional on Windows
- [issue 41] Support Jython on Windows, Mac, and Unix-like platforms. Windows
support requires `JNA <https://github.com/twall/jna>`_.
- [PR #44] Fix incorrect behaviour of the site_config_dir method
appdirs 1.3.0
-------------
- [Unix, issue 16] Conform to XDG standard, instead of breaking it for
everybody
- [Unix] Removes gratuitous case mangling, since \*nix-es are
usually case sensitive, so mangling is not wise
- [Unix] Fixes the utterly wrong behaviour in ``site_data_dir``, return result
based on XDG_DATA_DIRS and make room for respecting the standard which
specifies XDG_DATA_DIRS is a multiple-value variable
- [Issue 6] Add ``*_config_dir`` which are distinct on nix-es, according to
XDG specs; on Windows and Mac return the corresponding ``*_data_dir``
appdirs 1.2.0
-------------
- [Unix] Put ``user_log_dir`` under the *cache* dir on Unix. Seems to be more
typical.
- [issue 9] Make ``unicode`` work on py3k.
appdirs 1.1.0
-------------
- [issue 4] Add ``AppDirs.user_log_dir``.
- [Unix, issue 2, issue 7] appdirs now conforms to `XDG base directory spec
<http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
- [Mac, issue 5] Fix ``site_data_dir()`` on Mac.
- [Mac] Drop use of 'Carbon' module in favour of hardcoded paths; supports
Python3 now.
- [Windows] Append "Cache" to ``user_cache_dir`` on Windows by default. Use
``opinion=False`` option to disable this.
- Add ``appdirs.AppDirs`` convenience class. Usage:
>>> dirs = AppDirs("SuperApp", "Acme", version="1.0")
>>> dirs.user_data_dir
'/Users/trentm/Library/Application Support/SuperApp/1.0'
- [Windows] Cherry-pick Komodo's change to downgrade paths to the Windows short
paths if there are high bit chars.
- [Linux] Change default ``user_cache_dir()`` on Linux to be singular, e.g.
"~/.superapp/cache".
- [Windows] Add ``roaming`` option to ``user_data_dir()`` (for use on Windows only)
and change the default ``user_data_dir`` behaviour to use a *non*-roaming
profile dir (``CSIDL_LOCAL_APPDATA`` instead of ``CSIDL_APPDATA``). Why? Because
a large roaming profile can cause login speed issues. The "only syncs on
logout" behaviour can cause surprises in appdata info.
appdirs 1.0.1 (never released)
------------------------------
Started this changelog 27 July 2010. Before that this module originated in the
`Komodo <http://www.activestate.com/komodo>`_ product as ``applib.py`` and then
as `applib/location.py
<http://github.com/ActiveState/applib/blob/master/applib/location.py>`_ (used by
`PyPM <http://code.activestate.com/pypm/>`_ in `ActivePython
<http://www.activestate.com/activepython>`_). This is basically a fork of
applib.py 1.0.1 and applib/location.py 1.0.1.

@ -1,9 +0,0 @@
__pycache__/appdirs.cpython-38.pyc,,
appdirs-1.4.3.dist-info/DESCRIPTION.rst,sha256=77Fe8OIOLSjDSNdLiL5xywMKO-AGE42rdXkqKo4Ee-k,7531
appdirs-1.4.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
appdirs-1.4.3.dist-info/METADATA,sha256=3IFw6jTfImdOqsCb2GYvVR157tL7KEzfRAszn382csk,8773
appdirs-1.4.3.dist-info/RECORD,,
appdirs-1.4.3.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110
appdirs-1.4.3.dist-info/metadata.json,sha256=fL_Q-GuFJu3PJxMrwU7SdsI8RGqjIfi2AvouCSF5DSA,1359
appdirs-1.4.3.dist-info/top_level.txt,sha256=nKncE8CUqZERJ6VuQWL4_bkunSPDNfn7KZqb4Tr5YEM,8
appdirs.py,sha256=MievUEuv3l_mQISH5SF0shDk_BNhHHzYiAPrT3ITN4I,24701

@ -1,6 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.29.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

@ -1 +0,0 @@
{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.details": {"contacts": [{"email": "trentm@gmail.com; github@srid.name; jr@its.to", "name": "Trent Mick; Sridhar Ratnakumar; Jeff Rouse", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/ActiveState/appdirs"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["application", "directory", "log", "cache", "user"], "license": "MIT", "metadata_version": "2.0", "name": "appdirs", "summary": "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\".", "test_requires": [{"requires": []}], "version": "1.4.3"}
