diff --git a/venv/bin/Activate.ps1 b/venv/bin/Activate.ps1 deleted file mode 100644 index 900f50e8..00000000 --- a/venv/bin/Activate.ps1 +++ /dev/null @@ -1,230 +0,0 @@ -<# -.Synopsis -Activate a Python virtual environment for the current Powershell session. - -.Description -Pushes the python executable for a virtual environment to the front of the -$Env:PATH environment variable and sets the prompt to signify that you are -in a Python virtual environment. Makes use of the command line switches as -well as the `pyvenv.cfg` file values present in the virtual environment. - -.Parameter VenvDir -Path to the directory that contains the virtual environment to activate. The -default value for this is the parent of the directory that the Activate.ps1 -script is located within. - -.Parameter Prompt -The prompt prefix to display when this virtual environment is activated. By -default, this prompt is the name of the virtual environment folder (VenvDir) -surrounded by parentheses and followed by a single space (ie. '(.venv) '). - -.Example -Activate.ps1 -Activates the Python virtual environment that contains the Activate.ps1 script. - -.Example -Activate.ps1 -Verbose -Activates the Python virtual environment that contains the Activate.ps1 script, -and shows extra information about the activation as it executes. - -.Example -Activate.ps1 -VenvDir C:\Users\MyUser\Common\.venv -Activates the Python virtual environment located in the specified location. - -.Example -Activate.ps1 -Prompt "MyPython" -Activates the Python virtual environment that contains the Activate.ps1 script, -and prefixes the current prompt with the specified string (surrounded in -parentheses) while the virtual environment is active. - - -#> -Param( - [Parameter(Mandatory = $false)] - [String] - $VenvDir, - [Parameter(Mandatory = $false)] - [String] - $Prompt -) - -<# Function declarations --------------------------------------------------- #> - -<# -.Synopsis -Remove all shell session elements added by the Activate script, including the -addition of the virtual environment's Python executable from the beginning of -the PATH variable. - -.Parameter NonDestructive -If present, do not remove this function from the global namespace for the -session. 
- -#> -function global:deactivate ([switch]$NonDestructive) { - # Revert to original values - - # The prior prompt: - if (Test-Path -Path Function:_OLD_VIRTUAL_PROMPT) { - Copy-Item -Path Function:_OLD_VIRTUAL_PROMPT -Destination Function:prompt - Remove-Item -Path Function:_OLD_VIRTUAL_PROMPT - } - - # The prior PYTHONHOME: - if (Test-Path -Path Env:_OLD_VIRTUAL_PYTHONHOME) { - Copy-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME -Destination Env:PYTHONHOME - Remove-Item -Path Env:_OLD_VIRTUAL_PYTHONHOME - } - - # The prior PATH: - if (Test-Path -Path Env:_OLD_VIRTUAL_PATH) { - Copy-Item -Path Env:_OLD_VIRTUAL_PATH -Destination Env:PATH - Remove-Item -Path Env:_OLD_VIRTUAL_PATH - } - - # Just remove the VIRTUAL_ENV altogether: - if (Test-Path -Path Env:VIRTUAL_ENV) { - Remove-Item -Path env:VIRTUAL_ENV - } - - # Just remove the _PYTHON_VENV_PROMPT_PREFIX altogether: - if (Get-Variable -Name "_PYTHON_VENV_PROMPT_PREFIX" -ErrorAction SilentlyContinue) { - Remove-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Scope Global -Force - } - - # Leave deactivate function in the global namespace if requested: - if (-not $NonDestructive) { - Remove-Item -Path function:deactivate - } -} - -<# -.Description -Get-PyVenvConfig parses the values from the pyvenv.cfg file located in the -given folder, and returns them in a map. - -For each line in the pyvenv.cfg file, if that line can be parsed into exactly -two strings separated by `=` (with any amount of whitespace surrounding the =) -then it is considered a `key = value` line. The left hand string is the key, -the right hand is the value. - -If the value starts with a `'` or a `"` then the first and last character is -stripped from the value before being captured. - -.Parameter ConfigDir -Path to the directory that contains the `pyvenv.cfg` file. -#> -function Get-PyVenvConfig( - [String] - $ConfigDir -) { - Write-Verbose "Given ConfigDir=$ConfigDir, obtain values in pyvenv.cfg" - - # Ensure the file exists, and issue a warning if it doesn't (but still allow the function to continue). - $pyvenvConfigPath = Join-Path -Resolve -Path $ConfigDir -ChildPath 'pyvenv.cfg' -ErrorAction Continue - - # An empty map will be returned if no config file is found. - $pyvenvConfig = @{ } - - if ($pyvenvConfigPath) { - - Write-Verbose "File exists, parse `key = value` lines" - $pyvenvConfigContent = Get-Content -Path $pyvenvConfigPath - - $pyvenvConfigContent | ForEach-Object { - $keyval = $PSItem -split "\s*=\s*", 2 - if ($keyval[0] -and $keyval[1]) { - $val = $keyval[1] - - # Remove extraneous quotations around a string value. - if ("'""".Contains($val.Substring(0,1))) { - $val = $val.Substring(1, $val.Length - 2) - } - - $pyvenvConfig[$keyval[0]] = $val - Write-Verbose "Adding Key: '$($keyval[0])'='$val'" - } - } - } - return $pyvenvConfig -} - - -<# Begin Activate script --------------------------------------------------- #> - -# Determine the containing directory of this script -$VenvExecPath = Split-Path -Parent $MyInvocation.MyCommand.Definition -$VenvExecDir = Get-Item -Path $VenvExecPath - -Write-Verbose "Activation script is located in path: '$VenvExecPath'" -Write-Verbose "VenvExecDir Fullname: '$($VenvExecDir.FullName)" -Write-Verbose "VenvExecDir Name: '$($VenvExecDir.Name)" - -# Set values required in priority: CmdLine, ConfigFile, Default -# First, get the location of the virtual environment, it might not be -# VenvExecDir if specified on the command line. 
if ($VenvDir) { - Write-Verbose "VenvDir given as parameter, using '$VenvDir' to determine values" -} else { - Write-Verbose "VenvDir not given as a parameter, using parent directory name as VenvDir." - $VenvDir = $VenvExecDir.Parent.FullName.TrimEnd("\\/") - Write-Verbose "VenvDir=$VenvDir" -} - -# Next, read the `pyvenv.cfg` file to determine any required value such -# as `prompt`. -$pyvenvCfg = Get-PyVenvConfig -ConfigDir $VenvDir - -# Next, set the prompt from the command line, or the config file, or -# just use the name of the virtual environment folder. -if ($Prompt) { - Write-Verbose "Prompt specified as argument, using '$Prompt'" -} else { - Write-Verbose "Prompt not specified as argument to script, checking pyvenv.cfg value" - if ($pyvenvCfg -and $pyvenvCfg['prompt']) { - Write-Verbose " Setting based on value in pyvenv.cfg='$($pyvenvCfg['prompt'])'" - $Prompt = $pyvenvCfg['prompt']; - } - else { - Write-Verbose " Setting prompt based on parent directory's name. (Is the directory name passed to venv module when creating the virtual environment)" - Write-Verbose " Got leaf-name of $VenvDir='$(Split-Path -Path $venvDir -Leaf)'" - $Prompt = Split-Path -Path $venvDir -Leaf - } -} - -Write-Verbose "Prompt = '$Prompt'" -Write-Verbose "VenvDir='$VenvDir'" - -# Deactivate any currently active virtual environment, but leave the -# deactivate function in place. -deactivate -nondestructive - -# Now set the environment variable VIRTUAL_ENV, used by many tools to determine -# that there is an activated venv. -$env:VIRTUAL_ENV = $VenvDir - -if (-not $Env:VIRTUAL_ENV_DISABLE_PROMPT) { - - Write-Verbose "Setting prompt to '$Prompt'" - - # Set the prompt to include the env name - # Make sure _OLD_VIRTUAL_PROMPT is global - function global:_OLD_VIRTUAL_PROMPT { "" } - Copy-Item -Path function:prompt -Destination function:_OLD_VIRTUAL_PROMPT - New-Variable -Name _PYTHON_VENV_PROMPT_PREFIX -Description "Python virtual environment prompt prefix" -Scope Global -Option ReadOnly -Visibility Public -Value $Prompt - - function global:prompt { - Write-Host -NoNewline -ForegroundColor Green "($_PYTHON_VENV_PROMPT_PREFIX) " - _OLD_VIRTUAL_PROMPT - } -} - -# Clear PYTHONHOME -if (Test-Path -Path Env:PYTHONHOME) { - Copy-Item -Path Env:PYTHONHOME -Destination Env:_OLD_VIRTUAL_PYTHONHOME - Remove-Item -Path Env:PYTHONHOME -} - -# Add the venv to the PATH -Copy-Item -Path Env:PATH -Destination Env:_OLD_VIRTUAL_PATH -$Env:PATH = "$VenvExecDir$([System.IO.Path]::PathSeparator)$Env:PATH" diff --git a/venv/bin/activate b/venv/bin/activate deleted file mode 100644 index cb39b7e1..00000000 --- a/venv/bin/activate +++ /dev/null @@ -1,76 +0,0 @@ -# This file must be used with "source bin/activate" *from bash* -# you cannot run it directly - -deactivate () { - # reset old environment variables - if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then - PATH="${_OLD_VIRTUAL_PATH:-}" - export PATH - unset _OLD_VIRTUAL_PATH - fi - if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then - PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" - export PYTHONHOME - unset _OLD_VIRTUAL_PYTHONHOME - fi - - # This should detect bash and zsh, which have a hash command that must - # be called to get it to forget past commands. Without forgetting - # past commands the $PATH changes we made may not be respected - if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then - hash -r - fi - - if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then - PS1="${_OLD_VIRTUAL_PS1:-}" - export PS1 - unset _OLD_VIRTUAL_PS1 - fi - - unset VIRTUAL_ENV - if [ !
"${1:-}" = "nondestructive" ] ; then - # Self destruct! - unset -f deactivate - fi -} - -# unset irrelevant variables -deactivate nondestructive - -VIRTUAL_ENV="/home/nicolas/Projects/diagrams/venv" -export VIRTUAL_ENV - -_OLD_VIRTUAL_PATH="$PATH" -PATH="$VIRTUAL_ENV/bin:$PATH" -export PATH - -# unset PYTHONHOME if set -# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) -# could use `if (set -u; : $PYTHONHOME) ;` in bash -if [ -n "${PYTHONHOME:-}" ] ; then - _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" - unset PYTHONHOME -fi - -if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then - _OLD_VIRTUAL_PS1="${PS1:-}" - if [ "x(venv) " != x ] ; then - PS1="(venv) ${PS1:-}" - else - if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1" - else - PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1" - fi - fi - export PS1 -fi - -# This should detect bash and zsh, which have a hash command that must -# be called to get it to forget past commands. Without forgetting -# past commands the $PATH changes we made may not be respected -if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then - hash -r -fi diff --git a/venv/bin/activate.csh b/venv/bin/activate.csh deleted file mode 100644 index 97d94f37..00000000 --- a/venv/bin/activate.csh +++ /dev/null @@ -1,37 +0,0 @@ -# This file must be used with "source bin/activate.csh" *from csh*. -# You cannot run it directly. -# Created by Davide Di Blasi <davidedb@gmail.com>. -# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com> - -alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate' - -# Unset irrelevant variables. -deactivate nondestructive - -setenv VIRTUAL_ENV "/home/nicolas/Projects/diagrams/venv" - -set _OLD_VIRTUAL_PATH="$PATH" -setenv PATH "$VIRTUAL_ENV/bin:$PATH" - - -set _OLD_VIRTUAL_PROMPT="$prompt" - -if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then - if ("venv" != "") then - set env_name = "venv" - else - if (`basename "$VIRTUAL_ENV"` == "__") then - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - set env_name = `basename \`dirname "$VIRTUAL_ENV"\`` - else - set env_name = `basename "$VIRTUAL_ENV"` - endif - endif - set prompt = "[$env_name] $prompt" - unset env_name -endif - -alias pydoc python -m pydoc - -rehash diff --git a/venv/bin/activate.fish b/venv/bin/activate.fish deleted file mode 100644 index b521dd66..00000000 --- a/venv/bin/activate.fish +++ /dev/null @@ -1,75 +0,0 @@ -# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org) -# you cannot run it directly - -function deactivate -d "Exit virtualenv and return to normal shell environment" - # reset old environment variables - if test -n "$_OLD_VIRTUAL_PATH" - set -gx PATH $_OLD_VIRTUAL_PATH - set -e _OLD_VIRTUAL_PATH - end - if test -n "$_OLD_VIRTUAL_PYTHONHOME" - set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME - set -e _OLD_VIRTUAL_PYTHONHOME - end - - if test -n "$_OLD_FISH_PROMPT_OVERRIDE" - functions -e fish_prompt - set -e _OLD_FISH_PROMPT_OVERRIDE - functions -c _old_fish_prompt fish_prompt - functions -e _old_fish_prompt - end - - set -e VIRTUAL_ENV - if test "$argv[1]" != "nondestructive" - # Self destruct!
- functions -e deactivate - end -end - -# unset irrelevant variables -deactivate nondestructive - -set -gx VIRTUAL_ENV "/home/nicolas/Projects/diagrams/venv" - -set -gx _OLD_VIRTUAL_PATH $PATH -set -gx PATH "$VIRTUAL_ENV/bin" $PATH - -# unset PYTHONHOME if set -if set -q PYTHONHOME - set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME - set -e PYTHONHOME -end - -if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" - # fish uses a function instead of an env var to generate the prompt. - - # save the current fish_prompt function as the function _old_fish_prompt - functions -c fish_prompt _old_fish_prompt - - # with the original prompt function renamed, we can override with our own. - function fish_prompt - # Save the return status of the last command - set -l old_status $status - - # Prompt override? - if test -n "(venv) " - printf "%s%s" "(venv) " (set_color normal) - else - # ...Otherwise, prepend env - set -l _checkbase (basename "$VIRTUAL_ENV") - if test $_checkbase = "__" - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal) - else - printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal) - end - end - - # Restore the return status of the previous command. - echo "exit $old_status" | . - _old_fish_prompt - end - - set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" -end diff --git a/venv/bin/black b/venv/bin/black deleted file mode 100755 index 3aaeac3b..00000000 --- a/venv/bin/black +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from black import patched_main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(patched_main()) diff --git a/venv/bin/blackd b/venv/bin/blackd deleted file mode 100755 index c754e389..00000000 --- a/venv/bin/blackd +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from blackd import patched_main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(patched_main()) diff --git a/venv/bin/easy_install b/venv/bin/easy_install deleted file mode 100755 index db5a4fd3..00000000 --- a/venv/bin/easy_install +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from setuptools.command.easy_install import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/easy_install-3.8 b/venv/bin/easy_install-3.8 deleted file mode 100755 index db5a4fd3..00000000 --- a/venv/bin/easy_install-3.8 +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from setuptools.command.easy_install import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/epylint b/venv/bin/epylint deleted file mode 100755 index 5feef070..00000000 --- a/venv/bin/epylint +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pylint import run_epylint - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - 
sys.exit(run_epylint()) diff --git a/venv/bin/isort b/venv/bin/isort deleted file mode 100755 index a4b62f20..00000000 --- a/venv/bin/isort +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from isort.main import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/pip b/venv/bin/pip deleted file mode 100755 index 001952e0..00000000 --- a/venv/bin/pip +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pip._internal import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/pip3 b/venv/bin/pip3 deleted file mode 100755 index 001952e0..00000000 --- a/venv/bin/pip3 +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pip._internal import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/pip3.8 b/venv/bin/pip3.8 deleted file mode 100755 index 001952e0..00000000 --- a/venv/bin/pip3.8 +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pip._internal import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/py.test b/venv/bin/py.test deleted file mode 100755 index c89fa1db..00000000 --- a/venv/bin/py.test +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pytest import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/pylint b/venv/bin/pylint deleted file mode 100755 index e37d693b..00000000 --- a/venv/bin/pylint +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pylint import run_pylint - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(run_pylint()) diff --git a/venv/bin/pyreverse b/venv/bin/pyreverse deleted file mode 100755 index f6bf2e9e..00000000 --- a/venv/bin/pyreverse +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pylint import run_pyreverse - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(run_pyreverse()) diff --git a/venv/bin/pytest b/venv/bin/pytest deleted file mode 100755 index c89fa1db..00000000 --- a/venv/bin/pytest +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pytest import main - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(main()) diff --git a/venv/bin/python b/venv/bin/python deleted file mode 120000 index b8a0adbb..00000000 --- a/venv/bin/python +++ /dev/null @@ -1 +0,0 @@ -python3 \ No newline at end of file diff --git a/venv/bin/python3 b/venv/bin/python3 deleted file mode 120000 index ae65fdaa..00000000 --- a/venv/bin/python3 +++ /dev/null @@ -1 
+0,0 @@ -/usr/bin/python3 \ No newline at end of file diff --git a/venv/bin/symilar b/venv/bin/symilar deleted file mode 100755 index c5654cf6..00000000 --- a/venv/bin/symilar +++ /dev/null @@ -1,10 +0,0 @@ -#!/home/nicolas/Projects/diagrams/venv/bin/python3 -# -*- coding: utf-8 -*- -import re -import sys - -from pylint import run_symilar - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(run_symilar()) diff --git a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/INSTALLER b/venv/lib/python3.8/site-packages/Click-7.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e3..00000000 --- a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/LICENSE.txt b/venv/lib/python3.8/site-packages/Click-7.0.dist-info/LICENSE.txt deleted file mode 100644 index 87ce152a..00000000 --- a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/LICENSE.txt +++ /dev/null @@ -1,39 +0,0 @@ -Copyright © 2014 by the Pallets team. - -Some rights reserved. - -Redistribution and use in source and binary forms of the software as -well as documentation, with or without modification, are permitted -provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -- Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND -CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, -BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT -NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF -USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE AND DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF -SUCH DAMAGE. - ----- - -Click uses parts of optparse written by Gregory P. Ward and maintained -by the Python Software Foundation. This is limited to code in parser.py. - -Copyright © 2001-2006 Gregory P. Ward. All rights reserved. -Copyright © 2002-2006 Python Software Foundation. All rights reserved. 
diff --git a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/METADATA b/venv/lib/python3.8/site-packages/Click-7.0.dist-info/METADATA deleted file mode 100644 index 625bdadd..00000000 --- a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/METADATA +++ /dev/null @@ -1,121 +0,0 @@ -Metadata-Version: 2.1 -Name: Click -Version: 7.0 -Summary: Composable command line interface toolkit -Home-page: https://palletsprojects.com/p/click/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -Maintainer: Pallets Team -Maintainer-email: contact@palletsprojects.com -License: BSD -Project-URL: Documentation, https://click.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/click -Project-URL: Issue tracker, https://github.com/pallets/click/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* - -\$ click\_ -========== - -Click is a Python package for creating beautiful command line interfaces -in a composable way with as little code as necessary. It's the "Command -Line Interface Creation Kit". It's highly configurable but comes with -sensible defaults out of the box. - -It aims to make the process of writing command line tools quick and fun -while also preventing any frustration caused by the inability to -implement an intended CLI API. - -Click in three points: - -- Arbitrary nesting of commands -- Automatic help page generation -- Supports lazy loading of subcommands at runtime - - -Installing ----------- - -Install and update using `pip`_: - -.. code-block:: text - - $ pip install click - -Click supports Python 3.4 and newer, Python 2.7, and PyPy. - -.. _pip: https://pip.pypa.io/en/stable/quickstart/ - - -A Simple Example ----------------- - -What does it look like? Here is an example of a simple Click program: - -.. code-block:: python - - import click - - @click.command() - @click.option("--count", default=1, help="Number of greetings.") - @click.option("--name", prompt="Your name", - help="The person to greet.") - def hello(count, name): - """Simple program that greets NAME for a total of COUNT times.""" - for _ in range(count): - click.echo("Hello, %s!" % name) - - if __name__ == '__main__': - hello() - -And what it looks like when run: - -.. code-block:: text - - $ python hello.py --count=3 - Your name: Click - Hello, Click! - Hello, Click! - Hello, Click! - - -Donate ------- - -The Pallets organization develops and supports Click and other popular -packages. In order to grow the community of contributors and users, and -allow the maintainers to devote more time to the projects, `please -donate today`_. - -.. 
_please donate today: https://palletsprojects.com/donate - - -Links ------ - -* Website: https://palletsprojects.com/p/click/ -* Documentation: https://click.palletsprojects.com/ -* License: `BSD `_ -* Releases: https://pypi.org/project/click/ -* Code: https://github.com/pallets/click -* Issue tracker: https://github.com/pallets/click/issues -* Test status: - - * Linux, Mac: https://travis-ci.org/pallets/click - * Windows: https://ci.appveyor.com/project/pallets/click - -* Test coverage: https://codecov.io/gh/pallets/click - - diff --git a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/RECORD b/venv/lib/python3.8/site-packages/Click-7.0.dist-info/RECORD deleted file mode 100644 index 768bf988..00000000 --- a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/RECORD +++ /dev/null @@ -1,40 +0,0 @@ -Click-7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -Click-7.0.dist-info/LICENSE.txt,sha256=4hIxn676T0Wcisk3_chVcECjyrivKTZsoqSNI5AlIlw,1876 -Click-7.0.dist-info/METADATA,sha256=-r8jeke3Zer4diRvT1MjFZuiJ6yTT_qFP39svLqdaLI,3516 -Click-7.0.dist-info/RECORD,, -Click-7.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 -Click-7.0.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6 -click/__init__.py,sha256=HjGThQ7tef9kkwCV371TBnrf0SAi6fKfU_jtEnbYTvQ,2789 -click/__pycache__/__init__.cpython-38.pyc,, -click/__pycache__/_bashcomplete.cpython-38.pyc,, -click/__pycache__/_compat.cpython-38.pyc,, -click/__pycache__/_termui_impl.cpython-38.pyc,, -click/__pycache__/_textwrap.cpython-38.pyc,, -click/__pycache__/_unicodefun.cpython-38.pyc,, -click/__pycache__/_winconsole.cpython-38.pyc,, -click/__pycache__/core.cpython-38.pyc,, -click/__pycache__/decorators.cpython-38.pyc,, -click/__pycache__/exceptions.cpython-38.pyc,, -click/__pycache__/formatting.cpython-38.pyc,, -click/__pycache__/globals.cpython-38.pyc,, -click/__pycache__/parser.cpython-38.pyc,, -click/__pycache__/termui.cpython-38.pyc,, -click/__pycache__/testing.cpython-38.pyc,, -click/__pycache__/types.cpython-38.pyc,, -click/__pycache__/utils.cpython-38.pyc,, -click/_bashcomplete.py,sha256=iaNUmtxag0YPfxba3TDYCNietiTMQIrvhRLj-H8okFU,11014 -click/_compat.py,sha256=vYmvoj4opPxo-c-2GMQQjYT_r_QkOKybkfGoeVrt0dA,23399 -click/_termui_impl.py,sha256=xHmLtOJhKUCVD6168yucJ9fknUJPAMs0eUTPgVUO-GQ,19611 -click/_textwrap.py,sha256=gwS4m7bdQiJnzaDG8osFcRb-5vn4t4l2qSCy-5csCEc,1198 -click/_unicodefun.py,sha256=QHy2_5jYlX-36O-JVrTHNnHOqg8tquUR0HmQFev7Ics,4364 -click/_winconsole.py,sha256=PPWVak8Iikm_gAPsxMrzwsVFCvHgaW3jPaDWZ1JBl3U,8965 -click/core.py,sha256=q8FLcDZsagBGSRe5Y9Hi_FGvAeZvusNfoO5EkhkSQ8Y,75305 -click/decorators.py,sha256=idKt6duLUUfAFftrHoREi8MJSd39XW36pUVHthdglwk,11226 -click/exceptions.py,sha256=CNpAjBAE7qjaV4WChxQeak95e5yUOau8AsvT-8m6wss,7663 -click/formatting.py,sha256=eh-cypTUAhpI3HD-K4ZpR3vCiURIO62xXvKkR3tNUTM,8889 -click/globals.py,sha256=oQkou3ZQ5DgrbVM6BwIBirwiqozbjfirzsLGAlLRRdg,1514 -click/parser.py,sha256=m-nGZz4VwprM42_qtFlWFGo7yRJQxkBlRcZodoH593Y,15510 -click/termui.py,sha256=o_ZXB2jyvL2Rce7P_bFGq452iyBq9ykJyRApIPMCZO0,23207 -click/testing.py,sha256=aYGqY_iWLu2p4k7lkuJ6t3fqpf6aPGqTsyLzNY_ngKg,13062 -click/types.py,sha256=2Q929p-aBP_ZYuMFJqJR-Ipucofv3fmDc5JzBDPmzJU,23287 -click/utils.py,sha256=6-D0WkAxvv9FkgHXSHwDIv0l9Gdx9Mm6Z5vuKNLIfZI,15763 diff --git a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/WHEEL b/venv/lib/python3.8/site-packages/Click-7.0.dist-info/WHEEL deleted file mode 100644 index 1316c41d..00000000 --- 
a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.31.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/top_level.txt b/venv/lib/python3.8/site-packages/Click-7.0.dist-info/top_level.txt deleted file mode 100644 index dca9a909..00000000 --- a/venv/lib/python3.8/site-packages/Click-7.0.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -click diff --git a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/INSTALLER b/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e3..00000000 --- a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/LICENSE.rst b/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/LICENSE.rst deleted file mode 100644 index c37cae49..00000000 --- a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/LICENSE.rst +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2007 Pallets - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/METADATA b/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/METADATA deleted file mode 100644 index 3d5bbb4d..00000000 --- a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/METADATA +++ /dev/null @@ -1,106 +0,0 @@ -Metadata-Version: 2.1 -Name: Jinja2 -Version: 2.11.1 -Summary: A very fast and expressive template engine. 
-Home-page: https://palletsprojects.com/p/jinja/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -Maintainer: Pallets -Maintainer-email: contact@palletsprojects.com -License: BSD-3-Clause -Project-URL: Documentation, https://jinja.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/jinja -Project-URL: Issue tracker, https://github.com/pallets/jinja/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup :: HTML -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.* -Description-Content-Type: text/x-rst -Requires-Dist: MarkupSafe (>=0.23) -Provides-Extra: i18n -Requires-Dist: Babel (>=0.8) ; extra == 'i18n' - -Jinja -===== - -Jinja is a fast, expressive, extensible templating engine. Special -placeholders in the template allow writing code similar to Python -syntax. Then the template is passed data to render the final document. - -It includes: - -- Template inheritance and inclusion. -- Define and import macros within templates. -- HTML templates can use autoescaping to prevent XSS from untrusted - user input. -- A sandboxed environment can safely render untrusted templates. -- AsyncIO support for generating templates and calling async - functions. -- I18N support with Babel. -- Templates are compiled to optimized Python code just-in-time and - cached, or can be compiled ahead-of-time. -- Exceptions point to the correct line in templates to make debugging - easier. -- Extensible filters, tests, functions, and even syntax. - -Jinja's philosophy is that while application logic belongs in Python if -possible, it shouldn't make the template designer's job difficult by -restricting functionality too much. - - -Installing ----------- - -Install and update using `pip`_: - -.. code-block:: text - - $ pip install -U Jinja2 - -.. _pip: https://pip.pypa.io/en/stable/quickstart/ - - -In A Nutshell -------------- - -.. 
code-block:: jinja - - {% extends "base.html" %} - {% block title %}Members{% endblock %} - {% block content %} - <ul> - {% for user in users %} - <li><a href="{{ user.url }}">{{ user.username }}</a></li> - {% endfor %} - </ul> - {% endblock %} - - -Links ----- - -- Website: https://palletsprojects.com/p/jinja/ -- Documentation: https://jinja.palletsprojects.com/ -- Releases: https://pypi.org/project/Jinja2/ -- Code: https://github.com/pallets/jinja -- Issue tracker: https://github.com/pallets/jinja/issues -- Test status: https://dev.azure.com/pallets/jinja/_build -- Official chat: https://discord.gg/t6rrQZH - - diff --git a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/RECORD b/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/RECORD deleted file mode 100644 index 13e7ee41..00000000 --- a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/RECORD +++ /dev/null @@ -1,61 +0,0 @@ -Jinja2-2.11.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -Jinja2-2.11.1.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475 -Jinja2-2.11.1.dist-info/METADATA,sha256=7e9_tz7RirTbxIeiHTSq3e5g6ddCjoym3o5vdlRLuxU,3535 -Jinja2-2.11.1.dist-info/RECORD,, -Jinja2-2.11.1.dist-info/WHEEL,sha256=hq9T7ntHzQqUTLUmJ2UVhPL-W4tJi3Yb2Lh5lMfs2mk,110 -Jinja2-2.11.1.dist-info/entry_points.txt,sha256=Qy_DkVo6Xj_zzOtmErrATe8lHZhOqdjpt3e4JJAGyi8,61 -Jinja2-2.11.1.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7 -jinja2/__init__.py,sha256=Nq1rzGErXYjIQnqc1pDCJht5LmInBRIZkeL2qkrYEyI,1549 -jinja2/__pycache__/__init__.cpython-38.pyc,, -jinja2/__pycache__/_compat.cpython-38.pyc,, -jinja2/__pycache__/_identifier.cpython-38.pyc,, -jinja2/__pycache__/asyncfilters.cpython-38.pyc,, -jinja2/__pycache__/asyncsupport.cpython-38.pyc,, -jinja2/__pycache__/bccache.cpython-38.pyc,, -jinja2/__pycache__/compiler.cpython-38.pyc,, -jinja2/__pycache__/constants.cpython-38.pyc,, -jinja2/__pycache__/debug.cpython-38.pyc,, -jinja2/__pycache__/defaults.cpython-38.pyc,, -jinja2/__pycache__/environment.cpython-38.pyc,, -jinja2/__pycache__/exceptions.cpython-38.pyc,, -jinja2/__pycache__/ext.cpython-38.pyc,, -jinja2/__pycache__/filters.cpython-38.pyc,, -jinja2/__pycache__/idtracking.cpython-38.pyc,, -jinja2/__pycache__/lexer.cpython-38.pyc,, -jinja2/__pycache__/loaders.cpython-38.pyc,, -jinja2/__pycache__/meta.cpython-38.pyc,, -jinja2/__pycache__/nativetypes.cpython-38.pyc,, -jinja2/__pycache__/nodes.cpython-38.pyc,, -jinja2/__pycache__/optimizer.cpython-38.pyc,, -jinja2/__pycache__/parser.cpython-38.pyc,, -jinja2/__pycache__/runtime.cpython-38.pyc,, -jinja2/__pycache__/sandbox.cpython-38.pyc,, -jinja2/__pycache__/tests.cpython-38.pyc,, -jinja2/__pycache__/utils.cpython-38.pyc,, -jinja2/__pycache__/visitor.cpython-38.pyc,, -jinja2/_compat.py,sha256=B6Se8HjnXVpzz9-vfHejn-DV2NjaVK-Iewupc5kKlu8,3191 -jinja2/_identifier.py,sha256=EdgGJKi7O1yvr4yFlvqPNEqV6M1qHyQr8Gt8GmVTKVM,1775 -jinja2/asyncfilters.py,sha256=8uwjG1zgHTv3K4nEvsj4HXWkK4NsOlfx7-CcnCULDWw,4185 -jinja2/asyncsupport.py,sha256=ZBFsDLuq3Gtji3Ia87lcyuDbqaHZJRdtShZcqwpFnSQ,7209 -jinja2/bccache.py,sha256=3Pmp4jo65M9FQuIxdxoDBbEDFwe4acDMQf77nEJfrHA,12139 -jinja2/compiler.py,sha256=xCNpF7-xAduODbGKSVEyzU7XZGeLWHZr1cwcZTQob30,66236 -jinja2/constants.py,sha256=RR1sTzNzUmKco6aZicw4JpQpJGCuPuqm1h1YmCNUEFY,1458 -jinja2/debug.py,sha256=UmsW6OxNmbIGvIkwytOyM1NsZB6xJvl_nSz3VgNETUk,8597 -jinja2/defaults.py,sha256=85B6YUUCyWPSdrSeVhcqFVuu_bHUAQXeey--FIwSeVQ,1126 -jinja2/environment.py,sha256=XqCM_GmncAXPm--CxpRPVF6uV_sPKb0Q0jVa7Znry04,50605 -jinja2/exceptions.py,sha256=VjNLawcmf2ODffqVMCQK1cRmvFaUfQWF4u8ouP3QPcE,5425
-jinja2/ext.py,sha256=AtwL5O5enT_L3HR9-oBvhGyUTdGoyaqG_ICtnR_EVd4,26441 -jinja2/filters.py,sha256=4xEq1qfJ7burpHW5GyL6bkGomp0W47jOXg-HG5aLP-Y,41401 -jinja2/idtracking.py,sha256=J3O4VHsrbf3wzwiBc7Cro26kHb6_5kbULeIOzocchIU,9211 -jinja2/lexer.py,sha256=VeGdW_t82Le4H-jLy-hX6UeosLf7ApUq2kuUos8YF4Y,29942 -jinja2/loaders.py,sha256=UUy5ud3lNtGtnn8iorlF9o1FJ6UqZZKMxd0VGnnqMHI,20350 -jinja2/meta.py,sha256=QjyYhfNRD3QCXjBJpiPl9KgkEkGXJbAkCUq4-Ur10EQ,4131 -jinja2/nativetypes.py,sha256=Arb2_3IuM386vWZbGPY7DmxryrXg3WzXAEnaHJNdWa0,3576 -jinja2/nodes.py,sha256=YwErhE9plVWeoxTQPtMwl10wovsyBRY4x9eAVgtP6zg,31071 -jinja2/optimizer.py,sha256=gQLlMYzvQhluhzmAIFA1tXS0cwgWYOjprN-gTRcHVsc,1457 -jinja2/parser.py,sha256=fcfdqePNTNyvosIvczbytVA332qpsURvYnCGcjDHSkA,35660 -jinja2/runtime.py,sha256=94chnK20a1m1t5AaLWeuiTq6L3g3GLs6AxVPfbNXIHE,30582 -jinja2/sandbox.py,sha256=knayyUvXsZ-F0mk15mO2-ehK9gsw04UhB8td-iUOtLc,17127 -jinja2/tests.py,sha256=iO_Y-9Vo60zrVe1lMpSl5sKHqAxe2leZHC08OoZ8K24,4799 -jinja2/utils.py,sha256=26B9HI2lVWaHY8iOnQTJzAcCL4PYOLiA3V79dm3oOSE,22456 -jinja2/visitor.py,sha256=DUHupl0a4PGp7nxRtZFttUzAi1ccxzqc2hzetPYUz8U,3240 diff --git a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/WHEEL b/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/WHEEL deleted file mode 100644 index 03bcde77..00000000 --- a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.34.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/entry_points.txt b/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/entry_points.txt deleted file mode 100644 index 3619483f..00000000 --- a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/entry_points.txt +++ /dev/null @@ -1,3 +0,0 @@ -[babel.extractors] -jinja2 = jinja2.ext:babel_extract [i18n] - diff --git a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/top_level.txt b/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/top_level.txt deleted file mode 100644 index 7f7afbf3..00000000 --- a/venv/lib/python3.8/site-packages/Jinja2-2.11.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -jinja2 diff --git a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER b/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e3..00000000 --- a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.txt b/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.txt deleted file mode 100644 index 9d227a0c..00000000 --- a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.txt +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 Pallets - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. 
Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/METADATA b/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/METADATA deleted file mode 100644 index c50370d6..00000000 --- a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/METADATA +++ /dev/null @@ -1,105 +0,0 @@ -Metadata-Version: 2.1 -Name: MarkupSafe -Version: 1.1.1 -Summary: Safely add untrusted strings to HTML/XML markup. -Home-page: https://palletsprojects.com/p/markupsafe/ -Author: Armin Ronacher -Author-email: armin.ronacher@active-4.com -Maintainer: The Pallets Team -Maintainer-email: contact@palletsprojects.com -License: BSD-3-Clause -Project-URL: Documentation, https://markupsafe.palletsprojects.com/ -Project-URL: Code, https://github.com/pallets/markupsafe -Project-URL: Issue tracker, https://github.com/pallets/markupsafe/issues -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Environment :: Web Environment -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: BSD License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Text Processing :: Markup :: HTML -Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* -Description-Content-Type: text/x-rst - -MarkupSafe -========== - -MarkupSafe implements a text object that escapes characters so it is -safe to use in HTML and XML. Characters that have special meanings are -replaced so that they display as the actual characters. This mitigates -injection attacks, meaning untrusted user input can safely be displayed -on a page. - - -Installing ----------- - -Install and update using `pip`_: - -.. code-block:: text - - pip install -U MarkupSafe - -.. _pip: https://pip.pypa.io/en/stable/quickstart/ - - -Examples --------- - -.. 
code-block:: pycon - - >>> from markupsafe import Markup, escape - >>> # escape replaces special characters and wraps in Markup - >>> escape('<script>alert(document.cookie);</script>') - Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;') - >>> # wrap in Markup to mark text "safe" and prevent escaping - >>> Markup('<strong>Hello</strong>') - Markup('<strong>hello</strong>') - >>> escape(Markup('<strong>Hello</strong>')) - Markup('<strong>hello</strong>') - >>> # Markup is a text subclass (str on Python 3, unicode on Python 2) - >>> # methods and operators escape their arguments - >>> template = Markup("Hello <em>%s</em>") - >>> template % '"World"' - Markup('Hello <em>&#34;World&#34;</em>') - - -Donate ------ - -The Pallets organization develops and supports MarkupSafe and other -libraries that use it. In order to grow the community of contributors -and users, and allow the maintainers to devote more time to the -projects, `please donate today`_. - -.. _please donate today: https://palletsprojects.com/donate - - -Links ------ - -* Website: https://palletsprojects.com/p/markupsafe/ -* Documentation: https://markupsafe.palletsprojects.com/ -* License: `BSD-3-Clause <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_ -* Releases: https://pypi.org/project/MarkupSafe/ -* Code: https://github.com/pallets/markupsafe -* Issue tracker: https://github.com/pallets/markupsafe/issues -* Test status: - - * Linux, Mac: https://travis-ci.org/pallets/markupsafe - * Windows: https://ci.appveyor.com/project/pallets/markupsafe - -* Test coverage: https://codecov.io/gh/pallets/markupsafe -* Official chat: https://discord.gg/t6rrQZH - - diff --git a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/RECORD b/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/RECORD deleted file mode 100644 index 95c33399..00000000 --- a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/RECORD +++ /dev/null @@ -1,16 +0,0 @@ -MarkupSafe-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -MarkupSafe-1.1.1.dist-info/LICENSE.txt,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475 -MarkupSafe-1.1.1.dist-info/METADATA,sha256=IFCP4hCNGjXJgMoSvdjPiKDLAMUTTWoxKXQsQvmyMNU,3653 -MarkupSafe-1.1.1.dist-info/RECORD,, -MarkupSafe-1.1.1.dist-info/WHEEL,sha256=VEyGcIFAmk_1KbI6gaZGw_mMiT-pdGweASQLX-DzYaY,108 -MarkupSafe-1.1.1.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11 -markupsafe/__init__.py,sha256=oTblO5f9KFM-pvnq9bB0HgElnqkJyqHnFN1Nx2NIvnY,10126 -markupsafe/__pycache__/__init__.cpython-38.pyc,, -markupsafe/__pycache__/_compat.cpython-38.pyc,, -markupsafe/__pycache__/_constants.cpython-38.pyc,, -markupsafe/__pycache__/_native.cpython-38.pyc,, -markupsafe/_compat.py,sha256=uEW1ybxEjfxIiuTbRRaJpHsPFf4yQUMMKaPgYEC5XbU,558 -markupsafe/_constants.py,sha256=zo2ajfScG-l1Sb_52EP3MlDCqO7Y1BVHUXXKRsVDRNk,4690 -markupsafe/_native.py,sha256=d-8S_zzYt2y512xYcuSxq0NeG2DUUvG80wVdTn-4KI8,1873 -markupsafe/_speedups.c,sha256=k0fzEIK3CP6MmMqeY0ob43TP90mVN0DTyn7BAl3RqSg,9884 -markupsafe/_speedups.cpython-38-x86_64-linux-gnu.so,sha256=SbJwN321Xn7OPYGv5a6Ghzga75uT8RHQUGkoQUASF-o,48016 diff --git a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL b/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL deleted file mode 100644 index ae40efdd..00000000 --- a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.31.1) -Root-Is-Purelib: false -Tag: cp38-cp38-manylinux1_x86_64 - diff --git a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt b/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt
deleted file mode 100644 index 75bf7292..00000000 --- a/venv/lib/python3.8/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -markupsafe diff --git a/venv/lib/python3.8/site-packages/_black_version.py b/venv/lib/python3.8/site-packages/_black_version.py deleted file mode 100644 index 473d1991..00000000 --- a/venv/lib/python3.8/site-packages/_black_version.py +++ /dev/null @@ -1 +0,0 @@ -version = "19.10b0" diff --git a/venv/lib/python3.8/site-packages/_pytest/__init__.py b/venv/lib/python3.8/site-packages/_pytest/__init__.py deleted file mode 100644 index 46c7827e..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -__all__ = ["__version__"] - -try: - from ._version import version as __version__ -except ImportError: - # broken installation, we don't even try - # unknown only works because we do poor man's version compare - __version__ = "unknown" diff --git a/venv/lib/python3.8/site-packages/_pytest/_argcomplete.py b/venv/lib/python3.8/site-packages/_pytest/_argcomplete.py deleted file mode 100644 index 455c3a7b..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/_argcomplete.py +++ /dev/null @@ -1,109 +0,0 @@ -"""allow bash-completion for argparse with argcomplete if installed -(needs argcomplete>=0.5.6 for python 3.2/3.3; older versions fail -to find the magic string, so _ARGCOMPLETE env. var is never set, and -this does not need special code). - -Function try_argcomplete(parser) should be called directly before -the call to ArgumentParser.parse_args(). - -The filescompleter is what you normally would use on the positional -arguments specification, in order to get "dirname/" after "dirn" -instead of the default "dirname ": - - optparser.add_argument(Config._file_or_dir, nargs='*' - ).completer=filescompleter - -Other, application specific, completers should go in the file -doing the add_argument calls as they need to be specified as .completer -attributes as well. (If argcomplete is not installed, the function the -attribute points to will not be used). - -SPEEDUP -======= -The generic argcomplete script for bash-completion -(/etc/bash_completion.d/python-argcomplete.sh ) -uses a python program to determine the startup script generated by pip. -You can speed up completion somewhat by changing this script to include - # PYTHON_ARGCOMPLETE_OK -so that the python-argcomplete-check-easy-install-script does not -need to be called to find the entry point of the code and see if that is -marked with PYTHON_ARGCOMPLETE_OK - -INSTALL/DEBUGGING -================= -To include this support in another application that has setup.py generated -scripts: -- add the line: - # PYTHON_ARGCOMPLETE_OK - near the top of the main python entry point -- include in the file calling parse_args(): - from _argcomplete import try_argcomplete, filescompleter - , call try_argcomplete just before parse_args(), and optionally add - filescompleter to the positional arguments' add_argument() -If things do not work right away: -- switch on argcomplete debugging with (also helpful when doing custom - completers): - export _ARC_DEBUG=1 -- run: - python-argcomplete-check-easy-install-script $(which appname) - echo $? - will echo 0 if the magic line has been found, 1 if not -- sometimes it helps to find early on errors using: - _ARGCOMPLETE=1 _ARC_DEBUG=1 appname - which should throw a KeyError: 'COMPLINE' (which is properly set by the - global argcomplete script).
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import sys -from glob import glob - - -class FastFilesCompleter(object): - "Fast file completer class" - - def __init__(self, directories=True): - self.directories = directories - - def __call__(self, prefix, **kwargs): - """only called on non option completions""" - if os.path.sep in prefix[1:]: - prefix_dir = len(os.path.dirname(prefix) + os.path.sep) - else: - prefix_dir = 0 - completion = [] - globbed = [] - if "*" not in prefix and "?" not in prefix: - # we are on unix, otherwise no bash - if not prefix or prefix[-1] == os.path.sep: - globbed.extend(glob(prefix + ".*")) - prefix += "*" - globbed.extend(glob(prefix)) - for x in sorted(globbed): - if os.path.isdir(x): - x += "/" - # append stripping the prefix (like bash, not like compgen) - completion.append(x[prefix_dir:]) - return completion - - -if os.environ.get("_ARGCOMPLETE"): - try: - import argcomplete.completers - except ImportError: - sys.exit(-1) - filescompleter = FastFilesCompleter() - - def try_argcomplete(parser): - argcomplete.autocomplete(parser, always_complete_options=False) - - -else: - - def try_argcomplete(parser): - pass - - filescompleter = None diff --git a/venv/lib/python3.8/site-packages/_pytest/_code/__init__.py b/venv/lib/python3.8/site-packages/_pytest/_code/__init__.py deleted file mode 100644 index fe755a39..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/_code/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -""" python inspection/code generation API """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from .code import Code # noqa -from .code import ExceptionInfo # noqa -from .code import filter_traceback # noqa -from .code import Frame # noqa -from .code import getrawcode # noqa -from .code import Traceback # noqa -from .source import compile_ as compile # noqa -from .source import getfslineno # noqa -from .source import Source # noqa diff --git a/venv/lib/python3.8/site-packages/_pytest/_code/_py2traceback.py b/venv/lib/python3.8/site-packages/_pytest/_code/_py2traceback.py deleted file mode 100644 index 8a8b7bc0..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/_code/_py2traceback.py +++ /dev/null @@ -1,94 +0,0 @@ -# copied from python-2.7.3's traceback.py -# CHANGES: -# - some_str is replaced, trying to create unicode strings -# -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import types - -from six import text_type - - -def format_exception_only(etype, value): - """Format the exception part of a traceback. - - The arguments are the exception type and value such as given by - sys.last_type and sys.last_value. The return value is a list of - strings, each ending in a newline. - - Normally, the list contains a single string; however, for - SyntaxError exceptions, it contains several lines that (when - printed) display detailed information about where the syntax - error occurred. - - The message indicating which exception occurred is always the last - string in the list. - - """ - - # An instance should not have a meaningful value parameter, but - # sometimes does, particularly for string exceptions, such as - # >>> raise string1, string2 # deprecated - # - # Clear these out first because issubtype(string1, SyntaxError) - # would throw another exception and mask the original problem. 
- if ( - isinstance(etype, BaseException) - or isinstance(etype, types.InstanceType) - or etype is None - or type(etype) is str - ): - return [_format_final_exc_line(etype, value)] - - stype = etype.__name__ - - if not issubclass(etype, SyntaxError): - return [_format_final_exc_line(stype, value)] - - # It was a syntax error; show exactly where the problem was found. - lines = [] - try: - msg, (filename, lineno, offset, badline) = value.args - except Exception: - pass - else: - filename = filename or "<string>" - lines.append(' File "{}", line {}\n'.format(filename, lineno)) - if badline is not None: - if isinstance(badline, bytes): # python 2 only - badline = badline.decode("utf-8", "replace") - lines.append(" {}\n".format(badline.strip())) - if offset is not None: - caretspace = badline.rstrip("\n")[:offset].lstrip() - # non-space whitespace (like tabs) must be kept for alignment - caretspace = ((c.isspace() and c or " ") for c in caretspace) - # only three spaces to account for offset1 == pos 0 - lines.append(" {}^\n".format("".join(caretspace))) - value = msg - - lines.append(_format_final_exc_line(stype, value)) - return lines - - -def _format_final_exc_line(etype, value): - """Return a list of a single line -- normal case for format_exception_only""" - valuestr = _some_str(value) - if value is None or not valuestr: - line = "{}\n".format(etype) - else: - line = "{}: {}\n".format(etype, valuestr) - return line - - -def _some_str(value): - try: - return text_type(value) - except Exception: - try: - return bytes(value).decode("UTF-8", "replace") - except Exception: - pass - return "<unprintable {} object>".format(type(value).__name__) diff --git a/venv/lib/python3.8/site-packages/_pytest/_code/code.py b/venv/lib/python3.8/site-packages/_pytest/_code/code.py deleted file mode 100644 index d06e24f0..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/_code/code.py +++ /dev/null @@ -1,1052 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import inspect -import pprint -import re -import sys -import traceback -from inspect import CO_VARARGS -from inspect import CO_VARKEYWORDS -from weakref import ref - -import attr -import pluggy -import py -import six -from six import text_type - -import _pytest -from _pytest.compat import _PY2 -from _pytest.compat import _PY3 -from _pytest.compat import PY35 -from _pytest.compat import safe_str - -builtin_repr = repr - -if _PY3: - from traceback import format_exception_only -else: - from ._py2traceback import format_exception_only - - -class Code(object): - """ wrapper around Python code objects """ - - def __init__(self, rawcode): - if not hasattr(rawcode, "co_filename"): - rawcode = getrawcode(rawcode) - try: - self.filename = rawcode.co_filename - self.firstlineno = rawcode.co_firstlineno - 1 - self.name = rawcode.co_name - except AttributeError: - raise TypeError("not a code object: %r" % (rawcode,)) - self.raw = rawcode - - def __eq__(self, other): - return self.raw == other.raw - - __hash__ = None - - def __ne__(self, other): - return not self == other - - @property - def path(self): - """ return a path object pointing to source code (note that it - might not point to an actually existing file). """ - try: - p = py.path.local(self.raw.co_filename) - # maybe don't try this checking - if not p.check(): - raise OSError("py.path check failed.") - except OSError: - # XXX maybe try harder like the weird logic - # in the standard lib [linecache.updatecache] does?
- p = self.raw.co_filename - - return p - - @property - def fullsource(self): - """ return a _pytest._code.Source object for the full source file of the code - """ - from _pytest._code import source - - full, _ = source.findsource(self.raw) - return full - - def source(self): - """ return a _pytest._code.Source object for the code object's source only - """ - # return source only for that part of code - import _pytest._code - - return _pytest._code.Source(self.raw) - - def getargs(self, var=False): - """ return a tuple with the argument names for the code object - - if 'var' is set True also return the names of the variable and - keyword arguments when present - """ - # handy shortcut for getting args - raw = self.raw - argcount = raw.co_argcount - if var: - argcount += raw.co_flags & CO_VARARGS - argcount += raw.co_flags & CO_VARKEYWORDS - return raw.co_varnames[:argcount] - - -class Frame(object): - """Wrapper around a Python frame holding f_locals and f_globals - in which expressions can be evaluated.""" - - def __init__(self, frame): - self.lineno = frame.f_lineno - 1 - self.f_globals = frame.f_globals - self.f_locals = frame.f_locals - self.raw = frame - self.code = Code(frame.f_code) - - @property - def statement(self): - """ statement this frame is at """ - import _pytest._code - - if self.code.fullsource is None: - return _pytest._code.Source("") - return self.code.fullsource.getstatement(self.lineno) - - def eval(self, code, **vars): - """ evaluate 'code' in the frame - - 'vars' are optional additional local variables - - returns the result of the evaluation - """ - f_locals = self.f_locals.copy() - f_locals.update(vars) - return eval(code, self.f_globals, f_locals) - - def exec_(self, code, **vars): - """ exec 'code' in the frame - - 'vars' are optional additional local variables - """ - f_locals = self.f_locals.copy() - f_locals.update(vars) - six.exec_(code, self.f_globals, f_locals) - - def repr(self, object): - """ return a 'safe' (non-recursive, one-line) string repr for 'object' - """ - return py.io.saferepr(object) - - def is_true(self, object): - return object - - def getargs(self, var=False): - """ return a list of tuples (name, value) for all arguments - - if 'var' is set True also include the variable and keyword - arguments when present - """ - retval = [] - for arg in self.code.getargs(var): - try: - retval.append((arg, self.f_locals[arg])) - except KeyError: - pass # this can occur when using Psyco - return retval - - -class TracebackEntry(object): - """ a single entry in a traceback """ - - _repr_style = None - exprinfo = None - - def __init__(self, rawentry, excinfo=None): - self._excinfo = excinfo - self._rawentry = rawentry - self.lineno = rawentry.tb_lineno - 1 - - def set_repr_style(self, mode): - assert mode in ("short", "long") - self._repr_style = mode - - @property - def frame(self): - import _pytest._code - - return _pytest._code.Frame(self._rawentry.tb_frame) - - @property - def relline(self): - return self.lineno - self.frame.code.firstlineno - - def __repr__(self): - return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno + 1) - - @property - def statement(self): - """ _pytest._code.Source object for the current statement """ - source = self.frame.code.fullsource - return source.getstatement(self.lineno) - - @property - def path(self): - """ path to the source code """ - return self.frame.code.path - - def getlocals(self): - return self.frame.f_locals - - locals = property(getlocals, None, None, "locals of underlying frame") - - def
getfirstlinesource(self): - # on Jython this firstlineno can be -1 apparently - return max(self.frame.code.firstlineno, 0) - - def getsource(self, astcache=None): - """ return failing source code. """ - # we use the passed in astcache to not reparse asttrees - # within exception info printing - from _pytest._code.source import getstatementrange_ast - - source = self.frame.code.fullsource - if source is None: - return None - key = astnode = None - if astcache is not None: - key = self.frame.code.path - if key is not None: - astnode = astcache.get(key, None) - start = self.getfirstlinesource() - try: - astnode, _, end = getstatementrange_ast( - self.lineno, source, astnode=astnode - ) - except SyntaxError: - end = self.lineno + 1 - else: - if key is not None: - astcache[key] = astnode - return source[start:end] - - source = property(getsource) - - def ishidden(self): - """ return True if the current frame has a var __tracebackhide__ - resolving to True - - If __tracebackhide__ is a callable, it gets called with the - ExceptionInfo instance and can decide whether to hide the traceback. - - mostly for internal use - """ - try: - tbh = self.frame.f_locals["__tracebackhide__"] - except KeyError: - try: - tbh = self.frame.f_globals["__tracebackhide__"] - except KeyError: - return False - - if callable(tbh): - return tbh(None if self._excinfo is None else self._excinfo()) - else: - return tbh - - def __str__(self): - try: - fn = str(self.path) - except py.error.Error: - fn = "???" - name = self.frame.code.name - try: - line = str(self.statement).lstrip() - except KeyboardInterrupt: - raise - except: # noqa - line = "???" - return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line) - - def name(self): - return self.frame.code.raw.co_name - - name = property(name, None, None, "co_name of underlying code") - - -class Traceback(list): - """ Traceback objects encapsulate and offer higher level - access to Traceback entries. - """ - - Entry = TracebackEntry - - def __init__(self, tb, excinfo=None): - """ initialize from given python traceback object and ExceptionInfo """ - self._excinfo = excinfo - if hasattr(tb, "tb_next"): - - def f(cur): - while cur is not None: - yield self.Entry(cur, excinfo=excinfo) - cur = cur.tb_next - - list.__init__(self, f(tb)) - else: - list.__init__(self, tb) - - def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None): - """ return a Traceback instance wrapping part of this Traceback - - by providing any combination of path, lineno and firstlineno, the - first frame to start the to-be-returned traceback is determined - - this allows cutting the first part of a Traceback instance e.g.
- for formatting reasons (removing some uninteresting bits that deal - with handling of the exception/traceback) - """ - for x in self: - code = x.frame.code - codepath = code.path - if ( - (path is None or codepath == path) - and ( - excludepath is None - or not hasattr(codepath, "relto") - or not codepath.relto(excludepath) - ) - and (lineno is None or x.lineno == lineno) - and (firstlineno is None or x.frame.code.firstlineno == firstlineno) - ): - return Traceback(x._rawentry, self._excinfo) - return self - - def __getitem__(self, key): - val = super(Traceback, self).__getitem__(key) - if isinstance(key, type(slice(0))): - val = self.__class__(val) - return val - - def filter(self, fn=lambda x: not x.ishidden()): - """ return a Traceback instance with certain items removed - - fn is a function that gets a single argument, a TracebackEntry - instance, and should return True when the item should be added - to the Traceback, False when not - - by default this removes all the TracebackEntries which are hidden - (see ishidden() above) - """ - return Traceback(filter(fn, self), self._excinfo) - - def getcrashentry(self): - """ return last non-hidden traceback entry that lead - to the exception of a traceback. - """ - for i in range(-1, -len(self) - 1, -1): - entry = self[i] - if not entry.ishidden(): - return entry - return self[-1] - - def recursionindex(self): - """ return the index of the frame/TracebackEntry where recursion - originates if appropriate, None if no recursion occurred - """ - cache = {} - for i, entry in enumerate(self): - # id for the code.raw is needed to work around - # the strange metaprogramming in the decorator lib from pypi - # which generates code objects that have hash/value equality - # XXX needs a test - key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno - # print "checking for recursion at", key - values = cache.setdefault(key, []) - if values: - f = entry.frame - loc = f.f_locals - for otherloc in values: - if f.is_true( - f.eval( - co_equal, - __recursioncache_locals_1=loc, - __recursioncache_locals_2=otherloc, - ) - ): - return i - values.append(entry.frame.f_locals) - return None - - -co_equal = compile( - "__recursioncache_locals_1 == __recursioncache_locals_2", "?", "eval" -) - - -class ExceptionInfo(object): - """ wraps sys.exc_info() objects and offers - help for navigating the traceback. 
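In test code an `ExceptionInfo` is normally obtained from `pytest.raises` rather than constructed by hand. A short sketch of the public API that wraps the class above:

```python
import pytest

def test_conversion_error():
    with pytest.raises(ValueError) as excinfo:
        int("not a number")
    # excinfo wraps sys.exc_info(): .type, .value and .traceback as above
    assert excinfo.type is ValueError
    assert "invalid literal" in str(excinfo.value)
    excinfo.match(r"invalid literal")  # raises AssertionError if no match
```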
- """ - - _striptext = "" - _assert_start_repr = ( - "AssertionError(u'assert " if _PY2 else "AssertionError('assert " - ) - - def __init__(self, tup=None, exprinfo=None): - import _pytest._code - - if tup is None: - tup = sys.exc_info() - if exprinfo is None and isinstance(tup[1], AssertionError): - exprinfo = getattr(tup[1], "msg", None) - if exprinfo is None: - exprinfo = py.io.saferepr(tup[1]) - if exprinfo and exprinfo.startswith(self._assert_start_repr): - self._striptext = "AssertionError: " - self._excinfo = tup - #: the exception class - self.type = tup[0] - #: the exception instance - self.value = tup[1] - #: the exception raw traceback - self.tb = tup[2] - #: the exception type name - self.typename = self.type.__name__ - #: the exception traceback (_pytest._code.Traceback instance) - self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self)) - - def __repr__(self): - return "" % (self.typename, len(self.traceback)) - - def exconly(self, tryshort=False): - """ return the exception as a string - - when 'tryshort' resolves to True, and the exception is a - _pytest._code._AssertionError, only the actual exception part of - the exception representation is returned (so 'AssertionError: ' is - removed from the beginning) - """ - lines = format_exception_only(self.type, self.value) - text = "".join(lines) - text = text.rstrip() - if tryshort: - if text.startswith(self._striptext): - text = text[len(self._striptext) :] - return text - - def errisinstance(self, exc): - """ return True if the exception is an instance of exc """ - return isinstance(self.value, exc) - - def _getreprcrash(self): - exconly = self.exconly(tryshort=True) - entry = self.traceback.getcrashentry() - path, lineno = entry.frame.code.raw.co_filename, entry.lineno - return ReprFileLocation(path, lineno + 1, exconly) - - def getrepr( - self, - showlocals=False, - style="long", - abspath=False, - tbfilter=True, - funcargs=False, - truncate_locals=True, - chain=True, - ): - """ - Return str()able representation of this exception info. - - :param bool showlocals: - Show locals per traceback entry. - Ignored if ``style=="native"``. - - :param str style: long|short|no|native traceback style - - :param bool abspath: - If paths should be changed to absolute or left unchanged. - - :param bool tbfilter: - Hide entries that contain a local variable ``__tracebackhide__==True``. - Ignored if ``style=="native"``. - - :param bool funcargs: - Show fixtures ("funcargs" for legacy purposes) per traceback entry. - - :param bool truncate_locals: - With ``showlocals==True``, make sure locals can be safely represented as strings. - - :param bool chain: if chained exceptions in Python 3 should be shown. - - .. versionchanged:: 3.9 - - Added the ``chain`` parameter. 
- """ - if style == "native": - return ReprExceptionInfo( - ReprTracebackNative( - traceback.format_exception( - self.type, self.value, self.traceback[0]._rawentry - ) - ), - self._getreprcrash(), - ) - - fmt = FormattedExcinfo( - showlocals=showlocals, - style=style, - abspath=abspath, - tbfilter=tbfilter, - funcargs=funcargs, - truncate_locals=truncate_locals, - chain=chain, - ) - return fmt.repr_excinfo(self) - - def __str__(self): - entry = self.traceback[-1] - loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) - return str(loc) - - def __unicode__(self): - entry = self.traceback[-1] - loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) - return text_type(loc) - - def match(self, regexp): - """ - Match the regular expression 'regexp' on the string representation of - the exception. If it matches then True is returned (so that it is - possible to write 'assert excinfo.match()'). If it doesn't match an - AssertionError is raised. - """ - __tracebackhide__ = True - if not re.search(regexp, str(self.value)): - assert 0, "Pattern '{!s}' not found in '{!s}'".format(regexp, self.value) - return True - - -@attr.s -class FormattedExcinfo(object): - """ presenting information about failing Functions and Generators. """ - - # for traceback entries - flow_marker = ">" - fail_marker = "E" - - showlocals = attr.ib(default=False) - style = attr.ib(default="long") - abspath = attr.ib(default=True) - tbfilter = attr.ib(default=True) - funcargs = attr.ib(default=False) - truncate_locals = attr.ib(default=True) - chain = attr.ib(default=True) - astcache = attr.ib(default=attr.Factory(dict), init=False, repr=False) - - def _getindent(self, source): - # figure out indent for given source - try: - s = str(source.getstatement(len(source) - 1)) - except KeyboardInterrupt: - raise - except: # noqa - try: - s = str(source[-1]) - except KeyboardInterrupt: - raise - except: # noqa - return 0 - return 4 + (len(s) - len(s.lstrip())) - - def _getentrysource(self, entry): - source = entry.getsource(self.astcache) - if source is not None: - source = source.deindent() - return source - - def _saferepr(self, obj): - return py.io.saferepr(obj) - - def repr_args(self, entry): - if self.funcargs: - args = [] - for argname, argvalue in entry.frame.getargs(var=True): - args.append((argname, self._saferepr(argvalue))) - return ReprFuncArgs(args) - - def get_source(self, source, line_index=-1, excinfo=None, short=False): - """ return formatted and marked up source lines. 
""" - import _pytest._code - - lines = [] - if source is None or line_index >= len(source.lines): - source = _pytest._code.Source("???") - line_index = 0 - if line_index < 0: - line_index += len(source) - space_prefix = " " - if short: - lines.append(space_prefix + source.lines[line_index].strip()) - else: - for line in source.lines[:line_index]: - lines.append(space_prefix + line) - lines.append(self.flow_marker + " " + source.lines[line_index]) - for line in source.lines[line_index + 1 :]: - lines.append(space_prefix + line) - if excinfo is not None: - indent = 4 if short else self._getindent(source) - lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) - return lines - - def get_exconly(self, excinfo, indent=4, markall=False): - lines = [] - indent = " " * indent - # get the real exception information out - exlines = excinfo.exconly(tryshort=True).split("\n") - failindent = self.fail_marker + indent[1:] - for line in exlines: - lines.append(failindent + line) - if not markall: - failindent = indent - return lines - - def repr_locals(self, locals): - if self.showlocals: - lines = [] - keys = [loc for loc in locals if loc[0] != "@"] - keys.sort() - for name in keys: - value = locals[name] - if name == "__builtins__": - lines.append("__builtins__ = ") - else: - # This formatting could all be handled by the - # _repr() function, which is only reprlib.Repr in - # disguise, so is very configurable. - if self.truncate_locals: - str_repr = self._saferepr(value) - else: - str_repr = pprint.pformat(value) - # if len(str_repr) < 70 or not isinstance(value, - # (list, tuple, dict)): - lines.append("%-10s = %s" % (name, str_repr)) - # else: - # self._line("%-10s =\\" % (name,)) - # # XXX - # pprint.pprint(value, stream=self.excinfowriter) - return ReprLocals(lines) - - def repr_traceback_entry(self, entry, excinfo=None): - import _pytest._code - - source = self._getentrysource(entry) - if source is None: - source = _pytest._code.Source("???") - line_index = 0 - else: - # entry.getfirstlinesource() can be -1, should be 0 on jython - line_index = entry.lineno - max(entry.getfirstlinesource(), 0) - - lines = [] - style = entry._repr_style - if style is None: - style = self.style - if style in ("short", "long"): - short = style == "short" - reprargs = self.repr_args(entry) if not short else None - s = self.get_source(source, line_index, excinfo, short=short) - lines.extend(s) - if short: - message = "in %s" % (entry.name) - else: - message = excinfo and excinfo.typename or "" - path = self._makepath(entry.path) - filelocrepr = ReprFileLocation(path, entry.lineno + 1, message) - localsrepr = None - if not short: - localsrepr = self.repr_locals(entry.locals) - return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) - if excinfo: - lines.extend(self.get_exconly(excinfo, indent=4)) - return ReprEntry(lines, None, None, None, style) - - def _makepath(self, path): - if not self.abspath: - try: - np = py.path.local().bestrelpath(path) - except OSError: - return path - if len(np) < len(str(path)): - path = np - return path - - def repr_traceback(self, excinfo): - traceback = excinfo.traceback - if self.tbfilter: - traceback = traceback.filter() - - if is_recursion_error(excinfo): - traceback, extraline = self._truncate_recursive_traceback(traceback) - else: - extraline = None - - last = traceback[-1] - entries = [] - for index, entry in enumerate(traceback): - einfo = (last == entry) and excinfo or None - reprentry = self.repr_traceback_entry(entry, einfo) - entries.append(reprentry) 
- return ReprTraceback(entries, extraline, style=self.style) - - def _truncate_recursive_traceback(self, traceback): - """ - Truncate the given recursive traceback trying to find the starting point - of the recursion. - - The detection is done by going through each traceback entry and finding the - point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``. - - Handle the situation where the recursion process might raise an exception (for example - comparing numpy arrays using equality raises a TypeError), in which case we do our best to - warn the user of the error and show a limited traceback. - """ - try: - recursionindex = traceback.recursionindex() - except Exception as e: - max_frames = 10 - extraline = ( - "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n" - " The following exception happened when comparing locals in the stack frame:\n" - " {exc_type}: {exc_msg}\n" - " Displaying first and last {max_frames} stack frames out of {total}." - ).format( - exc_type=type(e).__name__, - exc_msg=safe_str(e), - max_frames=max_frames, - total=len(traceback), - ) - traceback = traceback[:max_frames] + traceback[-max_frames:] - else: - if recursionindex is not None: - extraline = "!!! Recursion detected (same locals & position)" - traceback = traceback[: recursionindex + 1] - else: - extraline = None - - return traceback, extraline - - def repr_excinfo(self, excinfo): - if _PY2: - reprtraceback = self.repr_traceback(excinfo) - reprcrash = excinfo._getreprcrash() - - return ReprExceptionInfo(reprtraceback, reprcrash) - else: - repr_chain = [] - e = excinfo.value - descr = None - seen = set() - while e is not None and id(e) not in seen: - seen.add(id(e)) - if excinfo: - reprtraceback = self.repr_traceback(excinfo) - reprcrash = excinfo._getreprcrash() - else: - # fallback to native repr if the exception doesn't have a traceback: - # ExceptionInfo objects require a full traceback to work - reprtraceback = ReprTracebackNative( - traceback.format_exception(type(e), e, None) - ) - reprcrash = None - - repr_chain += [(reprtraceback, reprcrash, descr)] - if e.__cause__ is not None and self.chain: - e = e.__cause__ - excinfo = ( - ExceptionInfo((type(e), e, e.__traceback__)) - if e.__traceback__ - else None - ) - descr = "The above exception was the direct cause of the following exception:" - elif ( - e.__context__ is not None - and not e.__suppress_context__ - and self.chain - ): - e = e.__context__ - excinfo = ( - ExceptionInfo((type(e), e, e.__traceback__)) - if e.__traceback__ - else None - ) - descr = "During handling of the above exception, another exception occurred:" - else: - e = None - repr_chain.reverse() - return ExceptionChainRepr(repr_chain) - - -class TerminalRepr(object): - def __str__(self): - s = self.__unicode__() - if _PY2: - s = s.encode("utf-8") - return s - - def __unicode__(self): - # FYI this is called from pytest-xdist's serialization of exception - # information. 
- io = py.io.TextIO() - tw = py.io.TerminalWriter(file=io) - self.toterminal(tw) - return io.getvalue().strip() - - def __repr__(self): - return "<%s instance at %0x>" % (self.__class__, id(self)) - - -class ExceptionRepr(TerminalRepr): - def __init__(self): - self.sections = [] - - def addsection(self, name, content, sep="-"): - self.sections.append((name, content, sep)) - - def toterminal(self, tw): - for name, content, sep in self.sections: - tw.sep(sep, name) - tw.line(content) - - -class ExceptionChainRepr(ExceptionRepr): - def __init__(self, chain): - super(ExceptionChainRepr, self).__init__() - self.chain = chain - # reprcrash and reprtraceback of the outermost (the newest) exception - # in the chain - self.reprtraceback = chain[-1][0] - self.reprcrash = chain[-1][1] - - def toterminal(self, tw): - for element in self.chain: - element[0].toterminal(tw) - if element[2] is not None: - tw.line("") - tw.line(element[2], yellow=True) - super(ExceptionChainRepr, self).toterminal(tw) - - -class ReprExceptionInfo(ExceptionRepr): - def __init__(self, reprtraceback, reprcrash): - super(ReprExceptionInfo, self).__init__() - self.reprtraceback = reprtraceback - self.reprcrash = reprcrash - - def toterminal(self, tw): - self.reprtraceback.toterminal(tw) - super(ReprExceptionInfo, self).toterminal(tw) - - -class ReprTraceback(TerminalRepr): - entrysep = "_ " - - def __init__(self, reprentries, extraline, style): - self.reprentries = reprentries - self.extraline = extraline - self.style = style - - def toterminal(self, tw): - # the entries might have different styles - for i, entry in enumerate(self.reprentries): - if entry.style == "long": - tw.line("") - entry.toterminal(tw) - if i < len(self.reprentries) - 1: - next_entry = self.reprentries[i + 1] - if ( - entry.style == "long" - or entry.style == "short" - and next_entry.style == "long" - ): - tw.sep(self.entrysep) - - if self.extraline: - tw.line(self.extraline) - - -class ReprTracebackNative(ReprTraceback): - def __init__(self, tblines): - self.style = "native" - self.reprentries = [ReprEntryNative(tblines)] - self.extraline = None - - -class ReprEntryNative(TerminalRepr): - style = "native" - - def __init__(self, tblines): - self.lines = tblines - - def toterminal(self, tw): - tw.write("".join(self.lines)) - - -class ReprEntry(TerminalRepr): - localssep = "_ " - - def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style): - self.lines = lines - self.reprfuncargs = reprfuncargs - self.reprlocals = reprlocals - self.reprfileloc = filelocrepr - self.style = style - - def toterminal(self, tw): - if self.style == "short": - self.reprfileloc.toterminal(tw) - for line in self.lines: - red = line.startswith("E ") - tw.line(line, bold=True, red=red) - # tw.line("") - return - if self.reprfuncargs: - self.reprfuncargs.toterminal(tw) - for line in self.lines: - red = line.startswith("E ") - tw.line(line, bold=True, red=red) - if self.reprlocals: - # tw.sep(self.localssep, "Locals") - tw.line("") - self.reprlocals.toterminal(tw) - if self.reprfileloc: - if self.lines: - tw.line("") - self.reprfileloc.toterminal(tw) - - def __str__(self): - return "%s\n%s\n%s" % ("\n".join(self.lines), self.reprlocals, self.reprfileloc) - - -class ReprFileLocation(TerminalRepr): - def __init__(self, path, lineno, message): - self.path = str(path) - self.lineno = lineno - self.message = message - - def toterminal(self, tw): - # filename and lineno output for each entry, - # using an output format that most editors understand - msg = self.message - i =
msg.find("\n") - if i != -1: - msg = msg[:i] - tw.write(self.path, bold=True, red=True) - tw.line(":%s: %s" % (self.lineno, msg)) - - -class ReprLocals(TerminalRepr): - def __init__(self, lines): - self.lines = lines - - def toterminal(self, tw): - for line in self.lines: - tw.line(line) - - -class ReprFuncArgs(TerminalRepr): - def __init__(self, args): - self.args = args - - def toterminal(self, tw): - if self.args: - linesofar = "" - for name, value in self.args: - ns = "%s = %s" % (safe_str(name), safe_str(value)) - if len(ns) + len(linesofar) + 2 > tw.fullwidth: - if linesofar: - tw.line(linesofar) - linesofar = ns - else: - if linesofar: - linesofar += ", " + ns - else: - linesofar = ns - if linesofar: - tw.line(linesofar) - tw.line("") - - -def getrawcode(obj, trycall=True): - """ return code object for given function. """ - try: - return obj.__code__ - except AttributeError: - obj = getattr(obj, "im_func", obj) - obj = getattr(obj, "func_code", obj) - obj = getattr(obj, "f_code", obj) - obj = getattr(obj, "__code__", obj) - if trycall and not hasattr(obj, "co_firstlineno"): - if hasattr(obj, "__call__") and not inspect.isclass(obj): - x = getrawcode(obj.__call__, trycall=False) - if hasattr(x, "co_firstlineno"): - return x - return obj - - -if PY35: # RecursionError introduced in 3.5 - - def is_recursion_error(excinfo): - return excinfo.errisinstance(RecursionError) # noqa - - -else: - - def is_recursion_error(excinfo): - if not excinfo.errisinstance(RuntimeError): - return False - try: - return "maximum recursion depth exceeded" in str(excinfo.value) - except UnicodeError: - return False - - -# relative paths that we use to filter traceback entries from appearing to the user; -# see filter_traceback -# note: if we need to add more paths than what we have now we should probably use a list -# for better maintenance - -_PLUGGY_DIR = py.path.local(pluggy.__file__.rstrip("oc")) -# pluggy is either a package or a single module depending on the version -if _PLUGGY_DIR.basename == "__init__.py": - _PLUGGY_DIR = _PLUGGY_DIR.dirpath() -_PYTEST_DIR = py.path.local(_pytest.__file__).dirpath() -_PY_DIR = py.path.local(py.__file__).dirpath() - - -def filter_traceback(entry): - """Return True if a TracebackEntry instance should be removed from tracebacks: - * dynamically generated code (no code to show up for it); - * internal traceback from pytest or its internal libraries, py and pluggy. - """ - # entry.path might sometimes return a str object when the entry - # points to dynamically generated code - # see https://bitbucket.org/pytest-dev/py/issues/71 - raw_filename = entry.frame.code.raw.co_filename - is_generated = "<" in raw_filename and ">" in raw_filename - if is_generated: - return False - # entry.path might point to a non-existing file, in which case it will - # also return a str object. 
see #1133 - p = py.path.local(entry.path) - return ( - not p.relto(_PLUGGY_DIR) and not p.relto(_PYTEST_DIR) and not p.relto(_PY_DIR) - ) diff --git a/venv/lib/python3.8/site-packages/_pytest/_code/source.py b/venv/lib/python3.8/site-packages/_pytest/_code/source.py deleted file mode 100644 index b74ecf88..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/_code/source.py +++ /dev/null @@ -1,328 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import ast -import inspect -import linecache -import sys -import textwrap -import tokenize -import warnings -from ast import PyCF_ONLY_AST as _AST_FLAG -from bisect import bisect_right - -import py -import six - - -class Source(object): - """ an immutable object holding a source code fragment, - possibly deindenting it. - """ - - _compilecounter = 0 - - def __init__(self, *parts, **kwargs): - self.lines = lines = [] - de = kwargs.get("deindent", True) - for part in parts: - if not part: - partlines = [] - elif isinstance(part, Source): - partlines = part.lines - elif isinstance(part, (tuple, list)): - partlines = [x.rstrip("\n") for x in part] - elif isinstance(part, six.string_types): - partlines = part.split("\n") - else: - partlines = getsource(part, deindent=de).lines - if de: - partlines = deindent(partlines) - lines.extend(partlines) - - def __eq__(self, other): - try: - return self.lines == other.lines - except AttributeError: - if isinstance(other, str): - return str(self) == other - return False - - __hash__ = None - - def __getitem__(self, key): - if isinstance(key, int): - return self.lines[key] - else: - if key.step not in (None, 1): - raise IndexError("cannot slice a Source with a step") - newsource = Source() - newsource.lines = self.lines[key.start : key.stop] - return newsource - - def __len__(self): - return len(self.lines) - - def strip(self): - """ return new source object with trailing - and leading blank lines removed. - """ - start, end = 0, len(self) - while start < end and not self.lines[start].strip(): - start += 1 - while end > start and not self.lines[end - 1].strip(): - end -= 1 - source = Source() - source.lines[:] = self.lines[start:end] - return source - - def putaround(self, before="", after="", indent=" " * 4): - """ return a copy of the source object with - 'before' and 'after' wrapped around it. - """ - before = Source(before) - after = Source(after) - newsource = Source() - lines = [(indent + line) for line in self.lines] - newsource.lines = before.lines + lines + after.lines - return newsource - - def indent(self, indent=" " * 4): - """ return a copy of the source object with - all lines indented by the given indent-string. - """ - newsource = Source() - newsource.lines = [(indent + line) for line in self.lines] - return newsource - - def getstatement(self, lineno): - """ return Source statement which contains the - given linenumber (counted from 0). - """ - start, end = self.getstatementrange(lineno) - return self[start:end] - - def getstatementrange(self, lineno): - """ return (start, end) tuple which spans the minimal - statement region which containing the given lineno. 
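A sketch of how the `Source` helpers above fit together; the import path is pytest-internal (re-exported by `_pytest._code` as shown earlier in this diff), so treat it as an implementation detail rather than a supported API:

```python
from _pytest._code import Source

src = Source("    if x:\n        y = 1")   # deindented on construction by default
print(src.lines)                           # ['if x:', '    y = 1']
print(str(src.getstatement(0)))            # statement containing line 0 (0-based)
```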
- """ - if not (0 <= lineno < len(self)): - raise IndexError("lineno out of range") - ast, start, end = getstatementrange_ast(lineno, self) - return start, end - - def deindent(self): - """return a new source object deindented.""" - newsource = Source() - newsource.lines[:] = deindent(self.lines) - return newsource - - def isparseable(self, deindent=True): - """ return True if source is parseable, heuristically - deindenting it by default. - """ - from parser import suite as syntax_checker - - if deindent: - source = str(self.deindent()) - else: - source = str(self) - try: - # compile(source+'\n', "x", "exec") - syntax_checker(source + "\n") - except KeyboardInterrupt: - raise - except Exception: - return False - else: - return True - - def __str__(self): - return "\n".join(self.lines) - - def compile( - self, filename=None, mode="exec", flag=0, dont_inherit=0, _genframe=None - ): - """ return compiled code object. if filename is None - invent an artificial filename which displays - the source/line position of the caller frame. - """ - if not filename or py.path.local(filename).check(file=0): - if _genframe is None: - _genframe = sys._getframe(1) # the caller - fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno - base = "<%d-codegen " % self._compilecounter - self.__class__._compilecounter += 1 - if not filename: - filename = base + "%s:%d>" % (fn, lineno) - else: - filename = base + "%r %s:%d>" % (filename, fn, lineno) - source = "\n".join(self.lines) + "\n" - try: - co = compile(source, filename, mode, flag) - except SyntaxError: - ex = sys.exc_info()[1] - # re-represent syntax errors from parsing python strings - msglines = self.lines[: ex.lineno] - if ex.offset: - msglines.append(" " * ex.offset + "^") - msglines.append("(code was compiled probably from here: %s)" % filename) - newex = SyntaxError("\n".join(msglines)) - newex.offset = ex.offset - newex.lineno = ex.lineno - newex.text = ex.text - raise newex - else: - if flag & _AST_FLAG: - return co - lines = [(x + "\n") for x in self.lines] - linecache.cache[filename] = (1, None, lines, filename) - return co - - -# -# public API shortcut functions -# - - -def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0): - """ compile the given source to a raw code object, - and maintain an internal cache which allows later - retrieval of the source code for the code object - and any recursively created code objects. - """ - if isinstance(source, ast.AST): - # XXX should Source support having AST? - return compile(source, filename, mode, flags, dont_inherit) - _genframe = sys._getframe(1) # the caller - s = Source(source) - co = s.compile(filename, mode, flags, _genframe=_genframe) - return co - - -def getfslineno(obj): - """ Return source location (path, lineno) for the given object. 
- If the source cannot be determined return ("", -1) - """ - from .code import Code - - try: - code = Code(obj) - except TypeError: - try: - fn = inspect.getsourcefile(obj) or inspect.getfile(obj) - except TypeError: - return "", -1 - - fspath = fn and py.path.local(fn) or None - lineno = -1 - if fspath: - try: - _, lineno = findsource(obj) - except IOError: - pass - else: - fspath = code.path - lineno = code.firstlineno - assert isinstance(lineno, int) - return fspath, lineno - - -# -# helper functions -# - - -def findsource(obj): - try: - sourcelines, lineno = inspect.findsource(obj) - except py.builtin._sysex: - raise - except: # noqa - return None, -1 - source = Source() - source.lines = [line.rstrip() for line in sourcelines] - return source, lineno - - -def getsource(obj, **kwargs): - from .code import getrawcode - - obj = getrawcode(obj) - try: - strsrc = inspect.getsource(obj) - except IndentationError: - strsrc = '"Buggy python version consider upgrading, cannot get source"' - assert isinstance(strsrc, str) - return Source(strsrc, **kwargs) - - -def deindent(lines): - return textwrap.dedent("\n".join(lines)).splitlines() - - -def get_statement_startend2(lineno, node): - import ast - - # flatten all statements and except handlers into one lineno-list - # AST's line numbers start indexing at 1 - values = [] - for x in ast.walk(node): - if isinstance(x, (ast.stmt, ast.ExceptHandler)): - values.append(x.lineno - 1) - for name in ("finalbody", "orelse"): - val = getattr(x, name, None) - if val: - # treat the finally/orelse part as its own statement - values.append(val[0].lineno - 1 - 1) - values.sort() - insert_index = bisect_right(values, lineno) - start = values[insert_index - 1] - if insert_index >= len(values): - end = None - else: - end = values[insert_index] - return start, end - - -def getstatementrange_ast(lineno, source, assertion=False, astnode=None): - if astnode is None: - content = str(source) - # See #4260: - # don't produce duplicate warnings when compiling source to find ast - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - astnode = compile(content, "source", "exec", _AST_FLAG) - - start, end = get_statement_startend2(lineno, astnode) - # we need to correct the end: - # - ast-parsing strips comments - # - there might be empty lines - # - we might have lesser indented code blocks at the end - if end is None: - end = len(source.lines) - - if end > start + 1: - # make sure we don't span differently indented code blocks - # by using the BlockFinder helper used which inspect.getsource() uses itself - block_finder = inspect.BlockFinder() - # if we start with an indented line, put blockfinder to "started" mode - block_finder.started = source.lines[start][0].isspace() - it = ((x + "\n") for x in source.lines[start:end]) - try: - for tok in tokenize.generate_tokens(lambda: next(it)): - block_finder.tokeneater(*tok) - except (inspect.EndOfBlock, IndentationError): - end = block_finder.last + start - except Exception: - pass - - # the end might still point to a comment or empty line, correct it - while end: - line = source.lines[end - 1].lstrip() - if line.startswith("#") or not line: - end -= 1 - else: - break - return astnode, start, end diff --git a/venv/lib/python3.8/site-packages/_pytest/_version.py b/venv/lib/python3.8/site-packages/_pytest/_version.py deleted file mode 100644 index 321711f3..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/_version.py +++ /dev/null @@ -1,4 +0,0 @@ -# coding: utf-8 -# file generated by setuptools_scm -# don't 
change, don't track in version control -version = '3.10.1' diff --git a/venv/lib/python3.8/site-packages/_pytest/assertion/__init__.py b/venv/lib/python3.8/site-packages/_pytest/assertion/__init__.py deleted file mode 100644 index b5c846c2..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/assertion/__init__.py +++ /dev/null @@ -1,155 +0,0 @@ -""" -support for presenting detailed information in failing assertions. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys - -import six - -from _pytest.assertion import rewrite -from _pytest.assertion import truncate -from _pytest.assertion import util - - -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group.addoption( - "--assert", - action="store", - dest="assertmode", - choices=("rewrite", "plain"), - default="rewrite", - metavar="MODE", - help="""Control assertion debugging tools. 'plain' - performs no assertion debugging. 'rewrite' - (the default) rewrites assert statements in - test modules on import to provide assert - expression information.""", - ) - - -def register_assert_rewrite(*names): - """Register one or more module names to be rewritten on import. - - This function will make sure that this module or all modules inside - the package will get their assert statements rewritten. - Thus you should make sure to call this before the module is - actually imported, usually in your __init__.py if you are a plugin - using a package. - - :raise TypeError: if the given module names are not strings. - """ - for name in names: - if not isinstance(name, str): - msg = "expected module names as *args, got {0} instead" - raise TypeError(msg.format(repr(names))) - for hook in sys.meta_path: - if isinstance(hook, rewrite.AssertionRewritingHook): - importhook = hook - break - else: - importhook = DummyRewriteHook() - importhook.mark_rewrite(*names) - - -class DummyRewriteHook(object): - """A no-op import hook for when rewriting is disabled.""" - - def mark_rewrite(self, *names): - pass - - -class AssertionState(object): - """State for the assertion plugin.""" - - def __init__(self, config, mode): - self.mode = mode - self.trace = config.trace.root.get("assertion") - self.hook = None - - -def install_importhook(config): - """Try to install the rewrite hook, raise SystemError if it fails.""" - # Jython has an AST bug that make the assertion rewriting hook malfunction. - if sys.platform.startswith("java"): - raise SystemError("rewrite not supported") - - config._assertstate = AssertionState(config, "rewrite") - config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config) - sys.meta_path.insert(0, hook) - config._assertstate.trace("installed rewrite import hook") - - def undo(): - hook = config._assertstate.hook - if hook is not None and hook in sys.meta_path: - sys.meta_path.remove(hook) - - config.add_cleanup(undo) - return hook - - -def pytest_collection(session): - # this hook is only called when test modules are collected - # so for example not in the master process of pytest-xdist - # (which does not collect test modules) - assertstate = getattr(session.config, "_assertstate", None) - if assertstate: - if assertstate.hook is not None: - assertstate.hook.set_session(session) - - -def pytest_runtest_setup(item): - """Setup the pytest_assertrepr_compare hook - - The newinterpret and rewrite modules will use util._reprcompare if - it exists to use custom reporting via the - pytest_assertrepr_compare hook. 
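The `callbinrepr` plumbing set up below is what makes the `pytest_assertrepr_compare` hook work from a conftest.py. A sketch of the hook with hypothetical handling for set comparisons:

```python
# conftest.py
def pytest_assertrepr_compare(config, op, left, right):
    if op == "==" and isinstance(left, set) and isinstance(right, set):
        return [
            "comparing sets:",
            "  only in left:  %s" % sorted(left - right),
            "  only in right: %s" % sorted(right - left),
        ]
    # returning None falls back to the default explanation
```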
This sets up this custom - comparison for the test. - """ - - def callbinrepr(op, left, right): - """Call the pytest_assertrepr_compare hook and prepare the result - - This uses the first result from the hook and then ensures the - following: - * Overly verbose explanations are truncated unless configured otherwise - (eg. if running in verbose mode). - * Embedded newlines are escaped to help util.format_explanation() - later. - * If the rewrite mode is used embedded %-characters are replaced - to protect later % formatting. - - The result can be formatted by util.format_explanation() for - pretty printing. - """ - hook_result = item.ihook.pytest_assertrepr_compare( - config=item.config, op=op, left=left, right=right - ) - for new_expl in hook_result: - if new_expl: - new_expl = truncate.truncate_if_required(new_expl, item) - new_expl = [line.replace("\n", "\\n") for line in new_expl] - res = six.text_type("\n~").join(new_expl) - if item.config.getvalue("assertmode") == "rewrite": - res = res.replace("%", "%%") - return res - - util._reprcompare = callbinrepr - - -def pytest_runtest_teardown(item): - util._reprcompare = None - - -def pytest_sessionfinish(session): - assertstate = getattr(session.config, "_assertstate", None) - if assertstate: - if assertstate.hook is not None: - assertstate.hook.set_session(None) - - -# Expose this plugin's implementation for the pytest_assertrepr_compare hook -pytest_assertrepr_compare = util.assertrepr_compare diff --git a/venv/lib/python3.8/site-packages/_pytest/assertion/rewrite.py b/venv/lib/python3.8/site-packages/_pytest/assertion/rewrite.py deleted file mode 100644 index ecb24ff7..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/assertion/rewrite.py +++ /dev/null @@ -1,1035 +0,0 @@ -"""Rewrite assertion AST to produce nice error messages""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import ast -import errno -import imp -import itertools -import marshal -import os -import re -import string -import struct -import sys -import types - -import atomicwrites -import py -import six - -from _pytest.assertion import util -from _pytest.compat import spec_from_file_location -from _pytest.pathlib import fnmatch_ex -from _pytest.pathlib import PurePath - -# pytest caches rewritten pycs in __pycache__. -if hasattr(imp, "get_tag"): - PYTEST_TAG = imp.get_tag() + "-PYTEST" -else: - if hasattr(sys, "pypy_version_info"): - impl = "pypy" - elif sys.platform == "java": - impl = "jython" - else: - impl = "cpython" - ver = sys.version_info - PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1]) - del ver, impl - -PYC_EXT = ".py" + (__debug__ and "c" or "o") -PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT - -ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 - -if sys.version_info >= (3, 5): - ast_Call = ast.Call -else: - - def ast_Call(a, b, c): - return ast.Call(a, b, c, None, None) - - -class AssertionRewritingHook(object): - """PEP302 Import hook which rewrites asserts.""" - - def __init__(self, config): - self.config = config - self.fnpats = config.getini("python_files") - self.session = None - self.modules = {} - self._rewritten_names = set() - self._register_with_pkg_resources() - self._must_rewrite = set() - # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, - # which might result in infinite recursion (#3506) - self._writing_pyc = False - self._basenames_to_check_rewrite = {"conftest"} - self._marked_for_rewrite_cache = {} - self._session_paths_checked = False - - def set_session(self, session): - self.session = session - self._session_paths_checked = False - - def _imp_find_module(self, name, path=None): - """Indirection so we can mock calls to find_module originated from the hook during testing""" - return imp.find_module(name, path) - - def find_module(self, name, path=None): - if self._writing_pyc: - return None - state = self.config._assertstate - if self._early_rewrite_bailout(name, state): - return None - state.trace("find_module called for: %s" % name) - names = name.rsplit(".", 1) - lastname = names[-1] - pth = None - if path is not None: - # Starting with Python 3.3, path is a _NamespacePath(), which - # causes problems if not converted to list. - path = list(path) - if len(path) == 1: - pth = path[0] - if pth is None: - try: - fd, fn, desc = self._imp_find_module(lastname, path) - except ImportError: - return None - if fd is not None: - fd.close() - tp = desc[2] - if tp == imp.PY_COMPILED: - if hasattr(imp, "source_from_cache"): - try: - fn = imp.source_from_cache(fn) - except ValueError: - # Python 3 doesn't like orphaned but still-importable - # .pyc files. - fn = fn[:-1] - else: - fn = fn[:-1] - elif tp != imp.PY_SOURCE: - # Don't know what this is. - return None - else: - fn = os.path.join(pth, name.rpartition(".")[2] + ".py") - - fn_pypath = py.path.local(fn) - if not self._should_rewrite(name, fn_pypath, state): - return None - - self._rewritten_names.add(name) - - # The requested module looks like a test file, so rewrite it. This is - # the most magical part of the process: load the source, rewrite the - # asserts, and load the rewritten source. We also cache the rewritten - # module code in a special pyc. We must be aware of the possibility of - # concurrent pytest processes rewriting and loading pycs. To avoid - # tricky race conditions, we maintain the following invariant: The - # cached pyc is always a complete, valid pyc. Operations on it must be - # atomic. POSIX's atomic rename comes in handy. - write = not sys.dont_write_bytecode - cache_dir = os.path.join(fn_pypath.dirname, "__pycache__") - if write: - try: - os.mkdir(cache_dir) - except OSError: - e = sys.exc_info()[1].errno - if e == errno.EEXIST: - # Either the __pycache__ directory already exists (the - # common case) or it's blocked by a non-dir node. In the - # latter case, we'll ignore it in _write_pyc. - pass - elif e in [errno.ENOENT, errno.ENOTDIR]: - # One of the path components was not a directory, likely - # because we're in a zip file. 
- write = False - elif e in [errno.EACCES, errno.EROFS, errno.EPERM]: - state.trace("read only directory: %r" % fn_pypath.dirname) - write = False - else: - raise - cache_name = fn_pypath.basename[:-3] + PYC_TAIL - pyc = os.path.join(cache_dir, cache_name) - # Notice that even if we're in a read-only directory, I'm going - # to check for a cached pyc. This may not be optimal... - co = _read_pyc(fn_pypath, pyc, state.trace) - if co is None: - state.trace("rewriting %r" % (fn,)) - source_stat, co = _rewrite_test(self.config, fn_pypath) - if co is None: - # Probably a SyntaxError in the test. - return None - if write: - self._writing_pyc = True - try: - _write_pyc(state, co, source_stat, pyc) - finally: - self._writing_pyc = False - else: - state.trace("found cached rewritten pyc for %r" % (fn,)) - self.modules[name] = co, pyc - return self - - def _early_rewrite_bailout(self, name, state): - """ - This is a fast way to get out of rewriting modules. Profiling has - shown that the call to imp.find_module (inside of the find_module - from this class) is a major slowdown, so, this method tries to - filter what we're sure won't be rewritten before getting to it. - """ - if self.session is not None and not self._session_paths_checked: - self._session_paths_checked = True - for path in self.session._initialpaths: - # Make something as c:/projects/my_project/path.py -> - # ['c:', 'projects', 'my_project', 'path.py'] - parts = str(path).split(os.path.sep) - # add 'path' to basenames to be checked. - self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) - - # Note: conftest already by default in _basenames_to_check_rewrite. - parts = name.split(".") - if parts[-1] in self._basenames_to_check_rewrite: - return False - - # For matching the name it must be as if it was a filename. - path = PurePath(os.path.sep.join(parts) + ".py") - - for pat in self.fnpats: - # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based - # on the name alone because we need to match against the full path - if os.path.dirname(pat): - return False - if fnmatch_ex(pat, path): - return False - - if self._is_marked_for_rewrite(name, state): - return False - - state.trace("early skip of rewriting module: %s" % (name,)) - return True - - def _should_rewrite(self, name, fn_pypath, state): - # always rewrite conftest files - fn = str(fn_pypath) - if fn_pypath.basename == "conftest.py": - state.trace("rewriting conftest file: %r" % (fn,)) - return True - - if self.session is not None: - if self.session.isinitpath(fn): - state.trace("matched test file (was specified on cmdline): %r" % (fn,)) - return True - - # modules not passed explicitly on the command line are only - # rewritten if they match the naming convention for test files - for pat in self.fnpats: - if fn_pypath.fnmatch(pat): - state.trace("matched test file %r" % (fn,)) - return True - - return self._is_marked_for_rewrite(name, state) - - def _is_marked_for_rewrite(self, name, state): - try: - return self._marked_for_rewrite_cache[name] - except KeyError: - for marked in self._must_rewrite: - if name == marked or name.startswith(marked + "."): - state.trace("matched marked file %r (from %r)" % (name, marked)) - self._marked_for_rewrite_cache[name] = True - return True - - self._marked_for_rewrite_cache[name] = False - return False - - def mark_rewrite(self, *names): - """Mark import names as needing to be rewritten. - - The named module or package as well as any nested modules will - be rewritten on import. 
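`mark_rewrite()` is the mechanism behind the public `pytest.register_assert_rewrite()` shown earlier in this diff. A plugin distributed as a package would typically call it from its own `__init__.py`, before the helper module is imported (the module name here is a stand-in):

```python
# myplugin/__init__.py -- "myplugin.helpers" is a hypothetical module name
import pytest

pytest.register_assert_rewrite("myplugin.helpers")
```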
- """ - already_imported = ( - set(names).intersection(sys.modules).difference(self._rewritten_names) - ) - for name in already_imported: - if not AssertionRewriter.is_rewrite_disabled( - sys.modules[name].__doc__ or "" - ): - self._warn_already_imported(name) - self._must_rewrite.update(names) - self._marked_for_rewrite_cache.clear() - - def _warn_already_imported(self, name): - from _pytest.warning_types import PytestWarning - from _pytest.warnings import _issue_config_warning - - _issue_config_warning( - PytestWarning("Module already imported so cannot be rewritten: %s" % name), - self.config, - ) - - def load_module(self, name): - co, pyc = self.modules.pop(name) - if name in sys.modules: - # If there is an existing module object named 'fullname' in - # sys.modules, the loader must use that existing module. (Otherwise, - # the reload() builtin will not work correctly.) - mod = sys.modules[name] - else: - # I wish I could just call imp.load_compiled here, but __file__ has to - # be set properly. In Python 3.2+, this all would be handled correctly - # by load_compiled. - mod = sys.modules[name] = imp.new_module(name) - try: - mod.__file__ = co.co_filename - # Normally, this attribute is 3.2+. - mod.__cached__ = pyc - mod.__loader__ = self - # Normally, this attribute is 3.4+ - mod.__spec__ = spec_from_file_location(name, co.co_filename, loader=self) - six.exec_(co, mod.__dict__) - except: # noqa - if name in sys.modules: - del sys.modules[name] - raise - return sys.modules[name] - - def is_package(self, name): - try: - fd, fn, desc = self._imp_find_module(name) - except ImportError: - return False - if fd is not None: - fd.close() - tp = desc[2] - return tp == imp.PKG_DIRECTORY - - @classmethod - def _register_with_pkg_resources(cls): - """ - Ensure package resources can be loaded from this loader. May be called - multiple times, as the operation is idempotent. - """ - try: - import pkg_resources - - # access an attribute in case a deferred importer is present - pkg_resources.__name__ - except ImportError: - return - - # Since pytest tests are always located in the file system, the - # DefaultProvider is appropriate. - pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider) - - def get_data(self, pathname): - """Optional PEP302 get_data API. - """ - with open(pathname, "rb") as f: - return f.read() - - -def _write_pyc(state, co, source_stat, pyc): - # Technically, we don't have to have the same pyc format as - # (C)Python, since these "pycs" should never be seen by builtin - # import. However, there's little reason deviate, and I hope - # sometime to be able to use imp.load_compiled to load them. (See - # the comment in load_module above.) 
- try: - with atomicwrites.atomic_write(pyc, mode="wb", overwrite=True) as fp: - fp.write(imp.get_magic()) - mtime = int(source_stat.mtime) - size = source_stat.size & 0xFFFFFFFF - fp.write(struct.pack("<ll", mtime, size)) - marshal.dump(co, fp) - except EnvironmentError as e: - state.trace("error writing pyc file at %s: errno=%s" % (pyc, e.errno)) - # we ignore any failure to write the cache file - # there are many reasons, permission-denied, __pycache__ being a - # file etc. - return False - return True - - -unary_map = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"} - -binop_map = { - ast.BitOr: "|", - ast.BitXor: "^", - ast.BitAnd: "&", - ast.LShift: "<<", - ast.RShift: ">>", - ast.Add: "+", - ast.Sub: "-", - ast.Mult: "*", - ast.Div: "/", - ast.FloorDiv: "//", - ast.Mod: "%%", # escaped for string formatting - ast.Eq: "==", - ast.NotEq: "!=", - ast.Lt: "<", - ast.LtE: "<=", - ast.Gt: ">", - ast.GtE: ">=", - ast.Pow: "**", - ast.Is: "is", - ast.IsNot: "is not", - ast.In: "in", - ast.NotIn: "not in", -} -# Python 3.5+ compatibility -try: - binop_map[ast.MatMult] = "@" -except AttributeError: - pass - -# Python 3.4+ compatibility -if hasattr(ast, "NameConstant"): - _NameConstant = ast.NameConstant -else: - - def _NameConstant(c): - return ast.Name(str(c), ast.Load()) - - -def set_location(node, lineno, col_offset): - """Set node location information recursively.""" - - def _fix(node, lineno, col_offset): - if "lineno" in node._attributes: - node.lineno = lineno - if "col_offset" in node._attributes: - node.col_offset = col_offset - for child in ast.iter_child_nodes(node): - _fix(child, lineno, col_offset) - - _fix(node, lineno, col_offset) - return node - - -class AssertionRewriter(ast.NodeVisitor): - """Assertion rewriting implementation. - - The main entrypoint is to call .run() with an ast.Module instance; - this will then find all the assert statements and rewrite them to - provide intermediate values and a detailed assertion error. See - http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html - for an overview of how this works. - - The entry point here is .run() which will iterate over all the - statements in an ast.Module and for each ast.Assert statement it - finds call .visit() with it. Then .visit_Assert() takes over and - is responsible for creating new ast statements to replace the - original assert statement: it rewrites the test of an assertion - to provide intermediate values and replace it with an if statement - which raises an assertion error with a detailed explanation in - case the expression is false. - - For this .visit_Assert() uses the visitor pattern to visit all the - AST nodes of the ast.Assert.test field, each visit call returning - an AST node and the corresponding explanation string. During this, - state is kept in several instance attributes: - - :statements: All the AST statements which will replace the assert - statement. - - :variables: This is populated by .variable() with each variable - used by the statements so that they can all be set to None at - the end of the statements. - - :variable_counter: Counter to create new unique variables needed - by statements. Variables are created using .variable() and - have the form of "@py_assert0". - - :on_failure: The AST statements which will be executed if the - assertion test fails. This is the code which constructs - the failure message and raises the AssertionError. - - :explanation_specifiers: A dict filled by .explanation_param() - with %-formatting placeholders and their corresponding - expressions to use in the building of an assertion message. - This is used by .pop_format_context() to build a message. - - :stack: A stack of the explanation_specifiers dicts maintained by - .push_format_context() and .pop_format_context() which allows - building another %-formatted string while already building one. - - This state is reset on every new assert statement visited and used - by the other visitors.
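To make the docstring above concrete: rewriting starts from the plain `ast.Assert` node and replaces it with statements that keep intermediate values. The snippet below only shows the input side, i.e. the AST the rewriter visits; the rewritten output sketched in the docstring is produced by the visitor methods that follow:

```python
import ast

tree = ast.parse("assert x == y, 'values differ'")
node = tree.body[0]
print(type(node).__name__)  # Assert
print(ast.dump(node.test))  # Compare(left=Name(id='x', ...), ops=[Eq()], ...)
```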
- - """ - - def __init__(self, module_path, config): - super(AssertionRewriter, self).__init__() - self.module_path = module_path - self.config = config - - def run(self, mod): - """Find all assert statements in *mod* and rewrite them.""" - if not mod.body: - # Nothing to do. - return - # Insert some special imports at the top of the module but after any - # docstrings and __future__ imports. - aliases = [ - ast.alias(py.builtin.builtins.__name__, "@py_builtins"), - ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), - ] - doc = getattr(mod, "docstring", None) - expect_docstring = doc is None - if doc is not None and self.is_rewrite_disabled(doc): - return - pos = 0 - lineno = 1 - for item in mod.body: - if ( - expect_docstring - and isinstance(item, ast.Expr) - and isinstance(item.value, ast.Str) - ): - doc = item.value.s - if self.is_rewrite_disabled(doc): - return - expect_docstring = False - elif ( - not isinstance(item, ast.ImportFrom) - or item.level > 0 - or item.module != "__future__" - ): - lineno = item.lineno - break - pos += 1 - else: - lineno = item.lineno - imports = [ - ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases - ] - mod.body[pos:pos] = imports - # Collect asserts. - nodes = [mod] - while nodes: - node = nodes.pop() - for name, field in ast.iter_fields(node): - if isinstance(field, list): - new = [] - for i, child in enumerate(field): - if isinstance(child, ast.Assert): - # Transform assert. - new.extend(self.visit(child)) - else: - new.append(child) - if isinstance(child, ast.AST): - nodes.append(child) - setattr(node, name, new) - elif ( - isinstance(field, ast.AST) - # Don't recurse into expressions as they can't contain - # asserts. - and not isinstance(field, ast.expr) - ): - nodes.append(field) - - @staticmethod - def is_rewrite_disabled(docstring): - return "PYTEST_DONT_REWRITE" in docstring - - def variable(self): - """Get a new variable.""" - # Use a character invalid in python identifiers to avoid clashing. - name = "@py_assert" + str(next(self.variable_counter)) - self.variables.append(name) - return name - - def assign(self, expr): - """Give *expr* a name.""" - name = self.variable() - self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) - return ast.Name(name, ast.Load()) - - def display(self, expr): - """Call py.io.saferepr on the expression.""" - return self.helper("saferepr", expr) - - def helper(self, name, *args): - """Call a helper in this module.""" - py_name = ast.Name("@pytest_ar", ast.Load()) - attr = ast.Attribute(py_name, "_" + name, ast.Load()) - return ast_Call(attr, list(args), []) - - def builtin(self, name): - """Return the builtin called *name*.""" - builtin_name = ast.Name("@py_builtins", ast.Load()) - return ast.Attribute(builtin_name, name, ast.Load()) - - def explanation_param(self, expr): - """Return a new named %-formatting placeholder for expr. - - This creates a %-formatting placeholder for expr in the - current formatting context, e.g. ``%(py0)s``. The placeholder - and expr are placed in the current format context so that it - can be used on the next call to .pop_format_context(). - - """ - specifier = "py" + str(next(self.variable_counter)) - self.explanation_specifiers[specifier] = expr - return "%(" + specifier + ")s" - - def push_format_context(self): - """Create a new formatting context. - - The format context is used for when an explanation wants to - have a variable value formatted in the assertion message. 
In - this case the value required can be added using - .explanation_param(). Finally .pop_format_context() is used - to format a string of %-formatted values as added by - .explanation_param(). - - """ - self.explanation_specifiers = {} - self.stack.append(self.explanation_specifiers) - - def pop_format_context(self, expl_expr): - """Format the %-formatted string with current format context. - - The expl_expr should be an ast.Str instance constructed from - the %-placeholders created by .explanation_param(). This will - add the required code to format said string to .on_failure and - return the ast.Name instance of the formatted string. - - """ - current = self.stack.pop() - if self.stack: - self.explanation_specifiers = self.stack[-1] - keys = [ast.Str(key) for key in current.keys()] - format_dict = ast.Dict(keys, list(current.values())) - form = ast.BinOp(expl_expr, ast.Mod(), format_dict) - name = "@py_format" + str(next(self.variable_counter)) - self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form)) - return ast.Name(name, ast.Load()) - - def generic_visit(self, node): - """Handle expressions we don't have custom code for.""" - assert isinstance(node, ast.expr) - res = self.assign(node) - return res, self.explanation_param(self.display(res)) - - def visit_Assert(self, assert_): - """Return the AST statements to replace the ast.Assert instance. - - This rewrites the test of an assertion to provide - intermediate values and replace it with an if statement which - raises an assertion error with a detailed explanation in case - the expression is false. - - """ - if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: - from _pytest.warning_types import PytestWarning - import warnings - - warnings.warn_explicit( - PytestWarning("assertion is always true, perhaps remove parentheses?"), - category=None, - filename=str(self.module_path), - lineno=assert_.lineno, - ) - - self.statements = [] - self.variables = [] - self.variable_counter = itertools.count() - self.stack = [] - self.on_failure = [] - self.push_format_context() - # Rewrite assert into a bunch of statements. - top_condition, explanation = self.visit(assert_.test) - # Create failure message. - body = self.on_failure - negation = ast.UnaryOp(ast.Not(), top_condition) - self.statements.append(ast.If(negation, body, [])) - if assert_.msg: - assertmsg = self.helper("format_assertmsg", assert_.msg) - explanation = "\n>assert " + explanation - else: - assertmsg = ast.Str("") - explanation = "assert " + explanation - template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation)) - msg = self.pop_format_context(template) - fmt = self.helper("format_explanation", msg) - err_name = ast.Name("AssertionError", ast.Load()) - exc = ast_Call(err_name, [fmt], []) - if sys.version_info[0] >= 3: - raise_ = ast.Raise(exc, None) - else: - raise_ = ast.Raise(exc, None, None) - body.append(raise_) - # Clear temporary variables by setting them to None. - if self.variables: - variables = [ast.Name(name, ast.Store()) for name in self.variables] - clear = ast.Assign(variables, _NameConstant(None)) - self.statements.append(clear) - # Fix line numbers. - for stmt in self.statements: - set_location(stmt, assert_.lineno, assert_.col_offset) - return self.statements - - def visit_Name(self, name): - # Display the repr of the name if it's a local variable or - # _should_repr_global_name() thinks it's acceptable. 
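To make the transformation concrete, here is a hand-written, simplified equivalent of what visit_Assert emits for `assert a == b`. The real temporaries use names with a leading "@" (invalid in source code) and the real message goes through the _format_explanation helper, so this is only an approximation, not the exact generated AST:

def demo_rewritten_assert(a, b):
    # roughly what `assert a == b` becomes after rewriting
    py_assert0 = a == b
    if not py_assert0:
        py_format1 = "assert %(py0)s == %(py1)s" % {"py0": repr(a), "py1": repr(b)}
        raise AssertionError(py_format1)
    py_assert0 = None  # temporaries are cleared at the end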
- locs = ast_Call(self.builtin("locals"), [], []) - inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs]) - dorepr = self.helper("should_repr_global_name", name) - test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) - expr = ast.IfExp(test, self.display(name), ast.Str(name.id)) - return name, self.explanation_param(expr) - - def visit_BoolOp(self, boolop): - res_var = self.variable() - expl_list = self.assign(ast.List([], ast.Load())) - app = ast.Attribute(expl_list, "append", ast.Load()) - is_or = int(isinstance(boolop.op, ast.Or)) - body = save = self.statements - fail_save = self.on_failure - levels = len(boolop.values) - 1 - self.push_format_context() - # Process each operand, short-circuting if needed. - for i, v in enumerate(boolop.values): - if i: - fail_inner = [] - # cond is set in a prior loop iteration below - self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa - self.on_failure = fail_inner - self.push_format_context() - res, expl = self.visit(v) - body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) - expl_format = self.pop_format_context(ast.Str(expl)) - call = ast_Call(app, [expl_format], []) - self.on_failure.append(ast.Expr(call)) - if i < levels: - cond = res - if is_or: - cond = ast.UnaryOp(ast.Not(), cond) - inner = [] - self.statements.append(ast.If(cond, inner, [])) - self.statements = body = inner - self.statements = save - self.on_failure = fail_save - expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or)) - expl = self.pop_format_context(expl_template) - return ast.Name(res_var, ast.Load()), self.explanation_param(expl) - - def visit_UnaryOp(self, unary): - pattern = unary_map[unary.op.__class__] - operand_res, operand_expl = self.visit(unary.operand) - res = self.assign(ast.UnaryOp(unary.op, operand_res)) - return res, pattern % (operand_expl,) - - def visit_BinOp(self, binop): - symbol = binop_map[binop.op.__class__] - left_expr, left_expl = self.visit(binop.left) - right_expr, right_expl = self.visit(binop.right) - explanation = "(%s %s %s)" % (left_expl, symbol, right_expl) - res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) - return res, explanation - - def visit_Call_35(self, call): - """ - visit `ast.Call` nodes on Python3.5 and after - """ - new_func, func_expl = self.visit(call.func) - arg_expls = [] - new_args = [] - new_kwargs = [] - for arg in call.args: - res, expl = self.visit(arg) - arg_expls.append(expl) - new_args.append(res) - for keyword in call.keywords: - res, expl = self.visit(keyword.value) - new_kwargs.append(ast.keyword(keyword.arg, res)) - if keyword.arg: - arg_expls.append(keyword.arg + "=" + expl) - else: # **args have `arg` keywords with an .arg of None - arg_expls.append("**" + expl) - - expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs) - res = self.assign(new_call) - res_expl = self.explanation_param(self.display(res)) - outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) - return res, outer_expl - - def visit_Starred(self, starred): - # From Python 3.5, a Starred node can appear in a function call - res, expl = self.visit(starred.value) - return starred, "*" + expl - - def visit_Call_legacy(self, call): - """ - visit `ast.Call nodes on 3.4 and below` - """ - new_func, func_expl = self.visit(call.func) - arg_expls = [] - new_args = [] - new_kwargs = [] - new_star = new_kwarg = None - for arg in call.args: - res, expl = self.visit(arg) - new_args.append(res) - arg_expls.append(expl) - for keyword in call.keywords: - res, expl = 
self.visit(keyword.value) - new_kwargs.append(ast.keyword(keyword.arg, res)) - arg_expls.append(keyword.arg + "=" + expl) - if call.starargs: - new_star, expl = self.visit(call.starargs) - arg_expls.append("*" + expl) - if call.kwargs: - new_kwarg, expl = self.visit(call.kwargs) - arg_expls.append("**" + expl) - expl = "%s(%s)" % (func_expl, ", ".join(arg_expls)) - new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg) - res = self.assign(new_call) - res_expl = self.explanation_param(self.display(res)) - outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl) - return res, outer_expl - - # ast.Call signature changed on 3.5, - # conditionally change which methods is named - # visit_Call depending on Python version - if sys.version_info >= (3, 5): - visit_Call = visit_Call_35 - else: - visit_Call = visit_Call_legacy - - def visit_Attribute(self, attr): - if not isinstance(attr.ctx, ast.Load): - return self.generic_visit(attr) - value, value_expl = self.visit(attr.value) - res = self.assign(ast.Attribute(value, attr.attr, ast.Load())) - res_expl = self.explanation_param(self.display(res)) - pat = "%s\n{%s = %s.%s\n}" - expl = pat % (res_expl, res_expl, value_expl, attr.attr) - return res, expl - - def visit_Compare(self, comp): - self.push_format_context() - left_res, left_expl = self.visit(comp.left) - if isinstance(comp.left, (ast.Compare, ast.BoolOp)): - left_expl = "({})".format(left_expl) - res_variables = [self.variable() for i in range(len(comp.ops))] - load_names = [ast.Name(v, ast.Load()) for v in res_variables] - store_names = [ast.Name(v, ast.Store()) for v in res_variables] - it = zip(range(len(comp.ops)), comp.ops, comp.comparators) - expls = [] - syms = [] - results = [left_res] - for i, op, next_operand in it: - next_res, next_expl = self.visit(next_operand) - if isinstance(next_operand, (ast.Compare, ast.BoolOp)): - next_expl = "({})".format(next_expl) - results.append(next_res) - sym = binop_map[op.__class__] - syms.append(ast.Str(sym)) - expl = "%s %s %s" % (left_expl, sym, next_expl) - expls.append(ast.Str(expl)) - res_expr = ast.Compare(left_res, [op], [next_res]) - self.statements.append(ast.Assign([store_names[i]], res_expr)) - left_res, left_expl = next_res, next_expl - # Use pytest.assertion.util._reprcompare if that's available. - expl_call = self.helper( - "call_reprcompare", - ast.Tuple(syms, ast.Load()), - ast.Tuple(load_names, ast.Load()), - ast.Tuple(expls, ast.Load()), - ast.Tuple(results, ast.Load()), - ) - if len(comp.ops) > 1: - res = ast.BoolOp(ast.And(), load_names) - else: - res = load_names[0] - return res, self.explanation_param(self.pop_format_context(expl_call)) diff --git a/venv/lib/python3.8/site-packages/_pytest/assertion/truncate.py b/venv/lib/python3.8/site-packages/_pytest/assertion/truncate.py deleted file mode 100644 index d19c8b61..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/assertion/truncate.py +++ /dev/null @@ -1,102 +0,0 @@ -""" -Utilities for truncating assertion output. - -Current default behaviour is to truncate assertion explanations at -~8 terminal lines, unless running in "-vv" mode or running on CI. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import six - - -DEFAULT_MAX_LINES = 8 -DEFAULT_MAX_CHARS = 8 * 80 -USAGE_MSG = "use '-vv' to show" - - -def truncate_if_required(explanation, item, max_length=None): - """ - Truncate this assertion explanation if the given test item is eligible. 
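The eligibility rule just stated ("-vv" or CI disables truncation) is small enough to show in isolation; a runnable sketch assuming the same environment-variable convention used by this module:

import os

def should_truncate(verbose):
    running_on_ci = any(var in os.environ for var in ["CI", "BUILD_NUMBER"])
    return verbose < 2 and not running_on_ci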
- """ - if _should_truncate_item(item): - return _truncate_explanation(explanation) - return explanation - - -def _should_truncate_item(item): - """ - Whether or not this test item is eligible for truncation. - """ - verbose = item.config.option.verbose - return verbose < 2 and not _running_on_ci() - - -def _running_on_ci(): - """Check if we're currently running on a CI system.""" - env_vars = ["CI", "BUILD_NUMBER"] - return any(var in os.environ for var in env_vars) - - -def _truncate_explanation(input_lines, max_lines=None, max_chars=None): - """ - Truncate given list of strings that makes up the assertion explanation. - - Truncates to either 8 lines, or 640 characters - whichever the input reaches - first. The remaining lines will be replaced by a usage message. - """ - - if max_lines is None: - max_lines = DEFAULT_MAX_LINES - if max_chars is None: - max_chars = DEFAULT_MAX_CHARS - - # Check if truncation required - input_char_count = len("".join(input_lines)) - if len(input_lines) <= max_lines and input_char_count <= max_chars: - return input_lines - - # Truncate first to max_lines, and then truncate to max_chars if max_chars - # is exceeded. - truncated_explanation = input_lines[:max_lines] - truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars) - - # Add ellipsis to final line - truncated_explanation[-1] = truncated_explanation[-1] + "..." - - # Append useful message to explanation - truncated_line_count = len(input_lines) - len(truncated_explanation) - truncated_line_count += 1 # Account for the part-truncated final line - msg = "...Full output truncated" - if truncated_line_count == 1: - msg += " ({} line hidden)".format(truncated_line_count) - else: - msg += " ({} lines hidden)".format(truncated_line_count) - msg += ", {}".format(USAGE_MSG) - truncated_explanation.extend([six.text_type(""), six.text_type(msg)]) - return truncated_explanation - - -def _truncate_by_char_count(input_lines, max_chars): - # Check if truncation required - if len("".join(input_lines)) <= max_chars: - return input_lines - - # Find point at which input length exceeds total allowed length - iterated_char_count = 0 - for iterated_index, input_line in enumerate(input_lines): - if iterated_char_count + len(input_line) > max_chars: - break - iterated_char_count += len(input_line) - - # Create truncated explanation with modified final line - truncated_result = input_lines[:iterated_index] - final_line = input_lines[iterated_index] - if final_line: - final_line_truncate_point = max_chars - iterated_char_count - final_line = final_line[:final_line_truncate_point] - truncated_result.append(final_line) - return truncated_result diff --git a/venv/lib/python3.8/site-packages/_pytest/assertion/util.py b/venv/lib/python3.8/site-packages/_pytest/assertion/util.py deleted file mode 100644 index 451e4549..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/assertion/util.py +++ /dev/null @@ -1,334 +0,0 @@ -"""Utilities for assertion debugging""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import pprint - -import py -import six - -import _pytest._code -from ..compat import Sequence - -# The _reprcompare attribute on the util module is used by the new assertion -# interpretation code and assertion rewriter to detect this plugin was -# loaded and in turn call the hooks defined here as part of the -# DebugInterpreter. 
-_reprcompare = None - - -# the re-encoding is needed for python2 repr -# with non-ascii characters (see issue 877 and 1379) -def ecu(s): - if isinstance(s, bytes): - return s.decode("UTF-8", "replace") - else: - return s - - -def format_explanation(explanation): - """This formats an explanation - - Normally all embedded newlines are escaped; however, there are - three exceptions: \n{, \n} and \n~. The first two are intended to - cover nested explanations, see function and attribute explanations - for examples (.visit_Call(), visit_Attribute()). The last one is - for when one explanation needs to span multiple lines, e.g. when - displaying diffs. - """ - explanation = ecu(explanation) - lines = _split_explanation(explanation) - result = _format_lines(lines) - return u"\n".join(result) - - -def _split_explanation(explanation): - """Return a list of individual lines in the explanation - - This will return a list of lines split on '\n{', '\n}' and '\n~'. - Any other newlines will be escaped and appear in the line as the - literal '\n' characters. - """ - raw_lines = (explanation or u"").split("\n") - lines = [raw_lines[0]] - for values in raw_lines[1:]: - if values and values[0] in ["{", "}", "~", ">"]: - lines.append(values) - else: - lines[-1] += "\\n" + values - return lines - - -def _format_lines(lines): - """Format the individual lines - - This will replace the '{', '}' and '~' characters of our mini - formatting language with the proper 'where ...', 'and ...' and ' + - ...' text, taking care of indentation along the way. - - Return a list of formatted lines. - """ - result = lines[:1] - stack = [0] - stackcnt = [0] - for line in lines[1:]: - if line.startswith("{"): - if stackcnt[-1]: - s = u"and " - else: - s = u"where " - stack.append(len(result)) - stackcnt[-1] += 1 - stackcnt.append(0) - result.append(u" +" + u" " * (len(stack) - 1) + s + line[1:]) - elif line.startswith("}"): - stack.pop() - stackcnt.pop() - result[stack[-1]] += line[1:] - else: - assert line[0] in ["~", ">"] - stack[-1] += 1 - indent = len(stack) if line.startswith("~") else len(stack) - 1 - result.append(u" " * indent + line[1:]) - assert len(stack) == 1 - return result - - -# Provide basestring in python3 -try: - basestring = basestring -except NameError: - basestring = str - - -def assertrepr_compare(config, op, left, right): - """Return specialised explanations for some operators/operands""" - width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op - left_repr = py.io.saferepr(left, maxsize=int(width // 2)) - right_repr = py.io.saferepr(right, maxsize=width - len(left_repr)) - - summary = u"%s %s %s" % (ecu(left_repr), op, ecu(right_repr)) - - def issequence(x): - return isinstance(x, Sequence) and not isinstance(x, basestring) - - def istext(x): - return isinstance(x, basestring) - - def isdict(x): - return isinstance(x, dict) - - def isset(x): - return isinstance(x, (set, frozenset)) - - def isiterable(obj): - try: - iter(obj) - return not istext(obj) - except TypeError: - return False - - verbose = config.getoption("verbose") - explanation = None - try: - if op == "==": - if istext(left) and istext(right): - explanation = _diff_text(left, right, verbose) - else: - if issequence(left) and issequence(right): - explanation = _compare_eq_sequence(left, right, verbose) - elif isset(left) and isset(right): - explanation = _compare_eq_set(left, right, verbose) - elif isdict(left) and isdict(right): - explanation = _compare_eq_dict(left, right, verbose) - if isiterable(left) and isiterable(right): -
expl = _compare_eq_iterable(left, right, verbose) - if explanation is not None: - explanation.extend(expl) - else: - explanation = expl - elif op == "not in": - if istext(left) and istext(right): - explanation = _notin_text(left, right, verbose) - except Exception: - explanation = [ - u"(pytest_assertion plugin: representation of details failed. " - u"Probably an object has a faulty __repr__.)", - six.text_type(_pytest._code.ExceptionInfo()), - ] - - if not explanation: - return None - - return [summary] + explanation - - -def _diff_text(left, right, verbose=False): - """Return the explanation for the diff between text or bytes - - Unless --verbose is used this will skip leading and trailing - characters which are identical to keep the diff minimal. - - If the input are bytes they will be safely converted to text. - """ - from difflib import ndiff - - explanation = [] - - def escape_for_readable_diff(binary_text): - """ - Ensures that the internal string is always valid unicode, converting any bytes safely to valid unicode. - This is done using repr() which then needs post-processing to fix the encompassing quotes and un-escape - newlines and carriage returns (#429). - """ - r = six.text_type(repr(binary_text)[1:-1]) - r = r.replace(r"\n", "\n") - r = r.replace(r"\r", "\r") - return r - - if isinstance(left, bytes): - left = escape_for_readable_diff(left) - if isinstance(right, bytes): - right = escape_for_readable_diff(right) - if not verbose: - i = 0 # just in case left or right has zero length - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - break - if i > 42: - i -= 10 # Provide some context - explanation = [ - u"Skipping %s identical leading characters in diff, use -v to show" % i - ] - left = left[i:] - right = right[i:] - if len(left) == len(right): - for i in range(len(left)): - if left[-i] != right[-i]: - break - if i > 42: - i -= 10 # Provide some context - explanation += [ - u"Skipping {} identical trailing " - u"characters in diff, use -v to show".format(i) - ] - left = left[:-i] - right = right[:-i] - keepends = True - if left.isspace() or right.isspace(): - left = repr(str(left)) - right = repr(str(right)) - explanation += [u"Strings contain only whitespace, escaping them using repr()"] - explanation += [ - line.strip("\n") - for line in ndiff(left.splitlines(keepends), right.splitlines(keepends)) - ] - return explanation - - -def _compare_eq_iterable(left, right, verbose=False): - if not verbose: - return [u"Use -v to get the full diff"] - # dynamic import to speedup pytest - import difflib - - try: - left_formatting = pprint.pformat(left).splitlines() - right_formatting = pprint.pformat(right).splitlines() - explanation = [u"Full diff:"] - except Exception: - # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling - # sorted() on a list would raise. See issue #718. - # As a workaround, the full diff is generated by using the repr() string of each item of each container. 
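The "Full diff" branch above boils down to difflib.ndiff over pprint.pformat output; a runnable miniature:

import difflib
import pprint

left, right = [1, 2, 3], [1, 2, 4]
diff = difflib.ndiff(pprint.pformat(left).splitlines(), pprint.pformat(right).splitlines())
print("\n".join(line.strip() for line in diff))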
- left_formatting = sorted(repr(x) for x in left) - right_formatting = sorted(repr(x) for x in right) - explanation = [u"Full diff (fallback to calling repr on each item):"] - explanation.extend( - line.strip() for line in difflib.ndiff(left_formatting, right_formatting) - ) - return explanation - - -def _compare_eq_sequence(left, right, verbose=False): - explanation = [] - for i in range(min(len(left), len(right))): - if left[i] != right[i]: - explanation += [u"At index %s diff: %r != %r" % (i, left[i], right[i])] - break - if len(left) > len(right): - explanation += [ - u"Left contains more items, first extra item: %s" - % py.io.saferepr(left[len(right)]) - ] - elif len(left) < len(right): - explanation += [ - u"Right contains more items, first extra item: %s" - % py.io.saferepr(right[len(left)]) - ] - return explanation - - -def _compare_eq_set(left, right, verbose=False): - explanation = [] - diff_left = left - right - diff_right = right - left - if diff_left: - explanation.append(u"Extra items in the left set:") - for item in diff_left: - explanation.append(py.io.saferepr(item)) - if diff_right: - explanation.append(u"Extra items in the right set:") - for item in diff_right: - explanation.append(py.io.saferepr(item)) - return explanation - - -def _compare_eq_dict(left, right, verbose=False): - explanation = [] - common = set(left).intersection(set(right)) - same = {k: left[k] for k in common if left[k] == right[k]} - if same and verbose < 2: - explanation += [u"Omitting %s identical items, use -vv to show" % len(same)] - elif same: - explanation += [u"Common items:"] - explanation += pprint.pformat(same).splitlines() - diff = {k for k in common if left[k] != right[k]} - if diff: - explanation += [u"Differing items:"] - for k in diff: - explanation += [ - py.io.saferepr({k: left[k]}) + " != " + py.io.saferepr({k: right[k]}) - ] - extra_left = set(left) - set(right) - if extra_left: - explanation.append(u"Left contains more items:") - explanation.extend( - pprint.pformat({k: left[k] for k in extra_left}).splitlines() - ) - extra_right = set(right) - set(left) - if extra_right: - explanation.append(u"Right contains more items:") - explanation.extend( - pprint.pformat({k: right[k] for k in extra_right}).splitlines() - ) - return explanation - - -def _notin_text(term, text, verbose=False): - index = text.find(term) - head = text[:index] - tail = text[index + len(term) :] - correct_text = head + tail - diff = _diff_text(correct_text, text, verbose) - newdiff = [u"%s is contained here:" % py.io.saferepr(term, maxsize=42)] - for line in diff: - if line.startswith(u"Skipping"): - continue - if line.startswith(u"- "): - continue - if line.startswith(u"+ "): - newdiff.append(u" " + line[2:]) - else: - newdiff.append(line) - return newdiff diff --git a/venv/lib/python3.8/site-packages/_pytest/cacheprovider.py b/venv/lib/python3.8/site-packages/_pytest/cacheprovider.py deleted file mode 100644 index a78d857f..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/cacheprovider.py +++ /dev/null @@ -1,367 +0,0 @@ -""" -merged implementation of the cache provider - -the name cache was not chosen to ensure pluggy automatically -ignores the external pytest-cache -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import json -from collections import OrderedDict - -import attr -import py -import six - -import pytest -from .compat import _PY2 as PY2 -from .pathlib import Path -from .pathlib import resolve_from_str -from .pathlib import 
rmtree - -README_CONTENT = u"""\ -# pytest cache directory # - -This directory contains data from pytest's cache plugin, -which provides the `--lf` and `--ff` options, as well as the `cache` fixture. - -**Do not** commit this to version control. - -See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information. -""" - - -@attr.s -class Cache(object): - _cachedir = attr.ib(repr=False) - _config = attr.ib(repr=False) - - @classmethod - def for_config(cls, config): - cachedir = cls.cache_dir_from_config(config) - if config.getoption("cacheclear") and cachedir.exists(): - rmtree(cachedir, force=True) - cachedir.mkdir() - return cls(cachedir, config) - - @staticmethod - def cache_dir_from_config(config): - return resolve_from_str(config.getini("cache_dir"), config.rootdir) - - def warn(self, fmt, **args): - from _pytest.warnings import _issue_config_warning - from _pytest.warning_types import PytestWarning - - _issue_config_warning( - PytestWarning(fmt.format(**args) if args else fmt), self._config - ) - - def makedir(self, name): - """ return a directory path object with the given name. If the - directory does not yet exist, it will be created. You can use it - to manage files to e.g. store/retrieve database - dumps across test sessions. - - :param name: must be a string not containing a ``/`` separator. - Make sure the name contains your plugin or application - identifiers to prevent clashes with other cache users. - """ - name = Path(name) - if len(name.parts) > 1: - raise ValueError("name is not allowed to contain path separators") - res = self._cachedir.joinpath("d", name) - res.mkdir(exist_ok=True, parents=True) - return py.path.local(res) - - def _getvaluepath(self, key): - return self._cachedir.joinpath("v", Path(key)) - - def get(self, key, default): - """ return cached value for the given key. If no value - was yet cached or the value cannot be read, the specified - default is returned. - - :param key: must be a ``/`` separated value. Usually the first - name is the name of your plugin or your application. - :param default: must be provided in case of a cache-miss or - invalid cache values. - - """ - path = self._getvaluepath(key) - try: - with path.open("r") as f: - return json.load(f) - except (ValueError, IOError, OSError): - return default - - def set(self, key, value): - """ save value for the given key. - - :param key: must be a ``/`` separated value. Usually the first - name is the name of your plugin or your application. - :param value: must be any combination of basic - python types, including nested types, - e.g. lists of dictionaries.
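In use, get() and set() pair up like this; a sketch of a hypothetical plugin persisting state across runs (the "myplugin/" key prefix follows the clash-avoidance convention described above):

def pytest_sessionfinish(session):
    session.config.cache.set("myplugin/lastrun", {"count": 42})

def pytest_sessionstart(session):
    previous = session.config.cache.get("myplugin/lastrun", {})
    print(previous)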
- """ - path = self._getvaluepath(key) - try: - path.parent.mkdir(exist_ok=True, parents=True) - except (IOError, OSError): - self.warn("could not create cache path {path}", path=path) - return - try: - f = path.open("wb" if PY2 else "w") - except (IOError, OSError): - self.warn("cache could not write path {path}", path=path) - else: - with f: - json.dump(value, f, indent=2, sort_keys=True) - self._ensure_supporting_files() - - def _ensure_supporting_files(self): - """Create supporting files in the cache dir that are not really part of the cache.""" - if self._cachedir.is_dir(): - readme_path = self._cachedir / "README.md" - if not readme_path.is_file(): - readme_path.write_text(README_CONTENT) - - msg = u"# created by pytest automatically, do not change\n*" - self._cachedir.joinpath(".gitignore").write_text(msg, encoding="UTF-8") - - -class LFPlugin(object): - """ Plugin which implements the --lf (run last-failing) option """ - - def __init__(self, config): - self.config = config - active_keys = "lf", "failedfirst" - self.active = any(config.getoption(key) for key in active_keys) - self.lastfailed = config.cache.get("cache/lastfailed", {}) - self._previously_failed_count = None - self._no_failures_behavior = self.config.getoption("last_failed_no_failures") - - def pytest_report_collectionfinish(self): - if self.active and self.config.getoption("verbose") >= 0: - if not self._previously_failed_count: - return None - noun = "failure" if self._previously_failed_count == 1 else "failures" - suffix = " first" if self.config.getoption("failedfirst") else "" - mode = "rerun previous {count} {noun}{suffix}".format( - count=self._previously_failed_count, suffix=suffix, noun=noun - ) - return "run-last-failure: %s" % mode - - def pytest_runtest_logreport(self, report): - if (report.when == "call" and report.passed) or report.skipped: - self.lastfailed.pop(report.nodeid, None) - elif report.failed: - self.lastfailed[report.nodeid] = True - - def pytest_collectreport(self, report): - passed = report.outcome in ("passed", "skipped") - if passed: - if report.nodeid in self.lastfailed: - self.lastfailed.pop(report.nodeid) - self.lastfailed.update((item.nodeid, True) for item in report.result) - else: - self.lastfailed[report.nodeid] = True - - def pytest_collection_modifyitems(self, session, config, items): - if self.active: - if self.lastfailed: - previously_failed = [] - previously_passed = [] - for item in items: - if item.nodeid in self.lastfailed: - previously_failed.append(item) - else: - previously_passed.append(item) - self._previously_failed_count = len(previously_failed) - if not previously_failed: - # running a subset of all tests with recorded failures outside - # of the set of tests currently executing - return - if self.config.getoption("lf"): - items[:] = previously_failed - config.hook.pytest_deselected(items=previously_passed) - else: - items[:] = previously_failed + previously_passed - elif self._no_failures_behavior == "none": - config.hook.pytest_deselected(items=items) - items[:] = [] - - def pytest_sessionfinish(self, session): - config = self.config - if config.getoption("cacheshow") or hasattr(config, "slaveinput"): - return - - saved_lastfailed = config.cache.get("cache/lastfailed", {}) - if saved_lastfailed != self.lastfailed: - config.cache.set("cache/lastfailed", self.lastfailed) - - -class NFPlugin(object): - """ Plugin which implements the --nf (run new-first) option """ - - def __init__(self, config): - self.config = config - self.active = config.option.newfirst - 
self.cached_nodeids = config.cache.get("cache/nodeids", []) - - def pytest_collection_modifyitems(self, session, config, items): - if self.active: - new_items = OrderedDict() - other_items = OrderedDict() - for item in items: - if item.nodeid not in self.cached_nodeids: - new_items[item.nodeid] = item - else: - other_items[item.nodeid] = item - - items[:] = self._get_increasing_order( - six.itervalues(new_items) - ) + self._get_increasing_order(six.itervalues(other_items)) - self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)] - - def _get_increasing_order(self, items): - return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True) - - def pytest_sessionfinish(self, session): - config = self.config - if config.getoption("cacheshow") or hasattr(config, "slaveinput"): - return - - config.cache.set("cache/nodeids", self.cached_nodeids) - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group.addoption( - "--lf", - "--last-failed", - action="store_true", - dest="lf", - help="rerun only the tests that failed " - "at the last run (or all if none failed)", - ) - group.addoption( - "--ff", - "--failed-first", - action="store_true", - dest="failedfirst", - help="run all tests but run the last failures first. " - "This may re-order tests and thus lead to " - "repeated fixture setup/teardown", - ) - group.addoption( - "--nf", - "--new-first", - action="store_true", - dest="newfirst", - help="run tests from new files first, then the rest of the tests " - "sorted by file mtime", - ) - group.addoption( - "--cache-show", - action="store_true", - dest="cacheshow", - help="show cache contents, don't perform collection or tests", - ) - group.addoption( - "--cache-clear", - action="store_true", - dest="cacheclear", - help="remove all cache contents at start of test run.", - ) - parser.addini("cache_dir", default=".pytest_cache", help="cache directory path.") - group.addoption( - "--lfnf", - "--last-failed-no-failures", - action="store", - dest="last_failed_no_failures", - choices=("all", "none"), - default="all", - help="change the behavior when no test failed in the last run or no " - "information about the last failures was found in the cache", - ) - - -def pytest_cmdline_main(config): - if config.option.cacheshow: - from _pytest.main import wrap_session - - return wrap_session(config, cacheshow) - - -@pytest.hookimpl(tryfirst=True) -def pytest_configure(config): - config.cache = Cache.for_config(config) - config.pluginmanager.register(LFPlugin(config), "lfplugin") - config.pluginmanager.register(NFPlugin(config), "nfplugin") - - -@pytest.fixture -def cache(request): - """ - Return a cache object that can persist state between testing sessions. - - cache.get(key, default) - cache.set(key, value) - - Keys must be a ``/`` separated value, where the first part is usually the - name of your plugin or application to avoid clashes with other cache users. - - Values can be any object handled by the json stdlib module. - """ - return request.config.cache - - -def pytest_report_header(config): - """Display cachedir with --cache-show and if non-default.""" - if config.option.verbose or config.getini("cache_dir") != ".pytest_cache": - cachedir = config.cache._cachedir - # TODO: evaluate generating upward relative paths - # starting with .., ../.. 
if sensible - - try: - displaypath = cachedir.relative_to(config.rootdir) - except ValueError: - displaypath = cachedir - return "cachedir: {}".format(displaypath) - - -def cacheshow(config, session): - from pprint import pformat - - tw = py.io.TerminalWriter() - tw.line("cachedir: " + str(config.cache._cachedir)) - if not config.cache._cachedir.is_dir(): - tw.line("cache is empty") - return 0 - dummy = object() - basedir = config.cache._cachedir - vdir = basedir / "v" - tw.sep("-", "cache values") - for valpath in sorted(x for x in vdir.rglob("*") if x.is_file()): - key = valpath.relative_to(vdir) - val = config.cache.get(key, dummy) - if val is dummy: - tw.line("%s contains unreadable content, will be ignored" % key) - else: - tw.line("%s contains:" % key) - for line in pformat(val).splitlines(): - tw.line(" " + line) - - ddir = basedir / "d" - if ddir.is_dir(): - contents = sorted(ddir.rglob("*")) - tw.sep("-", "cache directories") - for p in contents: - # if p.check(dir=1): - # print("%s/" % p.relto(basedir)) - if p.is_file(): - key = p.relative_to(basedir) - tw.line("{} is a file of length {:d}".format(key, p.stat().st_size)) - return 0 diff --git a/venv/lib/python3.8/site-packages/_pytest/capture.py b/venv/lib/python3.8/site-packages/_pytest/capture.py deleted file mode 100644 index ec72ae3e..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/capture.py +++ /dev/null @@ -1,786 +0,0 @@ -""" -per-test stdout/stderr capturing mechanism. - -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import contextlib -import io -import os -import sys -from io import UnsupportedOperation -from tempfile import TemporaryFile - -import six - -import pytest -from _pytest.compat import CaptureIO - -patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group._addoption( - "--capture", - action="store", - default="fd" if hasattr(os, "dup") else "sys", - metavar="method", - choices=["fd", "sys", "no"], - help="per-test capturing method: one of fd|sys|no.", - ) - group._addoption( - "-s", - action="store_const", - const="no", - dest="capture", - help="shortcut for --capture=no.", - ) - - -@pytest.hookimpl(hookwrapper=True) -def pytest_load_initial_conftests(early_config, parser, args): - ns = early_config.known_args_namespace - if ns.capture == "fd": - _py36_windowsconsoleio_workaround(sys.stdout) - _colorama_workaround() - _readline_workaround() - pluginmanager = early_config.pluginmanager - capman = CaptureManager(ns.capture) - pluginmanager.register(capman, "capturemanager") - - # make sure that capturemanager is properly reset at final shutdown - early_config.add_cleanup(capman.stop_global_capturing) - - # make sure logging does not raise exceptions at the end - def silence_logging_at_shutdown(): - if "logging" in sys.modules: - sys.modules["logging"].raiseExceptions = False - - early_config.add_cleanup(silence_logging_at_shutdown) - - # finally trigger conftest loading but while capturing (issue93) - capman.start_global_capturing() - outcome = yield - capman.suspend_global_capture() - if outcome.excinfo is not None: - out, err = capman.read_global_capture() - sys.stdout.write(out) - sys.stderr.write(err) - - -class CaptureManager(object): - """ - Capture plugin, manages that the appropriate capture method is enabled/disabled during collection and each - test phase (setup, call, teardown). 
After each of those points, the captured output is obtained and - attached to the collection/runtest report. - - There are two levels of capture: - * global: which is enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled - during collection and each test phase. - * fixture: when a test function or one of its fixture depend on the ``capsys`` or ``capfd`` fixtures. In this - case special handling is needed to ensure the fixtures take precedence over the global capture. - """ - - def __init__(self, method): - self._method = method - self._global_capturing = None - self._current_item = None - - def _getcapture(self, method): - if method == "fd": - return MultiCapture(out=True, err=True, Capture=FDCapture) - elif method == "sys": - return MultiCapture(out=True, err=True, Capture=SysCapture) - elif method == "no": - return MultiCapture(out=False, err=False, in_=False) - else: - raise ValueError("unknown capturing method: %r" % method) - - # Global capturing control - - def is_globally_capturing(self): - return self._method != "no" - - def start_global_capturing(self): - assert self._global_capturing is None - self._global_capturing = self._getcapture(self._method) - self._global_capturing.start_capturing() - - def stop_global_capturing(self): - if self._global_capturing is not None: - self._global_capturing.pop_outerr_to_orig() - self._global_capturing.stop_capturing() - self._global_capturing = None - - def resume_global_capture(self): - self._global_capturing.resume_capturing() - - def suspend_global_capture(self, in_=False): - cap = getattr(self, "_global_capturing", None) - if cap is not None: - cap.suspend_capturing(in_=in_) - - def read_global_capture(self): - return self._global_capturing.readouterr() - - # Fixture Control (it's just forwarding, think about removing this later) - - def activate_fixture(self, item): - """If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over - the global capture. 
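At the OS level, the FDCapture machinery used by the "fd" method is dup/dup2 bookkeeping; a self-contained sketch of the underlying trick:

import os
import sys
import tempfile

sys.stdout.flush()                        # don't let buffered text leak into the capture
saved = os.dup(1)                         # keep a handle on the real stdout
tmp = tempfile.TemporaryFile(mode="w+b")
os.dup2(tmp.fileno(), 1)                  # fd 1 now writes into the temp file
os.write(1, b"captured at the fd level\n")
os.dup2(saved, 1)                         # restore the original stdout
os.close(saved)
tmp.seek(0)
print(tmp.read().decode())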
- """ - fixture = getattr(item, "_capture_fixture", None) - if fixture is not None: - fixture._start() - - def deactivate_fixture(self, item): - """Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any.""" - fixture = getattr(item, "_capture_fixture", None) - if fixture is not None: - fixture.close() - - def suspend_fixture(self, item): - fixture = getattr(item, "_capture_fixture", None) - if fixture is not None: - fixture._suspend() - - def resume_fixture(self, item): - fixture = getattr(item, "_capture_fixture", None) - if fixture is not None: - fixture._resume() - - # Helper context managers - - @contextlib.contextmanager - def global_and_fixture_disabled(self): - """Context manager to temporarily disables global and current fixture capturing.""" - # Need to undo local capsys-et-al if exists before disabling global capture - self.suspend_fixture(self._current_item) - self.suspend_global_capture(in_=False) - try: - yield - finally: - self.resume_global_capture() - self.resume_fixture(self._current_item) - - @contextlib.contextmanager - def item_capture(self, when, item): - self.resume_global_capture() - self.activate_fixture(item) - try: - yield - finally: - self.deactivate_fixture(item) - self.suspend_global_capture(in_=False) - - out, err = self.read_global_capture() - item.add_report_section(when, "stdout", out) - item.add_report_section(when, "stderr", err) - - # Hooks - - @pytest.hookimpl(hookwrapper=True) - def pytest_make_collect_report(self, collector): - if isinstance(collector, pytest.File): - self.resume_global_capture() - outcome = yield - self.suspend_global_capture() - out, err = self.read_global_capture() - rep = outcome.get_result() - if out: - rep.sections.append(("Captured stdout", out)) - if err: - rep.sections.append(("Captured stderr", err)) - else: - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_protocol(self, item): - self._current_item = item - yield - self._current_item = None - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_setup(self, item): - with self.item_capture("setup", item): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_call(self, item): - with self.item_capture("call", item): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_teardown(self, item): - with self.item_capture("teardown", item): - yield - - @pytest.hookimpl(tryfirst=True) - def pytest_keyboard_interrupt(self, excinfo): - self.stop_global_capturing() - - @pytest.hookimpl(tryfirst=True) - def pytest_internalerror(self, excinfo): - self.stop_global_capturing() - - -capture_fixtures = {"capfd", "capfdbinary", "capsys", "capsysbinary"} - - -def _ensure_only_one_capture_fixture(request, name): - fixtures = set(request.fixturenames) & capture_fixtures - {name} - if fixtures: - fixtures = sorted(fixtures) - fixtures = fixtures[0] if len(fixtures) == 1 else fixtures - raise request.raiseerror( - "cannot use {} and {} at the same time".format(fixtures, name) - ) - - -@pytest.fixture -def capsys(request): - """Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make - captured output available via ``capsys.readouterr()`` method calls - which return a ``(out, err)`` namedtuple. ``out`` and ``err`` will be ``text`` - objects. 
- """ - _ensure_only_one_capture_fixture(request, "capsys") - with _install_capture_fixture_on_item(request, SysCapture) as fixture: - yield fixture - - -@pytest.fixture -def capsysbinary(request): - """Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make - captured output available via ``capsys.readouterr()`` method calls - which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes`` - objects. - """ - _ensure_only_one_capture_fixture(request, "capsysbinary") - # Currently, the implementation uses the python3 specific `.buffer` - # property of CaptureIO. - if sys.version_info < (3,): - raise request.raiseerror("capsysbinary is only supported on python 3") - with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture: - yield fixture - - -@pytest.fixture -def capfd(request): - """Enable capturing of writes to file descriptors ``1`` and ``2`` and make - captured output available via ``capfd.readouterr()`` method calls - which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text`` - objects. - """ - _ensure_only_one_capture_fixture(request, "capfd") - if not hasattr(os, "dup"): - pytest.skip( - "capfd fixture needs os.dup function which is not available in this system" - ) - with _install_capture_fixture_on_item(request, FDCapture) as fixture: - yield fixture - - -@pytest.fixture -def capfdbinary(request): - """Enable capturing of write to file descriptors 1 and 2 and make - captured output available via ``capfdbinary.readouterr`` method calls - which return a ``(out, err)`` tuple. ``out`` and ``err`` will be - ``bytes`` objects. - """ - _ensure_only_one_capture_fixture(request, "capfdbinary") - if not hasattr(os, "dup"): - pytest.skip( - "capfdbinary fixture needs os.dup function which is not available in this system" - ) - with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture: - yield fixture - - -@contextlib.contextmanager -def _install_capture_fixture_on_item(request, capture_class): - """ - Context manager which creates a ``CaptureFixture`` instance and "installs" it on - the item/node of the given request. Used by ``capsys`` and ``capfd``. - - The CaptureFixture is added as attribute of the item because it needs to accessed - by ``CaptureManager`` during its ``pytest_runtest_*`` hooks. - """ - request.node._capture_fixture = fixture = CaptureFixture(capture_class, request) - capmanager = request.config.pluginmanager.getplugin("capturemanager") - # need to active this fixture right away in case it is being used by another fixture (setup phase) - # if this fixture is being used only by a test function (call phase), then we wouldn't need this - # activation, but it doesn't hurt - capmanager.activate_fixture(request.node) - yield fixture - fixture.close() - del request.node._capture_fixture - - -class CaptureFixture(object): - """ - Object returned by :py:func:`capsys`, :py:func:`capsysbinary`, :py:func:`capfd` and :py:func:`capfdbinary` - fixtures. 
- """ - - def __init__(self, captureclass, request): - self.captureclass = captureclass - self.request = request - self._capture = None - self._captured_out = self.captureclass.EMPTY_BUFFER - self._captured_err = self.captureclass.EMPTY_BUFFER - - def _start(self): - # Start if not started yet - if getattr(self, "_capture", None) is None: - self._capture = MultiCapture( - out=True, err=True, in_=False, Capture=self.captureclass - ) - self._capture.start_capturing() - - def close(self): - if self._capture is not None: - out, err = self._capture.pop_outerr_to_orig() - self._captured_out += out - self._captured_err += err - self._capture.stop_capturing() - self._capture = None - - def readouterr(self): - """Read and return the captured output so far, resetting the internal buffer. - - :return: captured content as a namedtuple with ``out`` and ``err`` string attributes - """ - captured_out, captured_err = self._captured_out, self._captured_err - if self._capture is not None: - out, err = self._capture.readouterr() - captured_out += out - captured_err += err - self._captured_out = self.captureclass.EMPTY_BUFFER - self._captured_err = self.captureclass.EMPTY_BUFFER - return CaptureResult(captured_out, captured_err) - - def _suspend(self): - """Suspends this fixture's own capturing temporarily.""" - self._capture.suspend_capturing() - - def _resume(self): - """Resumes this fixture's own capturing temporarily.""" - self._capture.resume_capturing() - - @contextlib.contextmanager - def disabled(self): - """Temporarily disables capture while inside the 'with' block.""" - capmanager = self.request.config.pluginmanager.getplugin("capturemanager") - with capmanager.global_and_fixture_disabled(): - yield - - -def safe_text_dupfile(f, mode, default_encoding="UTF8"): - """ return an open text file object that's a duplicate of f on the - FD-level if possible. - """ - encoding = getattr(f, "encoding", None) - try: - fd = f.fileno() - except Exception: - if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"): - # we seem to have a text stream, let's just use it - return f - else: - newfd = os.dup(fd) - if "b" not in mode: - mode += "b" - f = os.fdopen(newfd, mode, 0) # no buffering - return EncodedFile(f, encoding or default_encoding) - - -class EncodedFile(object): - errors = "strict" # possibly needed by py3 code (issue555) - - def __init__(self, buffer, encoding): - self.buffer = buffer - self.encoding = encoding - - def write(self, obj): - if isinstance(obj, six.text_type): - obj = obj.encode(self.encoding, "replace") - self.buffer.write(obj) - - def writelines(self, linelist): - data = "".join(linelist) - self.write(data) - - @property - def name(self): - """Ensure that file.name is a string.""" - return repr(self.buffer) - - def __getattr__(self, name): - return getattr(object.__getattribute__(self, "buffer"), name) - - -CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"]) - - -class MultiCapture(object): - out = err = in_ = None - - def __init__(self, out=True, err=True, in_=True, Capture=None): - if in_: - self.in_ = Capture(0) - if out: - self.out = Capture(1) - if err: - self.err = Capture(2) - - def start_capturing(self): - if self.in_: - self.in_.start() - if self.out: - self.out.start() - if self.err: - self.err.start() - - def pop_outerr_to_orig(self): - """ pop current snapshot out/err capture and flush to orig streams. 
""" - out, err = self.readouterr() - if out: - self.out.writeorg(out) - if err: - self.err.writeorg(err) - return out, err - - def suspend_capturing(self, in_=False): - if self.out: - self.out.suspend() - if self.err: - self.err.suspend() - if in_ and self.in_: - self.in_.suspend() - self._in_suspended = True - - def resume_capturing(self): - if self.out: - self.out.resume() - if self.err: - self.err.resume() - if hasattr(self, "_in_suspended"): - self.in_.resume() - del self._in_suspended - - def stop_capturing(self): - """ stop capturing and reset capturing streams """ - if hasattr(self, "_reset"): - raise ValueError("was already stopped") - self._reset = True - if self.out: - self.out.done() - if self.err: - self.err.done() - if self.in_: - self.in_.done() - - def readouterr(self): - """ return snapshot unicode value of stdout/stderr capturings. """ - return CaptureResult( - self.out.snap() if self.out is not None else "", - self.err.snap() if self.err is not None else "", - ) - - -class NoCapture(object): - EMPTY_BUFFER = None - __init__ = start = done = suspend = resume = lambda *args: None - - -class FDCaptureBinary(object): - """Capture IO to/from a given os-level filedescriptor. - - snap() produces `bytes` - """ - - EMPTY_BUFFER = b"" - - def __init__(self, targetfd, tmpfile=None): - self.targetfd = targetfd - try: - self.targetfd_save = os.dup(self.targetfd) - except OSError: - self.start = lambda: None - self.done = lambda: None - else: - if targetfd == 0: - assert not tmpfile, "cannot set tmpfile with stdin" - tmpfile = open(os.devnull, "r") - self.syscapture = SysCapture(targetfd) - else: - if tmpfile is None: - f = TemporaryFile() - with f: - tmpfile = safe_text_dupfile(f, mode="wb+") - if targetfd in patchsysdict: - self.syscapture = SysCapture(targetfd, tmpfile) - else: - self.syscapture = NoCapture() - self.tmpfile = tmpfile - self.tmpfile_fd = tmpfile.fileno() - - def __repr__(self): - return "" % (self.targetfd, self.targetfd_save) - - def start(self): - """ Start capturing on targetfd using memorized tmpfile. """ - try: - os.fstat(self.targetfd_save) - except (AttributeError, OSError): - raise ValueError("saved filedescriptor not valid anymore") - os.dup2(self.tmpfile_fd, self.targetfd) - self.syscapture.start() - - def snap(self): - self.tmpfile.seek(0) - res = self.tmpfile.read() - self.tmpfile.seek(0) - self.tmpfile.truncate() - return res - - def done(self): - """ stop capturing, restore streams, return original capture file, - seeked to position zero. """ - targetfd_save = self.__dict__.pop("targetfd_save") - os.dup2(targetfd_save, self.targetfd) - os.close(targetfd_save) - self.syscapture.done() - _attempt_to_close_capture_file(self.tmpfile) - - def suspend(self): - self.syscapture.suspend() - os.dup2(self.targetfd_save, self.targetfd) - - def resume(self): - self.syscapture.resume() - os.dup2(self.tmpfile_fd, self.targetfd) - - def writeorg(self, data): - """ write to original file descriptor. """ - if isinstance(data, six.text_type): - data = data.encode("utf8") # XXX use encoding of original stream - os.write(self.targetfd_save, data) - - -class FDCapture(FDCaptureBinary): - """Capture IO to/from a given os-level filedescriptor. 
- - snap() produces text - """ - - EMPTY_BUFFER = str() - - def snap(self): - res = FDCaptureBinary.snap(self) - enc = getattr(self.tmpfile, "encoding", None) - if enc and isinstance(res, bytes): - res = six.text_type(res, enc, "replace") - return res - - -class SysCapture(object): - - EMPTY_BUFFER = str() - - def __init__(self, fd, tmpfile=None): - name = patchsysdict[fd] - self._old = getattr(sys, name) - self.name = name - if tmpfile is None: - if name == "stdin": - tmpfile = DontReadFromInput() - else: - tmpfile = CaptureIO() - self.tmpfile = tmpfile - - def start(self): - setattr(sys, self.name, self.tmpfile) - - def snap(self): - res = self.tmpfile.getvalue() - self.tmpfile.seek(0) - self.tmpfile.truncate() - return res - - def done(self): - setattr(sys, self.name, self._old) - del self._old - _attempt_to_close_capture_file(self.tmpfile) - - def suspend(self): - setattr(sys, self.name, self._old) - - def resume(self): - setattr(sys, self.name, self.tmpfile) - - def writeorg(self, data): - self._old.write(data) - self._old.flush() - - -class SysCaptureBinary(SysCapture): - EMPTY_BUFFER = b"" - - def snap(self): - res = self.tmpfile.buffer.getvalue() - self.tmpfile.seek(0) - self.tmpfile.truncate() - return res - - -class DontReadFromInput(six.Iterator): - """Temporary stub class. Ideally when stdin is accessed, the - capturing should be turned off, with possibly all data captured - so far sent to the screen. This should be configurable, though, - because in automated test runs it is better to crash than - hang indefinitely. - """ - - encoding = None - - def read(self, *args): - raise IOError("reading from stdin while output is captured") - - readline = read - readlines = read - __next__ = read - - def __iter__(self): - return self - - def fileno(self): - raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") - - def isatty(self): - return False - - def close(self): - pass - - @property - def buffer(self): - if sys.version_info >= (3, 0): - return self - else: - raise AttributeError("redirected stdin has no attribute buffer") - - -def _colorama_workaround(): - """ - Ensure colorama is imported so that it attaches to the correct stdio - handles on Windows. - - colorama uses the terminal on import time. So if something does the - first import of colorama while I/O capture is active, colorama will - fail in various ways. - """ - - if not sys.platform.startswith("win32"): - return - try: - import colorama # noqa - except ImportError: - pass - - -def _readline_workaround(): - """ - Ensure readline is imported so that it attaches to the correct stdio - handles on Windows. - - Pdb uses readline support where available--when not running from the Python - prompt, the readline module is not imported until running the pdb REPL. If - running pytest with the --pdb option this means the readline module is not - imported until after I/O capture has been started. - - This is a problem for pyreadline, which is often used to implement readline - support on Windows, as it does not attach to the correct handles for stdout - and/or stdin if they have been redirected by the FDCapture mechanism. This - workaround ensures that readline is imported before I/O capture is setup so - that it can attach to the actual stdin/out for the console. 
- - See https://github.com/pytest-dev/pytest/pull/1281 - """ - - if not sys.platform.startswith("win32"): - return - try: - import readline # noqa - except ImportError: - pass - - -def _py36_windowsconsoleio_workaround(stream): - """ - Python 3.6 implemented unicode console handling for Windows. This works - by reading/writing to the raw console handle using - ``{Read,Write}ConsoleW``. - - The problem is that we are going to ``dup2`` over the stdio file - descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the - handles used by Python to write to the console. Though there is still some - weirdness and the console handle seems to only be closed randomly and not - on the first call to ``CloseHandle``, or maybe it gets reopened with the - same handle value when we suspend capturing. - - The workaround in this case will reopen stdio with a different fd which - also means a different handle by replicating the logic in - "Py_lifecycle.c:initstdio/create_stdio". - - :param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given - here as parameter for unittesting purposes. - - See https://github.com/pytest-dev/py/issues/103 - """ - if not sys.platform.startswith("win32") or sys.version_info[:2] < (3, 6): - return - - # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666) - if not hasattr(stream, "buffer"): - return - - buffered = hasattr(stream.buffer, "raw") - raw_stdout = stream.buffer.raw if buffered else stream.buffer - - if not isinstance(raw_stdout, io._WindowsConsoleIO): - return - - def _reopen_stdio(f, mode): - if not buffered and mode[0] == "w": - buffering = 0 - else: - buffering = -1 - - return io.TextIOWrapper( - open(os.dup(f.fileno()), mode, buffering), - f.encoding, - f.errors, - f.newlines, - f.line_buffering, - ) - - sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, "rb") - sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, "wb") - sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, "wb") - - -def _attempt_to_close_capture_file(f): - """Suppress IOError when closing the temporary file used for capturing streams in py27 (#2370)""" - if six.PY2: - try: - f.close() - except IOError: - pass - else: - f.close() diff --git a/venv/lib/python3.8/site-packages/_pytest/compat.py b/venv/lib/python3.8/site-packages/_pytest/compat.py deleted file mode 100644 index ead9ffd8..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/compat.py +++ /dev/null @@ -1,443 +0,0 @@ -""" -python version compatibility code -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import codecs -import functools -import inspect -import re -import sys -from contextlib import contextmanager - -import py -import six -from six import text_type - -import _pytest -from _pytest.outcomes import fail -from _pytest.outcomes import TEST_OUTCOME - -try: - import enum -except ImportError: # pragma: no cover - # Only available in Python 3.4+ or as a backport - enum = None - -_PY3 = sys.version_info > (3, 0) -_PY2 = not _PY3 - - -if _PY3: - from inspect import signature, Parameter as Parameter -else: - from funcsigs import signature, Parameter as Parameter - -NoneType = type(None) -NOTSET = object() - -PY35 = sys.version_info[:2] >= (3, 5) -PY36 = sys.version_info[:2] >= (3, 6) -MODULE_NOT_FOUND_ERROR = "ModuleNotFoundError" if PY36 else "ImportError" - - -if _PY3: - from collections.abc import MutableMapping as MappingMixin - from collections.abc import Mapping, Sequence -else: - # those raise 
DeprecationWarnings in Python >=3.7 - from collections import MutableMapping as MappingMixin # noqa - from collections import Mapping, Sequence # noqa - - -if sys.version_info >= (3, 4): - from importlib.util import spec_from_file_location -else: - - def spec_from_file_location(*_, **__): - return None - - -def _format_args(func): - return str(signature(func)) - - -isfunction = inspect.isfunction -isclass = inspect.isclass -# used to work around a python2 exception info leak -exc_clear = getattr(sys, "exc_clear", lambda: None) -# The type of re.compile objects is not exposed in Python. -REGEX_TYPE = type(re.compile("")) - - -def is_generator(func): - genfunc = inspect.isgeneratorfunction(func) - return genfunc and not iscoroutinefunction(func) - - -def iscoroutinefunction(func): - """Return True if func is a decorated coroutine function. - - Note: copied and modified from Python 3.5's builtin couroutines.py to avoid import asyncio directly, - which in turns also initializes the "logging" module as side-effect (see issue #8). - """ - return getattr(func, "_is_coroutine", False) or ( - hasattr(inspect, "iscoroutinefunction") and inspect.iscoroutinefunction(func) - ) - - -def getlocation(function, curdir): - function = get_real_func(function) - fn = py.path.local(inspect.getfile(function)) - lineno = function.__code__.co_firstlineno - if fn.relto(curdir): - fn = fn.relto(curdir) - return "%s:%d" % (fn, lineno + 1) - - -def num_mock_patch_args(function): - """ return number of arguments used up by mock arguments (if any) """ - patchings = getattr(function, "patchings", None) - if not patchings: - return 0 - mock_modules = [sys.modules.get("mock"), sys.modules.get("unittest.mock")] - if any(mock_modules): - sentinels = [m.DEFAULT for m in mock_modules if m is not None] - return len( - [p for p in patchings if not p.attribute_name and p.new in sentinels] - ) - return len(patchings) - - -def getfuncargnames(function, is_method=False, cls=None): - """Returns the names of a function's mandatory arguments. - - This should return the names of all function arguments that: - * Aren't bound to an instance or type as in instance or class methods. - * Don't have default values. - * Aren't bound with functools.partial. - * Aren't replaced with mocks. - - The is_method and cls arguments indicate that the function should - be treated as a bound method even though it's not unless, only in - the case of cls, the function is a static method. - - @RonnyPfannschmidt: This function should be refactored when we - revisit fixtures. The fixture mechanism should ask the node for - the fixture names, and not try to obtain directly from the - function object well after collection has occurred. - - """ - # The parameters attribute of a Signature object contains an - # ordered mapping of parameter names to Parameter instances. This - # creates a tuple of the names of the parameters that don't have - # defaults. - try: - parameters = signature(function).parameters - except (ValueError, TypeError) as e: - fail( - "Could not determine arguments of {!r}: {}".format(function, e), - pytrace=False, - ) - - arg_names = tuple( - p.name - for p in parameters.values() - if ( - p.kind is Parameter.POSITIONAL_OR_KEYWORD - or p.kind is Parameter.KEYWORD_ONLY - ) - and p.default is Parameter.empty - ) - # If this function should be treated as a bound method even though - # it's passed as an unbound method or function, remove the first - # parameter name. 
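
getfuncargnames builds directly on inspect.signature: it keeps only positional-or-keyword and keyword-only parameters that have no default. The core filter in isolation (a sketch of the mechanism, not pytest's exact code path, which also handles bound methods and mocks):

    from inspect import Parameter, signature

    def mandatory_arg_names(func):
        """Names of parameters with no default, as getfuncargnames filters them."""
        return tuple(
            p.name
            for p in signature(func).parameters.values()
            if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
            and p.default is Parameter.empty
        )

    def f(a, b=1, *, c, d=2):
        pass

    print(mandatory_arg_names(f))  # ('a', 'c')
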
- if is_method or ( - cls and not isinstance(cls.__dict__.get(function.__name__, None), staticmethod) - ): - arg_names = arg_names[1:] - # Remove any names that will be replaced with mocks. - if hasattr(function, "__wrapped__"): - arg_names = arg_names[num_mock_patch_args(function) :] - return arg_names - - -@contextmanager -def dummy_context_manager(): - """Context manager that does nothing, useful in situations where you might need an actual context manager or not - depending on some condition. Using this allow to keep the same code""" - yield - - -def get_default_arg_names(function): - # Note: this code intentionally mirrors the code at the beginning of getfuncargnames, - # to get the arguments which were excluded from its result because they had default values - return tuple( - p.name - for p in signature(function).parameters.values() - if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) - and p.default is not Parameter.empty - ) - - -if _PY3: - STRING_TYPES = bytes, str - UNICODE_TYPES = six.text_type - - if PY35: - - def _bytes_to_ascii(val): - return val.decode("ascii", "backslashreplace") - - else: - - def _bytes_to_ascii(val): - if val: - # source: http://goo.gl/bGsnwC - encoded_bytes, _ = codecs.escape_encode(val) - return encoded_bytes.decode("ascii") - else: - # empty bytes crashes codecs.escape_encode (#1087) - return "" - - def ascii_escaped(val): - """If val is pure ascii, returns it as a str(). Otherwise, escapes - bytes objects into a sequence of escaped bytes: - - b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6' - - and escapes unicode objects into a sequence of escaped unicode - ids, e.g.: - - '4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944' - - note: - the obvious "v.decode('unicode-escape')" will return - valid utf-8 unicode if it finds them in bytes, but we - want to return escaped bytes for any byte, even if they match - a utf-8 string. - - """ - if isinstance(val, bytes): - return _bytes_to_ascii(val) - else: - return val.encode("unicode_escape").decode("ascii") - - -else: - STRING_TYPES = six.string_types - UNICODE_TYPES = six.text_type - - def ascii_escaped(val): - """In py2 bytes and str are the same type, so return if it's a bytes - object, return it unchanged if it is a full ascii string, - otherwise escape it into its binary form. - - If it's a unicode string, change the unicode characters into - unicode escapes. - - """ - if isinstance(val, bytes): - try: - return val.encode("ascii") - except UnicodeDecodeError: - return val.encode("string-escape") - else: - return val.encode("unicode-escape") - - -class _PytestWrapper(object): - """Dummy wrapper around a function object for internal use only. - - Used to correctly unwrap the underlying function object - when we are creating fixtures, because we wrap the function object ourselves with a decorator - to issue warnings when the fixture function is called directly. - """ - - def __init__(self, obj): - self.obj = obj - - -def get_real_func(obj): - """ gets the real function object of the (possibly) wrapped object by - functools.wraps or functools.partial. 
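
ascii_escaped in the Python 3 branch above leans on the "backslashreplace" error handler to turn arbitrary bytes into printable ASCII, and on "unicode_escape" for text. The two behaviours in one line each (assumes Python 3.5+, as the deleted code itself notes):

    # bytes -> escaped ASCII text, never raising on undecodable bytes
    print(b"\xc3\xb4".decode("ascii", "backslashreplace"))            # \xc3\xb4

    # text -> escaped ASCII via unicode_escape, mirroring the str branch
    print("snowman \u2603".encode("unicode_escape").decode("ascii"))  # snowman \u2603
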
- """ - start_obj = obj - for i in range(100): - # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function - # to trigger a warning if it gets called directly instead of by pytest: we don't - # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774) - new_obj = getattr(obj, "__pytest_wrapped__", None) - if isinstance(new_obj, _PytestWrapper): - obj = new_obj.obj - break - new_obj = getattr(obj, "__wrapped__", None) - if new_obj is None: - break - obj = new_obj - else: - raise ValueError( - ("could not find real function of {start}\nstopped at {current}").format( - start=py.io.saferepr(start_obj), current=py.io.saferepr(obj) - ) - ) - if isinstance(obj, functools.partial): - obj = obj.func - return obj - - -def get_real_method(obj, holder): - """ - Attempts to obtain the real function object that might be wrapping ``obj``, while at the same time - returning a bound method to ``holder`` if the original object was a bound method. - """ - try: - is_method = hasattr(obj, "__func__") - obj = get_real_func(obj) - except Exception: - return obj - if is_method and hasattr(obj, "__get__") and callable(obj.__get__): - obj = obj.__get__(holder) - return obj - - -def getfslineno(obj): - # xxx let decorators etc specify a sane ordering - obj = get_real_func(obj) - if hasattr(obj, "place_as"): - obj = obj.place_as - fslineno = _pytest._code.getfslineno(obj) - assert isinstance(fslineno[1], int), obj - return fslineno - - -def getimfunc(func): - try: - return func.__func__ - except AttributeError: - return func - - -def safe_getattr(object, name, default): - """ Like getattr but return default upon any Exception or any OutcomeException. - - Attribute access can potentially fail for 'evil' Python objects. - See issue #214. - It catches OutcomeException because of #2490 (issue #580), new outcomes are derived from BaseException - instead of Exception (for more details check #2707) - """ - try: - return getattr(object, name, default) - except TEST_OUTCOME: - return default - - -def safe_isclass(obj): - """Ignore any exception via isinstance on Python 3.""" - try: - return isclass(obj) - except Exception: - return False - - -def _is_unittest_unexpected_success_a_failure(): - """Return if the test suite should fail if an @expectedFailure unittest test PASSES. - - From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful: - Changed in version 3.4: Returns False if there were any - unexpectedSuccesses from tests marked with the expectedFailure() decorator. - """ - return sys.version_info >= (3, 4) - - -if _PY3: - - def safe_str(v): - """returns v as string""" - return str(v) - - -else: - - def safe_str(v): - """returns v as string, converting to ascii if necessary""" - try: - return str(v) - except UnicodeError: - if not isinstance(v, text_type): - v = text_type(v) - errors = "replace" - return v.encode("utf-8", errors) - - -COLLECT_FAKEMODULE_ATTRIBUTES = ( - "Collector", - "Module", - "Generator", - "Function", - "Instance", - "Session", - "Item", - "Class", - "File", - "_fillfuncargs", -) - - -def _setup_collect_fakemodule(): - from types import ModuleType - import pytest - - pytest.collect = ModuleType("pytest.collect") - pytest.collect.__all__ = [] # used for setns - for attr in COLLECT_FAKEMODULE_ATTRIBUTES: - setattr(pytest.collect, attr, getattr(pytest, attr)) - - -if _PY2: - # Without this the test_dupfile_on_textio will fail, otherwise CaptureIO could directly inherit from StringIO. 
- from py.io import TextIO - - class CaptureIO(TextIO): - @property - def encoding(self): - return getattr(self, "_encoding", "UTF-8") - - -else: - import io - - class CaptureIO(io.TextIOWrapper): - def __init__(self): - super(CaptureIO, self).__init__( - io.BytesIO(), encoding="UTF-8", newline="", write_through=True - ) - - def getvalue(self): - return self.buffer.getvalue().decode("UTF-8") - - -class FuncargnamesCompatAttr(object): - """ helper class so that Metafunc, Function and FixtureRequest - don't need to each define the "funcargnames" compatibility attribute. - """ - - @property - def funcargnames(self): - """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" - return self.fixturenames - - -if six.PY2: - - def lru_cache(*_, **__): - def dec(fn): - return fn - - return dec - - -else: - from functools import lru_cache # noqa: F401 diff --git a/venv/lib/python3.8/site-packages/_pytest/config/__init__.py b/venv/lib/python3.8/site-packages/_pytest/config/__init__.py deleted file mode 100644 index 0fc89554..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/config/__init__.py +++ /dev/null @@ -1,1052 +0,0 @@ -""" command line options, ini-file and conftest.py processing. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import copy -import inspect -import os -import shlex -import sys -import types -import warnings -from distutils.version import LooseVersion - -import py -import six -from pluggy import HookimplMarker -from pluggy import HookspecMarker -from pluggy import PluginManager - -import _pytest._code -import _pytest.assertion -import _pytest.hookspec # the extension point definitions -from .exceptions import PrintHelp -from .exceptions import UsageError -from .findpaths import determine_setup -from .findpaths import exists -from _pytest._code import ExceptionInfo -from _pytest._code import filter_traceback -from _pytest.compat import lru_cache -from _pytest.compat import safe_str -from _pytest.outcomes import Skipped - -hookimpl = HookimplMarker("pytest") -hookspec = HookspecMarker("pytest") - - -class ConftestImportFailure(Exception): - def __init__(self, path, excinfo): - Exception.__init__(self, path, excinfo) - self.path = path - self.excinfo = excinfo - - -def main(args=None, plugins=None): - """ return exit code, after performing an in-process test run. - - :arg args: list of command line arguments. - - :arg plugins: list of plugin objects to be auto-registered during - initialization. 
- """ - from _pytest.main import EXIT_USAGEERROR - - try: - try: - config = _prepareconfig(args, plugins) - except ConftestImportFailure as e: - exc_info = ExceptionInfo(e.excinfo) - tw = py.io.TerminalWriter(sys.stderr) - tw.line( - "ImportError while loading conftest '{e.path}'.".format(e=e), red=True - ) - exc_info.traceback = exc_info.traceback.filter(filter_traceback) - exc_repr = ( - exc_info.getrepr(style="short", chain=False) - if exc_info.traceback - else exc_info.exconly() - ) - formatted_tb = safe_str(exc_repr) - for line in formatted_tb.splitlines(): - tw.line(line.rstrip(), red=True) - return 4 - else: - try: - return config.hook.pytest_cmdline_main(config=config) - finally: - config._ensure_unconfigure() - except UsageError as e: - tw = py.io.TerminalWriter(sys.stderr) - for msg in e.args: - tw.line("ERROR: {}\n".format(msg), red=True) - return EXIT_USAGEERROR - - -class cmdline(object): # compatibility namespace - main = staticmethod(main) - - -def filename_arg(path, optname): - """ Argparse type validator for filename arguments. - - :path: path of filename - :optname: name of the option - """ - if os.path.isdir(path): - raise UsageError("{} must be a filename, given: {}".format(optname, path)) - return path - - -def directory_arg(path, optname): - """Argparse type validator for directory arguments. - - :path: path of directory - :optname: name of the option - """ - if not os.path.isdir(path): - raise UsageError("{} must be a directory, given: {}".format(optname, path)) - return path - - -default_plugins = ( - "mark", - "main", - "terminal", - "runner", - "python", - "fixtures", - "debugging", - "unittest", - "capture", - "skipping", - "tmpdir", - "monkeypatch", - "recwarn", - "pastebin", - "helpconfig", - "nose", - "assertion", - "junitxml", - "resultlog", - "doctest", - "cacheprovider", - "freeze_support", - "setuponly", - "setupplan", - "stepwise", - "warnings", - "logging", -) - - -builtin_plugins = set(default_plugins) -builtin_plugins.add("pytester") - - -def get_config(): - # subsequent calls to main will create a fresh instance - pluginmanager = PytestPluginManager() - config = Config(pluginmanager) - for spec in default_plugins: - pluginmanager.import_plugin(spec) - return config - - -def get_plugin_manager(): - """ - Obtain a new instance of the - :py:class:`_pytest.config.PytestPluginManager`, with default plugins - already loaded. - - This function can be used by integration with other tools, like hooking - into pytest to run tests into an IDE. 
- """ - return get_config().pluginmanager - - -def _prepareconfig(args=None, plugins=None): - warning = None - if args is None: - args = sys.argv[1:] - elif isinstance(args, py.path.local): - args = [str(args)] - elif not isinstance(args, (tuple, list)): - if not isinstance(args, str): - raise ValueError("not a string or argument list: %r" % (args,)) - args = shlex.split(args, posix=sys.platform != "win32") - from _pytest import deprecated - - warning = deprecated.MAIN_STR_ARGS - config = get_config() - pluginmanager = config.pluginmanager - try: - if plugins: - for plugin in plugins: - if isinstance(plugin, six.string_types): - pluginmanager.consider_pluginarg(plugin) - else: - pluginmanager.register(plugin) - if warning: - from _pytest.warnings import _issue_config_warning - - _issue_config_warning(warning, config=config) - return pluginmanager.hook.pytest_cmdline_parse( - pluginmanager=pluginmanager, args=args - ) - except BaseException: - config._ensure_unconfigure() - raise - - -class PytestPluginManager(PluginManager): - """ - Overwrites :py:class:`pluggy.PluginManager ` to add pytest-specific - functionality: - - * loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and - ``pytest_plugins`` global variables found in plugins being loaded; - * ``conftest.py`` loading during start-up; - """ - - def __init__(self): - super(PytestPluginManager, self).__init__("pytest") - self._conftest_plugins = set() - - # state related to local conftest plugins - self._dirpath2confmods = {} - self._conftestpath2mod = {} - self._confcutdir = None - self._noconftest = False - self._duplicatepaths = set() - - self.add_hookspecs(_pytest.hookspec) - self.register(self) - if os.environ.get("PYTEST_DEBUG"): - err = sys.stderr - encoding = getattr(err, "encoding", "utf8") - try: - err = py.io.dupfile(err, encoding=encoding) - except Exception: - pass - self.trace.root.setwriter(err.write) - self.enable_tracing() - - # Config._consider_importhook will set a real object if required. - self.rewrite_hook = _pytest.assertion.DummyRewriteHook() - # Used to know when we are importing conftests after the pytest_configure stage - self._configured = False - - def addhooks(self, module_or_class): - """ - .. deprecated:: 2.8 - - Use :py:meth:`pluggy.PluginManager.add_hookspecs ` - instead. 
- """ - warning = dict( - code="I2", - fslocation=_pytest._code.getfslineno(sys._getframe(1)), - nodeid=None, - message="use pluginmanager.add_hookspecs instead of " - "deprecated addhooks() method.", - ) - self._warn(warning) - return self.add_hookspecs(module_or_class) - - def parse_hookimpl_opts(self, plugin, name): - # pytest hooks are always prefixed with pytest_ - # so we avoid accessing possibly non-readable attributes - # (see issue #1073) - if not name.startswith("pytest_"): - return - # ignore some historic special names which can not be hooks anyway - if name == "pytest_plugins" or name.startswith("pytest_funcarg__"): - return - - method = getattr(plugin, name) - opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name) - - # consider only actual functions for hooks (#3775) - if not inspect.isroutine(method): - return - - # collect unmarked hooks as long as they have the `pytest_' prefix - if opts is None and name.startswith("pytest_"): - opts = {} - - if opts is not None: - for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): - opts.setdefault(name, hasattr(method, name)) - return opts - - def parse_hookspec_opts(self, module_or_class, name): - opts = super(PytestPluginManager, self).parse_hookspec_opts( - module_or_class, name - ) - if opts is None: - method = getattr(module_or_class, name) - if name.startswith("pytest_"): - opts = { - "firstresult": hasattr(method, "firstresult"), - "historic": hasattr(method, "historic"), - } - return opts - - def register(self, plugin, name=None): - if name in ["pytest_catchlog", "pytest_capturelog"]: - self._warn( - "{} plugin has been merged into the core, " - "please remove it from your requirements.".format( - name.replace("_", "-") - ) - ) - return - ret = super(PytestPluginManager, self).register(plugin, name) - if ret: - self.hook.pytest_plugin_registered.call_historic( - kwargs=dict(plugin=plugin, manager=self) - ) - - if isinstance(plugin, types.ModuleType): - self.consider_module(plugin) - return ret - - def getplugin(self, name): - # support deprecated naming because plugins (xdist e.g.) use it - return self.get_plugin(name) - - def hasplugin(self, name): - """Return True if the plugin with the given name is registered.""" - return bool(self.get_plugin(name)) - - def pytest_configure(self, config): - # XXX now that the pluginmanager exposes hookimpl(tryfirst...) - # we should remove tryfirst/trylast as markers - config.addinivalue_line( - "markers", - "tryfirst: mark a hook implementation function such that the " - "plugin machinery will try to call it first/as early as possible.", - ) - config.addinivalue_line( - "markers", - "trylast: mark a hook implementation function such that the " - "plugin machinery will try to call it last/as late as possible.", - ) - self._configured = True - - def _warn(self, message): - kwargs = ( - message - if isinstance(message, dict) - else {"code": "I1", "message": message, "fslocation": None, "nodeid": None} - ) - self.hook.pytest_logwarning.call_historic(kwargs=kwargs) - - # - # internal API for local conftest plugin handling - # - def _set_initial_conftests(self, namespace): - """ load initial conftest files given a preparsed "namespace". - As conftest files may add their own command line options - which have arguments ('--my-opt somepath') we might get some - false positives. All builtin and 3rd party plugins will have - been loaded, however, so common options will not confuse our logic - here. 
- """ - current = py.path.local() - self._confcutdir = ( - current.join(namespace.confcutdir, abs=True) - if namespace.confcutdir - else None - ) - self._noconftest = namespace.noconftest - self._using_pyargs = namespace.pyargs - testpaths = namespace.file_or_dir - foundanchor = False - for path in testpaths: - path = str(path) - # remove node-id syntax - i = path.find("::") - if i != -1: - path = path[:i] - anchor = current.join(path, abs=1) - if exists(anchor): # we found some file object - self._try_load_conftest(anchor) - foundanchor = True - if not foundanchor: - self._try_load_conftest(current) - - def _try_load_conftest(self, anchor): - self._getconftestmodules(anchor) - # let's also consider test* subdirs - if anchor.check(dir=1): - for x in anchor.listdir("test*"): - if x.check(dir=1): - self._getconftestmodules(x) - - @lru_cache(maxsize=128) - def _getconftestmodules(self, path): - if self._noconftest: - return [] - - if path.isfile(): - directory = path.dirpath() - else: - directory = path - - if six.PY2: # py2 is not using lru_cache. - try: - return self._dirpath2confmods[directory] - except KeyError: - pass - - # XXX these days we may rather want to use config.rootdir - # and allow users to opt into looking into the rootdir parent - # directories instead of requiring to specify confcutdir - clist = [] - for parent in directory.realpath().parts(): - if self._confcutdir and self._confcutdir.relto(parent): - continue - conftestpath = parent.join("conftest.py") - if conftestpath.isfile(): - mod = self._importconftest(conftestpath) - clist.append(mod) - self._dirpath2confmods[directory] = clist - return clist - - def _rget_with_confmod(self, name, path): - modules = self._getconftestmodules(path) - for mod in reversed(modules): - try: - return mod, getattr(mod, name) - except AttributeError: - continue - raise KeyError(name) - - def _importconftest(self, conftestpath): - try: - return self._conftestpath2mod[conftestpath] - except KeyError: - pkgpath = conftestpath.pypkgpath() - if pkgpath is None: - _ensure_removed_sysmodule(conftestpath.purebasename) - try: - mod = conftestpath.pyimport() - if ( - hasattr(mod, "pytest_plugins") - and self._configured - and not self._using_pyargs - ): - from _pytest.deprecated import ( - PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST - ) - - warnings.warn_explicit( - PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST, - category=None, - filename=str(conftestpath), - lineno=0, - ) - except Exception: - raise ConftestImportFailure(conftestpath, sys.exc_info()) - - self._conftest_plugins.add(mod) - self._conftestpath2mod[conftestpath] = mod - dirpath = conftestpath.dirpath() - if dirpath in self._dirpath2confmods: - for path, mods in self._dirpath2confmods.items(): - if path and path.relto(dirpath) or path == dirpath: - assert mod not in mods - mods.append(mod) - self.trace("loaded conftestmodule %r" % (mod)) - self.consider_conftest(mod) - return mod - - # - # API for bootstrapping plugin loading - # - # - - def consider_preparse(self, args): - for opt1, opt2 in zip(args, args[1:]): - if opt1 == "-p": - self.consider_pluginarg(opt2) - - def consider_pluginarg(self, arg): - if arg.startswith("no:"): - name = arg[3:] - # PR #4304 : remove stepwise if cacheprovider is blocked - if name == "cacheprovider": - self.set_blocked("stepwise") - self.set_blocked("pytest_stepwise") - - self.set_blocked(name) - if not name.startswith("pytest_"): - self.set_blocked("pytest_" + name) - else: - self.import_plugin(arg) - - def consider_conftest(self, conftestmodule): - 
self.register(conftestmodule, name=conftestmodule.__file__) - - def consider_env(self): - self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) - - def consider_module(self, mod): - self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) - - def _import_plugin_specs(self, spec): - plugins = _get_plugin_specs_as_list(spec) - for import_spec in plugins: - self.import_plugin(import_spec) - - def import_plugin(self, modname): - # most often modname refers to builtin modules, e.g. "pytester", - # "terminal" or "capture". Those plugins are registered under their - # basename for historic purposes but must be imported with the - # _pytest prefix. - assert isinstance(modname, (six.text_type, str)), ( - "module name as text required, got %r" % modname - ) - modname = str(modname) - if self.is_blocked(modname) or self.get_plugin(modname) is not None: - return - if modname in builtin_plugins: - importspec = "_pytest." + modname - else: - importspec = modname - self.rewrite_hook.mark_rewrite(importspec) - try: - __import__(importspec) - except ImportError as e: - new_exc_type = ImportError - new_exc_message = 'Error importing plugin "%s": %s' % ( - modname, - safe_str(e.args[0]), - ) - new_exc = new_exc_type(new_exc_message) - - six.reraise(new_exc_type, new_exc, sys.exc_info()[2]) - - except Skipped as e: - self._warn("skipped plugin %r: %s" % ((modname, e.msg))) - else: - mod = sys.modules[importspec] - self.register(mod, modname) - - -def _get_plugin_specs_as_list(specs): - """ - Parses a list of "plugin specs" and returns a list of plugin names. - - Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in - which case it is returned as a list. Specs can also be `None` in which case an - empty list is returned. - """ - if specs is not None: - if isinstance(specs, str): - specs = specs.split(",") if specs else [] - if not isinstance(specs, (list, tuple)): - raise UsageError( - "Plugin specs must be a ','-separated string or a " - "list/tuple of strings for plugin names. Given: %r" % specs - ) - return list(specs) - return [] - - -def _ensure_removed_sysmodule(modname): - try: - del sys.modules[modname] - except KeyError: - pass - - -class Notset(object): - def __repr__(self): - return "" - - -notset = Notset() - - -def _iter_rewritable_modules(package_files): - for fn in package_files: - is_simple_module = "/" not in fn and fn.endswith(".py") - is_package = fn.count("/") == 1 and fn.endswith("__init__.py") - if is_simple_module: - module_name, _ = os.path.splitext(fn) - yield module_name - elif is_package: - package_name = os.path.dirname(fn) - yield package_name - - -class Config(object): - """ access to configuration values, pluginmanager and plugin hooks. """ - - def __init__(self, pluginmanager): - #: access to command line option as attributes. 
- #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead - self.option = argparse.Namespace() - from .argparsing import Parser, FILE_OR_DIR - - _a = FILE_OR_DIR - self._parser = Parser( - usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a), - processopt=self._processopt, - ) - #: a pluginmanager instance - self.pluginmanager = pluginmanager - self.trace = self.pluginmanager.trace.root.get("config") - self.hook = self.pluginmanager.hook - self._inicache = {} - self._override_ini = () - self._opt2dest = {} - self._cleanup = [] - self._warn = self.pluginmanager._warn - self.pluginmanager.register(self, "pytestconfig") - self._configured = False - - def do_setns(dic): - import pytest - - setns(pytest, dic) - - self.hook.pytest_namespace.call_historic(do_setns, {}) - self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser)) - - def add_cleanup(self, func): - """ Add a function to be called when the config object gets out of - use (usually coninciding with pytest_unconfigure).""" - self._cleanup.append(func) - - def _do_configure(self): - assert not self._configured - self._configured = True - self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) - - def _ensure_unconfigure(self): - if self._configured: - self._configured = False - self.hook.pytest_unconfigure(config=self) - self.hook.pytest_configure._call_history = [] - while self._cleanup: - fin = self._cleanup.pop() - fin() - - def warn(self, code, message, fslocation=None, nodeid=None): - """ - .. deprecated:: 3.8 - - Use :py:func:`warnings.warn` or :py:func:`warnings.warn_explicit` directly instead. - - Generate a warning for this test session. - """ - from _pytest.warning_types import RemovedInPytest4Warning - - if isinstance(fslocation, (tuple, list)) and len(fslocation) > 2: - filename, lineno = fslocation[:2] - else: - filename = "unknown file" - lineno = 0 - msg = "config.warn has been deprecated, use warnings.warn instead" - if nodeid: - msg = "{}: {}".format(nodeid, msg) - warnings.warn_explicit( - RemovedInPytest4Warning(msg), - category=None, - filename=filename, - lineno=lineno, - ) - self.hook.pytest_logwarning.call_historic( - kwargs=dict( - code=code, message=message, fslocation=fslocation, nodeid=nodeid - ) - ) - - def get_terminal_writer(self): - return self.pluginmanager.get_plugin("terminalreporter")._tw - - def pytest_cmdline_parse(self, pluginmanager, args): - # REF1 assert self == pluginmanager.config, (self, pluginmanager.config) - self.parse(args) - return self - - def notify_exception(self, excinfo, option=None): - if option and option.fulltrace: - style = "long" - else: - style = "native" - excrepr = excinfo.getrepr( - funcargs=True, showlocals=getattr(option, "showlocals", False), style=style - ) - res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) - if not any(res): - for line in str(excrepr).split("\n"): - sys.stderr.write("INTERNALERROR> %s\n" % line) - sys.stderr.flush() - - def cwd_relative_nodeid(self, nodeid): - # nodeid's are relative to the rootpath, compute relative to cwd - if self.invocation_dir != self.rootdir: - fullpath = self.rootdir.join(nodeid) - nodeid = self.invocation_dir.bestrelpath(fullpath) - return nodeid - - @classmethod - def fromdictargs(cls, option_dict, args): - """ constructor useable for subprocesses. 
""" - config = get_config() - config.option.__dict__.update(option_dict) - config.parse(args, addopts=False) - for x in config.option.plugins: - config.pluginmanager.consider_pluginarg(x) - return config - - def _processopt(self, opt): - for name in opt._short_opts + opt._long_opts: - self._opt2dest[name] = opt.dest - - if hasattr(opt, "default") and opt.dest: - if not hasattr(self.option, opt.dest): - setattr(self.option, opt.dest, opt.default) - - @hookimpl(trylast=True) - def pytest_load_initial_conftests(self, early_config): - self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) - - def _initini(self, args): - ns, unknown_args = self._parser.parse_known_and_unknown_args( - args, namespace=copy.copy(self.option) - ) - r = determine_setup( - ns.inifilename, - ns.file_or_dir + unknown_args, - rootdir_cmd_arg=ns.rootdir or None, - config=self, - ) - self.rootdir, self.inifile, self.inicfg = r - self._parser.extra_info["rootdir"] = self.rootdir - self._parser.extra_info["inifile"] = self.inifile - self.invocation_dir = py.path.local() - self._parser.addini("addopts", "extra command line options", "args") - self._parser.addini("minversion", "minimally required pytest version") - self._override_ini = ns.override_ini or () - - def _consider_importhook(self, args): - """Install the PEP 302 import hook if using assertion rewriting. - - Needs to parse the --assert= option from the commandline - and find all the installed plugins to mark them for rewriting - by the importhook. - """ - ns, unknown_args = self._parser.parse_known_and_unknown_args(args) - mode = ns.assertmode - if mode == "rewrite": - try: - hook = _pytest.assertion.install_importhook(self) - except SystemError: - mode = "plain" - else: - self._mark_plugins_for_rewrite(hook) - _warn_about_missing_assertion(mode) - - def _mark_plugins_for_rewrite(self, hook): - """ - Given an importhook, mark for rewrite any top-level - modules or packages in the distribution package for - all pytest plugins. - """ - import pkg_resources - - self.pluginmanager.rewrite_hook = hook - - if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): - # We don't autoload from setuptools entry points, no need to continue. - return - - # 'RECORD' available for plugins installed normally (pip install) - # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e) - # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa - # so it shouldn't be an issue - metadata_files = "RECORD", "SOURCES.txt" - - package_files = ( - entry.split(",")[0] - for entrypoint in pkg_resources.iter_entry_points("pytest11") - for metadata in metadata_files - for entry in entrypoint.dist._get_metadata(metadata) - ) - - for name in _iter_rewritable_modules(package_files): - hook.mark_rewrite(name) - - def _preparse(self, args, addopts=True): - if addopts: - args[:] = shlex.split(os.environ.get("PYTEST_ADDOPTS", "")) + args - self._initini(args) - if addopts: - args[:] = self.getini("addopts") + args - self._checkversion() - self._consider_importhook(args) - self.pluginmanager.consider_preparse(args) - if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): - # Don't autoload from setuptools entry point. Only explicitly specified - # plugins are going to be loaded. 
- self.pluginmanager.load_setuptools_entrypoints("pytest11") - self.pluginmanager.consider_env() - self.known_args_namespace = ns = self._parser.parse_known_args( - args, namespace=copy.copy(self.option) - ) - if self.known_args_namespace.confcutdir is None and self.inifile: - confcutdir = py.path.local(self.inifile).dirname - self.known_args_namespace.confcutdir = confcutdir - try: - self.hook.pytest_load_initial_conftests( - early_config=self, args=args, parser=self._parser - ) - except ConftestImportFailure: - e = sys.exc_info()[1] - if ns.help or ns.version: - # we don't want to prevent --help/--version to work - # so just let is pass and print a warning at the end - self._warn("could not load initial conftests (%s)\n" % e.path) - else: - raise - - def _checkversion(self): - import pytest - - minver = self.inicfg.get("minversion", None) - if minver: - if LooseVersion(minver) > LooseVersion(pytest.__version__): - raise pytest.UsageError( - "%s:%d: requires pytest-%s, actual pytest-%s'" - % ( - self.inicfg.config.path, - self.inicfg.lineof("minversion"), - minver, - pytest.__version__, - ) - ) - - def parse(self, args, addopts=True): - # parse given cmdline arguments into this config object. - assert not hasattr( - self, "args" - ), "can only parse cmdline args at most once per Config object" - self._origargs = args - self.hook.pytest_addhooks.call_historic( - kwargs=dict(pluginmanager=self.pluginmanager) - ) - self._preparse(args, addopts=addopts) - # XXX deprecated hook: - self.hook.pytest_cmdline_preparse(config=self, args=args) - self._parser.after_preparse = True - try: - args = self._parser.parse_setoption( - args, self.option, namespace=self.option - ) - if not args: - cwd = os.getcwd() - if cwd == self.rootdir: - args = self.getini("testpaths") - if not args: - args = [cwd] - self.args = args - except PrintHelp: - pass - - def addinivalue_line(self, name, line): - """ add a line to an ini-file option. The option must have been - declared but might not yet be set in which case the line becomes the - the first line in its value. """ - x = self.getini(name) - assert isinstance(x, list) - x.append(line) # modifies the cached list inline - - def getini(self, name): - """ return configuration value from an :ref:`ini file `. If the - specified name hasn't been registered through a prior - :py:func:`parser.addini <_pytest.config.Parser.addini>` - call (usually from a plugin), a ValueError is raised. 
""" - try: - return self._inicache[name] - except KeyError: - self._inicache[name] = val = self._getini(name) - return val - - def _getini(self, name): - try: - description, type, default = self._parser._inidict[name] - except KeyError: - raise ValueError("unknown configuration value: %r" % (name,)) - value = self._get_override_ini_value(name) - if value is None: - try: - value = self.inicfg[name] - except KeyError: - if default is not None: - return default - if type is None: - return "" - return [] - if type == "pathlist": - dp = py.path.local(self.inicfg.config.path).dirpath() - values = [] - for relpath in shlex.split(value): - values.append(dp.join(relpath, abs=True)) - return values - elif type == "args": - return shlex.split(value) - elif type == "linelist": - return [t for t in map(lambda x: x.strip(), value.split("\n")) if t] - elif type == "bool": - return bool(_strtobool(value.strip())) - else: - assert type is None - return value - - def _getconftest_pathlist(self, name, path): - try: - mod, relroots = self.pluginmanager._rget_with_confmod(name, path) - except KeyError: - return None - modpath = py.path.local(mod.__file__).dirpath() - values = [] - for relroot in relroots: - if not isinstance(relroot, py.path.local): - relroot = relroot.replace("/", py.path.local.sep) - relroot = modpath.join(relroot, abs=True) - values.append(relroot) - return values - - def _get_override_ini_value(self, name): - value = None - # override_ini is a list of "ini=value" options - # always use the last item if multiple values are set for same ini-name, - # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2 - for ini_config in self._override_ini: - try: - key, user_ini_value = ini_config.split("=", 1) - except ValueError: - raise UsageError("-o/--override-ini expects option=value style.") - else: - if key == name: - value = user_ini_value - return value - - def getoption(self, name, default=notset, skip=False): - """ return command line option value. - - :arg name: name of the option. You may also specify - the literal ``--OPT`` option instead of the "dest" option name. - :arg default: default value if no option of that name exists. - :arg skip: if True raise pytest.skip if option does not exists - or has a None value. - """ - name = self._opt2dest.get(name, name) - try: - val = getattr(self.option, name) - if val is None and skip: - raise AttributeError(name) - return val - except AttributeError: - if default is not notset: - return default - if skip: - import pytest - - pytest.skip("no %r option found" % (name,)) - raise ValueError("no option named %r" % (name,)) - - def getvalue(self, name, path=None): - """ (deprecated, use getoption()) """ - return self.getoption(name) - - def getvalueorskip(self, name, path=None): - """ (deprecated, use getoption(skip=True)) """ - return self.getoption(name, skip=True) - - -def _assertion_supported(): - try: - assert False - except AssertionError: - return True - else: - return False - - -def _warn_about_missing_assertion(mode): - if not _assertion_supported(): - if mode == "plain": - sys.stderr.write( - "WARNING: ASSERTIONS ARE NOT EXECUTED" - " and FAILING TESTS WILL PASS. Are you" - " using python -O?" 
- ) - else: - sys.stderr.write( - "WARNING: assertions not in test modules or" - " plugins will be ignored" - " because assert statements are not executed " - "by the underlying Python interpreter " - "(are you using python -O?)\n" - ) - - -def setns(obj, dic): - import pytest - - for name, value in dic.items(): - if isinstance(value, dict): - mod = getattr(obj, name, None) - if mod is None: - modname = "pytest.%s" % name - mod = types.ModuleType(modname) - sys.modules[modname] = mod - mod.__all__ = [] - setattr(obj, name, mod) - obj.__all__.append(name) - setns(mod, value) - else: - setattr(obj, name, value) - obj.__all__.append(name) - # if obj != pytest: - # pytest.__all__.append(name) - setattr(pytest, name, value) - - -def create_terminal_writer(config, *args, **kwargs): - """Create a TerminalWriter instance configured according to the options - in the config object. Every code which requires a TerminalWriter object - and has access to a config object should use this function. - """ - tw = py.io.TerminalWriter(*args, **kwargs) - if config.option.color == "yes": - tw.hasmarkup = True - if config.option.color == "no": - tw.hasmarkup = False - return tw - - -def _strtobool(val): - """Convert a string representation of truth to true (1) or false (0). - - True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values - are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if - 'val' is anything else. - - .. note:: copied from distutils.util - """ - val = val.lower() - if val in ("y", "yes", "t", "true", "on", "1"): - return 1 - elif val in ("n", "no", "f", "false", "off", "0"): - return 0 - else: - raise ValueError("invalid truth value %r" % (val,)) diff --git a/venv/lib/python3.8/site-packages/_pytest/config/argparsing.py b/venv/lib/python3.8/site-packages/_pytest/config/argparsing.py deleted file mode 100644 index 5b8306dd..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/config/argparsing.py +++ /dev/null @@ -1,409 +0,0 @@ -import argparse -import sys as _sys -import warnings -from gettext import gettext as _ - -import py -import six - -from ..main import EXIT_USAGEERROR - -FILE_OR_DIR = "file_or_dir" - - -class Parser(object): - """ Parser for command line arguments and ini-file values. - - :ivar extra_info: dict of generic param -> value to display in case - there's an error processing the command line arguments. - """ - - def __init__(self, usage=None, processopt=None): - self._anonymous = OptionGroup("custom options", parser=self) - self._groups = [] - self._processopt = processopt - self._usage = usage - self._inidict = {} - self._ininames = [] - self.extra_info = {} - - def processoption(self, option): - if self._processopt: - if option.dest: - self._processopt(option) - - def getgroup(self, name, description="", after=None): - """ get (or create) a named option Group. - - :name: name of the option group. - :description: long description for --help output. - :after: name of other group, used for ordering --help output. - - The returned group object has an ``addoption`` method with the same - signature as :py:func:`parser.addoption - <_pytest.config.Parser.addoption>` but will be shown in the - respective group in the output of ``pytest. --help``. 
- """ - for group in self._groups: - if group.name == name: - return group - group = OptionGroup(name, description, parser=self) - i = 0 - for i, grp in enumerate(self._groups): - if grp.name == after: - break - self._groups.insert(i + 1, group) - return group - - def addoption(self, *opts, **attrs): - """ register a command line option. - - :opts: option names, can be short or long options. - :attrs: same attributes which the ``add_option()`` function of the - `argparse library - `_ - accepts. - - After command line parsing options are available on the pytest config - object via ``config.option.NAME`` where ``NAME`` is usually set - by passing a ``dest`` attribute, for example - ``addoption("--long", dest="NAME", ...)``. - """ - self._anonymous.addoption(*opts, **attrs) - - def parse(self, args, namespace=None): - from _pytest._argcomplete import try_argcomplete - - self.optparser = self._getparser() - try_argcomplete(self.optparser) - args = [str(x) if isinstance(x, py.path.local) else x for x in args] - return self.optparser.parse_args(args, namespace=namespace) - - def _getparser(self): - from _pytest._argcomplete import filescompleter - - optparser = MyOptionParser(self, self.extra_info) - groups = self._groups + [self._anonymous] - for group in groups: - if group.options: - desc = group.description or group.name - arggroup = optparser.add_argument_group(desc) - for option in group.options: - n = option.names() - a = option.attrs() - arggroup.add_argument(*n, **a) - # bash like autocompletion for dirs (appending '/') - optparser.add_argument(FILE_OR_DIR, nargs="*").completer = filescompleter - return optparser - - def parse_setoption(self, args, option, namespace=None): - parsedoption = self.parse(args, namespace=namespace) - for name, value in parsedoption.__dict__.items(): - setattr(option, name, value) - return getattr(parsedoption, FILE_OR_DIR) - - def parse_known_args(self, args, namespace=None): - """parses and returns a namespace object with known arguments at this - point. - """ - return self.parse_known_and_unknown_args(args, namespace=namespace)[0] - - def parse_known_and_unknown_args(self, args, namespace=None): - """parses and returns a namespace object with known arguments, and - the remaining arguments unknown at this point. - """ - optparser = self._getparser() - args = [str(x) if isinstance(x, py.path.local) else x for x in args] - return optparser.parse_known_args(args, namespace=namespace) - - def addini(self, name, help, type=None, default=None): - """ register an ini-file option. - - :name: name of the ini-variable - :type: type of the variable, can be ``pathlist``, ``args``, ``linelist`` - or ``bool``. - :default: default value if no ini-file option exists but is queried. - - The value of ini-variables can be retrieved via a call to - :py:func:`config.getini(name) <_pytest.config.Config.getini>`. - """ - assert type in (None, "pathlist", "args", "linelist", "bool") - self._inidict[name] = (help, type, default) - self._ininames.append(name) - - -class ArgumentError(Exception): - """ - Raised if an Argument instance is created with invalid or - inconsistent arguments. 
- """ - - def __init__(self, msg, option): - self.msg = msg - self.option_id = str(option) - - def __str__(self): - if self.option_id: - return "option %s: %s" % (self.option_id, self.msg) - else: - return self.msg - - -class Argument(object): - """class that mimics the necessary behaviour of optparse.Option - - it's currently a least effort implementation - and ignoring choices and integer prefixes - https://docs.python.org/3/library/optparse.html#optparse-standard-option-types - """ - - _typ_map = {"int": int, "string": str, "float": float, "complex": complex} - - def __init__(self, *names, **attrs): - """store parms in private vars for use in add_argument""" - self._attrs = attrs - self._short_opts = [] - self._long_opts = [] - self.dest = attrs.get("dest") - if "%default" in (attrs.get("help") or ""): - warnings.warn( - 'pytest now uses argparse. "%default" should be' - ' changed to "%(default)s" ', - DeprecationWarning, - stacklevel=3, - ) - try: - typ = attrs["type"] - except KeyError: - pass - else: - # this might raise a keyerror as well, don't want to catch that - if isinstance(typ, six.string_types): - if typ == "choice": - warnings.warn( - "`type` argument to addoption() is the string %r." - " For choices this is optional and can be omitted, " - " but when supplied should be a type (for example `str` or `int`)." - " (options: %s)" % (typ, names), - DeprecationWarning, - stacklevel=4, - ) - # argparse expects a type here take it from - # the type of the first element - attrs["type"] = type(attrs["choices"][0]) - else: - warnings.warn( - "`type` argument to addoption() is the string %r, " - " but when supplied should be a type (for example `str` or `int`)." - " (options: %s)" % (typ, names), - DeprecationWarning, - stacklevel=4, - ) - attrs["type"] = Argument._typ_map[typ] - # used in test_parseopt -> test_parse_defaultgetter - self.type = attrs["type"] - else: - self.type = typ - try: - # attribute existence is tested in Config._processopt - self.default = attrs["default"] - except KeyError: - pass - self._set_opt_strings(names) - if not self.dest: - if self._long_opts: - self.dest = self._long_opts[0][2:].replace("-", "_") - else: - try: - self.dest = self._short_opts[0][1:] - except IndexError: - raise ArgumentError("need a long or short option", self) - - def names(self): - return self._short_opts + self._long_opts - - def attrs(self): - # update any attributes set by processopt - attrs = "default dest help".split() - if self.dest: - attrs.append(self.dest) - for attr in attrs: - try: - self._attrs[attr] = getattr(self, attr) - except AttributeError: - pass - if self._attrs.get("help"): - a = self._attrs["help"] - a = a.replace("%default", "%(default)s") - # a = a.replace('%prog', '%(prog)s') - self._attrs["help"] = a - return self._attrs - - def _set_opt_strings(self, opts): - """directly from optparse - - might not be necessary as this is passed to argparse later on""" - for opt in opts: - if len(opt) < 2: - raise ArgumentError( - "invalid option string %r: " - "must be at least two characters long" % opt, - self, - ) - elif len(opt) == 2: - if not (opt[0] == "-" and opt[1] != "-"): - raise ArgumentError( - "invalid short option string %r: " - "must be of the form -x, (x any non-dash char)" % opt, - self, - ) - self._short_opts.append(opt) - else: - if not (opt[0:2] == "--" and opt[2] != "-"): - raise ArgumentError( - "invalid long option string %r: " - "must start with --, followed by non-dash" % opt, - self, - ) - self._long_opts.append(opt) - - def __repr__(self): - args 
= [] - if self._short_opts: - args += ["_short_opts: " + repr(self._short_opts)] - if self._long_opts: - args += ["_long_opts: " + repr(self._long_opts)] - args += ["dest: " + repr(self.dest)] - if hasattr(self, "type"): - args += ["type: " + repr(self.type)] - if hasattr(self, "default"): - args += ["default: " + repr(self.default)] - return "Argument({})".format(", ".join(args)) - - -class OptionGroup(object): - def __init__(self, name, description="", parser=None): - self.name = name - self.description = description - self.options = [] - self.parser = parser - - def addoption(self, *optnames, **attrs): - """ add an option to this group. - - if a shortened version of a long option is specified it will - be suppressed in the help. addoption('--twowords', '--two-words') - results in help showing '--two-words' only, but --twowords gets - accepted **and** the automatic destination is in args.twowords - """ - conflict = set(optnames).intersection( - name for opt in self.options for name in opt.names() - ) - if conflict: - raise ValueError("option names %s already added" % conflict) - option = Argument(*optnames, **attrs) - self._addoption_instance(option, shortupper=False) - - def _addoption(self, *optnames, **attrs): - option = Argument(*optnames, **attrs) - self._addoption_instance(option, shortupper=True) - - def _addoption_instance(self, option, shortupper=False): - if not shortupper: - for opt in option._short_opts: - if opt[0] == "-" and opt[1].islower(): - raise ValueError("lowercase shortoptions reserved") - if self.parser: - self.parser.processoption(option) - self.options.append(option) - - -class MyOptionParser(argparse.ArgumentParser): - def __init__(self, parser, extra_info=None): - if not extra_info: - extra_info = {} - self._parser = parser - argparse.ArgumentParser.__init__( - self, - usage=parser._usage, - add_help=False, - formatter_class=DropShorterLongHelpFormatter, - ) - # extra_info is a dict of (param -> value) to display if there's - # an usage error to provide more contextual information to the user - self.extra_info = extra_info - - def error(self, message): - """error(message: string) - - Prints a usage message incorporating the message to stderr and - exits. 
- Overrides the method in parent class to change exit code""" - self.print_usage(_sys.stderr) - args = {"prog": self.prog, "message": message} - self.exit(EXIT_USAGEERROR, _("%(prog)s: error: %(message)s\n") % args) - - def parse_args(self, args=None, namespace=None): - """allow splitting of positional arguments""" - args, argv = self.parse_known_args(args, namespace) - if argv: - for arg in argv: - if arg and arg[0] == "-": - lines = ["unrecognized arguments: %s" % (" ".join(argv))] - for k, v in sorted(self.extra_info.items()): - lines.append(" %s: %s" % (k, v)) - self.error("\n".join(lines)) - getattr(args, FILE_OR_DIR).extend(argv) - return args - - -class DropShorterLongHelpFormatter(argparse.HelpFormatter): - """shorten help for long options that differ only in extra hyphens - - - collapse **long** options that are the same except for extra hyphens - - special action attribute map_long_option allows surpressing additional - long options - - shortcut if there are only two options and one of them is a short one - - cache result on action object as this is called at least 2 times - """ - - def _format_action_invocation(self, action): - orgstr = argparse.HelpFormatter._format_action_invocation(self, action) - if orgstr and orgstr[0] != "-": # only optional arguments - return orgstr - res = getattr(action, "_formatted_action_invocation", None) - if res: - return res - options = orgstr.split(", ") - if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2): - # a shortcut for '-h, --help' or '--abc', '-a' - action._formatted_action_invocation = orgstr - return orgstr - return_list = [] - option_map = getattr(action, "map_long_option", {}) - if option_map is None: - option_map = {} - short_long = {} - for option in options: - if len(option) == 2 or option[2] == " ": - continue - if not option.startswith("--"): - raise ArgumentError( - 'long optional argument without "--": [%s]' % (option), self - ) - xxoption = option[2:] - if xxoption.split()[0] not in option_map: - shortened = xxoption.replace("-", "") - if shortened not in short_long or len(short_long[shortened]) < len( - xxoption - ): - short_long[shortened] = xxoption - # now short_long has been filled out to the longest with dashes - # **and** we keep the right option ordering from add_argument - for option in options: - if len(option) == 2 or option[2] == " ": - return_list.append(option) - if option[2:] == short_long.get(option.replace("-", "")): - return_list.append(option.replace(" ", "=", 1)) - action._formatted_action_invocation = ", ".join(return_list) - return action._formatted_action_invocation diff --git a/venv/lib/python3.8/site-packages/_pytest/config/exceptions.py b/venv/lib/python3.8/site-packages/_pytest/config/exceptions.py deleted file mode 100644 index 19fe5cb0..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/config/exceptions.py +++ /dev/null @@ -1,9 +0,0 @@ -class UsageError(Exception): - """ error in pytest usage or invocation""" - - -class PrintHelp(Exception): - """Raised when pytest should print it's help to skip the rest of the - argument parsing and validation.""" - - pass diff --git a/venv/lib/python3.8/site-packages/_pytest/config/findpaths.py b/venv/lib/python3.8/site-packages/_pytest/config/findpaths.py deleted file mode 100644 index 4f371ec7..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/config/findpaths.py +++ /dev/null @@ -1,148 +0,0 @@ -import os - -import py - -from .exceptions import UsageError - - -def exists(path, ignore=EnvironmentError): - try: - return 
path.check() - except ignore: - return False - - -def getcfg(args, config=None): - """ - Search the list of arguments for a valid ini-file for pytest, - and return a tuple of (rootdir, inifile, cfg-dict). - - note: config is optional and used only to issue warnings explicitly (#2891). - """ - from _pytest.deprecated import CFG_PYTEST_SECTION - - inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"] - args = [x for x in args if not str(x).startswith("-")] - if not args: - args = [py.path.local()] - for arg in args: - arg = py.path.local(arg) - for base in arg.parts(reverse=True): - for inibasename in inibasenames: - p = base.join(inibasename) - if exists(p): - iniconfig = py.iniconfig.IniConfig(p) - if "pytest" in iniconfig.sections: - if inibasename == "setup.cfg" and config is not None: - from _pytest.warnings import _issue_config_warning - from _pytest.warning_types import RemovedInPytest4Warning - - _issue_config_warning( - RemovedInPytest4Warning( - CFG_PYTEST_SECTION.format(filename=inibasename) - ), - config=config, - ) - return base, p, iniconfig["pytest"] - if ( - inibasename == "setup.cfg" - and "tool:pytest" in iniconfig.sections - ): - return base, p, iniconfig["tool:pytest"] - elif inibasename == "pytest.ini": - # allowed to be empty - return base, p, {} - return None, None, None - - -def get_common_ancestor(paths): - common_ancestor = None - for path in paths: - if not path.exists(): - continue - if common_ancestor is None: - common_ancestor = path - else: - if path.relto(common_ancestor) or path == common_ancestor: - continue - elif common_ancestor.relto(path): - common_ancestor = path - else: - shared = path.common(common_ancestor) - if shared is not None: - common_ancestor = shared - if common_ancestor is None: - common_ancestor = py.path.local() - elif common_ancestor.isfile(): - common_ancestor = common_ancestor.dirpath() - return common_ancestor - - -def get_dirs_from_args(args): - def is_option(x): - return str(x).startswith("-") - - def get_file_part_from_node_id(x): - return str(x).split("::")[0] - - def get_dir_from_path(path): - if path.isdir(): - return path - return py.path.local(path.dirname) - - # These look like paths but may not exist - possible_paths = ( - py.path.local(get_file_part_from_node_id(arg)) - for arg in args - if not is_option(arg) - ) - - return [get_dir_from_path(path) for path in possible_paths if path.exists()] - - -def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None): - dirs = get_dirs_from_args(args) - if inifile: - iniconfig = py.iniconfig.IniConfig(inifile) - is_cfg_file = str(inifile).endswith(".cfg") - sections = ["tool:pytest", "pytest"] if is_cfg_file else ["pytest"] - for section in sections: - try: - inicfg = iniconfig[section] - if is_cfg_file and section == "pytest" and config is not None: - from _pytest.deprecated import CFG_PYTEST_SECTION - from _pytest.warnings import _issue_config_warning - - # TODO: [pytest] section in *.cfg files is deprecated. Need refactoring once - # the deprecation expires. 
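
getcfg above scans each argument's ancestors, nearest first, for pytest.ini, tox.ini, or setup.cfg carrying a matching section; a pytest.ini wins even when empty. A pathlib/configparser approximation of that search (illustrative; the real code uses py.iniconfig and also warns about the deprecated [pytest] section in setup.cfg):

    import configparser
    from pathlib import Path

    INI_NAMES = ("pytest.ini", "tox.ini", "setup.cfg")

    def find_inifile(start):
        """Return the first candidate ini file governing `start`, else None."""
        start = Path(start).resolve()
        for base in [start, *start.parents]:
            for name in INI_NAMES:
                candidate = base / name
                if not candidate.is_file():
                    continue
                if name == "pytest.ini":
                    return candidate  # allowed to be empty, as in getcfg
                cfg = configparser.ConfigParser()
                cfg.read(candidate)
                section = "tool:pytest" if name == "setup.cfg" else "pytest"
                if cfg.has_section(section):
                    return candidate
        return None

    print(find_inifile("."))
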
- _issue_config_warning( - CFG_PYTEST_SECTION.format(filename=str(inifile)), config - ) - break - except KeyError: - inicfg = None - rootdir = get_common_ancestor(dirs) - else: - ancestor = get_common_ancestor(dirs) - rootdir, inifile, inicfg = getcfg([ancestor], config=config) - if rootdir is None: - for rootdir in ancestor.parts(reverse=True): - if rootdir.join("setup.py").exists(): - break - else: - rootdir, inifile, inicfg = getcfg(dirs, config=config) - if rootdir is None: - rootdir = get_common_ancestor([py.path.local(), ancestor]) - is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/" - if is_fs_root: - rootdir = ancestor - if rootdir_cmd_arg: - rootdir_abs_path = py.path.local(os.path.expandvars(rootdir_cmd_arg)) - if not os.path.isdir(str(rootdir_abs_path)): - raise UsageError( - "Directory '{}' not found. Check your '--rootdir' option.".format( - rootdir_abs_path - ) - ) - rootdir = rootdir_abs_path - return rootdir, inifile, inicfg or {} diff --git a/venv/lib/python3.8/site-packages/_pytest/debugging.py b/venv/lib/python3.8/site-packages/_pytest/debugging.py deleted file mode 100644 index fe54d493..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/debugging.py +++ /dev/null @@ -1,237 +0,0 @@ -""" interactive debugging with PDB, the Python Debugger. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import pdb -import sys -from doctest import UnexpectedException - -from _pytest import outcomes -from _pytest.config import hookimpl - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group._addoption( - "--pdb", - dest="usepdb", - action="store_true", - help="start the interactive Python debugger on errors or KeyboardInterrupt.", - ) - group._addoption( - "--pdbcls", - dest="usepdb_cls", - metavar="modulename:classname", - help="start a custom interactive Python debugger on errors. " - "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb", - ) - group._addoption( - "--trace", - dest="trace", - action="store_true", - help="Immediately break when running each test.", - ) - - -def pytest_configure(config): - if config.getvalue("usepdb_cls"): - modname, classname = config.getvalue("usepdb_cls").split(":") - __import__(modname) - pdb_cls = getattr(sys.modules[modname], classname) - else: - pdb_cls = pdb.Pdb - - if config.getvalue("trace"): - config.pluginmanager.register(PdbTrace(), "pdbtrace") - if config.getvalue("usepdb"): - config.pluginmanager.register(PdbInvoke(), "pdbinvoke") - - pytestPDB._saved.append( - (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config, pytestPDB._pdb_cls) - ) - pdb.set_trace = pytestPDB.set_trace - pytestPDB._pluginmanager = config.pluginmanager - pytestPDB._config = config - pytestPDB._pdb_cls = pdb_cls - - # NOTE: not using pytest_unconfigure, since it might get called although - # pytest_configure was not (if another plugin raises UsageError). - def fin(): - ( - pdb.set_trace, - pytestPDB._pluginmanager, - pytestPDB._config, - pytestPDB._pdb_cls, - ) = pytestPDB._saved.pop() - - config._cleanup.append(fin) - - -class pytestPDB(object): - """ Pseudo PDB that defers to the real pdb. """ - - _pluginmanager = None - _config = None - _pdb_cls = pdb.Pdb - _saved = [] - - @classmethod - def set_trace(cls, set_break=True): - """ invoke PDB set_trace debugging, dropping any IO capturing. 
""" - import _pytest.config - - frame = sys._getframe().f_back - if cls._pluginmanager is not None: - capman = cls._pluginmanager.getplugin("capturemanager") - if capman: - capman.suspend_global_capture(in_=True) - tw = _pytest.config.create_terminal_writer(cls._config) - tw.line() - if capman and capman.is_globally_capturing(): - tw.sep(">", "PDB set_trace (IO-capturing turned off)") - else: - tw.sep(">", "PDB set_trace") - - class _PdbWrapper(cls._pdb_cls, object): - _pytest_capman = capman - _continued = False - - def do_continue(self, arg): - ret = super(_PdbWrapper, self).do_continue(arg) - if self._pytest_capman: - tw = _pytest.config.create_terminal_writer(cls._config) - tw.line() - if self._pytest_capman.is_globally_capturing(): - tw.sep(">", "PDB continue (IO-capturing resumed)") - else: - tw.sep(">", "PDB continue") - self._pytest_capman.resume_global_capture() - cls._pluginmanager.hook.pytest_leave_pdb( - config=cls._config, pdb=self - ) - self._continued = True - return ret - - do_c = do_cont = do_continue - - def setup(self, f, tb): - """Suspend on setup(). - - Needed after do_continue resumed, and entering another - breakpoint again. - """ - ret = super(_PdbWrapper, self).setup(f, tb) - if not ret and self._continued: - # pdb.setup() returns True if the command wants to exit - # from the interaction: do not suspend capturing then. - if self._pytest_capman: - self._pytest_capman.suspend_global_capture(in_=True) - return ret - - _pdb = _PdbWrapper() - cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) - else: - _pdb = cls._pdb_cls() - - if set_break: - _pdb.set_trace(frame) - - -class PdbInvoke(object): - def pytest_exception_interact(self, node, call, report): - capman = node.config.pluginmanager.getplugin("capturemanager") - if capman: - capman.suspend_global_capture(in_=True) - out, err = capman.read_global_capture() - sys.stdout.write(out) - sys.stdout.write(err) - _enter_pdb(node, call.excinfo, report) - - def pytest_internalerror(self, excrepr, excinfo): - tb = _postmortem_traceback(excinfo) - post_mortem(tb) - - -class PdbTrace(object): - @hookimpl(hookwrapper=True) - def pytest_pyfunc_call(self, pyfuncitem): - _test_pytest_function(pyfuncitem) - yield - - -def _test_pytest_function(pyfuncitem): - pytestPDB.set_trace(set_break=False) - testfunction = pyfuncitem.obj - pyfuncitem.obj = pdb.runcall - if pyfuncitem._isyieldedfunction(): - arg_list = list(pyfuncitem._args) - arg_list.insert(0, testfunction) - pyfuncitem._args = tuple(arg_list) - else: - if "func" in pyfuncitem._fixtureinfo.argnames: - raise ValueError("--trace can't be used with a fixture named func!") - pyfuncitem.funcargs["func"] = testfunction - new_list = list(pyfuncitem._fixtureinfo.argnames) - new_list.append("func") - pyfuncitem._fixtureinfo.argnames = tuple(new_list) - - -def _enter_pdb(node, excinfo, rep): - # XXX we re-use the TerminalReporter's terminalwriter - # because this seems to avoid some encoding related troubles - # for not completely clear reasons. 
- tw = node.config.pluginmanager.getplugin("terminalreporter")._tw - tw.line() - - showcapture = node.config.option.showcapture - - for sectionname, content in ( - ("stdout", rep.capstdout), - ("stderr", rep.capstderr), - ("log", rep.caplog), - ): - if showcapture in (sectionname, "all") and content: - tw.sep(">", "captured " + sectionname) - if content[-1:] == "\n": - content = content[:-1] - tw.line(content) - - tw.sep(">", "traceback") - rep.toterminal(tw) - tw.sep(">", "entering PDB") - tb = _postmortem_traceback(excinfo) - rep._pdbshown = True - if post_mortem(tb): - outcomes.exit("Quitting debugger") - return rep - - -def _postmortem_traceback(excinfo): - if isinstance(excinfo.value, UnexpectedException): - # A doctest.UnexpectedException is not useful for post_mortem. - # Use the underlying exception instead: - return excinfo.value.exc_info[2] - else: - return excinfo._excinfo[2] - - -def _find_last_non_hidden_frame(stack): - i = max(0, len(stack) - 1) - while i and stack[i][0].f_locals.get("__tracebackhide__", False): - i -= 1 - return i - - -def post_mortem(t): - class Pdb(pytestPDB._pdb_cls): - def get_stack(self, f, t): - stack, i = pdb.Pdb.get_stack(self, f, t) - if f is None: - i = _find_last_non_hidden_frame(stack) - return stack, i - - p = Pdb() - p.reset() - p.interaction(None, t) - return p.quitting diff --git a/venv/lib/python3.8/site-packages/_pytest/deprecated.py b/venv/lib/python3.8/site-packages/_pytest/deprecated.py deleted file mode 100644 index 8d7a17bc..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/deprecated.py +++ /dev/null @@ -1,123 +0,0 @@ -""" -This module contains deprecation messages and bits of code used elsewhere in the codebase -that is planned to be removed in the next pytest release. - -Keeping it in a central location makes it easy to track what is deprecated and should -be removed when the time comes. - -All constants defined in this module should be either PytestWarning instances or UnformattedWarning -in case of warnings which need to format their messages. -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from _pytest.warning_types import PytestDeprecationWarning -from _pytest.warning_types import RemovedInPytest4Warning -from _pytest.warning_types import UnformattedWarning - - -MAIN_STR_ARGS = RemovedInPytest4Warning( - "passing a string to pytest.main() is deprecated, " - "pass a list of arguments instead." -) - -YIELD_TESTS = RemovedInPytest4Warning( - "yield tests are deprecated, and scheduled to be removed in pytest 4.0" -) - -CACHED_SETUP = RemovedInPytest4Warning( - "cached_setup is deprecated and will be removed in a future release. " - "Use standard fixture functions instead." -) - -COMPAT_PROPERTY = UnformattedWarning( - RemovedInPytest4Warning, - "usage of {owner}.{name} is deprecated, please use pytest.{name} instead", -) - -CUSTOM_CLASS = UnformattedWarning( - RemovedInPytest4Warning, - 'use of special named "{name}" objects in collectors of type "{type_name}" to ' - "customize the created nodes is deprecated. " - "Use pytest_pycollect_makeitem(...) to create custom " - "collection nodes instead.", -) - -FUNCARG_PREFIX = UnformattedWarning( - RemovedInPytest4Warning, - '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated ' - "and scheduled to be removed in pytest 4.0. 
" - "Please remove the prefix and use the @pytest.fixture decorator instead.", -) - -FIXTURE_FUNCTION_CALL = UnformattedWarning( - RemovedInPytest4Warning, - 'Fixture "{name}" called directly. Fixtures are not meant to be called directly, ' - "are created automatically when test functions request them as parameters. " - "See https://docs.pytest.org/en/latest/fixture.html for more information.", -) - -FIXTURE_NAMED_REQUEST = PytestDeprecationWarning( - "'request' is a reserved name for fixtures and will raise an error in future versions" -) - -CFG_PYTEST_SECTION = UnformattedWarning( - RemovedInPytest4Warning, - "[pytest] section in {filename} files is deprecated, use [tool:pytest] instead.", -) - -GETFUNCARGVALUE = RemovedInPytest4Warning( - "getfuncargvalue is deprecated, use getfixturevalue" -) - -RESULT_LOG = RemovedInPytest4Warning( - "--result-log is deprecated and scheduled for removal in pytest 4.0.\n" - "See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information." -) - -MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning( - "MarkInfo objects are deprecated as they contain merged marks which are hard to deal with correctly.\n" - "Please use node.get_closest_marker(name) or node.iter_markers(name).\n" - "Docs: https://docs.pytest.org/en/latest/mark.html#updating-code" -) - -MARK_PARAMETERSET_UNPACKING = RemovedInPytest4Warning( - "Applying marks directly to parameters is deprecated," - " please use pytest.param(..., marks=...) instead.\n" - "For more details, see: https://docs.pytest.org/en/latest/parametrize.html" -) - -NODE_WARN = RemovedInPytest4Warning( - "Node.warn(code, message) form has been deprecated, use Node.warn(warning_instance) instead." -) - -RECORD_XML_PROPERTY = RemovedInPytest4Warning( - 'Fixture renamed from "record_xml_property" to "record_property" as user ' - "properties are now available to all reporters.\n" - '"record_xml_property" is now deprecated.' -) - -COLLECTOR_MAKEITEM = RemovedInPytest4Warning( - "pycollector makeitem was removed as it is an accidentially leaked internal api" -) - -METAFUNC_ADD_CALL = RemovedInPytest4Warning( - "Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0.\n" - "Please use Metafunc.parametrize instead." -) - -PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = RemovedInPytest4Warning( - "Defining pytest_plugins in a non-top-level conftest is deprecated, " - "because it affects the entire directory tree in a non-explicit way.\n" - "Please move it to the top level conftest file instead." 
-) - -PYTEST_NAMESPACE = RemovedInPytest4Warning( - "pytest_namespace is deprecated and will be removed soon" -) - -PYTEST_ENSURETEMP = RemovedInPytest4Warning( - "pytest/tmpdir_factory.ensuretemp is deprecated, \n" - "please use the tmp_path fixture or tmp_path_factory.mktemp" -) diff --git a/venv/lib/python3.8/site-packages/_pytest/doctest.py b/venv/lib/python3.8/site-packages/_pytest/doctest.py deleted file mode 100644 index dbf7df82..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/doctest.py +++ /dev/null @@ -1,516 +0,0 @@ -""" discover and run doctests in modules and test files.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import platform -import sys -import traceback - -import pytest -from _pytest._code.code import ExceptionInfo -from _pytest._code.code import ReprFileLocation -from _pytest._code.code import TerminalRepr -from _pytest.fixtures import FixtureRequest - - -DOCTEST_REPORT_CHOICE_NONE = "none" -DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" -DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" -DOCTEST_REPORT_CHOICE_UDIFF = "udiff" -DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" - -DOCTEST_REPORT_CHOICES = ( - DOCTEST_REPORT_CHOICE_NONE, - DOCTEST_REPORT_CHOICE_CDIFF, - DOCTEST_REPORT_CHOICE_NDIFF, - DOCTEST_REPORT_CHOICE_UDIFF, - DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, -) - -# Lazy definition of runner class -RUNNER_CLASS = None - - -def pytest_addoption(parser): - parser.addini( - "doctest_optionflags", - "option flags for doctests", - type="args", - default=["ELLIPSIS"], - ) - parser.addini( - "doctest_encoding", "encoding used for doctest files", default="utf-8" - ) - group = parser.getgroup("collect") - group.addoption( - "--doctest-modules", - action="store_true", - default=False, - help="run doctests in all .py modules", - dest="doctestmodules", - ) - group.addoption( - "--doctest-report", - type=str.lower, - default="udiff", - help="choose another output format for diffs on doctest failure", - choices=DOCTEST_REPORT_CHOICES, - dest="doctestreport", - ) - group.addoption( - "--doctest-glob", - action="append", - default=[], - metavar="pat", - help="doctests file matching pattern, default: test*.txt", - dest="doctestglob", - ) - group.addoption( - "--doctest-ignore-import-errors", - action="store_true", - default=False, - help="ignore doctest ImportErrors", - dest="doctest_ignore_import_errors", - ) - group.addoption( - "--doctest-continue-on-failure", - action="store_true", - default=False, - help="for a given doctest, continue to run after the first failure", - dest="doctest_continue_on_failure", - ) - - -def pytest_collect_file(path, parent): - config = parent.config - if path.ext == ".py": - if config.option.doctestmodules and not _is_setup_py(config, path, parent): - return DoctestModule(path, parent) - elif _is_doctest(config, path, parent): - return DoctestTextfile(path, parent) - - -def _is_setup_py(config, path, parent): - if path.basename != "setup.py": - return False - contents = path.read() - return "setuptools" in contents or "distutils" in contents - - -def _is_doctest(config, path, parent): - if path.ext in (".txt", ".rst") and parent.session.isinitpath(path): - return True - globs = config.getoption("doctestglob") or ["test*.txt"] - for glob in globs: - if path.check(fnmatch=glob): - return True - return False - - -class ReprFailDoctest(TerminalRepr): - def __init__(self, reprlocation_lines): - # List of (reprlocation, lines) tuples - self.reprlocation_lines = 
reprlocation_lines - - def toterminal(self, tw): - for reprlocation, lines in self.reprlocation_lines: - for line in lines: - tw.line(line) - reprlocation.toterminal(tw) - - -class MultipleDoctestFailures(Exception): - def __init__(self, failures): - super(MultipleDoctestFailures, self).__init__() - self.failures = failures - - -def _init_runner_class(): - import doctest - - class PytestDoctestRunner(doctest.DebugRunner): - """ - Runner to collect failures. Note that the out variable in this case is - a list instead of a stdout-like object - """ - - def __init__( - self, checker=None, verbose=None, optionflags=0, continue_on_failure=True - ): - doctest.DebugRunner.__init__( - self, checker=checker, verbose=verbose, optionflags=optionflags - ) - self.continue_on_failure = continue_on_failure - - def report_failure(self, out, test, example, got): - failure = doctest.DocTestFailure(test, example, got) - if self.continue_on_failure: - out.append(failure) - else: - raise failure - - def report_unexpected_exception(self, out, test, example, exc_info): - failure = doctest.UnexpectedException(test, example, exc_info) - if self.continue_on_failure: - out.append(failure) - else: - raise failure - - return PytestDoctestRunner - - -def _get_runner(checker=None, verbose=None, optionflags=0, continue_on_failure=True): - # We need this in order to do a lazy import on doctest - global RUNNER_CLASS - if RUNNER_CLASS is None: - RUNNER_CLASS = _init_runner_class() - return RUNNER_CLASS( - checker=checker, - verbose=verbose, - optionflags=optionflags, - continue_on_failure=continue_on_failure, - ) - - -class DoctestItem(pytest.Item): - def __init__(self, name, parent, runner=None, dtest=None): - super(DoctestItem, self).__init__(name, parent) - self.runner = runner - self.dtest = dtest - self.obj = None - self.fixture_request = None - - def setup(self): - if self.dtest is not None: - self.fixture_request = _setup_fixtures(self) - globs = dict(getfixture=self.fixture_request.getfixturevalue) - for name, value in self.fixture_request.getfixturevalue( - "doctest_namespace" - ).items(): - globs[name] = value - self.dtest.globs.update(globs) - - def runtest(self): - _check_all_skipped(self.dtest) - self._disable_output_capturing_for_darwin() - failures = [] - self.runner.run(self.dtest, out=failures) - if failures: - raise MultipleDoctestFailures(failures) - - def _disable_output_capturing_for_darwin(self): - """ - Disable output capturing. 
Otherwise, stdout is lost to doctest (#985) - """ - if platform.system() != "Darwin": - return - capman = self.config.pluginmanager.getplugin("capturemanager") - if capman: - capman.suspend_global_capture(in_=True) - out, err = capman.read_global_capture() - sys.stdout.write(out) - sys.stderr.write(err) - - def repr_failure(self, excinfo): - import doctest - - failures = None - if excinfo.errisinstance((doctest.DocTestFailure, doctest.UnexpectedException)): - failures = [excinfo.value] - elif excinfo.errisinstance(MultipleDoctestFailures): - failures = excinfo.value.failures - - if failures is not None: - reprlocation_lines = [] - for failure in failures: - example = failure.example - test = failure.test - filename = test.filename - if test.lineno is None: - lineno = None - else: - lineno = test.lineno + example.lineno + 1 - message = type(failure).__name__ - reprlocation = ReprFileLocation(filename, lineno, message) - checker = _get_checker() - report_choice = _get_report_choice( - self.config.getoption("doctestreport") - ) - if lineno is not None: - lines = failure.test.docstring.splitlines(False) - # add line numbers to the left of the error message - lines = [ - "%03d %s" % (i + test.lineno + 1, x) - for (i, x) in enumerate(lines) - ] - # trim docstring error lines to 10 - lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] - else: - lines = [ - "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" - ] - indent = ">>>" - for line in example.source.splitlines(): - lines.append("??? %s %s" % (indent, line)) - indent = "..." - if isinstance(failure, doctest.DocTestFailure): - lines += checker.output_difference( - example, failure.got, report_choice - ).split("\n") - else: - inner_excinfo = ExceptionInfo(failure.exc_info) - lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)] - lines += traceback.format_exception(*failure.exc_info) - reprlocation_lines.append((reprlocation, lines)) - return ReprFailDoctest(reprlocation_lines) - else: - return super(DoctestItem, self).repr_failure(excinfo) - - def reportinfo(self): - return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name - - -def _get_flag_lookup(): - import doctest - - return dict( - DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, - DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, - NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, - ELLIPSIS=doctest.ELLIPSIS, - IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, - COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, - ALLOW_UNICODE=_get_allow_unicode_flag(), - ALLOW_BYTES=_get_allow_bytes_flag(), - ) - - -def get_optionflags(parent): - optionflags_str = parent.config.getini("doctest_optionflags") - flag_lookup_table = _get_flag_lookup() - flag_acc = 0 - for flag in optionflags_str: - flag_acc |= flag_lookup_table[flag] - return flag_acc - - -def _get_continue_on_failure(config): - continue_on_failure = config.getvalue("doctest_continue_on_failure") - if continue_on_failure: - # We need to turn off this if we use pdb since we should stop at - # the first failure - if config.getvalue("usepdb"): - continue_on_failure = False - return continue_on_failure - - -class DoctestTextfile(pytest.Module): - obj = None - - def collect(self): - import doctest - - # inspired by doctest.testfile; ideally we would use it directly, - # but it doesn't support passing a custom checker - encoding = self.config.getini("doctest_encoding") - text = self.fspath.read_text(encoding) - filename = str(self.fspath) - name = self.fspath.basename - globs = 
{"__name__": "__main__"} - - optionflags = get_optionflags(self) - - runner = _get_runner( - verbose=0, - optionflags=optionflags, - checker=_get_checker(), - continue_on_failure=_get_continue_on_failure(self.config), - ) - _fix_spoof_python2(runner, encoding) - - parser = doctest.DocTestParser() - test = parser.get_doctest(text, globs, name, filename, 0) - if test.examples: - yield DoctestItem(test.name, self, runner, test) - - -def _check_all_skipped(test): - """raises pytest.skip() if all examples in the given DocTest have the SKIP - option set. - """ - import doctest - - all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) - if all_skipped: - pytest.skip("all tests skipped by +SKIP option") - - -class DoctestModule(pytest.Module): - def collect(self): - import doctest - - if self.fspath.basename == "conftest.py": - module = self.config.pluginmanager._importconftest(self.fspath) - else: - try: - module = self.fspath.pyimport() - except ImportError: - if self.config.getvalue("doctest_ignore_import_errors"): - pytest.skip("unable to import module %r" % self.fspath) - else: - raise - # uses internal doctest module parsing mechanism - finder = doctest.DocTestFinder() - optionflags = get_optionflags(self) - runner = _get_runner( - verbose=0, - optionflags=optionflags, - checker=_get_checker(), - continue_on_failure=_get_continue_on_failure(self.config), - ) - - for test in finder.find(module, module.__name__): - if test.examples: # skip empty doctests - yield DoctestItem(test.name, self, runner, test) - - -def _setup_fixtures(doctest_item): - """ - Used by DoctestTextfile and DoctestItem to setup fixture information. - """ - - def func(): - pass - - doctest_item.funcargs = {} - fm = doctest_item.session._fixturemanager - doctest_item._fixtureinfo = fm.getfixtureinfo( - node=doctest_item, func=func, cls=None, funcargs=False - ) - fixture_request = FixtureRequest(doctest_item) - fixture_request._fillfixtures() - return fixture_request - - -def _get_checker(): - """ - Returns a doctest.OutputChecker subclass that takes in account the - ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES - to strip b'' prefixes. - Useful when the same doctest should run in Python 2 and Python 3. - - An inner class is used to avoid importing "doctest" at the module - level. - """ - if hasattr(_get_checker, "LiteralsOutputChecker"): - return _get_checker.LiteralsOutputChecker() - - import doctest - import re - - class LiteralsOutputChecker(doctest.OutputChecker): - """ - Copied from doctest_nose_plugin.py from the nltk project: - https://github.com/nltk/nltk - - Further extended to also support byte literals. 
- """ - - _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE) - _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE) - - def check_output(self, want, got, optionflags): - res = doctest.OutputChecker.check_output(self, want, got, optionflags) - if res: - return True - - allow_unicode = optionflags & _get_allow_unicode_flag() - allow_bytes = optionflags & _get_allow_bytes_flag() - if not allow_unicode and not allow_bytes: - return False - - else: # pragma: no cover - - def remove_prefixes(regex, txt): - return re.sub(regex, r"\1\2", txt) - - if allow_unicode: - want = remove_prefixes(self._unicode_literal_re, want) - got = remove_prefixes(self._unicode_literal_re, got) - if allow_bytes: - want = remove_prefixes(self._bytes_literal_re, want) - got = remove_prefixes(self._bytes_literal_re, got) - res = doctest.OutputChecker.check_output(self, want, got, optionflags) - return res - - _get_checker.LiteralsOutputChecker = LiteralsOutputChecker - return _get_checker.LiteralsOutputChecker() - - -def _get_allow_unicode_flag(): - """ - Registers and returns the ALLOW_UNICODE flag. - """ - import doctest - - return doctest.register_optionflag("ALLOW_UNICODE") - - -def _get_allow_bytes_flag(): - """ - Registers and returns the ALLOW_BYTES flag. - """ - import doctest - - return doctest.register_optionflag("ALLOW_BYTES") - - -def _get_report_choice(key): - """ - This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid - importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests. - """ - import doctest - - return { - DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, - DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, - DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, - DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, - DOCTEST_REPORT_CHOICE_NONE: 0, - }[key] - - -def _fix_spoof_python2(runner, encoding): - """ - Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This - should patch only doctests for text files because they don't have a way to declare their - encoding. Doctests in docstrings from Python modules don't have the same problem given that - Python already decoded the strings. - - This fixes the problem related in issue #2434. - """ - from _pytest.compat import _PY2 - - if not _PY2: - return - - from doctest import _SpoofOut - - class UnicodeSpoof(_SpoofOut): - def getvalue(self): - result = _SpoofOut.getvalue(self) - if encoding and isinstance(result, bytes): - result = result.decode(encoding) - return result - - runner._fakeout = UnicodeSpoof() - - -@pytest.fixture(scope="session") -def doctest_namespace(): - """ - Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. 
- """ - return dict() diff --git a/venv/lib/python3.8/site-packages/_pytest/fixtures.py b/venv/lib/python3.8/site-packages/_pytest/fixtures.py deleted file mode 100644 index 124b611d..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/fixtures.py +++ /dev/null @@ -1,1398 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import functools -import inspect -import sys -import warnings -from collections import defaultdict -from collections import deque -from collections import OrderedDict - -import attr -import py -import six -from more_itertools import flatten -from py._code.code import FormattedExcinfo - -import _pytest -from _pytest import nodes -from _pytest._code.code import TerminalRepr -from _pytest.compat import _format_args -from _pytest.compat import _PytestWrapper -from _pytest.compat import exc_clear -from _pytest.compat import FuncargnamesCompatAttr -from _pytest.compat import get_real_func -from _pytest.compat import get_real_method -from _pytest.compat import getfslineno -from _pytest.compat import getfuncargnames -from _pytest.compat import getimfunc -from _pytest.compat import getlocation -from _pytest.compat import is_generator -from _pytest.compat import isclass -from _pytest.compat import NOTSET -from _pytest.compat import safe_getattr -from _pytest.deprecated import FIXTURE_FUNCTION_CALL -from _pytest.deprecated import FIXTURE_NAMED_REQUEST -from _pytest.outcomes import fail -from _pytest.outcomes import TEST_OUTCOME - -FIXTURE_MSG = 'fixtures cannot have "pytest_funcarg__" prefix and be decorated with @pytest.fixture:\n{}' - - -@attr.s(frozen=True) -class PseudoFixtureDef(object): - cached_result = attr.ib() - scope = attr.ib() - - -def pytest_sessionstart(session): - import _pytest.python - import _pytest.nodes - - scopename2class.update( - { - "package": _pytest.python.Package, - "class": _pytest.python.Class, - "module": _pytest.python.Module, - "function": _pytest.nodes.Item, - "session": _pytest.main.Session, - } - ) - session._fixturemanager = FixtureManager(session) - - -scopename2class = {} - - -scope2props = dict(session=()) -scope2props["package"] = ("fspath",) -scope2props["module"] = ("fspath", "module") -scope2props["class"] = scope2props["module"] + ("cls",) -scope2props["instance"] = scope2props["class"] + ("instance",) -scope2props["function"] = scope2props["instance"] + ("function", "keywords") - - -def scopeproperty(name=None, doc=None): - def decoratescope(func): - scopename = name or func.__name__ - - def provide(self): - if func.__name__ in scope2props[self.scope]: - return func(self) - raise AttributeError( - "%s not available in %s-scoped context" % (scopename, self.scope) - ) - - return property(provide, None, None, func.__doc__) - - return decoratescope - - -def get_scope_package(node, fixturedef): - import pytest - - cls = pytest.Package - current = node - fixture_package_name = "%s/%s" % (fixturedef.baseid, "__init__.py") - while current and ( - type(current) is not cls or fixture_package_name != current.nodeid - ): - current = current.parent - if current is None: - return node.session - return current - - -def get_scope_node(node, scope): - cls = scopename2class.get(scope) - if cls is None: - raise ValueError("unknown scope") - return node.getparent(cls) - - -def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): - # this function will transform all collected calls to a functions - # if they use direct funcargs (i.e. 
-
-
-def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
-    # this function will transform all collected calls to functions
-    # if they use direct funcargs (i.e. direct parametrization)
-    # because we want later test execution to be able to rely on
-    # an existing FixtureDef structure for all arguments.
-    # XXX we can probably avoid this algorithm if we modify CallSpec2
-    # to directly care for creating the fixturedefs within its methods.
-    if not metafunc._calls[0].funcargs:
-        return  # this function call does not have direct parametrization
-    # collect funcargs of all callspecs into a list of values
-    arg2params = {}
-    arg2scope = {}
-    for callspec in metafunc._calls:
-        for argname, argvalue in callspec.funcargs.items():
-            assert argname not in callspec.params
-            callspec.params[argname] = argvalue
-            arg2params_list = arg2params.setdefault(argname, [])
-            callspec.indices[argname] = len(arg2params_list)
-            arg2params_list.append(argvalue)
-            if argname not in arg2scope:
-                scopenum = callspec._arg2scopenum.get(argname, scopenum_function)
-                arg2scope[argname] = scopes[scopenum]
-        callspec.funcargs.clear()
-
-    # register artificial FixtureDef's so that later at test execution
-    # time we can rely on a proper FixtureDef to exist for fixture setup.
-    arg2fixturedefs = metafunc._arg2fixturedefs
-    for argname, valuelist in arg2params.items():
-        # if we have a scope that is higher than function we need
-        # to make sure we only ever create a matching fixturedef on
-        # a per-scope basis. We thus store and cache the fixturedef on the
-        # node related to the scope.
-        scope = arg2scope[argname]
-        node = None
-        if scope != "function":
-            node = get_scope_node(collector, scope)
-            if node is None:
-                assert scope == "class" and isinstance(collector, _pytest.python.Module)
-                # use module-level collector for class-scope (for now)
-                node = collector
-        if node and argname in node._name2pseudofixturedef:
-            arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
-        else:
-            fixturedef = FixtureDef(
-                fixturemanager,
-                "",
-                argname,
-                get_direct_param_fixture_func,
-                arg2scope[argname],
-                valuelist,
-                False,
-                False,
-            )
-            arg2fixturedefs[argname] = [fixturedef]
-            if node is not None:
-                node._name2pseudofixturedef[argname] = fixturedef
-
-
-def getfixturemarker(obj):
-    """ return fixturemarker or None if it doesn't exist or raised
-    exceptions."""
-    try:
-        return getattr(obj, "_pytestfixturefunction", None)
-    except TEST_OUTCOME:
-        # some objects raise errors like request (from flask import request)
-        # we don't expect them to be fixture functions
-        return None
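The pseudo-FixtureDef machinery above makes directly parametrized arguments indistinguishable from fixture-backed ones at setup time: each parametrized name is wrapped in an artificial FixtureDef around get_direct_param_fixture_func, which simply returns request.param. In user terms this is plain @pytest.mark.parametrize:

import pytest

@pytest.mark.parametrize("x", [0, 1])
def test_direct(x):
    # x has no real fixture function; a pseudo FixtureDef supplies
    # request.param for each collected call
    assert x in (0, 1)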
-
-
-def get_parametrized_fixture_keys(item, scopenum):
-    """ return list of keys for all parametrized arguments which match
-    the specified scope. """
-    assert scopenum < scopenum_function  # function
-    try:
-        cs = item.callspec
-    except AttributeError:
-        pass
-    else:
-        # cs.indices.items() is random order of argnames. Need to
-        # sort this so that different calls to
-        # get_parametrized_fixture_keys will be deterministic.
-        for argname, param_index in sorted(cs.indices.items()):
-            if cs._arg2scopenum[argname] != scopenum:
-                continue
-            if scopenum == 0:  # session
-                key = (argname, param_index)
-            elif scopenum == 1:  # package
-                key = (argname, param_index, item.fspath.dirpath())
-            elif scopenum == 2:  # module
-                key = (argname, param_index, item.fspath)
-            elif scopenum == 3:  # class
-                key = (argname, param_index, item.fspath, item.cls)
-            yield key
-
-
-# algorithm for sorting on a per-parametrized resource setup basis
-# it is called for scopenum==0 (session) first and performs sorting
-# down to the lower scopes such as to minimize number of "high scope"
-# setups and teardowns
-
-
-def reorder_items(items):
-    argkeys_cache = {}
-    items_by_argkey = {}
-    for scopenum in range(0, scopenum_function):
-        argkeys_cache[scopenum] = d = {}
-        items_by_argkey[scopenum] = item_d = defaultdict(deque)
-        for item in items:
-            keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
-            if keys:
-                d[item] = keys
-                for key in keys:
-                    item_d[key].append(item)
-    items = OrderedDict.fromkeys(items)
-    return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0))
-
-
-def fix_cache_order(item, argkeys_cache, items_by_argkey):
-    for scopenum in range(0, scopenum_function):
-        for key in argkeys_cache[scopenum].get(item, []):
-            items_by_argkey[scopenum][key].appendleft(item)
-
-
-def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum):
-    if scopenum >= scopenum_function or len(items) < 3:
-        return items
-    ignore = set()
-    items_deque = deque(items)
-    items_done = OrderedDict()
-    scoped_items_by_argkey = items_by_argkey[scopenum]
-    scoped_argkeys_cache = argkeys_cache[scopenum]
-    while items_deque:
-        no_argkey_group = OrderedDict()
-        slicing_argkey = None
-        while items_deque:
-            item = items_deque.popleft()
-            if item in items_done or item in no_argkey_group:
-                continue
-            argkeys = OrderedDict.fromkeys(
-                k for k in scoped_argkeys_cache.get(item, []) if k not in ignore
-            )
-            if not argkeys:
-                no_argkey_group[item] = None
-            else:
-                slicing_argkey, _ = argkeys.popitem()
-                # we don't have to remove relevant items from later in the deque because they'll just be ignored
-                matching_items = [
-                    i for i in scoped_items_by_argkey[slicing_argkey] if i in items
-                ]
-                for i in reversed(matching_items):
-                    fix_cache_order(i, argkeys_cache, items_by_argkey)
-                    items_deque.appendleft(i)
-                break
-        if no_argkey_group:
-            no_argkey_group = reorder_items_atscope(
-                no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1
-            )
-            for item in no_argkey_group:
-                items_done[item] = None
-        ignore.add(slicing_argkey)
-    return items_done
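The effect of reorder_items is easiest to see with a parametrized high-scope fixture: tests are grouped by parameter value so each expensive setup and teardown runs once per value rather than once per test. A sketch, assuming default collection order (the fixture name db and its params are illustrative):

import pytest

@pytest.fixture(scope="module", params=["sqlite", "postgres"])
def db(request):
    return request.param

def test_a(db):
    pass

def test_b(db):
    pass

# rather than alternating sqlite/postgres/sqlite/postgres, the sort
# runs test_a[sqlite], test_b[sqlite], test_a[postgres], test_b[postgres],
# minimizing the number of module-scoped setups.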
-
-
-def fillfixtures(function):
-    """ fill missing funcargs for a test function. """
-    try:
-        request = function._request
-    except AttributeError:
-        # XXX this special code path is only expected to execute
-        # with the oejskit plugin.  It uses classes with funcargs
-        # and we thus have to work a bit to allow this.
-        fm = function.session._fixturemanager
-        fi = fm.getfixtureinfo(function.parent, function.obj, None)
-        function._fixtureinfo = fi
-        request = function._request = FixtureRequest(function)
-        request._fillfixtures()
-        # prune out funcargs for jstests
-        newfuncargs = {}
-        for name in fi.argnames:
-            newfuncargs[name] = function.funcargs[name]
-        function.funcargs = newfuncargs
-    else:
-        request._fillfixtures()
-
-
-def get_direct_param_fixture_func(request):
-    return request.param
-
-
-@attr.s(slots=True)
-class FuncFixtureInfo(object):
-    # original function argument names
-    argnames = attr.ib(type=tuple)
-    # argnames that function immediately requires. These include argnames +
-    # fixture names specified via usefixtures and via autouse=True in fixture
-    # definitions.
-    initialnames = attr.ib(type=tuple)
-    names_closure = attr.ib()  # type: List[str]
-    name2fixturedefs = attr.ib()  # type: Dict[str, List[FixtureDef]]
-
-    def prune_dependency_tree(self):
-        """Recompute names_closure from initialnames and name2fixturedefs
-
-        Can only reduce names_closure, which means that the new closure will
-        always be a subset of the old one. The order is preserved.
-
-        This method is needed because direct parametrization may shadow some
-        of the fixtures that were included in the originally built dependency
-        tree. In this way the dependency tree can get pruned, and the closure
-        of argnames may get reduced.
-        """
-        closure = set()
-        working_set = set(self.initialnames)
-        while working_set:
-            argname = working_set.pop()
-            # argname may be something not included in the original names_closure,
-            # in which case we ignore it. This currently happens with pseudo
-            # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'.
-            # So they introduce the new dependency 'request' which might have
-            # been missing in the original tree (closure).
-            if argname not in closure and argname in self.names_closure:
-                closure.add(argname)
-                if argname in self.name2fixturedefs:
-                    working_set.update(self.name2fixturedefs[argname][-1].argnames)
-
-        self.names_closure[:] = sorted(closure, key=self.names_closure.index)
-
-
-class FixtureRequest(FuncargnamesCompatAttr):
-    """ A request for a fixture from a test or fixture function.
-
-    A request object gives access to the requesting test context
-    and has an optional ``param`` attribute in case
-    the fixture is parametrized indirectly.
- """ - - def __init__(self, pyfuncitem): - self._pyfuncitem = pyfuncitem - #: fixture for which this request is being performed - self.fixturename = None - #: Scope string, one of "function", "class", "module", "session" - self.scope = "function" - self._fixture_defs = {} # argname -> FixtureDef - fixtureinfo = pyfuncitem._fixtureinfo - self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy() - self._arg2index = {} - self._fixturemanager = pyfuncitem.session._fixturemanager - - @property - def fixturenames(self): - """names of all active fixtures in this request""" - result = list(self._pyfuncitem._fixtureinfo.names_closure) - result.extend(set(self._fixture_defs).difference(result)) - return result - - @property - def node(self): - """ underlying collection node (depends on current request scope)""" - return self._getscopeitem(self.scope) - - def _getnextfixturedef(self, argname): - fixturedefs = self._arg2fixturedefs.get(argname, None) - if fixturedefs is None: - # we arrive here because of a dynamic call to - # getfixturevalue(argname) usage which was naturally - # not known at parsing/collection time - parentid = self._pyfuncitem.parent.nodeid - fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid) - self._arg2fixturedefs[argname] = fixturedefs - # fixturedefs list is immutable so we maintain a decreasing index - index = self._arg2index.get(argname, 0) - 1 - if fixturedefs is None or (-index > len(fixturedefs)): - raise FixtureLookupError(argname, self) - self._arg2index[argname] = index - return fixturedefs[index] - - @property - def config(self): - """ the pytest config object associated with this request. """ - return self._pyfuncitem.config - - @scopeproperty() - def function(self): - """ test function object if the request has a per-function scope. """ - return self._pyfuncitem.obj - - @scopeproperty("class") - def cls(self): - """ class (can be None) where the test function was collected. """ - clscol = self._pyfuncitem.getparent(_pytest.python.Class) - if clscol: - return clscol.obj - - @property - def instance(self): - """ instance (can be None) on which test function was collected. """ - # unittest support hack, see _pytest.unittest.TestCaseFunction - try: - return self._pyfuncitem._testcase - except AttributeError: - function = getattr(self, "function", None) - return getattr(function, "__self__", None) - - @scopeproperty() - def module(self): - """ python module object where the test function was collected. """ - return self._pyfuncitem.getparent(_pytest.python.Module).obj - - @scopeproperty() - def fspath(self): - """ the file system path of the test module which collected this test. """ - return self._pyfuncitem.fspath - - @property - def keywords(self): - """ keywords/markers dictionary for the underlying node. """ - return self.node.keywords - - @property - def session(self): - """ pytest session object. """ - return self._pyfuncitem.session - - def addfinalizer(self, finalizer): - """ add finalizer/teardown function to be called after the - last test within the requesting test context finished - execution. """ - # XXX usually this method is shadowed by fixturedef specific ones - self._addfinalizer(finalizer, scope=self.scope) - - def _addfinalizer(self, finalizer, scope): - colitem = self._getscopeitem(scope) - self._pyfuncitem.session._setupstate.addfinalizer( - finalizer=finalizer, colitem=colitem - ) - - def applymarker(self, marker): - """ Apply a marker to a single test function invocation. 
- This method is useful if you don't want to have a keyword/marker - on all function invocations. - - :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object - created by a call to ``pytest.mark.NAME(...)``. - """ - self.node.add_marker(marker) - - def raiseerror(self, msg): - """ raise a FixtureLookupError with the given message. """ - raise self._fixturemanager.FixtureLookupError(None, self, msg) - - def _fillfixtures(self): - item = self._pyfuncitem - fixturenames = getattr(item, "fixturenames", self.fixturenames) - for argname in fixturenames: - if argname not in item.funcargs: - item.funcargs[argname] = self.getfixturevalue(argname) - - def cached_setup(self, setup, teardown=None, scope="module", extrakey=None): - """ (deprecated) Return a testing resource managed by ``setup`` & - ``teardown`` calls. ``scope`` and ``extrakey`` determine when the - ``teardown`` function will be called so that subsequent calls to - ``setup`` would recreate the resource. With pytest-2.3 you often - do not need ``cached_setup()`` as you can directly declare a scope - on a fixture function and register a finalizer through - ``request.addfinalizer()``. - - :arg teardown: function receiving a previously setup resource. - :arg setup: a no-argument function creating a resource. - :arg scope: a string value out of ``function``, ``class``, ``module`` - or ``session`` indicating the caching lifecycle of the resource. - :arg extrakey: added to internal caching key of (funcargname, scope). - """ - from _pytest.deprecated import CACHED_SETUP - - warnings.warn(CACHED_SETUP, stacklevel=2) - if not hasattr(self.config, "_setupcache"): - self.config._setupcache = {} # XXX weakref? - cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) - cache = self.config._setupcache - try: - val = cache[cachekey] - except KeyError: - self._check_scope(self.fixturename, self.scope, scope) - val = setup() - cache[cachekey] = val - if teardown is not None: - - def finalizer(): - del cache[cachekey] - teardown(val) - - self._addfinalizer(finalizer, scope=scope) - return val - - def getfixturevalue(self, argname): - """ Dynamically run a named fixture function. - - Declaring fixtures via function argument is recommended where possible. - But if you can only decide whether to use another fixture at test - setup time, you may use this function to retrieve it inside a fixture - or test function body. - """ - return self._get_active_fixturedef(argname).cached_result[0] - - def getfuncargvalue(self, argname): - """ Deprecated, use getfixturevalue. 
""" - from _pytest import deprecated - - warnings.warn(deprecated.GETFUNCARGVALUE, stacklevel=2) - return self.getfixturevalue(argname) - - def _get_active_fixturedef(self, argname): - try: - return self._fixture_defs[argname] - except KeyError: - try: - fixturedef = self._getnextfixturedef(argname) - except FixtureLookupError: - if argname == "request": - cached_result = (self, [0], None) - scope = "function" - return PseudoFixtureDef(cached_result, scope) - raise - # remove indent to prevent the python3 exception - # from leaking into the call - self._compute_fixture_value(fixturedef) - self._fixture_defs[argname] = fixturedef - return fixturedef - - def _get_fixturestack(self): - current = self - values = [] - while 1: - fixturedef = getattr(current, "_fixturedef", None) - if fixturedef is None: - values.reverse() - return values - values.append(fixturedef) - current = current._parent_request - - def _compute_fixture_value(self, fixturedef): - """ - Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will - force the FixtureDef object to throw away any previous results and compute a new fixture value, which - will be stored into the FixtureDef object itself. - - :param FixtureDef fixturedef: - """ - # prepare a subrequest object before calling fixture function - # (latter managed by fixturedef) - argname = fixturedef.argname - funcitem = self._pyfuncitem - scope = fixturedef.scope - try: - param = funcitem.callspec.getparam(argname) - except (AttributeError, ValueError): - param = NOTSET - param_index = 0 - has_params = fixturedef.params is not None - fixtures_not_supported = getattr(funcitem, "nofuncargs", False) - if has_params and fixtures_not_supported: - msg = ( - "{name} does not support fixtures, maybe unittest.TestCase subclass?\n" - "Node id: {nodeid}\n" - "Function type: {typename}" - ).format( - name=funcitem.name, - nodeid=funcitem.nodeid, - typename=type(funcitem).__name__, - ) - fail(msg, pytrace=False) - if has_params: - frame = inspect.stack()[3] - frameinfo = inspect.getframeinfo(frame[0]) - source_path = frameinfo.filename - source_lineno = frameinfo.lineno - source_path = py.path.local(source_path) - if source_path.relto(funcitem.config.rootdir): - source_path = source_path.relto(funcitem.config.rootdir) - msg = ( - "The requested fixture has no parameter defined for test:\n" - " {}\n\n" - "Requested fixture '{}' defined in:\n{}" - "\n\nRequested here:\n{}:{}".format( - funcitem.nodeid, - fixturedef.argname, - getlocation(fixturedef.func, funcitem.config.rootdir), - source_path, - source_lineno, - ) - ) - fail(msg, pytrace=False) - else: - # indices might not be set if old-style metafunc.addcall() was used - param_index = funcitem.callspec.indices.get(argname, 0) - # if a parametrize invocation set a scope it will override - # the static scope defined with the fixture function - paramscopenum = funcitem.callspec._arg2scopenum.get(argname) - if paramscopenum is not None: - scope = scopes[paramscopenum] - - subrequest = SubRequest(self, scope, param, param_index, fixturedef) - - # check if a higher-level scoped fixture accesses a lower level one - subrequest._check_scope(argname, self.scope, scope) - - # clear sys.exc_info before invoking the fixture (python bug?) 
-        # if it's not explicitly cleared it will leak into the call
-        exc_clear()
-        try:
-            # call the fixture function
-            fixturedef.execute(request=subrequest)
-        finally:
-            # if fixture function failed it might have registered finalizers
-            self.session._setupstate.addfinalizer(
-                functools.partial(fixturedef.finish, request=subrequest),
-                subrequest.node,
-            )
-
-    def _check_scope(self, argname, invoking_scope, requested_scope):
-        if argname == "request":
-            return
-        if scopemismatch(invoking_scope, requested_scope):
-            # try to report something helpful
-            lines = self._factorytraceback()
-            fail(
-                "ScopeMismatch: You tried to access the %r scoped "
-                "fixture %r with a %r scoped request object, "
-                "involved factories\n%s"
-                % ((requested_scope, argname, invoking_scope, "\n".join(lines))),
-                pytrace=False,
-            )
-
-    def _factorytraceback(self):
-        lines = []
-        for fixturedef in self._get_fixturestack():
-            factory = fixturedef.func
-            fs, lineno = getfslineno(factory)
-            p = self._pyfuncitem.session.fspath.bestrelpath(fs)
-            args = _format_args(factory)
-            lines.append("%s:%d:  def %s%s" % (p, lineno, factory.__name__, args))
-        return lines
-
-    def _getscopeitem(self, scope):
-        if scope == "function":
-            # this might also be a non-function Item despite its attribute name
-            return self._pyfuncitem
-        if scope == "package":
-            node = get_scope_package(self._pyfuncitem, self._fixturedef)
-        else:
-            node = get_scope_node(self._pyfuncitem, scope)
-        if node is None and scope == "class":
-            # fallback to function item itself
-            node = self._pyfuncitem
-        assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
-            scope, self._pyfuncitem
-        )
-        return node
-
-    def __repr__(self):
-        return "<FixtureRequest for %r>" % (self.node)
-
-
-class SubRequest(FixtureRequest):
-    """ a sub request for handling getting a fixture from a
-    test function/fixture. """
-
-    def __init__(self, request, scope, param, param_index, fixturedef):
-        self._parent_request = request
-        self.fixturename = fixturedef.argname
-        if param is not NOTSET:
-            self.param = param
-        self.param_index = param_index
-        self.scope = scope
-        self._fixturedef = fixturedef
-        self._pyfuncitem = request._pyfuncitem
-        self._fixture_defs = request._fixture_defs
-        self._arg2fixturedefs = request._arg2fixturedefs
-        self._arg2index = request._arg2index
-        self._fixturemanager = request._fixturemanager
-
-    def __repr__(self):
-        return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
-
-    def addfinalizer(self, finalizer):
-        self._fixturedef.addfinalizer(finalizer)
-
-
-class ScopeMismatchError(Exception):
-    """ A fixture function tries to use a different fixture function
-    which has a lower scope (e.g. a Session one calls a function one)
-    """
-
-
-scopes = "session package module class function".split()
-scopenum_function = scopes.index("function")
-
-
-def scopemismatch(currentscope, newscope):
-    return scopes.index(newscope) > scopes.index(currentscope)
-
-
-def scope2index(scope, descr, where=None):
-    """Look up the index of ``scope`` and raise a descriptive value error
-    if not defined.
-    """
-    try:
-        return scopes.index(scope)
-    except ValueError:
-        fail(
-            "{} {}got an unexpected scope value '{}'".format(
-                descr, "from {} ".format(where) if where else "", scope
-            ),
-            pytrace=False,
-        )
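scopemismatch and _check_scope above are what reject a long-lived fixture that depends on a shorter-lived one. A minimal reproduction (fixture names are made up):

import pytest

@pytest.fixture
def per_test():
    return 1

@pytest.fixture(scope="session")
def forever(per_test):  # session scope requesting a function-scoped fixture
    return per_test

def test_it(forever):
    pass

# setup fails with a ScopeMismatch error naming both scopes and listing
# the involved factories via _factorytraceback.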
""" - - def __init__(self, argname, request, msg=None): - self.argname = argname - self.request = request - self.fixturestack = request._get_fixturestack() - self.msg = msg - - def formatrepr(self): - tblines = [] - addline = tblines.append - stack = [self.request._pyfuncitem.obj] - stack.extend(map(lambda x: x.func, self.fixturestack)) - msg = self.msg - if msg is not None: - # the last fixture raise an error, let's present - # it at the requesting side - stack = stack[:-1] - for function in stack: - fspath, lineno = getfslineno(function) - try: - lines, _ = inspect.getsourcelines(get_real_func(function)) - except (IOError, IndexError, TypeError): - error_msg = "file %s, line %s: source code not available" - addline(error_msg % (fspath, lineno + 1)) - else: - addline("file %s, line %s" % (fspath, lineno + 1)) - for i, line in enumerate(lines): - line = line.rstrip() - addline(" " + line) - if line.lstrip().startswith("def"): - break - - if msg is None: - fm = self.request._fixturemanager - available = set() - parentid = self.request._pyfuncitem.parent.nodeid - for name, fixturedefs in fm._arg2fixturedefs.items(): - faclist = list(fm._matchfactories(fixturedefs, parentid)) - if faclist: - available.add(name) - if self.argname in available: - msg = " recursive dependency involving fixture '{}' detected".format( - self.argname - ) - else: - msg = "fixture '{}' not found".format(self.argname) - msg += "\n available fixtures: {}".format(", ".join(sorted(available))) - msg += "\n use 'pytest --fixtures [testpath]' for help on them." - - return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) - - -class FixtureLookupErrorRepr(TerminalRepr): - def __init__(self, filename, firstlineno, tblines, errorstring, argname): - self.tblines = tblines - self.errorstring = errorstring - self.filename = filename - self.firstlineno = firstlineno - self.argname = argname - - def toterminal(self, tw): - # tw.line("FixtureLookupError: %s" %(self.argname), red=True) - for tbline in self.tblines: - tw.line(tbline.rstrip()) - lines = self.errorstring.split("\n") - if lines: - tw.line( - "{} {}".format(FormattedExcinfo.fail_marker, lines[0].strip()), - red=True, - ) - for line in lines[1:]: - tw.line( - "{} {}".format(FormattedExcinfo.flow_marker, line.strip()), - red=True, - ) - tw.line() - tw.line("%s:%d" % (self.filename, self.firstlineno + 1)) - - -def fail_fixturefunc(fixturefunc, msg): - fs, lineno = getfslineno(fixturefunc) - location = "%s:%s" % (fs, lineno + 1) - source = _pytest._code.Source(fixturefunc) - fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, pytrace=False) - - -def call_fixture_func(fixturefunc, request, kwargs): - yieldctx = is_generator(fixturefunc) - if yieldctx: - it = fixturefunc(**kwargs) - res = next(it) - finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, it) - request.addfinalizer(finalizer) - else: - res = fixturefunc(**kwargs) - return res - - -def _teardown_yield_fixture(fixturefunc, it): - """Executes the teardown of a fixture function by advancing the iterator after the - yield and ensure the iteration ends (if not it means there is more than one yield in the function)""" - try: - next(it) - except StopIteration: - pass - else: - fail_fixturefunc( - fixturefunc, "yield_fixture function has more than one 'yield'" - ) - - -class FixtureDef(object): - """ A container for a factory definition. 
""" - - def __init__( - self, - fixturemanager, - baseid, - argname, - func, - scope, - params, - unittest=False, - ids=None, - ): - self._fixturemanager = fixturemanager - self.baseid = baseid or "" - self.has_location = baseid is not None - self.func = func - self.argname = argname - self.scope = scope - self.scopenum = scope2index( - scope or "function", - descr="Fixture '{}'".format(func.__name__), - where=baseid, - ) - self.params = params - self.argnames = getfuncargnames(func, is_method=unittest) - self.unittest = unittest - self.ids = ids - self._finalizers = [] - - def addfinalizer(self, finalizer): - self._finalizers.append(finalizer) - - def finish(self, request): - exceptions = [] - try: - while self._finalizers: - try: - func = self._finalizers.pop() - func() - except: # noqa - exceptions.append(sys.exc_info()) - if exceptions: - e = exceptions[0] - del exceptions # ensure we don't keep all frames alive because of the traceback - six.reraise(*e) - - finally: - hook = self._fixturemanager.session.gethookproxy(request.node.fspath) - hook.pytest_fixture_post_finalizer(fixturedef=self, request=request) - # even if finalization fails, we invalidate - # the cached fixture value and remove - # all finalizers because they may be bound methods which will - # keep instances alive - if hasattr(self, "cached_result"): - del self.cached_result - self._finalizers = [] - - def execute(self, request): - # get required arguments and register our own finish() - # with their finalization - for argname in self.argnames: - fixturedef = request._get_active_fixturedef(argname) - if argname != "request": - fixturedef.addfinalizer(functools.partial(self.finish, request=request)) - - my_cache_key = request.param_index - cached_result = getattr(self, "cached_result", None) - if cached_result is not None: - result, cache_key, err = cached_result - if my_cache_key == cache_key: - if err is not None: - six.reraise(*err) - else: - return result - # we have a previous but differently parametrized fixture instance - # so we need to tear it down before creating a new one - self.finish(request) - assert not hasattr(self, "cached_result") - - hook = self._fixturemanager.session.gethookproxy(request.node.fspath) - return hook.pytest_fixture_setup(fixturedef=self, request=request) - - def __repr__(self): - return "" % ( - self.argname, - self.scope, - self.baseid, - ) - - -def resolve_fixture_function(fixturedef, request): - """Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific - instances and bound methods. - """ - fixturefunc = fixturedef.func - if fixturedef.unittest: - if request.instance is not None: - # bind the unbound method to the TestCase instance - fixturefunc = fixturedef.func.__get__(request.instance) - else: - # the fixture function needs to be bound to the actual - # request.instance so that code working with "fixturedef" behaves - # as expected. - if request.instance is not None: - fixturefunc = getimfunc(fixturedef.func) - if fixturefunc != fixturedef.func: - fixturefunc = fixturefunc.__get__(request.instance) - return fixturefunc - - -def pytest_fixture_setup(fixturedef, request): - """ Execution of fixture setup. 
""" - kwargs = {} - for argname in fixturedef.argnames: - fixdef = request._get_active_fixturedef(argname) - result, arg_cache_key, exc = fixdef.cached_result - request._check_scope(argname, request.scope, fixdef.scope) - kwargs[argname] = result - - fixturefunc = resolve_fixture_function(fixturedef, request) - my_cache_key = request.param_index - try: - result = call_fixture_func(fixturefunc, request, kwargs) - except TEST_OUTCOME: - fixturedef.cached_result = (None, my_cache_key, sys.exc_info()) - raise - fixturedef.cached_result = (result, my_cache_key, None) - return result - - -def _ensure_immutable_ids(ids): - if ids is None: - return - if callable(ids): - return ids - return tuple(ids) - - -def wrap_function_to_warning_if_called_directly(function, fixture_marker): - """Wrap the given fixture function so we can issue warnings about it being called directly, instead of - used as an argument in a test function. - """ - is_yield_function = is_generator(function) - warning = FIXTURE_FUNCTION_CALL.format( - name=fixture_marker.name or function.__name__ - ) - - if is_yield_function: - - @functools.wraps(function) - def result(*args, **kwargs): - __tracebackhide__ = True - warnings.warn(warning, stacklevel=3) - for x in function(*args, **kwargs): - yield x - - else: - - @functools.wraps(function) - def result(*args, **kwargs): - __tracebackhide__ = True - warnings.warn(warning, stacklevel=3) - return function(*args, **kwargs) - - if six.PY2: - result.__wrapped__ = function - - # keep reference to the original function in our own custom attribute so we don't unwrap - # further than this point and lose useful wrappings like @mock.patch (#3774) - result.__pytest_wrapped__ = _PytestWrapper(function) - - return result - - -@attr.s(frozen=True) -class FixtureFunctionMarker(object): - scope = attr.ib() - params = attr.ib(converter=attr.converters.optional(tuple)) - autouse = attr.ib(default=False) - ids = attr.ib(default=None, converter=_ensure_immutable_ids) - name = attr.ib(default=None) - - def __call__(self, function): - if isclass(function): - raise ValueError("class fixtures not supported (maybe in the future)") - - if getattr(function, "_pytestfixturefunction", False): - raise ValueError( - "fixture is being applied more than once to the same function" - ) - - function = wrap_function_to_warning_if_called_directly(function, self) - - name = self.name or function.__name__ - if name == "request": - warnings.warn(FIXTURE_NAMED_REQUEST) - function._pytestfixturefunction = self - return function - - -def fixture(scope="function", params=None, autouse=False, ids=None, name=None): - """Decorator to mark a fixture factory function. - - This decorator can be used, with or without parameters, to define a - fixture function. - - The name of the fixture function can later be referenced to cause its - invocation ahead of running tests: test - modules or classes can use the ``pytest.mark.usefixtures(fixturename)`` - marker. - - Test functions can directly use fixture names as input - arguments in which case the fixture instance returned from the fixture - function will be injected. - - Fixtures can provide their values to test functions using ``return`` or ``yield`` - statements. When using ``yield`` the code block after the ``yield`` statement is executed - as teardown code regardless of the test outcome, and must yield exactly once. - - :arg scope: the scope for which this fixture is shared, one of - ``"function"`` (default), ``"class"``, ``"module"``, - ``"package"`` or ``"session"``. 
- - ``"package"`` is considered **experimental** at this time. - - :arg params: an optional list of parameters which will cause multiple - invocations of the fixture function and all of the tests - using it. - - :arg autouse: if True, the fixture func is activated for all tests that - can see it. If False (the default) then an explicit - reference is needed to activate the fixture. - - :arg ids: list of string ids each corresponding to the params - so that they are part of the test id. If no ids are provided - they will be generated automatically from the params. - - :arg name: the name of the fixture. This defaults to the name of the - decorated function. If a fixture is used in the same module in - which it is defined, the function name of the fixture will be - shadowed by the function arg that requests the fixture; one way - to resolve this is to name the decorated function - ``fixture_`` and then use - ``@pytest.fixture(name='')``. - """ - if callable(scope) and params is None and autouse is False: - # direct decoration - return FixtureFunctionMarker("function", params, autouse, name=name)(scope) - if params is not None and not isinstance(params, (list, tuple)): - params = list(params) - return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) - - -def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None): - """ (return a) decorator to mark a yield-fixture factory function. - - .. deprecated:: 3.0 - Use :py:func:`pytest.fixture` directly instead. - """ - return fixture(scope=scope, params=params, autouse=autouse, ids=ids, name=name) - - -defaultfuncargprefixmarker = fixture() - - -@fixture(scope="session") -def pytestconfig(request): - """Session-scoped fixture that returns the :class:`_pytest.config.Config` object. - - Example:: - - def test_foo(pytestconfig): - if pytestconfig.getoption("verbose"): - ... - - """ - return request.config - - -class FixtureManager(object): - """ - pytest fixtures definitions and information is stored and managed - from this class. - - During collection fm.parsefactories() is called multiple times to parse - fixture function definitions into FixtureDef objects and internal - data structures. - - During collection of test functions, metafunc-mechanics instantiate - a FuncFixtureInfo object which is cached per node/func-name. - This FuncFixtureInfo object is later retrieved by Function nodes - which themselves offer a fixturenames attribute. - - The FuncFixtureInfo object holds information about fixtures and FixtureDefs - relevant for a particular function. An initial list of fixtures is - assembled like this: - - - ini-defined usefixtures - - autouse-marked fixtures along the collection chain up from the function - - usefixtures markers at module/class/function level - - test function funcargs - - Subsequently the funcfixtureinfo.fixturenames attribute is computed - as the closure of the fixtures needed to setup the initial fixtures, - i. e. fixtures needed by fixture functions themselves are appended - to the fixturenames list. - - Upon the test-setup phases all fixturenames are instantiated, retrieved - by a lookup of their FuncFixtureInfo. 
- """ - - _argprefix = "pytest_funcarg__" - FixtureLookupError = FixtureLookupError - FixtureLookupErrorRepr = FixtureLookupErrorRepr - - def __init__(self, session): - self.session = session - self.config = session.config - self._arg2fixturedefs = {} - self._holderobjseen = set() - self._arg2finish = {} - self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] - session.config.pluginmanager.register(self, "funcmanage") - - def getfixtureinfo(self, node, func, cls, funcargs=True): - if funcargs and not getattr(node, "nofuncargs", False): - argnames = getfuncargnames(func, cls=cls) - else: - argnames = () - usefixtures = flatten( - mark.args for mark in node.iter_markers(name="usefixtures") - ) - initialnames = tuple(usefixtures) + argnames - fm = node.session._fixturemanager - initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure( - initialnames, node - ) - return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) - - def pytest_plugin_registered(self, plugin): - nodeid = None - try: - p = py.path.local(plugin.__file__).realpath() - except AttributeError: - pass - else: - # construct the base nodeid which is later used to check - # what fixtures are visible for particular tests (as denoted - # by their test id) - if p.basename.startswith("conftest.py"): - nodeid = p.dirpath().relto(self.config.rootdir) - if p.sep != nodes.SEP: - nodeid = nodeid.replace(p.sep, nodes.SEP) - - self.parsefactories(plugin, nodeid) - - def _getautousenames(self, nodeid): - """ return a tuple of fixture names to be used. """ - autousenames = [] - for baseid, basenames in self._nodeid_and_autousenames: - if nodeid.startswith(baseid): - if baseid: - i = len(baseid) - nextchar = nodeid[i : i + 1] - if nextchar and nextchar not in ":/": - continue - autousenames.extend(basenames) - return autousenames - - def getfixtureclosure(self, fixturenames, parentnode): - # collect the closure of all fixtures , starting with the given - # fixturenames as the initial set. As we have to visit all - # factory definitions anyway, we also return an arg2fixturedefs - # mapping so that the caller can reuse it and does not have - # to re-discover fixturedefs again for each fixturename - # (discovering matching fixtures for a given name/node is expensive) - - parentid = parentnode.nodeid - fixturenames_closure = self._getautousenames(parentid) - - def merge(otherlist): - for arg in otherlist: - if arg not in fixturenames_closure: - fixturenames_closure.append(arg) - - merge(fixturenames) - - # at this point, fixturenames_closure contains what we call "initialnames", - # which is a set of fixturenames the function immediately requests. We - # need to return it as well, so save this. 
-        initialnames = tuple(fixturenames_closure)
-
-        arg2fixturedefs = {}
-        lastlen = -1
-        while lastlen != len(fixturenames_closure):
-            lastlen = len(fixturenames_closure)
-            for argname in fixturenames_closure:
-                if argname in arg2fixturedefs:
-                    continue
-                fixturedefs = self.getfixturedefs(argname, parentid)
-                if fixturedefs:
-                    arg2fixturedefs[argname] = fixturedefs
-                    merge(fixturedefs[-1].argnames)
-
-        def sort_by_scope(arg_name):
-            try:
-                fixturedefs = arg2fixturedefs[arg_name]
-            except KeyError:
-                return scopes.index("function")
-            else:
-                return fixturedefs[-1].scopenum
-
-        fixturenames_closure.sort(key=sort_by_scope)
-        return initialnames, fixturenames_closure, arg2fixturedefs
-
-    def pytest_generate_tests(self, metafunc):
-        for argname in metafunc.fixturenames:
-            faclist = metafunc._arg2fixturedefs.get(argname)
-            if faclist:
-                fixturedef = faclist[-1]
-                if fixturedef.params is not None:
-                    parametrize_func = getattr(metafunc.function, "parametrize", None)
-                    if parametrize_func is not None:
-                        parametrize_func = parametrize_func.combined
-                    func_params = getattr(parametrize_func, "args", [[None]])
-                    func_kwargs = getattr(parametrize_func, "kwargs", {})
-                    # skip directly parametrized arguments
-                    if "argnames" in func_kwargs:
-                        argnames = parametrize_func.kwargs["argnames"]
-                    else:
-                        argnames = func_params[0]
-                    if not isinstance(argnames, (tuple, list)):
-                        argnames = [x.strip() for x in argnames.split(",") if x.strip()]
-                    if argname not in func_params and argname not in argnames:
-                        metafunc.parametrize(
-                            argname,
-                            fixturedef.params,
-                            indirect=True,
-                            scope=fixturedef.scope,
-                            ids=fixturedef.ids,
-                        )
-            else:
-                continue  # will raise FixtureLookupError at setup time
-
-    def pytest_collection_modifyitems(self, items):
-        # separate parametrized setups
-        items[:] = reorder_items(items)
-
-    def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
-        from _pytest import deprecated
-
-        if nodeid is not NOTSET:
-            holderobj = node_or_obj
-        else:
-            holderobj = node_or_obj.obj
-            nodeid = node_or_obj.nodeid
-        if holderobj in self._holderobjseen:
-            return
-
-        from _pytest.nodes import _CompatProperty
-
-        self._holderobjseen.add(holderobj)
-        autousenames = []
-        for name in dir(holderobj):
-            # The attribute can be an arbitrary descriptor, so the attribute
-            # access below can raise. safe_getattr() ignores such exceptions.
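-            # (editor's note: e.g. a class-level property whose getter raises;
-            # without safe_getattr a single such attribute would abort fixture
-            # parsing for the whole module, class or plugin.)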
- maybe_property = safe_getattr(type(holderobj), name, None) - if isinstance(maybe_property, _CompatProperty): - # deprecated - continue - obj = safe_getattr(holderobj, name, None) - marker = getfixturemarker(obj) - # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style) - # or are "@pytest.fixture" marked - if marker is None: - if not name.startswith(self._argprefix): - continue - if not callable(obj): - continue - marker = defaultfuncargprefixmarker - - filename, lineno = getfslineno(obj) - warnings.warn_explicit( - deprecated.FUNCARG_PREFIX.format(name=name), - category=None, - filename=str(filename), - lineno=lineno + 1, - ) - name = name[len(self._argprefix) :] - elif not isinstance(marker, FixtureFunctionMarker): - # magic globals with __getattr__ might have got us a wrong - # fixture attribute - continue - else: - if marker.name: - name = marker.name - assert not name.startswith(self._argprefix), FIXTURE_MSG.format(name) - - # during fixture definition we wrap the original fixture function - # to issue a warning if called directly, so here we unwrap it in order to not emit the warning - # when pytest itself calls the fixture function - if six.PY2 and unittest: - # hack on Python 2 because of the unbound methods - obj = get_real_func(obj) - else: - obj = get_real_method(obj, holderobj) - - fixture_def = FixtureDef( - self, - nodeid, - name, - obj, - marker.scope, - marker.params, - unittest=unittest, - ids=marker.ids, - ) - - faclist = self._arg2fixturedefs.setdefault(name, []) - if fixture_def.has_location: - faclist.append(fixture_def) - else: - # fixturedefs with no location are at the front - # so this inserts the current fixturedef after the - # existing fixturedefs from external plugins but - # before the fixturedefs provided in conftests. - i = len([f for f in faclist if not f.has_location]) - faclist.insert(i, fixture_def) - if marker.autouse: - autousenames.append(name) - - if autousenames: - self._nodeid_and_autousenames.append((nodeid or "", autousenames)) - - def getfixturedefs(self, argname, nodeid): - """ - Gets a list of fixtures which are applicable to the given node id. - - :param str argname: name of the fixture to search for - :param str nodeid: full node id of the requesting test. - :return: list[FixtureDef] - """ - try: - fixturedefs = self._arg2fixturedefs[argname] - except KeyError: - return None - return tuple(self._matchfactories(fixturedefs, nodeid)) - - def _matchfactories(self, fixturedefs, nodeid): - for fixturedef in fixturedefs: - if nodes.ischildnode(fixturedef.baseid, nodeid): - yield fixturedef diff --git a/venv/lib/python3.8/site-packages/_pytest/freeze_support.py b/venv/lib/python3.8/site-packages/_pytest/freeze_support.py deleted file mode 100644 index 5edf3454..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/freeze_support.py +++ /dev/null @@ -1,47 +0,0 @@ -""" -Provides a function to report all internal modules for using freezing tools -pytest -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - - -def freeze_includes(): - """ - Returns a list of module names used by pytest that should be - included by cx_freeze. - """ - import py - import _pytest - - result = list(_iter_all_modules(py)) - result += list(_iter_all_modules(_pytest)) - return result - - -def _iter_all_modules(package, prefix=""): - """ - Iterates over the names of all modules that can be found in the given - package, recursively. 
- Example: - _iter_all_modules(_pytest) -> - ['_pytest.assertion.newinterpret', - '_pytest.capture', - '_pytest.core', - ... - ] - """ - import os - import pkgutil - - if type(package) is not str: - path, prefix = package.__path__[0], package.__name__ + "." - else: - path = package - for _, name, is_package in pkgutil.iter_modules([path]): - if is_package: - for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): - yield prefix + m - else: - yield prefix + name diff --git a/venv/lib/python3.8/site-packages/_pytest/helpconfig.py b/venv/lib/python3.8/site-packages/_pytest/helpconfig.py deleted file mode 100644 index 5e60d2a7..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/helpconfig.py +++ /dev/null @@ -1,217 +0,0 @@ -""" version info, help messages, tracing configuration. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import sys -from argparse import Action - -import py - -import pytest -from _pytest.config import PrintHelp - - -class HelpAction(Action): - """This is an argparse Action that will raise an exception in - order to skip the rest of the argument parsing when --help is passed. - This prevents argparse from quitting due to missing required arguments - when any are defined, for example by ``pytest_addoption``. - This is similar to the way that the builtin argparse --help option is - implemented by raising SystemExit. - """ - - def __init__(self, option_strings, dest=None, default=False, help=None): - super(HelpAction, self).__init__( - option_strings=option_strings, - dest=dest, - const=True, - default=default, - nargs=0, - help=help, - ) - - def __call__(self, parser, namespace, values, option_string=None): - setattr(namespace, self.dest, self.const) - - # We should only skip the rest of the parsing after preparse is done - if getattr(parser._parser, "after_preparse", False): - raise PrintHelp - - -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group.addoption( - "--version", - action="store_true", - help="display pytest lib version and import information.", - ) - group._addoption( - "-h", - "--help", - action=HelpAction, - dest="help", - help="show help message and configuration info", - ) - group._addoption( - "-p", - action="append", - dest="plugins", - default=[], - metavar="name", - help="early-load given plugin (multi-allowed). " - "To avoid loading of plugins, use the `no:` prefix, e.g. " - "`no:doctest`.", - ) - group.addoption( - "--traceconfig", - "--trace-config", - action="store_true", - default=False, - help="trace considerations of conftest.py files.", - ), - group.addoption( - "--debug", - action="store_true", - dest="debug", - default=False, - help="store internal tracing debug information in 'pytestdebug.log'.", - ) - group._addoption( - "-o", - "--override-ini", - dest="override_ini", - action="append", - help='override ini option with "option=value" style, e.g. 
`-o xfail_strict=True -o cache_dir=cache`.', - ) - - -@pytest.hookimpl(hookwrapper=True) -def pytest_cmdline_parse(): - outcome = yield - config = outcome.get_result() - if config.option.debug: - path = os.path.abspath("pytestdebug.log") - debugfile = open(path, "w") - debugfile.write( - "versions pytest-%s, py-%s, " - "python-%s\ncwd=%s\nargs=%s\n\n" - % ( - pytest.__version__, - py.__version__, - ".".join(map(str, sys.version_info)), - os.getcwd(), - config._origargs, - ) - ) - config.trace.root.setwriter(debugfile.write) - undo_tracing = config.pluginmanager.enable_tracing() - sys.stderr.write("writing pytestdebug information to %s\n" % path) - - def unset_tracing(): - debugfile.close() - sys.stderr.write("wrote pytestdebug information to %s\n" % debugfile.name) - config.trace.root.setwriter(None) - undo_tracing() - - config.add_cleanup(unset_tracing) - - -def pytest_cmdline_main(config): - if config.option.version: - p = py.path.local(pytest.__file__) - sys.stderr.write( - "This is pytest version %s, imported from %s\n" % (pytest.__version__, p) - ) - plugininfo = getpluginversioninfo(config) - if plugininfo: - for line in plugininfo: - sys.stderr.write(line + "\n") - return 0 - elif config.option.help: - config._do_configure() - showhelp(config) - config._ensure_unconfigure() - return 0 - - -def showhelp(config): - reporter = config.pluginmanager.get_plugin("terminalreporter") - tw = reporter._tw - tw.write(config._parser.optparser.format_help()) - tw.line() - tw.line() - tw.line( - "[pytest] ini-options in the first pytest.ini|tox.ini|setup.cfg file found:" - ) - tw.line() - - for name in config._parser._ininames: - help, type, default = config._parser._inidict[name] - if type is None: - type = "string" - spec = "%s (%s)" % (name, type) - line = " %-24s %s" % (spec, help) - tw.line(line[: tw.fullwidth]) - - tw.line() - tw.line("environment variables:") - vars = [ - ("PYTEST_ADDOPTS", "extra command line options"), - ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"), - ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "set to disable plugin auto-loading"), - ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"), - ] - for name, help in vars: - tw.line(" %-24s %s" % (name, help)) - tw.line() - tw.line() - - tw.line("to see available markers type: pytest --markers") - tw.line("to see available fixtures type: pytest --fixtures") - tw.line( - "(shown according to specified file_or_dir or current dir " - "if not specified; fixtures with leading '_' are only shown " - "with the '-v' option" - ) - - for warningreport in reporter.stats.get("warnings", []): - tw.line("warning : " + warningreport.message, red=True) - return - - -conftest_options = [("pytest_plugins", "list of plugin names to load")] - - -def getpluginversioninfo(config): - lines = [] - plugininfo = config.pluginmanager.list_plugin_distinfo() - if plugininfo: - lines.append("setuptools registered plugins:") - for plugin, dist in plugininfo: - loc = getattr(plugin, "__file__", repr(plugin)) - content = "%s-%s at %s" % (dist.project_name, dist.version, loc) - lines.append(" " + content) - return lines - - -def pytest_report_header(config): - lines = [] - if config.option.debug or config.option.traceconfig: - lines.append("using: pytest-%s pylib-%s" % (pytest.__version__, py.__version__)) - - verinfo = getpluginversioninfo(config) - if verinfo: - lines.extend(verinfo) - - if config.option.traceconfig: - lines.append("active plugins:") - items = config.pluginmanager.list_name_plugin() - for name, plugin in items: 
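-            # (editor's note: prefer the plugin module's source file for display;
-            # fall back to repr() for plugins without a __file__ attribute.)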
-            if hasattr(plugin, "__file__"):
-                r = plugin.__file__
-            else:
-                r = repr(plugin)
-            lines.append(" %-20s: %s" % (name, r))
-    return lines
diff --git a/venv/lib/python3.8/site-packages/_pytest/hookspec.py b/venv/lib/python3.8/site-packages/_pytest/hookspec.py
deleted file mode 100644
index 625f59e5..00000000
--- a/venv/lib/python3.8/site-packages/_pytest/hookspec.py
+++ /dev/null
@@ -1,623 +0,0 @@
-""" hook specifications for pytest plugins, invoked from main.py and builtin plugins.  """
-from pluggy import HookspecMarker
-
-from .deprecated import PYTEST_NAMESPACE
-
-
-hookspec = HookspecMarker("pytest")
-
-# -------------------------------------------------------------------------
-# Initialization hooks called for every plugin
-# -------------------------------------------------------------------------
-
-
-@hookspec(historic=True)
-def pytest_addhooks(pluginmanager):
-    """called at plugin registration time to allow adding new hooks via a call to
-    ``pluginmanager.add_hookspecs(module_or_class, prefix)``.
-
-
-    :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager
-
-    .. note::
-        This hook is incompatible with ``hookwrapper=True``.
-    """
-
-
-@hookspec(historic=True, warn_on_impl=PYTEST_NAMESPACE)
-def pytest_namespace():
-    """
-    return dict of name->object to be made globally available in
-    the pytest namespace.
-
-    This hook is called at plugin registration time.
-
-    .. note::
-        This hook is incompatible with ``hookwrapper=True``.
-
-    .. warning::
-        This hook has been **deprecated** and will be removed in pytest 4.0.
-
-        Plugins whose users depend on the current namespace functionality should prepare to migrate to a
-        namespace they actually own.
-
-        To support the migration it's suggested to trigger ``DeprecationWarnings`` for objects they put into the
-        pytest namespace.
-
-        A stopgap measure to avoid the warning is to monkeypatch the ``pytest`` module, but, just like the
-        ``pytest_namespace`` hook itself, this should be seen as a temporary measure to be removed in future
-        versions after an appropriate transition period.
-    """
-
-
-@hookspec(historic=True)
-def pytest_plugin_registered(plugin, manager):
-    """ a new pytest plugin got registered.
-
-    :param plugin: the plugin module or instance
-    :param _pytest.config.PytestPluginManager manager: pytest plugin manager
-
-    .. note::
-        This hook is incompatible with ``hookwrapper=True``.
-    """
-
-
-@hookspec(historic=True)
-def pytest_addoption(parser):
-    """register argparse-style options and ini-style config values,
-    called once at the beginning of a test run.
-
-    .. note::
-
-        This function should be implemented only in plugins or ``conftest.py``
-        files situated at the tests root directory due to how pytest
-        :ref:`discovers plugins during startup <pluginorder>`.
-
-    :arg _pytest.config.Parser parser: To add command line options, call
-        :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.
-        To add ini-file values call :py:func:`parser.addini(...)
-        <_pytest.config.Parser.addini>`.
-
-    Options can later be accessed through the
-    :py:class:`config <_pytest.config.Config>` object, respectively:
-
-    - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to
-      retrieve the value of a command line option.
-
-    - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve
-      a value read from an ini-style file.
-
-    The config object is passed around on many internal objects via the ``.config``
-    attribute or can be retrieved as the ``pytestconfig`` fixture.
-
-    ..
note:: - This hook is incompatible with ``hookwrapper=True``. - """ - - -@hookspec(historic=True) -def pytest_configure(config): - """ - Allows plugins and conftest files to perform initial configuration. - - This hook is called for every plugin and initial conftest file - after command line options have been parsed. - - After that, the hook is called for other conftest files as they are - imported. - - .. note:: - This hook is incompatible with ``hookwrapper=True``. - - :arg _pytest.config.Config config: pytest config object - """ - - -# ------------------------------------------------------------------------- -# Bootstrapping hooks called for plugins registered early enough: -# internal and 3rd party plugins. -# ------------------------------------------------------------------------- - - -@hookspec(firstresult=True) -def pytest_cmdline_parse(pluginmanager, args): - """return initialized config object, parsing the specified args. - - Stops at first non-None result, see :ref:`firstresult` - - .. note:: - This hook will not be called for ``conftest.py`` files, only for setuptools plugins. - - :param _pytest.config.PytestPluginManager pluginmanager: pytest plugin manager - :param list[str] args: list of arguments passed on the command line - """ - - -def pytest_cmdline_preparse(config, args): - """(**Deprecated**) modify command line arguments before option parsing. - - This hook is considered deprecated and will be removed in a future pytest version. Consider - using :func:`pytest_load_initial_conftests` instead. - - .. note:: - This hook will not be called for ``conftest.py`` files, only for setuptools plugins. - - :param _pytest.config.Config config: pytest config object - :param list[str] args: list of arguments passed on the command line - """ - - -@hookspec(firstresult=True) -def pytest_cmdline_main(config): - """ called for performing the main command line action. The default - implementation will invoke the configure hooks and runtest_mainloop. - - .. note:: - This hook will not be called for ``conftest.py`` files, only for setuptools plugins. - - Stops at first non-None result, see :ref:`firstresult` - - :param _pytest.config.Config config: pytest config object - """ - - -def pytest_load_initial_conftests(early_config, parser, args): - """ implements the loading of initial conftest files ahead - of command line option parsing. - - .. note:: - This hook will not be called for ``conftest.py`` files, only for setuptools plugins. - - :param _pytest.config.Config early_config: pytest config object - :param list[str] args: list of arguments passed on the command line - :param _pytest.config.Parser parser: to add command line options - """ - - -# ------------------------------------------------------------------------- -# collection hooks -# ------------------------------------------------------------------------- - - -@hookspec(firstresult=True) -def pytest_collection(session): - """Perform the collection protocol for the given session. - - Stops at first non-None result, see :ref:`firstresult`. - - :param _pytest.main.Session session: the pytest session object - """ - - -def pytest_collection_modifyitems(session, config, items): - """ called after collection has been performed, may filter or re-order - the items in-place. 
- - :param _pytest.main.Session session: the pytest session object - :param _pytest.config.Config config: pytest config object - :param List[_pytest.nodes.Item] items: list of item objects - """ - - -def pytest_collection_finish(session): - """ called after collection has been performed and modified. - - :param _pytest.main.Session session: the pytest session object - """ - - -@hookspec(firstresult=True) -def pytest_ignore_collect(path, config): - """ return True to prevent considering this path for collection. - This hook is consulted for all files and directories prior to calling - more specific hooks. - - Stops at first non-None result, see :ref:`firstresult` - - :param str path: the path to analyze - :param _pytest.config.Config config: pytest config object - """ - - -@hookspec(firstresult=True) -def pytest_collect_directory(path, parent): - """ called before traversing a directory for collection files. - - Stops at first non-None result, see :ref:`firstresult` - - :param str path: the path to analyze - """ - - -def pytest_collect_file(path, parent): - """ return collection Node or None for the given path. Any new node - needs to have the specified ``parent`` as a parent. - - :param str path: the path to collect - """ - - -# logging hooks for collection - - -def pytest_collectstart(collector): - """ collector starts collecting. """ - - -def pytest_itemcollected(item): - """ we just collected a test item. """ - - -def pytest_collectreport(report): - """ collector finished collecting. """ - - -def pytest_deselected(items): - """ called for test items deselected by keyword. """ - - -@hookspec(firstresult=True) -def pytest_make_collect_report(collector): - """ perform ``collector.collect()`` and return a CollectReport. - - Stops at first non-None result, see :ref:`firstresult` """ - - -# ------------------------------------------------------------------------- -# Python test function related hooks -# ------------------------------------------------------------------------- - - -@hookspec(firstresult=True) -def pytest_pycollect_makemodule(path, parent): - """ return a Module collector or None for the given path. - This hook will be called for each matching test module path. - The pytest_collect_file hook needs to be used if you want to - create test modules for files that do not match as a test module. - - Stops at first non-None result, see :ref:`firstresult` """ - - -@hookspec(firstresult=True) -def pytest_pycollect_makeitem(collector, name, obj): - """ return custom item/collector for a python object in a module, or None. - - Stops at first non-None result, see :ref:`firstresult` """ - - -@hookspec(firstresult=True) -def pytest_pyfunc_call(pyfuncitem): - """ call underlying test function. - - Stops at first non-None result, see :ref:`firstresult` """ - - -def pytest_generate_tests(metafunc): - """ generate (multiple) parametrized calls to a test function.""" - - -@hookspec(firstresult=True) -def pytest_make_parametrize_id(config, val, argname): - """Return a user-friendly string representation of the given ``val`` that will be used - by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``. - The parameter name is available as ``argname``, if required. 
- - Stops at first non-None result, see :ref:`firstresult` - - :param _pytest.config.Config config: pytest config object - :param val: the parametrized value - :param str argname: the automatic parameter name produced by pytest - """ - - -# ------------------------------------------------------------------------- -# generic runtest related hooks -# ------------------------------------------------------------------------- - - -@hookspec(firstresult=True) -def pytest_runtestloop(session): - """ called for performing the main runtest loop - (after collection finished). - - Stops at first non-None result, see :ref:`firstresult` - - :param _pytest.main.Session session: the pytest session object - """ - - -def pytest_itemstart(item, node): - """(**Deprecated**) use pytest_runtest_logstart. """ - - -@hookspec(firstresult=True) -def pytest_runtest_protocol(item, nextitem): - """ implements the runtest_setup/call/teardown protocol for - the given test item, including capturing exceptions and calling - reporting hooks. - - :arg item: test item for which the runtest protocol is performed. - - :arg nextitem: the scheduled-to-be-next test item (or None if this - is the end my friend). This argument is passed on to - :py:func:`pytest_runtest_teardown`. - - :return boolean: True if no further hook implementations should be invoked. - - - Stops at first non-None result, see :ref:`firstresult` """ - - -def pytest_runtest_logstart(nodeid, location): - """ signal the start of running a single test item. - - This hook will be called **before** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and - :func:`pytest_runtest_teardown` hooks. - - :param str nodeid: full id of the item - :param location: a triple of ``(filename, linenum, testname)`` - """ - - -def pytest_runtest_logfinish(nodeid, location): - """ signal the complete finish of running a single test item. - - This hook will be called **after** :func:`pytest_runtest_setup`, :func:`pytest_runtest_call` and - :func:`pytest_runtest_teardown` hooks. - - :param str nodeid: full id of the item - :param location: a triple of ``(filename, linenum, testname)`` - """ - - -def pytest_runtest_setup(item): - """ called before ``pytest_runtest_call(item)``. """ - - -def pytest_runtest_call(item): - """ called to execute the test ``item``. """ - - -def pytest_runtest_teardown(item, nextitem): - """ called after ``pytest_runtest_call``. - - :arg nextitem: the scheduled-to-be-next test item (None if no further - test item is scheduled). This argument can be used to - perform exact teardowns, i.e. calling just enough finalizers - so that nextitem only needs to call setup-functions. - """ - - -@hookspec(firstresult=True) -def pytest_runtest_makereport(item, call): - """ return a :py:class:`_pytest.runner.TestReport` object - for the given :py:class:`pytest.Item <_pytest.main.Item>` and - :py:class:`_pytest.runner.CallInfo`. - - Stops at first non-None result, see :ref:`firstresult` """ - - -def pytest_runtest_logreport(report): - """ process a test setup/call/teardown report relating to - the respective phase of executing a test. """ - - -# ------------------------------------------------------------------------- -# Fixture related hooks -# ------------------------------------------------------------------------- - - -@hookspec(firstresult=True) -def pytest_fixture_setup(fixturedef, request): - """ performs fixture setup execution. - - :return: The return value of the call to the fixture function - - Stops at first non-None result, see :ref:`firstresult` - - .. 
note::
-        If the fixture function returns None, other implementations of
-        this hook function will continue to be called, according to the
-        behavior of the :ref:`firstresult` option.
-    """
-
-
-def pytest_fixture_post_finalizer(fixturedef, request):
-    """ called after fixture teardown, but before the cache is cleared so
-    the fixture result cache ``fixturedef.cached_result`` can
-    still be accessed."""
-
-
-# -------------------------------------------------------------------------
-# test session related hooks
-# -------------------------------------------------------------------------
-
-
-def pytest_sessionstart(session):
-    """ called after the ``Session`` object has been created and before performing collection
-    and entering the run test loop.
-
-    :param _pytest.main.Session session: the pytest session object
-    """
-
-
-def pytest_sessionfinish(session, exitstatus):
-    """ called after whole test run finished, right before returning the exit status to the system.
-
-    :param _pytest.main.Session session: the pytest session object
-    :param int exitstatus: the status which pytest will return to the system
-    """
-
-
-def pytest_unconfigure(config):
-    """ called before test process is exited.
-
-    :param _pytest.config.Config config: pytest config object
-    """
-
-
-# -------------------------------------------------------------------------
-# hooks for customizing the assert methods
-# -------------------------------------------------------------------------
-
-
-def pytest_assertrepr_compare(config, op, left, right):
-    """return explanation for comparisons in failing assert expressions.
-
-    Return None for no custom explanation, otherwise return a list
-    of strings. The strings will be joined by newlines but any newlines
-    *in* a string will be escaped. Note that all but the first line will
-    be indented slightly, the intention is for the first line to be a summary.
-
-    :param _pytest.config.Config config: pytest config object
-    """
-
-
-# -------------------------------------------------------------------------
-# hooks for influencing reporting (invoked from _pytest_terminal)
-# -------------------------------------------------------------------------
-
-
-def pytest_report_header(config, startdir):
-    """ return a string or list of strings to be displayed as header info for terminal reporting.
-
-    :param _pytest.config.Config config: pytest config object
-    :param startdir: py.path object with the starting dir
-
-    .. note::
-
-        This function should be implemented only in plugins or ``conftest.py``
-        files situated at the tests root directory due to how pytest
-        :ref:`discovers plugins during startup <pluginorder>`.
-    """
-
-
-def pytest_report_collectionfinish(config, startdir, items):
-    """
-    .. versionadded:: 3.2
-
-    return a string or list of strings to be displayed after collection has finished successfully.
-
-    These strings will be displayed after the standard "collected X items" message.
-
-    :param _pytest.config.Config config: pytest config object
-    :param startdir: py.path object with the starting dir
-    :param items: list of pytest items that are going to be executed; this list should not be modified.
-    """
-
-
-@hookspec(firstresult=True)
-def pytest_report_teststatus(report):
-    """ return result-category, shortletter and verbose word for reporting.
-
-    Stops at first non-None result, see :ref:`firstresult` """
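Editor's note: the reporting hooks above are typically implemented in a project's conftest.py rather than in plugins. A minimal, hypothetical sketch combining two of them (names and strings are illustrative, not from the deleted file):

    # conftest.py (illustrative only)
    def pytest_report_header(config, startdir):
        # contributes one extra line to the terminal header
        return "example-suite, rootdir: %s" % startdir

    def pytest_report_teststatus(report):
        # report passed calls with a capital "P" instead of "."
        if report.when == "call" and report.passed:
            return "passed", "P", "PASSED"
        # returning None lets other implementations decide (firstresult)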
-
-
-def pytest_terminal_summary(terminalreporter, exitstatus):
-    """Add a section to terminal summary reporting.
-
-    :param _pytest.terminal.TerminalReporter terminalreporter: the internal terminal reporter object
-    :param int exitstatus: the exit status that will be reported back to the OS
-
-    .. versionadded:: 3.5
-        The ``exitstatus`` parameter.
-    """
-
-
-@hookspec(historic=True)
-def pytest_logwarning(message, code, nodeid, fslocation):
-    """
-    .. deprecated:: 3.8
-
-        This hook will stop working in a future release.
-
-        pytest no longer triggers this hook, but the
-        terminal writer still implements it to display warnings issued by
-        :meth:`_pytest.config.Config.warn` and :meth:`_pytest.nodes.Node.warn`. Calling those functions will be
-        an error in future releases.
-
-    process a warning specified by a message, a code string,
-    a nodeid and fslocation (both of which may be None
-    if the warning is not tied to a particular node/location).
-
-    .. note::
-        This hook is incompatible with ``hookwrapper=True``.
-    """
-
-
-@hookspec(historic=True)
-def pytest_warning_captured(warning_message, when, item):
-    """
-    Process a warning captured by the internal pytest warnings plugin.
-
-    :param warnings.WarningMessage warning_message:
-        The captured warning. This is the same object produced by :py:func:`warnings.catch_warnings`, and contains
-        the same attributes as the parameters of :py:func:`warnings.showwarning`.
-
-    :param str when:
-        Indicates when the warning was captured. Possible values:
-
-        * ``"config"``: during pytest configuration/initialization stage.
-        * ``"collect"``: during test collection.
-        * ``"runtest"``: during test execution.
-
-    :param pytest.Item|None item:
-        **DEPRECATED**: This parameter is incompatible with ``pytest-xdist``, and will always receive ``None``
-        in a future release.
-
-        The item being executed if ``when`` is ``"runtest"``, otherwise ``None``.
-    """
-
-
-# -------------------------------------------------------------------------
-# doctest hooks
-# -------------------------------------------------------------------------
-
-
-@hookspec(firstresult=True)
-def pytest_doctest_prepare_content(content):
-    """ return processed content for a given doctest
-
-    Stops at first non-None result, see :ref:`firstresult` """
-
-
-# -------------------------------------------------------------------------
-# error handling and internal debugging hooks
-# -------------------------------------------------------------------------
-
-
-def pytest_internalerror(excrepr, excinfo):
-    """ called for internal errors. """
-
-
-def pytest_keyboard_interrupt(excinfo):
-    """ called for keyboard interrupt. """
-
-
-def pytest_exception_interact(node, call, report):
-    """called when an exception was raised which can potentially be
-    interactively handled.
-
-    This hook is only called if an exception was raised
-    that is not an internal exception like ``skip.Exception``.
-    """
-
-
-def pytest_enter_pdb(config, pdb):
-    """ called upon pdb.set_trace(), can be used by plugins to take special
-    action just before the python debugger enters in interactive mode.
-
-    :param _pytest.config.Config config: pytest config object
-    :param pdb.Pdb pdb: Pdb instance
-    """
-
-
-def pytest_leave_pdb(config, pdb):
-    """ called when leaving pdb (e.g. with continue after pdb.set_trace()).
-
-    Can be used by plugins to take special action just after the python
-    debugger leaves interactive mode.
- - :param _pytest.config.Config config: pytest config object - :param pdb.Pdb pdb: Pdb instance - """ diff --git a/venv/lib/python3.8/site-packages/_pytest/junitxml.py b/venv/lib/python3.8/site-packages/_pytest/junitxml.py deleted file mode 100644 index 09847c94..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/junitxml.py +++ /dev/null @@ -1,569 +0,0 @@ -""" - report test results in JUnit-XML format, - for use with Jenkins and build integration servers. - - -Based on initial code from Ross Lawley. - -Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ -src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import functools -import os -import re -import sys -import time - -import py - -import pytest -from _pytest import nodes -from _pytest.config import filename_arg - -# Python 2.X and 3.X compatibility -if sys.version_info[0] < 3: - from codecs import open -else: - unichr = chr - unicode = str - long = int - - -class Junit(py.xml.Namespace): - pass - - -# We need to get the subset of the invalid unicode ranges according to -# XML 1.0 which are valid in this python build. Hence we calculate -# this dynamically instead of hardcoding it. The spec range of valid -# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] -# | [#x10000-#x10FFFF] -_legal_chars = (0x09, 0x0A, 0x0D) -_legal_ranges = ((0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF)) -_legal_xml_re = [ - unicode("%s-%s") % (unichr(low), unichr(high)) - for (low, high) in _legal_ranges - if low < sys.maxunicode -] -_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re -illegal_xml_re = re.compile(unicode("[^%s]") % unicode("").join(_legal_xml_re)) -del _legal_chars -del _legal_ranges -del _legal_xml_re - -_py_ext_re = re.compile(r"\.py$") - - -def bin_xml_escape(arg): - def repl(matchobj): - i = ord(matchobj.group()) - if i <= 0xFF: - return unicode("#x%02X") % i - else: - return unicode("#x%04X") % i - - return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg))) - - -class _NodeReporter(object): - def __init__(self, nodeid, xml): - - self.id = nodeid - self.xml = xml - self.add_stats = self.xml.add_stats - self.duration = 0 - self.properties = [] - self.nodes = [] - self.testcase = None - self.attrs = {} - - def append(self, node): - self.xml.add_stats(type(node).__name__) - self.nodes.append(node) - - def add_property(self, name, value): - self.properties.append((str(name), bin_xml_escape(value))) - - def add_attribute(self, name, value): - self.attrs[str(name)] = bin_xml_escape(value) - - def make_properties_node(self): - """Return a Junit node containing custom properties, if any. 
- """ - if self.properties: - return Junit.properties( - [ - Junit.property(name=name, value=value) - for name, value in self.properties - ] - ) - return "" - - def record_testreport(self, testreport): - assert not self.testcase - names = mangle_test_address(testreport.nodeid) - existing_attrs = self.attrs - classnames = names[:-1] - if self.xml.prefix: - classnames.insert(0, self.xml.prefix) - attrs = { - "classname": ".".join(classnames), - "name": bin_xml_escape(names[-1]), - "file": testreport.location[0], - } - if testreport.location[1] is not None: - attrs["line"] = testreport.location[1] - if hasattr(testreport, "url"): - attrs["url"] = testreport.url - self.attrs = attrs - self.attrs.update(existing_attrs) # restore any user-defined attributes - - def to_xml(self): - testcase = Junit.testcase(time=self.duration, **self.attrs) - testcase.append(self.make_properties_node()) - for node in self.nodes: - testcase.append(node) - return testcase - - def _add_simple(self, kind, message, data=None): - data = bin_xml_escape(data) - node = kind(data, message=message) - self.append(node) - - def write_captured_output(self, report): - content_out = report.capstdout - content_log = report.caplog - content_err = report.capstderr - - if content_log or content_out: - if content_log and self.xml.logging == "system-out": - if content_out: - # syncing stdout and the log-output is not done yet. It's - # probably not worth the effort. Therefore, first the captured - # stdout is shown and then the captured logs. - content = "\n".join( - [ - " Captured Stdout ".center(80, "-"), - content_out, - "", - " Captured Log ".center(80, "-"), - content_log, - ] - ) - else: - content = content_log - else: - content = content_out - - if content: - tag = getattr(Junit, "system-out") - self.append(tag(bin_xml_escape(content))) - - if content_log or content_err: - if content_log and self.xml.logging == "system-err": - if content_err: - content = "\n".join( - [ - " Captured Stderr ".center(80, "-"), - content_err, - "", - " Captured Log ".center(80, "-"), - content_log, - ] - ) - else: - content = content_log - else: - content = content_err - - if content: - tag = getattr(Junit, "system-err") - self.append(tag(bin_xml_escape(content))) - - def append_pass(self, report): - self.add_stats("passed") - - def append_failure(self, report): - # msg = str(report.longrepr.reprtraceback.extraline) - if hasattr(report, "wasxfail"): - self._add_simple(Junit.skipped, "xfail-marked test passes unexpectedly") - else: - if hasattr(report.longrepr, "reprcrash"): - message = report.longrepr.reprcrash.message - elif isinstance(report.longrepr, (unicode, str)): - message = report.longrepr - else: - message = str(report.longrepr) - message = bin_xml_escape(message) - fail = Junit.failure(message=message) - fail.append(bin_xml_escape(report.longrepr)) - self.append(fail) - - def append_collect_error(self, report): - # msg = str(report.longrepr.reprtraceback.extraline) - self.append( - Junit.error(bin_xml_escape(report.longrepr), message="collection failure") - ) - - def append_collect_skipped(self, report): - self._add_simple(Junit.skipped, "collection skipped", report.longrepr) - - def append_error(self, report): - if getattr(report, "when", None) == "teardown": - msg = "test teardown failure" - else: - msg = "test setup failure" - self._add_simple(Junit.error, msg, report.longrepr) - - def append_skipped(self, report): - if hasattr(report, "wasxfail"): - self._add_simple(Junit.skipped, "expected test failure", report.wasxfail) - else: - 
-            filename, lineno, skipreason = report.longrepr
-            if skipreason.startswith("Skipped: "):
-                skipreason = skipreason[9:]
-            details = "%s:%s: %s" % (filename, lineno, skipreason)
-
-            self.append(
-                Junit.skipped(
-                    bin_xml_escape(details),
-                    type="pytest.skip",
-                    message=bin_xml_escape(skipreason),
-                )
-            )
-            self.write_captured_output(report)
-
-    def finalize(self):
-        data = self.to_xml().unicode(indent=0)
-        self.__dict__.clear()
-        self.to_xml = lambda: py.xml.raw(data)
-
-
-@pytest.fixture
-def record_property(request):
-    """Add extra properties to the calling test.
-    User properties become part of the test report and are available to the
-    configured reporters, like JUnit XML.
-    The fixture is callable with ``(name, value)``, with value being automatically
-    xml-encoded.
-
-    Example::
-
-        def test_function(record_property):
-            record_property("example_key", 1)
-    """
-
-    def append_property(name, value):
-        request.node.user_properties.append((name, value))
-
-    return append_property
-
-
-@pytest.fixture
-def record_xml_property(record_property, request):
-    """(Deprecated) use record_property."""
-    from _pytest import deprecated
-
-    request.node.warn(deprecated.RECORD_XML_PROPERTY)
-
-    return record_property
-
-
-@pytest.fixture
-def record_xml_attribute(request):
-    """Add extra xml attributes to the tag for the calling test.
-    The fixture is callable with ``(name, value)``, with value being
-    automatically xml-encoded
-    """
-    from _pytest.warning_types import PytestWarning
-
-    request.node.warn(PytestWarning("record_xml_attribute is an experimental feature"))
-    xml = getattr(request.config, "_xml", None)
-    if xml is not None:
-        node_reporter = xml.node_reporter(request.node.nodeid)
-        return node_reporter.add_attribute
-    else:
-
-        def add_attr_noop(name, value):
-            pass
-
-        return add_attr_noop
-
-
-def pytest_addoption(parser):
-    group = parser.getgroup("terminal reporting")
-    group.addoption(
-        "--junitxml",
-        "--junit-xml",
-        action="store",
-        dest="xmlpath",
-        metavar="path",
-        type=functools.partial(filename_arg, optname="--junitxml"),
-        default=None,
-        help="create junit-xml style report file at given path.",
-    )
-    group.addoption(
-        "--junitprefix",
-        "--junit-prefix",
-        action="store",
-        metavar="str",
-        default=None,
-        help="prepend prefix to classnames in junit-xml output",
-    )
-    parser.addini(
-        "junit_suite_name", "Test suite name for JUnit report", default="pytest"
-    )
-    parser.addini(
-        "junit_logging",
-        "Write captured log messages to JUnit report: "
-        "one of no|system-out|system-err",
-        default="no",
-    )  # choices=['no', 'stdout', 'stderr'])
-
-
-def pytest_configure(config):
-    xmlpath = config.option.xmlpath
-    # prevent opening xmllog on slave nodes (xdist)
-    if xmlpath and not hasattr(config, "slaveinput"):
-        config._xml = LogXML(
-            xmlpath,
-            config.option.junitprefix,
-            config.getini("junit_suite_name"),
-            config.getini("junit_logging"),
-        )
-        config.pluginmanager.register(config._xml)
-
-
-def pytest_unconfigure(config):
-    xml = getattr(config, "_xml", None)
-    if xml:
-        del config._xml
-        config.pluginmanager.unregister(xml)
-
-
-def mangle_test_address(address):
-    path, possible_open_bracket, params = address.partition("[")
-    names = path.split("::")
-    try:
-        names.remove("()")
-    except ValueError:
-        pass
-    # convert file path to dotted path
-    names[0] = names[0].replace(nodes.SEP, ".")
-    names[0] = _py_ext_re.sub("", names[0])
-    # put any params back
-    names[-1] += possible_open_bracket + params
-    return names
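Editor's note: a concrete illustration of what mangle_test_address returns (hypothetical node id; the transformation follows directly from the code above):

    mangle_test_address("tests/test_api.py::TestUsers::test_create[admin]")
    # -> ["tests.test_api", "TestUsers", "test_create[admin]"]
    # the ".py" suffix is dropped, path separators become dots, and the
    # parametrization suffix is re-attached to the final name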
-class LogXML(object):
-    def __init__(self, logfile, prefix, suite_name="pytest", logging="no"):
-        logfile = os.path.expanduser(os.path.expandvars(logfile))
-        self.logfile = os.path.normpath(os.path.abspath(logfile))
-        self.prefix = prefix
-        self.suite_name = suite_name
-        self.logging = logging
-        self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0)
-        self.node_reporters = {}  # nodeid -> _NodeReporter
-        self.node_reporters_ordered = []
-        self.global_properties = []
-        # List of reports that failed on call but teardown is pending.
-        self.open_reports = []
-        self.cnt_double_fail_tests = 0
-
-    def finalize(self, report):
-        nodeid = getattr(report, "nodeid", report)
-        # local hack to handle xdist report order
-        slavenode = getattr(report, "node", None)
-        reporter = self.node_reporters.pop((nodeid, slavenode))
-        if reporter is not None:
-            reporter.finalize()
-
-    def node_reporter(self, report):
-        nodeid = getattr(report, "nodeid", report)
-        # local hack to handle xdist report order
-        slavenode = getattr(report, "node", None)
-
-        key = nodeid, slavenode
-
-        if key in self.node_reporters:
-            # TODO: breaks for --dist=each
-            return self.node_reporters[key]
-
-        reporter = _NodeReporter(nodeid, self)
-
-        self.node_reporters[key] = reporter
-        self.node_reporters_ordered.append(reporter)
-
-        return reporter
-
-    def add_stats(self, key):
-        if key in self.stats:
-            self.stats[key] += 1
-
-    def _opentestcase(self, report):
-        reporter = self.node_reporter(report)
-        reporter.record_testreport(report)
-        return reporter
-
-    def pytest_runtest_logreport(self, report):
-        """handle a setup/call/teardown report, generating the appropriate
-        xml tags as necessary.
-
-        note: due to plugins like xdist, this hook may be called in interlaced
-        order with reports from other nodes. for example:
-
-        usual call order:
-            -> setup node1
-            -> call node1
-            -> teardown node1
-            -> setup node2
-            -> call node2
-            -> teardown node2
-
-        possible call order in xdist:
-            -> setup node1
-            -> call node1
-            -> setup node2
-            -> call node2
-            -> teardown node2
-            -> teardown node1
-        """
-        close_report = None
-        if report.passed:
-            if report.when == "call":  # ignore setup/teardown
-                reporter = self._opentestcase(report)
-                reporter.append_pass(report)
-        elif report.failed:
-            if report.when == "teardown":
-                # The following vars are needed when xdist plugin is used
-                report_wid = getattr(report, "worker_id", None)
-                report_ii = getattr(report, "item_index", None)
-                close_report = next(
-                    (
-                        rep
-                        for rep in self.open_reports
-                        if (
-                            rep.nodeid == report.nodeid
-                            and getattr(rep, "item_index", None) == report_ii
-                            and getattr(rep, "worker_id", None) == report_wid
-                        )
-                    ),
-                    None,
-                )
-                if close_report:
-                    # We need to open new testcase in case we have failure in
-                    # call and error in teardown in order to follow junit
-                    # schema
-                    self.finalize(close_report)
-                    self.cnt_double_fail_tests += 1
-            reporter = self._opentestcase(report)
-            if report.when == "call":
-                reporter.append_failure(report)
-                self.open_reports.append(report)
-            else:
-                reporter.append_error(report)
-        elif report.skipped:
-            reporter = self._opentestcase(report)
-            reporter.append_skipped(report)
-        self.update_testcase_duration(report)
-        if report.when == "teardown":
-            reporter = self._opentestcase(report)
-            reporter.write_captured_output(report)
-
-            for propname, propvalue in report.user_properties:
-                reporter.add_property(propname, propvalue)
-
-            self.finalize(report)
-            report_wid = getattr(report, "worker_id", None)
-            report_ii = getattr(report, "item_index", None)
-            close_report = next(
-                (
-                    rep
-                    for rep in self.open_reports
-                    if (
-                        rep.nodeid == report.nodeid
-                        and getattr(rep, "item_index", None) == report_ii
-                        and getattr(rep, "worker_id", None) == report_wid
-                    )
-                ),
-                None,
-            )
-            if close_report:
-                self.open_reports.remove(close_report)
-
-    def update_testcase_duration(self, report):
-        """accumulates total duration for nodeid from given report and updates
-        the Junit.testcase with the new total if already created.
-        """
-        reporter = self.node_reporter(report)
-        reporter.duration += getattr(report, "duration", 0.0)
-
-    def pytest_collectreport(self, report):
-        if not report.passed:
-            reporter = self._opentestcase(report)
-            if report.failed:
-                reporter.append_collect_error(report)
-            else:
-                reporter.append_collect_skipped(report)
-
-    def pytest_internalerror(self, excrepr):
-        reporter = self.node_reporter("internal")
-        reporter.attrs.update(classname="pytest", name="internal")
-        reporter._add_simple(Junit.error, "internal error", excrepr)
-
-    def pytest_sessionstart(self):
-        self.suite_start_time = time.time()
-
-    def pytest_sessionfinish(self):
-        dirname = os.path.dirname(os.path.abspath(self.logfile))
-        if not os.path.isdir(dirname):
-            os.makedirs(dirname)
-        logfile = open(self.logfile, "w", encoding="utf-8")
-        suite_stop_time = time.time()
-        suite_time_delta = suite_stop_time - self.suite_start_time
-
-        numtests = (
-            self.stats["passed"]
-            + self.stats["failure"]
-            + self.stats["skipped"]
-            + self.stats["error"]
-            - self.cnt_double_fail_tests
-        )
-        logfile.write('<?xml version="1.0" encoding="utf-8"?>')
-
-        logfile.write(
-            Junit.testsuite(
-                self._get_global_properties_node(),
-                [x.to_xml() for x in self.node_reporters_ordered],
-                name=self.suite_name,
-                errors=self.stats["error"],
-                failures=self.stats["failure"],
-                skips=self.stats["skipped"],
-                tests=numtests,
-                time="%.3f" % suite_time_delta,
-            ).unicode(indent=0)
-        )
-        logfile.close()
-
-    def pytest_terminal_summary(self, terminalreporter):
-        terminalreporter.write_sep("-", "generated xml file: %s" % (self.logfile))
-
-    def add_global_property(self, name, value):
-        self.global_properties.append((str(name), bin_xml_escape(value)))
-
-    def _get_global_properties_node(self):
-        """Return a Junit node containing custom properties, if any.
-        """
-        if self.global_properties:
-            return Junit.properties(
-                [
-                    Junit.property(name=name, value=value)
-                    for name, value in self.global_properties
-                ]
-            )
-        return ""
diff --git a/venv/lib/python3.8/site-packages/_pytest/logging.py b/venv/lib/python3.8/site-packages/_pytest/logging.py
deleted file mode 100644
index 45fd5950..00000000
--- a/venv/lib/python3.8/site-packages/_pytest/logging.py
+++ /dev/null
@@ -1,625 +0,0 @@
-""" Access and control log capturing. """
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import logging
-import re
-from contextlib import contextmanager
-
-import py
-import six
-
-import pytest
-from _pytest.compat import dummy_context_manager
-from _pytest.config import create_terminal_writer
-
-
-DEFAULT_LOG_FORMAT = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s"
-DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S"
-
-
-class ColoredLevelFormatter(logging.Formatter):
-    """
-    Colorize the %(levelname)..s part of the log format passed to __init__.
- """ - - LOGLEVEL_COLOROPTS = { - logging.CRITICAL: {"red"}, - logging.ERROR: {"red", "bold"}, - logging.WARNING: {"yellow"}, - logging.WARN: {"yellow"}, - logging.INFO: {"green"}, - logging.DEBUG: {"purple"}, - logging.NOTSET: set(), - } - LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-]?\d*s)") - - def __init__(self, terminalwriter, *args, **kwargs): - super(ColoredLevelFormatter, self).__init__(*args, **kwargs) - if six.PY2: - self._original_fmt = self._fmt - else: - self._original_fmt = self._style._fmt - self._level_to_fmt_mapping = {} - - levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) - if not levelname_fmt_match: - return - levelname_fmt = levelname_fmt_match.group() - - for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): - formatted_levelname = levelname_fmt % { - "levelname": logging.getLevelName(level) - } - - # add ANSI escape sequences around the formatted levelname - color_kwargs = {name: True for name in color_opts} - colorized_formatted_levelname = terminalwriter.markup( - formatted_levelname, **color_kwargs - ) - self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( - colorized_formatted_levelname, self._fmt - ) - - def format(self, record): - fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) - if six.PY2: - self._fmt = fmt - else: - self._style._fmt = fmt - return super(ColoredLevelFormatter, self).format(record) - - -def get_option_ini(config, *names): - for name in names: - ret = config.getoption(name) # 'default' arg won't work as expected - if ret is None: - ret = config.getini(name) - if ret: - return ret - - -def pytest_addoption(parser): - """Add options to control log capturing.""" - group = parser.getgroup("logging") - - def add_option_ini(option, dest, default=None, type=None, **kwargs): - parser.addini( - dest, default=default, type=type, help="default value for " + option - ) - group.addoption(option, dest=dest, **kwargs) - - add_option_ini( - "--no-print-logs", - dest="log_print", - action="store_const", - const=False, - default=True, - type="bool", - help="disable printing caught logs on failed tests.", - ) - add_option_ini( - "--log-level", - dest="log_level", - default=None, - help="logging level used by the logging module", - ) - add_option_ini( - "--log-format", - dest="log_format", - default=DEFAULT_LOG_FORMAT, - help="log format as used by the logging module.", - ) - add_option_ini( - "--log-date-format", - dest="log_date_format", - default=DEFAULT_LOG_DATE_FORMAT, - help="log date format as used by the logging module.", - ) - parser.addini( - "log_cli", - default=False, - type="bool", - help='enable log display during test run (also known as "live logging").', - ) - add_option_ini( - "--log-cli-level", dest="log_cli_level", default=None, help="cli logging level." 
- ) - add_option_ini( - "--log-cli-format", - dest="log_cli_format", - default=None, - help="log format as used by the logging module.", - ) - add_option_ini( - "--log-cli-date-format", - dest="log_cli_date_format", - default=None, - help="log date format as used by the logging module.", - ) - add_option_ini( - "--log-file", - dest="log_file", - default=None, - help="path to a file where logging output will be written.", - ) - add_option_ini( - "--log-file-level", - dest="log_file_level", - default=None, - help="log file logging level.", - ) - add_option_ini( - "--log-file-format", - dest="log_file_format", - default=DEFAULT_LOG_FORMAT, - help="log format as used by the logging module.", - ) - add_option_ini( - "--log-file-date-format", - dest="log_file_date_format", - default=DEFAULT_LOG_DATE_FORMAT, - help="log date format as used by the logging module.", - ) - - -@contextmanager -def catching_logs(handler, formatter=None, level=None): - """Context manager that prepares the whole logging machinery properly.""" - root_logger = logging.getLogger() - - if formatter is not None: - handler.setFormatter(formatter) - if level is not None: - handler.setLevel(level) - - # Adding the same handler twice would confuse logging system. - # Just don't do that. - add_new_handler = handler not in root_logger.handlers - - if add_new_handler: - root_logger.addHandler(handler) - if level is not None: - orig_level = root_logger.level - root_logger.setLevel(min(orig_level, level)) - try: - yield handler - finally: - if level is not None: - root_logger.setLevel(orig_level) - if add_new_handler: - root_logger.removeHandler(handler) - - -class LogCaptureHandler(logging.StreamHandler): - """A logging handler that stores log records and the log text.""" - - def __init__(self): - """Creates a new log handler.""" - logging.StreamHandler.__init__(self, py.io.TextIO()) - self.records = [] - - def emit(self, record): - """Keep the log records in a list in addition to the log text.""" - self.records.append(record) - logging.StreamHandler.emit(self, record) - - def reset(self): - self.records = [] - self.stream = py.io.TextIO() - - -class LogCaptureFixture(object): - """Provides access and control of log capturing.""" - - def __init__(self, item): - """Creates a new funcarg.""" - self._item = item - # dict of log name -> log level - self._initial_log_levels = {} # type: Dict[str, int] - - def _finalize(self): - """Finalizes the fixture. - - This restores the log levels changed by :meth:`set_level`. - """ - # restore log levels - for logger_name, level in self._initial_log_levels.items(): - logger = logging.getLogger(logger_name) - logger.setLevel(level) - - @property - def handler(self): - """ - :rtype: LogCaptureHandler - """ - return self._item.catch_log_handler - - def get_records(self, when): - """ - Get the logging records for one of the possible test phases. - - :param str when: - Which test phase to obtain the records from. Valid values are: "setup", "call" and "teardown". - - :rtype: List[logging.LogRecord] - :return: the list of captured records at the given stage - - .. 
versionadded:: 3.4 - """ - handler = self._item.catch_log_handlers.get(when) - if handler: - return handler.records - else: - return [] - - @property - def text(self): - """Returns the log text.""" - return self.handler.stream.getvalue() - - @property - def records(self): - """Returns the list of log records.""" - return self.handler.records - - @property - def record_tuples(self): - """Returns a list of a stripped down version of log records intended - for use in assertion comparison. - - The format of the tuple is: - - (logger_name, log_level, message) - """ - return [(r.name, r.levelno, r.getMessage()) for r in self.records] - - @property - def messages(self): - """Returns a list of format-interpolated log messages. - - Unlike 'records', which contains the format string and parameters for interpolation, log messages in this list - are all interpolated. - Unlike 'text', which contains the output from the handler, log messages in this list are unadorned with - levels, timestamps, etc, making exact comparisons more reliable. - - Note that traceback or stack info (from :func:`logging.exception` or the `exc_info` or `stack_info` arguments - to the logging functions) is not included, as this is added by the formatter in the handler. - - .. versionadded:: 3.7 - """ - return [r.getMessage() for r in self.records] - - def clear(self): - """Reset the list of log records and the captured log text.""" - self.handler.reset() - - def set_level(self, level, logger=None): - """Sets the level for capturing of logs. The level will be restored to its previous value at the end of - the test. - - :param int level: the level to set. - :param str logger: the logger to update the level of. If not given, the root logger level is updated. - - .. versionchanged:: 3.4 - The levels of the loggers changed by this function will be restored to their initial values at the - end of the test. - """ - logger_name = logger - logger = logging.getLogger(logger_name) - # save the original log-level to restore it during teardown - self._initial_log_levels.setdefault(logger_name, logger.level) - logger.setLevel(level) - - @contextmanager - def at_level(self, level, logger=None): - """Context manager that sets the level for capturing of logs. After the end of the 'with' statement the - level is restored to its original value. - - :param int level: the level to set. - :param str logger: the logger to update the level of. If not given, the root logger level is updated. - """ - logger = logging.getLogger(logger) - orig_level = logger.level - logger.setLevel(level) - try: - yield - finally: - logger.setLevel(orig_level) - - -@pytest.fixture -def caplog(request): - """Access and control log capturing. 
- - Captured logs are available through the following properties/methods:: - - * caplog.text -> string containing formatted log output - * caplog.records -> list of logging.LogRecord instances - * caplog.record_tuples -> list of (logger_name, level, message) tuples - * caplog.clear() -> clear captured records and formatted log output string - """ - result = LogCaptureFixture(request.node) - yield result - result._finalize() - - -def get_actual_log_level(config, *setting_names): - """Return the actual logging level.""" - - for setting_name in setting_names: - log_level = config.getoption(setting_name) - if log_level is None: - log_level = config.getini(setting_name) - if log_level: - break - else: - return - - if isinstance(log_level, six.string_types): - log_level = log_level.upper() - try: - return int(getattr(logging, log_level, log_level)) - except ValueError: - # Python logging does not recognise this as a logging level - raise pytest.UsageError( - "'{}' is not recognized as a logging level name for " - "'{}'. Please consider passing the " - "logging level num instead.".format(log_level, setting_name) - ) - - -def pytest_configure(config): - config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") - - -class LoggingPlugin(object): - """Attaches to the logging module and captures log messages for each test. - """ - - def __init__(self, config): - """Creates a new plugin to capture log messages. - - The formatter can be safely shared across all handlers so - create a single one for the entire test session here. - """ - self._config = config - - # enable verbose output automatically if live logging is enabled - if self._log_cli_enabled() and not config.getoption("verbose"): - # sanity check: terminal reporter should not have been loaded at this point - assert self._config.pluginmanager.get_plugin("terminalreporter") is None - config.option.verbose = 1 - - self.print_logs = get_option_ini(config, "log_print") - self.formatter = logging.Formatter( - get_option_ini(config, "log_format"), - get_option_ini(config, "log_date_format"), - ) - self.log_level = get_actual_log_level(config, "log_level") - - log_file = get_option_ini(config, "log_file") - if log_file: - self.log_file_level = get_actual_log_level(config, "log_file_level") - - log_file_format = get_option_ini(config, "log_file_format", "log_format") - log_file_date_format = get_option_ini( - config, "log_file_date_format", "log_date_format" - ) - # Each pytest runtests session will write to a clean logfile - self.log_file_handler = logging.FileHandler( - log_file, mode="w", encoding="UTF-8" - ) - log_file_formatter = logging.Formatter( - log_file_format, datefmt=log_file_date_format - ) - self.log_file_handler.setFormatter(log_file_formatter) - else: - self.log_file_handler = None - - self.log_cli_handler = None - - def _log_cli_enabled(self): - """Return True if log_cli should be considered enabled, either explicitly - or because --log-cli-level was given in the command-line. - """ - return self._config.getoption( - "--log-cli-level" - ) is not None or self._config.getini("log_cli") - - @pytest.hookimpl(hookwrapper=True, tryfirst=True) - def pytest_collection(self): - # This has to be called before the first log message is logged, - # so we can access the terminal reporter plugin. 
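The caplog fixture and LogCaptureFixture removed above are pytest's public log-capture API. As a minimal usage sketch of that API (the test body, logger, and message below are illustrative, not taken from this file):

    import logging

    logger = logging.getLogger(__name__)

    def test_disk_warning_is_logged(caplog):
        # set_level() records the previous level; the fixture restores it at teardown
        caplog.set_level(logging.INFO)
        logger.warning("disk usage at 95%")
        # record_tuples strips formatting, so the assertion stays stable across format changes
        assert (logger.name, logging.WARNING, "disk usage at 95%") in caplog.record_tuples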
- self._setup_cli_logging() - - with self.live_logs_context(): - if self.log_cli_handler: - self.log_cli_handler.set_when("collection") - - if self.log_file_handler is not None: - with catching_logs(self.log_file_handler, level=self.log_file_level): - yield - else: - yield - - @contextmanager - def _runtest_for(self, item, when): - """Implements the internals of pytest_runtest_xxx() hook.""" - with catching_logs( - LogCaptureHandler(), formatter=self.formatter, level=self.log_level - ) as log_handler: - if self.log_cli_handler: - self.log_cli_handler.set_when(when) - - if item is None: - yield # run the test - return - - if not hasattr(item, "catch_log_handlers"): - item.catch_log_handlers = {} - item.catch_log_handlers[when] = log_handler - item.catch_log_handler = log_handler - try: - yield # run test - finally: - if when == "teardown": - del item.catch_log_handler - del item.catch_log_handlers - - if self.print_logs: - # Add a captured log section to the report. - log = log_handler.stream.getvalue().strip() - item.add_report_section(when, "log", log) - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_setup(self, item): - with self._runtest_for(item, "setup"): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_call(self, item): - with self._runtest_for(item, "call"): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_teardown(self, item): - with self._runtest_for(item, "teardown"): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_logstart(self): - if self.log_cli_handler: - self.log_cli_handler.reset() - with self._runtest_for(None, "start"): - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtest_logfinish(self): - with self._runtest_for(None, "finish"): - yield - - @pytest.hookimpl(hookwrapper=True, tryfirst=True) - def pytest_sessionfinish(self): - with self.live_logs_context(): - if self.log_cli_handler: - self.log_cli_handler.set_when("sessionfinish") - if self.log_file_handler is not None: - with catching_logs(self.log_file_handler, level=self.log_file_level): - yield - else: - yield - - @pytest.hookimpl(hookwrapper=True, tryfirst=True) - def pytest_sessionstart(self): - self._setup_cli_logging() - with self.live_logs_context(): - if self.log_cli_handler: - self.log_cli_handler.set_when("sessionstart") - if self.log_file_handler is not None: - with catching_logs(self.log_file_handler, level=self.log_file_level): - yield - else: - yield - - @pytest.hookimpl(hookwrapper=True) - def pytest_runtestloop(self, session): - """Runs all collected test items.""" - with self.live_logs_context(): - if self.log_file_handler is not None: - with catching_logs(self.log_file_handler, level=self.log_file_level): - yield # run all the tests - else: - yield # run all the tests - - def _setup_cli_logging(self): - """Sets up the handler and logger for the Live Logs feature, if enabled.""" - terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter") - if self._log_cli_enabled() and terminal_reporter is not None: - capture_manager = self._config.pluginmanager.get_plugin("capturemanager") - log_cli_handler = _LiveLoggingStreamHandler( - terminal_reporter, capture_manager - ) - log_cli_format = get_option_ini( - self._config, "log_cli_format", "log_format" - ) - log_cli_date_format = get_option_ini( - self._config, "log_cli_date_format", "log_date_format" - ) - if ( - self._config.option.color != "no" - and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search(log_cli_format) - ): - log_cli_formatter = 
ColoredLevelFormatter( - create_terminal_writer(self._config), - log_cli_format, - datefmt=log_cli_date_format, - ) - else: - log_cli_formatter = logging.Formatter( - log_cli_format, datefmt=log_cli_date_format - ) - log_cli_level = get_actual_log_level( - self._config, "log_cli_level", "log_level" - ) - self.log_cli_handler = log_cli_handler - self.live_logs_context = lambda: catching_logs( - log_cli_handler, formatter=log_cli_formatter, level=log_cli_level - ) - else: - self.live_logs_context = lambda: dummy_context_manager() - # Note that the lambda for the live_logs_context is needed because - # live_logs_context can otherwise not be entered multiple times due - # to limitations of contextlib.contextmanager - - -class _LiveLoggingStreamHandler(logging.StreamHandler): - """ - Custom StreamHandler used by the live logging feature: it will write a newline before the first log message - in each test. - - During live logging we must also explicitly disable stdout/stderr capturing otherwise it will get captured - and won't appear in the terminal. - """ - - def __init__(self, terminal_reporter, capture_manager): - """ - :param _pytest.terminal.TerminalReporter terminal_reporter: - :param _pytest.capture.CaptureManager capture_manager: - """ - logging.StreamHandler.__init__(self, stream=terminal_reporter) - self.capture_manager = capture_manager - self.reset() - self.set_when(None) - self._test_outcome_written = False - - def reset(self): - """Reset the handler; should be called before the start of each test""" - self._first_record_emitted = False - - def set_when(self, when): - """Prepares for the given test phase (setup/call/teardown)""" - self._when = when - self._section_name_shown = False - if when == "start": - self._test_outcome_written = False - - def emit(self, record): - ctx_manager = ( - self.capture_manager.global_and_fixture_disabled() - if self.capture_manager - else dummy_context_manager() - ) - with ctx_manager: - if not self._first_record_emitted: - self.stream.write("\n") - self._first_record_emitted = True - elif self._when in ("teardown", "finish"): - if not self._test_outcome_written: - self._test_outcome_written = True - self.stream.write("\n") - if not self._section_name_shown and self._when: - self.stream.section("live log " + self._when, sep="-", bold=True) - self._section_name_shown = True - logging.StreamHandler.emit(self, record) diff --git a/venv/lib/python3.8/site-packages/_pytest/mark/__init__.py b/venv/lib/python3.8/site-packages/_pytest/mark/__init__.py deleted file mode 100644 index b6495dd0..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/mark/__init__.py +++ /dev/null @@ -1,171 +0,0 @@ -""" generic mechanism for marking and selecting python functions. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from .legacy import matchkeyword -from .legacy import matchmark -from .structures import EMPTY_PARAMETERSET_OPTION -from .structures import get_empty_parameterset_mark -from .structures import Mark -from .structures import MARK_GEN -from .structures import MarkDecorator -from .structures import MarkGenerator -from .structures import MarkInfo -from .structures import ParameterSet -from .structures import transfer_markers -from _pytest.config import UsageError - -__all__ = [ - "Mark", - "MarkInfo", - "MarkDecorator", - "MarkGenerator", - "transfer_markers", - "get_empty_parameterset_mark", -] - - -def param(*values, **kw): - """Specify a parameter in `pytest.mark.parametrize`_ calls or - :ref:`parametrized fixtures `. - - .. code-block:: python - - @pytest.mark.parametrize("test_input,expected", [ - ("3+5", 8), - pytest.param("6*9", 42, marks=pytest.mark.xfail), - ]) - def test_eval(test_input, expected): - assert eval(test_input) == expected - - :param values: variable args of the values of the parameter set, in order. - :keyword marks: a single mark or a list of marks to be applied to this parameter set. - :keyword str id: the id to attribute to this parameter set. - """ - return ParameterSet.param(*values, **kw) - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group._addoption( - "-k", - action="store", - dest="keyword", - default="", - metavar="EXPRESSION", - help="only run tests which match the given substring expression. " - "An expression is a python evaluatable expression " - "where all names are substring-matched against test names " - "and their parent classes. Example: -k 'test_method or test_" - "other' matches all test functions and classes whose name " - "contains 'test_method' or 'test_other', while -k 'not test_method' " - "matches those that don't contain 'test_method' in their names. " - "Additionally keywords are matched to classes and functions " - "containing extra names in their 'extra_keyword_matches' set, " - "as well as functions which have names assigned directly to them.", - ) - - group._addoption( - "-m", - action="store", - dest="markexpr", - default="", - metavar="MARKEXPR", - help="only run tests matching given mark expression. 
" - "example: -m 'mark1 and not mark2'.", - ) - - group.addoption( - "--markers", - action="store_true", - help="show markers (builtin, plugin and per-project ones).", - ) - - parser.addini("markers", "markers for test functions", "linelist") - parser.addini(EMPTY_PARAMETERSET_OPTION, "default marker for empty parametersets") - - -def pytest_cmdline_main(config): - import _pytest.config - - if config.option.markers: - config._do_configure() - tw = _pytest.config.create_terminal_writer(config) - for line in config.getini("markers"): - parts = line.split(":", 1) - name = parts[0] - rest = parts[1] if len(parts) == 2 else "" - tw.write("@pytest.mark.%s:" % name, bold=True) - tw.line(rest) - tw.line() - config._ensure_unconfigure() - return 0 - - -pytest_cmdline_main.tryfirst = True - - -def deselect_by_keyword(items, config): - keywordexpr = config.option.keyword.lstrip() - if keywordexpr.startswith("-"): - keywordexpr = "not " + keywordexpr[1:] - selectuntil = False - if keywordexpr[-1:] == ":": - selectuntil = True - keywordexpr = keywordexpr[:-1] - - remaining = [] - deselected = [] - for colitem in items: - if keywordexpr and not matchkeyword(colitem, keywordexpr): - deselected.append(colitem) - else: - if selectuntil: - keywordexpr = None - remaining.append(colitem) - - if deselected: - config.hook.pytest_deselected(items=deselected) - items[:] = remaining - - -def deselect_by_mark(items, config): - matchexpr = config.option.markexpr - if not matchexpr: - return - - remaining = [] - deselected = [] - for item in items: - if matchmark(item, matchexpr): - remaining.append(item) - else: - deselected.append(item) - - if deselected: - config.hook.pytest_deselected(items=deselected) - items[:] = remaining - - -def pytest_collection_modifyitems(items, config): - deselect_by_keyword(items, config) - deselect_by_mark(items, config) - - -def pytest_configure(config): - config._old_mark_config = MARK_GEN._config - if config.option.strict: - MARK_GEN._config = config - - empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) - - if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): - raise UsageError( - "{!s} must be one of skip, xfail or fail_at_collect" - " but it is {!r}".format(EMPTY_PARAMETERSET_OPTION, empty_parameterset) - ) - - -def pytest_unconfigure(config): - MARK_GEN._config = getattr(config, "_old_mark_config", None) diff --git a/venv/lib/python3.8/site-packages/_pytest/mark/evaluate.py b/venv/lib/python3.8/site-packages/_pytest/mark/evaluate.py deleted file mode 100644 index 793bff79..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/mark/evaluate.py +++ /dev/null @@ -1,125 +0,0 @@ -import os -import platform -import sys -import traceback - -import six - -from ..outcomes import fail -from ..outcomes import TEST_OUTCOME - - -def cached_eval(config, expr, d): - if not hasattr(config, "_evalcache"): - config._evalcache = {} - try: - return config._evalcache[expr] - except KeyError: - import _pytest._code - - exprcode = _pytest._code.compile(expr, mode="eval") - config._evalcache[expr] = x = eval(exprcode, d) - return x - - -class MarkEvaluator(object): - def __init__(self, item, name): - self.item = item - self._marks = None - self._mark = None - self._mark_name = name - - def __bool__(self): - # dont cache here to prevent staleness - return bool(self._get_marks()) - - __nonzero__ = __bool__ - - def wasvalid(self): - return not hasattr(self, "exc") - - def _get_marks(self): - return list(self.item.iter_markers(name=self._mark_name)) - - def 
invalidraise(self, exc): - raises = self.get("raises") - if not raises: - return - return not isinstance(exc, raises) - - def istrue(self): - try: - return self._istrue() - except TEST_OUTCOME: - self.exc = sys.exc_info() - if isinstance(self.exc[1], SyntaxError): - msg = [" " * (self.exc[1].offset + 4) + "^"] - msg.append("SyntaxError: invalid syntax") - else: - msg = traceback.format_exception_only(*self.exc[:2]) - fail( - "Error evaluating %r expression\n" - " %s\n" - "%s" % (self._mark_name, self.expr, "\n".join(msg)), - pytrace=False, - ) - - def _getglobals(self): - d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config} - if hasattr(self.item, "obj"): - d.update(self.item.obj.__globals__) - return d - - def _istrue(self): - if hasattr(self, "result"): - return self.result - self._marks = self._get_marks() - - if self._marks: - self.result = False - for mark in self._marks: - self._mark = mark - if "condition" in mark.kwargs: - args = (mark.kwargs["condition"],) - else: - args = mark.args - - for expr in args: - self.expr = expr - if isinstance(expr, six.string_types): - d = self._getglobals() - result = cached_eval(self.item.config, expr, d) - else: - if "reason" not in mark.kwargs: - # XXX better be checked at collection time - msg = ( - "you need to specify reason=STRING " - "when using booleans as conditions." - ) - fail(msg) - result = bool(expr) - if result: - self.result = True - self.reason = mark.kwargs.get("reason", None) - self.expr = expr - return self.result - - if not args: - self.result = True - self.reason = mark.kwargs.get("reason", None) - return self.result - return False - - def get(self, attr, default=None): - if self._mark is None: - return default - return self._mark.kwargs.get(attr, default) - - def getexplanation(self): - expl = getattr(self, "reason", None) or self.get("reason", None) - if not expl: - if not hasattr(self, "expr"): - return "" - else: - return "condition: " + str(self.expr) - return expl diff --git a/venv/lib/python3.8/site-packages/_pytest/mark/legacy.py b/venv/lib/python3.8/site-packages/_pytest/mark/legacy.py deleted file mode 100644 index cea136bf..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/mark/legacy.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -this is a place where we put datastructures used by legacy apis -we hope to remove -""" -import keyword - -import attr - -from _pytest.config import UsageError - - -@attr.s -class MarkMapping(object): - """Provides a local mapping for markers where item access - resolves to True if the marker is present. """ - - own_mark_names = attr.ib() - - @classmethod - def from_item(cls, item): - mark_names = {mark.name for mark in item.iter_markers()} - return cls(mark_names) - - def __getitem__(self, name): - return name in self.own_mark_names - - -class KeywordMapping(object): - """Provides a local mapping for keywords. - Given a list of names, map any substring of one of these names to True. 
- """ - - def __init__(self, names): - self._names = names - - @classmethod - def from_item(cls, item): - mapped_names = set() - - # Add the names of the current item and any parent items - import pytest - - for item in item.listchain(): - if not isinstance(item, pytest.Instance): - mapped_names.add(item.name) - - # Add the names added as extra keywords to current or parent items - for name in item.listextrakeywords(): - mapped_names.add(name) - - # Add the names attached to the current function through direct assignment - if hasattr(item, "function"): - for name in item.function.__dict__: - mapped_names.add(name) - - return cls(mapped_names) - - def __getitem__(self, subname): - for name in self._names: - if subname in name: - return True - return False - - -python_keywords_allowed_list = ["or", "and", "not"] - - -def matchmark(colitem, markexpr): - """Tries to match on any marker names, attached to the given colitem.""" - try: - return eval(markexpr, {}, MarkMapping.from_item(colitem)) - except SyntaxError as e: - raise SyntaxError(str(e) + "\nMarker expression must be valid Python!") - - -def matchkeyword(colitem, keywordexpr): - """Tries to match given keyword expression to given collector item. - - Will match on the name of colitem, including the names of its parents. - Only matches names of items which are either a :class:`Class` or a - :class:`Function`. - Additionally, matches on names in the 'extra_keyword_matches' set of - any item, as well as names directly assigned to test functions. - """ - mapping = KeywordMapping.from_item(colitem) - if " " not in keywordexpr: - # special case to allow for simple "-k pass" and "-k 1.3" - return mapping[keywordexpr] - elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]: - return not mapping[keywordexpr[4:]] - for kwd in keywordexpr.split(): - if keyword.iskeyword(kwd) and kwd not in python_keywords_allowed_list: - raise UsageError( - "Python keyword '{}' not accepted in expressions passed to '-k'".format( - kwd - ) - ) - try: - return eval(keywordexpr, {}, mapping) - except SyntaxError: - raise UsageError("Wrong expression passed to '-k': {}".format(keywordexpr)) diff --git a/venv/lib/python3.8/site-packages/_pytest/mark/structures.py b/venv/lib/python3.8/site-packages/_pytest/mark/structures.py deleted file mode 100644 index b8fa011d..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/mark/structures.py +++ /dev/null @@ -1,471 +0,0 @@ -import inspect -import warnings -from collections import namedtuple -from functools import reduce -from operator import attrgetter - -import attr -from six.moves import map - -from ..compat import getfslineno -from ..compat import MappingMixin -from ..compat import NOTSET -from ..deprecated import MARK_INFO_ATTRIBUTE -from ..deprecated import MARK_PARAMETERSET_UNPACKING -from _pytest.outcomes import fail - - -EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" - - -def alias(name, warning=None): - getter = attrgetter(name) - - def warned(self): - warnings.warn(warning, stacklevel=2) - return getter(self) - - return property(getter if warning is None else warned, doc="alias for " + name) - - -def istestfunc(func): - return ( - hasattr(func, "__call__") - and getattr(func, "__name__", "") != "" - ) - - -def get_empty_parameterset_mark(config, argnames, func): - from ..nodes import Collector - - requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) - if requested_mark in ("", None, "skip"): - mark = MARK_GEN.skip - elif requested_mark == "xfail": - mark = MARK_GEN.xfail(run=False) - 
elif requested_mark == "fail_at_collect": - f_name = func.__name__ - _, lineno = getfslineno(func) - raise Collector.CollectError( - "Empty parameter set in '%s' at line %d" % (f_name, lineno) - ) - else: - raise LookupError(requested_mark) - fs, lineno = getfslineno(func) - reason = "got empty parameter set %r, function %s at %s:%d" % ( - argnames, - func.__name__, - fs, - lineno, - ) - return mark(reason=reason) - - -class ParameterSet(namedtuple("ParameterSet", "values, marks, id")): - @classmethod - def param(cls, *values, **kw): - marks = kw.pop("marks", ()) - if isinstance(marks, MarkDecorator): - marks = (marks,) - else: - assert isinstance(marks, (tuple, list, set)) - - def param_extract_id(id=None): - return id - - id_ = param_extract_id(**kw) - return cls(values, marks, id_) - - @classmethod - def extract_from(cls, parameterset, belonging_definition, legacy_force_tuple=False): - """ - :param parameterset: - a legacy style parameterset that may or may not be a tuple, - and may or may not be wrapped into a mess of mark objects - - :param legacy_force_tuple: - enforce tuple wrapping so single argument tuple values - don't get decomposed and break tests - - :param belonging_definition: the item that we will be extracting the parameters from. - """ - - if isinstance(parameterset, cls): - return parameterset - if not isinstance(parameterset, MarkDecorator) and legacy_force_tuple: - return cls.param(parameterset) - - newmarks = [] - argval = parameterset - while isinstance(argval, MarkDecorator): - newmarks.append( - MarkDecorator(Mark(argval.markname, argval.args[:-1], argval.kwargs)) - ) - argval = argval.args[-1] - assert not isinstance(argval, ParameterSet) - if legacy_force_tuple: - argval = (argval,) - - if newmarks and belonging_definition is not None: - belonging_definition.warn(MARK_PARAMETERSET_UNPACKING) - - return cls(argval, marks=newmarks, id=None) - - @classmethod - def _for_parametrize(cls, argnames, argvalues, func, config, function_definition): - if not isinstance(argnames, (tuple, list)): - argnames = [x.strip() for x in argnames.split(",") if x.strip()] - force_tuple = len(argnames) == 1 - else: - force_tuple = False - parameters = [ - ParameterSet.extract_from( - x, - legacy_force_tuple=force_tuple, - belonging_definition=function_definition, - ) - for x in argvalues - ] - del argvalues - - if parameters: - # check all parameter sets have the correct number of values - for param in parameters: - if len(param.values) != len(argnames): - raise ValueError( - 'In "parametrize" the number of values ({}) must be ' - "equal to the number of names ({})".format( - param.values, argnames - ) - ) - else: - # empty parameter set (likely computed at runtime): create a single - # parameter set with NOTSET values, with the "empty parameter set" mark applied to it - mark = get_empty_parameterset_mark(config, argnames, func) - parameters.append( - ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) - ) - return argnames, parameters - - -@attr.s(frozen=True) -class Mark(object): - #: name of the mark - name = attr.ib(type=str) - #: positional arguments of the mark decorator - args = attr.ib() # type: List[object] - #: keyword arguments of the mark decorator - kwargs = attr.ib() # type: Dict[str, object] - - def combined_with(self, other): - """ - :param other: the mark to combine with - :type other: Mark - :rtype: Mark - - combines by appending args and merging the mappings - """ - assert self.name == other.name - return Mark( - self.name, self.args + other.args, 
dict(self.kwargs, **other.kwargs) - ) - - -@attr.s -class MarkDecorator(object): - """ A decorator for test functions and test classes. When applied - it will create :class:`MarkInfo` objects which may be - :ref:`retrieved by hooks as item keywords `. - MarkDecorator instances are often created like this:: - - mark1 = pytest.mark.NAME # simple MarkDecorator - mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator - - and can then be applied as decorators to test functions:: - - @mark2 - def test_function(): - pass - - When a MarkDecorator instance is called it does the following: - 1. If called with a single class as its only positional argument and no - additional keyword arguments, it attaches itself to the class so it - gets applied automatically to all test cases found in that class. - 2. If called with a single function as its only positional argument and - no additional keyword arguments, it attaches a MarkInfo object to the - function, containing all the arguments already stored internally in - the MarkDecorator. - 3. When called in any other case, it performs a 'fake construction' call, - i.e. it returns a new MarkDecorator instance with the original - MarkDecorator's content updated with the arguments passed to this - call. - - Note: The rules above prevent MarkDecorator objects from storing only a - single function or class reference as their positional argument with no - additional keyword or positional arguments. - - """ - - mark = attr.ib(validator=attr.validators.instance_of(Mark)) - - name = alias("mark.name") - args = alias("mark.args") - kwargs = alias("mark.kwargs") - - @property - def markname(self): - return self.name # for backward-compat (2.4.1 had this attr) - - def __eq__(self, other): - return self.mark == other.mark if isinstance(other, MarkDecorator) else False - - def __repr__(self): - return "<MarkDecorator %r>" % (self.mark,) - - def with_args(self, *args, **kwargs): - """ return a MarkDecorator with extra arguments added - - unlike call this can be used even if the sole argument is a callable/class - - :return: MarkDecorator - """ - - mark = Mark(self.name, args, kwargs) - return self.__class__(self.mark.combined_with(mark)) - - def __call__(self, *args, **kwargs): - """ if passed a single callable argument: decorate it with mark info. - otherwise add *args/**kwargs in-place to mark information. 
""" - if args and not kwargs: - func = args[0] - is_class = inspect.isclass(func) - if len(args) == 1 and (istestfunc(func) or is_class): - if is_class: - store_mark(func, self.mark) - else: - store_legacy_markinfo(func, self.mark) - store_mark(func, self.mark) - return func - return self.with_args(*args, **kwargs) - - -def get_unpacked_marks(obj): - """ - obtain the unpacked marks that are stored on an object - """ - mark_list = getattr(obj, "pytestmark", []) - if not isinstance(mark_list, list): - mark_list = [mark_list] - return normalize_mark_list(mark_list) - - -def normalize_mark_list(mark_list): - """ - normalizes marker decorating helpers to mark objects - - :type mark_list: List[Union[Mark, Markdecorator]] - :rtype: List[Mark] - """ - return [getattr(mark, "mark", mark) for mark in mark_list] # unpack MarkDecorator - - -def store_mark(obj, mark): - """store a Mark on an object - this is used to implement the Mark declarations/decorators correctly - """ - assert isinstance(mark, Mark), mark - # always reassign name to avoid updating pytestmark - # in a reference that was only borrowed - obj.pytestmark = get_unpacked_marks(obj) + [mark] - - -def store_legacy_markinfo(func, mark): - """create the legacy MarkInfo objects and put them onto the function - """ - if not isinstance(mark, Mark): - raise TypeError("got {mark!r} instead of a Mark".format(mark=mark)) - holder = getattr(func, mark.name, None) - if holder is None: - holder = MarkInfo.for_mark(mark) - setattr(func, mark.name, holder) - elif isinstance(holder, MarkInfo): - holder.add_mark(mark) - - -def transfer_markers(funcobj, cls, mod): - """ - this function transfers class level markers and module level markers - into function level markinfo objects - - this is the main reason why marks are so broken - the resolution will involve phasing out function level MarkInfo objects - - """ - for obj in (cls, mod): - for mark in get_unpacked_marks(obj): - if not _marked(funcobj, mark): - store_legacy_markinfo(funcobj, mark) - - -def _marked(func, mark): - """ Returns True if :func: is already marked with :mark:, False otherwise. - This can happen if marker is applied to class and the test file is - invoked more than once. - """ - try: - func_mark = getattr(func, getattr(mark, "combined", mark).name) - except AttributeError: - return False - return any(mark == info.combined for info in func_mark) - - -@attr.s(repr=False) -class MarkInfo(object): - """ Marking object created by :class:`MarkDecorator` instances. """ - - _marks = attr.ib(converter=list) - - @_marks.validator - def validate_marks(self, attribute, value): - for item in value: - if not isinstance(item, Mark): - raise ValueError( - "MarkInfo expects Mark instances, got {!r} ({!r})".format( - item, type(item) - ) - ) - - combined = attr.ib( - repr=False, - default=attr.Factory( - lambda self: reduce(Mark.combined_with, self._marks), takes_self=True - ), - ) - - name = alias("combined.name", warning=MARK_INFO_ATTRIBUTE) - args = alias("combined.args", warning=MARK_INFO_ATTRIBUTE) - kwargs = alias("combined.kwargs", warning=MARK_INFO_ATTRIBUTE) - - @classmethod - def for_mark(cls, mark): - return cls([mark]) - - def __repr__(self): - return "".format(self.combined) - - def add_mark(self, mark): - """ add a MarkInfo with the given args and kwargs. """ - self._marks.append(mark) - self.combined = self.combined.combined_with(mark) - - def __iter__(self): - """ yield MarkInfo objects each relating to a marking-call. 
""" - return map(MarkInfo.for_mark, self._marks) - - -class MarkGenerator(object): - """ Factory for :class:`MarkDecorator` objects - exposed as - a ``pytest.mark`` singleton instance. Example:: - - import pytest - @pytest.mark.slowtest - def test_function(): - pass - - will set a 'slowtest' :class:`MarkInfo` object - on the ``test_function`` object. """ - - _config = None - - def __getattr__(self, name): - if name[0] == "_": - raise AttributeError("Marker name must NOT start with underscore") - if self._config is not None: - self._check(name) - return MarkDecorator(Mark(name, (), {})) - - def _check(self, name): - try: - if name in self._markers: - return - except AttributeError: - pass - self._markers = values = set() - for line in self._config.getini("markers"): - marker = line.split(":", 1)[0] - marker = marker.rstrip() - x = marker.split("(", 1)[0] - values.add(x) - if name not in self._markers: - fail("{!r} not a registered marker".format(name), pytrace=False) - - -MARK_GEN = MarkGenerator() - - -class NodeKeywords(MappingMixin): - def __init__(self, node): - self.node = node - self.parent = node.parent - self._markers = {node.name: True} - - def __getitem__(self, key): - try: - return self._markers[key] - except KeyError: - if self.parent is None: - raise - return self.parent.keywords[key] - - def __setitem__(self, key, value): - self._markers[key] = value - - def __delitem__(self, key): - raise ValueError("cannot delete key in keywords dict") - - def __iter__(self): - seen = self._seen() - return iter(seen) - - def _seen(self): - seen = set(self._markers) - if self.parent is not None: - seen.update(self.parent.keywords) - return seen - - def __len__(self): - return len(self._seen()) - - def __repr__(self): - return "" % (self.node,) - - -@attr.s(cmp=False, hash=False) -class NodeMarkers(object): - """ - internal structure for storing marks belonging to a node - - ..warning:: - - unstable api - - """ - - own_markers = attr.ib(default=attr.Factory(list)) - - def update(self, add_markers): - """update the own markers - """ - self.own_markers.extend(add_markers) - - def find(self, name): - """ - find markers in own nodes or parent nodes - needs a better place - """ - for mark in self.own_markers: - if mark.name == name: - yield mark - - def __iter__(self): - return iter(self.own_markers) diff --git a/venv/lib/python3.8/site-packages/_pytest/monkeypatch.py b/venv/lib/python3.8/site-packages/_pytest/monkeypatch.py deleted file mode 100644 index d536b774..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/monkeypatch.py +++ /dev/null @@ -1,308 +0,0 @@ -""" monkeypatching and mocking functionality. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import re -import sys -import warnings -from contextlib import contextmanager - -import six - -import pytest -from _pytest.fixtures import fixture - -RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$") - - -@fixture -def monkeypatch(): - """The returned ``monkeypatch`` fixture provides these - helper methods to modify objects, dictionaries or os.environ:: - - monkeypatch.setattr(obj, name, value, raising=True) - monkeypatch.delattr(obj, name, raising=True) - monkeypatch.setitem(mapping, name, value) - monkeypatch.delitem(obj, name, raising=True) - monkeypatch.setenv(name, value, prepend=False) - monkeypatch.delenv(name, raising=True) - monkeypatch.syspath_prepend(path) - monkeypatch.chdir(path) - - All modifications will be undone after the requesting - test function or fixture has finished. The ``raising`` - parameter determines if a KeyError or AttributeError - will be raised if the set/deletion operation has no target. - """ - mpatch = MonkeyPatch() - yield mpatch - mpatch.undo() - - -def resolve(name): - # simplified from zope.dottedname - parts = name.split(".") - - used = parts.pop(0) - found = __import__(used) - for part in parts: - used += "." + part - try: - found = getattr(found, part) - except AttributeError: - pass - else: - continue - # we use explicit un-nesting of the handling block in order - # to avoid nested exceptions on python 3 - try: - __import__(used) - except ImportError as ex: - # str is used for py2 vs py3 - expected = str(ex).split()[-1] - if expected == used: - raise - else: - raise ImportError("import error in %s: %s" % (used, ex)) - found = annotated_getattr(found, part, used) - return found - - -def annotated_getattr(obj, name, ann): - try: - obj = getattr(obj, name) - except AttributeError: - raise AttributeError( - "%r object at %s has no attribute %r" % (type(obj).__name__, ann, name) - ) - return obj - - -def derive_importpath(import_path, raising): - if not isinstance(import_path, six.string_types) or "." not in import_path: - raise TypeError("must be absolute import path string, not %r" % (import_path,)) - module, attr = import_path.rsplit(".", 1) - target = resolve(module) - if raising: - annotated_getattr(target, attr, ann=module) - return attr, target - - -class Notset(object): - def __repr__(self): - return "" - - -notset = Notset() - - -class MonkeyPatch(object): - """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes. - """ - - def __init__(self): - self._setattr = [] - self._setitem = [] - self._cwd = None - self._savesyspath = None - - @contextmanager - def context(self): - """ - Context manager that returns a new :class:`MonkeyPatch` object which - undoes any patching done inside the ``with`` block upon exit: - - .. code-block:: python - - import functools - def test_partial(monkeypatch): - with monkeypatch.context() as m: - m.setattr(functools, "partial", 3) - - Useful in situations where it is desired to undo some patches before the test ends, - such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples - of this see `#3290 `_. - """ - m = MonkeyPatch() - try: - yield m - finally: - m.undo() - - def setattr(self, target, name, value=notset, raising=True): - """ Set attribute value on target, memorizing the old value. - By default raise AttributeError if the attribute did not exist. 
- - For convenience you can specify a string as ``target`` which - will be interpreted as a dotted import path, with the last part - being the attribute name. Example: - ``monkeypatch.setattr("os.getcwd", lambda: "/")`` - would set the ``getcwd`` function of the ``os`` module. - - The ``raising`` value determines if the setattr should fail - if the attribute is not already present (defaults to True - which means it will raise). - """ - __tracebackhide__ = True - import inspect - - if value is notset: - if not isinstance(target, six.string_types): - raise TypeError( - "use setattr(target, name, value) or " - "setattr(target, value) with target being a dotted " - "import string" - ) - value = name - name, target = derive_importpath(target, raising) - - oldval = getattr(target, name, notset) - if raising and oldval is notset: - raise AttributeError("%r has no attribute %r" % (target, name)) - - # avoid class descriptors like staticmethod/classmethod - if inspect.isclass(target): - oldval = target.__dict__.get(name, notset) - self._setattr.append((target, name, oldval)) - setattr(target, name, value) - - def delattr(self, target, name=notset, raising=True): - """ Delete attribute ``name`` from ``target``, by default raise - AttributeError if the attribute did not previously exist. - - If no ``name`` is specified and ``target`` is a string - it will be interpreted as a dotted import path with the - last part being the attribute name. - - If ``raising`` is set to False, no exception will be raised if the - attribute is missing. - """ - __tracebackhide__ = True - if name is notset: - if not isinstance(target, six.string_types): - raise TypeError( - "use delattr(target, name) or " - "delattr(target) with target being a dotted " - "import string" - ) - name, target = derive_importpath(target, raising) - - if not hasattr(target, name): - if raising: - raise AttributeError(name) - else: - self._setattr.append((target, name, getattr(target, name, notset))) - delattr(target, name) - - def setitem(self, dic, name, value): - """ Set dictionary entry ``name`` to value. """ - self._setitem.append((dic, name, dic.get(name, notset))) - dic[name] = value - - def delitem(self, dic, name, raising=True): - """ Delete ``name`` from dict. Raise KeyError if it doesn't exist. - - If ``raising`` is set to False, no exception will be raised if the - key is missing. - """ - if name not in dic: - if raising: - raise KeyError(name) - else: - self._setitem.append((dic, name, dic.get(name, notset))) - del dic[name] - - def _warn_if_env_name_is_not_str(self, name): - """On Python 2, warn if the given environment variable name is not a native str (#4056)""" - if six.PY2 and not isinstance(name, str): - warnings.warn( - pytest.PytestWarning( - "Environment variable name {!r} should be str".format(name) - ) - ) - - def setenv(self, name, value, prepend=None): - """ Set environment variable ``name`` to ``value``. 
If ``prepend`` - is a character, read the current environment variable value - and prepend the ``value`` adjoined with the ``prepend`` character.""" - if not isinstance(value, str): - warnings.warn( - pytest.PytestWarning( - "Value of environment variable {name} type should be str, but got " - "{value!r} (type: {type}); converted to str implicitly".format( - name=name, value=value, type=type(value).__name__ - ) - ), - stacklevel=2, - ) - value = str(value) - if prepend and name in os.environ: - value = value + prepend + os.environ[name] - self._warn_if_env_name_is_not_str(name) - self.setitem(os.environ, name, value) - - def delenv(self, name, raising=True): - """ Delete ``name`` from the environment. Raise KeyError if it does - not exist. - - If ``raising`` is set to False, no exception will be raised if the - environment variable is missing. - """ - self._warn_if_env_name_is_not_str(name) - self.delitem(os.environ, name, raising=raising) - - def syspath_prepend(self, path): - """ Prepend ``path`` to ``sys.path`` list of import locations. """ - if self._savesyspath is None: - self._savesyspath = sys.path[:] - sys.path.insert(0, str(path)) - - def chdir(self, path): - """ Change the current working directory to the specified path. - Path can be a string or a py.path.local object. - """ - if self._cwd is None: - self._cwd = os.getcwd() - if hasattr(path, "chdir"): - path.chdir() - else: - os.chdir(path) - - def undo(self): - """ Undo previous changes. This call consumes the - undo stack. Calling it a second time has no effect unless - you do more monkeypatching after the undo call. - - There is generally no need to call `undo()`, since it is - called automatically during tear-down. - - Note that the same `monkeypatch` fixture is used across a - single test function invocation. If `monkeypatch` is used both by - the test function itself and one of the test fixtures, - calling `undo()` will undo all of the changes made in - both functions. - """ - for obj, name, value in reversed(self._setattr): - if value is not notset: - setattr(obj, name, value) - else: - delattr(obj, name) - self._setattr[:] = [] - for dictionary, name, value in reversed(self._setitem): - if value is notset: - try: - del dictionary[name] - except KeyError: - pass # was already deleted, so we have the desired state - else: - dictionary[name] = value - self._setitem[:] = [] - if self._savesyspath is not None: - sys.path[:] = self._savesyspath - self._savesyspath = None - - if self._cwd is not None: - os.chdir(self._cwd) - self._cwd = None diff --git a/venv/lib/python3.8/site-packages/_pytest/nodes.py b/venv/lib/python3.8/site-packages/_pytest/nodes.py deleted file mode 100644 index 032d4e1d..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/nodes.py +++ /dev/null @@ -1,535 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import warnings - -import attr -import py -import six - -import _pytest._code -from _pytest.compat import getfslineno -from _pytest.mark.structures import MarkInfo -from _pytest.mark.structures import NodeKeywords -from _pytest.outcomes import fail - -SEP = "/" - -tracebackcutdir = py.path.local(_pytest.__file__).dirpath() - - -def _splitnode(nodeid): - """Split a nodeid into constituent 'parts'. - - Node IDs are strings, and can be things like: - '' - 'testing/code' - 'testing/code/test_excinfo.py' - 'testing/code/test_excinfo.py::TestFormattedExcinfo::()' - - Return values are lists e.g. 
- [] - ['testing', 'code'] - ['testing', 'code', 'test_excinfo.py'] - ['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()'] - """ - if nodeid == "": - # If there is no root node at all, return an empty list so the caller's logic can remain sane - return [] - parts = nodeid.split(SEP) - # Replace single last element 'test_foo.py::Bar::()' with multiple elements 'test_foo.py', 'Bar', '()' - parts[-1:] = parts[-1].split("::") - return parts - - -def ischildnode(baseid, nodeid): - """Return True if the nodeid is a child node of the baseid. - - E.g. 'foo/bar::Baz::()' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp' - """ - base_parts = _splitnode(baseid) - node_parts = _splitnode(nodeid) - if len(node_parts) < len(base_parts): - return False - return node_parts[: len(base_parts)] == base_parts - - -@attr.s -class _CompatProperty(object): - name = attr.ib() - - def __get__(self, obj, owner): - if obj is None: - return self - - from _pytest.deprecated import COMPAT_PROPERTY - - warnings.warn( - COMPAT_PROPERTY.format(name=self.name, owner=owner.__name__), stacklevel=2 - ) - return getattr(__import__("pytest"), self.name) - - -class Node(object): - """ base class for Collector and Item the test collection tree. - Collector subclasses have children, Items are terminal nodes.""" - - def __init__( - self, name, parent=None, config=None, session=None, fspath=None, nodeid=None - ): - #: a unique name within the scope of the parent node - self.name = name - - #: the parent collector node. - self.parent = parent - - #: the pytest config object - self.config = config or parent.config - - #: the session this node is part of - self.session = session or parent.session - - #: filesystem path where this node was collected from (can be None) - self.fspath = fspath or getattr(parent, "fspath", None) - - #: keywords/markers collected from all scopes - self.keywords = NodeKeywords(self) - - #: the marker objects belonging to this node - self.own_markers = [] - - #: allow adding of extra keywords to use for matching - self.extra_keyword_matches = set() - - # used for storing artificial fixturedefs for direct parametrization - self._name2pseudofixturedef = {} - - if nodeid is not None: - self._nodeid = nodeid - else: - assert parent is not None - self._nodeid = self.parent.nodeid + "::" + self.name - - @property - def ihook(self): - """ fspath sensitive hook proxy used to call pytest hooks""" - return self.session.gethookproxy(self.fspath) - - Module = _CompatProperty("Module") - Class = _CompatProperty("Class") - Instance = _CompatProperty("Instance") - Function = _CompatProperty("Function") - File = _CompatProperty("File") - Item = _CompatProperty("Item") - - def _getcustomclass(self, name): - maybe_compatprop = getattr(type(self), name) - if isinstance(maybe_compatprop, _CompatProperty): - return getattr(__import__("pytest"), name) - else: - from _pytest.deprecated import CUSTOM_CLASS - - cls = getattr(self, name) - self.warn(CUSTOM_CLASS.format(name=name, type_name=type(self).__name__)) - return cls - - def __repr__(self): - return "<%s %r>" % (self.__class__.__name__, getattr(self, "name", None)) - - def warn(self, _code_or_warning=None, message=None, code=None): - """Issue a warning for this item. - - Warnings will be displayed after the test session, unless explicitly suppressed. - - This can be called in two forms: - - **Warning instance** - - This was introduced in pytest 3.8 and uses the standard warning mechanism to issue warnings. - - .. 
code-block:: python - - node.warn(PytestWarning("some message")) - - The warning instance must be a subclass of :class:`pytest.PytestWarning`. - - **code/message (deprecated)** - - This form was used in pytest prior to 3.8 and is considered deprecated. Using this form will emit another - warning about the deprecation: - - .. code-block:: python - - node.warn("CI", "some message") - - :param Union[Warning,str] _code_or_warning: - warning instance or warning code (legacy). This parameter receives an underscore for backward - compatibility with the legacy code/message form, and will be replaced for something - more usual when the legacy form is removed. - - :param Union[str,None] message: message to display when called in the legacy form. - :param str code: code for the warning, in legacy form when using keyword arguments. - :return: - """ - if message is None: - if _code_or_warning is None: - raise ValueError("code_or_warning must be given") - self._std_warn(_code_or_warning) - else: - if _code_or_warning and code: - raise ValueError( - "code_or_warning and code cannot both be passed to this function" - ) - code = _code_or_warning or code - self._legacy_warn(code, message) - - def _legacy_warn(self, code, message): - """ - .. deprecated:: 3.8 - - Use :meth:`Node.std_warn <_pytest.nodes.Node.std_warn>` instead. - - Generate a warning with the given code and message for this item. - """ - from _pytest.deprecated import NODE_WARN - - self._std_warn(NODE_WARN) - - assert isinstance(code, str) - fslocation = get_fslocation_from_item(self) - self.ihook.pytest_logwarning.call_historic( - kwargs=dict( - code=code, message=message, nodeid=self.nodeid, fslocation=fslocation - ) - ) - - def _std_warn(self, warning): - """Issue a warning for this item. - - Warnings will be displayed after the test session, unless explicitly suppressed - - :param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning. - - :raise ValueError: if ``warning`` instance is not a subclass of PytestWarning. - """ - from _pytest.warning_types import PytestWarning - - if not isinstance(warning, PytestWarning): - raise ValueError( - "warning must be an instance of PytestWarning or subclass, got {!r}".format( - warning - ) - ) - path, lineno = get_fslocation_from_item(self) - warnings.warn_explicit( - warning, - category=None, - filename=str(path), - lineno=lineno + 1 if lineno is not None else None, - ) - - # methods for ordering nodes - @property - def nodeid(self): - """ a ::-separated string denoting its collection tree address. """ - return self._nodeid - - def __hash__(self): - return hash(self.nodeid) - - def setup(self): - pass - - def teardown(self): - pass - - def listchain(self): - """ return list of all parent collectors up to self, - starting from root of collection tree. """ - chain = [] - item = self - while item is not None: - chain.append(item) - item = item.parent - chain.reverse() - return chain - - def add_marker(self, marker, append=True): - """dynamically add a marker object to the node. - - :type marker: ``str`` or ``pytest.mark.*`` object - :param marker: - ``append=True`` whether to append the marker, - if ``False`` insert at position ``0``. 
- """ - from _pytest.mark import MarkDecorator, MARK_GEN - - if isinstance(marker, six.string_types): - marker = getattr(MARK_GEN, marker) - elif not isinstance(marker, MarkDecorator): - raise ValueError("is not a string or pytest.mark.* Marker") - self.keywords[marker.name] = marker - if append: - self.own_markers.append(marker.mark) - else: - self.own_markers.insert(0, marker.mark) - - def iter_markers(self, name=None): - """ - :param name: if given, filter the results by the name attribute - - iterate over all markers of the node - """ - return (x[1] for x in self.iter_markers_with_node(name=name)) - - def iter_markers_with_node(self, name=None): - """ - :param name: if given, filter the results by the name attribute - - iterate over all markers of the node - returns sequence of tuples (node, mark) - """ - for node in reversed(self.listchain()): - for mark in node.own_markers: - if name is None or getattr(mark, "name", None) == name: - yield node, mark - - def get_closest_marker(self, name, default=None): - """return the first marker matching the name, from closest (for example function) to farther level (for example - module level). - - :param default: fallback return value of no marker was found - :param name: name to filter by - """ - return next(self.iter_markers(name=name), default) - - def get_marker(self, name): - """ get a marker object from this node or None if - the node doesn't have a marker with that name. - - .. deprecated:: 3.6 - This function has been deprecated in favor of - :meth:`Node.get_closest_marker <_pytest.nodes.Node.get_closest_marker>` and - :meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>`, see :ref:`update marker code` - for more details. - """ - markers = list(self.iter_markers(name=name)) - if markers: - return MarkInfo(markers) - - def listextrakeywords(self): - """ Return a set of all extra keywords in self and any parents.""" - extra_keywords = set() - for item in self.listchain(): - extra_keywords.update(item.extra_keyword_matches) - return extra_keywords - - def listnames(self): - return [x.name for x in self.listchain()] - - def addfinalizer(self, fin): - """ register a function to be called when this node is finalized. - - This method can only be called when this node is active - in a setup chain, for example during self.setup(). - """ - self.session._setupstate.addfinalizer(fin, self) - - def getparent(self, cls): - """ get the next parent node (including ourself) - which is an instance of the given class""" - current = self - while current and not isinstance(current, cls): - current = current.parent - return current - - def _prunetraceback(self, excinfo): - pass - - def _repr_failure_py(self, excinfo, style=None): - if excinfo.errisinstance(fail.Exception): - if not excinfo.value.pytrace: - return six.text_type(excinfo.value) - fm = self.session._fixturemanager - if excinfo.errisinstance(fm.FixtureLookupError): - return excinfo.value.formatrepr() - tbfilter = True - if self.config.option.fulltrace: - style = "long" - else: - tb = _pytest._code.Traceback([excinfo.traceback[-1]]) - self._prunetraceback(excinfo) - if len(excinfo.traceback) == 0: - excinfo.traceback = tb - tbfilter = False # prunetraceback already does it - if style == "auto": - style = "long" - # XXX should excinfo.getrepr record all data and toterminal() process it? 
-    def _repr_failure_py(self, excinfo, style=None):
-        if excinfo.errisinstance(fail.Exception):
-            if not excinfo.value.pytrace:
-                return six.text_type(excinfo.value)
-        fm = self.session._fixturemanager
-        if excinfo.errisinstance(fm.FixtureLookupError):
-            return excinfo.value.formatrepr()
-        tbfilter = True
-        if self.config.option.fulltrace:
-            style = "long"
-        else:
-            tb = _pytest._code.Traceback([excinfo.traceback[-1]])
-            self._prunetraceback(excinfo)
-            if len(excinfo.traceback) == 0:
-                excinfo.traceback = tb
-            tbfilter = False  # prunetraceback already does it
-            if style == "auto":
-                style = "long"
-        # XXX should excinfo.getrepr record all data and toterminal() process it?
-        if style is None:
-            if self.config.option.tbstyle == "short":
-                style = "short"
-            else:
-                style = "long"
-
-        if self.config.option.verbose > 1:
-            truncate_locals = False
-        else:
-            truncate_locals = True
-
-        try:
-            os.getcwd()
-            abspath = False
-        except OSError:
-            abspath = True
-
-        return excinfo.getrepr(
-            funcargs=True,
-            abspath=abspath,
-            showlocals=self.config.option.showlocals,
-            style=style,
-            tbfilter=tbfilter,
-            truncate_locals=truncate_locals,
-        )
-
-    repr_failure = _repr_failure_py
-
-
-def get_fslocation_from_item(item):
-    """Tries to extract the actual location from an item, depending on available attributes:
-
-    * "location": a triple (path, lineno, domain), of which only path and lineno are used
-    * "obj": a Python object that the item wraps.
-    * "fspath": just a path
-
-    :rtype: a tuple of (str|LocalPath, int) with filename and line number.
-    """
-    result = getattr(item, "location", None)
-    if result is not None:
-        return result[:2]
-    obj = getattr(item, "obj", None)
-    if obj is not None:
-        return getfslineno(obj)
-    return getattr(item, "fspath", "unknown location"), -1
-
-
-class Collector(Node):
-    """ Collector instances create children through collect()
-    and thus iteratively build a tree.
-    """
-
-    class CollectError(Exception):
-        """ an error during collection, contains a custom message. """
-
-    def collect(self):
-        """ returns a list of children (items and collectors)
-        for this collection node.
-        """
-        raise NotImplementedError("abstract")
-
-    def repr_failure(self, excinfo):
-        """ represent a collection failure. """
-        if excinfo.errisinstance(self.CollectError):
-            exc = excinfo.value
-            return str(exc.args[0])
-        return self._repr_failure_py(excinfo, style="short")
-
-    def _prunetraceback(self, excinfo):
-        if hasattr(self, "fspath"):
-            traceback = excinfo.traceback
-            ntraceback = traceback.cut(path=self.fspath)
-            if ntraceback == traceback:
-                ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
-            excinfo.traceback = ntraceback.filter()
-
-
-def _check_initialpaths_for_relpath(session, fspath):
-    for initial_path in session._initialpaths:
-        if fspath.common(initial_path) == initial_path:
-            return fspath.relto(initial_path)
-
-
-class FSCollector(Collector):
-    def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None):
-        fspath = py.path.local(fspath)  # xxx only for test_resultlog.py?
-        name = fspath.basename
-        if parent is not None:
-            rel = fspath.relto(parent.fspath)
-            if rel:
-                name = rel
-            name = name.replace(os.sep, SEP)
-        self.fspath = fspath
-
-        session = session or parent.session
-
-        if nodeid is None:
-            nodeid = self.fspath.relto(session.config.rootdir)
-
-            if not nodeid:
-                nodeid = _check_initialpaths_for_relpath(session, fspath)
-            if nodeid and os.sep != SEP:
-                nodeid = nodeid.replace(os.sep, SEP)
-
-        super(FSCollector, self).__init__(
-            name, parent, config, session, nodeid=nodeid, fspath=fspath
-        )
-
-
-class File(FSCollector):
-    """ base class for collecting tests from a file. """
-
-
-class Item(Node):
-    """ a basic test invocation item. Note that for a single function
-    there might be multiple test invocation items.
-    """
-
-    nextitem = None
-
-    def __init__(self, name, parent=None, config=None, session=None, nodeid=None):
-        super(Item, self).__init__(name, parent, config, session, nodeid=nodeid)
-        self._report_sections = []
-
-        #: user properties is a list of tuples (name, value) that holds user
-        #: defined properties for this test.
- self.user_properties = [] - - def add_report_section(self, when, key, content): - """ - Adds a new report section, similar to what's done internally to add stdout and - stderr captured output:: - - item.add_report_section("call", "stdout", "report section contents") - - :param str when: - One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. - :param str key: - Name of the section, can be customized at will. Pytest uses ``"stdout"`` and - ``"stderr"`` internally. - - :param str content: - The full contents as a string. - """ - if content: - self._report_sections.append((when, key, content)) - - def reportinfo(self): - return self.fspath, None, "" - - @property - def location(self): - try: - return self._location - except AttributeError: - location = self.reportinfo() - # bestrelpath is a quite slow function - cache = self.config.__dict__.setdefault("_bestrelpathcache", {}) - try: - fspath = cache[location[0]] - except KeyError: - fspath = self.session.fspath.bestrelpath(location[0]) - cache[location[0]] = fspath - location = (fspath, location[1], str(location[2])) - self._location = location - return location diff --git a/venv/lib/python3.8/site-packages/_pytest/nose.py b/venv/lib/python3.8/site-packages/_pytest/nose.py deleted file mode 100644 index 4bfa9c58..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/nose.py +++ /dev/null @@ -1,76 +0,0 @@ -""" run test suites written for nose. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys - -from _pytest import python -from _pytest import runner -from _pytest import unittest -from _pytest.config import hookimpl - - -def get_skip_exceptions(): - skip_classes = set() - for module_name in ("unittest", "unittest2", "nose"): - mod = sys.modules.get(module_name) - if hasattr(mod, "SkipTest"): - skip_classes.add(mod.SkipTest) - return tuple(skip_classes) - - -def pytest_runtest_makereport(item, call): - if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): - # let's substitute the excinfo with a pytest.skip one - call2 = call.__class__(lambda: runner.skip(str(call.excinfo.value)), call.when) - call.excinfo = call2.excinfo - - -@hookimpl(trylast=True) -def pytest_runtest_setup(item): - if is_potential_nosetest(item): - if isinstance(item.parent, python.Generator): - gen = item.parent - if not hasattr(gen, "_nosegensetup"): - call_optional(gen.obj, "setup") - if isinstance(gen.parent, python.Instance): - call_optional(gen.parent.obj, "setup") - gen._nosegensetup = True - if not call_optional(item.obj, "setup"): - # call module level setup if there is no object level one - call_optional(item.parent.obj, "setup") - # XXX this implies we only call teardown when setup worked - item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) - - -def teardown_nose(item): - if is_potential_nosetest(item): - if not call_optional(item.obj, "teardown"): - call_optional(item.parent.obj, "teardown") - # if hasattr(item.parent, '_nosegensetup'): - # #call_optional(item._nosegensetup, 'teardown') - # del item.parent._nosegensetup - - -def pytest_make_collect_report(collector): - if isinstance(collector, python.Generator): - call_optional(collector.obj, "setup") - - -def is_potential_nosetest(item): - # extra check needed since we do not do nose style setup/teardown - # on direct unittest style classes - return isinstance(item, python.Function) and not isinstance( - item, unittest.TestCaseFunction - ) - - -def call_optional(obj, 
name):
-    method = getattr(obj, name, None)
-    isfixture = hasattr(method, "_pytestfixturefunction")
-    if method is not None and not isfixture and callable(method):
-        # If there are any problems, allow the exception to propagate
-        # rather than silently ignoring it
-        method()
-        return True
diff --git a/venv/lib/python3.8/site-packages/_pytest/outcomes.py b/venv/lib/python3.8/site-packages/_pytest/outcomes.py
deleted file mode 100644
index cd08c0d4..00000000
--- a/venv/lib/python3.8/site-packages/_pytest/outcomes.py
+++ /dev/null
@@ -1,182 +0,0 @@
-"""
-exception classes and constants handling test outcomes
-as well as functions creating them
-"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import sys
-
-
-class OutcomeException(BaseException):
-    """ OutcomeException and its subclass instances indicate and
-    contain info about test and collection outcomes.
-    """
-
-    def __init__(self, msg=None, pytrace=True):
-        BaseException.__init__(self, msg)
-        self.msg = msg
-        self.pytrace = pytrace
-
-    def __repr__(self):
-        if self.msg:
-            val = self.msg
-            if isinstance(val, bytes):
-                val = val.decode("UTF-8", errors="replace")
-            return val
-        return "<%s instance>" % (self.__class__.__name__,)
-
-    __str__ = __repr__
-
-
-TEST_OUTCOME = (OutcomeException, Exception)
-
-
-class Skipped(OutcomeException):
-    # XXX hackish: on 3k we fake to live in the builtins
-    # in order to have Skipped exception printing shorter/nicer
-    __module__ = "builtins"
-
-    def __init__(self, msg=None, pytrace=True, allow_module_level=False):
-        OutcomeException.__init__(self, msg=msg, pytrace=pytrace)
-        self.allow_module_level = allow_module_level
-
-
-class Failed(OutcomeException):
-    """ raised from an explicit call to pytest.fail() """
-
-    __module__ = "builtins"
-
-
-class Exit(KeyboardInterrupt):
-    """ raised for immediate program exits (no tracebacks/summaries)"""
-
-    def __init__(self, msg="unknown reason", returncode=None):
-        self.msg = msg
-        self.returncode = returncode
-        KeyboardInterrupt.__init__(self, msg)
-
-
-# exposed helper methods
-
-
-def exit(msg, returncode=None):
-    """
-    Exit the testing process as if KeyboardInterrupt was triggered.
-
-    :param str msg: message to display upon exit.
-    :param int returncode: return code to be used when exiting pytest.
-    """
-    __tracebackhide__ = True
-    raise Exit(msg, returncode)
-
-
-exit.Exception = Exit
-
-
-def skip(msg="", **kwargs):
-    """
-    Skip an executing test with the given message.
-
-    This function should be called only during testing (setup, call or teardown) or
-    during collection by using the ``allow_module_level`` flag.
-
-    :kwarg bool allow_module_level: allows this function to be called at
-        module level, skipping the rest of the module. Defaults to False.
-
-    .. note::
-        It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be
-        skipped under certain conditions like mismatching platforms or
-        dependencies.
-    """
-    __tracebackhide__ = True
-    allow_module_level = kwargs.pop("allow_module_level", False)
-    if kwargs:
-        keys = list(kwargs.keys())
-        raise TypeError("unexpected keyword arguments: {}".format(keys))
-    raise Skipped(msg=msg, allow_module_level=allow_module_level)
-
-
-skip.Exception = Skipped
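``skip`` raises the ``Skipped`` outcome defined above. A sketch of both call sites; ``have_feature`` is a hypothetical helper standing in for a real availability check:

.. code-block:: python

    import sys
    import pytest

    if sys.platform == "win32":
        # module-level use must opt in explicitly
        pytest.skip("POSIX-only tests", allow_module_level=True)

    def test_feature():
        if not have_feature():  # hypothetical availability check
            pytest.skip("feature not available here")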
-def fail(msg="", pytrace=True):
-    """
-    Explicitly fail an executing test with the given message.
-
-    :param str msg: the message to show the user as reason for the failure.
-    :param bool pytrace: if false the msg represents the full failure information and no
-        python traceback will be reported.
-    """
-    __tracebackhide__ = True
-    raise Failed(msg=msg, pytrace=pytrace)
-
-
-fail.Exception = Failed
-
-
-class XFailed(fail.Exception):
-    """ raised from an explicit call to pytest.xfail() """
-
-
-def xfail(reason=""):
-    """
-    Imperatively xfail an executing test or setup functions with the given reason.
-
-    This function should be called only during testing (setup, call or teardown).
-
-    .. note::
-        It is better to use the :ref:`pytest.mark.xfail ref` marker when possible to declare a test to be
-        xfailed under certain conditions like known bugs or missing features.
-    """
-    __tracebackhide__ = True
-    raise XFailed(reason)
-
-
-xfail.Exception = XFailed
-
-
-def importorskip(modname, minversion=None):
-    """ return the imported module if it has at least "minversion" as its
-    __version__ attribute. If no minversion is specified, a skip
-    is only triggered if the module cannot be imported.
-    """
-    import warnings
-
-    __tracebackhide__ = True
-    compile(modname, "", "eval")  # to catch syntax errors
-    should_skip = False
-
-    with warnings.catch_warnings():
-        # make sure to ignore ImportWarnings that might happen because
-        # of existing directories with the same name we're trying to
-        # import but without a __init__.py file
-        warnings.simplefilter("ignore")
-        try:
-            __import__(modname)
-        except ImportError:
-            # Do not raise chained exception here (#1485)
-            should_skip = True
-    if should_skip:
-        raise Skipped("could not import %r" % (modname,), allow_module_level=True)
-    mod = sys.modules[modname]
-    if minversion is None:
-        return mod
-    verattr = getattr(mod, "__version__", None)
-    if minversion is not None:
-        try:
-            from pkg_resources import parse_version as pv
-        except ImportError:
-            raise Skipped(
-                "we have a required version for %r but can not import "
-                "pkg_resources to parse version strings." % (modname,),
-                allow_module_level=True,
-            )
-        if verattr is None or pv(verattr) < pv(minversion):
-            raise Skipped(
-                "module %r has __version__ %r, required is: %r"
-                % (modname, verattr, minversion),
-                allow_module_level=True,
-            )
-    return mod
diff --git a/venv/lib/python3.8/site-packages/_pytest/pastebin.py b/venv/lib/python3.8/site-packages/_pytest/pastebin.py
deleted file mode 100644
index 9559e326..00000000
--- a/venv/lib/python3.8/site-packages/_pytest/pastebin.py
+++ /dev/null
@@ -1,113 +0,0 @@
-""" submit failure or test session information to a pastebin service.
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys -import tempfile - -import six - -import pytest - - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting") - group._addoption( - "--pastebin", - metavar="mode", - action="store", - dest="pastebin", - default=None, - choices=["failed", "all"], - help="send failed|all info to bpaste.net pastebin service.", - ) - - -@pytest.hookimpl(trylast=True) -def pytest_configure(config): - if config.option.pastebin == "all": - tr = config.pluginmanager.getplugin("terminalreporter") - # if no terminal reporter plugin is present, nothing we can do here; - # this can happen when this function executes in a slave node - # when using pytest-xdist, for example - if tr is not None: - # pastebin file will be utf-8 encoded binary file - config._pastebinfile = tempfile.TemporaryFile("w+b") - oldwrite = tr._tw.write - - def tee_write(s, **kwargs): - oldwrite(s, **kwargs) - if isinstance(s, six.text_type): - s = s.encode("utf-8") - config._pastebinfile.write(s) - - tr._tw.write = tee_write - - -def pytest_unconfigure(config): - if hasattr(config, "_pastebinfile"): - # get terminal contents and delete file - config._pastebinfile.seek(0) - sessionlog = config._pastebinfile.read() - config._pastebinfile.close() - del config._pastebinfile - # undo our patching in the terminal reporter - tr = config.pluginmanager.getplugin("terminalreporter") - del tr._tw.__dict__["write"] - # write summary - tr.write_sep("=", "Sending information to Paste Service") - pastebinurl = create_new_paste(sessionlog) - tr.write_line("pastebin session-log: %s\n" % pastebinurl) - - -def create_new_paste(contents): - """ - Creates a new paste using bpaste.net service. 
- - :contents: paste contents as utf-8 encoded bytes - :returns: url to the pasted contents - """ - import re - - if sys.version_info < (3, 0): - from urllib import urlopen, urlencode - else: - from urllib.request import urlopen - from urllib.parse import urlencode - - params = { - "code": contents, - "lexer": "python3" if sys.version_info[0] == 3 else "python", - "expiry": "1week", - } - url = "https://bpaste.net" - response = urlopen(url, data=urlencode(params).encode("ascii")).read() - m = re.search(r'href="/raw/(\w+)"', response.decode("utf-8")) - if m: - return "%s/show/%s" % (url, m.group(1)) - else: - return "bad response: " + response - - -def pytest_terminal_summary(terminalreporter): - import _pytest.config - - if terminalreporter.config.option.pastebin != "failed": - return - tr = terminalreporter - if "failed" in tr.stats: - terminalreporter.write_sep("=", "Sending information to Paste Service") - for rep in terminalreporter.stats.get("failed"): - try: - msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc - except AttributeError: - msg = tr._getfailureheadline(rep) - tw = _pytest.config.create_terminal_writer( - terminalreporter.config, stringio=True - ) - rep.toterminal(tw) - s = tw.stringio.getvalue() - assert len(s) - pastebinurl = create_new_paste(s) - tr.write_line("%s --> %s" % (msg, pastebinurl)) diff --git a/venv/lib/python3.8/site-packages/_pytest/pathlib.py b/venv/lib/python3.8/site-packages/_pytest/pathlib.py deleted file mode 100644 index 5ecdd602..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/pathlib.py +++ /dev/null @@ -1,320 +0,0 @@ -import atexit -import errno -import fnmatch -import itertools -import operator -import os -import shutil -import sys -import uuid -from functools import reduce -from os.path import expanduser -from os.path import expandvars -from os.path import isabs -from os.path import sep -from posixpath import sep as posix_sep - -import six -from six.moves import map - -from .compat import PY36 - - -if PY36: - from pathlib import Path, PurePath -else: - from pathlib2 import Path, PurePath - -__all__ = ["Path", "PurePath"] - - -LOCK_TIMEOUT = 60 * 60 * 3 - -get_lock_path = operator.methodcaller("joinpath", ".lock") - - -def ensure_reset_dir(path): - """ - ensures the given path is an empty directory - """ - if path.exists(): - rmtree(path, force=True) - path.mkdir() - - -def rmtree(path, force=False): - if force: - # NOTE: ignore_errors might leave dead folders around. - # Python needs a rm -rf as a followup. 
-        shutil.rmtree(str(path), ignore_errors=True)
-    else:
-        shutil.rmtree(str(path))
-
-
-def find_prefixed(root, prefix):
-    """finds all elements in root that begin with the prefix, case-insensitively"""
-    l_prefix = prefix.lower()
-    for x in root.iterdir():
-        if x.name.lower().startswith(l_prefix):
-            yield x
-
-
-def extract_suffixes(iter, prefix):
-    """
-    :param iter: iterator over path names
-    :param prefix: expected prefix of the path names
-    :returns: the parts of the paths following the prefix
-    """
-    p_len = len(prefix)
-    for p in iter:
-        yield p.name[p_len:]
-
-
-def find_suffixes(root, prefix):
-    """combines find_prefixed and extract_suffixes
-    """
-    return extract_suffixes(find_prefixed(root, prefix), prefix)
-
-
-def parse_num(maybe_num):
-    """parses number path suffixes, returns -1 on error"""
-    try:
-        return int(maybe_num)
-    except ValueError:
-        return -1
-
-
-if six.PY2:
-
-    def _max(iterable, default):
-        """needed due to python2.7 lacking the default argument for max"""
-        return reduce(max, iterable, default)
-
-
-else:
-    _max = max
-
-
-def _force_symlink(root, target, link_to):
-    """helper to create the current symlink
-
-    it's full of race conditions that are reasonably ok to ignore
-    for the context of best effort linking to the latest testrun
-
-    the presumption being that in case of much parallelism
-    the inaccuracy is going to be acceptable
-    """
-    current_symlink = root.joinpath(target)
-    try:
-        current_symlink.unlink()
-    except OSError:
-        pass
-    try:
-        current_symlink.symlink_to(link_to)
-    except Exception:
-        pass
-
-
-def make_numbered_dir(root, prefix):
-    """create a directory with an increased number as suffix for the given prefix"""
-    for i in range(10):
-        # try up to 10 times to create the folder
-        max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1)
-        new_number = max_existing + 1
-        new_path = root.joinpath("{}{}".format(prefix, new_number))
-        try:
-            new_path.mkdir()
-        except Exception:
-            pass
-        else:
-            _force_symlink(root, prefix + "current", new_path)
-            return new_path
-    else:
-        raise EnvironmentError(
-            "could not create numbered dir with prefix "
-            "{prefix} in {root} after 10 tries".format(prefix=prefix, root=root)
-        )
-
-
-def create_cleanup_lock(p):
-    """creates a lock to prevent premature folder cleanup"""
-    lock_path = get_lock_path(p)
-    try:
-        fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
-    except OSError as e:
-        if e.errno == errno.EEXIST:
-            six.raise_from(
-                EnvironmentError("cannot create lockfile in {path}".format(path=p)), e
-            )
-        else:
-            raise
-    else:
-        pid = os.getpid()
-        spid = str(pid)
-        if not isinstance(spid, bytes):
-            spid = spid.encode("ascii")
-        os.write(fd, spid)
-        os.close(fd)
-        if not lock_path.is_file():
-            raise EnvironmentError("lock path got renamed after successful creation")
-        return lock_path
-
-
-def register_cleanup_lock_removal(lock_path, register=atexit.register):
-    """registers a cleanup function for removing a lock, by default on atexit"""
-    pid = os.getpid()
-
-    def cleanup_on_exit(lock_path=lock_path, original_pid=pid):
-        current_pid = os.getpid()
-        if current_pid != original_pid:
-            # fork
-            return
-        try:
-            lock_path.unlink()
-        except (OSError, IOError):
-            pass
-
-    return register(cleanup_on_exit)
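Together with ``_force_symlink``, ``make_numbered_dir`` above produces the ``prefix0``, ``prefix1``, ... layout pytest uses for its temporary directories. A sketch of the observable behaviour, assuming this module's ``make_numbered_dir`` is importable; the ``-current`` link is best effort and may be absent where symlinks are unsupported:

.. code-block:: python

    import tempfile
    from pathlib import Path

    root = Path(tempfile.mkdtemp())
    first = make_numbered_dir(root, "run-")
    second = make_numbered_dir(root, "run-")
    assert first.name == "run-0" and second.name == "run-1"
    # best-effort convenience link maintained by _force_symlink
    current = root / "run-current"
    if current.is_symlink():
        assert current.resolve() == second.resolve()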
parent.joinpath("garbage-{}".format(uuid.uuid4())) - path.rename(garbage) - rmtree(garbage, force=True) - except (OSError, EnvironmentError): - # known races: - # * other process did a cleanup at the same time - # * deletable folder was found - # * process cwd (Windows) - return - finally: - # if we created the lock, ensure we remove it even if we failed - # to properly remove the numbered dir - if lock_path is not None: - try: - lock_path.unlink() - except (OSError, IOError): - pass - - -def ensure_deletable(path, consider_lock_dead_if_created_before): - """checks if a lock exists and breaks it if its considered dead""" - if path.is_symlink(): - return False - lock = get_lock_path(path) - if not lock.exists(): - return True - try: - lock_time = lock.stat().st_mtime - except Exception: - return False - else: - if lock_time < consider_lock_dead_if_created_before: - lock.unlink() - return True - else: - return False - - -def try_cleanup(path, consider_lock_dead_if_created_before): - """tries to cleanup a folder if we can ensure it's deletable""" - if ensure_deletable(path, consider_lock_dead_if_created_before): - maybe_delete_a_numbered_dir(path) - - -def cleanup_candidates(root, prefix, keep): - """lists candidates for numbered directories to be removed - follows py.path""" - max_existing = _max(map(parse_num, find_suffixes(root, prefix)), default=-1) - max_delete = max_existing - keep - paths = find_prefixed(root, prefix) - paths, paths2 = itertools.tee(paths) - numbers = map(parse_num, extract_suffixes(paths2, prefix)) - for path, number in zip(paths, numbers): - if number <= max_delete: - yield path - - -def cleanup_numbered_dir(root, prefix, keep, consider_lock_dead_if_created_before): - """cleanup for lock driven numbered directories""" - for path in cleanup_candidates(root, prefix, keep): - try_cleanup(path, consider_lock_dead_if_created_before) - for path in root.glob("garbage-*"): - try_cleanup(path, consider_lock_dead_if_created_before) - - -def make_numbered_dir_with_cleanup(root, prefix, keep, lock_timeout): - """creates a numbered dir with a cleanup lock and removes old ones""" - e = None - for i in range(10): - try: - p = make_numbered_dir(root, prefix) - lock_path = create_cleanup_lock(p) - register_cleanup_lock_removal(lock_path) - except Exception as exc: - e = exc - else: - consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout - cleanup_numbered_dir( - root=root, - prefix=prefix, - keep=keep, - consider_lock_dead_if_created_before=consider_lock_dead_if_created_before, - ) - return p - assert e is not None - raise e - - -def resolve_from_str(input, root): - assert not isinstance(input, Path), "would break on py2" - root = Path(root) - input = expanduser(input) - input = expandvars(input) - if isabs(input): - return Path(input) - else: - return root.joinpath(input) - - -def fnmatch_ex(pattern, path): - """FNMatcher port from py.path.common which works with PurePath() instances. - - The difference between this algorithm and PurePath.match() is that the latter matches "**" glob expressions - for each part of the path, while this algorithm uses the whole path instead. - - For example: - "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py" with this algorithm, but not with - PurePath.match(). - - This algorithm was ported to keep backward-compatibility with existing settings which assume paths match according - this logic. 
- - References: - * https://bugs.python.org/issue29249 - * https://bugs.python.org/issue34731 - """ - path = PurePath(path) - iswin32 = sys.platform.startswith("win") - - if iswin32 and sep not in pattern and posix_sep in pattern: - # Running on Windows, the pattern has no Windows path separators, - # and the pattern has one or more Posix path separators. Replace - # the Posix path separators with the Windows path separator. - pattern = pattern.replace(posix_sep, sep) - - if sep not in pattern: - name = path.name - else: - name = six.text_type(path) - return fnmatch.fnmatch(name, pattern) - - -def parts(s): - parts = s.split(sep) - return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))} diff --git a/venv/lib/python3.8/site-packages/_pytest/pytester.py b/venv/lib/python3.8/site-packages/_pytest/pytester.py deleted file mode 100644 index 8d5a6b14..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/pytester.py +++ /dev/null @@ -1,1385 +0,0 @@ -"""(disabled by default) support for testing pytest and pytest plugins.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import codecs -import gc -import os -import platform -import re -import subprocess -import sys -import time -import traceback -from fnmatch import fnmatch -from weakref import WeakKeyDictionary - -import py -import six - -import pytest -from _pytest._code import Source -from _pytest.assertion.rewrite import AssertionRewritingHook -from _pytest.capture import MultiCapture -from _pytest.capture import SysCapture -from _pytest.compat import safe_str -from _pytest.main import EXIT_INTERRUPTED -from _pytest.main import EXIT_OK -from _pytest.main import Session -from _pytest.pathlib import Path - -IGNORE_PAM = [ # filenames added when obtaining details about the current user - u"/var/lib/sss/mc/passwd" -] - - -def pytest_addoption(parser): - parser.addoption( - "--lsof", - action="store_true", - dest="lsof", - default=False, - help="run FD checks if lsof is available", - ) - - parser.addoption( - "--runpytest", - default="inprocess", - dest="runpytest", - choices=("inprocess", "subprocess"), - help=( - "run pytest sub runs in tests using an 'inprocess' " - "or 'subprocess' (python -m main) method" - ), - ) - - parser.addini( - "pytester_example_dir", help="directory to take the pytester example files from" - ) - - -def pytest_configure(config): - if config.getvalue("lsof"): - checker = LsofFdLeakChecker() - if checker.matching_platform(): - config.pluginmanager.register(checker) - - -def raise_on_kwargs(kwargs): - if kwargs: - raise TypeError("Unexpected arguments: {}".format(", ".join(sorted(kwargs)))) - - -class LsofFdLeakChecker(object): - def get_open_files(self): - out = self._exec_lsof() - open_files = self._parse_lsof_output(out) - return open_files - - def _exec_lsof(self): - pid = os.getpid() - return py.process.cmdexec("lsof -Ffn0 -p %d" % pid) - - def _parse_lsof_output(self, out): - def isopen(line): - return line.startswith("f") and ( - "deleted" not in line - and "mem" not in line - and "txt" not in line - and "cwd" not in line - ) - - open_files = [] - - for line in out.split("\n"): - if isopen(line): - fields = line.split("\0") - fd = fields[0][1:] - filename = fields[1][1:] - if filename in IGNORE_PAM: - continue - if filename.startswith("/"): - open_files.append((fd, filename)) - - return open_files - - def matching_platform(self): - try: - py.process.cmdexec("lsof -v") - except (py.process.cmdexec.Error, UnicodeDecodeError): - # cmdexec 
may raise UnicodeDecodeError on Windows systems with
-            # locale other than English:
-            # https://bitbucket.org/pytest-dev/py/issues/66
-            return False
-        else:
-            return True
-
-    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
-    def pytest_runtest_protocol(self, item):
-        lines1 = self.get_open_files()
-        yield
-        if hasattr(sys, "pypy_version_info"):
-            gc.collect()
-        lines2 = self.get_open_files()
-
-        new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
-        leaked_files = [t for t in lines2 if t[0] in new_fds]
-        if leaked_files:
-            error = []
-            error.append("***** %s FD leakage detected" % len(leaked_files))
-            error.extend([str(f) for f in leaked_files])
-            error.append("*** Before:")
-            error.extend([str(f) for f in lines1])
-            error.append("*** After:")
-            error.extend([str(f) for f in lines2])
-            error.append(error[0])
-            error.append("*** function %s:%s: %s " % item.location)
-            error.append("See issue #2366")
-            item.warn(pytest.PytestWarning("\n".join(error)))
-
-
-# XXX copied from execnet's conftest.py - needs to be merged
-winpymap = {
-    "python2.7": r"C:\Python27\python.exe",
-    "python3.4": r"C:\Python34\python.exe",
-    "python3.5": r"C:\Python35\python.exe",
-    "python3.6": r"C:\Python36\python.exe",
-}
-
-
-def getexecutable(name, cache={}):
-    try:
-        return cache[name]
-    except KeyError:
-        executable = py.path.local.sysfind(name)
-        if executable:
-            import subprocess
-
-            popen = subprocess.Popen(
-                [str(executable), "--version"],
-                universal_newlines=True,
-                stderr=subprocess.PIPE,
-            )
-            out, err = popen.communicate()
-            if name == "jython":
-                if not err or "2.5" not in err:
-                    executable = None
-                if "2.5.2" in err:
-                    executable = None  # http://bugs.jython.org/issue1790
-            elif popen.returncode != 0:
-                # handle pyenv's 127
-                executable = None
-        cache[name] = executable
-        return executable
-
-
-@pytest.fixture(params=["python2.7", "python3.4", "pypy", "pypy3"])
-def anypython(request):
-    name = request.param
-    executable = getexecutable(name)
-    if executable is None:
-        if sys.platform == "win32":
-            executable = winpymap.get(name, None)
-            if executable:
-                executable = py.path.local(executable)
-                if executable.check():
-                    return executable
-        pytest.skip("no suitable %s found" % (name,))
-    return executable
-
-
-# used at least by pytest-xdist plugin
-
-
-@pytest.fixture
-def _pytest(request):
-    """Return a helper which offers a gethookrecorder(hook) method which
-    returns a HookRecorder instance which helps to make assertions about called
-    hooks.
-
-    """
-    return PytestArg(request)
-
-
-class PytestArg(object):
-    def __init__(self, request):
-        self.request = request
-
-    def gethookrecorder(self, hook):
-        hookrecorder = HookRecorder(hook._pm)
-        self.request.addfinalizer(hookrecorder.finish_recording)
-        return hookrecorder
-
-
-def get_public_names(values):
-    """Only return names from iterator values without a leading underscore."""
-    return [x for x in values if x[0] != "_"]
-
-
-class ParsedCall(object):
-    def __init__(self, name, kwargs):
-        self.__dict__.update(kwargs)
-        self._name = name
-
-    def __repr__(self):
-        d = self.__dict__.copy()
-        del d["_name"]
-        return "<ParsedCall %r(**%r)>" % (self._name, d)
-
-
-class HookRecorder(object):
-    """Record all hooks called in a plugin manager.
-
-    This wraps all the hook calls in the plugin manager, recording each call
-    before propagating the normal calls.
- - """ - - def __init__(self, pluginmanager): - self._pluginmanager = pluginmanager - self.calls = [] - - def before(hook_name, hook_impls, kwargs): - self.calls.append(ParsedCall(hook_name, kwargs)) - - def after(outcome, hook_name, hook_impls, kwargs): - pass - - self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) - - def finish_recording(self): - self._undo_wrapping() - - def getcalls(self, names): - if isinstance(names, str): - names = names.split() - return [call for call in self.calls if call._name in names] - - def assert_contains(self, entries): - __tracebackhide__ = True - i = 0 - entries = list(entries) - backlocals = sys._getframe(1).f_locals - while entries: - name, check = entries.pop(0) - for ind, call in enumerate(self.calls[i:]): - if call._name == name: - print("NAMEMATCH", name, call) - if eval(check, backlocals, call.__dict__): - print("CHECKERMATCH", repr(check), "->", call) - else: - print("NOCHECKERMATCH", repr(check), "-", call) - continue - i += ind + 1 - break - print("NONAMEMATCH", name, "with", call) - else: - pytest.fail("could not find %r check %r" % (name, check)) - - def popcall(self, name): - __tracebackhide__ = True - for i, call in enumerate(self.calls): - if call._name == name: - del self.calls[i] - return call - lines = ["could not find call %r, in:" % (name,)] - lines.extend([" %s" % x for x in self.calls]) - pytest.fail("\n".join(lines)) - - def getcall(self, name): - values = self.getcalls(name) - assert len(values) == 1, (name, values) - return values[0] - - # functionality for test reports - - def getreports(self, names="pytest_runtest_logreport pytest_collectreport"): - return [x.report for x in self.getcalls(names)] - - def matchreport( - self, - inamepart="", - names="pytest_runtest_logreport pytest_collectreport", - when=None, - ): - """return a testreport whose dotted import path matches""" - values = [] - for rep in self.getreports(names=names): - try: - if not when and rep.when != "call" and rep.passed: - # setup/teardown passing reports - let's ignore those - continue - except AttributeError: - pass - if when and getattr(rep, "when", None) != when: - continue - if not inamepart or inamepart in rep.nodeid.split("::"): - values.append(rep) - if not values: - raise ValueError( - "could not find test report matching %r: " - "no test reports at all!" 
% (inamepart,)
-            )
-        if len(values) > 1:
-            raise ValueError(
-                "found 2 or more testreports matching %r: %s" % (inamepart, values)
-            )
-        return values[0]
-
-    def getfailures(self, names="pytest_runtest_logreport pytest_collectreport"):
-        return [rep for rep in self.getreports(names) if rep.failed]
-
-    def getfailedcollections(self):
-        return self.getfailures("pytest_collectreport")
-
-    def listoutcomes(self):
-        passed = []
-        skipped = []
-        failed = []
-        for rep in self.getreports("pytest_collectreport pytest_runtest_logreport"):
-            if rep.passed:
-                if getattr(rep, "when", None) == "call":
-                    passed.append(rep)
-            elif rep.skipped:
-                skipped.append(rep)
-            elif rep.failed:
-                failed.append(rep)
-        return passed, skipped, failed
-
-    def countoutcomes(self):
-        return [len(x) for x in self.listoutcomes()]
-
-    def assertoutcome(self, passed=0, skipped=0, failed=0):
-        realpassed, realskipped, realfailed = self.listoutcomes()
-        assert passed == len(realpassed)
-        assert skipped == len(realskipped)
-        assert failed == len(realfailed)
-
-    def clear(self):
-        self.calls[:] = []
-
-
-@pytest.fixture
-def linecomp(request):
-    return LineComp()
-
-
-@pytest.fixture(name="LineMatcher")
-def LineMatcher_fixture(request):
-    return LineMatcher
-
-
-@pytest.fixture
-def testdir(request, tmpdir_factory):
-    return Testdir(request, tmpdir_factory)
-
-
-rex_outcome = re.compile(r"(\d+) ([\w-]+)")
-
-
-class RunResult(object):
-    """The result of running a command.
-
-    Attributes:
-
-    :ret: the return value
-    :outlines: list of lines captured from stdout
-    :errlines: list of lines captured from stderr
-    :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
-       reconstruct stdout or the commonly used ``stdout.fnmatch_lines()``
-       method
-    :stderr: :py:class:`LineMatcher` of stderr
-    :duration: duration in seconds
-
-    """
-
-    def __init__(self, ret, outlines, errlines, duration):
-        self.ret = ret
-        self.outlines = outlines
-        self.errlines = errlines
-        self.stdout = LineMatcher(outlines)
-        self.stderr = LineMatcher(errlines)
-        self.duration = duration
-
-    def parseoutcomes(self):
-        """Return a dictionary of outcome string -> count from parsing the
-        terminal output that the test process produced.
-
-        """
-        for line in reversed(self.outlines):
-            if "seconds" in line:
-                outcomes = rex_outcome.findall(line)
-                if outcomes:
-                    d = {}
-                    for num, cat in outcomes:
-                        d[cat] = int(num)
-                    return d
-        raise ValueError("Pytest terminal report not found")
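``parseoutcomes`` scans the final summary line (e.g. ``== 1 passed, 1 skipped in 0.12 seconds ==``) and feeds ``assert_outcomes`` below; a sketch using the ``testdir`` fixture defined above:

.. code-block:: python

    def test_outcomes(testdir):
        testdir.makepyfile(
            """
            import pytest

            def test_ok():
                pass

            @pytest.mark.skip(reason="demo")
            def test_skipped():
                pass
            """
        )
        result = testdir.runpytest()
        assert result.parseoutcomes() == {"passed": 1, "skipped": 1}
        result.assert_outcomes(passed=1, skipped=1)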
- - """ - d = self.parseoutcomes() - obtained = { - "passed": d.get("passed", 0), - "skipped": d.get("skipped", 0), - "failed": d.get("failed", 0), - "error": d.get("error", 0), - "xpassed": d.get("xpassed", 0), - "xfailed": d.get("xfailed", 0), - } - expected = { - "passed": passed, - "skipped": skipped, - "failed": failed, - "error": error, - "xpassed": xpassed, - "xfailed": xfailed, - } - assert obtained == expected - - -class CwdSnapshot(object): - def __init__(self): - self.__saved = os.getcwd() - - def restore(self): - os.chdir(self.__saved) - - -class SysModulesSnapshot(object): - def __init__(self, preserve=None): - self.__preserve = preserve - self.__saved = dict(sys.modules) - - def restore(self): - if self.__preserve: - self.__saved.update( - (k, m) for k, m in sys.modules.items() if self.__preserve(k) - ) - sys.modules.clear() - sys.modules.update(self.__saved) - - -class SysPathsSnapshot(object): - def __init__(self): - self.__saved = list(sys.path), list(sys.meta_path) - - def restore(self): - sys.path[:], sys.meta_path[:] = self.__saved - - -class Testdir(object): - """Temporary test directory with tools to test/run pytest itself. - - This is based on the ``tmpdir`` fixture but provides a number of methods - which aid with testing pytest itself. Unless :py:meth:`chdir` is used all - methods will use :py:attr:`tmpdir` as their current working directory. - - Attributes: - - :tmpdir: The :py:class:`py.path.local` instance of the temporary directory. - - :plugins: A list of plugins to use with :py:meth:`parseconfig` and - :py:meth:`runpytest`. Initially this is an empty list but plugins can - be added to the list. The type of items to add to the list depends on - the method using them so refer to them for details. - - """ - - class TimeoutExpired(Exception): - pass - - def __init__(self, request, tmpdir_factory): - self.request = request - self._mod_collections = WeakKeyDictionary() - name = request.function.__name__ - self.tmpdir = tmpdir_factory.mktemp(name, numbered=True) - self.test_tmproot = tmpdir_factory.mktemp("tmp-" + name, numbered=True) - os.environ["PYTEST_DEBUG_TEMPROOT"] = str(self.test_tmproot) - self.plugins = [] - self._cwd_snapshot = CwdSnapshot() - self._sys_path_snapshot = SysPathsSnapshot() - self._sys_modules_snapshot = self.__take_sys_modules_snapshot() - self.chdir() - self.request.addfinalizer(self.finalize) - method = self.request.config.getoption("--runpytest") - if method == "inprocess": - self._runpytest_method = self.runpytest_inprocess - elif method == "subprocess": - self._runpytest_method = self.runpytest_subprocess - - def __repr__(self): - return "" % (self.tmpdir,) - - def finalize(self): - """Clean up global state artifacts. - - Some methods modify the global interpreter state and this tries to - clean this up. It does not remove the temporary directory however so - it can be looked at after the test run has finished. 
- - """ - self._sys_modules_snapshot.restore() - self._sys_path_snapshot.restore() - self._cwd_snapshot.restore() - os.environ.pop("PYTEST_DEBUG_TEMPROOT", None) - - def __take_sys_modules_snapshot(self): - # some zope modules used by twisted-related tests keep internal state - # and can't be deleted; we had some trouble in the past with - # `zope.interface` for example - def preserve_module(name): - return name.startswith("zope") - - return SysModulesSnapshot(preserve=preserve_module) - - def make_hook_recorder(self, pluginmanager): - """Create a new :py:class:`HookRecorder` for a PluginManager.""" - pluginmanager.reprec = reprec = HookRecorder(pluginmanager) - self.request.addfinalizer(reprec.finish_recording) - return reprec - - def chdir(self): - """Cd into the temporary directory. - - This is done automatically upon instantiation. - - """ - self.tmpdir.chdir() - - def _makefile(self, ext, args, kwargs, encoding="utf-8"): - items = list(kwargs.items()) - - def to_text(s): - return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s) - - if args: - source = u"\n".join(to_text(x) for x in args) - basename = self.request.function.__name__ - items.insert(0, (basename, source)) - - ret = None - for basename, value in items: - p = self.tmpdir.join(basename).new(ext=ext) - p.dirpath().ensure_dir() - source = Source(value) - source = u"\n".join(to_text(line) for line in source.lines) - p.write(source.strip().encode(encoding), "wb") - if ret is None: - ret = p - return ret - - def makefile(self, ext, *args, **kwargs): - r"""Create new file(s) in the testdir. - - :param str ext: The extension the file(s) should use, including the dot, e.g. `.py`. - :param list[str] args: All args will be treated as strings and joined using newlines. - The result will be written as contents to the file. The name of the - file will be based on the test function requesting this fixture. - :param kwargs: Each keyword is the name of a file, while the value of it will - be written as contents of the file. - - Examples: - - .. code-block:: python - - testdir.makefile(".txt", "line1", "line2") - - testdir.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") - - """ - return self._makefile(ext, args, kwargs) - - def makeconftest(self, source): - """Write a contest.py file with 'source' as contents.""" - return self.makepyfile(conftest=source) - - def makeini(self, source): - """Write a tox.ini file with 'source' as contents.""" - return self.makefile(".ini", tox=source) - - def getinicfg(self, source): - """Return the pytest section from the tox.ini config file.""" - p = self.makeini(source) - return py.iniconfig.IniConfig(p)["pytest"] - - def makepyfile(self, *args, **kwargs): - """Shortcut for .makefile() with a .py extension.""" - return self._makefile(".py", args, kwargs) - - def maketxtfile(self, *args, **kwargs): - """Shortcut for .makefile() with a .txt extension.""" - return self._makefile(".txt", args, kwargs) - - def syspathinsert(self, path=None): - """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`. - - This is undone automatically when this object dies at the end of each - test. 
- - """ - if path is None: - path = self.tmpdir - sys.path.insert(0, str(path)) - # a call to syspathinsert() usually means that the caller wants to - # import some dynamically created files, thus with python3 we - # invalidate its import caches - self._possibly_invalidate_import_caches() - - def _possibly_invalidate_import_caches(self): - # invalidate caches if we can (py33 and above) - try: - import importlib - except ImportError: - pass - else: - if hasattr(importlib, "invalidate_caches"): - importlib.invalidate_caches() - - def mkdir(self, name): - """Create a new (sub)directory.""" - return self.tmpdir.mkdir(name) - - def mkpydir(self, name): - """Create a new python package. - - This creates a (sub)directory with an empty ``__init__.py`` file so it - gets recognised as a python package. - - """ - p = self.mkdir(name) - p.ensure("__init__.py") - return p - - def copy_example(self, name=None): - import warnings - from _pytest.warning_types import PYTESTER_COPY_EXAMPLE - - warnings.warn(PYTESTER_COPY_EXAMPLE, stacklevel=2) - example_dir = self.request.config.getini("pytester_example_dir") - if example_dir is None: - raise ValueError("pytester_example_dir is unset, can't copy examples") - example_dir = self.request.config.rootdir.join(example_dir) - - for extra_element in self.request.node.iter_markers("pytester_example_path"): - assert extra_element.args - example_dir = example_dir.join(*extra_element.args) - - if name is None: - func_name = self.request.function.__name__ - maybe_dir = example_dir / func_name - maybe_file = example_dir / (func_name + ".py") - - if maybe_dir.isdir(): - example_path = maybe_dir - elif maybe_file.isfile(): - example_path = maybe_file - else: - raise LookupError( - "{} cant be found as module or package in {}".format( - func_name, example_dir.bestrelpath(self.request.confg.rootdir) - ) - ) - else: - example_path = example_dir.join(name) - - if example_path.isdir() and not example_path.join("__init__.py").isfile(): - example_path.copy(self.tmpdir) - return self.tmpdir - elif example_path.isfile(): - result = self.tmpdir.join(example_path.basename) - example_path.copy(result) - return result - else: - raise LookupError( - 'example "{}" is not found as a file or directory'.format(example_path) - ) - - Session = Session - - def getnode(self, config, arg): - """Return the collection node of a file. - - :param config: :py:class:`_pytest.config.Config` instance, see - :py:meth:`parseconfig` and :py:meth:`parseconfigure` to create the - configuration - - :param arg: a :py:class:`py.path.local` instance of the file - - """ - session = Session(config) - assert "::" not in str(arg) - p = py.path.local(arg) - config.hook.pytest_sessionstart(session=session) - res = session.perform_collect([str(p)], genitems=False)[0] - config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) - return res - - def getpathnode(self, path): - """Return the collection node of a file. - - This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to - create the (configured) pytest Config instance. - - :param path: a :py:class:`py.path.local` instance of the file - - """ - config = self.parseconfigure(path) - session = Session(config) - x = session.fspath.bestrelpath(path) - config.hook.pytest_sessionstart(session=session) - res = session.perform_collect([x], genitems=False)[0] - config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK) - return res - - def genitems(self, colitems): - """Generate all test items from a collection node. 
- - This recurses into the collection node and returns a list of all the - test items contained within. - - """ - session = colitems[0].session - result = [] - for colitem in colitems: - result.extend(session.genitems(colitem)) - return result - - def runitem(self, source): - """Run the "test_func" Item. - - The calling test instance (class containing the test method) must - provide a ``.getrunner()`` method which should return a runner which - can run the test protocol for a single item, e.g. - :py:func:`_pytest.runner.runtestprotocol`. - - """ - # used from runner functional tests - item = self.getitem(source) - # the test class where we are called from wants to provide the runner - testclassinstance = self.request.instance - runner = testclassinstance.getrunner() - return runner(item) - - def inline_runsource(self, source, *cmdlineargs): - """Run a test module in process using ``pytest.main()``. - - This run writes "source" into a temporary file and runs - ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance - for the result. - - :param source: the source code of the test module - - :param cmdlineargs: any extra command line arguments to use - - :return: :py:class:`HookRecorder` instance of the result - - """ - p = self.makepyfile(source) - values = list(cmdlineargs) + [p] - return self.inline_run(*values) - - def inline_genitems(self, *args): - """Run ``pytest.main(['--collectonly'])`` in-process. - - Runs the :py:func:`pytest.main` function to run all of pytest inside - the test process itself like :py:meth:`inline_run`, but returns a - tuple of the collected items and a :py:class:`HookRecorder` instance. - - """ - rec = self.inline_run("--collect-only", *args) - items = [x.item for x in rec.getcalls("pytest_itemcollected")] - return items, rec - - def inline_run(self, *args, **kwargs): - """Run ``pytest.main()`` in-process, returning a HookRecorder. - - Runs the :py:func:`pytest.main` function to run all of pytest inside - the test process itself. This means it can return a - :py:class:`HookRecorder` instance which gives more detailed results - from that run than can be done by matching stdout/stderr from - :py:meth:`runpytest`. - - :param args: command line arguments to pass to :py:func:`pytest.main` - - :param plugin: (keyword-only) extra plugin instances the - ``pytest.main()`` instance should use - - :return: a :py:class:`HookRecorder` instance - - """ - finalizers = [] - try: - # When running pytest inline any plugins active in the main test - # process are already imported. So this disables the warning which - # will trigger to say they can no longer be rewritten, which is - # fine as they have already been rewritten. - orig_warn = AssertionRewritingHook._warn_already_imported - - def revert_warn_already_imported(): - AssertionRewritingHook._warn_already_imported = orig_warn - - finalizers.append(revert_warn_already_imported) - AssertionRewritingHook._warn_already_imported = lambda *a: None - - # Any sys.module or sys.path changes done while running pytest - # inline should be reverted after the test run completes to avoid - # clashing with later inline tests run within the same pytest test, - # e.g. just because they use matching test module names. 
- finalizers.append(self.__take_sys_modules_snapshot().restore) - finalizers.append(SysPathsSnapshot().restore) - - # Important note: - # - our tests should not leave any other references/registrations - # laying around other than possibly loaded test modules - # referenced from sys.modules, as nothing will clean those up - # automatically - - rec = [] - - class Collect(object): - def pytest_configure(x, config): - rec.append(self.make_hook_recorder(config.pluginmanager)) - - plugins = kwargs.get("plugins") or [] - plugins.append(Collect()) - ret = pytest.main(list(args), plugins=plugins) - if len(rec) == 1: - reprec = rec.pop() - else: - - class reprec(object): - pass - - reprec.ret = ret - - # typically we reraise keyboard interrupts from the child run - # because it's our user requesting interruption of the testing - if ret == EXIT_INTERRUPTED and not kwargs.get("no_reraise_ctrlc"): - calls = reprec.getcalls("pytest_keyboard_interrupt") - if calls and calls[-1].excinfo.type == KeyboardInterrupt: - raise KeyboardInterrupt() - return reprec - finally: - for finalizer in finalizers: - finalizer() - - def runpytest_inprocess(self, *args, **kwargs): - """Return result of running pytest in-process, providing a similar - interface to what self.runpytest() provides. - - """ - if kwargs.get("syspathinsert"): - self.syspathinsert() - now = time.time() - capture = MultiCapture(Capture=SysCapture) - capture.start_capturing() - try: - try: - reprec = self.inline_run(*args, **kwargs) - except SystemExit as e: - - class reprec(object): - ret = e.args[0] - - except Exception: - traceback.print_exc() - - class reprec(object): - ret = 3 - - finally: - out, err = capture.readouterr() - capture.stop_capturing() - sys.stdout.write(out) - sys.stderr.write(err) - - res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), time.time() - now) - res.reprec = reprec - return res - - def runpytest(self, *args, **kwargs): - """Run pytest inline or in a subprocess, depending on the command line - option "--runpytest" and return a :py:class:`RunResult`. - - """ - args = self._ensure_basetemp(args) - return self._runpytest_method(*args, **kwargs) - - def _ensure_basetemp(self, args): - args = list(args) - for x in args: - if safe_str(x).startswith("--basetemp"): - break - else: - args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp")) - return args - - def parseconfig(self, *args): - """Return a new pytest Config instance from given commandline args. - - This invokes the pytest bootstrapping code in _pytest.config to create - a new :py:class:`_pytest.core.PluginManager` and call the - pytest_cmdline_parse hook to create a new - :py:class:`_pytest.config.Config` instance. - - If :py:attr:`plugins` has been populated they should be plugin modules - to be registered with the PluginManager. - - """ - args = self._ensure_basetemp(args) - - import _pytest.config - - config = _pytest.config._prepareconfig(args, self.plugins) - # we don't know what the test will do with this half-setup config - # object and thus we make sure it gets unconfigured properly in any - # case (otherwise capturing could still be active, for example) - self.request.addfinalizer(config._ensure_unconfigure) - return config - - def parseconfigure(self, *args): - """Return a new pytest configured Config instance. - - This returns a new :py:class:`_pytest.config.Config` instance like - :py:meth:`parseconfig`, but also calls the pytest_configure hook. 
- - """ - config = self.parseconfig(*args) - config._do_configure() - self.request.addfinalizer(config._ensure_unconfigure) - return config - - def getitem(self, source, funcname="test_func"): - """Return the test item for a test function. - - This writes the source to a python file and runs pytest's collection on - the resulting module, returning the test item for the requested - function name. - - :param source: the module source - - :param funcname: the name of the test function for which to return a - test item - - """ - items = self.getitems(source) - for item in items: - if item.name == funcname: - return item - assert 0, "%r item not found in module:\n%s\nitems: %s" % ( - funcname, - source, - items, - ) - - def getitems(self, source): - """Return all test items collected from the module. - - This writes the source to a python file and runs pytest's collection on - the resulting module, returning all test items contained within. - - """ - modcol = self.getmodulecol(source) - return self.genitems([modcol]) - - def getmodulecol(self, source, configargs=(), withinit=False): - """Return the module collection node for ``source``. - - This writes ``source`` to a file using :py:meth:`makepyfile` and then - runs the pytest collection on it, returning the collection node for the - test module. - - :param source: the source code of the module to collect - - :param configargs: any extra arguments to pass to - :py:meth:`parseconfigure` - - :param withinit: whether to also write an ``__init__.py`` file to the - same directory to ensure it is a package - - """ - if isinstance(source, Path): - path = self.tmpdir.join(str(source)) - assert not withinit, "not supported for paths" - else: - kw = {self.request.function.__name__: Source(source).strip()} - path = self.makepyfile(**kw) - if withinit: - self.makepyfile(__init__="#") - self.config = config = self.parseconfigure(path, *configargs) - return self.getnode(config, path) - - def collect_by_name(self, modcol, name): - """Return the collection node for name from the module collection. - - This will search a module collection node for a collection node - matching the given name. - - :param modcol: a module collection node; see :py:meth:`getmodulecol` - - :param name: the name of the node to return - - """ - if modcol not in self._mod_collections: - self._mod_collections[modcol] = list(modcol.collect()) - for colitem in self._mod_collections[modcol]: - if colitem.name == name: - return colitem - - def popen(self, cmdargs, stdout, stderr, **kw): - """Invoke subprocess.Popen. - - This calls subprocess.Popen making sure the current working directory - is in the PYTHONPATH. - - You probably want to use :py:meth:`run` instead. - - """ - env = os.environ.copy() - env["PYTHONPATH"] = os.pathsep.join( - filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) - ) - kw["env"] = env - - popen = subprocess.Popen( - cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw - ) - popen.stdin.close() - - return popen - - def run(self, *cmdargs, **kwargs): - """Run a command with arguments. - - Run a process using subprocess.Popen saving the stdout and stderr. - - :param args: the sequence of arguments to pass to `subprocess.Popen()` - :param timeout: the period in seconds after which to timeout and raise - :py:class:`Testdir.TimeoutExpired` - - Returns a :py:class:`RunResult`. 
- - """ - __tracebackhide__ = True - - timeout = kwargs.pop("timeout", None) - raise_on_kwargs(kwargs) - - cmdargs = [ - str(arg) if isinstance(arg, py.path.local) else arg for arg in cmdargs - ] - p1 = self.tmpdir.join("stdout") - p2 = self.tmpdir.join("stderr") - print("running:", *cmdargs) - print(" in:", py.path.local()) - f1 = codecs.open(str(p1), "w", encoding="utf8") - f2 = codecs.open(str(p2), "w", encoding="utf8") - try: - now = time.time() - popen = self.popen( - cmdargs, stdout=f1, stderr=f2, close_fds=(sys.platform != "win32") - ) - - def handle_timeout(): - __tracebackhide__ = True - - timeout_message = ( - "{seconds} second timeout expired running:" - " {command}".format(seconds=timeout, command=cmdargs) - ) - - popen.kill() - popen.wait() - raise self.TimeoutExpired(timeout_message) - - if timeout is None: - ret = popen.wait() - elif six.PY3: - try: - ret = popen.wait(timeout) - except subprocess.TimeoutExpired: - handle_timeout() - else: - end = time.time() + timeout - - resolution = min(0.1, timeout / 10) - - while True: - ret = popen.poll() - if ret is not None: - break - - if time.time() > end: - handle_timeout() - - time.sleep(resolution) - finally: - f1.close() - f2.close() - f1 = codecs.open(str(p1), "r", encoding="utf8") - f2 = codecs.open(str(p2), "r", encoding="utf8") - try: - out = f1.read().splitlines() - err = f2.read().splitlines() - finally: - f1.close() - f2.close() - self._dump_lines(out, sys.stdout) - self._dump_lines(err, sys.stderr) - return RunResult(ret, out, err, time.time() - now) - - def _dump_lines(self, lines, fp): - try: - for line in lines: - print(line, file=fp) - except UnicodeEncodeError: - print("couldn't print to %s because of encoding" % (fp,)) - - def _getpytestargs(self): - return sys.executable, "-mpytest" - - def runpython(self, script): - """Run a python script using sys.executable as interpreter. - - Returns a :py:class:`RunResult`. - - """ - return self.run(sys.executable, script) - - def runpython_c(self, command): - """Run python -c "command", return a :py:class:`RunResult`.""" - return self.run(sys.executable, "-c", command) - - def runpytest_subprocess(self, *args, **kwargs): - """Run pytest as a subprocess with given arguments. - - Any plugins added to the :py:attr:`plugins` list will be added using the - ``-p`` command line option. Additionally ``--basetemp`` is used to put - any temporary files and directories in a numbered directory prefixed - with "runpytest-" to not conflict with the normal numbered pytest - location for temporary files and directories. - - :param args: the sequence of arguments to pass to the pytest subprocess - :param timeout: the period in seconds after which to timeout and raise - :py:class:`Testdir.TimeoutExpired` - - Returns a :py:class:`RunResult`. - - """ - __tracebackhide__ = True - - p = py.path.local.make_numbered_dir( - prefix="runpytest-", keep=None, rootdir=self.tmpdir - ) - args = ("--basetemp=%s" % p,) + args - plugins = [x for x in self.plugins if isinstance(x, str)] - if plugins: - args = ("-p", plugins[0]) + args - args = self._getpytestargs() + args - return self.run(*args, timeout=kwargs.get("timeout")) - - def spawn_pytest(self, string, expect_timeout=10.0): - """Run pytest using pexpect. - - This makes sure to use the right pytest and sets up the temporary - directory locations. - - The pexpect child is returned. 
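-
-        A minimal sketch (requires ``pexpect``; assumes the ``testdir``
-        fixture)::
-
-            child = testdir.spawn_pytest("--version")
-            child.expect("pytest")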
-
-        """
-        basetemp = self.tmpdir.mkdir("temp-pexpect")
-        invoke = " ".join(map(str, self._getpytestargs()))
-        cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
-        return self.spawn(cmd, expect_timeout=expect_timeout)
-
-    def spawn(self, cmd, expect_timeout=10.0):
-        """Run a command using pexpect.
-
-        The pexpect child is returned.
-
-        """
-        pexpect = pytest.importorskip("pexpect", "3.0")
-        if hasattr(sys, "pypy_version_info") and "64" in platform.machine():
-            pytest.skip("pypy-64 bit not supported")
-        if sys.platform.startswith("freebsd"):
-            pytest.xfail("pexpect does not work reliably on freebsd")
-        logfile = self.tmpdir.join("spawn.out").open("wb")
-        child = pexpect.spawn(cmd, logfile=logfile)
-        self.request.addfinalizer(logfile.close)
-        child.timeout = expect_timeout
-        return child
-
-
-def getdecoded(out):
-    try:
-        return out.decode("utf-8")
-    except UnicodeDecodeError:
-        return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
-            py.io.saferepr(out),
-        )
-
-
-class LineComp(object):
-    def __init__(self):
-        self.stringio = py.io.TextIO()
-
-    def assert_contains_lines(self, lines2):
-        """Assert that lines2 are contained (linearly) in lines1.
-
-        Return a list of extralines found.
-
-        """
-        __tracebackhide__ = True
-        val = self.stringio.getvalue()
-        self.stringio.truncate(0)
-        self.stringio.seek(0)
-        lines1 = val.split("\n")
-        return LineMatcher(lines1).fnmatch_lines(lines2)
-
-
-class LineMatcher(object):
-    """Flexible matching of text.
-
-    This is a convenience class to test large texts like the output of
-    commands.
-
-    The constructor takes a list of lines without their trailing newlines, i.e.
-    ``text.splitlines()``.
-
-    """
-
-    def __init__(self, lines):
-        self.lines = lines
-        self._log_output = []
-
-    def str(self):
-        """Return the entire original text."""
-        return "\n".join(self.lines)
-
-    def _getlines(self, lines2):
-        if isinstance(lines2, str):
-            lines2 = Source(lines2)
-        if isinstance(lines2, Source):
-            lines2 = lines2.strip().lines
-        return lines2
-
-    def fnmatch_lines_random(self, lines2):
-        """Check lines exist in the output using ``fnmatch.fnmatch``, in any order.
-
-        The argument is a list of lines which have to occur in the output, in
-        any order.
-
-        """
-        self._match_lines_random(lines2, fnmatch)
-
-    def re_match_lines_random(self, lines2):
-        """Check lines exist in the output using ``re.match``, in any order.
-
-        The argument is a list of lines which have to occur in the output, in
-        any order.
-
-        """
-        self._match_lines_random(lines2, lambda name, pat: re.match(pat, name))
-
-    def _match_lines_random(self, lines2, match_func):
-        """Check lines exist in the output.
-
-        The argument is a list of lines which have to occur in the output, in
-        any order. Each line can contain glob wildcards.
-
-        """
-        lines2 = self._getlines(lines2)
-        for line in lines2:
-            for x in self.lines:
-                if line == x or match_func(x, line):
-                    self._log("matched: ", repr(line))
-                    break
-            else:
-                self._log("line %r not found in output" % line)
-                raise ValueError(self._log_text)
-
-    def get_lines_after(self, fnline):
-        """Return all lines following the given line in the text.
-
-        The given line can contain glob wildcards.
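-
-        For example (a hedged sketch)::
-
-            matcher = LineMatcher(["a", "b", "c"])
-            assert matcher.get_lines_after("b") == ["c"]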
- - """ - for i, line in enumerate(self.lines): - if fnline == line or fnmatch(line, fnline): - return self.lines[i + 1 :] - raise ValueError("line %r not found in output" % fnline) - - def _log(self, *args): - self._log_output.append(" ".join((str(x) for x in args))) - - @property - def _log_text(self): - return "\n".join(self._log_output) - - def fnmatch_lines(self, lines2): - """Search captured text for matching lines using ``fnmatch.fnmatch``. - - The argument is a list of lines which have to match and can use glob - wildcards. If they do not match a pytest.fail() is called. The - matches and non-matches are also printed on stdout. - - """ - __tracebackhide__ = True - self._match_lines(lines2, fnmatch, "fnmatch") - - def re_match_lines(self, lines2): - """Search captured text for matching lines using ``re.match``. - - The argument is a list of lines which have to match using ``re.match``. - If they do not match a pytest.fail() is called. - - The matches and non-matches are also printed on stdout. - - """ - __tracebackhide__ = True - self._match_lines(lines2, lambda name, pat: re.match(pat, name), "re.match") - - def _match_lines(self, lines2, match_func, match_nickname): - """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``. - - :param list[str] lines2: list of string patterns to match. The actual - format depends on ``match_func`` - :param match_func: a callable ``match_func(line, pattern)`` where line - is the captured line from stdout/stderr and pattern is the matching - pattern - :param str match_nickname: the nickname for the match function that - will be logged to stdout when a match occurs - - """ - lines2 = self._getlines(lines2) - lines1 = self.lines[:] - nextline = None - extralines = [] - __tracebackhide__ = True - for line in lines2: - nomatchprinted = False - while lines1: - nextline = lines1.pop(0) - if line == nextline: - self._log("exact match:", repr(line)) - break - elif match_func(nextline, line): - self._log("%s:" % match_nickname, repr(line)) - self._log(" with:", repr(nextline)) - break - else: - if not nomatchprinted: - self._log("nomatch:", repr(line)) - nomatchprinted = True - self._log(" and:", repr(nextline)) - extralines.append(nextline) - else: - self._log("remains unmatched: %r" % (line,)) - pytest.fail(self._log_text) diff --git a/venv/lib/python3.8/site-packages/_pytest/python.py b/venv/lib/python3.8/site-packages/_pytest/python.py deleted file mode 100644 index d360e2c8..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/python.py +++ /dev/null @@ -1,1454 +0,0 @@ -""" Python test discovery, setup and run of test functions. 
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import collections -import fnmatch -import inspect -import os -import sys -import warnings -from textwrap import dedent - -import py -import six - -import _pytest -from _pytest import deprecated -from _pytest import fixtures -from _pytest import nodes -from _pytest._code import filter_traceback -from _pytest.compat import ascii_escaped -from _pytest.compat import enum -from _pytest.compat import get_default_arg_names -from _pytest.compat import get_real_func -from _pytest.compat import getfslineno -from _pytest.compat import getimfunc -from _pytest.compat import getlocation -from _pytest.compat import is_generator -from _pytest.compat import isclass -from _pytest.compat import isfunction -from _pytest.compat import NoneType -from _pytest.compat import NOTSET -from _pytest.compat import REGEX_TYPE -from _pytest.compat import safe_getattr -from _pytest.compat import safe_isclass -from _pytest.compat import safe_str -from _pytest.compat import STRING_TYPES -from _pytest.config import hookimpl -from _pytest.main import FSHookProxy -from _pytest.mark.structures import get_unpacked_marks -from _pytest.mark.structures import normalize_mark_list -from _pytest.mark.structures import transfer_markers -from _pytest.outcomes import fail -from _pytest.pathlib import parts -from _pytest.warning_types import PytestWarning -from _pytest.warning_types import RemovedInPytest4Warning - - -def pyobj_property(name): - def get(self): - node = self.getparent(getattr(__import__("pytest"), name)) - if node is not None: - return node.obj - - doc = "python %s object this node was collected from (can be None)." % ( - name.lower(), - ) - return property(get, None, None, doc) - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group.addoption( - "--fixtures", - "--funcargs", - action="store_true", - dest="showfixtures", - default=False, - help="show available fixtures, sorted by plugin appearance " - "(fixtures with leading '_' are only shown with '-v')", - ) - group.addoption( - "--fixtures-per-test", - action="store_true", - dest="show_fixtures_per_test", - default=False, - help="show fixtures per test", - ) - parser.addini( - "usefixtures", - type="args", - default=[], - help="list of default fixtures to be used with this project", - ) - parser.addini( - "python_files", - type="args", - default=["test_*.py", "*_test.py"], - help="glob-style file patterns for Python test module discovery", - ) - parser.addini( - "python_classes", - type="args", - default=["Test"], - help="prefixes or glob names for Python test class discovery", - ) - parser.addini( - "python_functions", - type="args", - default=["test"], - help="prefixes or glob names for Python test function and method discovery", - ) - - group.addoption( - "--import-mode", - default="prepend", - choices=["prepend", "append"], - dest="importmode", - help="prepend/append to sys.path when importing test modules, " - "default is to prepend.", - ) - - -def pytest_cmdline_main(config): - if config.option.showfixtures: - showfixtures(config) - return 0 - if config.option.show_fixtures_per_test: - show_fixtures_per_test(config) - return 0 - - -def pytest_generate_tests(metafunc): - # those alternative spellings are common - raise a specific error to alert - # the user - alt_spellings = ["parameterize", "parametrise", "parameterise"] - for attr in alt_spellings: - if hasattr(metafunc.function, attr): - msg = "{0} has '{1}' mark, 
spelling should be 'parametrize'"
-            fail(msg.format(metafunc.function.__name__, attr), pytrace=False)
-    for marker in metafunc.definition.iter_markers(name="parametrize"):
-        metafunc.parametrize(*marker.args, **marker.kwargs)
-
-
-def pytest_configure(config):
-    config.addinivalue_line(
-        "markers",
-        "parametrize(argnames, argvalues): call a test function multiple "
-        "times passing in different arguments in turn. argvalues generally "
-        "needs to be a list of values if argnames specifies only one name "
-        "or a list of tuples of values if argnames specifies multiple names. "
-        "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
-        "decorated test function, one with arg1=1 and another with arg1=2. "
-        "See https://docs.pytest.org/en/latest/parametrize.html for more info "
-        "and examples.",
-    )
-    config.addinivalue_line(
-        "markers",
-        "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
-        "all of the specified fixtures. See "
-        "https://docs.pytest.org/en/latest/fixture.html#usefixtures ",
-    )
-
-
-@hookimpl(trylast=True)
-def pytest_pyfunc_call(pyfuncitem):
-    testfunction = pyfuncitem.obj
-    if pyfuncitem._isyieldedfunction():
-        testfunction(*pyfuncitem._args)
-    else:
-        funcargs = pyfuncitem.funcargs
-        testargs = {}
-        for arg in pyfuncitem._fixtureinfo.argnames:
-            testargs[arg] = funcargs[arg]
-        testfunction(**testargs)
-    return True
-
-
-def pytest_collect_file(path, parent):
-    ext = path.ext
-    if ext == ".py":
-        if not parent.session.isinitpath(path):
-            if not path_matches_patterns(
-                path, parent.config.getini("python_files") + ["__init__.py"]
-            ):
-                return
-        ihook = parent.session.gethookproxy(path)
-        return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
-
-
-def path_matches_patterns(path, patterns):
-    """Returns True if the given py.path.local matches one of the patterns in the list of globs given."""
-    return any(path.fnmatch(pattern) for pattern in patterns)
-
-
-def pytest_pycollect_makemodule(path, parent):
-    if path.basename == "__init__.py":
-        return Package(path, parent)
-    return Module(path, parent)
-
-
-@hookimpl(hookwrapper=True)
-def pytest_pycollect_makeitem(collector, name, obj):
-    outcome = yield
-    res = outcome.get_result()
-    if res is not None:
-        return
-    # nothing was collected elsewhere, let's do it here
-    if safe_isclass(obj):
-        if collector.istestclass(obj, name):
-            Class = collector._getcustomclass("Class")
-            outcome.force_result(Class(name, parent=collector))
-    elif collector.istestfunction(obj, name):
-        # mock seems to store unbound methods (issue473), normalize it
-        obj = getattr(obj, "__func__", obj)
-        # We need to try and unwrap the function if it's a functools.partial
-        # or a functools.wrapped.
-        # We mustn't do this if it's been wrapped with mock.patch (python 2 only)
-        if not (isfunction(obj) or isfunction(get_real_func(obj))):
-            filename, lineno = getfslineno(obj)
-            warnings.warn_explicit(
-                message=PytestWarning(
-                    "cannot collect %r because it is not a function."
% name - ), - category=None, - filename=str(filename), - lineno=lineno + 1, - ) - elif getattr(obj, "__test__", True): - if is_generator(obj): - res = Generator(name, parent=collector) - else: - res = list(collector._genfunctions(name, obj)) - outcome.force_result(res) - - -def pytest_make_parametrize_id(config, val, argname=None): - return None - - -class PyobjContext(object): - module = pyobj_property("Module") - cls = pyobj_property("Class") - instance = pyobj_property("Instance") - - -class PyobjMixin(PyobjContext): - _ALLOW_MARKERS = True - - def __init__(self, *k, **kw): - super(PyobjMixin, self).__init__(*k, **kw) - - def obj(): - def fget(self): - obj = getattr(self, "_obj", None) - if obj is None: - self._obj = obj = self._getobj() - # XXX evil hack - # used to avoid Instance collector marker duplication - if self._ALLOW_MARKERS: - self.own_markers.extend(get_unpacked_marks(self.obj)) - return obj - - def fset(self, value): - self._obj = value - - return property(fget, fset, None, "underlying python object") - - obj = obj() - - def _getobj(self): - return getattr(self.parent.obj, self.name) - - def getmodpath(self, stopatmodule=True, includemodule=False): - """ return python path relative to the containing module. """ - chain = self.listchain() - chain.reverse() - parts = [] - for node in chain: - if isinstance(node, Instance): - continue - name = node.name - if isinstance(node, Module): - name = os.path.splitext(name)[0] - if stopatmodule: - if includemodule: - parts.append(name) - break - parts.append(name) - parts.reverse() - s = ".".join(parts) - return s.replace(".[", "[") - - def _getfslineno(self): - return getfslineno(self.obj) - - def reportinfo(self): - # XXX caching? - obj = self.obj - compat_co_firstlineno = getattr(obj, "compat_co_firstlineno", None) - if isinstance(compat_co_firstlineno, int): - # nose compatibility - fspath = sys.modules[obj.__module__].__file__ - if fspath.endswith(".pyc"): - fspath = fspath[:-1] - lineno = compat_co_firstlineno - else: - fspath, lineno = getfslineno(obj) - modpath = self.getmodpath() - assert isinstance(lineno, int) - return fspath, lineno, modpath - - -class PyCollector(PyobjMixin, nodes.Collector): - def funcnamefilter(self, name): - return self._matches_prefix_or_glob_option("python_functions", name) - - def isnosetest(self, obj): - """ Look for the __test__ attribute, which is applied by the - @nose.tools.istest decorator - """ - # We explicitly check for "is True" here to not mistakenly treat - # classes with a custom __getattr__ returning something truthy (like a - # function) as test classes. - return safe_getattr(obj, "__test__", False) is True - - def classnamefilter(self, name): - return self._matches_prefix_or_glob_option("python_classes", name) - - def istestfunction(self, obj, name): - if self.funcnamefilter(name) or self.isnosetest(obj): - if isinstance(obj, staticmethod): - # static methods need to be unwrapped - obj = safe_getattr(obj, "__func__", False) - return ( - safe_getattr(obj, "__call__", False) - and fixtures.getfixturemarker(obj) is None - ) - else: - return False - - def istestclass(self, obj, name): - return self.classnamefilter(name) or self.isnosetest(obj) - - def _matches_prefix_or_glob_option(self, option_name, name): - """ - checks if the given name matches the prefix or glob-pattern defined - in ini configuration. 
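-
-        For example, with ``python_classes = Test *Suite`` in the ini file,
-        ``TestFoo`` matches via the prefix branch below and
-        ``AcceptanceSuite`` via the glob branch.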
- """ - for option in self.config.getini(option_name): - if name.startswith(option): - return True - # check that name looks like a glob-string before calling fnmatch - # because this is called for every name in each collected module, - # and fnmatch is somewhat expensive to call - elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( - name, option - ): - return True - return False - - def collect(self): - if not getattr(self.obj, "__test__", True): - return [] - - # NB. we avoid random getattrs and peek in the __dict__ instead - # (XXX originally introduced from a PyPy need, still true?) - dicts = [getattr(self.obj, "__dict__", {})] - for basecls in inspect.getmro(self.obj.__class__): - dicts.append(basecls.__dict__) - seen = {} - values = [] - for dic in dicts: - for name, obj in list(dic.items()): - if name in seen: - continue - seen[name] = True - res = self._makeitem(name, obj) - if res is None: - continue - if not isinstance(res, list): - res = [res] - values.extend(res) - values.sort(key=lambda item: item.reportinfo()[:2]) - return values - - def makeitem(self, name, obj): - warnings.warn(deprecated.COLLECTOR_MAKEITEM, stacklevel=2) - self._makeitem(name, obj) - - def _makeitem(self, name, obj): - # assert self.ihook.fspath == self.fspath, self - return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj) - - def _genfunctions(self, name, funcobj): - module = self.getparent(Module).obj - clscol = self.getparent(Class) - cls = clscol and clscol.obj or None - transfer_markers(funcobj, cls, module) - fm = self.session._fixturemanager - - definition = FunctionDefinition(name=name, parent=self, callobj=funcobj) - fixtureinfo = fm.getfixtureinfo(definition, funcobj, cls) - - metafunc = Metafunc( - definition, fixtureinfo, self.config, cls=cls, module=module - ) - methods = [] - if hasattr(module, "pytest_generate_tests"): - methods.append(module.pytest_generate_tests) - if hasattr(cls, "pytest_generate_tests"): - methods.append(cls().pytest_generate_tests) - if methods: - self.ihook.pytest_generate_tests.call_extra( - methods, dict(metafunc=metafunc) - ) - else: - self.ihook.pytest_generate_tests(metafunc=metafunc) - - Function = self._getcustomclass("Function") - if not metafunc._calls: - yield Function(name, parent=self, fixtureinfo=fixtureinfo) - else: - # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs - fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm) - - # add_funcarg_pseudo_fixture_def may have shadowed some fixtures - # with direct parametrization, so make sure we update what the - # function really needs. - fixtureinfo.prune_dependency_tree() - - for callspec in metafunc._calls: - subname = "%s[%s]" % (name, callspec.id) - yield Function( - name=subname, - parent=self, - callspec=callspec, - callobj=funcobj, - fixtureinfo=fixtureinfo, - keywords={callspec.id: True}, - originalname=name, - ) - - -class Module(nodes.File, PyCollector): - """ Collector for test classes and functions. 
""" - - def _getobj(self): - return self._importtestmodule() - - def collect(self): - self.session._fixturemanager.parsefactories(self) - return super(Module, self).collect() - - def _importtestmodule(self): - # we assume we are only called once per module - importmode = self.config.getoption("--import-mode") - try: - mod = self.fspath.pyimport(ensuresyspath=importmode) - except SyntaxError: - raise self.CollectError( - _pytest._code.ExceptionInfo().getrepr(style="short") - ) - except self.fspath.ImportMismatchError: - e = sys.exc_info()[1] - raise self.CollectError( - "import file mismatch:\n" - "imported module %r has this __file__ attribute:\n" - " %s\n" - "which is not the same as the test file we want to collect:\n" - " %s\n" - "HINT: remove __pycache__ / .pyc files and/or use a " - "unique basename for your test file modules" % e.args - ) - except ImportError: - from _pytest._code.code import ExceptionInfo - - exc_info = ExceptionInfo() - if self.config.getoption("verbose") < 2: - exc_info.traceback = exc_info.traceback.filter(filter_traceback) - exc_repr = ( - exc_info.getrepr(style="short") - if exc_info.traceback - else exc_info.exconly() - ) - formatted_tb = safe_str(exc_repr) - raise self.CollectError( - "ImportError while importing test module '{fspath}'.\n" - "Hint: make sure your test modules/packages have valid Python names.\n" - "Traceback:\n" - "{traceback}".format(fspath=self.fspath, traceback=formatted_tb) - ) - except _pytest.runner.Skipped as e: - if e.allow_module_level: - raise - raise self.CollectError( - "Using pytest.skip outside of a test is not allowed. " - "To decorate a test function, use the @pytest.mark.skip " - "or @pytest.mark.skipif decorators instead, and to skip a " - "module use `pytestmark = pytest.mark.{skip,skipif}." 
-            )
-        self.config.pluginmanager.consider_module(mod)
-        return mod
-
-    def setup(self):
-        setup_module = _get_xunit_setup_teardown(self.obj, "setUpModule")
-        if setup_module is None:
-            setup_module = _get_xunit_setup_teardown(self.obj, "setup_module")
-        if setup_module is not None:
-            setup_module()
-
-        teardown_module = _get_xunit_setup_teardown(self.obj, "tearDownModule")
-        if teardown_module is None:
-            teardown_module = _get_xunit_setup_teardown(self.obj, "teardown_module")
-        if teardown_module is not None:
-            self.addfinalizer(teardown_module)
-
-
-class Package(Module):
-    def __init__(self, fspath, parent=None, config=None, session=None, nodeid=None):
-        session = parent.session
-        nodes.FSCollector.__init__(
-            self, fspath, parent=parent, config=config, session=session, nodeid=nodeid
-        )
-        self.name = fspath.dirname
-        self.trace = session.trace
-        self._norecursepatterns = session._norecursepatterns
-        self.fspath = fspath
-
-    def _recurse(self, dirpath):
-        if dirpath.basename == "__pycache__":
-            return False
-        ihook = self.gethookproxy(dirpath.dirpath())
-        if ihook.pytest_ignore_collect(path=dirpath, config=self.config):
-            return
-        for pat in self._norecursepatterns:
-            if dirpath.check(fnmatch=pat):
-                return False
-        ihook = self.gethookproxy(dirpath)
-        ihook.pytest_collect_directory(path=dirpath, parent=self)
-        return True
-
-    def gethookproxy(self, fspath):
-        # check if we have the common case of running
-        # hooks with all conftest.py files
-        pm = self.config.pluginmanager
-        my_conftestmodules = pm._getconftestmodules(fspath)
-        remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
-        if remove_mods:
-            # one or more conftests are not in use at this fspath
-            proxy = FSHookProxy(fspath, pm, remove_mods)
-        else:
-            # all plugins are active for this fspath
-            proxy = self.config.hook
-        return proxy
-
-    def _collectfile(self, path, handle_dupes=True):
-        ihook = self.gethookproxy(path)
-        if not self.isinitpath(path):
-            if ihook.pytest_ignore_collect(path=path, config=self.config):
-                return ()
-
-        if handle_dupes:
-            keepduplicates = self.config.getoption("keepduplicates")
-            if not keepduplicates:
-                duplicate_paths = self.config.pluginmanager._duplicatepaths
-                if path in duplicate_paths:
-                    return ()
-                else:
-                    duplicate_paths.add(path)
-
-        if self.fspath == path:  # __init__.py
-            return [self]
-
-        return ihook.pytest_collect_file(path=path, parent=self)
-
-    def isinitpath(self, path):
-        return path in self.session._initialpaths
-
-    def collect(self):
-        this_path = self.fspath.dirpath()
-        init_module = this_path.join("__init__.py")
-        if init_module.check(file=1) and path_matches_patterns(
-            init_module, self.config.getini("python_files")
-        ):
-            yield Module(init_module, self)
-        pkg_prefixes = set()
-        for path in this_path.visit(rec=self._recurse, bf=True, sort=True):
-            # We will visit our own __init__.py file, in which case we skip it.
-            if path.isfile():
-                if path.basename == "__init__.py" and path.dirpath() == this_path:
-                    continue
-
-            parts_ = parts(path.strpath)
-            if any(
-                pkg_prefix in parts_ and pkg_prefix.join("__init__.py") != path
-                for pkg_prefix in pkg_prefixes
-            ):
-                continue
-
-            if path.isdir() and path.join("__init__.py").check(file=1):
-                pkg_prefixes.add(path)
-
-            for x in self._collectfile(path):
-                yield x
-
-
-def _get_xunit_setup_teardown(holder, attr_name, param_obj=None):
-    """
-    Return a callable to perform xunit-style setup or teardown if
-    the function exists in the ``holder`` object.
- The ``param_obj`` parameter is the parameter which will be passed to the function - when the callable is called without arguments, defaults to the ``holder`` object. - Return ``None`` if a suitable callable is not found. - """ - param_obj = param_obj if param_obj is not None else holder - result = _get_xunit_func(holder, attr_name) - if result is not None: - arg_count = result.__code__.co_argcount - if inspect.ismethod(result): - arg_count -= 1 - if arg_count: - return lambda: result(param_obj) - else: - return result - - -def _get_xunit_func(obj, name): - """Return the attribute from the given object to be used as a setup/teardown - xunit-style function, but only if not marked as a fixture to - avoid calling it twice. - """ - meth = getattr(obj, name, None) - if fixtures.getfixturemarker(meth) is None: - return meth - - -class Class(PyCollector): - """ Collector for test methods. """ - - def collect(self): - if not safe_getattr(self.obj, "__test__", True): - return [] - if hasinit(self.obj): - self.warn( - PytestWarning( - "cannot collect test class %r because it has a " - "__init__ constructor" % self.obj.__name__ - ) - ) - return [] - elif hasnew(self.obj): - self.warn( - PytestWarning( - "cannot collect test class %r because it has a " - "__new__ constructor" % self.obj.__name__ - ) - ) - return [] - return [self._getcustomclass("Instance")(name="()", parent=self)] - - def setup(self): - setup_class = _get_xunit_func(self.obj, "setup_class") - if setup_class is not None: - setup_class = getimfunc(setup_class) - setup_class(self.obj) - - fin_class = getattr(self.obj, "teardown_class", None) - if fin_class is not None: - fin_class = getimfunc(fin_class) - self.addfinalizer(lambda: fin_class(self.obj)) - - -class Instance(PyCollector): - _ALLOW_MARKERS = False # hack, destroy later - # instances share the object with their parents in a way - # that duplicates markers instances if not taken out - # can be removed at node structure reorganization time - - def _getobj(self): - return self.parent.obj() - - def collect(self): - self.session._fixturemanager.parsefactories(self) - return super(Instance, self).collect() - - def newinstance(self): - self.obj = self._getobj() - return self.obj - - -class FunctionMixin(PyobjMixin): - """ mixin for the code common to Function and Generator. - """ - - def setup(self): - """ perform setup for this test function. 
""" - if hasattr(self, "_preservedparent"): - obj = self._preservedparent - elif isinstance(self.parent, Instance): - obj = self.parent.newinstance() - self.obj = self._getobj() - else: - obj = self.parent.obj - if inspect.ismethod(self.obj): - setup_name = "setup_method" - teardown_name = "teardown_method" - else: - setup_name = "setup_function" - teardown_name = "teardown_function" - setup_func_or_method = _get_xunit_setup_teardown( - obj, setup_name, param_obj=self.obj - ) - if setup_func_or_method is not None: - setup_func_or_method() - teardown_func_or_method = _get_xunit_setup_teardown( - obj, teardown_name, param_obj=self.obj - ) - if teardown_func_or_method is not None: - self.addfinalizer(teardown_func_or_method) - - def _prunetraceback(self, excinfo): - if hasattr(self, "_obj") and not self.config.option.fulltrace: - code = _pytest._code.Code(get_real_func(self.obj)) - path, firstlineno = code.path, code.firstlineno - traceback = excinfo.traceback - ntraceback = traceback.cut(path=path, firstlineno=firstlineno) - if ntraceback == traceback: - ntraceback = ntraceback.cut(path=path) - if ntraceback == traceback: - ntraceback = ntraceback.filter(filter_traceback) - if not ntraceback: - ntraceback = traceback - - excinfo.traceback = ntraceback.filter() - # issue364: mark all but first and last frames to - # only show a single-line message for each frame - if self.config.option.tbstyle == "auto": - if len(excinfo.traceback) > 2: - for entry in excinfo.traceback[1:-1]: - entry.set_repr_style("short") - - def repr_failure(self, excinfo, outerr=None): - assert outerr is None, "XXX outerr usage is deprecated" - style = self.config.option.tbstyle - if style == "auto": - style = "long" - return self._repr_failure_py(excinfo, style=style) - - -class Generator(FunctionMixin, PyCollector): - def collect(self): - # test generators are seen as collectors but they also - # invoke setup/teardown on popular request - # (induced by the common "test_*" naming shared with normal tests) - from _pytest import deprecated - - self.session._setupstate.prepare(self) - # see FunctionMixin.setup and test_setupstate_is_preserved_134 - self._preservedparent = self.parent.obj - values = [] - seen = {} - for i, x in enumerate(self.obj()): - name, call, args = self.getcallargs(x) - if not callable(call): - raise TypeError("%r yielded non callable test %r" % (self.obj, call)) - if name is None: - name = "[%d]" % i - else: - name = "['%s']" % name - if name in seen: - raise ValueError( - "%r generated tests with non-unique name %r" % (self, name) - ) - seen[name] = True - with warnings.catch_warnings(): - # ignore our own deprecation warning - function_class = self.Function - values.append(function_class(name, self, args=args, callobj=call)) - self.warn(deprecated.YIELD_TESTS) - return values - - def getcallargs(self, obj): - if not isinstance(obj, (tuple, list)): - obj = (obj,) - # explicit naming - if isinstance(obj[0], six.string_types): - name = obj[0] - obj = obj[1:] - else: - name = None - call, args = obj[0], obj[1:] - return name, call, args - - -def hasinit(obj): - init = getattr(obj, "__init__", None) - if init: - return init != object.__init__ - - -def hasnew(obj): - new = getattr(obj, "__new__", None) - if new: - return new != object.__new__ - - -class CallSpec2(object): - def __init__(self, metafunc): - self.metafunc = metafunc - self.funcargs = {} - self._idlist = [] - self.params = {} - self._globalid = NOTSET - self._globalparam = NOTSET - self._arg2scopenum = {} # used for sorting parametrized 
resources - self.marks = [] - self.indices = {} - - def copy(self): - cs = CallSpec2(self.metafunc) - cs.funcargs.update(self.funcargs) - cs.params.update(self.params) - cs.marks.extend(self.marks) - cs.indices.update(self.indices) - cs._arg2scopenum.update(self._arg2scopenum) - cs._idlist = list(self._idlist) - cs._globalid = self._globalid - cs._globalparam = self._globalparam - return cs - - def _checkargnotcontained(self, arg): - if arg in self.params or arg in self.funcargs: - raise ValueError("duplicate %r" % (arg,)) - - def getparam(self, name): - try: - return self.params[name] - except KeyError: - if self._globalparam is NOTSET: - raise ValueError(name) - return self._globalparam - - @property - def id(self): - return "-".join(map(str, filter(None, self._idlist))) - - def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum, param_index): - for arg, val in zip(argnames, valset): - self._checkargnotcontained(arg) - valtype_for_arg = valtypes[arg] - getattr(self, valtype_for_arg)[arg] = val - self.indices[arg] = param_index - self._arg2scopenum[arg] = scopenum - self._idlist.append(id) - self.marks.extend(normalize_mark_list(marks)) - - def setall(self, funcargs, id, param): - for x in funcargs: - self._checkargnotcontained(x) - self.funcargs.update(funcargs) - if id is not NOTSET: - self._idlist.append(id) - if param is not NOTSET: - assert self._globalparam is NOTSET - self._globalparam = param - for arg in funcargs: - self._arg2scopenum[arg] = fixtures.scopenum_function - - -class Metafunc(fixtures.FuncargnamesCompatAttr): - """ - Metafunc objects are passed to the :func:`pytest_generate_tests <_pytest.hookspec.pytest_generate_tests>` hook. - They help to inspect a test function and to generate tests according to - test configuration or values specified in the class or module where a - test function is defined. - """ - - def __init__(self, definition, fixtureinfo, config, cls=None, module=None): - assert ( - isinstance(definition, FunctionDefinition) - or type(definition).__name__ == "DefinitionMock" - ) - self.definition = definition - - #: access to the :class:`_pytest.config.Config` object for the test session - self.config = config - - #: the module object where the test function is defined in. - self.module = module - - #: underlying python test function - self.function = definition.obj - - #: set of fixture names required by the test function - self.fixturenames = fixtureinfo.names_closure - - #: class object where the test function is defined in or ``None``. - self.cls = cls - - self._calls = [] - self._ids = set() - self._arg2fixturedefs = fixtureinfo.name2fixturedefs - - def parametrize(self, argnames, argvalues, indirect=False, ids=None, scope=None): - """ Add new invocations to the underlying test function using the list - of argvalues for the given argnames. Parametrization is performed - during the collection phase. If you need to setup expensive resources - see about setting indirect to do it rather at test setup time. - - :arg argnames: a comma-separated string denoting one or more argument - names, or a list/tuple of argument strings. - - :arg argvalues: The list of argvalues determines how often a - test is invoked with different argument values. If only one - argname was specified argvalues is a list of values. If N - argnames were specified, argvalues must be a list of N-tuples, - where each tuple-element specifies a value for its respective - argname. - - :arg indirect: The list of argnames or boolean. 
A list of arguments' - names (subset of argnames). If True the list contains all names from - the argnames. Each argvalue corresponding to an argname in this list will - be passed as request.param to its respective argname fixture - function so that it can perform more expensive setups during the - setup phase of a test rather than at collection time. - - :arg ids: list of string ids, or a callable. - If strings, each is corresponding to the argvalues so that they are - part of the test id. If None is given as id of specific test, the - automatically generated id for that argument will be used. - If callable, it should take one argument (a single argvalue) and return - a string or return None. If None, the automatically generated id for that - argument will be used. - If no ids are provided they will be generated automatically from - the argvalues. - - :arg scope: if specified it denotes the scope of the parameters. - The scope is used for grouping tests by parameter instances. - It will also override any fixture-function defined scope, allowing - to set a dynamic scope using test context or configuration. - """ - from _pytest.fixtures import scope2index - from _pytest.mark import ParameterSet - - argnames, parameters = ParameterSet._for_parametrize( - argnames, - argvalues, - self.function, - self.config, - function_definition=self.definition, - ) - del argvalues - - if scope is None: - scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) - - self._validate_if_using_arg_names(argnames, indirect) - - arg_values_types = self._resolve_arg_value_types(argnames, indirect) - - ids = self._resolve_arg_ids(argnames, ids, parameters, item=self.definition) - - scopenum = scope2index( - scope, descr="parametrize() call in {}".format(self.function.__name__) - ) - - # create the new calls: if we are parametrize() multiple times (by applying the decorator - # more than once) then we accumulate those calls generating the cartesian product - # of all calls - newcalls = [] - for callspec in self._calls or [CallSpec2(self)]: - for param_index, (param_id, param_set) in enumerate(zip(ids, parameters)): - newcallspec = callspec.copy() - newcallspec.setmulti2( - arg_values_types, - argnames, - param_set.values, - param_id, - param_set.marks, - scopenum, - param_index, - ) - newcalls.append(newcallspec) - self._calls = newcalls - - def _resolve_arg_ids(self, argnames, ids, parameters, item): - """Resolves the actual ids for the given argnames, based on the ``ids`` parameter given - to ``parametrize``. - - :param List[str] argnames: list of argument names passed to ``parametrize()``. - :param ids: the ids parameter of the parametrized call (see docs). - :param List[ParameterSet] parameters: the list of parameter values, same size as ``argnames``. - :param Item item: the item that generated this parametrized call. 
- :rtype: List[str] - :return: the list of ids for each argname given - """ - from py.io import saferepr - - idfn = None - if callable(ids): - idfn = ids - ids = None - if ids: - func_name = self.function.__name__ - if len(ids) != len(parameters): - msg = "In {}: {} parameter sets specified, with different number of ids: {}" - fail(msg.format(func_name, len(parameters), len(ids)), pytrace=False) - for id_value in ids: - if id_value is not None and not isinstance(id_value, six.string_types): - msg = "In {}: ids must be list of strings, found: {} (type: {!r})" - fail( - msg.format(func_name, saferepr(id_value), type(id_value)), - pytrace=False, - ) - ids = idmaker(argnames, parameters, idfn, ids, self.config, item=item) - return ids - - def _resolve_arg_value_types(self, argnames, indirect): - """Resolves if each parametrized argument must be considered a parameter to a fixture or a "funcarg" - to the function, based on the ``indirect`` parameter of the parametrized() call. - - :param List[str] argnames: list of argument names passed to ``parametrize()``. - :param indirect: same ``indirect`` parameter of ``parametrize()``. - :rtype: Dict[str, str] - A dict mapping each arg name to either: - * "params" if the argname should be the parameter of a fixture of the same name. - * "funcargs" if the argname should be a parameter to the parametrized test function. - """ - valtypes = {} - if indirect is True: - valtypes = dict.fromkeys(argnames, "params") - elif indirect is False: - valtypes = dict.fromkeys(argnames, "funcargs") - elif isinstance(indirect, (tuple, list)): - valtypes = dict.fromkeys(argnames, "funcargs") - for arg in indirect: - if arg not in argnames: - fail( - "In {}: indirect fixture '{}' doesn't exist".format( - self.function.__name__, arg - ), - pytrace=False, - ) - valtypes[arg] = "params" - return valtypes - - def _validate_if_using_arg_names(self, argnames, indirect): - """ - Check if all argnames are being used, by default values, or directly/indirectly. - - :param List[str] argnames: list of argument names passed to ``parametrize()``. - :param indirect: same ``indirect`` parameter of ``parametrize()``. - :raise ValueError: if validation fails. - """ - default_arg_names = set(get_default_arg_names(self.function)) - func_name = self.function.__name__ - for arg in argnames: - if arg not in self.fixturenames: - if arg in default_arg_names: - fail( - "In {}: function already takes an argument '{}' with a default value".format( - func_name, arg - ), - pytrace=False, - ) - else: - if isinstance(indirect, (tuple, list)): - name = "fixture" if arg in indirect else "argument" - else: - name = "fixture" if indirect else "argument" - fail( - "In {}: function uses no {} '{}'".format(func_name, name, arg), - pytrace=False, - ) - - def addcall(self, funcargs=None, id=NOTSET, param=NOTSET): - """ Add a new call to the underlying test function during the collection phase of a test run. - - .. deprecated:: 3.3 - - Use :meth:`parametrize` instead. - - Note that request.addcall() is called during the test collection phase prior and - independently to actual test execution. You should only use addcall() - if you need to specify multiple arguments of a test function. - - :arg funcargs: argument keyword dictionary used when invoking - the test function. - - :arg id: used for reporting and identification purposes. If you - don't supply an `id` an automatic unique id will be generated. 
-
-        :arg param: a parameter which will be exposed to a later fixture function
-            invocation through the ``request.param`` attribute.
-        """
-        warnings.warn(deprecated.METAFUNC_ADD_CALL, stacklevel=2)
-
-        assert funcargs is None or isinstance(funcargs, dict)
-        if funcargs is not None:
-            for name in funcargs:
-                if name not in self.fixturenames:
-                    fail("funcarg %r not used in this function." % name)
-        else:
-            funcargs = {}
-        if id is None:
-            raise ValueError("id=None not allowed")
-        if id is NOTSET:
-            id = len(self._calls)
-        id = str(id)
-        if id in self._ids:
-            raise ValueError("duplicate id %r" % id)
-        self._ids.add(id)
-
-        cs = CallSpec2(self)
-        cs.setall(funcargs, id, param)
-        self._calls.append(cs)
-
-
-def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):
-    """Find the most appropriate scope for a parametrized call based on its arguments.
-
-    When there's at least one direct argument, always use "function" scope.
-
-    When a test function is parametrized and all its arguments are indirect
-    (e.g. fixtures), return the most narrow scope based on the fixtures used.
-
-    Related to issue #1832, based on code posted by @Kingdread.
-    """
-    from _pytest.fixtures import scopes
-
-    if isinstance(indirect, (list, tuple)):
-        all_arguments_are_fixtures = len(indirect) == len(argnames)
-    else:
-        all_arguments_are_fixtures = bool(indirect)
-
-    if all_arguments_are_fixtures:
-        fixturedefs = arg2fixturedefs or {}
-        used_scopes = [
-            fixturedef[0].scope
-            for name, fixturedef in fixturedefs.items()
-            if name in argnames
-        ]
-        if used_scopes:
-            # Take the narrowest scope from the used fixtures
-            for scope in reversed(scopes):
-                if scope in used_scopes:
-                    return scope
-
-    return "function"
-
-
-def _idval(val, argname, idx, idfn, item, config):
-    if idfn:
-        s = None
-        try:
-            s = idfn(val)
-        except Exception as e:
-            # See issue https://github.com/pytest-dev/pytest/issues/2169
-            msg = (
-                "While trying to determine id of parameter {} at position "
-                "{} the following exception was raised:\n".format(argname, idx)
-            )
-            msg += "  {}: {}\n".format(type(e).__name__, e)
-            msg += "This warning will be an error in pytest-4.0."
- item.warn(RemovedInPytest4Warning(msg)) - if s: - return ascii_escaped(s) - - if config: - hook_id = config.hook.pytest_make_parametrize_id( - config=config, val=val, argname=argname - ) - if hook_id: - return hook_id - - if isinstance(val, STRING_TYPES): - return ascii_escaped(val) - elif isinstance(val, (float, int, bool, NoneType)): - return str(val) - elif isinstance(val, REGEX_TYPE): - return ascii_escaped(val.pattern) - elif enum is not None and isinstance(val, enum.Enum): - return str(val) - elif (isclass(val) or isfunction(val)) and hasattr(val, "__name__"): - return val.__name__ - return str(argname) + str(idx) - - -def _idvalset(idx, parameterset, argnames, idfn, ids, item, config): - if parameterset.id is not None: - return parameterset.id - if ids is None or (idx >= len(ids) or ids[idx] is None): - this_id = [ - _idval(val, argname, idx, idfn, item=item, config=config) - for val, argname in zip(parameterset.values, argnames) - ] - return "-".join(this_id) - else: - return ascii_escaped(ids[idx]) - - -def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None): - ids = [ - _idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item) - for valindex, parameterset in enumerate(parametersets) - ] - if len(set(ids)) != len(ids): - # The ids are not unique - duplicates = [testid for testid in ids if ids.count(testid) > 1] - counters = collections.defaultdict(lambda: 0) - for index, testid in enumerate(ids): - if testid in duplicates: - ids[index] = testid + str(counters[testid]) - counters[testid] += 1 - return ids - - -def show_fixtures_per_test(config): - from _pytest.main import wrap_session - - return wrap_session(config, _show_fixtures_per_test) - - -def _show_fixtures_per_test(config, session): - import _pytest.config - - session.perform_collect() - curdir = py.path.local() - tw = _pytest.config.create_terminal_writer(config) - verbose = config.getvalue("verbose") - - def get_best_relpath(func): - loc = getlocation(func, curdir) - return curdir.bestrelpath(loc) - - def write_fixture(fixture_def): - argname = fixture_def.argname - if verbose <= 0 and argname.startswith("_"): - return - if verbose > 0: - bestrel = get_best_relpath(fixture_def.func) - funcargspec = "{} -- {}".format(argname, bestrel) - else: - funcargspec = argname - tw.line(funcargspec, green=True) - fixture_doc = fixture_def.func.__doc__ - if fixture_doc: - write_docstring(tw, fixture_doc) - else: - tw.line(" no docstring available", red=True) - - def write_item(item): - try: - info = item._fixtureinfo - except AttributeError: - # doctests items have no _fixtureinfo attribute - return - if not info.name2fixturedefs: - # this test item does not use any fixtures - return - tw.line() - tw.sep("-", "fixtures used by {}".format(item.name)) - tw.sep("-", "({})".format(get_best_relpath(item.function))) - # dict key not used in loop but needed for sorting - for _, fixturedefs in sorted(info.name2fixturedefs.items()): - assert fixturedefs is not None - if not fixturedefs: - continue - # last item is expected to be the one used by the test item - write_fixture(fixturedefs[-1]) - - for session_item in session.items: - write_item(session_item) - - -def showfixtures(config): - from _pytest.main import wrap_session - - return wrap_session(config, _showfixtures_main) - - -def _showfixtures_main(config, session): - import _pytest.config - - session.perform_collect() - curdir = py.path.local() - tw = _pytest.config.create_terminal_writer(config) - verbose = config.getvalue("verbose") - 
- fm = session._fixturemanager - - available = [] - seen = set() - - for argname, fixturedefs in fm._arg2fixturedefs.items(): - assert fixturedefs is not None - if not fixturedefs: - continue - for fixturedef in fixturedefs: - loc = getlocation(fixturedef.func, curdir) - if (fixturedef.argname, loc) in seen: - continue - seen.add((fixturedef.argname, loc)) - available.append( - ( - len(fixturedef.baseid), - fixturedef.func.__module__, - curdir.bestrelpath(loc), - fixturedef.argname, - fixturedef, - ) - ) - - available.sort() - currentmodule = None - for baseid, module, bestrel, argname, fixturedef in available: - if currentmodule != module: - if not module.startswith("_pytest."): - tw.line() - tw.sep("-", "fixtures defined from %s" % (module,)) - currentmodule = module - if verbose <= 0 and argname[0] == "_": - continue - if verbose > 0: - funcargspec = "%s -- %s" % (argname, bestrel) - else: - funcargspec = argname - tw.line(funcargspec, green=True) - loc = getlocation(fixturedef.func, curdir) - doc = fixturedef.func.__doc__ or "" - if doc: - write_docstring(tw, doc) - else: - tw.line(" %s: no docstring available" % (loc,), red=True) - - -def write_docstring(tw, doc): - INDENT = " " - doc = doc.rstrip() - if "\n" in doc: - firstline, rest = doc.split("\n", 1) - else: - firstline, rest = doc, "" - - if firstline.strip(): - tw.line(INDENT + firstline.strip()) - - if rest: - for line in dedent(rest).split("\n"): - tw.write(INDENT + line + "\n") - - -class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr): - """ a Function Item is responsible for setting up and executing a - Python test function. - """ - - _genid = None - # disable since functions handle it themselves - _ALLOW_MARKERS = False - - def __init__( - self, - name, - parent, - args=None, - config=None, - callspec=None, - callobj=NOTSET, - keywords=None, - session=None, - fixtureinfo=None, - originalname=None, - ): - super(Function, self).__init__(name, parent, config=config, session=session) - self._args = args - if callobj is not NOTSET: - self.obj = callobj - - self.keywords.update(self.obj.__dict__) - self.own_markers.extend(get_unpacked_marks(self.obj)) - if callspec: - self.callspec = callspec - # this is total hostile and a mess - # keywords are broken by design by now - # this will be redeemed later - for mark in callspec.marks: - # feel free to cry, this was broken for years before - # and keywords cant fix it per design - self.keywords[mark.name] = mark - self.own_markers.extend(normalize_mark_list(callspec.marks)) - if keywords: - self.keywords.update(keywords) - - if fixtureinfo is None: - fixtureinfo = self.session._fixturemanager.getfixtureinfo( - self, self.obj, self.cls, funcargs=not self._isyieldedfunction() - ) - self._fixtureinfo = fixtureinfo - self.fixturenames = fixtureinfo.names_closure - self._initrequest() - - #: original function name, without any decorations (for example - #: parametrization adds a ``"[...]"`` suffix to function names). - #: - #: .. 
versionadded:: 3.0 - self.originalname = originalname - - def _initrequest(self): - self.funcargs = {} - if self._isyieldedfunction(): - assert not hasattr( - self, "callspec" - ), "yielded functions (deprecated) cannot have funcargs" - else: - if hasattr(self, "callspec"): - callspec = self.callspec - assert not callspec.funcargs - self._genid = callspec.id - if hasattr(callspec, "param"): - self.param = callspec.param - self._request = fixtures.FixtureRequest(self) - - @property - def function(self): - "underlying python 'function' object" - return getimfunc(self.obj) - - def _getobj(self): - name = self.name - i = name.find("[") # parametrization - if i != -1: - name = name[:i] - return getattr(self.parent.obj, name) - - @property - def _pyfuncitem(self): - "(compatonly) for code expecting pytest-2.2 style request objects" - return self - - def _isyieldedfunction(self): - return getattr(self, "_args", None) is not None - - def runtest(self): - """ execute the underlying test function. """ - self.ihook.pytest_pyfunc_call(pyfuncitem=self) - - def setup(self): - super(Function, self).setup() - fixtures.fillfixtures(self) - - -class FunctionDefinition(Function): - """ - internal hack until we get actual definition nodes instead of the - crappy metafunc hack - """ - - def runtest(self): - raise RuntimeError("function definitions are not supposed to be used") - - setup = runtest diff --git a/venv/lib/python3.8/site-packages/_pytest/python_api.py b/venv/lib/python3.8/site-packages/_pytest/python_api.py deleted file mode 100644 index 805cd85a..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/python_api.py +++ /dev/null @@ -1,721 +0,0 @@ -import math -import pprint -import sys -from decimal import Decimal -from numbers import Number - -import six -from more_itertools.more import always_iterable -from six.moves import filterfalse -from six.moves import zip - -import _pytest._code -from _pytest.compat import isclass -from _pytest.compat import Mapping -from _pytest.compat import Sequence -from _pytest.compat import STRING_TYPES -from _pytest.outcomes import fail - -BASE_TYPE = (type, STRING_TYPES) - - -def _cmp_raises_type_error(self, other): - """__cmp__ implementation which raises TypeError. Used - by Approx base classes to implement only == and != and raise a - TypeError for other comparisons. - - Needed in Python 2 only, Python 3 all it takes is not implementing the - other operators at all. - """ - __tracebackhide__ = True - raise TypeError( - "Comparison operators other than == and != not supported by approx objects" - ) - - -def _non_numeric_type_error(value, at): - at_str = " at {}".format(at) if at else "" - return TypeError( - "cannot make approximate comparisons to non-numeric values: {!r} {}".format( - value, at_str - ) - ) - - -# builtin pytest.approx helper - - -class ApproxBase(object): - """ - Provide shared utilities for making approximate comparisons between numbers - or sequences of numbers. - """ - - # Tell numpy to use our `__eq__` operator instead of its. 
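-    # (``__array_ufunc__ = None`` makes numpy ufuncs return NotImplemented
-    # for this class, so comparisons fall back to our reflected operators.)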
- __array_ufunc__ = None - __array_priority__ = 100 - - def __init__(self, expected, rel=None, abs=None, nan_ok=False): - __tracebackhide__ = True - self.expected = expected - self.abs = abs - self.rel = rel - self.nan_ok = nan_ok - self._check_type() - - def __repr__(self): - raise NotImplementedError - - def __eq__(self, actual): - return all( - a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) - ) - - __hash__ = None - - def __ne__(self, actual): - return not (actual == self) - - if sys.version_info[0] == 2: - __cmp__ = _cmp_raises_type_error - - def _approx_scalar(self, x): - return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) - - def _yield_comparisons(self, actual): - """ - Yield all the pairs of numbers to be compared. This is used to - implement the `__eq__` method. - """ - raise NotImplementedError - - def _check_type(self): - """ - Raise a TypeError if the expected value is not a valid type. - """ - # This is only a concern if the expected value is a sequence. In every - # other case, the approx() function ensures that the expected value has - # a numeric type. For this reason, the default is to do nothing. The - # classes that deal with sequences should reimplement this method to - # raise if there are any non-numeric elements in the sequence. - pass - - -def _recursive_list_map(f, x): - if isinstance(x, list): - return list(_recursive_list_map(f, xi) for xi in x) - else: - return f(x) - - -class ApproxNumpy(ApproxBase): - """ - Perform approximate comparisons where the expected value is numpy array. - """ - - def __repr__(self): - list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist()) - return "approx({!r})".format(list_scalars) - - if sys.version_info[0] == 2: - __cmp__ = _cmp_raises_type_error - - def __eq__(self, actual): - import numpy as np - - # self.expected is supposed to always be an array here - - if not np.isscalar(actual): - try: - actual = np.asarray(actual) - except: # noqa - raise TypeError("cannot compare '{}' to numpy.ndarray".format(actual)) - - if not np.isscalar(actual) and actual.shape != self.expected.shape: - return False - - return ApproxBase.__eq__(self, actual) - - def _yield_comparisons(self, actual): - import numpy as np - - # `actual` can either be a numpy array or a scalar, it is treated in - # `__eq__` before being passed to `ApproxBase.__eq__`, which is the - # only method that calls this one. - - if np.isscalar(actual): - for i in np.ndindex(self.expected.shape): - yield actual, np.asscalar(self.expected[i]) - else: - for i in np.ndindex(self.expected.shape): - yield np.asscalar(actual[i]), np.asscalar(self.expected[i]) - - -class ApproxMapping(ApproxBase): - """ - Perform approximate comparisons where the expected value is a mapping with - numeric values (the keys can be anything). 
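-
-    For example (a hedged sketch; ``math`` is imported at module level)::
-
-        assert {"pi": 3.1415926} == approx({"pi": math.pi}, rel=1e-6)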
- """ - - def __repr__(self): - return "approx({!r})".format( - {k: self._approx_scalar(v) for k, v in self.expected.items()} - ) - - def __eq__(self, actual): - if set(actual.keys()) != set(self.expected.keys()): - return False - - return ApproxBase.__eq__(self, actual) - - def _yield_comparisons(self, actual): - for k in self.expected.keys(): - yield actual[k], self.expected[k] - - def _check_type(self): - __tracebackhide__ = True - for key, value in self.expected.items(): - if isinstance(value, type(self.expected)): - msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}" - raise TypeError(msg.format(key, value, pprint.pformat(self.expected))) - elif not isinstance(value, Number): - raise _non_numeric_type_error(self.expected, at="key={!r}".format(key)) - - -class ApproxSequence(ApproxBase): - """ - Perform approximate comparisons where the expected value is a sequence of - numbers. - """ - - def __repr__(self): - seq_type = type(self.expected) - if seq_type not in (tuple, list, set): - seq_type = list - return "approx({!r})".format( - seq_type(self._approx_scalar(x) for x in self.expected) - ) - - def __eq__(self, actual): - if len(actual) != len(self.expected): - return False - return ApproxBase.__eq__(self, actual) - - def _yield_comparisons(self, actual): - return zip(actual, self.expected) - - def _check_type(self): - __tracebackhide__ = True - for index, x in enumerate(self.expected): - if isinstance(x, type(self.expected)): - msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}" - raise TypeError(msg.format(x, index, pprint.pformat(self.expected))) - elif not isinstance(x, Number): - raise _non_numeric_type_error( - self.expected, at="index {}".format(index) - ) - - -class ApproxScalar(ApproxBase): - """ - Perform approximate comparisons where the expected value is a single number. - """ - - DEFAULT_ABSOLUTE_TOLERANCE = 1e-12 - DEFAULT_RELATIVE_TOLERANCE = 1e-6 - - def __repr__(self): - """ - Return a string communicating both the expected value and the tolerance - for the comparison being made, e.g. '1.0 +- 1e-6'. Use the unicode - plus/minus symbol if this is python3 (it's too hard to get right for - python2). - """ - if isinstance(self.expected, complex): - return str(self.expected) - - # Infinities aren't compared using tolerances, so don't show a - # tolerance. - if math.isinf(self.expected): - return str(self.expected) - - # If a sensible tolerance can't be calculated, self.tolerance will - # raise a ValueError. In this case, display '???'. - try: - vetted_tolerance = "{:.1e}".format(self.tolerance) - except ValueError: - vetted_tolerance = "???" - - if sys.version_info[0] == 2: - return "{} +- {}".format(self.expected, vetted_tolerance) - else: - return u"{} \u00b1 {}".format(self.expected, vetted_tolerance) - - def __eq__(self, actual): - """ - Return true if the given value is equal to the expected value within - the pre-specified tolerance. - """ - if _is_numpy_array(actual): - # Call ``__eq__()`` manually to prevent infinite-recursion with - # numpy<1.13. See #3748. - return all(self.__eq__(a) for a in actual.flat) - - # Short-circuit exact equality. - if actual == self.expected: - return True - - # Allow the user to control whether NaNs are considered equal to each - # other or not. The abs() calls are for compatibility with complex - # numbers. 
- if math.isnan(abs(self.expected)): - return self.nan_ok and math.isnan(abs(actual)) - - # Infinity shouldn't be approximately equal to anything but itself, but - # if there's a relative tolerance, it will be infinite and infinity - # will seem approximately equal to everything. The equal-to-itself - # case would have been short circuited above, so here we can just - # return false if the expected value is infinite. The abs() call is - # for compatibility with complex numbers. - if math.isinf(abs(self.expected)): - return False - - # Return true if the two numbers are within the tolerance. - return abs(self.expected - actual) <= self.tolerance - - __hash__ = None - - @property - def tolerance(self): - """ - Return the tolerance for the comparison. This could be either an - absolute tolerance or a relative tolerance, depending on what the user - specified or which would be larger. - """ - - def set_default(x, default): - return x if x is not None else default - - # Figure out what the absolute tolerance should be. ``self.abs`` is - # either None or a value specified by the user. - absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) - - if absolute_tolerance < 0: - raise ValueError( - "absolute tolerance can't be negative: {}".format(absolute_tolerance) - ) - if math.isnan(absolute_tolerance): - raise ValueError("absolute tolerance can't be NaN.") - - # If the user specified an absolute tolerance but not a relative one, - # just return the absolute tolerance. - if self.rel is None: - if self.abs is not None: - return absolute_tolerance - - # Figure out what the relative tolerance should be. ``self.rel`` is - # either None or a value specified by the user. This is done after - # we've made sure the user didn't ask for an absolute tolerance only, - # because we don't want to raise errors about the relative tolerance if - # we aren't even going to use it. - relative_tolerance = set_default( - self.rel, self.DEFAULT_RELATIVE_TOLERANCE - ) * abs(self.expected) - - if relative_tolerance < 0: - raise ValueError( - "relative tolerance can't be negative: {}".format(absolute_tolerance) - ) - if math.isnan(relative_tolerance): - raise ValueError("relative tolerance can't be NaN.") - - # Return the larger of the relative and absolute tolerances. - return max(relative_tolerance, absolute_tolerance) - - -class ApproxDecimal(ApproxScalar): - """ - Perform approximate comparisons where the expected value is a decimal. - """ - - DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") - DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") - - -def approx(expected, rel=None, abs=None, nan_ok=False): - """ - Assert that two numbers (or two sets of numbers) are equal to each other - within some tolerance. - - Due to the `intricacies of floating-point arithmetic`__, numbers that we - would intuitively expect to be equal are not always so:: - - >>> 0.1 + 0.2 == 0.3 - False - - __ https://docs.python.org/3/tutorial/floatingpoint.html - - This problem is commonly encountered when writing tests, e.g. when making - sure that floating-point values are what you expect them to be. One way to - deal with this problem is to assert that two floating-point numbers are - equal to within some appropriate tolerance:: - - >>> abs((0.1 + 0.2) - 0.3) < 1e-6 - True - - However, comparisons like this are tedious to write and difficult to - understand. Furthermore, absolute comparisons like the one above are - usually discouraged because there's no tolerance that works well for all - situations. 
``1e-6`` is good for numbers around ``1``, but too small for - very big numbers and too big for very small ones. It's better to express - the tolerance as a fraction of the expected value, but relative comparisons - like that are even more difficult to write correctly and concisely. - - The ``approx`` class performs floating-point comparisons using a syntax - that's as intuitive as possible:: - - >>> from pytest import approx - >>> 0.1 + 0.2 == approx(0.3) - True - - The same syntax also works for sequences of numbers:: - - >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) - True - - Dictionary *values*:: - - >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) - True - - ``numpy`` arrays:: - - >>> import numpy as np # doctest: +SKIP - >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP - True - - And for a ``numpy`` array against a scalar:: - - >>> import numpy as np # doctest: +SKIP - >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP - True - - By default, ``approx`` considers numbers within a relative tolerance of - ``1e-6`` (i.e. one part in a million) of its expected value to be equal. - This treatment would lead to surprising results if the expected value was - ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. - To handle this case less surprisingly, ``approx`` also considers numbers - within an absolute tolerance of ``1e-12`` of its expected value to be - equal. Infinity and NaN are special cases. Infinity is only considered - equal to itself, regardless of the relative tolerance. NaN is not - considered equal to anything by default, but you can make it be equal to - itself by setting the ``nan_ok`` argument to True. (This is meant to - facilitate comparing arrays that use NaN to mean "no data".) - - Both the relative and absolute tolerances can be changed by passing - arguments to the ``approx`` constructor:: - - >>> 1.0001 == approx(1) - False - >>> 1.0001 == approx(1, rel=1e-3) - True - >>> 1.0001 == approx(1, abs=1e-3) - True - - If you specify ``abs`` but not ``rel``, the comparison will not consider - the relative tolerance at all. In other words, two numbers that are within - the default relative tolerance of ``1e-6`` will still be considered unequal - if they exceed the specified absolute tolerance. If you specify both - ``abs`` and ``rel``, the numbers will be considered equal if either - tolerance is met:: - - >>> 1 + 1e-8 == approx(1) - True - >>> 1 + 1e-8 == approx(1, abs=1e-12) - False - >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) - True - - If you're thinking about using ``approx``, then you might want to know how - it compares to other good ways of comparing floating-point numbers. All of - these algorithms are based on relative and absolute tolerances and should - agree for the most part, but they do have meaningful differences: - - - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative - tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute - tolerance is met. Because the relative tolerance is calculated w.r.t. - both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor - ``b`` is a "reference value"). You have to specify an absolute tolerance - if you want to compare to ``0.0`` because there is no tolerance by - default. Only available in python>=3.5. 
`More information...`__ - - __ https://docs.python.org/3/library/math.html#math.isclose - - - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference - between ``a`` and ``b`` is less that the sum of the relative tolerance - w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance - is only calculated w.r.t. ``b``, this test is asymmetric and you can - think of ``b`` as the reference value. Support for comparing sequences - is provided by ``numpy.allclose``. `More information...`__ - - __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html - - - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b`` - are within an absolute tolerance of ``1e-7``. No relative tolerance is - considered and the absolute tolerance cannot be changed, so this function - is not appropriate for very large or very small numbers. Also, it's only - available in subclasses of ``unittest.TestCase`` and it's ugly because it - doesn't follow PEP8. `More information...`__ - - __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual - - - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative - tolerance is met w.r.t. ``b`` or if the absolute tolerance is met. - Because the relative tolerance is only calculated w.r.t. ``b``, this test - is asymmetric and you can think of ``b`` as the reference value. In the - special case that you explicitly specify an absolute tolerance but not a - relative tolerance, only the absolute tolerance is considered. - - .. warning:: - - .. versionchanged:: 3.2 - - In order to avoid inconsistent behavior, ``TypeError`` is - raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons. - The example below illustrates the problem:: - - assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10) - assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10) - - In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)`` - to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to - comparison. This is because the call hierarchy of rich comparisons - follows a fixed behavior. `More information...`__ - - __ https://docs.python.org/3/reference/datamodel.html#object.__ge__ - """ - - # Delegate the comparison to a class that knows how to deal with the type - # of the expected value (e.g. int, float, list, dict, numpy.array, etc). - # - # The primary responsibility of these classes is to implement ``__eq__()`` - # and ``__repr__()``. The former is used to actually check if some - # "actual" value is equivalent to the given expected value within the - # allowed tolerance. The latter is used to show the user the expected - # value and tolerance, in the case that a test failed. - # - # The actual logic for making approximate comparisons can be found in - # ApproxScalar, which is used to compare individual numbers. All of the - # other Approx classes eventually delegate to this class. The ApproxBase - # class provides some convenient methods and overloads, but isn't really - # essential. 
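# Illustrative sketch (editor's example, not part of the deleted file): a
# minimal use of the tolerance rules documented in the ``approx`` docstring
# above, assuming a pytest of this vintage is installed.
import pytest

assert 100.05 == pytest.approx(100, rel=1e-3)   # rel: 1e-3 of 100 is 0.1
assert 0.0001 == pytest.approx(0, abs=1e-3)     # abs only: relative check is off
assert float("nan") == pytest.approx(float("nan"), nan_ok=True)  # NaN opt-in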
- - __tracebackhide__ = True - - if isinstance(expected, Decimal): - cls = ApproxDecimal - elif isinstance(expected, Number): - cls = ApproxScalar - elif isinstance(expected, Mapping): - cls = ApproxMapping - elif isinstance(expected, Sequence) and not isinstance(expected, STRING_TYPES): - cls = ApproxSequence - elif _is_numpy_array(expected): - cls = ApproxNumpy - else: - raise _non_numeric_type_error(expected, at=None) - - return cls(expected, rel, abs, nan_ok) - - -def _is_numpy_array(obj): - """ - Return true if the given object is a numpy array. Make a special effort to - avoid importing numpy unless it's really necessary. - """ - import sys - - np = sys.modules.get("numpy") - if np is not None: - return isinstance(obj, np.ndarray) - return False - - -# builtin pytest.raises helper - - -def raises(expected_exception, *args, **kwargs): - r""" - Assert that a code block/function call raises ``expected_exception`` - and raise a failure exception otherwise. - - :arg message: if specified, provides a custom failure message if the - exception is not raised - :arg match: if specified, asserts that the exception matches a text or regex - - This helper produces a ``ExceptionInfo()`` object (see below). - - You may use this function as a context manager:: - - >>> with raises(ZeroDivisionError): - ... 1/0 - - .. versionchanged:: 2.10 - - In the context manager form you may use the keyword argument - ``message`` to specify a custom failure message:: - - >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"): - ... pass - Traceback (most recent call last): - ... - Failed: Expecting ZeroDivisionError - - .. note:: - - When using ``pytest.raises`` as a context manager, it's worthwhile to - note that normal context manager rules apply and that the exception - raised *must* be the final line in the scope of the context manager. - Lines of code after that, within the scope of the context manager will - not be executed. For example:: - - >>> value = 15 - >>> with raises(ValueError) as exc_info: - ... if value > 10: - ... raise ValueError("value must be <= 10") - ... assert exc_info.type == ValueError # this will not execute - - Instead, the following approach must be taken (note the difference in - scope):: - - >>> with raises(ValueError) as exc_info: - ... if value > 10: - ... raise ValueError("value must be <= 10") - ... - >>> assert exc_info.type == ValueError - - - Since version ``3.1`` you can use the keyword argument ``match`` to assert that the - exception matches a text or regex:: - - >>> with raises(ValueError, match='must be 0 or None'): - ... raise ValueError("value must be 0 or None") - - >>> with raises(ValueError, match=r'must be \d+$'): - ... raise ValueError("value must be 42") - - **Legacy forms** - - The forms below are fully supported but are discouraged for new code because the - context manager form is regarded as more readable and less error-prone. - - It is possible to specify a callable by passing a to-be-called lambda:: - - >>> raises(ZeroDivisionError, lambda: 1/0) - - - or you can specify an arbitrary callable with arguments:: - - >>> def f(x): return 1/x - ... - >>> raises(ZeroDivisionError, f, 0) - - >>> raises(ZeroDivisionError, f, x=0) - - - It is also possible to pass a string to be evaluated at runtime:: - - >>> raises(ZeroDivisionError, "f(0)") - - - The string will be evaluated using the same ``locals()`` and ``globals()`` - at the moment of the ``raises`` call. - - .. 
currentmodule:: _pytest._code - - Consult the API of ``excinfo`` objects: :class:`ExceptionInfo`. - - .. note:: - Similar to caught exception objects in Python, explicitly clearing - local references to returned ``ExceptionInfo`` objects can - help the Python interpreter speed up its garbage collection. - - Clearing those references breaks a reference cycle - (``ExceptionInfo`` --> caught exception --> frame stack raising - the exception --> current frame stack --> local variables --> - ``ExceptionInfo``) which makes Python keep all objects referenced - from that cycle (including all local variables in the current - frame) alive until the next cyclic garbage collection run. See the - official Python ``try`` statement documentation for more detailed - information. - - """ - __tracebackhide__ = True - for exc in filterfalse(isclass, always_iterable(expected_exception, BASE_TYPE)): - msg = ( - "exceptions must be old-style classes or" - " derived from BaseException, not %s" - ) - raise TypeError(msg % type(exc)) - - message = "DID NOT RAISE {}".format(expected_exception) - match_expr = None - - if not args: - if "message" in kwargs: - message = kwargs.pop("message") - if "match" in kwargs: - match_expr = kwargs.pop("match") - if kwargs: - msg = "Unexpected keyword arguments passed to pytest.raises: " - msg += ", ".join(kwargs.keys()) - raise TypeError(msg) - return RaisesContext(expected_exception, message, match_expr) - elif isinstance(args[0], str): - code, = args - assert isinstance(code, str) - frame = sys._getframe(1) - loc = frame.f_locals.copy() - loc.update(kwargs) - # print "raises frame scope: %r" % frame.f_locals - try: - code = _pytest._code.Source(code).compile() - six.exec_(code, frame.f_globals, loc) - # XXX didn't mean f_globals == f_locals something special? - # this is destroyed here ... - except expected_exception: - return _pytest._code.ExceptionInfo() - else: - func = args[0] - try: - func(*args[1:], **kwargs) - except expected_exception: - return _pytest._code.ExceptionInfo() - fail(message) - - -raises.Exception = fail.Exception - - -class RaisesContext(object): - def __init__(self, expected_exception, message, match_expr): - self.expected_exception = expected_exception - self.message = message - self.match_expr = match_expr - self.excinfo = None - - def __enter__(self): - self.excinfo = object.__new__(_pytest._code.ExceptionInfo) - return self.excinfo - - def __exit__(self, *tp): - __tracebackhide__ = True - if tp[0] is None: - fail(self.message) - self.excinfo.__init__(tp) - suppress_exception = issubclass(self.excinfo.type, self.expected_exception) - if sys.version_info[0] == 2 and suppress_exception: - sys.exc_clear() - if self.match_expr and suppress_exception: - self.excinfo.match(self.match_expr) - return suppress_exception diff --git a/venv/lib/python3.8/site-packages/_pytest/recwarn.py b/venv/lib/python3.8/site-packages/_pytest/recwarn.py deleted file mode 100644 index 4f3ab7f2..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/recwarn.py +++ /dev/null @@ -1,241 +0,0 @@ -""" recording warnings during test function execution. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import inspect -import re -import sys -import warnings - -import six - -import _pytest._code -from _pytest.fixtures import yield_fixture -from _pytest.outcomes import fail - - -@yield_fixture -def recwarn(): - """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. 
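# Illustrative sketch (editor's example, not part of the deleted file): the
# ``raises`` context-manager form documented above, including the ``match``
# keyword and the ExceptionInfo object filled in by RaisesContext.__exit__.
import pytest

def test_zero_division():
    with pytest.raises(ZeroDivisionError) as exc_info:
        1 / 0
    assert exc_info.type == ZeroDivisionError

def test_message_is_checked():
    with pytest.raises(ValueError, match=r"must be \d+$"):
        raise ValueError("value must be 42")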
- - See http://docs.python.org/library/warnings.html for information - on warning categories. - """ - wrec = WarningsRecorder() - with wrec: - warnings.simplefilter("default") - yield wrec - - -def deprecated_call(func=None, *args, **kwargs): - """context manager that can be used to ensure a block of code triggers a - ``DeprecationWarning`` or ``PendingDeprecationWarning``:: - - >>> import warnings - >>> def api_call_v2(): - ... warnings.warn('use v3 of this api', DeprecationWarning) - ... return 200 - - >>> with deprecated_call(): - ... assert api_call_v2() == 200 - - ``deprecated_call`` can also be used by passing a function and ``*args`` and ``*kwargs``, - in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warnings - types above. - """ - __tracebackhide__ = True - if func is not None: - args = (func,) + args - return warns((DeprecationWarning, PendingDeprecationWarning), *args, **kwargs) - - -def warns(expected_warning, *args, **kwargs): - r"""Assert that code raises a particular class of warning. - - Specifically, the parameter ``expected_warning`` can be a warning class or - sequence of warning classes, and the inside the ``with`` block must issue a warning of that class or - classes. - - This helper produces a list of :class:`warnings.WarningMessage` objects, - one for each warning raised. - - This function can be used as a context manager, or any of the other ways - ``pytest.raises`` can be used:: - - >>> with warns(RuntimeWarning): - ... warnings.warn("my warning", RuntimeWarning) - - In the context manager form you may use the keyword argument ``match`` to assert - that the exception matches a text or regex:: - - >>> with warns(UserWarning, match='must be 0 or None'): - ... warnings.warn("value must be 0 or None", UserWarning) - - >>> with warns(UserWarning, match=r'must be \d+$'): - ... warnings.warn("value must be 42", UserWarning) - - >>> with warns(UserWarning, match=r'must be \d+$'): - ... warnings.warn("this is not here", UserWarning) - Traceback (most recent call last): - ... - Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted... - - """ - __tracebackhide__ = True - match_expr = None - if not args: - if "match" in kwargs: - match_expr = kwargs.pop("match") - return WarningsChecker(expected_warning, match_expr=match_expr) - elif isinstance(args[0], str): - code, = args - assert isinstance(code, str) - frame = sys._getframe(1) - loc = frame.f_locals.copy() - loc.update(kwargs) - - with WarningsChecker(expected_warning, match_expr=match_expr): - code = _pytest._code.Source(code).compile() - six.exec_(code, frame.f_globals, loc) - else: - func = args[0] - with WarningsChecker(expected_warning, match_expr=match_expr): - return func(*args[1:], **kwargs) - - -class WarningsRecorder(warnings.catch_warnings): - """A context manager to record raised warnings. - - Adapted from `warnings.catch_warnings`. 
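# Illustrative sketch (editor's example, not part of the deleted file):
# ``warns`` and the ``recwarn`` fixture documented above.
import warnings
import pytest

def test_warns_context_manager():
    with pytest.warns(UserWarning, match="beta"):
        warnings.warn("beta feature", UserWarning)

def test_recwarn_fixture(recwarn):
    warnings.warn("use v3 of this api", DeprecationWarning)
    assert len(recwarn) == 1
    assert recwarn.pop(DeprecationWarning)  # raises AssertionError if absent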
- """ - - def __init__(self): - super(WarningsRecorder, self).__init__(record=True) - self._entered = False - self._list = [] - - @property - def list(self): - """The list of recorded warnings.""" - return self._list - - def __getitem__(self, i): - """Get a recorded warning by index.""" - return self._list[i] - - def __iter__(self): - """Iterate through the recorded warnings.""" - return iter(self._list) - - def __len__(self): - """The number of recorded warnings.""" - return len(self._list) - - def pop(self, cls=Warning): - """Pop the first recorded warning, raise exception if not exists.""" - for i, w in enumerate(self._list): - if issubclass(w.category, cls): - return self._list.pop(i) - __tracebackhide__ = True - raise AssertionError("%r not found in warning list" % cls) - - def clear(self): - """Clear the list of recorded warnings.""" - self._list[:] = [] - - def __enter__(self): - if self._entered: - __tracebackhide__ = True - raise RuntimeError("Cannot enter %r twice" % self) - self._list = super(WarningsRecorder, self).__enter__() - warnings.simplefilter("always") - # python3 keeps track of a "filter version", when the filters are - # updated previously seen warnings can be re-warned. python2 has no - # concept of this so we must reset the warnings registry manually. - # trivial patching of `warnings.warn` seems to be enough somehow? - if six.PY2: - - def warn(message, category=None, stacklevel=1): - # duplicate the stdlib logic due to - # bad handing in the c version of warnings - if isinstance(message, Warning): - category = message.__class__ - # Check category argument - if category is None: - category = UserWarning - assert issubclass(category, Warning) - - # emulate resetting the warn registry - f_globals = sys._getframe(stacklevel).f_globals - if "__warningregistry__" in f_globals: - orig = f_globals["__warningregistry__"] - f_globals["__warningregistry__"] = None - try: - return self._saved_warn(message, category, stacklevel + 1) - finally: - f_globals["__warningregistry__"] = orig - else: - return self._saved_warn(message, category, stacklevel + 1) - - warnings.warn, self._saved_warn = warn, warnings.warn - return self - - def __exit__(self, *exc_info): - if not self._entered: - __tracebackhide__ = True - raise RuntimeError("Cannot exit %r without entering first" % self) - # see above where `self._saved_warn` is assigned - if six.PY2: - warnings.warn = self._saved_warn - super(WarningsRecorder, self).__exit__(*exc_info) - - -class WarningsChecker(WarningsRecorder): - def __init__(self, expected_warning=None, match_expr=None): - super(WarningsChecker, self).__init__() - - msg = "exceptions must be old-style classes or derived from Warning, not %s" - if isinstance(expected_warning, tuple): - for exc in expected_warning: - if not inspect.isclass(exc): - raise TypeError(msg % type(exc)) - elif inspect.isclass(expected_warning): - expected_warning = (expected_warning,) - elif expected_warning is not None: - raise TypeError(msg % type(expected_warning)) - - self.expected_warning = expected_warning - self.match_expr = match_expr - - def __exit__(self, *exc_info): - super(WarningsChecker, self).__exit__(*exc_info) - - __tracebackhide__ = True - - # only check if we're not currently handling an exception - if all(a is None for a in exc_info): - if self.expected_warning is not None: - if not any(issubclass(r.category, self.expected_warning) for r in self): - __tracebackhide__ = True - fail( - "DID NOT WARN. No warnings of type {} was emitted. 
" - "The list of emitted warnings is: {}.".format( - self.expected_warning, [each.message for each in self] - ) - ) - elif self.match_expr is not None: - for r in self: - if issubclass(r.category, self.expected_warning): - if re.compile(self.match_expr).search(str(r.message)): - break - else: - fail( - "DID NOT WARN. No warnings of type {} matching" - " ('{}') was emitted. The list of emitted warnings" - " is: {}.".format( - self.expected_warning, - self.match_expr, - [each.message for each in self], - ) - ) diff --git a/venv/lib/python3.8/site-packages/_pytest/reports.py b/venv/lib/python3.8/site-packages/_pytest/reports.py deleted file mode 100644 index b2010cc2..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/reports.py +++ /dev/null @@ -1,197 +0,0 @@ -import py - -from _pytest._code.code import TerminalRepr - - -def getslaveinfoline(node): - try: - return node._slaveinfocache - except AttributeError: - d = node.slaveinfo - ver = "%s.%s.%s" % d["version_info"][:3] - node._slaveinfocache = s = "[%s] %s -- Python %s %s" % ( - d["id"], - d["sysplatform"], - ver, - d["executable"], - ) - return s - - -class BaseReport(object): - def __init__(self, **kw): - self.__dict__.update(kw) - - def toterminal(self, out): - if hasattr(self, "node"): - out.line(getslaveinfoline(self.node)) - - longrepr = self.longrepr - if longrepr is None: - return - - if hasattr(longrepr, "toterminal"): - longrepr.toterminal(out) - else: - try: - out.line(longrepr) - except UnicodeEncodeError: - out.line("") - - def get_sections(self, prefix): - for name, content in self.sections: - if name.startswith(prefix): - yield prefix, content - - @property - def longreprtext(self): - """ - Read-only property that returns the full string representation - of ``longrepr``. - - .. versionadded:: 3.0 - """ - tw = py.io.TerminalWriter(stringio=True) - tw.hasmarkup = False - self.toterminal(tw) - exc = tw.stringio.getvalue() - return exc.strip() - - @property - def caplog(self): - """Return captured log lines, if log capturing is enabled - - .. versionadded:: 3.5 - """ - return "\n".join( - content for (prefix, content) in self.get_sections("Captured log") - ) - - @property - def capstdout(self): - """Return captured text from stdout, if capturing is enabled - - .. versionadded:: 3.0 - """ - return "".join( - content for (prefix, content) in self.get_sections("Captured stdout") - ) - - @property - def capstderr(self): - """Return captured text from stderr, if capturing is enabled - - .. versionadded:: 3.0 - """ - return "".join( - content for (prefix, content) in self.get_sections("Captured stderr") - ) - - passed = property(lambda x: x.outcome == "passed") - failed = property(lambda x: x.outcome == "failed") - skipped = property(lambda x: x.outcome == "skipped") - - @property - def fspath(self): - return self.nodeid.split("::")[0] - - -class TestReport(BaseReport): - """ Basic test report object (also used for setup and teardown calls if - they fail). - """ - - def __init__( - self, - nodeid, - location, - keywords, - outcome, - longrepr, - when, - sections=(), - duration=0, - user_properties=None, - **extra - ): - #: normalized collection node id - self.nodeid = nodeid - - #: a (filesystempath, lineno, domaininfo) tuple indicating the - #: actual location of a test item - it might be different from the - #: collected one e.g. if a method is inherited from a different module. - self.location = location - - #: a name -> value dictionary containing all keywords and - #: markers associated with a test invocation. 
- self.keywords = keywords - - #: test outcome, always one of "passed", "failed", "skipped". - self.outcome = outcome - - #: None or a failure representation. - self.longrepr = longrepr - - #: one of 'setup', 'call', 'teardown' to indicate runtest phase. - self.when = when - - #: user properties is a list of tuples (name, value) that holds user - #: defined properties of the test - self.user_properties = list(user_properties or []) - - #: list of pairs ``(str, str)`` of extra information which needs to - #: marshallable. Used by pytest to add captured text - #: from ``stdout`` and ``stderr``, but may be used by other plugins - #: to add arbitrary information to reports. - self.sections = list(sections) - - #: time it took to run just the test - self.duration = duration - - self.__dict__.update(extra) - - def __repr__(self): - return "" % ( - self.nodeid, - self.when, - self.outcome, - ) - - -class TeardownErrorReport(BaseReport): - outcome = "failed" - when = "teardown" - - def __init__(self, longrepr, **extra): - self.longrepr = longrepr - self.sections = [] - self.__dict__.update(extra) - - -class CollectReport(BaseReport): - def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra): - self.nodeid = nodeid - self.outcome = outcome - self.longrepr = longrepr - self.result = result or [] - self.sections = list(sections) - self.__dict__.update(extra) - - @property - def location(self): - return (self.fspath, None, self.fspath) - - def __repr__(self): - return "" % ( - self.nodeid, - len(self.result), - self.outcome, - ) - - -class CollectErrorRepr(TerminalRepr): - def __init__(self, msg): - self.longrepr = msg - - def toterminal(self, out): - out.line(self.longrepr, red=True) diff --git a/venv/lib/python3.8/site-packages/_pytest/resultlog.py b/venv/lib/python3.8/site-packages/_pytest/resultlog.py deleted file mode 100644 index 3efdbea6..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/resultlog.py +++ /dev/null @@ -1,123 +0,0 @@ -""" log machine-parseable test session result information in a plain -text file. 
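# Illustrative sketch (editor's example, not part of the deleted file): how a
# plugin might consume the TestReport attributes documented above, e.g. from a
# project conftest.py.
def pytest_runtest_logreport(report):
    # one report per phase: "setup", "call", "teardown"
    if report.when == "call" and report.failed:
        print(report.nodeid, report.duration)
        print(report.longreprtext)  # full text of the failure representation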
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -import py - - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting", "resultlog plugin options") - group.addoption( - "--resultlog", - "--result-log", - action="store", - metavar="path", - default=None, - help="DEPRECATED path for machine-readable result log.", - ) - - -def pytest_configure(config): - resultlog = config.option.resultlog - # prevent opening resultlog on slave nodes (xdist) - if resultlog and not hasattr(config, "slaveinput"): - dirname = os.path.dirname(os.path.abspath(resultlog)) - if not os.path.isdir(dirname): - os.makedirs(dirname) - logfile = open(resultlog, "w", 1) # line buffered - config._resultlog = ResultLog(config, logfile) - config.pluginmanager.register(config._resultlog) - - from _pytest.deprecated import RESULT_LOG - from _pytest.warnings import _issue_config_warning - - _issue_config_warning(RESULT_LOG, config) - - -def pytest_unconfigure(config): - resultlog = getattr(config, "_resultlog", None) - if resultlog: - resultlog.logfile.close() - del config._resultlog - config.pluginmanager.unregister(resultlog) - - -def generic_path(item): - chain = item.listchain() - gpath = [chain[0].name] - fspath = chain[0].fspath - fspart = False - for node in chain[1:]: - newfspath = node.fspath - if newfspath == fspath: - if fspart: - gpath.append(":") - fspart = False - else: - gpath.append(".") - else: - gpath.append("/") - fspart = True - name = node.name - if name[0] in "([": - gpath.pop() - gpath.append(name) - fspath = newfspath - return "".join(gpath) - - -class ResultLog(object): - def __init__(self, config, logfile): - self.config = config - self.logfile = logfile # preferably line buffered - - def write_log_entry(self, testpath, lettercode, longrepr): - print("%s %s" % (lettercode, testpath), file=self.logfile) - for line in longrepr.splitlines(): - print(" %s" % line, file=self.logfile) - - def log_outcome(self, report, lettercode, longrepr): - testpath = getattr(report, "nodeid", None) - if testpath is None: - testpath = report.fspath - self.write_log_entry(testpath, lettercode, longrepr) - - def pytest_runtest_logreport(self, report): - if report.when != "call" and report.passed: - return - res = self.config.hook.pytest_report_teststatus(report=report) - code = res[1] - if code == "x": - longrepr = str(report.longrepr) - elif code == "X": - longrepr = "" - elif report.passed: - longrepr = "" - elif report.failed: - longrepr = str(report.longrepr) - elif report.skipped: - longrepr = str(report.longrepr[2]) - self.log_outcome(report, code, longrepr) - - def pytest_collectreport(self, report): - if not report.passed: - if report.failed: - code = "F" - longrepr = str(report.longrepr) - else: - assert report.skipped - code = "S" - longrepr = "%s:%d: %s" % report.longrepr - self.log_outcome(report, code, longrepr) - - def pytest_internalerror(self, excrepr): - reprcrash = getattr(excrepr, "reprcrash", None) - path = getattr(reprcrash, "path", None) - if path is None: - path = "cwd:%s" % py.path.local() - self.write_log_entry(path, "!", str(excrepr)) diff --git a/venv/lib/python3.8/site-packages/_pytest/runner.py b/venv/lib/python3.8/site-packages/_pytest/runner.py deleted file mode 100644 index 4d4b06d7..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/runner.py +++ /dev/null @@ -1,394 +0,0 @@ -""" basic collect and runtest protocol implementations """ -from __future__ import absolute_import -from 
__future__ import division -from __future__ import print_function - -import bdb -import os -import sys -from time import time - -import six - -from .reports import CollectErrorRepr -from .reports import CollectReport -from .reports import TestReport -from _pytest._code.code import ExceptionInfo -from _pytest.outcomes import skip -from _pytest.outcomes import Skipped -from _pytest.outcomes import TEST_OUTCOME - -# -# pytest plugin hooks - - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting", "reporting", after="general") - group.addoption( - "--durations", - action="store", - type=int, - default=None, - metavar="N", - help="show N slowest setup/test durations (N=0 for all).", - ), - - -def pytest_terminal_summary(terminalreporter): - durations = terminalreporter.config.option.durations - verbose = terminalreporter.config.getvalue("verbose") - if durations is None: - return - tr = terminalreporter - dlist = [] - for replist in tr.stats.values(): - for rep in replist: - if hasattr(rep, "duration"): - dlist.append(rep) - if not dlist: - return - dlist.sort(key=lambda x: x.duration) - dlist.reverse() - if not durations: - tr.write_sep("=", "slowest test durations") - else: - tr.write_sep("=", "slowest %s test durations" % durations) - dlist = dlist[:durations] - - for rep in dlist: - if verbose < 2 and rep.duration < 0.005: - tr.write_line("") - tr.write_line("(0.00 durations hidden. Use -vv to show these durations.)") - break - nodeid = rep.nodeid.replace("::()::", "::") - tr.write_line("%02.2fs %-8s %s" % (rep.duration, rep.when, nodeid)) - - -def pytest_sessionstart(session): - session._setupstate = SetupState() - - -def pytest_sessionfinish(session): - session._setupstate.teardown_all() - - -def pytest_runtest_protocol(item, nextitem): - item.ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) - runtestprotocol(item, nextitem=nextitem) - item.ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) - return True - - -def runtestprotocol(item, log=True, nextitem=None): - hasrequest = hasattr(item, "_request") - if hasrequest and not item._request: - item._initrequest() - rep = call_and_report(item, "setup", log) - reports = [rep] - if rep.passed: - if item.config.option.setupshow: - show_test_item(item) - if not item.config.option.setuponly: - reports.append(call_and_report(item, "call", log)) - reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) - # after all teardown hooks have been called - # want funcargs and request info to go away - if hasrequest: - item._request = False - item.funcargs = None - return reports - - -def show_test_item(item): - """Show test function, parameters and the fixtures of the test item.""" - tw = item.config.get_terminal_writer() - tw.line() - tw.write(" " * 8) - tw.write(item._nodeid) - used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys()) - if used_fixtures: - tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) - - -def pytest_runtest_setup(item): - _update_current_test_var(item, "setup") - item.session._setupstate.prepare(item) - - -def pytest_runtest_call(item): - _update_current_test_var(item, "call") - sys.last_type, sys.last_value, sys.last_traceback = (None, None, None) - try: - item.runtest() - except Exception: - # Store trace info to allow postmortem debugging - type, value, tb = sys.exc_info() - tb = tb.tb_next # Skip *this* frame - sys.last_type = type - sys.last_value = value - sys.last_traceback = tb - del type, value, tb # Get rid of 
these in this frame - raise - - -def pytest_runtest_teardown(item, nextitem): - _update_current_test_var(item, "teardown") - item.session._setupstate.teardown_exact(item, nextitem) - _update_current_test_var(item, None) - - -def _update_current_test_var(item, when): - """ - Update PYTEST_CURRENT_TEST to reflect the current item and stage. - - If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment. - """ - var_name = "PYTEST_CURRENT_TEST" - if when: - value = "{} ({})".format(item.nodeid, when) - # don't allow null bytes on environment variables (see #2644, #2957) - value = value.replace("\x00", "(null)") - os.environ[var_name] = value - else: - os.environ.pop(var_name) - - -def pytest_report_teststatus(report): - if report.when in ("setup", "teardown"): - if report.failed: - # category, shortletter, verbose-word - return "error", "E", "ERROR" - elif report.skipped: - return "skipped", "s", "SKIPPED" - else: - return "", "", "" - - -# -# Implementation - - -def call_and_report(item, when, log=True, **kwds): - call = call_runtest_hook(item, when, **kwds) - hook = item.ihook - report = hook.pytest_runtest_makereport(item=item, call=call) - if log: - hook.pytest_runtest_logreport(report=report) - if check_interactive_exception(call, report): - hook.pytest_exception_interact(node=item, call=call, report=report) - return report - - -def check_interactive_exception(call, report): - return call.excinfo and not ( - hasattr(report, "wasxfail") - or call.excinfo.errisinstance(skip.Exception) - or call.excinfo.errisinstance(bdb.BdbQuit) - ) - - -def call_runtest_hook(item, when, **kwds): - hookname = "pytest_runtest_" + when - ihook = getattr(item.ihook, hookname) - return CallInfo( - lambda: ihook(item=item, **kwds), - when=when, - treat_keyboard_interrupt_as_exception=item.config.getvalue("usepdb"), - ) - - -class CallInfo(object): - """ Result/Exception info a function invocation. """ - - #: None or ExceptionInfo object. 
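# Illustrative sketch (editor's example, not part of the deleted file):
# reading the PYTEST_CURRENT_TEST variable that _update_current_test_var
# above sets; per the code above its value has the form "<nodeid> (<when>)".
import os

current = os.environ.get("PYTEST_CURRENT_TEST")
if current:
    nodeid, _, phase = current.rpartition(" ")
    print(nodeid, phase.strip("()"))  # e.g. "test_x.py::test_a" and "call"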
- excinfo = None - - def __init__(self, func, when, treat_keyboard_interrupt_as_exception=False): - #: context of invocation: one of "setup", "call", - #: "teardown", "memocollect" - self.when = when - self.start = time() - try: - self.result = func() - except KeyboardInterrupt: - if treat_keyboard_interrupt_as_exception: - self.excinfo = ExceptionInfo() - else: - self.stop = time() - raise - except: # noqa - self.excinfo = ExceptionInfo() - self.stop = time() - - def __repr__(self): - if self.excinfo: - status = "exception: %s" % str(self.excinfo.value) - else: - status = "result: %r" % (self.result,) - return "<CallInfo when=%r %s>" % (self.when, status) - - -def pytest_runtest_makereport(item, call): - when = call.when - duration = call.stop - call.start - keywords = {x: 1 for x in item.keywords} - excinfo = call.excinfo - sections = [] - if not call.excinfo: - outcome = "passed" - longrepr = None - else: - if not isinstance(excinfo, ExceptionInfo): - outcome = "failed" - longrepr = excinfo - elif excinfo.errisinstance(skip.Exception): - outcome = "skipped" - r = excinfo._getreprcrash() - longrepr = (str(r.path), r.lineno, r.message) - else: - outcome = "failed" - if call.when == "call": - longrepr = item.repr_failure(excinfo) - else: # exception in setup or teardown - longrepr = item._repr_failure_py( - excinfo, style=item.config.option.tbstyle - ) - for rwhen, key, content in item._report_sections: - sections.append(("Captured %s %s" % (key, rwhen), content)) - return TestReport( - item.nodeid, - item.location, - keywords, - outcome, - longrepr, - when, - sections, - duration, - user_properties=item.user_properties, - ) - - -def pytest_make_collect_report(collector): - call = CallInfo(lambda: list(collector.collect()), "collect") - longrepr = None - if not call.excinfo: - outcome = "passed" - else: - from _pytest import nose - - skip_exceptions = (Skipped,) + nose.get_skip_exceptions() - if call.excinfo.errisinstance(skip_exceptions): - outcome = "skipped" - r = collector._repr_failure_py(call.excinfo, "line").reprcrash - longrepr = (str(r.path), r.lineno, r.message) - else: - outcome = "failed" - errorinfo = collector.repr_failure(call.excinfo) - if not hasattr(errorinfo, "toterminal"): - errorinfo = CollectErrorRepr(errorinfo) - longrepr = errorinfo - rep = CollectReport( - collector.nodeid, outcome, longrepr, getattr(call, "result", None) - ) - rep.call = call # see collect_one_node - return rep - - -class SetupState(object): - """ shared state for setting up/tearing down test items or collectors. """ - - def __init__(self): - self.stack = [] - self._finalizers = {} - - def addfinalizer(self, finalizer, colitem): - """ attach a finalizer to the given colitem. - if colitem is None, this will add a finalizer that - is called at the end of teardown_all(). - """ - assert colitem and not isinstance(colitem, tuple) - assert callable(finalizer) - # assert colitem in self.stack # some unit tests don't setup stack :/ - self._finalizers.setdefault(colitem, []).append(finalizer) - - def _pop_and_teardown(self): - colitem = self.stack.pop() - self._teardown_with_finalization(colitem) - - def _callfinalizers(self, colitem): - finalizers = self._finalizers.pop(colitem, None) - exc = None - while finalizers: - fin = finalizers.pop() - try: - fin() - except TEST_OUTCOME: - # XXX Only first exception will be seen by user, - # ideally all should be reported.
- if exc is None: - exc = sys.exc_info() - if exc: - six.reraise(*exc) - - def _teardown_with_finalization(self, colitem): - self._callfinalizers(colitem) - if hasattr(colitem, "teardown"): - colitem.teardown() - for colitem in self._finalizers: - assert ( - colitem is None or colitem in self.stack or isinstance(colitem, tuple) - ) - - def teardown_all(self): - while self.stack: - self._pop_and_teardown() - for key in list(self._finalizers): - self._teardown_with_finalization(key) - assert not self._finalizers - - def teardown_exact(self, item, nextitem): - needed_collectors = nextitem and nextitem.listchain() or [] - self._teardown_towards(needed_collectors) - - def _teardown_towards(self, needed_collectors): - exc = None - while self.stack: - if self.stack == needed_collectors[: len(self.stack)]: - break - try: - self._pop_and_teardown() - except TEST_OUTCOME: - # XXX Only first exception will be seen by user, - # ideally all should be reported. - if exc is None: - exc = sys.exc_info() - if exc: - six.reraise(*exc) - - def prepare(self, colitem): - """ setup objects along the collector chain to the test-method - and teardown previously setup objects.""" - needed_collectors = colitem.listchain() - self._teardown_towards(needed_collectors) - - # check if the last collection node has raised an error - for col in self.stack: - if hasattr(col, "_prepare_exc"): - six.reraise(*col._prepare_exc) - for col in needed_collectors[len(self.stack) :]: - self.stack.append(col) - try: - col.setup() - except TEST_OUTCOME: - col._prepare_exc = sys.exc_info() - raise - - -def collect_one_node(collector): - ihook = collector.ihook - ihook.pytest_collectstart(collector=collector) - rep = ihook.pytest_make_collect_report(collector=collector) - call = rep.__dict__.pop("call", None) - if call and check_interactive_exception(call, rep): - ihook.pytest_exception_interact(node=collector, call=call, report=rep) - return rep diff --git a/venv/lib/python3.8/site-packages/_pytest/setuponly.py b/venv/lib/python3.8/site-packages/_pytest/setuponly.py deleted file mode 100644 index 4bd4ad6d..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/setuponly.py +++ /dev/null @@ -1,88 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys - -import pytest - - -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group.addoption( - "--setuponly", - "--setup-only", - action="store_true", - help="only setup fixtures, do not execute tests.", - ) - group.addoption( - "--setupshow", - "--setup-show", - action="store_true", - help="show setup of fixtures while executing tests.", - ) - - -@pytest.hookimpl(hookwrapper=True) -def pytest_fixture_setup(fixturedef, request): - yield - config = request.config - if config.option.setupshow: - if hasattr(request, "param"): - # Save the fixture parameter so ._show_fixture_action() can - # display it now and during the teardown (in .finish()). 
- if fixturedef.ids: - if callable(fixturedef.ids): - fixturedef.cached_param = fixturedef.ids(request.param) - else: - fixturedef.cached_param = fixturedef.ids[request.param_index] - else: - fixturedef.cached_param = request.param - _show_fixture_action(fixturedef, "SETUP") - - -def pytest_fixture_post_finalizer(fixturedef): - if hasattr(fixturedef, "cached_result"): - config = fixturedef._fixturemanager.config - if config.option.setupshow: - _show_fixture_action(fixturedef, "TEARDOWN") - if hasattr(fixturedef, "cached_param"): - del fixturedef.cached_param - - -def _show_fixture_action(fixturedef, msg): - config = fixturedef._fixturemanager.config - capman = config.pluginmanager.getplugin("capturemanager") - if capman: - capman.suspend_global_capture() - out, err = capman.read_global_capture() - - tw = config.get_terminal_writer() - tw.line() - tw.write(" " * 2 * fixturedef.scopenum) - tw.write( - "{step} {scope} {fixture}".format( - step=msg.ljust(8), # align the output to TEARDOWN - scope=fixturedef.scope[0].upper(), - fixture=fixturedef.argname, - ) - ) - - if msg == "SETUP": - deps = sorted(arg for arg in fixturedef.argnames if arg != "request") - if deps: - tw.write(" (fixtures used: {})".format(", ".join(deps))) - - if hasattr(fixturedef, "cached_param"): - tw.write("[{}]".format(fixturedef.cached_param)) - - if capman: - capman.resume_global_capture() - sys.stdout.write(out) - sys.stderr.write(err) - - -@pytest.hookimpl(tryfirst=True) -def pytest_cmdline_main(config): - if config.option.setuponly: - config.option.setupshow = True diff --git a/venv/lib/python3.8/site-packages/_pytest/setupplan.py b/venv/lib/python3.8/site-packages/_pytest/setupplan.py deleted file mode 100644 index 351e0be6..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/setupplan.py +++ /dev/null @@ -1,31 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import pytest - - -def pytest_addoption(parser): - group = parser.getgroup("debugconfig") - group.addoption( - "--setupplan", - "--setup-plan", - action="store_true", - help="show what fixtures and tests would be executed but " - "don't execute anything.", - ) - - -@pytest.hookimpl(tryfirst=True) -def pytest_fixture_setup(fixturedef, request): - # Will return a dummy fixture if the setuponly option is provided. - if request.config.option.setupplan: - fixturedef.cached_result = (None, None, None) - return fixturedef.cached_result - - -@pytest.hookimpl(tryfirst=True) -def pytest_cmdline_main(config): - if config.option.setupplan: - config.option.setuponly = True - config.option.setupshow = True diff --git a/venv/lib/python3.8/site-packages/_pytest/skipping.py b/venv/lib/python3.8/site-packages/_pytest/skipping.py deleted file mode 100644 index f755fc4e..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/skipping.py +++ /dev/null @@ -1,298 +0,0 @@ -""" support for skip/xfail functions and markers. 
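# Illustrative sketch (editor's example, not part of the deleted file): the
# debugconfig options defined above are driven from the command line, e.g.:
#
#   pytest --setup-show   # run tests and print SETUP/TEARDOWN fixture lines
#   pytest --setup-only   # set up fixtures but do not execute test bodies
#   pytest --setup-plan   # print the fixture plan without executing anything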
""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from _pytest.config import hookimpl -from _pytest.mark.evaluate import MarkEvaluator -from _pytest.outcomes import fail -from _pytest.outcomes import skip -from _pytest.outcomes import xfail - - -def pytest_addoption(parser): - group = parser.getgroup("general") - group.addoption( - "--runxfail", - action="store_true", - dest="runxfail", - default=False, - help="run tests even if they are marked xfail", - ) - - parser.addini( - "xfail_strict", - "default for the strict parameter of xfail " - "markers when not given explicitly (default: False)", - default=False, - type="bool", - ) - - -def pytest_configure(config): - if config.option.runxfail: - # yay a hack - import pytest - - old = pytest.xfail - config._cleanup.append(lambda: setattr(pytest, "xfail", old)) - - def nop(*args, **kwargs): - pass - - nop.Exception = xfail.Exception - setattr(pytest, "xfail", nop) - - config.addinivalue_line( - "markers", - "skip(reason=None): skip the given test function with an optional reason. " - 'Example: skip(reason="no way of currently testing this") skips the ' - "test.", - ) - config.addinivalue_line( - "markers", - "skipif(condition): skip the given test function if eval(condition) " - "results in a True value. Evaluation happens within the " - "module global context. Example: skipif('sys.platform == \"win32\"') " - "skips the test if we are on the win32 platform. see " - "https://docs.pytest.org/en/latest/skipping.html", - ) - config.addinivalue_line( - "markers", - "xfail(condition, reason=None, run=True, raises=None, strict=False): " - "mark the test function as an expected failure if eval(condition) " - "has a True value. Optionally specify a reason for better reporting " - "and run=False if you don't even want to execute the test function. " - "If only specific exception(s) are expected, you can list them in " - "raises, and if the test fails in other ways, it will be reported as " - "a true failure. 
See https://docs.pytest.org/en/latest/skipping.html", - ) - - -@hookimpl(tryfirst=True) -def pytest_runtest_setup(item): - # Check if skip or skipif are specified as pytest marks - item._skipped_by_mark = False - eval_skipif = MarkEvaluator(item, "skipif") - if eval_skipif.istrue(): - item._skipped_by_mark = True - skip(eval_skipif.getexplanation()) - - for skip_info in item.iter_markers(name="skip"): - item._skipped_by_mark = True - if "reason" in skip_info.kwargs: - skip(skip_info.kwargs["reason"]) - elif skip_info.args: - skip(skip_info.args[0]) - else: - skip("unconditional skip") - - item._evalxfail = MarkEvaluator(item, "xfail") - check_xfail_no_run(item) - - -@hookimpl(hookwrapper=True) -def pytest_pyfunc_call(pyfuncitem): - check_xfail_no_run(pyfuncitem) - outcome = yield - passed = outcome.excinfo is None - if passed: - check_strict_xfail(pyfuncitem) - - -def check_xfail_no_run(item): - """check xfail(run=False)""" - if not item.config.option.runxfail: - evalxfail = item._evalxfail - if evalxfail.istrue(): - if not evalxfail.get("run", True): - xfail("[NOTRUN] " + evalxfail.getexplanation()) - - -def check_strict_xfail(pyfuncitem): - """check xfail(strict=True) for the given PASSING test""" - evalxfail = pyfuncitem._evalxfail - if evalxfail.istrue(): - strict_default = pyfuncitem.config.getini("xfail_strict") - is_strict_xfail = evalxfail.get("strict", strict_default) - if is_strict_xfail: - del pyfuncitem._evalxfail - explanation = evalxfail.getexplanation() - fail("[XPASS(strict)] " + explanation, pytrace=False) - - -@hookimpl(hookwrapper=True) -def pytest_runtest_makereport(item, call): - outcome = yield - rep = outcome.get_result() - evalxfail = getattr(item, "_evalxfail", None) - # unitttest special case, see setting of _unexpectedsuccess - if hasattr(item, "_unexpectedsuccess") and rep.when == "call": - from _pytest.compat import _is_unittest_unexpected_success_a_failure - - if item._unexpectedsuccess: - rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess) - else: - rep.longrepr = "Unexpected success" - if _is_unittest_unexpected_success_a_failure(): - rep.outcome = "failed" - else: - rep.outcome = "passed" - rep.wasxfail = rep.longrepr - elif item.config.option.runxfail: - pass # don't interefere - elif call.excinfo and call.excinfo.errisinstance(xfail.Exception): - rep.wasxfail = "reason: " + call.excinfo.value.msg - rep.outcome = "skipped" - elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue(): - if call.excinfo: - if evalxfail.invalidraise(call.excinfo.value): - rep.outcome = "failed" - else: - rep.outcome = "skipped" - rep.wasxfail = evalxfail.getexplanation() - elif call.when == "call": - strict_default = item.config.getini("xfail_strict") - is_strict_xfail = evalxfail.get("strict", strict_default) - explanation = evalxfail.getexplanation() - if is_strict_xfail: - rep.outcome = "failed" - rep.longrepr = "[XPASS(strict)] {}".format(explanation) - else: - rep.outcome = "passed" - rep.wasxfail = explanation - elif ( - getattr(item, "_skipped_by_mark", False) - and rep.skipped - and type(rep.longrepr) is tuple - ): - # skipped by mark.skipif; change the location of the failure - # to point to the item definition, otherwise it will display - # the location of where the skip exception was raised within pytest - filename, line, reason = rep.longrepr - filename, line = item.location[:2] - rep.longrepr = filename, line, reason - - -# called by terminalreporter progress reporting - - -def pytest_report_teststatus(report): - if 
hasattr(report, "wasxfail"): - if report.skipped: - return "xfailed", "x", "xfail" - elif report.passed: - return "xpassed", "X", ("XPASS", {"yellow": True}) - - -# called by the terminalreporter instance/plugin - - -def pytest_terminal_summary(terminalreporter): - tr = terminalreporter - if not tr.reportchars: - # for name in "xfailed skipped failed xpassed": - # if not tr.stats.get(name, 0): - # tr.write_line("HINT: use '-r' option to see extra " - # "summary info about tests") - # break - return - - lines = [] - for char in tr.reportchars: - action = REPORTCHAR_ACTIONS.get(char, lambda tr, lines: None) - action(terminalreporter, lines) - - if lines: - tr._tw.sep("=", "short test summary info") - for line in lines: - tr._tw.line(line) - - -def show_simple(terminalreporter, lines, stat, format): - failed = terminalreporter.stats.get(stat) - if failed: - for rep in failed: - pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) - lines.append(format % (pos,)) - - -def show_xfailed(terminalreporter, lines): - xfailed = terminalreporter.stats.get("xfailed") - if xfailed: - for rep in xfailed: - pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) - reason = rep.wasxfail - lines.append("XFAIL %s" % (pos,)) - if reason: - lines.append(" " + str(reason)) - - -def show_xpassed(terminalreporter, lines): - xpassed = terminalreporter.stats.get("xpassed") - if xpassed: - for rep in xpassed: - pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) - reason = rep.wasxfail - lines.append("XPASS %s %s" % (pos, reason)) - - -def folded_skips(skipped): - d = {} - for event in skipped: - key = event.longrepr - assert len(key) == 3, (event, key) - keywords = getattr(event, "keywords", {}) - # folding reports with global pytestmark variable - # this is workaround, because for now we cannot identify the scope of a skip marker - # TODO: revisit after marks scope would be fixed - when = getattr(event, "when", None) - if when == "setup" and "skip" in keywords and "pytestmark" not in keywords: - key = (key[0], None, key[2]) - d.setdefault(key, []).append(event) - values = [] - for key, events in d.items(): - values.append((len(events),) + key) - return values - - -def show_skipped(terminalreporter, lines): - tr = terminalreporter - skipped = tr.stats.get("skipped", []) - if skipped: - # if not tr.hasopt('skipped'): - # tr.write_line( - # "%d skipped tests, specify -rs for more info" % - # len(skipped)) - # return - fskips = folded_skips(skipped) - if fskips: - # tr.write_sep("_", "skipped test summary") - for num, fspath, lineno, reason in fskips: - if reason.startswith("Skipped: "): - reason = reason[9:] - if lineno is not None: - lines.append( - "SKIP [%d] %s:%d: %s" % (num, fspath, lineno + 1, reason) - ) - else: - lines.append("SKIP [%d] %s: %s" % (num, fspath, reason)) - - -def shower(stat, format): - def show_(terminalreporter, lines): - return show_simple(terminalreporter, lines, stat, format) - - return show_ - - -REPORTCHAR_ACTIONS = { - "x": show_xfailed, - "X": show_xpassed, - "f": shower("failed", "FAIL %s"), - "F": shower("failed", "FAIL %s"), - "s": show_skipped, - "S": show_skipped, - "p": shower("passed", "PASSED %s"), - "E": shower("error", "ERROR %s"), -} diff --git a/venv/lib/python3.8/site-packages/_pytest/stepwise.py b/venv/lib/python3.8/site-packages/_pytest/stepwise.py deleted file mode 100644 index 1efa2e7c..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/stepwise.py +++ /dev/null @@ -1,102 +0,0 @@ -import pytest - - -def pytest_addoption(parser): - group = 
parser.getgroup("general") - group.addoption( - "--sw", - "--stepwise", - action="store_true", - dest="stepwise", - help="exit on test fail and continue from last failing test next time", - ) - group.addoption( - "--stepwise-skip", - action="store_true", - dest="stepwise_skip", - help="ignore the first failing test but stop on the next failing test", - ) - - -@pytest.hookimpl -def pytest_configure(config): - config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin") - - -class StepwisePlugin: - def __init__(self, config): - self.config = config - self.active = config.getvalue("stepwise") - self.session = None - - if self.active: - self.lastfailed = config.cache.get("cache/stepwise", None) - self.skip = config.getvalue("stepwise_skip") - - def pytest_sessionstart(self, session): - self.session = session - - def pytest_collection_modifyitems(self, session, config, items): - if not self.active or not self.lastfailed: - return - - already_passed = [] - found = False - - # Make a list of all tests that have been run before the last failing one. - for item in items: - if item.nodeid == self.lastfailed: - found = True - break - else: - already_passed.append(item) - - # If the previously failed test was not found among the test items, - # do not skip any tests. - if not found: - already_passed = [] - - for item in already_passed: - items.remove(item) - - config.hook.pytest_deselected(items=already_passed) - - def pytest_collectreport(self, report): - if self.active and report.failed: - self.session.shouldstop = ( - "Error when collecting test, stopping test execution." - ) - - def pytest_runtest_logreport(self, report): - # Skip this hook if plugin is not active or the test is xfailed. - if not self.active or "xfail" in report.keywords: - return - - if report.failed: - if self.skip: - # Remove test from the failed ones (if it exists) and unset the skip option - # to make sure the following tests will not be skipped. - if report.nodeid == self.lastfailed: - self.lastfailed = None - - self.skip = False - else: - # Mark test as the last failing and interrupt the test session. - self.lastfailed = report.nodeid - self.session.shouldstop = ( - "Test failed, continuing from this test next run." - ) - - else: - # If the test was actually run and did pass. - if report.when == "call": - # Remove test from the failed ones, if exists. - if report.nodeid == self.lastfailed: - self.lastfailed = None - - def pytest_sessionfinish(self, session): - if self.active: - self.config.cache.set("cache/stepwise", self.lastfailed) - else: - # Clear the list of failing tests if the plugin is not active. - self.config.cache.set("cache/stepwise", []) diff --git a/venv/lib/python3.8/site-packages/_pytest/terminal.py b/venv/lib/python3.8/site-packages/_pytest/terminal.py deleted file mode 100644 index dde2750b..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/terminal.py +++ /dev/null @@ -1,893 +0,0 @@ -""" terminal reporting of the full testing process. - -This is a good source for looking at the various reporting hooks. 
-""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import itertools -import platform -import sys -import time - -import attr -import pluggy -import py -import six -from more_itertools import collapse - -import pytest -from _pytest import nodes -from _pytest.main import EXIT_INTERRUPTED -from _pytest.main import EXIT_NOTESTSCOLLECTED -from _pytest.main import EXIT_OK -from _pytest.main import EXIT_TESTSFAILED -from _pytest.main import EXIT_USAGEERROR - - -class MoreQuietAction(argparse.Action): - """ - a modified copy of the argparse count action which counts down and updates - the legacy quiet attribute at the same time - - used to unify verbosity handling - """ - - def __init__(self, option_strings, dest, default=None, required=False, help=None): - super(MoreQuietAction, self).__init__( - option_strings=option_strings, - dest=dest, - nargs=0, - default=default, - required=required, - help=help, - ) - - def __call__(self, parser, namespace, values, option_string=None): - new_count = getattr(namespace, self.dest, 0) - 1 - setattr(namespace, self.dest, new_count) - # todo Deprecate config.quiet - namespace.quiet = getattr(namespace, "quiet", 0) + 1 - - -def pytest_addoption(parser): - group = parser.getgroup("terminal reporting", "reporting", after="general") - group._addoption( - "-v", - "--verbose", - action="count", - default=0, - dest="verbose", - help="increase verbosity.", - ), - group._addoption( - "-q", - "--quiet", - action=MoreQuietAction, - default=0, - dest="verbose", - help="decrease verbosity.", - ), - group._addoption( - "--verbosity", dest="verbose", type=int, default=0, help="set verbosity" - ) - group._addoption( - "-r", - action="store", - dest="reportchars", - default="", - metavar="chars", - help="show extra test summary info as specified by chars (f)ailed, " - "(E)error, (s)skipped, (x)failed, (X)passed, " - "(p)passed, (P)passed with output, (a)all except pP. " - "Warnings are displayed at all times except when " - "--disable-warnings is set", - ) - group._addoption( - "--disable-warnings", - "--disable-pytest-warnings", - default=False, - dest="disable_warnings", - action="store_true", - help="disable warnings summary", - ) - group._addoption( - "-l", - "--showlocals", - action="store_true", - dest="showlocals", - default=False, - help="show locals in tracebacks (disabled by default).", - ) - group._addoption( - "--tb", - metavar="style", - action="store", - dest="tbstyle", - default="auto", - choices=["auto", "long", "short", "no", "line", "native"], - help="traceback print mode (auto/long/short/line/native/no).", - ) - group._addoption( - "--show-capture", - action="store", - dest="showcapture", - choices=["no", "stdout", "stderr", "log", "all"], - default="all", - help="Controls how captured stdout/stderr/log is shown on failed tests. 
" - "Default is 'all'.", - ) - group._addoption( - "--fulltrace", - "--full-trace", - action="store_true", - default=False, - help="don't cut any tracebacks (default is to cut).", - ) - group._addoption( - "--color", - metavar="color", - action="store", - dest="color", - default="auto", - choices=["yes", "no", "auto"], - help="color terminal output (yes/no/auto).", - ) - - parser.addini( - "console_output_style", - help="console output: classic or with additional progress information (classic|progress).", - default="progress", - ) - - -def pytest_configure(config): - reporter = TerminalReporter(config, sys.stdout) - config.pluginmanager.register(reporter, "terminalreporter") - if config.option.debug or config.option.traceconfig: - - def mywriter(tags, args): - msg = " ".join(map(str, args)) - reporter.write_line("[traceconfig] " + msg) - - config.trace.root.setprocessor("pytest:config", mywriter) - - -def getreportopt(config): - reportopts = "" - reportchars = config.option.reportchars - if not config.option.disable_warnings and "w" not in reportchars: - reportchars += "w" - elif config.option.disable_warnings and "w" in reportchars: - reportchars = reportchars.replace("w", "") - if reportchars: - for char in reportchars: - if char not in reportopts and char != "a": - reportopts += char - elif char == "a": - reportopts = "fEsxXw" - return reportopts - - -def pytest_report_teststatus(report): - if report.passed: - letter = "." - elif report.skipped: - letter = "s" - elif report.failed: - letter = "F" - if report.when != "call": - letter = "f" - return report.outcome, letter, report.outcome.upper() - - -@attr.s -class WarningReport(object): - """ - Simple structure to hold warnings information captured by ``pytest_logwarning`` and ``pytest_warning_captured``. - - :ivar str message: user friendly message about the warning - :ivar str|None nodeid: node id that generated the warning (see ``get_location``). - :ivar tuple|py.path.local fslocation: - file system location of the source of the warning (see ``get_location``). - - :ivar bool legacy: if this warning report was generated from the deprecated ``pytest_logwarning`` hook. - """ - - message = attr.ib() - nodeid = attr.ib(default=None) - fslocation = attr.ib(default=None) - legacy = attr.ib(default=False) - - def get_location(self, config): - """ - Returns the more user-friendly information about the location - of a warning, or None. 
- """ - if self.nodeid: - return self.nodeid - if self.fslocation: - if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2: - filename, linenum = self.fslocation[:2] - relpath = py.path.local(filename).relto(config.invocation_dir) - if not relpath: - relpath = str(filename) - return "%s:%s" % (relpath, linenum) - else: - return str(self.fslocation) - return None - - -class TerminalReporter(object): - def __init__(self, config, file=None): - import _pytest.config - - self.config = config - self.verbosity = self.config.option.verbose - self.showheader = self.verbosity >= 0 - self.showfspath = self.verbosity >= 0 - self.showlongtestinfo = self.verbosity > 0 - self._numcollected = 0 - self._session = None - - self.stats = {} - self.startdir = py.path.local() - if file is None: - file = sys.stdout - self._tw = _pytest.config.create_terminal_writer(config, file) - # self.writer will be deprecated in pytest-3.4 - self.writer = self._tw - self._screen_width = self._tw.fullwidth - self.currentfspath = None - self.reportchars = getreportopt(config) - self.hasmarkup = self._tw.hasmarkup - self.isatty = file.isatty() - self._progress_nodeids_reported = set() - self._show_progress_info = self._determine_show_progress_info() - self._collect_report_last_write = None - - def _determine_show_progress_info(self): - """Return True if we should display progress information based on the current config""" - # do not show progress if we are not capturing output (#3038) - if self.config.getoption("capture") == "no": - return False - # do not show progress if we are showing fixture setup/teardown - if self.config.getoption("setupshow"): - return False - return self.config.getini("console_output_style") in ("progress", "count") - - def hasopt(self, char): - char = {"xfailed": "x", "skipped": "s"}.get(char, char) - return char in self.reportchars - - def write_fspath_result(self, nodeid, res, **markup): - fspath = self.config.rootdir.join(nodeid.split("::")[0]) - if fspath != self.currentfspath: - if self.currentfspath is not None and self._show_progress_info: - self._write_progress_information_filling_space() - self.currentfspath = fspath - fspath = self.startdir.bestrelpath(fspath) - self._tw.line() - self._tw.write(fspath + " ") - self._tw.write(res, **markup) - - def write_ensure_prefix(self, prefix, extra="", **kwargs): - if self.currentfspath != prefix: - self._tw.line() - self.currentfspath = prefix - self._tw.write(prefix) - if extra: - self._tw.write(extra, **kwargs) - self.currentfspath = -2 - - def ensure_newline(self): - if self.currentfspath: - self._tw.line() - self.currentfspath = None - - def write(self, content, **markup): - self._tw.write(content, **markup) - - def write_line(self, line, **markup): - if not isinstance(line, six.text_type): - line = six.text_type(line, errors="replace") - self.ensure_newline() - self._tw.line(line, **markup) - - def rewrite(self, line, **markup): - """ - Rewinds the terminal cursor to the beginning and writes the given line. - - :kwarg erase: if True, will also add spaces until the full terminal width to ensure - previous lines are properly erased. - - The rest of the keyword arguments are markup instructions. 
- """ - erase = markup.pop("erase", False) - if erase: - fill_count = self._tw.fullwidth - len(line) - 1 - fill = " " * fill_count - else: - fill = "" - line = str(line) - self._tw.write("\r" + line + fill, **markup) - - def write_sep(self, sep, title=None, **markup): - self.ensure_newline() - self._tw.sep(sep, title, **markup) - - def section(self, title, sep="=", **kw): - self._tw.sep(sep, title, **kw) - - def line(self, msg, **kw): - self._tw.line(msg, **kw) - - def pytest_internalerror(self, excrepr): - for line in six.text_type(excrepr).split("\n"): - self.write_line("INTERNALERROR> " + line) - return 1 - - def pytest_logwarning(self, fslocation, message, nodeid): - warnings = self.stats.setdefault("warnings", []) - warning = WarningReport( - fslocation=fslocation, message=message, nodeid=nodeid, legacy=True - ) - warnings.append(warning) - - def pytest_warning_captured(self, warning_message, item): - # from _pytest.nodes import get_fslocation_from_item - from _pytest.warnings import warning_record_to_str - - warnings = self.stats.setdefault("warnings", []) - fslocation = warning_message.filename, warning_message.lineno - message = warning_record_to_str(warning_message) - - nodeid = item.nodeid if item is not None else "" - warning_report = WarningReport( - fslocation=fslocation, message=message, nodeid=nodeid - ) - warnings.append(warning_report) - - def pytest_plugin_registered(self, plugin): - if self.config.option.traceconfig: - msg = "PLUGIN registered: %s" % (plugin,) - # XXX this event may happen during setup/teardown time - # which unfortunately captures our output here - # which garbles our output if we use self.write_line - self.write_line(msg) - - def pytest_deselected(self, items): - self.stats.setdefault("deselected", []).extend(items) - - def pytest_runtest_logstart(self, nodeid, location): - # ensure that the path is printed before the - # 1st test of a module starts running - if self.showlongtestinfo: - line = self._locationline(nodeid, *location) - self.write_ensure_prefix(line, "") - elif self.showfspath: - fsid = nodeid.split("::")[0] - self.write_fspath_result(fsid, "") - - def pytest_runtest_logreport(self, report): - rep = report - res = self.config.hook.pytest_report_teststatus(report=rep) - category, letter, word = res - if isinstance(word, tuple): - word, markup = word - else: - markup = None - self.stats.setdefault(category, []).append(rep) - self._tests_ran = True - if not letter and not word: - # probably passed setup/teardown - return - running_xdist = hasattr(rep, "node") - if markup is None: - if rep.passed: - markup = {"green": True} - elif rep.failed: - markup = {"red": True} - elif rep.skipped: - markup = {"yellow": True} - else: - markup = {} - if self.verbosity <= 0: - if not running_xdist and self.showfspath: - self.write_fspath_result(rep.nodeid, letter, **markup) - else: - self._tw.write(letter, **markup) - else: - self._progress_nodeids_reported.add(rep.nodeid) - line = self._locationline(rep.nodeid, *rep.location) - if not running_xdist: - self.write_ensure_prefix(line, word, **markup) - if self._show_progress_info: - self._write_progress_information_filling_space() - else: - self.ensure_newline() - self._tw.write("[%s]" % rep.node.gateway.id) - if self._show_progress_info: - self._tw.write( - self._get_progress_information_message() + " ", cyan=True - ) - else: - self._tw.write(" ") - self._tw.write(word, **markup) - self._tw.write(" " + line) - self.currentfspath = -2 - - def pytest_runtest_logfinish(self, nodeid): - if 
self.config.getini("console_output_style") == "count": - num_tests = self._session.testscollected - progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests))) - else: - progress_length = len(" [100%]") - - if self.verbosity <= 0 and self._show_progress_info: - self._progress_nodeids_reported.add(nodeid) - last_item = ( - len(self._progress_nodeids_reported) == self._session.testscollected - ) - if last_item: - self._write_progress_information_filling_space() - else: - w = self._width_of_current_line - past_edge = w + progress_length + 1 >= self._screen_width - if past_edge: - msg = self._get_progress_information_message() - self._tw.write(msg + "\n", cyan=True) - - def _get_progress_information_message(self): - if self.config.getoption("capture") == "no": - return "" - collected = self._session.testscollected - if self.config.getini("console_output_style") == "count": - if collected: - progress = self._progress_nodeids_reported - counter_format = "{{:{}d}}".format(len(str(collected))) - format_string = " [{}/{{}}]".format(counter_format) - return format_string.format(len(progress), collected) - return " [ {} / {} ]".format(collected, collected) - else: - if collected: - progress = len(self._progress_nodeids_reported) * 100 // collected - return " [{:3d}%]".format(progress) - return " [100%]" - - def _write_progress_information_filling_space(self): - msg = self._get_progress_information_message() - w = self._width_of_current_line - fill = self._tw.fullwidth - w - 1 - self.write(msg.rjust(fill), cyan=True) - - @property - def _width_of_current_line(self): - """Return the width of current line, using the superior implementation of py-1.6 when available""" - try: - return self._tw.width_of_current_line - except AttributeError: - # py < 1.6.0 - return self._tw.chars_on_current_line - - def pytest_collection(self): - if self.isatty: - if self.config.option.verbose >= 0: - self.write("collecting ... ", bold=True) - self._collect_report_last_write = time.time() - elif self.config.option.verbose >= 1: - self.write("collecting ... ", bold=True) - - def pytest_collectreport(self, report): - if report.failed: - self.stats.setdefault("error", []).append(report) - elif report.skipped: - self.stats.setdefault("skipped", []).append(report) - items = [x for x in report.result if isinstance(x, pytest.Item)] - self._numcollected += len(items) - if self.isatty: - self.report_collect() - - def report_collect(self, final=False): - if self.config.option.verbose < 0: - return - - if not final: - # Only write "collecting" report every 0.5s. 
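# The percentage suffix computed by _get_progress_information_message()
# above for the default "progress" style, as a pure function (a sketch,
# not pytest API):
def progress_suffix(reported, collected):
    if not collected:
        return " [100%]"
    return " [{:3d}%]".format(reported * 100 // collected)

# e.g. progress_suffix(3, 8) -> " [ 37%]"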
- t = time.time() - if ( - self._collect_report_last_write is not None - and self._collect_report_last_write > t - 0.5 - ): - return - self._collect_report_last_write = t - - errors = len(self.stats.get("error", [])) - skipped = len(self.stats.get("skipped", [])) - deselected = len(self.stats.get("deselected", [])) - if final: - line = "collected " - else: - line = "collecting " - line += ( - str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") - ) - if errors: - line += " / %d errors" % errors - if deselected: - line += " / %d deselected" % deselected - if skipped: - line += " / %d skipped" % skipped - if self.isatty: - self.rewrite(line, bold=True, erase=True) - if final: - self.write("\n") - else: - self.write_line(line) - - @pytest.hookimpl(trylast=True) - def pytest_collection_modifyitems(self): - self.report_collect(True) - - @pytest.hookimpl(trylast=True) - def pytest_sessionstart(self, session): - self._session = session - self._sessionstarttime = time.time() - if not self.showheader: - return - self.write_sep("=", "test session starts", bold=True) - verinfo = platform.python_version() - msg = "platform %s -- Python %s" % (sys.platform, verinfo) - if hasattr(sys, "pypy_version_info"): - verinfo = ".".join(map(str, sys.pypy_version_info[:3])) - msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3]) - msg += ", pytest-%s, py-%s, pluggy-%s" % ( - pytest.__version__, - py.__version__, - pluggy.__version__, - ) - if ( - self.verbosity > 0 - or self.config.option.debug - or getattr(self.config.option, "pastebin", None) - ): - msg += " -- " + str(sys.executable) - self.write_line(msg) - lines = self.config.hook.pytest_report_header( - config=self.config, startdir=self.startdir - ) - self._write_report_lines_from_hooks(lines) - - def _write_report_lines_from_hooks(self, lines): - lines.reverse() - for line in collapse(lines): - self.write_line(line) - - def pytest_report_header(self, config): - inifile = "" - if config.inifile: - inifile = " " + config.rootdir.bestrelpath(config.inifile) - lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)] - - plugininfo = config.pluginmanager.list_plugin_distinfo() - if plugininfo: - - lines.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo))) - return lines - - def pytest_collection_finish(self, session): - if self.config.option.collectonly: - self._printcollecteditems(session.items) - if self.stats.get("failed"): - self._tw.sep("!", "collection failures") - for rep in self.stats.get("failed"): - rep.toterminal(self._tw) - return 1 - return 0 - lines = self.config.hook.pytest_report_collectionfinish( - config=self.config, startdir=self.startdir, items=session.items - ) - self._write_report_lines_from_hooks(lines) - - def _printcollecteditems(self, items): - # to print out items and their parent collectors - # we take care to leave out Instances aka () - # because later versions are going to get rid of them anyway - if self.config.option.verbose < 0: - if self.config.option.verbose < -1: - counts = {} - for item in items: - name = item.nodeid.split("::", 1)[0] - counts[name] = counts.get(name, 0) + 1 - for name, count in sorted(counts.items()): - self._tw.line("%s: %d" % (name, count)) - else: - for item in items: - nodeid = item.nodeid - nodeid = nodeid.replace("::()::", "::") - self._tw.line(nodeid) - return - stack = [] - indent = "" - for item in items: - needed_collectors = item.listchain()[1:] # strip root node - while stack: - if stack == needed_collectors[: len(stack)]: - break - stack.pop() 
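# The status line assembled by report_collect() above, reduced to a pure
# function for illustration (not pytest API):
def collect_line(num, errors=0, deselected=0, skipped=0, final=False):
    line = ("collected " if final else "collecting ") + "%d item%s" % (
        num, "" if num == 1 else "s")
    if errors:
        line += " / %d errors" % errors
    if deselected:
        line += " / %d deselected" % deselected
    if skipped:
        line += " / %d skipped" % skipped
    return line

# collect_line(12, deselected=2, final=True) -> "collected 12 items / 2 deselected"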
- for col in needed_collectors[len(stack) :]: - stack.append(col) - # if col.name == "()": - # continue - indent = (len(stack) - 1) * " " - self._tw.line("%s%s" % (indent, col)) - - @pytest.hookimpl(hookwrapper=True) - def pytest_sessionfinish(self, exitstatus): - outcome = yield - outcome.get_result() - self._tw.line("") - summary_exit_codes = ( - EXIT_OK, - EXIT_TESTSFAILED, - EXIT_INTERRUPTED, - EXIT_USAGEERROR, - EXIT_NOTESTSCOLLECTED, - ) - if exitstatus in summary_exit_codes: - self.config.hook.pytest_terminal_summary( - terminalreporter=self, exitstatus=exitstatus - ) - if exitstatus == EXIT_INTERRUPTED: - self._report_keyboardinterrupt() - del self._keyboardinterrupt_memo - self.summary_stats() - - @pytest.hookimpl(hookwrapper=True) - def pytest_terminal_summary(self): - self.summary_errors() - self.summary_failures() - yield - self.summary_warnings() - self.summary_passes() - - def pytest_keyboard_interrupt(self, excinfo): - self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) - - def pytest_unconfigure(self): - if hasattr(self, "_keyboardinterrupt_memo"): - self._report_keyboardinterrupt() - - def _report_keyboardinterrupt(self): - excrepr = self._keyboardinterrupt_memo - msg = excrepr.reprcrash.message - self.write_sep("!", msg) - if "KeyboardInterrupt" in msg: - if self.config.option.fulltrace: - excrepr.toterminal(self._tw) - else: - excrepr.reprcrash.toterminal(self._tw) - self._tw.line( - "(to show a full traceback on KeyboardInterrupt use --fulltrace)", - yellow=True, - ) - - def _locationline(self, nodeid, fspath, lineno, domain): - def mkrel(nodeid): - line = self.config.cwd_relative_nodeid(nodeid) - if domain and line.endswith(domain): - line = line[: -len(domain)] - values = domain.split("[") - values[0] = values[0].replace(".", "::") # don't replace '.' in params - line += "[".join(values) - return line - - # collect_fspath comes from testid which has a "/"-normalized path - - if fspath: - res = mkrel(nodeid).replace("::()", "") # parens-normalization - if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( - "\\", nodes.SEP - ): - res += " <- " + self.startdir.bestrelpath(fspath) - else: - res = "[location]" - return res + " " - - def _getfailureheadline(self, rep): - if hasattr(rep, "location"): - fspath, lineno, domain = rep.location - return domain - else: - return "test session" # XXX? 
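# pytest_terminal_summary above is a hookwrapper, so errors and failures
# print before other plugins' summaries, and warnings/passes print after
# the yield. The same pattern in a user conftest.py (a sketch):
import pytest

@pytest.hookimpl(hookwrapper=True)
def pytest_terminal_summary(terminalreporter):
    terminalreporter.write_line("printed before other summaries")
    yield
    terminalreporter.write_line("printed after other summaries")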
- - def _getcrashline(self, rep): - try: - return str(rep.longrepr.reprcrash) - except AttributeError: - try: - return str(rep.longrepr)[:50] - except AttributeError: - return "" - - # - # summaries for sessionfinish - # - def getreports(self, name): - values = [] - for x in self.stats.get(name, []): - if not hasattr(x, "_pdbshown"): - values.append(x) - return values - - def summary_warnings(self): - if self.hasopt("w"): - all_warnings = self.stats.get("warnings") - if not all_warnings: - return - - grouped = itertools.groupby( - all_warnings, key=lambda wr: wr.get_location(self.config) - ) - - self.write_sep("=", "warnings summary", yellow=True, bold=False) - for location, warning_records in grouped: - # legacy warnings show their location explicitly, while standard warnings look better without - # it because the location is already formatted into the message - warning_records = list(warning_records) - if location: - self._tw.line(str(location)) - for w in warning_records: - if location: - lines = w.message.splitlines() - indented = "\n".join(" " + x for x in lines) - message = indented.rstrip() - else: - message = w.message.rstrip() - self._tw.line(message) - self._tw.line() - self._tw.line("-- Docs: https://docs.pytest.org/en/latest/warnings.html") - - def summary_passes(self): - if self.config.option.tbstyle != "no": - if self.hasopt("P"): - reports = self.getreports("passed") - if not reports: - return - self.write_sep("=", "PASSES") - for rep in reports: - if rep.sections: - msg = self._getfailureheadline(rep) - self.write_sep("_", msg) - self._outrep_summary(rep) - - def print_teardown_sections(self, rep): - showcapture = self.config.option.showcapture - if showcapture == "no": - return - for secname, content in rep.sections: - if showcapture != "all" and showcapture not in secname: - continue - if "teardown" in secname: - self._tw.sep("-", secname) - if content[-1:] == "\n": - content = content[:-1] - self._tw.line(content) - - def summary_failures(self): - if self.config.option.tbstyle != "no": - reports = self.getreports("failed") - if not reports: - return - self.write_sep("=", "FAILURES") - for rep in reports: - if self.config.option.tbstyle == "line": - line = self._getcrashline(rep) - self.write_line(line) - else: - msg = self._getfailureheadline(rep) - markup = {"red": True, "bold": True} - self.write_sep("_", msg, **markup) - self._outrep_summary(rep) - for report in self.getreports(""): - if report.nodeid == rep.nodeid and report.when == "teardown": - self.print_teardown_sections(report) - - def summary_errors(self): - if self.config.option.tbstyle != "no": - reports = self.getreports("error") - if not reports: - return - self.write_sep("=", "ERRORS") - for rep in self.stats["error"]: - msg = self._getfailureheadline(rep) - if not hasattr(rep, "when"): - # collect - msg = "ERROR collecting " + msg - elif rep.when == "setup": - msg = "ERROR at setup of " + msg - elif rep.when == "teardown": - msg = "ERROR at teardown of " + msg - self.write_sep("_", msg) - self._outrep_summary(rep) - - def _outrep_summary(self, rep): - rep.toterminal(self._tw) - showcapture = self.config.option.showcapture - if showcapture == "no": - return - for secname, content in rep.sections: - if showcapture != "all" and showcapture not in secname: - continue - self._tw.sep("-", secname) - if content[-1:] == "\n": - content = content[:-1] - self._tw.line(content) - - def summary_stats(self): - session_duration = time.time() - self._sessionstarttime - (line, color) = 
build_summary_stats_line(self.stats) - msg = "%s in %.2f seconds" % (line, session_duration) - markup = {color: True, "bold": True} - - if self.verbosity >= 0: - self.write_sep("=", msg, **markup) - if self.verbosity == -1: - self.write_line(msg, **markup) - - -def repr_pythonversion(v=None): - if v is None: - v = sys.version_info - try: - return "%s.%s.%s-%s-%s" % v - except (TypeError, ValueError): - return str(v) - - -def build_summary_stats_line(stats): - keys = ("failed passed skipped deselected xfailed xpassed warnings error").split() - unknown_key_seen = False - for key in stats.keys(): - if key not in keys: - if key: # setup/teardown reports have an empty key, ignore them - keys.append(key) - unknown_key_seen = True - parts = [] - for key in keys: - val = stats.get(key, None) - if val: - parts.append("%d %s" % (len(val), key)) - - if parts: - line = ", ".join(parts) - else: - line = "no tests ran" - - if "failed" in stats or "error" in stats: - color = "red" - elif "warnings" in stats or unknown_key_seen: - color = "yellow" - elif "passed" in stats: - color = "green" - else: - color = "yellow" - - return (line, color) - - -def _plugin_nameversions(plugininfo): - values = [] - for plugin, dist in plugininfo: - # gets us name and version! - name = "{dist.project_name}-{dist.version}".format(dist=dist) - # questionable convenience, but it keeps things short - if name.startswith("pytest-"): - name = name[7:] - # we decided to print python package names - # they can have more than one plugin - if name not in values: - values.append(name) - return values diff --git a/venv/lib/python3.8/site-packages/_pytest/tmpdir.py b/venv/lib/python3.8/site-packages/_pytest/tmpdir.py deleted file mode 100644 index 6287c170..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/tmpdir.py +++ /dev/null @@ -1,187 +0,0 @@ -""" support for providing temporary directories to test functions. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import re -import tempfile -import warnings - -import attr -import py - -import pytest -from .pathlib import ensure_reset_dir -from .pathlib import LOCK_TIMEOUT -from .pathlib import make_numbered_dir -from .pathlib import make_numbered_dir_with_cleanup -from .pathlib import Path -from _pytest.monkeypatch import MonkeyPatch - - -@attr.s -class TempPathFactory(object): - """Factory for temporary directories under the common base temp directory. - - The base directory can be configured using the ``--basetemp`` option.""" - - _given_basetemp = attr.ib() - _trace = attr.ib() - _basetemp = attr.ib(default=None) - - @classmethod - def from_config(cls, config): - """ - :param config: a pytest configuration - """ - return cls( - given_basetemp=config.option.basetemp, trace=config.trace.get("tmpdir") - ) - - def mktemp(self, basename, numbered=True): - """makes a temporary directory managed by the factory""" - if not numbered: - p = self.getbasetemp().joinpath(basename) - p.mkdir() - else: - p = make_numbered_dir(root=self.getbasetemp(), prefix=basename) - self._trace("mktemp", p) - return p - - def getbasetemp(self): - """ return base temporary directory. 
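# What build_summary_stats_line() above yields for a typical stats dict;
# values are lists of reports, and only their lengths matter (sketch):
stats = {"passed": [None] * 10, "failed": [None] * 2}
# build_summary_stats_line(stats) -> ("2 failed, 10 passed", "red")
# "failed"/"error" force red; warnings or unknown keys yield yellow;
# a clean run with passes is green.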
""" - if self._basetemp is None: - if self._given_basetemp is not None: - basetemp = Path(self._given_basetemp) - ensure_reset_dir(basetemp) - else: - from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT") - temproot = Path(from_env or tempfile.gettempdir()) - user = get_user() or "unknown" - # use a sub-directory in the temproot to speed-up - # make_numbered_dir() call - rootdir = temproot.joinpath("pytest-of-{}".format(user)) - rootdir.mkdir(exist_ok=True) - basetemp = make_numbered_dir_with_cleanup( - prefix="pytest-", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT - ) - assert basetemp is not None - self._basetemp = t = basetemp - self._trace("new basetemp", t) - return t - else: - return self._basetemp - - -@attr.s -class TempdirFactory(object): - """ - backward comptibility wrapper that implements - :class:``py.path.local`` for :class:``TempPathFactory`` - """ - - _tmppath_factory = attr.ib() - - def ensuretemp(self, string, dir=1): - """ (deprecated) return temporary directory path with - the given string as the trailing part. It is usually - better to use the 'tmpdir' function argument which - provides an empty unique-per-test-invocation directory - and is guaranteed to be empty. - """ - # py.log._apiwarn(">1.1", "use tmpdir function argument") - from .deprecated import PYTEST_ENSURETEMP - - warnings.warn(PYTEST_ENSURETEMP, stacklevel=2) - return self.getbasetemp().ensure(string, dir=dir) - - def mktemp(self, basename, numbered=True): - """Create a subdirectory of the base temporary directory and return it. - If ``numbered``, ensure the directory is unique by adding a number - prefix greater than any existing one. - """ - return py.path.local(self._tmppath_factory.mktemp(basename, numbered).resolve()) - - def getbasetemp(self): - """backward compat wrapper for ``_tmppath_factory.getbasetemp``""" - return py.path.local(self._tmppath_factory.getbasetemp().resolve()) - - -def get_user(): - """Return the current user name, or None if getuser() does not work - in the current environment (see #1010). - """ - import getpass - - try: - return getpass.getuser() - except (ImportError, KeyError): - return None - - -def pytest_configure(config): - """Create a TempdirFactory and attach it to the config object. - - This is to comply with existing plugins which expect the handler to be - available at pytest_configure time, but ideally should be moved entirely - to the tmpdir_factory session fixture. - """ - mp = MonkeyPatch() - tmppath_handler = TempPathFactory.from_config(config) - t = TempdirFactory(tmppath_handler) - config._cleanup.append(mp.undo) - mp.setattr(config, "_tmp_path_factory", tmppath_handler, raising=False) - mp.setattr(config, "_tmpdirhandler", t, raising=False) - mp.setattr(pytest, "ensuretemp", t.ensuretemp, raising=False) - - -@pytest.fixture(scope="session") -def tmpdir_factory(request): - """Return a :class:`_pytest.tmpdir.TempdirFactory` instance for the test session. - """ - return request.config._tmpdirhandler - - -@pytest.fixture(scope="session") -def tmp_path_factory(request): - """Return a :class:`_pytest.tmpdir.TempPathFactory` instance for the test session. 
- """ - return request.config._tmp_path_factory - - -def _mk_tmp(request, factory): - name = request.node.name - name = re.sub(r"[\W]", "_", name) - MAXVAL = 30 - name = name[:MAXVAL] - return factory.mktemp(name, numbered=True) - - -@pytest.fixture -def tmpdir(request, tmpdir_factory): - """Return a temporary directory path object - which is unique to each test function invocation, - created as a sub directory of the base temporary - directory. The returned object is a `py.path.local`_ - path object. - - .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html - """ - return _mk_tmp(request, tmpdir_factory) - - -@pytest.fixture -def tmp_path(request, tmp_path_factory): - """Return a temporary directory path object - which is unique to each test function invocation, - created as a sub directory of the base temporary - directory. The returned object is a :class:`pathlib.Path` - object. - - .. note:: - - in python < 3.6 this is a pathlib2.Path - """ - - return _mk_tmp(request, tmp_path_factory) diff --git a/venv/lib/python3.8/site-packages/_pytest/unittest.py b/venv/lib/python3.8/site-packages/_pytest/unittest.py deleted file mode 100644 index a38a60d8..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/unittest.py +++ /dev/null @@ -1,259 +0,0 @@ -""" discovery and running of std-library "unittest" style tests. """ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys -import traceback - -import _pytest._code -from _pytest.compat import getimfunc -from _pytest.config import hookimpl -from _pytest.outcomes import fail -from _pytest.outcomes import skip -from _pytest.outcomes import xfail -from _pytest.python import Class -from _pytest.python import Function -from _pytest.python import Module -from _pytest.python import transfer_markers - - -def pytest_pycollect_makeitem(collector, name, obj): - # has unittest been imported and is obj a subclass of its TestCase? 
- try: - if not issubclass(obj, sys.modules["unittest"].TestCase): - return - except Exception: - return - # yes, so let's collect it - return UnitTestCase(name, parent=collector) - - -class UnitTestCase(Class): - # marker for fixturemanger.getfixtureinfo() - # to declare that our children do not support funcargs - nofuncargs = True - - def setup(self): - cls = self.obj - if getattr(cls, "__unittest_skip__", False): - return # skipped - setup = getattr(cls, "setUpClass", None) - if setup is not None: - setup() - teardown = getattr(cls, "tearDownClass", None) - if teardown is not None: - self.addfinalizer(teardown) - super(UnitTestCase, self).setup() - - def collect(self): - from unittest import TestLoader - - cls = self.obj - if not getattr(cls, "__test__", True): - return - self.session._fixturemanager.parsefactories(self, unittest=True) - loader = TestLoader() - module = self.getparent(Module).obj - foundsomething = False - for name in loader.getTestCaseNames(self.obj): - x = getattr(self.obj, name) - if not getattr(x, "__test__", True): - continue - funcobj = getimfunc(x) - transfer_markers(funcobj, cls, module) - yield TestCaseFunction(name, parent=self, callobj=funcobj) - foundsomething = True - - if not foundsomething: - runtest = getattr(self.obj, "runTest", None) - if runtest is not None: - ut = sys.modules.get("twisted.trial.unittest", None) - if ut is None or runtest != ut.TestCase.runTest: - yield TestCaseFunction("runTest", parent=self) - - -class TestCaseFunction(Function): - nofuncargs = True - _excinfo = None - _testcase = None - - def setup(self): - self._testcase = self.parent.obj(self.name) - self._fix_unittest_skip_decorator() - self._obj = getattr(self._testcase, self.name) - if hasattr(self._testcase, "setup_method"): - self._testcase.setup_method(self._obj) - if hasattr(self, "_request"): - self._request._fillfixtures() - - def _fix_unittest_skip_decorator(self): - """ - The @unittest.skip decorator calls functools.wraps(self._testcase) - The call to functools.wraps() fails unless self._testcase - has a __name__ attribute. This is usually automatically supplied - if the test is a function or method, but we need to add manually - here. - - See issue #1169 - """ - if sys.version_info[0] == 2: - setattr(self._testcase, "__name__", self.name) - - def teardown(self): - if hasattr(self._testcase, "teardown_method"): - self._testcase.teardown_method(self._obj) - # Allow garbage collection on TestCase instance attributes. 
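# The collector above picks up plain stdlib TestCase subclasses with no
# pytest-specific code required, e.g.:
import unittest

class TestExample(unittest.TestCase):
    def test_upper(self):
        self.assertEqual("pytest".upper(), "PYTEST")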
- self._testcase = None - self._obj = None - - def startTest(self, testcase): - pass - - def _addexcinfo(self, rawexcinfo): - # unwrap potential exception info (see twisted trial support below) - rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) - try: - excinfo = _pytest._code.ExceptionInfo(rawexcinfo) - except TypeError: - try: - try: - values = traceback.format_exception(*rawexcinfo) - values.insert( - 0, - "NOTE: Incompatible Exception Representation, " - "displaying natively:\n\n", - ) - fail("".join(values), pytrace=False) - except (fail.Exception, KeyboardInterrupt): - raise - except: # noqa - fail( - "ERROR: Unknown Incompatible Exception " - "representation:\n%r" % (rawexcinfo,), - pytrace=False, - ) - except KeyboardInterrupt: - raise - except fail.Exception: - excinfo = _pytest._code.ExceptionInfo() - self.__dict__.setdefault("_excinfo", []).append(excinfo) - - def addError(self, testcase, rawexcinfo): - self._addexcinfo(rawexcinfo) - - def addFailure(self, testcase, rawexcinfo): - self._addexcinfo(rawexcinfo) - - def addSkip(self, testcase, reason): - try: - skip(reason) - except skip.Exception: - self._skipped_by_mark = True - self._addexcinfo(sys.exc_info()) - - def addExpectedFailure(self, testcase, rawexcinfo, reason=""): - try: - xfail(str(reason)) - except xfail.Exception: - self._addexcinfo(sys.exc_info()) - - def addUnexpectedSuccess(self, testcase, reason=""): - self._unexpectedsuccess = reason - - def addSuccess(self, testcase): - pass - - def stopTest(self, testcase): - pass - - def _handle_skip(self): - # implements the skipping machinery (see #2137) - # analog to pythons Lib/unittest/case.py:run - testMethod = getattr(self._testcase, self._testcase._testMethodName) - if getattr(self._testcase.__class__, "__unittest_skip__", False) or getattr( - testMethod, "__unittest_skip__", False - ): - # If the class or method was skipped. 
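# addSkip/addExpectedFailure above implement the unittest result protocol,
# so the stdlib decorators are reported as pytest skips and xfails, e.g.:
import unittest

class TestOutcomes(unittest.TestCase):
    @unittest.skip("demonstration only")
    def test_skipped(self):
        self.fail("never runs")

    @unittest.expectedFailure
    def test_expected_failure(self):
        self.assertEqual(1, 2)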
- skip_why = getattr( - self._testcase.__class__, "__unittest_skip_why__", "" - ) or getattr(testMethod, "__unittest_skip_why__", "") - try: # PY3, unittest2 on PY2 - self._testcase._addSkip(self, self._testcase, skip_why) - except TypeError: # PY2 - if sys.version_info[0] != 2: - raise - self._testcase._addSkip(self, skip_why) - return True - return False - - def runtest(self): - if self.config.pluginmanager.get_plugin("pdbinvoke") is None: - self._testcase(result=self) - else: - # disables tearDown and cleanups for post mortem debugging (see #1890) - if self._handle_skip(): - return - self._testcase.debug() - - def _prunetraceback(self, excinfo): - Function._prunetraceback(self, excinfo) - traceback = excinfo.traceback.filter( - lambda x: not x.frame.f_globals.get("__unittest") - ) - if traceback: - excinfo.traceback = traceback - - -@hookimpl(tryfirst=True) -def pytest_runtest_makereport(item, call): - if isinstance(item, TestCaseFunction): - if item._excinfo: - call.excinfo = item._excinfo.pop(0) - try: - del call.result - except AttributeError: - pass - - -# twisted trial support - - -@hookimpl(hookwrapper=True) -def pytest_runtest_protocol(item): - if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: - ut = sys.modules["twisted.python.failure"] - Failure__init__ = ut.Failure.__init__ - check_testcase_implements_trial_reporter() - - def excstore( - self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None - ): - if exc_value is None: - self._rawexcinfo = sys.exc_info() - else: - if exc_type is None: - exc_type = type(exc_value) - self._rawexcinfo = (exc_type, exc_value, exc_tb) - try: - Failure__init__( - self, exc_value, exc_type, exc_tb, captureVars=captureVars - ) - except TypeError: - Failure__init__(self, exc_value, exc_type, exc_tb) - - ut.Failure.__init__ = excstore - yield - ut.Failure.__init__ = Failure__init__ - else: - yield - - -def check_testcase_implements_trial_reporter(done=[]): - if done: - return - from zope.interface import classImplements - from twisted.trial.itrial import IReporter - - classImplements(TestCaseFunction, IReporter) - done.append(1) diff --git a/venv/lib/python3.8/site-packages/_pytest/warning_types.py b/venv/lib/python3.8/site-packages/_pytest/warning_types.py deleted file mode 100644 index 55e1f037..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/warning_types.py +++ /dev/null @@ -1,60 +0,0 @@ -import attr - - -class PytestWarning(UserWarning): - """ - Bases: :class:`UserWarning`. - - Base class for all warnings emitted by pytest. - """ - - -class PytestDeprecationWarning(PytestWarning, DeprecationWarning): - """ - Bases: :class:`pytest.PytestWarning`, :class:`DeprecationWarning`. - - Warning class for features that will be removed in a future version. - """ - - -class RemovedInPytest4Warning(PytestDeprecationWarning): - """ - Bases: :class:`pytest.PytestDeprecationWarning`. - - Warning class for features scheduled to be removed in pytest 4.0. - """ - - -class PytestExperimentalApiWarning(PytestWarning, FutureWarning): - """ - Bases: :class:`pytest.PytestWarning`, :class:`FutureWarning`. - - Warning category used to denote experiments in pytest. 
Use sparingly as the API might change or even be - removed completely in future version - """ - - @classmethod - def simple(cls, apiname): - return cls( - "{apiname} is an experimental api that may change over time".format( - apiname=apiname - ) - ) - - -@attr.s -class UnformattedWarning(object): - """Used to hold warnings that need to format their message at runtime, as opposed to a direct message. - - Using this class avoids to keep all the warning types and messages in this module, avoiding misuse. - """ - - category = attr.ib() - template = attr.ib() - - def format(self, **kwargs): - """Returns an instance of the warning category, formatted with given kwargs""" - return self.category(self.template.format(**kwargs)) - - -PYTESTER_COPY_EXAMPLE = PytestExperimentalApiWarning.simple("testdir.copy_example") diff --git a/venv/lib/python3.8/site-packages/_pytest/warnings.py b/venv/lib/python3.8/site-packages/_pytest/warnings.py deleted file mode 100644 index 52afbe78..00000000 --- a/venv/lib/python3.8/site-packages/_pytest/warnings.py +++ /dev/null @@ -1,173 +0,0 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import sys -import warnings -from contextlib import contextmanager - -import pytest -from _pytest import compat - - -def _setoption(wmod, arg): - """ - Copy of the warning._setoption function but does not escape arguments. - """ - parts = arg.split(":") - if len(parts) > 5: - raise wmod._OptionError("too many fields (max 5): %r" % (arg,)) - while len(parts) < 5: - parts.append("") - action, message, category, module, lineno = [s.strip() for s in parts] - action = wmod._getaction(action) - category = wmod._getcategory(category) - if lineno: - try: - lineno = int(lineno) - if lineno < 0: - raise ValueError - except (ValueError, OverflowError): - raise wmod._OptionError("invalid lineno %r" % (lineno,)) - else: - lineno = 0 - wmod.filterwarnings(action, message, category, module, lineno) - - -def pytest_addoption(parser): - group = parser.getgroup("pytest-warnings") - group.addoption( - "-W", - "--pythonwarnings", - action="append", - help="set which warnings to report, see -W option of python itself.", - ) - parser.addini( - "filterwarnings", - type="linelist", - help="Each line specifies a pattern for " - "warnings.filterwarnings. " - "Processed after -W and --pythonwarnings.", - ) - - -def pytest_configure(config): - config.addinivalue_line( - "markers", - "filterwarnings(warning): add a warning filter to the given test. " - "see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings ", - ) - - -@contextmanager -def catch_warnings_for_item(config, ihook, when, item): - """ - Context manager that catches warnings generated in the contained execution block. - - ``item`` can be None if we are not in the context of an item execution. - - Each warning captured triggers the ``pytest_warning_captured`` hook. 
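# How the filters parsed above are typically supplied; per-test marks are
# applied last and so take precedence over -W and the ini setting (see the
# precedence comment in catch_warnings_for_item's body below). A sketch:
#
#   # pytest.ini
#   [pytest]
#   filterwarnings =
#       error
#       ignore::DeprecationWarning
import warnings

import pytest

@pytest.mark.filterwarnings("ignore:deprecated call:DeprecationWarning")
def test_legacy_api():
    warnings.warn("deprecated call", DeprecationWarning)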
- """ - cmdline_filters = config.getoption("pythonwarnings") or [] - inifilters = config.getini("filterwarnings") - with warnings.catch_warnings(record=True) as log: - - if not sys.warnoptions: - # if user is not explicitly configuring warning filters, show deprecation warnings by default (#2908) - warnings.filterwarnings("always", category=DeprecationWarning) - warnings.filterwarnings("always", category=PendingDeprecationWarning) - - # filters should have this precedence: mark, cmdline options, ini - # filters should be applied in the inverse order of precedence - for arg in inifilters: - _setoption(warnings, arg) - - for arg in cmdline_filters: - warnings._setoption(arg) - - if item is not None: - for mark in item.iter_markers(name="filterwarnings"): - for arg in mark.args: - _setoption(warnings, arg) - - yield - - for warning_message in log: - ihook.pytest_warning_captured.call_historic( - kwargs=dict(warning_message=warning_message, when=when, item=item) - ) - - -def warning_record_to_str(warning_message): - """Convert a warnings.WarningMessage to a string, taking in account a lot of unicode shenaningans in Python 2. - - When Python 2 support is dropped this function can be greatly simplified. - """ - warn_msg = warning_message.message - unicode_warning = False - if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args): - new_args = [] - for m in warn_msg.args: - new_args.append( - compat.ascii_escaped(m) if isinstance(m, compat.UNICODE_TYPES) else m - ) - unicode_warning = list(warn_msg.args) != new_args - warn_msg.args = new_args - - msg = warnings.formatwarning( - warn_msg, - warning_message.category, - warning_message.filename, - warning_message.lineno, - warning_message.line, - ) - if unicode_warning: - warnings.warn( - "Warning is using unicode non convertible to ascii, " - "converting to a safe representation:\n {!r}".format(compat.safe_str(msg)), - UnicodeWarning, - ) - return msg - - -@pytest.hookimpl(hookwrapper=True, tryfirst=True) -def pytest_runtest_protocol(item): - with catch_warnings_for_item( - config=item.config, ihook=item.ihook, when="runtest", item=item - ): - yield - - -@pytest.hookimpl(hookwrapper=True, tryfirst=True) -def pytest_collection(session): - config = session.config - with catch_warnings_for_item( - config=config, ihook=config.hook, when="collect", item=None - ): - yield - - -@pytest.hookimpl(hookwrapper=True) -def pytest_terminal_summary(terminalreporter): - config = terminalreporter.config - with catch_warnings_for_item( - config=config, ihook=config.hook, when="config", item=None - ): - yield - - -def _issue_config_warning(warning, config): - """ - This function should be used instead of calling ``warnings.warn`` directly when we are in the "configure" stage: - at this point the actual options might not have been set, so we manually trigger the pytest_warning_captured - hook so we can display this warnings in the terminal. This is a hack until we can sort out #2891. - - :param warning: the warning instance. 
- :param config: - """ - with warnings.catch_warnings(record=True) as records: - warnings.simplefilter("always", type(warning)) - warnings.warn(warning, stacklevel=2) - config.hook.pytest_warning_captured.call_historic( - kwargs=dict(warning_message=records[0], when="config", item=None) - ) diff --git a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/DESCRIPTION.rst b/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/DESCRIPTION.rst deleted file mode 100644 index c605ec26..00000000 --- a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,227 +0,0 @@ - -.. image:: https://secure.travis-ci.org/ActiveState/appdirs.png - :target: http://travis-ci.org/ActiveState/appdirs - -the problem -=========== - -What directory should your app use for storing user data? If running on Mac OS X, you -should use:: - - ~/Library/Application Support/ - -If on Windows (at least English Win XP) that should be:: - - C:\Documents and Settings\\Application Data\Local Settings\\ - -or possibly:: - - C:\Documents and Settings\\Application Data\\ - -for `roaming profiles `_ but that is another story. - -On Linux (and other Unices) the dir, according to the `XDG -spec `_, is:: - - ~/.local/share/ - - -``appdirs`` to the rescue -========================= - -This kind of thing is what the ``appdirs`` module is for. ``appdirs`` will -help you choose an appropriate: - -- user data dir (``user_data_dir``) -- user config dir (``user_config_dir``) -- user cache dir (``user_cache_dir``) -- site data dir (``site_data_dir``) -- site config dir (``site_config_dir``) -- user log dir (``user_log_dir``) - -and also: - -- is a single module so other Python packages can include their own private copy -- is slightly opinionated on the directory names used. Look for "OPINION" in - documentation and code for when an opinion is being applied. 
- - -some example output -=================== - -On Mac OS X:: - - >>> from appdirs import * - >>> appname = "SuperApp" - >>> appauthor = "Acme" - >>> user_data_dir(appname, appauthor) - '/Users/trentm/Library/Application Support/SuperApp' - >>> site_data_dir(appname, appauthor) - '/Library/Application Support/SuperApp' - >>> user_cache_dir(appname, appauthor) - '/Users/trentm/Library/Caches/SuperApp' - >>> user_log_dir(appname, appauthor) - '/Users/trentm/Library/Logs/SuperApp' - -On Windows 7:: - - >>> from appdirs import * - >>> appname = "SuperApp" - >>> appauthor = "Acme" - >>> user_data_dir(appname, appauthor) - 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp' - >>> user_data_dir(appname, appauthor, roaming=True) - 'C:\\Users\\trentm\\AppData\\Roaming\\Acme\\SuperApp' - >>> user_cache_dir(appname, appauthor) - 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Cache' - >>> user_log_dir(appname, appauthor) - 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Logs' - -On Linux:: - - >>> from appdirs import * - >>> appname = "SuperApp" - >>> appauthor = "Acme" - >>> user_data_dir(appname, appauthor) - '/home/trentm/.local/share/SuperApp - >>> site_data_dir(appname, appauthor) - '/usr/local/share/SuperApp' - >>> site_data_dir(appname, appauthor, multipath=True) - '/usr/local/share/SuperApp:/usr/share/SuperApp' - >>> user_cache_dir(appname, appauthor) - '/home/trentm/.cache/SuperApp' - >>> user_log_dir(appname, appauthor) - '/home/trentm/.cache/SuperApp/log' - >>> user_config_dir(appname) - '/home/trentm/.config/SuperApp' - >>> site_config_dir(appname) - '/etc/xdg/SuperApp' - >>> os.environ['XDG_CONFIG_DIRS'] = '/etc:/usr/local/etc' - >>> site_config_dir(appname, multipath=True) - '/etc/SuperApp:/usr/local/etc/SuperApp' - - -``AppDirs`` for convenience -=========================== - -:: - - >>> from appdirs import AppDirs - >>> dirs = AppDirs("SuperApp", "Acme") - >>> dirs.user_data_dir - '/Users/trentm/Library/Application Support/SuperApp' - >>> dirs.site_data_dir - '/Library/Application Support/SuperApp' - >>> dirs.user_cache_dir - '/Users/trentm/Library/Caches/SuperApp' - >>> dirs.user_log_dir - '/Users/trentm/Library/Logs/SuperApp' - - - -Per-version isolation -===================== - -If you have multiple versions of your app in use that you want to be -able to run side-by-side, then you may want version-isolation for these -dirs:: - - >>> from appdirs import AppDirs - >>> dirs = AppDirs("SuperApp", "Acme", version="1.0") - >>> dirs.user_data_dir - '/Users/trentm/Library/Application Support/SuperApp/1.0' - >>> dirs.site_data_dir - '/Library/Application Support/SuperApp/1.0' - >>> dirs.user_cache_dir - '/Users/trentm/Library/Caches/SuperApp/1.0' - >>> dirs.user_log_dir - '/Users/trentm/Library/Logs/SuperApp/1.0' - - - -appdirs Changelog -================= - -appdirs 1.4.3 -------------- -- [PR #76] Python 3.6 invalid escape sequence deprecation fixes -- Fix for Python 3.6 support - -appdirs 1.4.2 -------------- -- [PR #84] Allow installing without setuptools -- [PR #86] Fix string delimiters in setup.py description -- Add Python 3.6 support - -appdirs 1.4.1 -------------- -- [issue #38] Fix _winreg import on Windows Py3 -- [issue #55] Make appname optional - -appdirs 1.4.0 -------------- -- [PR #42] AppAuthor is now optional on Windows -- [issue 41] Support Jython on Windows, Mac, and Unix-like platforms. Windows - support requires `JNA `_. 
-- [PR #44] Fix incorrect behaviour of the site_config_dir method - -appdirs 1.3.0 -------------- -- [Unix, issue 16] Conform to XDG standard, instead of breaking it for - everybody -- [Unix] Removes gratuitous case mangling of the case, since \*nix-es are - usually case sensitive, so mangling is not wise -- [Unix] Fixes the utterly wrong behaviour in ``site_data_dir``, return result - based on XDG_DATA_DIRS and make room for respecting the standard which - specifies XDG_DATA_DIRS is a multiple-value variable -- [Issue 6] Add ``*_config_dir`` which are distinct on nix-es, according to - XDG specs; on Windows and Mac return the corresponding ``*_data_dir`` - -appdirs 1.2.0 -------------- - -- [Unix] Put ``user_log_dir`` under the *cache* dir on Unix. Seems to be more - typical. -- [issue 9] Make ``unicode`` work on py3k. - -appdirs 1.1.0 -------------- - -- [issue 4] Add ``AppDirs.user_log_dir``. -- [Unix, issue 2, issue 7] appdirs now conforms to `XDG base directory spec - `_. -- [Mac, issue 5] Fix ``site_data_dir()`` on Mac. -- [Mac] Drop use of 'Carbon' module in favour of hardcoded paths; supports - Python3 now. -- [Windows] Append "Cache" to ``user_cache_dir`` on Windows by default. Use - ``opinion=False`` option to disable this. -- Add ``appdirs.AppDirs`` convenience class. Usage: - - >>> dirs = AppDirs("SuperApp", "Acme", version="1.0") - >>> dirs.user_data_dir - '/Users/trentm/Library/Application Support/SuperApp/1.0' - -- [Windows] Cherry-pick Komodo's change to downgrade paths to the Windows short - paths if there are high bit chars. -- [Linux] Change default ``user_cache_dir()`` on Linux to be singular, e.g. - "~/.superapp/cache". -- [Windows] Add ``roaming`` option to ``user_data_dir()`` (for use on Windows only) - and change the default ``user_data_dir`` behaviour to use a *non*-roaming - profile dir (``CSIDL_LOCAL_APPDATA`` instead of ``CSIDL_APPDATA``). Why? Because - a large roaming profile can cause login speed issues. The "only syncs on - logout" behaviour can cause surprises in appdata info. - - -appdirs 1.0.1 (never released) ------------------------------- - -Started this changelog 27 July 2010. Before that this module originated in the -`Komodo `_ product as ``applib.py`` and then -as `applib/location.py -`_ (used by -`PyPM `_ in `ActivePython -`_). This is basically a fork of -applib.py 1.0.1 and applib/location.py 1.0.1. - - - diff --git a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/INSTALLER b/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/INSTALLER deleted file mode 100644 index a1b589e3..00000000 --- a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/METADATA b/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/METADATA deleted file mode 100644 index 69ddf934..00000000 --- a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/METADATA +++ /dev/null @@ -1,254 +0,0 @@ -Metadata-Version: 2.0 -Name: appdirs -Version: 1.4.3 -Summary: A small Python module for determining appropriate platform-specific dirs, e.g. a "user data dir". 
-Home-page: http://github.com/ActiveState/appdirs -Author: Trent Mick; Sridhar Ratnakumar; Jeff Rouse -Author-email: trentm@gmail.com; github@srid.name; jr@its.to -License: MIT -Keywords: application directory log cache user -Platform: UNKNOWN -Classifier: Development Status :: 4 - Beta -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.6 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.2 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Topic :: Software Development :: Libraries :: Python Modules - - -.. image:: https://secure.travis-ci.org/ActiveState/appdirs.png - :target: http://travis-ci.org/ActiveState/appdirs - -the problem -=========== - -What directory should your app use for storing user data? If running on Mac OS X, you -should use:: - - ~/Library/Application Support/ - -If on Windows (at least English Win XP) that should be:: - - C:\Documents and Settings\\Application Data\Local Settings\\ - -or possibly:: - - C:\Documents and Settings\\Application Data\\ - -for `roaming profiles `_ but that is another story. - -On Linux (and other Unices) the dir, according to the `XDG -spec `_, is:: - - ~/.local/share/ - - -``appdirs`` to the rescue -========================= - -This kind of thing is what the ``appdirs`` module is for. ``appdirs`` will -help you choose an appropriate: - -- user data dir (``user_data_dir``) -- user config dir (``user_config_dir``) -- user cache dir (``user_cache_dir``) -- site data dir (``site_data_dir``) -- site config dir (``site_config_dir``) -- user log dir (``user_log_dir``) - -and also: - -- is a single module so other Python packages can include their own private copy -- is slightly opinionated on the directory names used. Look for "OPINION" in - documentation and code for when an opinion is being applied. 
- - - diff --git a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/RECORD b/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/RECORD deleted file mode 100644 index 27a99569..00000000 --- a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/RECORD +++ /dev/null @@ -1,9 +0,0 @@ -__pycache__/appdirs.cpython-38.pyc,, -appdirs-1.4.3.dist-info/DESCRIPTION.rst,sha256=77Fe8OIOLSjDSNdLiL5xywMKO-AGE42rdXkqKo4Ee-k,7531 -appdirs-1.4.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -appdirs-1.4.3.dist-info/METADATA,sha256=3IFw6jTfImdOqsCb2GYvVR157tL7KEzfRAszn382csk,8773 -appdirs-1.4.3.dist-info/RECORD,, -appdirs-1.4.3.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110 -appdirs-1.4.3.dist-info/metadata.json,sha256=fL_Q-GuFJu3PJxMrwU7SdsI8RGqjIfi2AvouCSF5DSA,1359 -appdirs-1.4.3.dist-info/top_level.txt,sha256=nKncE8CUqZERJ6VuQWL4_bkunSPDNfn7KZqb4Tr5YEM,8 -appdirs.py,sha256=MievUEuv3l_mQISH5SF0shDk_BNhHHzYiAPrT3ITN4I,24701 diff --git a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/WHEEL b/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/WHEEL deleted file mode 100644 index 8b6dd1b5..00000000 --- a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.29.0) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/metadata.json b/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/metadata.json deleted file mode 100644 index da1e5f3a..00000000 --- a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: Python :: Implementation :: CPython", "Topic :: Software Development :: Libraries :: Python Modules"], "extensions": {"python.details": {"contacts": [{"email": "trentm@gmail.com; github@srid.name; jr@its.to", "name": "Trent Mick; Sridhar Ratnakumar; Jeff Rouse", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/ActiveState/appdirs"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["application", "directory", "log", "cache", "user"], "license": "MIT", "metadata_version": "2.0", "name": "appdirs", "summary": "A small Python module for determining appropriate platform-specific dirs, e.g. 
a \"user data dir\".", "test_requires": [{"requires": []}], "version": "1.4.3"} \ No newline at end of file diff --git a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/top_level.txt b/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/top_level.txt deleted file mode 100644 index d64bc321..00000000 --- a/venv/lib/python3.8/site-packages/appdirs-1.4.3.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -appdirs diff --git a/venv/lib/python3.8/site-packages/appdirs.py b/venv/lib/python3.8/site-packages/appdirs.py deleted file mode 100644 index ae67001a..00000000 --- a/venv/lib/python3.8/site-packages/appdirs.py +++ /dev/null @@ -1,608 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright (c) 2005-2010 ActiveState Software Inc. -# Copyright (c) 2013 Eddy Petrișor - -"""Utilities for determining application-specific dirs. - -See for details and usage. -""" -# Dev Notes: -# - MSDN on where to store app data files: -# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 -# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html -# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - -__version_info__ = (1, 4, 3) -__version__ = '.'.join(map(str, __version_info__)) - - -import sys -import os - -PY3 = sys.version_info[0] == 3 - -if PY3: - unicode = str - -if sys.platform.startswith('java'): - import platform - os_name = platform.java_ver()[3][0] - if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. - system = 'win32' - elif os_name.startswith('Mac'): # "Mac OS X", etc. - system = 'darwin' - else: # "Linux", "SunOS", "FreeBSD", etc. - # Setting this to "linux2" is not ideal, but only Windows or Mac - # are actually checked for and the rest of the module expects - # *sys.platform* style strings. - system = 'linux2' -else: - system = sys.platform - - - -def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user data directories are: - Mac OS X: ~/Library/Application Support/ - Unix: ~/.local/share/ # or in $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\\Application Data\\ - Win XP (roaming): C:\Documents and Settings\\Local Settings\Application Data\\ - Win 7 (not roaming): C:\Users\\AppData\Local\\ - Win 7 (roaming): C:\Users\\AppData\Roaming\\ - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. - That means, by default "~/.local/share/". 
- """ - if system == "win32": - if appauthor is None: - appauthor = appname - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(_get_win_folder(const)) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('~/Library/Application Support/') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of data dirs should be - returned. By default, the first item from XDG_DATA_DIRS is - returned, or '/usr/local/share/', - if XDG_DATA_DIRS is not set - - Typical site data directories are: - Mac OS X: /Library/Application Support/ - Unix: /usr/local/share/ or /usr/share/ - Win XP: C:\Documents and Settings\All Users\Application Data\\ - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - Win 7: C:\ProgramData\\ # Hidden, but writeable on Win 7. - - For Unix, this is using the $XDG_DATA_DIRS[0] default. - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('/Library/Application Support') - if appname: - path = os.path.join(path, appname) - else: - # XDG default for $XDG_DATA_DIRS - # only first, if multipath is False - path = os.getenv('XDG_DATA_DIRS', - os.pathsep.join(['/usr/local/share', '/usr/share'])) - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - if appname and version: - path = os.path.join(path, version) - return path - - -def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. 
You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user config directories are: - Mac OS X: same as user_data_dir - Unix: ~/.config/ # or in $XDG_CONFIG_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by default "~/.config/". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of config dirs should be - returned. By default, the first item from XDG_CONFIG_DIRS is - returned, or '/etc/xdg/', if XDG_CONFIG_DIRS is not set - - Typical site config directories are: - Mac OS X: same as site_data_dir - Unix: /etc/xdg/ or $XDG_CONFIG_DIRS[i]/ for each value in - $XDG_CONFIG_DIRS - Win *: same as site_data_dir - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - - For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system in ["win32", "darwin"]: - path = site_data_dir(appname, appauthor) - if appname and version: - path = os.path.join(path, version) - else: - # XDG default for $XDG_CONFIG_DIRS - # only first, if multipath is False - path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - -def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. 
You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Cache" to the base app data dir for Windows. See - discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Caches/ - Unix: ~/.cache/ (XDG default) - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Cache - Vista: C:\Users\\AppData\Local\\\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go in - the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming - app data dir (the default returned by `user_data_dir` above). Apps typically - put cache data somewhere *under* the given dir here. Some examples: - ...\Mozilla\Firefox\Profiles\\Cache - ...\Acme\SuperApp\Cache\1.0 - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - This can be disabled with the `opinion=False` option. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - if opinion: - path = os.path.join(path, "Cache") - elif system == 'darwin': - path = os.path.expanduser('~/Library/Caches') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_state_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific state dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - - for a discussion of issues. - - Typical user state directories are: - Mac OS X: same as user_data_dir - Unix: ~/.local/state/ # or in $XDG_STATE_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow this Debian proposal - to extend the XDG spec and support $XDG_STATE_HOME. - - That means, by default "~/.local/state/". 
- """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific log dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be ".". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Logs" to the base app data dir for Windows, and "log" to the - base cache dir for Unix. See discussion below. - - Typical user log directories are: - Mac OS X: ~/Library/Logs/ - Unix: ~/.cache//log # or under $XDG_CACHE_HOME if defined - Win XP: C:\Documents and Settings\\Local Settings\Application Data\\\Logs - Vista: C:\Users\\AppData\Local\\\Logs - - On Windows the only suggestion in the MSDN docs is that local settings - go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in - examples of what some windows apps use for a logs dir.) - - OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` - value for Windows and appends "log" to the user cache dir for Unix. - This can be disabled with the `opinion=False` option. 
- """ - if system == "darwin": - path = os.path.join( - os.path.expanduser('~/Library/Logs'), - appname) - elif system == "win32": - path = user_data_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "Logs") - else: - path = user_cache_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "log") - if appname and version: - path = os.path.join(path, version) - return path - - -class AppDirs(object): - """Convenience wrapper for getting application dirs.""" - def __init__(self, appname=None, appauthor=None, version=None, - roaming=False, multipath=False): - self.appname = appname - self.appauthor = appauthor - self.version = version - self.roaming = roaming - self.multipath = multipath - - @property - def user_data_dir(self): - return user_data_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_data_dir(self): - return site_data_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_config_dir(self): - return user_config_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_config_dir(self): - return site_config_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_cache_dir(self): - return user_cache_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_state_dir(self): - return user_state_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_log_dir(self): - return user_log_dir(self.appname, self.appauthor, - version=self.version) - - -#---- internal support stuff - -def _get_win_folder_from_registry(csidl_name): - """This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - if PY3: - import winreg as _winreg - else: - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - dir, type = _winreg.QueryValueEx(key, shell_folder_name) - return dir - - -def _get_win_folder_with_pywin32(csidl_name): - from win32com.shell import shellcon, shell - dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) - # Try to make this a unicode path because SHGetFolderPath does - # not return unicode strings when there is unicode data in the - # path. - try: - dir = unicode(dir) - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - try: - import win32api - dir = win32api.GetShortPathName(dir) - except ImportError: - pass - except UnicodeError: - pass - return dir - - -def _get_win_folder_with_ctypes(csidl_name): - import ctypes - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. See - # . 
- has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - -def _get_win_folder_with_jna(csidl_name): - import array - from com.sun import jna - from com.sun.jna.platform import win32 - - buf_size = win32.WinDef.MAX_PATH * 2 - buf = array.zeros('c', buf_size) - shell = win32.Shell32.INSTANCE - shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf) - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - # Downgrade to short path name if have highbit chars. See - # . - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf = array.zeros('c', buf_size) - kernel = win32.Kernel32.INSTANCE - if kernel.GetShortPathName(dir, buf, buf_size): - dir = jna.Native.toString(buf.tostring()).rstrip("\0") - - return dir - -if system == "win32": - try: - import win32com.shell - _get_win_folder = _get_win_folder_with_pywin32 - except ImportError: - try: - from ctypes import windll - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - try: - import com.sun.jna - _get_win_folder = _get_win_folder_with_jna - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -#---- self test code - -if __name__ == "__main__": - appname = "MyApp" - appauthor = "MyCompany" - - props = ("user_data_dir", - "user_config_dir", - "user_cache_dir", - "user_state_dir", - "user_log_dir", - "site_data_dir", - "site_config_dir") - - print("-- app dirs %s --" % __version__) - - print("-- app dirs (with optional 'version')") - dirs = AppDirs(appname, appauthor, version="1.0") - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'version')") - dirs = AppDirs(appname, appauthor) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (without optional 'appauthor')") - dirs = AppDirs(appname) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) - - print("\n-- app dirs (with disabled 'appauthor')") - dirs = AppDirs(appname, appauthor=False) - for prop in props: - print("%s: %s" % (prop, getattr(dirs, prop))) diff --git a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/COPYING b/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/COPYING deleted file mode 100644 index d511905c..00000000 --- a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/COPYING +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. 
- - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. 
- -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. 
(This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. 
- -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
diff --git a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/COPYING.LESSER b/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/COPYING.LESSER deleted file mode 100644 index 2d2d780e..00000000 --- a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/COPYING.LESSER +++ /dev/null @@ -1,510 +0,0 @@ - - GNU LESSER GENERAL PUBLIC LICENSE - Version 2.1, February 1999 - - Copyright (C) 1991, 1999 Free Software Foundation, Inc. - 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -Licenses are intended to guarantee your freedom to share and change -free software--to make sure the software is free for all its users. - - This license, the Lesser General Public License, applies to some -specially designated software packages--typically libraries--of the -Free Software Foundation and other authors who decide to use it. You -can use it too, but we suggest you first think carefully about whether -this license or the ordinary General Public License is the better -strategy to use in any particular case, based on the explanations -below. - - When we speak of free software, we are referring to freedom of use, -not price. Our General Public Licenses are designed to make sure that -you have the freedom to distribute copies of free software (and charge -for this service if you wish); that you receive source code or can get -it if you want it; that you can change the software and use pieces of -it in new free programs; and that you are informed that you can do -these things. - - To protect your rights, we need to make restrictions that forbid -distributors to deny you these rights or to ask you to surrender these -rights. These restrictions translate to certain responsibilities for -you if you distribute copies of the library or if you modify it. - - For example, if you distribute copies of the library, whether gratis -or for a fee, you must give the recipients all the rights that we gave -you. You must make sure that they, too, receive or can get the source -code. If you link other code with the library, you must provide -complete object files to the recipients, so that they can relink them -with the library after making changes to the library and recompiling -it. And you must show them these terms so they know their rights. - - We protect your rights with a two-step method: (1) we copyright the -library, and (2) we offer you this license, which gives you legal -permission to copy, distribute and/or modify the library. - - To protect each distributor, we want to make it very clear that -there is no warranty for the free library. Also, if the library is -modified by someone else and passed on, the recipients should know -that what they have is not the original version, so that the original -author's reputation will not be affected by problems that might be -introduced by others. - - Finally, software patents pose a constant threat to the existence of -any free program. We wish to make sure that a company cannot -effectively restrict the users of a free program by obtaining a -restrictive license from a patent holder. 
Therefore, we insist that -any patent license obtained for a version of the library must be -consistent with the full freedom of use specified in this license. - - Most GNU software, including some libraries, is covered by the -ordinary GNU General Public License. This license, the GNU Lesser -General Public License, applies to certain designated libraries, and -is quite different from the ordinary General Public License. We use -this license for certain libraries in order to permit linking those -libraries into non-free programs. - - When a program is linked with a library, whether statically or using -a shared library, the combination of the two is legally speaking a -combined work, a derivative of the original library. The ordinary -General Public License therefore permits such linking only if the -entire combination fits its criteria of freedom. The Lesser General -Public License permits more lax criteria for linking other code with -the library. - - We call this license the "Lesser" General Public License because it -does Less to protect the user's freedom than the ordinary General -Public License. It also provides other free software developers Less -of an advantage over competing non-free programs. These disadvantages -are the reason we use the ordinary General Public License for many -libraries. However, the Lesser license provides advantages in certain -special circumstances. - - For example, on rare occasions, there may be a special need to -encourage the widest possible use of a certain library, so that it -becomes a de-facto standard. To achieve this, non-free programs must -be allowed to use the library. A more frequent case is that a free -library does the same job as widely used non-free libraries. In this -case, there is little to gain by limiting the free library to free -software only, so we use the Lesser General Public License. - - In other cases, permission to use a particular library in non-free -programs enables a greater number of people to use a large body of -free software. For example, permission to use the GNU C Library in -non-free programs enables many more people to use the whole GNU -operating system, as well as its variant, the GNU/Linux operating -system. - - Although the Lesser General Public License is Less protective of the -users' freedom, it does ensure that the user of a program that is -linked with the Library has the freedom and the wherewithal to run -that program using a modified version of the Library. - - The precise terms and conditions for copying, distribution and -modification follow. Pay close attention to the difference between a -"work based on the library" and a "work that uses the library". The -former contains code derived from the library, whereas the latter must -be combined with the library in order to run. - - GNU LESSER GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License Agreement applies to any software library or other -program which contains a notice placed by the copyright holder or -other authorized party saying it may be distributed under the terms of -this Lesser General Public License (also called "this License"). -Each licensee is addressed as "you". - - A "library" means a collection of software functions and/or data -prepared so as to be conveniently linked with application programs -(which use some of those functions and data) to form executables. - - The "Library", below, refers to any such software library or work -which has been distributed under these terms. 
A "work based on the -Library" means either the Library or any derivative work under -copyright law: that is to say, a work containing the Library or a -portion of it, either verbatim or with modifications and/or translated -straightforwardly into another language. (Hereinafter, translation is -included without limitation in the term "modification".) - - "Source code" for a work means the preferred form of the work for -making modifications to it. For a library, complete source code means -all the source code for all modules it contains, plus any associated -interface definition files, plus the scripts used to control -compilation and installation of the library. - - Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running a program using the Library is not restricted, and output from -such a program is covered only if its contents constitute a work based -on the Library (independent of the use of the Library in a tool for -writing it). Whether that is true depends on what the Library does -and what the program that uses the Library does. - - 1. You may copy and distribute verbatim copies of the Library's -complete source code as you receive it, in any medium, provided that -you conspicuously and appropriately publish on each copy an -appropriate copyright notice and disclaimer of warranty; keep intact -all the notices that refer to this License and to the absence of any -warranty; and distribute a copy of this License along with the -Library. - - You may charge a fee for the physical act of transferring a copy, -and you may at your option offer warranty protection in exchange for a -fee. - - 2. You may modify your copy or copies of the Library or any portion -of it, thus forming a work based on the Library, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) The modified work must itself be a software library. - - b) You must cause the files modified to carry prominent notices - stating that you changed the files and the date of any change. - - c) You must cause the whole of the work to be licensed at no - charge to all third parties under the terms of this License. - - d) If a facility in the modified Library refers to a function or a - table of data to be supplied by an application program that uses - the facility, other than as an argument passed when the facility - is invoked, then you must make a good faith effort to ensure that, - in the event an application does not supply such function or - table, the facility still operates, and performs whatever part of - its purpose remains meaningful. - - (For example, a function in a library to compute square roots has - a purpose that is entirely well-defined independent of the - application. Therefore, Subsection 2d requires that any - application-supplied function or table used by this function must - be optional: if the application does not supply it, the square - root function must still compute square roots.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Library, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. 
But when you -distribute the same sections as part of a whole which is a work based -on the Library, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote -it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Library. - -In addition, mere aggregation of another work not based on the Library -with the Library (or with a work based on the Library) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may opt to apply the terms of the ordinary GNU General Public -License instead of this License to a given copy of the Library. To do -this, you must alter all the notices that refer to this License, so -that they refer to the ordinary GNU General Public License, version 2, -instead of to this License. (If a newer version than version 2 of the -ordinary GNU General Public License has appeared, then you can specify -that version instead if you wish.) Do not make any other change in -these notices. - - Once this change is made in a given copy, it is irreversible for -that copy, so the ordinary GNU General Public License applies to all -subsequent copies and derivative works made from that copy. - - This option is useful when you wish to copy part of the code of -the Library into a program that is not a library. - - 4. You may copy and distribute the Library (or a portion or -derivative of it, under Section 2) in object code or executable form -under the terms of Sections 1 and 2 above provided that you accompany -it with the complete corresponding machine-readable source code, which -must be distributed under the terms of Sections 1 and 2 above on a -medium customarily used for software interchange. - - If distribution of object code is made by offering access to copy -from a designated place, then offering equivalent access to copy the -source code from the same place satisfies the requirement to -distribute the source code, even though third parties are not -compelled to copy the source along with the object code. - - 5. A program that contains no derivative of any portion of the -Library, but is designed to work with the Library by being compiled or -linked with it, is called a "work that uses the Library". Such a -work, in isolation, is not a derivative work of the Library, and -therefore falls outside the scope of this License. - - However, linking a "work that uses the Library" with the Library -creates an executable that is a derivative of the Library (because it -contains portions of the Library), rather than a "work that uses the -library". The executable is therefore covered by this License. -Section 6 states terms for distribution of such executables. - - When a "work that uses the Library" uses material from a header file -that is part of the Library, the object code for the work may be a -derivative work of the Library even though the source code is not. -Whether this is true is especially significant if the work can be -linked without the Library, or if the work is itself a library. The -threshold for this to be true is not precisely defined by law. 
- - If such an object file uses only numerical parameters, data -structure layouts and accessors, and small macros and small inline -functions (ten lines or less in length), then the use of the object -file is unrestricted, regardless of whether it is legally a derivative -work. (Executables containing this object code plus portions of the -Library will still fall under Section 6.) - - Otherwise, if the work is a derivative of the Library, you may -distribute the object code for the work under the terms of Section 6. -Any executables containing that work also fall under Section 6, -whether or not they are linked directly with the Library itself. - - 6. As an exception to the Sections above, you may also combine or -link a "work that uses the Library" with the Library to produce a -work containing portions of the Library, and distribute that work -under terms of your choice, provided that the terms permit -modification of the work for the customer's own use and reverse -engineering for debugging such modifications. - - You must give prominent notice with each copy of the work that the -Library is used in it and that the Library and its use are covered by -this License. You must supply a copy of this License. If the work -during execution displays copyright notices, you must include the -copyright notice for the Library among them, as well as a reference -directing the user to the copy of this License. Also, you must do one -of these things: - - a) Accompany the work with the complete corresponding - machine-readable source code for the Library including whatever - changes were used in the work (which must be distributed under - Sections 1 and 2 above); and, if the work is an executable linked - with the Library, with the complete machine-readable "work that - uses the Library", as object code and/or source code, so that the - user can modify the Library and then relink to produce a modified - executable containing the modified Library. (It is understood - that the user who changes the contents of definitions files in the - Library will not necessarily be able to recompile the application - to use the modified definitions.) - - b) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (1) uses at run time a - copy of the library already present on the user's computer system, - rather than copying library functions into the executable, and (2) - will operate properly with a modified version of the library, if - the user installs one, as long as the modified version is - interface-compatible with the version that the work was made with. - - c) Accompany the work with a written offer, valid for at least - three years, to give the same user the materials specified in - Subsection 6a, above, for a charge no more than the cost of - performing this distribution. - - d) If distribution of the work is made by offering access to copy - from a designated place, offer equivalent access to copy the above - specified materials from the same place. - - e) Verify that the user has already received a copy of these - materials or that you have already sent this user a copy. - - For an executable, the required form of the "work that uses the -Library" must include any data and utility programs needed for -reproducing the executable from it. 
However, as a special exception, -the materials to be distributed need not include anything that is -normally distributed (in either source or binary form) with the major -components (compiler, kernel, and so on) of the operating system on -which the executable runs, unless that component itself accompanies -the executable. - - It may happen that this requirement contradicts the license -restrictions of other proprietary libraries that do not normally -accompany the operating system. Such a contradiction means you cannot -use both them and the Library together in an executable that you -distribute. - - 7. You may place library facilities that are a work based on the -Library side-by-side in a single library together with other library -facilities not covered by this License, and distribute such a combined -library, provided that the separate distribution of the work based on -the Library and of the other library facilities is otherwise -permitted, and provided that you do these two things: - - a) Accompany the combined library with a copy of the same work - based on the Library, uncombined with any other library - facilities. This must be distributed under the terms of the - Sections above. - - b) Give prominent notice with the combined library of the fact - that part of it is a work based on the Library, and explaining - where to find the accompanying uncombined form of the same work. - - 8. You may not copy, modify, sublicense, link with, or distribute -the Library except as expressly provided under this License. Any -attempt otherwise to copy, modify, sublicense, link with, or -distribute the Library is void, and will automatically terminate your -rights under this License. However, parties who have received copies, -or rights, from you under this License will not have their licenses -terminated so long as such parties remain in full compliance. - - 9. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Library or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Library (or any work based on the -Library), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Library or works based on it. - - 10. Each time you redistribute the Library (or any work based on the -Library), the recipient automatically receives a license from the -original licensor to copy, distribute, link with or modify the Library -subject to these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties with -this License. - - 11. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Library at all. 
For example, if a patent -license would not permit royalty-free redistribution of the Library by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply, and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 12. If the distribution and/or use of the Library is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Library under this License -may add an explicit geographical distribution limitation excluding those -countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 13. The Free Software Foundation may publish revised and/or new -versions of the Lesser General Public License from time to time. -Such new versions will be similar in spirit to the present version, -but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Library -specifies a version number of this License which applies to it and -"any later version", you have the option of following the terms and -conditions either of that version or of any later version published by -the Free Software Foundation. If the Library does not specify a -license version number, you may choose any version ever published by -the Free Software Foundation. - - 14. If you wish to incorporate parts of the Library into other free -programs whose distribution conditions are incompatible with these, -write to the author to ask for permission. For software which is -copyrighted by the Free Software Foundation, write to the Free -Software Foundation; we sometimes make exceptions for this. Our -decision will be guided by the two goals of preserving the free status -of all derivatives of our free software and of promoting the sharing -and reuse of software generally. - - NO WARRANTY - - 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO -WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. -EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR -OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY -KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE -LIBRARY IS WITH YOU. 
SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
-THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
-FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
-CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
-LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
-RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
-FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
-SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGES.
-
-                     END OF TERMS AND CONDITIONS
-
-           How to Apply These Terms to Your New Libraries
-
-  If you develop a new library, and you want it to be of the greatest
-possible use to the public, we recommend making it free software that
-everyone can redistribute and change.  You can do so by permitting
-redistribution under these terms (or, alternatively, under the terms
-of the ordinary General Public License).
-
-  To apply these terms, attach the following notices to the library.
-It is safest to attach them to the start of each source file to most
-effectively convey the exclusion of warranty; and each file should
-have at least the "copyright" line and a pointer to where the full
-notice is found.
-
-    <one line to give the library's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Lesser General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Lesser General Public License for more details.
-
-    You should have received a copy of the GNU Lesser General Public
-    License along with this library; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-You should also get your employer (if you work as a programmer) or
-your school, if any, to sign a "copyright disclaimer" for the library,
-if necessary.  Here is a sample; alter the names:
-
-  Yoyodyne, Inc., hereby disclaims all copyright interest in the
-  library `Frob' (a library for tweaking knobs) written by James
-  Random Hacker.
-
-  <signature of Ty Coon>, 1 April 1990
-  Ty Coon, President of Vice
-
-That's all there is to it!
-
-
diff --git a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/INSTALLER b/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/INSTALLER
deleted file mode 100644
index a1b589e3..00000000
--- a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/METADATA b/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/METADATA
deleted file mode 100644
index 2805693e..00000000
--- a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/METADATA
+++ /dev/null
@@ -1,117 +0,0 @@
-Metadata-Version: 2.1
-Name: astroid
-Version: 2.3.3
-Summary: An abstract syntax tree for Python with inference support.
-Home-page: https://github.com/PyCQA/astroid -Author: Python Code Quality Authority -Author-email: code-quality@python.org -License: LGPL -Platform: UNKNOWN -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Software Development :: Quality Assurance -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Requires-Python: >=3.5.* -Requires-Dist: lazy-object-proxy (==1.4.*) -Requires-Dist: six (~=1.12) -Requires-Dist: wrapt (==1.11.*) -Requires-Dist: typed-ast (<1.5,>=1.4.0) ; implementation_name == "cpython" and python_version < "3.8" - -Astroid -======= - -.. image:: https://travis-ci.org/PyCQA/astroid.svg?branch=master - :target: https://travis-ci.org/PyCQA/astroid - -.. image:: https://ci.appveyor.com/api/projects/status/co3u42kunguhbh6l/branch/master?svg=true - :alt: AppVeyor Build Status - :target: https://ci.appveyor.com/project/PCManticore/astroid - -.. image:: https://coveralls.io/repos/github/PyCQA/astroid/badge.svg?branch=master - :target: https://coveralls.io/github/PyCQA/astroid?branch=master - -.. image:: https://readthedocs.org/projects/astroid/badge/?version=latest - :target: http://astroid.readthedocs.io/en/latest/?badge=latest - :alt: Documentation Status - -.. image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/ambv/black - -.. |tideliftlogo| image:: doc/media/Tidelift_Logos_RGB_Tidelift_Shorthand_On-White_small.png - :width: 75 - :height: 60 - :alt: Tidelift - -.. list-table:: - :widths: 10 100 - - * - |tideliftlogo| - - Professional support for astroid is available as part of the `Tidelift - Subscription`_. Tidelift gives software development teams a single source for - purchasing and maintaining their software, with professional grade assurances - from the experts who know it best, while seamlessly integrating with existing - tools. - -.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-astroid?utm_source=pypi-astroid&utm_medium=referral&utm_campaign=readme - - - -What's this? ------------- - -The aim of this module is to provide a common base representation of -python source code. It is currently the library powering pylint's capabilities. - -It provides a compatible representation which comes from the `_ast` -module. It rebuilds the tree generated by the builtin _ast module by -recursively walking down the AST and building an extended ast. The new -node classes have additional methods and attributes for different -usages. They include some support for static inference and local name -scopes. Furthermore, astroid can also build partial trees by inspecting living -objects. - - -Installation ------------- - -Extract the tarball, jump into the created directory and run:: - - pip install . - - -If you want to do an editable installation, you can run:: - - pip install -e . - - -If you have any questions, please mail the code-quality@python.org -mailing list for support. See -http://mail.python.org/mailman/listinfo/code-quality for subscription -information and archives. - -Documentation -------------- -http://astroid.readthedocs.io/en/latest/ - - -Python Versions ---------------- - -astroid 2.0 is currently available for Python 3 only. 
If you want Python 2
-support, older versions of astroid will still be supported until 2020.
-
-Test
-----
-
-Tests are in the 'test' subdirectory. To launch the whole test suite, you can use
-either `tox` or `pytest`::
-
-    tox
-    pytest astroid
-
-
diff --git a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/RECORD b/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/RECORD
deleted file mode 100644
index 01f34b2e..00000000
--- a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/RECORD
+++ /dev/null
@@ -1,145 +0,0 @@
-astroid-2.3.3.dist-info/COPYING,sha256=qxX9UmvY3Rip5368E5ZWv00z6X_HI4zRG_YOK5uGZsY,17987
-astroid-2.3.3.dist-info/COPYING.LESSER,sha256=qb3eVhbs3R6YC0TzYGAO6Hg7H5m4zIOivrFjoKOQ6GE,26527
-astroid-2.3.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-astroid-2.3.3.dist-info/METADATA,sha256=i0Ut5kY28jjA7pIT7o-_UbHKI5HbTXA0xQubIxcHO8w,3869
-astroid-2.3.3.dist-info/RECORD,,
-astroid-2.3.3.dist-info/WHEEL,sha256=p46_5Uhzqz6AzeSosiOnxK-zmFja1i22CrQCjmYe8ec,92
-astroid-2.3.3.dist-info/top_level.txt,sha256=HsdW4O2x7ZXRj6k-agi3RaQybGLobI3VSE-jt4vQUXM,8
-astroid/__init__.py,sha256=tJJMsKzMv8hUgw3y0VQAAMx9BO-nrNUcNy_wI0XBFXo,5538
-astroid/__pkginfo__.py,sha256=vS7X-qu0abKFCIxjA0h9994nl1zj7Ziu3lEz9jniONU,2053
-astroid/__pycache__/__init__.cpython-38.pyc,,
-astroid/__pycache__/__pkginfo__.cpython-38.pyc,,
-astroid/__pycache__/_ast.cpython-38.pyc,,
-astroid/__pycache__/arguments.cpython-38.pyc,,
-astroid/__pycache__/as_string.cpython-38.pyc,,
-astroid/__pycache__/bases.cpython-38.pyc,,
-astroid/__pycache__/builder.cpython-38.pyc,,
-astroid/__pycache__/context.cpython-38.pyc,,
-astroid/__pycache__/decorators.cpython-38.pyc,,
-astroid/__pycache__/exceptions.cpython-38.pyc,,
-astroid/__pycache__/helpers.cpython-38.pyc,,
-astroid/__pycache__/inference.cpython-38.pyc,,
-astroid/__pycache__/manager.cpython-38.pyc,,
-astroid/__pycache__/mixins.cpython-38.pyc,,
-astroid/__pycache__/modutils.cpython-38.pyc,,
-astroid/__pycache__/node_classes.cpython-38.pyc,,
-astroid/__pycache__/nodes.cpython-38.pyc,,
-astroid/__pycache__/objects.cpython-38.pyc,,
-astroid/__pycache__/protocols.cpython-38.pyc,,
-astroid/__pycache__/raw_building.cpython-38.pyc,,
-astroid/__pycache__/rebuilder.cpython-38.pyc,,
-astroid/__pycache__/scoped_nodes.cpython-38.pyc,,
-astroid/__pycache__/test_utils.cpython-38.pyc,,
-astroid/__pycache__/transforms.cpython-38.pyc,,
-astroid/__pycache__/util.cpython-38.pyc,,
-astroid/_ast.py,sha256=6OGeHGRbK6oLmrsw6-UOpLFlIV1rStrA7BNpKGsu5Lw,1406
-astroid/arguments.py,sha256=cui-UmbEeywSk0eitSrOhi9F0Ci2clS4qYXTi8uXRs4,11783
-astroid/as_string.py,sha256=8SoRjh8UlDRWkbFMTvse9th8flPt6iu9xOcBip1s1f8,22411
-astroid/bases.py,sha256=G2Zs5OEHoshjLJT8e-ApDH9Q3EZtC27cKJ5yKf84_7w,18698
-astroid/brain/__pycache__/brain_argparse.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_attrs.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_builtin_inference.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_collections.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_crypt.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_curses.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_dataclasses.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_dateutil.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_fstrings.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_functools.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_gi.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_hashlib.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_http.cpython-38.pyc,,
-astroid/brain/__pycache__/brain_io.cpython-38.pyc,, -astroid/brain/__pycache__/brain_mechanize.cpython-38.pyc,, -astroid/brain/__pycache__/brain_multiprocessing.cpython-38.pyc,, -astroid/brain/__pycache__/brain_namedtuple_enum.cpython-38.pyc,, -astroid/brain/__pycache__/brain_nose.cpython-38.pyc,, -astroid/brain/__pycache__/brain_numpy_core_fromnumeric.cpython-38.pyc,, -astroid/brain/__pycache__/brain_numpy_core_function_base.cpython-38.pyc,, -astroid/brain/__pycache__/brain_numpy_core_multiarray.cpython-38.pyc,, -astroid/brain/__pycache__/brain_numpy_core_numeric.cpython-38.pyc,, -astroid/brain/__pycache__/brain_numpy_core_numerictypes.cpython-38.pyc,, -astroid/brain/__pycache__/brain_numpy_core_umath.cpython-38.pyc,, -astroid/brain/__pycache__/brain_numpy_ndarray.cpython-38.pyc,, -astroid/brain/__pycache__/brain_numpy_random_mtrand.cpython-38.pyc,, -astroid/brain/__pycache__/brain_numpy_utils.cpython-38.pyc,, -astroid/brain/__pycache__/brain_pkg_resources.cpython-38.pyc,, -astroid/brain/__pycache__/brain_pytest.cpython-38.pyc,, -astroid/brain/__pycache__/brain_qt.cpython-38.pyc,, -astroid/brain/__pycache__/brain_random.cpython-38.pyc,, -astroid/brain/__pycache__/brain_re.cpython-38.pyc,, -astroid/brain/__pycache__/brain_six.cpython-38.pyc,, -astroid/brain/__pycache__/brain_ssl.cpython-38.pyc,, -astroid/brain/__pycache__/brain_subprocess.cpython-38.pyc,, -astroid/brain/__pycache__/brain_threading.cpython-38.pyc,, -astroid/brain/__pycache__/brain_typing.cpython-38.pyc,, -astroid/brain/__pycache__/brain_uuid.cpython-38.pyc,, -astroid/brain/brain_argparse.py,sha256=VEeMCr3OIjHmCy35uc-kX6nJ5_NUOAimpGJMr6CChoA,1024 -astroid/brain/brain_attrs.py,sha256=k8zJqIXsIbQrncthrzyB5NtdPTktgVi9wG7nyl8xMzs,2208 -astroid/brain/brain_builtin_inference.py,sha256=Ttwr1Ekt1_czEF50uEjY0dA5S89WFqyyBl0sWPUaYnE,27206 -astroid/brain/brain_collections.py,sha256=8Vmsb9I19er3MycZtT6qWDrIMV_SEHtl87gTPC5qQHc,2651 -astroid/brain/brain_crypt.py,sha256=gA7Q4GVuAM4viuTGWM6SNTPQXv5Gr_mFapyKMTRcsJ0,875 -astroid/brain/brain_curses.py,sha256=tDnlCP1bEvleqCMz856yua9mM5um1p_JendFhT4rBFk,3303 -astroid/brain/brain_dataclasses.py,sha256=5WndOYSY0oi2v-Od6KdPte-FKt00LoNRH2riSB4S1os,1647 -astroid/brain/brain_dateutil.py,sha256=q2dyV2907Bw4n7m2W4EEdok3Ndv8NzeIQxAZwXBiS14,795 -astroid/brain/brain_fstrings.py,sha256=VKVMijgLE2pg2dtXM6GGFgONOxOg8qA9D5V6dYzWTbQ,2121 -astroid/brain/brain_functools.py,sha256=gGMs0cEMVXR9pRPeu3LqkMARE6yzymvC7pzmRbJCWIY,5400 -astroid/brain/brain_gi.py,sha256=-EpcKf9z3wT_7v0k0WXIZtgk3-213lkfUX9bxeKOM3Y,6810 -astroid/brain/brain_hashlib.py,sha256=cp30hX5HhWqbWG3zqcNu8N3aHGeQK4DPi4ac8owBonU,2163 -astroid/brain/brain_http.py,sha256=-cQohgE5uQ5eBBjjFg7P5c2OlganAK6yZOKA6EkKd6o,10317 -astroid/brain/brain_io.py,sha256=DJcTFMTexrsHaGg2-kHoXwonddu13ImT7NEjiF1xPiU,1470 -astroid/brain/brain_mechanize.py,sha256=xTBc-u2DMmMPeci7DVFs4L2T98DwwLF_Ob5YZviLPp8,889 -astroid/brain/brain_multiprocessing.py,sha256=4iLBXpB7Bgy_hGVx-xhV7spYKg5tc4OybIiBcuwNL7U,3017 -astroid/brain/brain_namedtuple_enum.py,sha256=JBRVBhPSicUAixPdeEerhnxeEJtVnS7T1FkVhvJcDZU,15722 -astroid/brain/brain_nose.py,sha256=kECw2jHmX0IUPX4Gx3XVGrflKGnlgPB79QHt6WU2cwQ,2211 -astroid/brain/brain_numpy_core_fromnumeric.py,sha256=_mtg-7jySDnDoxhtrNtimVZ_lbsm63jb7U0iqcBjgLY,626 -astroid/brain/brain_numpy_core_function_base.py,sha256=2jtHOa_RCMlig7UZVUWSmICFvotvu7bZKCdLZhbTc0Q,1173 -astroid/brain/brain_numpy_core_multiarray.py,sha256=e-igYgbLP8UhCq3VSlRhykhXyoMcO2M7UOcrbzfuWpQ,1890 
-astroid/brain/brain_numpy_core_numeric.py,sha256=RP9L1GfhPBGK3KQeeDoo-OyFUvkVNksw0sc9a6t3NJ8,1389 -astroid/brain/brain_numpy_core_numerictypes.py,sha256=RBRdil8D5qtTj6yquQ6_JwYACKRM7vfh4p7nwy3MYLk,7706 -astroid/brain/brain_numpy_core_umath.py,sha256=GGTCDVNDKEAppXjjToNzawa8lpCFr9GEh0OY3eQulec,5279 -astroid/brain/brain_numpy_ndarray.py,sha256=GMDomYcpCfCoKa1amdtQPsdy_VMPot3QUaG9mxlApBk,8417 -astroid/brain/brain_numpy_random_mtrand.py,sha256=It76Xh4atuxwGtsHiXe4llvEKyKh0R5Wa7MgG5y5vVU,3284 -astroid/brain/brain_numpy_utils.py,sha256=NxY99MzQ-m2Md_nofdAU30DFmse2CjpgqfWvYoMDDOc,1622 -astroid/brain/brain_pkg_resources.py,sha256=S_5UED1Zg8ObEJumRdpYGnjxZzemh_G_NFj3p5NGPfc,2262 -astroid/brain/brain_pytest.py,sha256=RXaNUVqy2R0et0Upn4GJkVgq5SG8Pl7zLlhqQg8Xx3Q,2384 -astroid/brain/brain_qt.py,sha256=FXdziZGGzFRzukhZguFoMY4q6PSsp6ZhNJovpzDG_Kc,2464 -astroid/brain/brain_random.py,sha256=2RZY-QEXMNWp7E6h0l0-ke-DtjKTOFlTdjiQZi3XdQc,2432 -astroid/brain/brain_re.py,sha256=le7VJHUAf80HyE_aQCh7_8FyDVK6JwNWA--c9RaMVQ8,1128 -astroid/brain/brain_six.py,sha256=6QHcKXoYf8yMMXWkx3g3lK0kqB5OFeYcXwjUTdgWTMw,6146 -astroid/brain/brain_ssl.py,sha256=2quiZVA_BW8PWmkAsOuYanq9Hvb93LT7c9YVslw3r14,3634 -astroid/brain/brain_subprocess.py,sha256=iXuKDWsUJhJDdKLDm6N8EiBw78Pjn-Xw-UJFk5gvup0,3668 -astroid/brain/brain_threading.py,sha256=73Inb3j7Tps5LQDJDGZOgR-bawttS1rk1l0LUL1WR1o,818 -astroid/brain/brain_typing.py,sha256=iFw33beNCitCJjJNvccIY6SsFJcdKVDdl-56DxDioh0,2780 -astroid/brain/brain_uuid.py,sha256=flWrk1Ve7oqYrO8GTZ3To8RBYteRfYwvash-s9KiU9o,564 -astroid/builder.py,sha256=0wrC4-ausU_nEEkgI8LJTsrNFN_XCbOkqoG2DsKCsks,16023 -astroid/context.py,sha256=VsyUDVB1J9fk1o8MQoE4ygfC7gdNjVYVUD4Bhgs9JM0,5164 -astroid/decorators.py,sha256=m0v63YRiQKc66-g8ckkYeJ0d5cji8AhkUxFPbTfLVDc,4229 -astroid/exceptions.py,sha256=_IJRdLfyNSPVjxYgEd11Uu9XpdqE7uBCVOEIxt3ua70,7047 -astroid/helpers.py,sha256=3HOFwK0ieIoLu7JhrbM1r0zxPyDtTl2oNSv-tXQ2iRw,9170 -astroid/inference.py,sha256=0diHXE-ZGiWU9y31flQa3YZhg6-v4dZgD4PPFAlHJGc,33023 -astroid/interpreter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -astroid/interpreter/__pycache__/__init__.cpython-38.pyc,, -astroid/interpreter/__pycache__/dunder_lookup.cpython-38.pyc,, -astroid/interpreter/__pycache__/objectmodel.cpython-38.pyc,, -astroid/interpreter/_import/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -astroid/interpreter/_import/__pycache__/__init__.cpython-38.pyc,, -astroid/interpreter/_import/__pycache__/spec.cpython-38.pyc,, -astroid/interpreter/_import/__pycache__/util.cpython-38.pyc,, -astroid/interpreter/_import/spec.py,sha256=L48FismdLnk6wjyAzIzJocKVdkBmbQlJgxwzeJ2_luA,11318 -astroid/interpreter/_import/util.py,sha256=inubUz6F3_kaMFaeleKUW6E6wCMIPrhU882zvwEZ02I,255 -astroid/interpreter/dunder_lookup.py,sha256=dP-AZU_aGPNt03b1ttrMglxzeU3NtgnG0MfpSLPH6sg,2155 -astroid/interpreter/objectmodel.py,sha256=7wQbTJhoUwH89x3tBfaA9WLaudBjwKcNpsBPWBQM_7U,23935 -astroid/manager.py,sha256=p7YPLYupDzG05OxR8qqF4fWMJExFAGIjTbVunPT3ECQ,12998 -astroid/mixins.py,sha256=F2rv2Ow7AU3YT_2jitVJik95ZWRVK6hpf8BrkkspzUY,5571 -astroid/modutils.py,sha256=1mBU_-rZH5-9K4nXB9hPi4mesi-pdlDltM_A-OU3zec,23425 -astroid/node_classes.py,sha256=FVYqErzW6lEHEZz3x_ZsqpyR1nyNOvnt0_Oi86btwAQ,140093 -astroid/nodes.py,sha256=tzYNu1tTF8bemsDitnSj7RFjQR2hrwlMDTwAmULoU5A,2957 -astroid/objects.py,sha256=q6ffgYLpyHENUY8BtiZAPHhnz91LJbQFkuaQnrNtf7g,9879 -astroid/protocols.py,sha256=Y-Mupe42X_FrdDC6KwnLyUM4yByWicR_tfqaSGWopT0,26828 
-astroid/raw_building.py,sha256=HKYGE5Ll3g0WKntVErqCacQFiyTa5OVuVieIhkvckbc,16808
-astroid/rebuilder.py,sha256=q1XtkOYkykbRhk2UXhuMGsnGZFMzCDxdvTaG4VEh6Mw,41835
-astroid/scoped_nodes.py,sha256=C-ZcmS7QNkIBGUb2wc-hbHaUtOvfcOkQxYhD8xPrwjQ,94141
-astroid/test_utils.py,sha256=Q9SsfJDCJqSdRzEkp_5i1xLGcbFDztqqkdRjjLH476o,2314
-astroid/transforms.py,sha256=1npwJWcQUSIjcpcWd1pc-dJhtHOyiboQHsETAIQd5co,3377
-astroid/util.py,sha256=jg5LnqbWSZTZP1KgpxGBuC6Lfwhn9Jb2T2TohXghmC0,4785
diff --git a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/WHEEL b/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/WHEEL
deleted file mode 100644
index 3b5c4038..00000000
--- a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/WHEEL
+++ /dev/null
@@ -1,5 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.33.6)
-Root-Is-Purelib: true
-Tag: py3-none-any
-
diff --git a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/top_level.txt b/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/top_level.txt
deleted file mode 100644
index 450d4fe9..00000000
--- a/venv/lib/python3.8/site-packages/astroid-2.3.3.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-astroid
diff --git a/venv/lib/python3.8/site-packages/astroid/__init__.py b/venv/lib/python3.8/site-packages/astroid/__init__.py
deleted file mode 100644
index d36a5b45..00000000
--- a/venv/lib/python3.8/site-packages/astroid/__init__.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# Copyright (c) 2006-2013, 2015 LOGILAB S.A. (Paris, FRANCE)
-# Copyright (c) 2014 Google, Inc.
-# Copyright (c) 2014 Eevee (Alex Munroe)
-# Copyright (c) 2015-2016, 2018 Claudiu Popa
-# Copyright (c) 2015-2016 Ceridwen
-# Copyright (c) 2016 Derek Gustafson
-# Copyright (c) 2016 Moises Lopez
-# Copyright (c) 2018 Bryce Guinta
-
-# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
-# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
-
-"""Python Abstract Syntax Tree New Generation
-
-The aim of this module is to provide a common base representation of
-Python source code for projects such as pychecker, pyreverse,
-pylint... Well, actually the development of this library is essentially
-governed by pylint's needs.
-
-It extends the classes defined in Python's _ast module with some
-additional methods and attributes. Instance attributes are added by a
-builder object, which can either generate extended ast (let's call
-them astroid ;) by visiting an existing ast tree or by inspecting living
-objects. Methods are added by monkey patching ast classes.
-
-Main modules are:
-
-* nodes and scoped_nodes for more information about methods and
-  attributes added to different node classes
-
-* the manager contains a high level object to get astroid trees from
-  source files and living objects. It maintains a cache of previously
-  constructed trees for quick access
-
-* builder contains the class responsible for building astroid trees
-"""
-
-import enum
-import itertools
-import os
-import sys
-
-import wrapt
-
-
-_Context = enum.Enum("Context", "Load Store Del")
-Load = _Context.Load
-Store = _Context.Store
-Del = _Context.Del
-del _Context
-
-
-from .__pkginfo__ import version as __version__
-
-# WARNING: internal import order matters!
- -# pylint: disable=redefined-builtin - -# make all exception classes accessible from astroid package -from astroid.exceptions import * - -# make all node classes accessible from astroid package -from astroid.nodes import * - -# trigger extra monkey-patching -from astroid import inference - -# more stuff available -from astroid import raw_building -from astroid.bases import BaseInstance, Instance, BoundMethod, UnboundMethod -from astroid.node_classes import are_exclusive, unpack_infer -from astroid.scoped_nodes import builtin_lookup -from astroid.builder import parse, extract_node -from astroid.util import Uninferable - -# make a manager instance (borg) accessible from astroid package -from astroid.manager import AstroidManager - -MANAGER = AstroidManager() -del AstroidManager - -# transform utilities (filters and decorator) - - -# pylint: disable=dangerous-default-value -@wrapt.decorator -def _inference_tip_cached(func, instance, args, kwargs, _cache={}): - """Cache decorator used for inference tips""" - node = args[0] - try: - return iter(_cache[func, node]) - except KeyError: - result = func(*args, **kwargs) - # Need to keep an iterator around - original, copy = itertools.tee(result) - _cache[func, node] = list(copy) - return original - - -# pylint: enable=dangerous-default-value - - -def inference_tip(infer_function, raise_on_overwrite=False): - """Given an instance specific inference function, return a function to be - given to MANAGER.register_transform to set this inference function. - - :param bool raise_on_overwrite: Raise an `InferenceOverwriteError` - if the inference tip will overwrite another. Used for debugging - - Typical usage - - .. sourcecode:: python - - MANAGER.register_transform(Call, inference_tip(infer_named_tuple), - predicate) - - .. Note:: - - Using an inference tip will override - any previously set inference tip for the given - node. Use a predicate in the transform to prevent - excess overwrites. - """ - - def transform(node, infer_function=infer_function): - if ( - raise_on_overwrite - and node._explicit_inference is not None - and node._explicit_inference is not infer_function - ): - raise InferenceOverwriteError( - "Inference already set to {existing_inference}. 
" - "Trying to overwrite with {new_inference} for {node}".format( - existing_inference=infer_function, - new_inference=node._explicit_inference, - node=node, - ) - ) - # pylint: disable=no-value-for-parameter - node._explicit_inference = _inference_tip_cached(infer_function) - return node - - return transform - - -def register_module_extender(manager, module_name, get_extension_mod): - def transform(node): - extension_module = get_extension_mod() - for name, objs in extension_module.locals.items(): - node.locals[name] = objs - for obj in objs: - if obj.parent is extension_module: - obj.parent = node - - manager.register_transform(Module, transform, lambda n: n.name == module_name) - - -# load brain plugins -BRAIN_MODULES_DIR = os.path.join(os.path.dirname(__file__), "brain") -if BRAIN_MODULES_DIR not in sys.path: - # add it to the end of the list so user path take precedence - sys.path.append(BRAIN_MODULES_DIR) -# load modules in this directory -for module in os.listdir(BRAIN_MODULES_DIR): - if module.endswith(".py"): - __import__(module[:-3]) diff --git a/venv/lib/python3.8/site-packages/astroid/__pkginfo__.py b/venv/lib/python3.8/site-packages/astroid/__pkginfo__.py deleted file mode 100644 index 4a17b5dd..00000000 --- a/venv/lib/python3.8/site-packages/astroid/__pkginfo__.py +++ /dev/null @@ -1,51 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014 Google, Inc. -# Copyright (c) 2015-2017 Ceridwen -# Copyright (c) 2015 Florian Bruhin -# Copyright (c) 2015 Radosław Ganczarek -# Copyright (c) 2016 Moises Lopez -# Copyright (c) 2017 Hugo -# Copyright (c) 2017 Łukasz Rogalski -# Copyright (c) 2017 Calen Pennington -# Copyright (c) 2018 Ashley Whetter -# Copyright (c) 2018 Bryce Guinta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""astroid packaging information""" - -version = "2.3.3" -numversion = tuple(int(elem) for elem in version.split(".") if elem.isdigit()) - -extras_require = {} -install_requires = [ - "lazy_object_proxy==1.4.*", - "six~=1.12", - "wrapt==1.11.*", - 'typed-ast>=1.4.0,<1.5;implementation_name== "cpython" and python_version<"3.8"', -] - -# pylint: disable=redefined-builtin; why license is a builtin anyway? -license = "LGPL" - -author = "Python Code Quality Authority" -author_email = "code-quality@python.org" -mailinglist = "mailto://%s" % author_email -web = "https://github.com/PyCQA/astroid" - -description = "An abstract syntax tree for Python with inference support." 
- -classifiers = [ - "Topic :: Software Development :: Libraries :: Python Modules", - "Topic :: Software Development :: Quality Assurance", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.5", - "Programming Language :: Python :: 3.6", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: Implementation :: CPython", - "Programming Language :: Python :: Implementation :: PyPy", -] diff --git a/venv/lib/python3.8/site-packages/astroid/_ast.py b/venv/lib/python3.8/site-packages/astroid/_ast.py deleted file mode 100644 index 2e44c1f1..00000000 --- a/venv/lib/python3.8/site-packages/astroid/_ast.py +++ /dev/null @@ -1,49 +0,0 @@ -import ast -from collections import namedtuple -from functools import partial -from typing import Optional -import sys - -_ast_py2 = _ast_py3 = None -try: - import typed_ast.ast3 as _ast_py3 - import typed_ast.ast27 as _ast_py2 -except ImportError: - pass - - -PY38 = sys.version_info[:2] >= (3, 8) -if PY38: - # On Python 3.8, typed_ast was merged back into `ast` - _ast_py3 = ast - - -FunctionType = namedtuple("FunctionType", ["argtypes", "returns"]) - - -def _get_parser_module(parse_python_two: bool = False): - if parse_python_two: - parser_module = _ast_py2 - else: - parser_module = _ast_py3 - return parser_module or ast - - -def _parse(string: str, parse_python_two: bool = False): - parse_module = _get_parser_module(parse_python_two=parse_python_two) - parse_func = parse_module.parse - if _ast_py3: - if PY38: - parse_func = partial(parse_func, type_comments=True) - if not parse_python_two: - parse_func = partial(parse_func, feature_version=sys.version_info.minor) - return parse_func(string) - - -def parse_function_type_comment(type_comment: str) -> Optional[FunctionType]: - """Given a correct type comment, obtain a FunctionType object""" - if _ast_py3 is None: - return None - - func_type = _ast_py3.parse(type_comment, "", "func_type") - return FunctionType(argtypes=func_type.argtypes, returns=func_type.returns) diff --git a/venv/lib/python3.8/site-packages/astroid/arguments.py b/venv/lib/python3.8/site-packages/astroid/arguments.py deleted file mode 100644 index c4bdc6d9..00000000 --- a/venv/lib/python3.8/site-packages/astroid/arguments.py +++ /dev/null @@ -1,285 +0,0 @@ -# Copyright (c) 2015-2016, 2018 Claudiu Popa -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2018 Bryce Guinta -# Copyright (c) 2018 Nick Drozd -# Copyright (c) 2018 Anthony Sottile - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -from astroid import bases -from astroid import context as contextmod -from astroid import exceptions -from astroid import nodes -from astroid import util - - -class CallSite: - """Class for understanding arguments passed into a call site - - It needs a call context, which contains the arguments and the - keyword arguments that were passed into a given call site. - In order to infer what an argument represents, call - :meth:`infer_argument` with the corresponding function node - and the argument name. 
-    """
-
-    def __init__(self, callcontext, argument_context_map=None):
-        if argument_context_map is None:
-            argument_context_map = {}
-        self.argument_context_map = argument_context_map
-        args = callcontext.args
-        keywords = callcontext.keywords
-        self.duplicated_keywords = set()
-        self._unpacked_args = self._unpack_args(args)
-        self._unpacked_kwargs = self._unpack_keywords(keywords)
-
-        self.positional_arguments = [
-            arg for arg in self._unpacked_args if arg is not util.Uninferable
-        ]
-        self.keyword_arguments = {
-            key: value
-            for key, value in self._unpacked_kwargs.items()
-            if value is not util.Uninferable
-        }
-
-    @classmethod
-    def from_call(cls, call_node):
-        """Get a CallSite object from the given Call node."""
-        callcontext = contextmod.CallContext(call_node.args, call_node.keywords)
-        return cls(callcontext)
-
-    def has_invalid_arguments(self):
-        """Check if *invalid* arguments were passed into the current CallSite
-
-        This can mean multiple things. For instance, if an unpacking
-        of an invalid object was passed, then this method will return True.
-        Other cases can be when the arguments can't be inferred by astroid,
-        for example, by passing objects which aren't known statically.
-        """
-        return len(self.positional_arguments) != len(self._unpacked_args)
-
-    def has_invalid_keywords(self):
-        """Check if *invalid* keyword arguments were passed into the current CallSite
-
-        For instance, unpacking a dictionary with integer keys is invalid
-        (**{1:2}), because the keys must be strings, which will make this
-        method return True. This might also return True if objects
-        which can't be inferred were passed.
-        """
-        return len(self.keyword_arguments) != len(self._unpacked_kwargs)
-
-    def _unpack_keywords(self, keywords):
-        values = {}
-        context = contextmod.InferenceContext()
-        context.extra_context = self.argument_context_map
-        for name, value in keywords:
-            if name is None:
-                # Then it's an unpacking operation (**)
-                try:
-                    inferred = next(value.infer(context=context))
-                except exceptions.InferenceError:
-                    values[name] = util.Uninferable
-                    continue
-
-                if not isinstance(inferred, nodes.Dict):
-                    # Not something we can work with.
- values[name] = util.Uninferable - continue - - for dict_key, dict_value in inferred.items: - try: - dict_key = next(dict_key.infer(context=context)) - except exceptions.InferenceError: - values[name] = util.Uninferable - continue - if not isinstance(dict_key, nodes.Const): - values[name] = util.Uninferable - continue - if not isinstance(dict_key.value, str): - values[name] = util.Uninferable - continue - if dict_key.value in values: - # The name is already in the dictionary - values[dict_key.value] = util.Uninferable - self.duplicated_keywords.add(dict_key.value) - continue - values[dict_key.value] = dict_value - else: - values[name] = value - return values - - def _unpack_args(self, args): - values = [] - context = contextmod.InferenceContext() - context.extra_context = self.argument_context_map - for arg in args: - if isinstance(arg, nodes.Starred): - try: - inferred = next(arg.value.infer(context=context)) - except exceptions.InferenceError: - values.append(util.Uninferable) - continue - - if inferred is util.Uninferable: - values.append(util.Uninferable) - continue - if not hasattr(inferred, "elts"): - values.append(util.Uninferable) - continue - values.extend(inferred.elts) - else: - values.append(arg) - return values - - def infer_argument(self, funcnode, name, context): - """infer a function argument value according to the call context - - Arguments: - funcnode: The function being called. - name: The name of the argument whose value is being inferred. - context: Inference context object - """ - if name in self.duplicated_keywords: - raise exceptions.InferenceError( - "The arguments passed to {func!r} " " have duplicate keywords.", - call_site=self, - func=funcnode, - arg=name, - context=context, - ) - - # Look into the keywords first, maybe it's already there. - try: - return self.keyword_arguments[name].infer(context) - except KeyError: - pass - - # Too many arguments given and no variable arguments. - if len(self.positional_arguments) > len(funcnode.args.args): - if not funcnode.args.vararg: - raise exceptions.InferenceError( - "Too many positional arguments " - "passed to {func!r} that does " - "not have *args.", - call_site=self, - func=funcnode, - arg=name, - context=context, - ) - - positional = self.positional_arguments[: len(funcnode.args.args)] - vararg = self.positional_arguments[len(funcnode.args.args) :] - argindex = funcnode.args.find_argname(name)[0] - kwonlyargs = {arg.name for arg in funcnode.args.kwonlyargs} - kwargs = { - key: value - for key, value in self.keyword_arguments.items() - if key not in kwonlyargs - } - # If there are too few positionals compared to - # what the function expects to receive, check to see - # if the missing positional arguments were passed - # as keyword arguments and if so, place them into the - # positional args list. - if len(positional) < len(funcnode.args.args): - for func_arg in funcnode.args.args: - if func_arg.name in kwargs: - arg = kwargs.pop(func_arg.name) - positional.append(arg) - - if argindex is not None: - # 2. first argument of instance/class method - if argindex == 0 and funcnode.type in ("method", "classmethod"): - if context.boundnode is not None: - boundnode = context.boundnode - else: - # XXX can do better ? - boundnode = funcnode.parent.frame() - - if isinstance(boundnode, nodes.ClassDef): - # Verify that we're accessing a method - # of the metaclass through a class, as in - # `cls.metaclass_method`. In this case, the - # first argument is always the class. 
- method_scope = funcnode.parent.scope() - if method_scope is boundnode.metaclass(): - return iter((boundnode,)) - - if funcnode.type == "method": - if not isinstance(boundnode, bases.Instance): - boundnode = bases.Instance(boundnode) - return iter((boundnode,)) - if funcnode.type == "classmethod": - return iter((boundnode,)) - # if we have a method, extract one position - # from the index, so we'll take in account - # the extra parameter represented by `self` or `cls` - if funcnode.type in ("method", "classmethod"): - argindex -= 1 - # 2. search arg index - try: - return self.positional_arguments[argindex].infer(context) - except IndexError: - pass - - if funcnode.args.kwarg == name: - # It wants all the keywords that were passed into - # the call site. - if self.has_invalid_keywords(): - raise exceptions.InferenceError( - "Inference failed to find values for all keyword arguments " - "to {func!r}: {unpacked_kwargs!r} doesn't correspond to " - "{keyword_arguments!r}.", - keyword_arguments=self.keyword_arguments, - unpacked_kwargs=self._unpacked_kwargs, - call_site=self, - func=funcnode, - arg=name, - context=context, - ) - kwarg = nodes.Dict( - lineno=funcnode.args.lineno, - col_offset=funcnode.args.col_offset, - parent=funcnode.args, - ) - kwarg.postinit( - [(nodes.const_factory(key), value) for key, value in kwargs.items()] - ) - return iter((kwarg,)) - if funcnode.args.vararg == name: - # It wants all the args that were passed into - # the call site. - if self.has_invalid_arguments(): - raise exceptions.InferenceError( - "Inference failed to find values for all positional " - "arguments to {func!r}: {unpacked_args!r} doesn't " - "correspond to {positional_arguments!r}.", - positional_arguments=self.positional_arguments, - unpacked_args=self._unpacked_args, - call_site=self, - func=funcnode, - arg=name, - context=context, - ) - args = nodes.Tuple( - lineno=funcnode.args.lineno, - col_offset=funcnode.args.col_offset, - parent=funcnode.args, - ) - args.postinit(vararg) - return iter((args,)) - - # Check if it's a default parameter. - try: - return funcnode.args.default_value(name).infer(context) - except exceptions.NoDefault: - pass - raise exceptions.InferenceError( - "No value found for argument {name} to " "{func!r}", - call_site=self, - func=funcnode, - arg=name, - context=context, - ) diff --git a/venv/lib/python3.8/site-packages/astroid/as_string.py b/venv/lib/python3.8/site-packages/astroid/as_string.py deleted file mode 100644 index 3cd6e0d2..00000000 --- a/venv/lib/python3.8/site-packages/astroid/as_string.py +++ /dev/null @@ -1,633 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2009-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2010 Daniel Harding -# Copyright (c) 2013-2016, 2018 Claudiu Popa -# Copyright (c) 2013-2014 Google, Inc. 
-# Copyright (c) 2015-2016 Ceridwen
-# Copyright (c) 2016 Jared Garst
-# Copyright (c) 2016 Jakub Wilk
-# Copyright (c) 2017 Łukasz Rogalski
-# Copyright (c) 2017 rr-
-# Copyright (c) 2018 brendanator
-# Copyright (c) 2018 Nick Drozd
-
-# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
-# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
-
-"""This module renders Astroid nodes as strings:
-
-* the :func:`to_code` function returns an equivalent (hopefully valid)
-  Python string
-
-* the :func:`dump` function returns an internal representation of nodes found
-  in the tree, useful for debugging or understanding the tree structure
-"""
-
-# pylint: disable=unused-argument
-
-DOC_NEWLINE = "\0"
-
-
-class AsStringVisitor:
-    """Visitor to render an Astroid node as a valid python code string"""
-
-    def __init__(self, indent):
-        self.indent = indent
-
-    def __call__(self, node):
-        """Makes this visitor behave as a simple function"""
-        return node.accept(self).replace(DOC_NEWLINE, "\n")
-
-    def _docs_dedent(self, doc):
-        """Stop newlines in docs being indented by self._stmt_list"""
-        return '\n%s"""%s"""' % (self.indent, doc.replace("\n", DOC_NEWLINE))
-
-    def _stmt_list(self, stmts, indent=True):
-        """return a list of nodes to string"""
-        stmts = "\n".join(nstr for nstr in [n.accept(self) for n in stmts] if nstr)
-        if indent:
-            return self.indent + stmts.replace("\n", "\n" + self.indent)
-
-        return stmts
-
-    def _precedence_parens(self, node, child, is_left=True):
-        """Wrap child in parens only if required to keep same semantics"""
-        if self._should_wrap(node, child, is_left):
-            return "(%s)" % child.accept(self)
-
-        return child.accept(self)
-
-    def _should_wrap(self, node, child, is_left):
-        """Wrap child if:
-        - it has lower precedence
-        - same precedence with position opposite to associativity direction
-        """
-        node_precedence = node.op_precedence()
-        child_precedence = child.op_precedence()
-
-        if node_precedence > child_precedence:
-            # 3 * (4 + 5)
-            return True
-
-        if (
-            node_precedence == child_precedence
-            and is_left != node.op_left_associative()
-        ):
-            # 3 - (4 - 5)
-            # (2**3)**4
-            return True
-
-        return False
-
-    ## visit_ methods ###########################################
-
-    def visit_arguments(self, node):
-        """return an astroid.Function node as string"""
-        return node.format_args()
-
-    def visit_assignattr(self, node):
-        """return an astroid.AssAttr node as string"""
-        return self.visit_attribute(node)
-
-    def visit_assert(self, node):
-        """return an astroid.Assert node as string"""
-        if node.fail:
-            return "assert %s, %s" % (node.test.accept(self), node.fail.accept(self))
-        return "assert %s" % node.test.accept(self)
-
-    def visit_assignname(self, node):
-        """return an astroid.AssName node as string"""
-        return node.name
-
-    def visit_assign(self, node):
-        """return an astroid.Assign node as string"""
-        lhs = " = ".join(n.accept(self) for n in node.targets)
-        return "%s = %s" % (lhs, node.value.accept(self))
-
-    def visit_augassign(self, node):
-        """return an astroid.AugAssign node as string"""
-        return "%s %s %s" % (node.target.accept(self), node.op, node.value.accept(self))
-
-    def visit_annassign(self, node):
-        """Return an astroid.AnnAssign node as string"""
-
-        target = node.target.accept(self)
-        annotation = node.annotation.accept(self)
-        if node.value is None:
-            return "%s: %s" % (target, annotation)
-        return "%s: %s = %s" % (target, annotation, node.value.accept(self))
-
-    def visit_repr(self, node):
-        """return an astroid.Repr
node as string""" - return "`%s`" % node.value.accept(self) - - def visit_binop(self, node): - """return an astroid.BinOp node as string""" - left = self._precedence_parens(node, node.left) - right = self._precedence_parens(node, node.right, is_left=False) - if node.op == "**": - return "%s%s%s" % (left, node.op, right) - - return "%s %s %s" % (left, node.op, right) - - def visit_boolop(self, node): - """return an astroid.BoolOp node as string""" - values = ["%s" % self._precedence_parens(node, n) for n in node.values] - return (" %s " % node.op).join(values) - - def visit_break(self, node): - """return an astroid.Break node as string""" - return "break" - - def visit_call(self, node): - """return an astroid.Call node as string""" - expr_str = self._precedence_parens(node, node.func) - args = [arg.accept(self) for arg in node.args] - if node.keywords: - keywords = [kwarg.accept(self) for kwarg in node.keywords] - else: - keywords = [] - - args.extend(keywords) - return "%s(%s)" % (expr_str, ", ".join(args)) - - def visit_classdef(self, node): - """return an astroid.ClassDef node as string""" - decorate = node.decorators.accept(self) if node.decorators else "" - bases = ", ".join(n.accept(self) for n in node.bases) - metaclass = node.metaclass() - if metaclass and not node.has_metaclass_hack(): - if bases: - bases = "(%s, metaclass=%s)" % (bases, metaclass.name) - else: - bases = "(metaclass=%s)" % metaclass.name - else: - bases = "(%s)" % bases if bases else "" - docs = self._docs_dedent(node.doc) if node.doc else "" - return "\n\n%sclass %s%s:%s\n%s\n" % ( - decorate, - node.name, - bases, - docs, - self._stmt_list(node.body), - ) - - def visit_compare(self, node): - """return an astroid.Compare node as string""" - rhs_str = " ".join( - [ - "%s %s" % (op, self._precedence_parens(node, expr, is_left=False)) - for op, expr in node.ops - ] - ) - return "%s %s" % (self._precedence_parens(node, node.left), rhs_str) - - def visit_comprehension(self, node): - """return an astroid.Comprehension node as string""" - ifs = "".join(" if %s" % n.accept(self) for n in node.ifs) - return "for %s in %s%s" % ( - node.target.accept(self), - node.iter.accept(self), - ifs, - ) - - def visit_const(self, node): - """return an astroid.Const node as string""" - if node.value is Ellipsis: - return "..." - return repr(node.value) - - def visit_continue(self, node): - """return an astroid.Continue node as string""" - return "continue" - - def visit_delete(self, node): # XXX check if correct - """return an astroid.Delete node as string""" - return "del %s" % ", ".join(child.accept(self) for child in node.targets) - - def visit_delattr(self, node): - """return an astroid.DelAttr node as string""" - return self.visit_attribute(node) - - def visit_delname(self, node): - """return an astroid.DelName node as string""" - return node.name - - def visit_decorators(self, node): - """return an astroid.Decorators node as string""" - return "@%s\n" % "\n@".join(item.accept(self) for item in node.nodes) - - def visit_dict(self, node): - """return an astroid.Dict node as string""" - return "{%s}" % ", ".join(self._visit_dict(node)) - - def _visit_dict(self, node): - for key, value in node.items: - key = key.accept(self) - value = value.accept(self) - if key == "**": - # It can only be a DictUnpack node. 
- yield key + value - else: - yield "%s: %s" % (key, value) - - def visit_dictunpack(self, node): - return "**" - - def visit_dictcomp(self, node): - """return an astroid.DictComp node as string""" - return "{%s: %s %s}" % ( - node.key.accept(self), - node.value.accept(self), - " ".join(n.accept(self) for n in node.generators), - ) - - def visit_expr(self, node): - """return an astroid.Discard node as string""" - return node.value.accept(self) - - def visit_emptynode(self, node): - """dummy method for visiting an Empty node""" - return "" - - def visit_excepthandler(self, node): - if node.type: - if node.name: - excs = "except %s, %s" % ( - node.type.accept(self), - node.name.accept(self), - ) - else: - excs = "except %s" % node.type.accept(self) - else: - excs = "except" - return "%s:\n%s" % (excs, self._stmt_list(node.body)) - - def visit_ellipsis(self, node): - """return an astroid.Ellipsis node as string""" - return "..." - - def visit_empty(self, node): - """return an Empty node as string""" - return "" - - def visit_exec(self, node): - """return an astroid.Exec node as string""" - if node.locals: - return "exec %s in %s, %s" % ( - node.expr.accept(self), - node.locals.accept(self), - node.globals.accept(self), - ) - if node.globals: - return "exec %s in %s" % (node.expr.accept(self), node.globals.accept(self)) - return "exec %s" % node.expr.accept(self) - - def visit_extslice(self, node): - """return an astroid.ExtSlice node as string""" - return ", ".join(dim.accept(self) for dim in node.dims) - - def visit_for(self, node): - """return an astroid.For node as string""" - fors = "for %s in %s:\n%s" % ( - node.target.accept(self), - node.iter.accept(self), - self._stmt_list(node.body), - ) - if node.orelse: - fors = "%s\nelse:\n%s" % (fors, self._stmt_list(node.orelse)) - return fors - - def visit_importfrom(self, node): - """return an astroid.ImportFrom node as string""" - return "from %s import %s" % ( - "." 
* (node.level or 0) + node.modname, - _import_string(node.names), - ) - - def visit_functiondef(self, node): - """return an astroid.Function node as string""" - decorate = node.decorators.accept(self) if node.decorators else "" - docs = self._docs_dedent(node.doc) if node.doc else "" - trailer = ":" - if node.returns: - return_annotation = " -> " + node.returns.as_string() - trailer = return_annotation + ":" - def_format = "\n%sdef %s(%s)%s%s\n%s" - return def_format % ( - decorate, - node.name, - node.args.accept(self), - trailer, - docs, - self._stmt_list(node.body), - ) - - def visit_generatorexp(self, node): - """return an astroid.GeneratorExp node as string""" - return "(%s %s)" % ( - node.elt.accept(self), - " ".join(n.accept(self) for n in node.generators), - ) - - def visit_attribute(self, node): - """return an astroid.Getattr node as string""" - return "%s.%s" % (self._precedence_parens(node, node.expr), node.attrname) - - def visit_global(self, node): - """return an astroid.Global node as string""" - return "global %s" % ", ".join(node.names) - - def visit_if(self, node): - """return an astroid.If node as string""" - ifs = ["if %s:\n%s" % (node.test.accept(self), self._stmt_list(node.body))] - if node.has_elif_block(): - ifs.append("el%s" % self._stmt_list(node.orelse, indent=False)) - elif node.orelse: - ifs.append("else:\n%s" % self._stmt_list(node.orelse)) - return "\n".join(ifs) - - def visit_ifexp(self, node): - """return an astroid.IfExp node as string""" - return "%s if %s else %s" % ( - self._precedence_parens(node, node.body, is_left=True), - self._precedence_parens(node, node.test, is_left=True), - self._precedence_parens(node, node.orelse, is_left=False), - ) - - def visit_import(self, node): - """return an astroid.Import node as string""" - return "import %s" % _import_string(node.names) - - def visit_keyword(self, node): - """return an astroid.Keyword node as string""" - if node.arg is None: - return "**%s" % node.value.accept(self) - return "%s=%s" % (node.arg, node.value.accept(self)) - - def visit_lambda(self, node): - """return an astroid.Lambda node as string""" - args = node.args.accept(self) - body = node.body.accept(self) - if args: - return "lambda %s: %s" % (args, body) - - return "lambda: %s" % body - - def visit_list(self, node): - """return an astroid.List node as string""" - return "[%s]" % ", ".join(child.accept(self) for child in node.elts) - - def visit_listcomp(self, node): - """return an astroid.ListComp node as string""" - return "[%s %s]" % ( - node.elt.accept(self), - " ".join(n.accept(self) for n in node.generators), - ) - - def visit_module(self, node): - """return an astroid.Module node as string""" - docs = '"""%s"""\n\n' % node.doc if node.doc else "" - return docs + "\n".join(n.accept(self) for n in node.body) + "\n\n" - - def visit_name(self, node): - """return an astroid.Name node as string""" - return node.name - - def visit_pass(self, node): - """return an astroid.Pass node as string""" - return "pass" - - def visit_print(self, node): - """return an astroid.Print node as string""" - nodes = ", ".join(n.accept(self) for n in node.values) - if not node.nl: - nodes = "%s," % nodes - if node.dest: - return "print >> %s, %s" % (node.dest.accept(self), nodes) - return "print %s" % nodes - - def visit_raise(self, node): - """return an astroid.Raise node as string""" - if node.exc: - if node.inst: - if node.tback: - return "raise %s, %s, %s" % ( - node.exc.accept(self), - node.inst.accept(self), - node.tback.accept(self), - ) - return "raise 
%s, %s" % (node.exc.accept(self), node.inst.accept(self)) - return "raise %s" % node.exc.accept(self) - return "raise" - - def visit_return(self, node): - """return an astroid.Return node as string""" - if node.is_tuple_return() and len(node.value.elts) > 1: - elts = [child.accept(self) for child in node.value.elts] - return "return %s" % ", ".join(elts) - - if node.value: - return "return %s" % node.value.accept(self) - - return "return" - - def visit_index(self, node): - """return an astroid.Index node as string""" - return node.value.accept(self) - - def visit_set(self, node): - """return an astroid.Set node as string""" - return "{%s}" % ", ".join(child.accept(self) for child in node.elts) - - def visit_setcomp(self, node): - """return an astroid.SetComp node as string""" - return "{%s %s}" % ( - node.elt.accept(self), - " ".join(n.accept(self) for n in node.generators), - ) - - def visit_slice(self, node): - """return an astroid.Slice node as string""" - lower = node.lower.accept(self) if node.lower else "" - upper = node.upper.accept(self) if node.upper else "" - step = node.step.accept(self) if node.step else "" - if step: - return "%s:%s:%s" % (lower, upper, step) - return "%s:%s" % (lower, upper) - - def visit_subscript(self, node): - """return an astroid.Subscript node as string""" - idx = node.slice - if idx.__class__.__name__.lower() == "index": - idx = idx.value - idxstr = idx.accept(self) - if idx.__class__.__name__.lower() == "tuple" and idx.elts: - # Remove parenthesis in tuple and extended slice. - # a[(::1, 1:)] is not valid syntax. - idxstr = idxstr[1:-1] - return "%s[%s]" % (self._precedence_parens(node, node.value), idxstr) - - def visit_tryexcept(self, node): - """return an astroid.TryExcept node as string""" - trys = ["try:\n%s" % self._stmt_list(node.body)] - for handler in node.handlers: - trys.append(handler.accept(self)) - if node.orelse: - trys.append("else:\n%s" % self._stmt_list(node.orelse)) - return "\n".join(trys) - - def visit_tryfinally(self, node): - """return an astroid.TryFinally node as string""" - return "try:\n%s\nfinally:\n%s" % ( - self._stmt_list(node.body), - self._stmt_list(node.finalbody), - ) - - def visit_tuple(self, node): - """return an astroid.Tuple node as string""" - if len(node.elts) == 1: - return "(%s, )" % node.elts[0].accept(self) - return "(%s)" % ", ".join(child.accept(self) for child in node.elts) - - def visit_unaryop(self, node): - """return an astroid.UnaryOp node as string""" - if node.op == "not": - operator = "not " - else: - operator = node.op - return "%s%s" % (operator, self._precedence_parens(node, node.operand)) - - def visit_while(self, node): - """return an astroid.While node as string""" - whiles = "while %s:\n%s" % (node.test.accept(self), self._stmt_list(node.body)) - if node.orelse: - whiles = "%s\nelse:\n%s" % (whiles, self._stmt_list(node.orelse)) - return whiles - - def visit_with(self, node): # 'with' without 'as' is possible - """return an astroid.With node as string""" - items = ", ".join( - ("%s" % expr.accept(self)) + (vars and " as %s" % (vars.accept(self)) or "") - for expr, vars in node.items - ) - return "with %s:\n%s" % (items, self._stmt_list(node.body)) - - def visit_yield(self, node): - """yield an ast.Yield node as string""" - yi_val = (" " + node.value.accept(self)) if node.value else "" - expr = "yield" + yi_val - if node.parent.is_statement: - return expr - - return "(%s)" % (expr,) - - def visit_starred(self, node): - """return Starred node as string""" - return "*" + node.value.accept(self) 
- - # These aren't for real AST nodes, but for inference objects. - - def visit_frozenset(self, node): - return node.parent.accept(self) - - def visit_super(self, node): - return node.parent.accept(self) - - def visit_uninferable(self, node): - return str(node) - - -class AsStringVisitor3(AsStringVisitor): - """AsStringVisitor3 overwrites some AsStringVisitor methods""" - - def visit_excepthandler(self, node): - if node.type: - if node.name: - excs = "except %s as %s" % ( - node.type.accept(self), - node.name.accept(self), - ) - else: - excs = "except %s" % node.type.accept(self) - else: - excs = "except" - return "%s:\n%s" % (excs, self._stmt_list(node.body)) - - def visit_nonlocal(self, node): - """return an astroid.Nonlocal node as string""" - return "nonlocal %s" % ", ".join(node.names) - - def visit_raise(self, node): - """return an astroid.Raise node as string""" - if node.exc: - if node.cause: - return "raise %s from %s" % ( - node.exc.accept(self), - node.cause.accept(self), - ) - return "raise %s" % node.exc.accept(self) - return "raise" - - def visit_yieldfrom(self, node): - """ Return an astroid.YieldFrom node as string. """ - yi_val = (" " + node.value.accept(self)) if node.value else "" - expr = "yield from" + yi_val - if node.parent.is_statement: - return expr - - return "(%s)" % (expr,) - - def visit_asyncfunctiondef(self, node): - function = super(AsStringVisitor3, self).visit_functiondef(node) - return "async " + function.strip() - - def visit_await(self, node): - return "await %s" % node.value.accept(self) - - def visit_asyncwith(self, node): - return "async %s" % self.visit_with(node) - - def visit_asyncfor(self, node): - return "async %s" % self.visit_for(node) - - def visit_joinedstr(self, node): - # Special treatment for constants, - # as we want to join literals not reprs - string = "".join( - value.value if type(value).__name__ == "Const" else value.accept(self) - for value in node.values - ) - return "f'%s'" % string - - def visit_formattedvalue(self, node): - return "{%s}" % node.value.accept(self) - - def visit_comprehension(self, node): - """return an astroid.Comprehension node as string""" - return "%s%s" % ( - "async " if node.is_async else "", - super(AsStringVisitor3, self).visit_comprehension(node), - ) - - def visit_namedexpr(self, node): - """Return an assignment expression node as string""" - target = node.target.accept(self) - value = node.value.accept(self) - return "%s := %s" % (target, value) - - -def _import_string(names): - """return a list of (name, asname) formatted as a string""" - _names = [] - for name, asname in names: - if asname is not None: - _names.append("%s as %s" % (name, asname)) - else: - _names.append(name) - return ", ".join(_names) - - -AsStringVisitor = AsStringVisitor3 - -# This sets the default indent to 4 spaces. -to_code = AsStringVisitor(" ") diff --git a/venv/lib/python3.8/site-packages/astroid/bases.py b/venv/lib/python3.8/site-packages/astroid/bases.py deleted file mode 100644 index d5b042a0..00000000 --- a/venv/lib/python3.8/site-packages/astroid/bases.py +++ /dev/null @@ -1,542 +0,0 @@ -# Copyright (c) 2009-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2012 FELD Boris -# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014 Google, Inc. 
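[Note] In day-to-day use the visitor defined in the file above is reached through node.as_string() (or the module-level to_code). A quick usage sketch, assuming astroid is importable; the exact spacing of the output can differ between astroid releases:

import astroid

node = astroid.extract_node("x = [i ** 2 for i in range(3)]")
print(node.as_string())  # roughly: x = [i**2 for i in range(3)]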
-# Copyright (c) 2014 Eevee (Alex Munroe) -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 Florian Bruhin -# Copyright (c) 2016-2017 Derek Gustafson -# Copyright (c) 2017 Calen Pennington -# Copyright (c) 2018 Bryce Guinta -# Copyright (c) 2018 Nick Drozd -# Copyright (c) 2018 Daniel Colascione - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""This module contains base classes and functions for the nodes and some -inference utils. -""" - -import builtins -import collections - -from astroid import context as contextmod -from astroid import exceptions -from astroid import util - -objectmodel = util.lazy_import("interpreter.objectmodel") -helpers = util.lazy_import("helpers") -BUILTINS = builtins.__name__ -manager = util.lazy_import("manager") -MANAGER = manager.AstroidManager() - -# TODO: check if needs special treatment -BUILTINS = "builtins" -BOOL_SPECIAL_METHOD = "__bool__" - -PROPERTIES = {BUILTINS + ".property", "abc.abstractproperty"} -# List of possible property names. We use this list in order -# to see if a method is a property or not. This should be -# pretty reliable and fast, the alternative being to check each -# decorator to see if its a real property-like descriptor, which -# can be too complicated. -# Also, these aren't qualified, because each project can -# define them, we shouldn't expect to know every possible -# property-like decorator! -POSSIBLE_PROPERTIES = { - "cached_property", - "cachedproperty", - "lazyproperty", - "lazy_property", - "reify", - "lazyattribute", - "lazy_attribute", - "LazyProperty", - "lazy", - "cache_readonly", -} - - -def _is_property(meth): - if PROPERTIES.intersection(meth.decoratornames()): - return True - stripped = { - name.split(".")[-1] - for name in meth.decoratornames() - if name is not util.Uninferable - } - if any(name in stripped for name in POSSIBLE_PROPERTIES): - return True - - # Lookup for subclasses of *property* - if not meth.decorators: - return False - for decorator in meth.decorators.nodes or (): - inferred = helpers.safe_infer(decorator) - if inferred is None or inferred is util.Uninferable: - continue - if inferred.__class__.__name__ == "ClassDef": - for base_class in inferred.bases: - if base_class.__class__.__name__ != "Name": - continue - module, _ = base_class.lookup(base_class.name) - if module.name == BUILTINS and base_class.name == "property": - return True - - return False - - -class Proxy: - """a simple proxy object - - Note: - - Subclasses of this object will need a custom __getattr__ - if new instance attributes are created. 
See the Const class - """ - - _proxied = None # proxied object may be set by class or by instance - - def __init__(self, proxied=None): - if proxied is not None: - self._proxied = proxied - - def __getattr__(self, name): - if name == "_proxied": - return getattr(self.__class__, "_proxied") - if name in self.__dict__: - return self.__dict__[name] - return getattr(self._proxied, name) - - def infer(self, context=None): - yield self - - -def _infer_stmts(stmts, context, frame=None): - """Return an iterator on statements inferred by each statement in *stmts*.""" - inferred = False - if context is not None: - name = context.lookupname - context = context.clone() - else: - name = None - context = contextmod.InferenceContext() - - for stmt in stmts: - if stmt is util.Uninferable: - yield stmt - inferred = True - continue - context.lookupname = stmt._infer_name(frame, name) - try: - for inferred in stmt.infer(context=context): - yield inferred - inferred = True - except exceptions.NameInferenceError: - continue - except exceptions.InferenceError: - yield util.Uninferable - inferred = True - if not inferred: - raise exceptions.InferenceError( - "Inference failed for all members of {stmts!r}.", - stmts=stmts, - frame=frame, - context=context, - ) - - -def _infer_method_result_truth(instance, method_name, context): - # Get the method from the instance and try to infer - # its return's truth value. - meth = next(instance.igetattr(method_name, context=context), None) - if meth and hasattr(meth, "infer_call_result"): - if not meth.callable(): - return util.Uninferable - try: - for value in meth.infer_call_result(instance, context=context): - if value is util.Uninferable: - return value - - inferred = next(value.infer(context=context)) - return inferred.bool_value() - except exceptions.InferenceError: - pass - return util.Uninferable - - -class BaseInstance(Proxy): - """An instance base class, which provides lookup methods for potential instances.""" - - special_attributes = None - - def display_type(self): - return "Instance of" - - def getattr(self, name, context=None, lookupclass=True): - try: - values = self._proxied.instance_attr(name, context) - except exceptions.AttributeInferenceError as exc: - if self.special_attributes and name in self.special_attributes: - return [self.special_attributes.lookup(name)] - - if lookupclass: - # Class attributes not available through the instance - # unless they are explicitly defined. - return self._proxied.getattr(name, context, class_context=False) - - raise exceptions.AttributeInferenceError( - target=self, attribute=name, context=context - ) from exc - # since we've no context information, return matching class members as - # well - if lookupclass: - try: - return values + self._proxied.getattr( - name, context, class_context=False - ) - except exceptions.AttributeInferenceError: - pass - return values - - def igetattr(self, name, context=None): - """inferred getattr""" - if not context: - context = contextmod.InferenceContext() - try: - # avoid recursively inferring the same attr on the same class - if context.push((self._proxied, name)): - raise exceptions.InferenceError( - message="Cannot infer the same attribute again", - node=self, - context=context, - ) - - # XXX frame should be self._proxied, or not ? 
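[Note] The Proxy pattern above leans entirely on __getattr__, which Python only consults after normal attribute lookup fails, so the wrapper stays transparent for everything it does not override. A stripped-down sketch of that delegation (Wrapper is an illustrative name):

class Wrapper:
    def __init__(self, wrapped):
        self._wrapped = wrapped

    def __getattr__(self, name):
        # only reached when `name` is not found on the wrapper itself
        return getattr(self._wrapped, name)

w = Wrapper("hello")
assert w.upper() == "HELLO"  # delegated to the wrapped str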
-            get_attr = self.getattr(name, context, lookupclass=False)
-            yield from _infer_stmts(
-                self._wrap_attr(get_attr, context), context, frame=self
-            )
-        except exceptions.AttributeInferenceError as error:
-            try:
-                # fallback to class.igetattr since it has some logic to handle
-                # descriptors
-                # But only if the _proxied is the Class.
-                if self._proxied.__class__.__name__ != "ClassDef":
-                    raise
-                attrs = self._proxied.igetattr(name, context, class_context=False)
-                yield from self._wrap_attr(attrs, context)
-            except exceptions.AttributeInferenceError as error:
-                raise exceptions.InferenceError(**vars(error)) from error
-
-    def _wrap_attr(self, attrs, context=None):
-        """wrap bound methods of attrs in InstanceMethod proxies"""
-        for attr in attrs:
-            if isinstance(attr, UnboundMethod):
-                if _is_property(attr):
-                    yield from attr.infer_call_result(self, context)
-                else:
-                    yield BoundMethod(attr, self)
-            elif hasattr(attr, "name") and attr.name == "<lambda>":
-                if attr.args.args and attr.args.args[0].name == "self":
-                    yield BoundMethod(attr, self)
-                    continue
-                yield attr
-            else:
-                yield attr
-
-    def infer_call_result(self, caller, context=None):
-        """infer what a class instance is returning when called"""
-        context = contextmod.bind_context_to_node(context, self)
-        inferred = False
-        for node in self._proxied.igetattr("__call__", context):
-            if node is util.Uninferable or not node.callable():
-                continue
-            for res in node.infer_call_result(caller, context):
-                inferred = True
-                yield res
-        if not inferred:
-            raise exceptions.InferenceError(node=self, caller=caller, context=context)
-
-
-class Instance(BaseInstance):
-    """A special node representing a class instance."""
-
-    # pylint: disable=unnecessary-lambda
-    special_attributes = util.lazy_descriptor(lambda: objectmodel.InstanceModel())
-
-    def __repr__(self):
-        return "<Instance of %s.%s at 0x%s>" % (
-            self._proxied.root().name,
-            self._proxied.name,
-            id(self),
-        )
-
-    def __str__(self):
-        return "Instance of %s.%s" % (self._proxied.root().name, self._proxied.name)
-
-    def callable(self):
-        try:
-            self._proxied.getattr("__call__", class_context=False)
-            return True
-        except exceptions.AttributeInferenceError:
-            return False
-
-    def pytype(self):
-        return self._proxied.qname()
-
-    def display_type(self):
-        return "Instance of"
-
-    def bool_value(self):
-        """Infer the truth value for an Instance
-
-        The truth value of an instance is determined by these conditions:
-
-        * if it implements __bool__ on Python 3 or __nonzero__
-          on Python 2, then its bool value will be determined by
-          calling this special method and checking its result.
-        * when this method is not defined, __len__() is called, if it
-          is defined, and the object is considered true if its result is
-          nonzero. If a class defines neither __len__() nor __bool__(),
-          all its instances are considered true.
-        """
-        context = contextmod.InferenceContext()
-        context.callcontext = contextmod.CallContext(args=[])
-        context.boundnode = self
-
-        try:
-            result = _infer_method_result_truth(self, BOOL_SPECIAL_METHOD, context)
-        except (exceptions.InferenceError, exceptions.AttributeInferenceError):
-            # Fallback to __len__.
-            try:
-                result = _infer_method_result_truth(self, "__len__", context)
-            except (exceptions.AttributeInferenceError, exceptions.InferenceError):
-                return True
-        return result
-
-    # This is set in inference.py.
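[Note] The fallback chain bool_value documents is exactly Python's own truth-testing protocol: __bool__ first, then __len__, then true by default. Plain Python shows the same ordering (class names here are illustrative):

class NoDunders:
    pass

class Sized:
    def __len__(self):
        return 0

class Boolish:
    def __bool__(self):
        return True

    def __len__(self):
        return 0  # ignored: __bool__ takes priority over __len__

assert bool(NoDunders()) is True  # neither method defined: always true
assert bool(Sized()) is False     # falls back to __len__() != 0
assert bool(Boolish()) is True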
- def getitem(self, index, context=None): - pass - - -class UnboundMethod(Proxy): - """a special node representing a method not bound to an instance""" - - # pylint: disable=unnecessary-lambda - special_attributes = util.lazy_descriptor(lambda: objectmodel.UnboundMethodModel()) - - def __repr__(self): - frame = self._proxied.parent.frame() - return "<%s %s of %s at 0x%s" % ( - self.__class__.__name__, - self._proxied.name, - frame.qname(), - id(self), - ) - - def implicit_parameters(self): - return 0 - - def is_bound(self): - return False - - def getattr(self, name, context=None): - if name in self.special_attributes: - return [self.special_attributes.lookup(name)] - return self._proxied.getattr(name, context) - - def igetattr(self, name, context=None): - if name in self.special_attributes: - return iter((self.special_attributes.lookup(name),)) - return self._proxied.igetattr(name, context) - - def infer_call_result(self, caller, context): - """ - The boundnode of the regular context with a function called - on ``object.__new__`` will be of type ``object``, - which is incorrect for the argument in general. - If no context is given the ``object.__new__`` call argument will - correctly inferred except when inside a call that requires - the additional context (such as a classmethod) of the boundnode - to determine which class the method was called from - """ - - # If we're unbound method __new__ of builtin object, the result is an - # instance of the class given as first argument. - if ( - self._proxied.name == "__new__" - and self._proxied.parent.frame().qname() == "%s.object" % BUILTINS - ): - if caller.args: - node_context = context.extra_context.get(caller.args[0]) - infer = caller.args[0].infer(context=node_context) - else: - infer = [] - return (Instance(x) if x is not util.Uninferable else x for x in infer) - return self._proxied.infer_call_result(caller, context) - - def bool_value(self): - return True - - -class BoundMethod(UnboundMethod): - """a special node representing a method bound to an instance""" - - # pylint: disable=unnecessary-lambda - special_attributes = util.lazy_descriptor(lambda: objectmodel.BoundMethodModel()) - - def __init__(self, proxy, bound): - UnboundMethod.__init__(self, proxy) - self.bound = bound - - def implicit_parameters(self): - return 1 - - def is_bound(self): - return True - - def _infer_type_new_call(self, caller, context): - """Try to infer what type.__new__(mcs, name, bases, attrs) returns. - - In order for such call to be valid, the metaclass needs to be - a subtype of ``type``, the name needs to be a string, the bases - needs to be a tuple of classes - """ - # pylint: disable=import-outside-toplevel; circular import - from astroid import node_classes - - # Verify the metaclass - mcs = next(caller.args[0].infer(context=context)) - if mcs.__class__.__name__ != "ClassDef": - # Not a valid first argument. - return None - if not mcs.is_subtype_of("%s.type" % BUILTINS): - # Not a valid metaclass. - return None - - # Verify the name - name = next(caller.args[1].infer(context=context)) - if name.__class__.__name__ != "Const": - # Not a valid name, needs to be a const. - return None - if not isinstance(name.value, str): - # Needs to be a string. - return None - - # Verify the bases - bases = next(caller.args[2].infer(context=context)) - if bases.__class__.__name__ != "Tuple": - # Needs to be a tuple. 
-            return None
-        inferred_bases = [next(elt.infer(context=context)) for elt in bases.elts]
-        if any(base.__class__.__name__ != "ClassDef" for base in inferred_bases):
-            # All the bases need to be Classes
-            return None
-
-        # Verify the attributes.
-        attrs = next(caller.args[3].infer(context=context))
-        if attrs.__class__.__name__ != "Dict":
-            # Needs to be a dictionary.
-            return None
-        cls_locals = collections.defaultdict(list)
-        for key, value in attrs.items:
-            key = next(key.infer(context=context))
-            value = next(value.infer(context=context))
-            # Ignore non-string keys
-            if key.__class__.__name__ == "Const" and isinstance(key.value, str):
-                cls_locals[key.value].append(value)
-
-        # Build the class now.
-        cls = mcs.__class__(
-            name=name.value,
-            lineno=caller.lineno,
-            col_offset=caller.col_offset,
-            parent=caller,
-        )
-        empty = node_classes.Pass()
-        cls.postinit(
-            bases=bases.elts,
-            body=[empty],
-            decorators=[],
-            newstyle=True,
-            metaclass=mcs,
-            keywords=[],
-        )
-        cls.locals = cls_locals
-        return cls
-
-    def infer_call_result(self, caller, context=None):
-        context = contextmod.bind_context_to_node(context, self.bound)
-        if (
-            self.bound.__class__.__name__ == "ClassDef"
-            and self.bound.name == "type"
-            and self.name == "__new__"
-            and len(caller.args) == 4
-        ):
-            # Check if we have a ``type.__new__(mcs, name, bases, attrs)`` call.
-            new_cls = self._infer_type_new_call(caller, context)
-            if new_cls:
-                return iter((new_cls,))
-
-        return super(BoundMethod, self).infer_call_result(caller, context)
-
-    def bool_value(self):
-        return True
-
-
-class Generator(BaseInstance):
-    """a special node representing a generator.
-
-    Proxied class is set once and for all in raw_building.
-    """
-
-    # pylint: disable=unnecessary-lambda
-    special_attributes = util.lazy_descriptor(lambda: objectmodel.GeneratorModel())
-
-    # pylint: disable=super-init-not-called
-    def __init__(self, parent=None):
-        self.parent = parent
-
-    def callable(self):
-        return False
-
-    def pytype(self):
-        return "%s.generator" % BUILTINS
-
-    def display_type(self):
-        return "Generator"
-
-    def bool_value(self):
-        return True
-
-    def __repr__(self):
-        return "<Generator(%s) l.%s at 0x%s>" % (
-            self._proxied.name,
-            self.lineno,
-            id(self),
-        )
-
-    def __str__(self):
-        return "Generator(%s)" % (self._proxied.name)
-
-
-class AsyncGenerator(Generator):
-    """Special node representing an async generator"""
-
-    def pytype(self):
-        return "%s.async_generator" % BUILTINS
-
-    def display_type(self):
-        return "AsyncGenerator"
-
-    def __repr__(self):
-        return "<AsyncGenerator(%s) l.%s at 0x%s>" % (
-            self._proxied.name,
-            self.lineno,
-            id(self),
-        )
-
-    def __str__(self):
-        return "AsyncGenerator(%s)" % (self._proxied.name)
diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_argparse.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_argparse.py
deleted file mode 100644
index d4899117..00000000
--- a/venv/lib/python3.8/site-packages/astroid/brain/brain_argparse.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from astroid import MANAGER, arguments, nodes, inference_tip, UseInferenceDefault
-
-
-def infer_namespace(node, context=None):
-    callsite = arguments.CallSite.from_call(node)
-    if not callsite.keyword_arguments:
-        # Cannot make sense of it.
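[Note] _infer_type_new_call above mirrors what the interpreter itself does for the three-argument form of type: it validates the metaclass, name, bases and attrs, then synthesizes a ClassDef the same way type() synthesizes a class. The runtime behaviour being modelled:

# the dynamic class creation that type.__new__(mcs, name, bases, attrs) performs
Point = type("Point", (object,), {"x": 0, "y": 0})

assert Point.__name__ == "Point"
assert issubclass(Point, object)
assert Point().x == 0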
- raise UseInferenceDefault() - - class_node = nodes.ClassDef("Namespace", "docstring") - class_node.parent = node.parent - for attr in set(callsite.keyword_arguments): - fake_node = nodes.EmptyNode() - fake_node.parent = class_node - fake_node.attrname = attr - class_node.instance_attrs[attr] = [fake_node] - return iter((class_node.instantiate_class(),)) - - -def _looks_like_namespace(node): - func = node.func - if isinstance(func, nodes.Attribute): - return ( - func.attrname == "Namespace" - and isinstance(func.expr, nodes.Name) - and func.expr.name == "argparse" - ) - return False - - -MANAGER.register_transform( - nodes.Call, inference_tip(infer_namespace), _looks_like_namespace -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_attrs.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_attrs.py deleted file mode 100644 index 670736fe..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_attrs.py +++ /dev/null @@ -1,65 +0,0 @@ -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER -""" -Astroid hook for the attrs library - -Without this hook pylint reports unsupported-assignment-operation -for attrs classes -""" - -import astroid -from astroid import MANAGER - - -ATTRIB_NAMES = frozenset(("attr.ib", "attrib", "attr.attrib")) -ATTRS_NAMES = frozenset(("attr.s", "attrs", "attr.attrs", "attr.attributes")) - - -def is_decorated_with_attrs(node, decorator_names=ATTRS_NAMES): - """Return True if a decorated node has - an attr decorator applied.""" - if not node.decorators: - return False - for decorator_attribute in node.decorators.nodes: - if isinstance(decorator_attribute, astroid.Call): # decorator with arguments - decorator_attribute = decorator_attribute.func - if decorator_attribute.as_string() in decorator_names: - return True - return False - - -def attr_attributes_transform(node): - """Given that the ClassNode has an attr decorator, - rewrite class attributes as instance attributes - """ - # Astroid can't infer this attribute properly - # Prevents https://github.com/PyCQA/pylint/issues/1884 - node.locals["__attrs_attrs__"] = [astroid.Unknown(parent=node)] - - for cdefbodynode in node.body: - if not isinstance(cdefbodynode, (astroid.Assign, astroid.AnnAssign)): - continue - if isinstance(cdefbodynode.value, astroid.Call): - if cdefbodynode.value.func.as_string() not in ATTRIB_NAMES: - continue - else: - continue - targets = ( - cdefbodynode.targets - if hasattr(cdefbodynode, "targets") - else [cdefbodynode.target] - ) - for target in targets: - - rhs_node = astroid.Unknown( - lineno=cdefbodynode.lineno, - col_offset=cdefbodynode.col_offset, - parent=cdefbodynode, - ) - node.locals[target.name] = [rhs_node] - node.instance_attrs[target.name] = [rhs_node] - - -MANAGER.register_transform( - astroid.ClassDef, attr_attributes_transform, is_decorated_with_attrs -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_builtin_inference.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_builtin_inference.py deleted file mode 100644 index 2dd7cc54..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_builtin_inference.py +++ /dev/null @@ -1,829 +0,0 @@ -# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014-2015 LOGILAB S.A. 
(Paris, FRANCE) -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 Rene Zhang -# Copyright (c) 2018 Bryce Guinta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Astroid hooks for various builtins.""" - -from functools import partial -from textwrap import dedent - -import six -from astroid import ( - MANAGER, - UseInferenceDefault, - AttributeInferenceError, - inference_tip, - InferenceError, - NameInferenceError, - AstroidTypeError, - MroError, -) -from astroid import arguments -from astroid.builder import AstroidBuilder -from astroid import helpers -from astroid import nodes -from astroid import objects -from astroid import scoped_nodes -from astroid import util - - -OBJECT_DUNDER_NEW = "object.__new__" - - -def _extend_str(class_node, rvalue): - """function to extend builtin str/unicode class""" - code = dedent( - """ - class whatever(object): - def join(self, iterable): - return {rvalue} - def replace(self, old, new, count=None): - return {rvalue} - def format(self, *args, **kwargs): - return {rvalue} - def encode(self, encoding='ascii', errors=None): - return '' - def decode(self, encoding='ascii', errors=None): - return u'' - def capitalize(self): - return {rvalue} - def title(self): - return {rvalue} - def lower(self): - return {rvalue} - def upper(self): - return {rvalue} - def swapcase(self): - return {rvalue} - def index(self, sub, start=None, end=None): - return 0 - def find(self, sub, start=None, end=None): - return 0 - def count(self, sub, start=None, end=None): - return 0 - def strip(self, chars=None): - return {rvalue} - def lstrip(self, chars=None): - return {rvalue} - def rstrip(self, chars=None): - return {rvalue} - def rjust(self, width, fillchar=None): - return {rvalue} - def center(self, width, fillchar=None): - return {rvalue} - def ljust(self, width, fillchar=None): - return {rvalue} - """ - ) - code = code.format(rvalue=rvalue) - fake = AstroidBuilder(MANAGER).string_build(code)["whatever"] - for method in fake.mymethods(): - method.parent = class_node - method.lineno = None - method.col_offset = None - if "__class__" in method.locals: - method.locals["__class__"] = [class_node] - class_node.locals[method.name] = [method] - method.parent = class_node - - -def _extend_builtins(class_transforms): - builtin_ast = MANAGER.builtins_module - for class_name, transform in class_transforms.items(): - transform(builtin_ast[class_name]) - - -_extend_builtins( - { - "bytes": partial(_extend_str, rvalue="b''"), - "str": partial(_extend_str, rvalue="''"), - } -) - - -def _builtin_filter_predicate(node, builtin_name): - if isinstance(node.func, nodes.Name) and node.func.name == builtin_name: - return True - if isinstance(node.func, nodes.Attribute): - return ( - node.func.attrname == "fromkeys" - and isinstance(node.func.expr, nodes.Name) - and node.func.expr.name == "dict" - ) - return False - - -def register_builtin_transform(transform, builtin_name): - """Register a new transform function for the given *builtin_name*. - - The transform function must accept two parameters, a node and - an optional context. - """ - - def _transform_wrapper(node, context=None): - result = transform(node, context=context) - if result: - if not result.parent: - # Let the transformation function determine - # the parent for its result. Otherwise, - # we set it to be the node we transformed from. 
- result.parent = node - - if result.lineno is None: - result.lineno = node.lineno - if result.col_offset is None: - result.col_offset = node.col_offset - return iter([result]) - - MANAGER.register_transform( - nodes.Call, - inference_tip(_transform_wrapper), - partial(_builtin_filter_predicate, builtin_name=builtin_name), - ) - - -def _container_generic_inference(node, context, node_type, transform): - args = node.args - if not args: - return node_type() - if len(node.args) > 1: - raise UseInferenceDefault() - - arg, = args - transformed = transform(arg) - if not transformed: - try: - inferred = next(arg.infer(context=context)) - except (InferenceError, StopIteration): - raise UseInferenceDefault() - if inferred is util.Uninferable: - raise UseInferenceDefault() - transformed = transform(inferred) - if not transformed or transformed is util.Uninferable: - raise UseInferenceDefault() - return transformed - - -def _container_generic_transform(arg, klass, iterables, build_elts): - if isinstance(arg, klass): - return arg - elif isinstance(arg, iterables): - if all(isinstance(elt, nodes.Const) for elt in arg.elts): - elts = [elt.value for elt in arg.elts] - else: - # TODO: Does not handle deduplication for sets. - elts = filter(None, map(helpers.safe_infer, arg.elts)) - elif isinstance(arg, nodes.Dict): - # Dicts need to have consts as strings already. - if not all(isinstance(elt[0], nodes.Const) for elt in arg.items): - raise UseInferenceDefault() - elts = [item[0].value for item in arg.items] - elif isinstance(arg, nodes.Const) and isinstance( - arg.value, (six.string_types, six.binary_type) - ): - elts = arg.value - else: - return - return klass.from_elements(elts=build_elts(elts)) - - -def _infer_builtin_container( - node, context, klass=None, iterables=None, build_elts=None -): - transform_func = partial( - _container_generic_transform, - klass=klass, - iterables=iterables, - build_elts=build_elts, - ) - - return _container_generic_inference(node, context, klass, transform_func) - - -# pylint: disable=invalid-name -infer_tuple = partial( - _infer_builtin_container, - klass=nodes.Tuple, - iterables=( - nodes.List, - nodes.Set, - objects.FrozenSet, - objects.DictItems, - objects.DictKeys, - objects.DictValues, - ), - build_elts=tuple, -) - -infer_list = partial( - _infer_builtin_container, - klass=nodes.List, - iterables=( - nodes.Tuple, - nodes.Set, - objects.FrozenSet, - objects.DictItems, - objects.DictKeys, - objects.DictValues, - ), - build_elts=list, -) - -infer_set = partial( - _infer_builtin_container, - klass=nodes.Set, - iterables=(nodes.List, nodes.Tuple, objects.FrozenSet, objects.DictKeys), - build_elts=set, -) - -infer_frozenset = partial( - _infer_builtin_container, - klass=objects.FrozenSet, - iterables=(nodes.List, nodes.Tuple, nodes.Set, objects.FrozenSet, objects.DictKeys), - build_elts=frozenset, -) - - -def _get_elts(arg, context): - is_iterable = lambda n: isinstance(n, (nodes.List, nodes.Tuple, nodes.Set)) - try: - inferred = next(arg.infer(context)) - except (InferenceError, NameInferenceError): - raise UseInferenceDefault() - if isinstance(inferred, nodes.Dict): - items = inferred.items - elif is_iterable(inferred): - items = [] - for elt in inferred.elts: - # If an item is not a pair of two items, - # then fallback to the default inference. - # Also, take in consideration only hashable items, - # tuples and consts. We are choosing Names as well. 
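[Note] The infer_tuple/infer_list/infer_set/infer_frozenset partials defined above are what let a constructor call collapse to a concrete container during inference. A usage sketch, assuming astroid is importable (the inferred node's exact rendering may vary by release):

import astroid

call = astroid.extract_node("tuple([1, 2, 3])")
inferred = next(call.infer())
print(inferred.as_string())  # roughly: (1, 2, 3)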
- if not is_iterable(elt): - raise UseInferenceDefault() - if len(elt.elts) != 2: - raise UseInferenceDefault() - if not isinstance(elt.elts[0], (nodes.Tuple, nodes.Const, nodes.Name)): - raise UseInferenceDefault() - items.append(tuple(elt.elts)) - else: - raise UseInferenceDefault() - return items - - -def infer_dict(node, context=None): - """Try to infer a dict call to a Dict node. - - The function treats the following cases: - - * dict() - * dict(mapping) - * dict(iterable) - * dict(iterable, **kwargs) - * dict(mapping, **kwargs) - * dict(**kwargs) - - If a case can't be inferred, we'll fallback to default inference. - """ - call = arguments.CallSite.from_call(node) - if call.has_invalid_arguments() or call.has_invalid_keywords(): - raise UseInferenceDefault - - args = call.positional_arguments - kwargs = list(call.keyword_arguments.items()) - - if not args and not kwargs: - # dict() - return nodes.Dict() - elif kwargs and not args: - # dict(a=1, b=2, c=4) - items = [(nodes.Const(key), value) for key, value in kwargs] - elif len(args) == 1 and kwargs: - # dict(some_iterable, b=2, c=4) - elts = _get_elts(args[0], context) - keys = [(nodes.Const(key), value) for key, value in kwargs] - items = elts + keys - elif len(args) == 1: - items = _get_elts(args[0], context) - else: - raise UseInferenceDefault() - - value = nodes.Dict( - col_offset=node.col_offset, lineno=node.lineno, parent=node.parent - ) - value.postinit(items) - return value - - -def infer_super(node, context=None): - """Understand super calls. - - There are some restrictions for what can be understood: - - * unbounded super (one argument form) is not understood. - - * if the super call is not inside a function (classmethod or method), - then the default inference will be used. - - * if the super arguments can't be inferred, the default inference - will be used. - """ - if len(node.args) == 1: - # Ignore unbounded super. - raise UseInferenceDefault - - scope = node.scope() - if not isinstance(scope, nodes.FunctionDef): - # Ignore non-method uses of super. - raise UseInferenceDefault - if scope.type not in ("classmethod", "method"): - # Not interested in staticmethods. - raise UseInferenceDefault - - cls = scoped_nodes.get_wrapping_class(scope) - if not len(node.args): - mro_pointer = cls - # In we are in a classmethod, the interpreter will fill - # automatically the class as the second argument, not an instance. - if scope.type == "classmethod": - mro_type = cls - else: - mro_type = cls.instantiate_class() - else: - try: - mro_pointer = next(node.args[0].infer(context=context)) - except InferenceError: - raise UseInferenceDefault - try: - mro_type = next(node.args[1].infer(context=context)) - except InferenceError: - raise UseInferenceDefault - - if mro_pointer is util.Uninferable or mro_type is util.Uninferable: - # No way we could understand this. - raise UseInferenceDefault - - super_obj = objects.Super( - mro_pointer=mro_pointer, mro_type=mro_type, self_class=cls, scope=scope - ) - super_obj.parent = node - return super_obj - - -def _infer_getattr_args(node, context): - if len(node.args) not in (2, 3): - # Not a valid getattr call. - raise UseInferenceDefault - - try: - obj = next(node.args[0].infer(context=context)) - attr = next(node.args[1].infer(context=context)) - except InferenceError: - raise UseInferenceDefault - - if obj is util.Uninferable or attr is util.Uninferable: - # If one of the arguments is something we can't infer, - # then also make the result of the getattr call something - # which is unknown. 
- return util.Uninferable, util.Uninferable - - is_string = isinstance(attr, nodes.Const) and isinstance( - attr.value, six.string_types - ) - if not is_string: - raise UseInferenceDefault - - return obj, attr.value - - -def infer_getattr(node, context=None): - """Understand getattr calls - - If one of the arguments is an Uninferable object, then the - result will be an Uninferable object. Otherwise, the normal attribute - lookup will be done. - """ - obj, attr = _infer_getattr_args(node, context) - if ( - obj is util.Uninferable - or attr is util.Uninferable - or not hasattr(obj, "igetattr") - ): - return util.Uninferable - - try: - return next(obj.igetattr(attr, context=context)) - except (StopIteration, InferenceError, AttributeInferenceError): - if len(node.args) == 3: - # Try to infer the default and return it instead. - try: - return next(node.args[2].infer(context=context)) - except InferenceError: - raise UseInferenceDefault - - raise UseInferenceDefault - - -def infer_hasattr(node, context=None): - """Understand hasattr calls - - This always guarantees three possible outcomes for calling - hasattr: Const(False) when we are sure that the object - doesn't have the intended attribute, Const(True) when - we know that the object has the attribute and Uninferable - when we are unsure of the outcome of the function call. - """ - try: - obj, attr = _infer_getattr_args(node, context) - if ( - obj is util.Uninferable - or attr is util.Uninferable - or not hasattr(obj, "getattr") - ): - return util.Uninferable - obj.getattr(attr, context=context) - except UseInferenceDefault: - # Can't infer something from this function call. - return util.Uninferable - except AttributeInferenceError: - # Doesn't have it. - return nodes.Const(False) - return nodes.Const(True) - - -def infer_callable(node, context=None): - """Understand callable calls - - This follows Python's semantics, where an object - is callable if it provides an attribute __call__, - even though that attribute is something which can't be - called. - """ - if len(node.args) != 1: - # Invalid callable call. - raise UseInferenceDefault - - argument = node.args[0] - try: - inferred = next(argument.infer(context=context)) - except InferenceError: - return util.Uninferable - if inferred is util.Uninferable: - return util.Uninferable - return nodes.Const(inferred.callable()) - - -def infer_bool(node, context=None): - """Understand bool calls.""" - if len(node.args) > 1: - # Invalid bool call. 
- raise UseInferenceDefault - - if not node.args: - return nodes.Const(False) - - argument = node.args[0] - try: - inferred = next(argument.infer(context=context)) - except InferenceError: - return util.Uninferable - if inferred is util.Uninferable: - return util.Uninferable - - bool_value = inferred.bool_value() - if bool_value is util.Uninferable: - return util.Uninferable - return nodes.Const(bool_value) - - -def infer_type(node, context=None): - """Understand the one-argument form of *type*.""" - if len(node.args) != 1: - raise UseInferenceDefault - - return helpers.object_type(node.args[0], context) - - -def infer_slice(node, context=None): - """Understand `slice` calls.""" - args = node.args - if not 0 < len(args) <= 3: - raise UseInferenceDefault - - infer_func = partial(helpers.safe_infer, context=context) - args = [infer_func(arg) for arg in args] - for arg in args: - if not arg or arg is util.Uninferable: - raise UseInferenceDefault - if not isinstance(arg, nodes.Const): - raise UseInferenceDefault - if not isinstance(arg.value, (type(None), int)): - raise UseInferenceDefault - - if len(args) < 3: - # Make sure we have 3 arguments. - args.extend([None] * (3 - len(args))) - - slice_node = nodes.Slice( - lineno=node.lineno, col_offset=node.col_offset, parent=node.parent - ) - slice_node.postinit(*args) - return slice_node - - -def _infer_object__new__decorator(node, context=None): - # Instantiate class immediately - # since that's what @object.__new__ does - return iter((node.instantiate_class(),)) - - -def _infer_object__new__decorator_check(node): - """Predicate before inference_tip - - Check if the given ClassDef has an @object.__new__ decorator - """ - if not node.decorators: - return False - - for decorator in node.decorators.nodes: - if isinstance(decorator, nodes.Attribute): - if decorator.as_string() == OBJECT_DUNDER_NEW: - return True - return False - - -def infer_issubclass(callnode, context=None): - """Infer issubclass() calls - - :param nodes.Call callnode: an `issubclass` call - :param InferenceContext: the context for the inference - :rtype nodes.Const: Boolean Const value of the `issubclass` call - :raises UseInferenceDefault: If the node cannot be inferred - """ - call = arguments.CallSite.from_call(callnode) - if call.keyword_arguments: - # issubclass doesn't support keyword arguments - raise UseInferenceDefault("TypeError: issubclass() takes no keyword arguments") - if len(call.positional_arguments) != 2: - raise UseInferenceDefault( - "Expected two arguments, got {count}".format( - count=len(call.positional_arguments) - ) - ) - # The left hand argument is the obj to be checked - obj_node, class_or_tuple_node = call.positional_arguments - - try: - obj_type = next(obj_node.infer(context=context)) - except InferenceError as exc: - raise UseInferenceDefault from exc - if not isinstance(obj_type, nodes.ClassDef): - raise UseInferenceDefault("TypeError: arg 1 must be class") - - # The right hand argument is the class(es) that the given - # object is to be checked against. 
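[Note] infer_issubclass and infer_isinstance follow the runtime contract closely: the second argument may be a single class or a tuple of classes (hence _class_or_tuple_to_container below), and the keyword form is rejected outright. The behaviour being modelled, in plain Python:

assert isinstance(3, (int, str)) is True  # tuple of classes is accepted
assert issubclass(bool, int) is True      # bool subclasses int

try:
    isinstance(3, classinfo=int)
except TypeError as exc:
    print(exc)  # isinstance() takes no keyword arguments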
- try: - class_container = _class_or_tuple_to_container( - class_or_tuple_node, context=context - ) - except InferenceError as exc: - raise UseInferenceDefault from exc - try: - issubclass_bool = helpers.object_issubclass(obj_type, class_container, context) - except AstroidTypeError as exc: - raise UseInferenceDefault("TypeError: " + str(exc)) from exc - except MroError as exc: - raise UseInferenceDefault from exc - return nodes.Const(issubclass_bool) - - -def infer_isinstance(callnode, context=None): - """Infer isinstance calls - - :param nodes.Call callnode: an isinstance call - :param InferenceContext: context for call - (currently unused but is a common interface for inference) - :rtype nodes.Const: Boolean Const value of isinstance call - - :raises UseInferenceDefault: If the node cannot be inferred - """ - call = arguments.CallSite.from_call(callnode) - if call.keyword_arguments: - # isinstance doesn't support keyword arguments - raise UseInferenceDefault("TypeError: isinstance() takes no keyword arguments") - if len(call.positional_arguments) != 2: - raise UseInferenceDefault( - "Expected two arguments, got {count}".format( - count=len(call.positional_arguments) - ) - ) - # The left hand argument is the obj to be checked - obj_node, class_or_tuple_node = call.positional_arguments - # The right hand argument is the class(es) that the given - # obj is to be check is an instance of - try: - class_container = _class_or_tuple_to_container( - class_or_tuple_node, context=context - ) - except InferenceError: - raise UseInferenceDefault - try: - isinstance_bool = helpers.object_isinstance(obj_node, class_container, context) - except AstroidTypeError as exc: - raise UseInferenceDefault("TypeError: " + str(exc)) - except MroError as exc: - raise UseInferenceDefault from exc - if isinstance_bool is util.Uninferable: - raise UseInferenceDefault - return nodes.Const(isinstance_bool) - - -def _class_or_tuple_to_container(node, context=None): - # Move inferences results into container - # to simplify later logic - # raises InferenceError if any of the inferences fall through - node_infer = next(node.infer(context=context)) - # arg2 MUST be a type or a TUPLE of types - # for isinstance - if isinstance(node_infer, nodes.Tuple): - class_container = [ - next(node.infer(context=context)) for node in node_infer.elts - ] - class_container = [ - klass_node for klass_node in class_container if klass_node is not None - ] - else: - class_container = [node_infer] - return class_container - - -def infer_len(node, context=None): - """Infer length calls - - :param nodes.Call node: len call to infer - :param context.InferenceContext: node context - :rtype nodes.Const: a Const node with the inferred length, if possible - """ - call = arguments.CallSite.from_call(node) - if call.keyword_arguments: - raise UseInferenceDefault("TypeError: len() must take no keyword arguments") - if len(call.positional_arguments) != 1: - raise UseInferenceDefault( - "TypeError: len() must take exactly one argument " - "({len}) given".format(len=len(call.positional_arguments)) - ) - [argument_node] = call.positional_arguments - try: - return nodes.Const(helpers.object_len(argument_node, context=context)) - except (AstroidTypeError, InferenceError) as exc: - raise UseInferenceDefault(str(exc)) from exc - - -def infer_str(node, context=None): - """Infer str() calls - - :param nodes.Call node: str() call to infer - :param context.InferenceContext: node context - :rtype nodes.Const: a Const containing an empty string - """ - call = 
arguments.CallSite.from_call(node) - if call.keyword_arguments: - raise UseInferenceDefault("TypeError: str() must take no keyword arguments") - try: - return nodes.Const("") - except (AstroidTypeError, InferenceError) as exc: - raise UseInferenceDefault(str(exc)) from exc - - -def infer_int(node, context=None): - """Infer int() calls - - :param nodes.Call node: int() call to infer - :param context.InferenceContext: node context - :rtype nodes.Const: a Const containing the integer value of the int() call - """ - call = arguments.CallSite.from_call(node) - if call.keyword_arguments: - raise UseInferenceDefault("TypeError: int() must take no keyword arguments") - - if call.positional_arguments: - try: - first_value = next(call.positional_arguments[0].infer(context=context)) - except InferenceError as exc: - raise UseInferenceDefault(str(exc)) from exc - - if first_value is util.Uninferable: - raise UseInferenceDefault - - if isinstance(first_value, nodes.Const) and isinstance( - first_value.value, (int, str) - ): - try: - actual_value = int(first_value.value) - except ValueError: - return nodes.Const(0) - return nodes.Const(actual_value) - - return nodes.Const(0) - - -def infer_dict_fromkeys(node, context=None): - """Infer dict.fromkeys - - :param nodes.Call node: dict.fromkeys() call to infer - :param context.InferenceContext: node context - :rtype nodes.Dict: - a Dictionary containing the values that astroid was able to infer. - In case the inference failed for any reason, an empty dictionary - will be inferred instead. - """ - - def _build_dict_with_elements(elements): - new_node = nodes.Dict( - col_offset=node.col_offset, lineno=node.lineno, parent=node.parent - ) - new_node.postinit(elements) - return new_node - - call = arguments.CallSite.from_call(node) - if call.keyword_arguments: - raise UseInferenceDefault("TypeError: int() must take no keyword arguments") - if len(call.positional_arguments) not in {1, 2}: - raise UseInferenceDefault( - "TypeError: Needs between 1 and 2 positional arguments" - ) - - default = nodes.Const(None) - values = call.positional_arguments[0] - try: - inferred_values = next(values.infer(context=context)) - except InferenceError: - return _build_dict_with_elements([]) - if inferred_values is util.Uninferable: - return _build_dict_with_elements([]) - - # Limit to a couple of potential values, as this can become pretty complicated - accepted_iterable_elements = (nodes.Const,) - if isinstance(inferred_values, (nodes.List, nodes.Set, nodes.Tuple)): - elements = inferred_values.elts - for element in elements: - if not isinstance(element, accepted_iterable_elements): - # Fallback to an empty dict - return _build_dict_with_elements([]) - - elements_with_value = [(element, default) for element in elements] - return _build_dict_with_elements(elements_with_value) - - elif isinstance(inferred_values, nodes.Const) and isinstance( - inferred_values.value, (str, bytes) - ): - elements = [ - (nodes.Const(element), default) for element in inferred_values.value - ] - return _build_dict_with_elements(elements) - elif isinstance(inferred_values, nodes.Dict): - keys = inferred_values.itered() - for key in keys: - if not isinstance(key, accepted_iterable_elements): - # Fallback to an empty dict - return _build_dict_with_elements([]) - - elements_with_value = [(element, default) for element in keys] - return _build_dict_with_elements(elements_with_value) - - # Fallback to an empty dictionary - return _build_dict_with_elements([]) - - -# Builtins inference 
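[Note] infer_dict_fromkeys above accepts the same input shapes as the real classmethod; for reference, the runtime results it approximates (the hook itself always models the values as None, via its `default = nodes.Const(None)`):

assert dict.fromkeys([1, 2]) == {1: None, 2: None}    # iterable of constants
assert dict.fromkeys("ab") == {"a": None, "b": None}  # string: one key per char
assert dict.fromkeys({"k": 1}) == {"k": None}         # keys of a mapping
assert dict.fromkeys([1, 2], 0) == {1: 0, 2: 0}       # explicit default value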
-register_builtin_transform(infer_bool, "bool") -register_builtin_transform(infer_super, "super") -register_builtin_transform(infer_callable, "callable") -register_builtin_transform(infer_getattr, "getattr") -register_builtin_transform(infer_hasattr, "hasattr") -register_builtin_transform(infer_tuple, "tuple") -register_builtin_transform(infer_set, "set") -register_builtin_transform(infer_list, "list") -register_builtin_transform(infer_dict, "dict") -register_builtin_transform(infer_frozenset, "frozenset") -register_builtin_transform(infer_type, "type") -register_builtin_transform(infer_slice, "slice") -register_builtin_transform(infer_isinstance, "isinstance") -register_builtin_transform(infer_issubclass, "issubclass") -register_builtin_transform(infer_len, "len") -register_builtin_transform(infer_str, "str") -register_builtin_transform(infer_int, "int") -register_builtin_transform(infer_dict_fromkeys, "dict.fromkeys") - - -# Infer object.__new__ calls -MANAGER.register_transform( - nodes.ClassDef, - inference_tip(_infer_object__new__decorator), - _infer_object__new__decorator_check, -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_collections.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_collections.py deleted file mode 100644 index e5b09ec8..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_collections.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2016, 2018 Claudiu Popa -# Copyright (c) 2016-2017 Łukasz Rogalski -# Copyright (c) 2017 Derek Gustafson -# Copyright (c) 2018 Ioana Tagirta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER -import sys - -import astroid - - -def _collections_transform(): - return astroid.parse( - """ - class defaultdict(dict): - default_factory = None - def __missing__(self, key): pass - def __getitem__(self, key): return default_factory - - """ - + _deque_mock() - + _ordered_dict_mock() - ) - - -def _deque_mock(): - base_deque_class = """ - class deque(object): - maxlen = 0 - def __init__(self, iterable=None, maxlen=None): - self.iterable = iterable or [] - def append(self, x): pass - def appendleft(self, x): pass - def clear(self): pass - def count(self, x): return 0 - def extend(self, iterable): pass - def extendleft(self, iterable): pass - def pop(self): return self.iterable[0] - def popleft(self): return self.iterable[0] - def remove(self, value): pass - def reverse(self): return reversed(self.iterable) - def rotate(self, n=1): return self - def __iter__(self): return self - def __reversed__(self): return self.iterable[::-1] - def __getitem__(self, index): return self.iterable[index] - def __setitem__(self, index, value): pass - def __delitem__(self, index): pass - def __bool__(self): return bool(self.iterable) - def __nonzero__(self): return bool(self.iterable) - def __contains__(self, o): return o in self.iterable - def __len__(self): return len(self.iterable) - def __copy__(self): return deque(self.iterable) - def copy(self): return deque(self.iterable) - def index(self, x, start=0, end=0): return 0 - def insert(self, x, i): pass - def __add__(self, other): pass - def __iadd__(self, other): pass - def __mul__(self, other): pass - def __imul__(self, other): pass - def __rmul__(self, other): pass""" - return base_deque_class - - -def _ordered_dict_mock(): - base_ordered_dict_class = """ - class OrderedDict(dict): - def __reversed__(self): return self[::-1] - def 
move_to_end(self, key, last=False): pass""" - return base_ordered_dict_class - - -astroid.register_module_extender(astroid.MANAGER, "collections", _collections_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_crypt.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_crypt.py deleted file mode 100644 index 491ee23c..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_crypt.py +++ /dev/null @@ -1,26 +0,0 @@ -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER -import sys -import astroid - -PY37 = sys.version_info >= (3, 7) - -if PY37: - # Since Python 3.7 Hashing Methods are added - # dynamically to globals() - - def _re_transform(): - return astroid.parse( - """ - from collections import namedtuple - _Method = namedtuple('_Method', 'name ident salt_chars total_size') - - METHOD_SHA512 = _Method('SHA512', '6', 16, 106) - METHOD_SHA256 = _Method('SHA256', '5', 16, 63) - METHOD_BLOWFISH = _Method('BLOWFISH', 2, 'b', 22) - METHOD_MD5 = _Method('MD5', '1', 8, 34) - METHOD_CRYPT = _Method('CRYPT', None, 2, 13) - """ - ) - - astroid.register_module_extender(astroid.MANAGER, "crypt", _re_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_curses.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_curses.py deleted file mode 100644 index 68e88b90..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_curses.py +++ /dev/null @@ -1,179 +0,0 @@ -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER -import astroid - - -def _curses_transform(): - return astroid.parse( - """ - A_ALTCHARSET = 1 - A_BLINK = 1 - A_BOLD = 1 - A_DIM = 1 - A_INVIS = 1 - A_ITALIC = 1 - A_NORMAL = 1 - A_PROTECT = 1 - A_REVERSE = 1 - A_STANDOUT = 1 - A_UNDERLINE = 1 - A_HORIZONTAL = 1 - A_LEFT = 1 - A_LOW = 1 - A_RIGHT = 1 - A_TOP = 1 - A_VERTICAL = 1 - A_CHARTEXT = 1 - A_ATTRIBUTES = 1 - A_CHARTEXT = 1 - A_COLOR = 1 - KEY_MIN = 1 - KEY_BREAK = 1 - KEY_DOWN = 1 - KEY_UP = 1 - KEY_LEFT = 1 - KEY_RIGHT = 1 - KEY_HOME = 1 - KEY_BACKSPACE = 1 - KEY_F0 = 1 - KEY_Fn = 1 - KEY_DL = 1 - KEY_IL = 1 - KEY_DC = 1 - KEY_IC = 1 - KEY_EIC = 1 - KEY_CLEAR = 1 - KEY_EOS = 1 - KEY_EOL = 1 - KEY_SF = 1 - KEY_SR = 1 - KEY_NPAGE = 1 - KEY_PPAGE = 1 - KEY_STAB = 1 - KEY_CTAB = 1 - KEY_CATAB = 1 - KEY_ENTER = 1 - KEY_SRESET = 1 - KEY_RESET = 1 - KEY_PRINT = 1 - KEY_LL = 1 - KEY_A1 = 1 - KEY_A3 = 1 - KEY_B2 = 1 - KEY_C1 = 1 - KEY_C3 = 1 - KEY_BTAB = 1 - KEY_BEG = 1 - KEY_CANCEL = 1 - KEY_CLOSE = 1 - KEY_COMMAND = 1 - KEY_COPY = 1 - KEY_CREATE = 1 - KEY_END = 1 - KEY_EXIT = 1 - KEY_FIND = 1 - KEY_HELP = 1 - KEY_MARK = 1 - KEY_MESSAGE = 1 - KEY_MOVE = 1 - KEY_NEXT = 1 - KEY_OPEN = 1 - KEY_OPTIONS = 1 - KEY_PREVIOUS = 1 - KEY_REDO = 1 - KEY_REFERENCE = 1 - KEY_REFRESH = 1 - KEY_REPLACE = 1 - KEY_RESTART = 1 - KEY_RESUME = 1 - KEY_SAVE = 1 - KEY_SBEG = 1 - KEY_SCANCEL = 1 - KEY_SCOMMAND = 1 - KEY_SCOPY = 1 - KEY_SCREATE = 1 - KEY_SDC = 1 - KEY_SDL = 1 - KEY_SELECT = 1 - KEY_SEND = 1 - KEY_SEOL = 1 - KEY_SEXIT = 1 - KEY_SFIND = 1 - KEY_SHELP = 1 - KEY_SHOME = 1 - KEY_SIC = 1 - KEY_SLEFT = 1 - KEY_SMESSAGE = 1 - KEY_SMOVE = 1 - KEY_SNEXT = 1 - KEY_SOPTIONS = 1 - KEY_SPREVIOUS = 1 - KEY_SPRINT = 1 - KEY_SREDO = 1 - KEY_SREPLACE = 1 - KEY_SRIGHT = 1 - KEY_SRSUME = 1 - KEY_SSAVE = 1 - KEY_SSUSPEND = 1 - KEY_SUNDO = 1 - KEY_SUSPEND = 1 - KEY_UNDO = 1 - 
KEY_MOUSE = 1 - KEY_RESIZE = 1 - KEY_MAX = 1 - ACS_BBSS = 1 - ACS_BLOCK = 1 - ACS_BOARD = 1 - ACS_BSBS = 1 - ACS_BSSB = 1 - ACS_BSSS = 1 - ACS_BTEE = 1 - ACS_BULLET = 1 - ACS_CKBOARD = 1 - ACS_DARROW = 1 - ACS_DEGREE = 1 - ACS_DIAMOND = 1 - ACS_GEQUAL = 1 - ACS_HLINE = 1 - ACS_LANTERN = 1 - ACS_LARROW = 1 - ACS_LEQUAL = 1 - ACS_LLCORNER = 1 - ACS_LRCORNER = 1 - ACS_LTEE = 1 - ACS_NEQUAL = 1 - ACS_PI = 1 - ACS_PLMINUS = 1 - ACS_PLUS = 1 - ACS_RARROW = 1 - ACS_RTEE = 1 - ACS_S1 = 1 - ACS_S3 = 1 - ACS_S7 = 1 - ACS_S9 = 1 - ACS_SBBS = 1 - ACS_SBSB = 1 - ACS_SBSS = 1 - ACS_SSBB = 1 - ACS_SSBS = 1 - ACS_SSSB = 1 - ACS_SSSS = 1 - ACS_STERLING = 1 - ACS_TTEE = 1 - ACS_UARROW = 1 - ACS_ULCORNER = 1 - ACS_URCORNER = 1 - ACS_VLINE = 1 - COLOR_BLACK = 1 - COLOR_BLUE = 1 - COLOR_CYAN = 1 - COLOR_GREEN = 1 - COLOR_MAGENTA = 1 - COLOR_RED = 1 - COLOR_WHITE = 1 - COLOR_YELLOW = 1 - """ - ) - - -astroid.register_module_extender(astroid.MANAGER, "curses", _curses_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_dataclasses.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_dataclasses.py deleted file mode 100644 index 7a25e0c6..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_dataclasses.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER -""" -Astroid hook for the dataclasses library -""" - -import astroid -from astroid import MANAGER - - -DATACLASSES_DECORATORS = frozenset(("dataclasses.dataclass", "dataclass")) - - -def is_decorated_with_dataclass(node, decorator_names=DATACLASSES_DECORATORS): - """Return True if a decorated node has a `dataclass` decorator applied.""" - if not node.decorators: - return False - for decorator_attribute in node.decorators.nodes: - if isinstance(decorator_attribute, astroid.Call): # decorator with arguments - decorator_attribute = decorator_attribute.func - if decorator_attribute.as_string() in decorator_names: - return True - return False - - -def dataclass_transform(node): - """Rewrite a dataclass to be easily understood by pylint""" - - for assign_node in node.body: - if not isinstance(assign_node, (astroid.AnnAssign, astroid.Assign)): - continue - - targets = ( - assign_node.targets - if hasattr(assign_node, "targets") - else [assign_node.target] - ) - for target in targets: - rhs_node = astroid.Unknown( - lineno=assign_node.lineno, - col_offset=assign_node.col_offset, - parent=assign_node, - ) - node.instance_attrs[target.name] = [rhs_node] - node.locals[target.name] = [rhs_node] - - -MANAGER.register_transform( - astroid.ClassDef, dataclass_transform, is_decorated_with_dataclass -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_dateutil.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_dateutil.py deleted file mode 100644 index a1c270fe..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_dateutil.py +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright (c) 2015-2016 Claudiu Popa -# Copyright (c) 2015 raylu -# Copyright (c) 2016 Ceridwen - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Astroid hooks for dateutil""" - -import textwrap - -from astroid import MANAGER, register_module_extender -from astroid.builder import AstroidBuilder - - -def dateutil_transform(): - return AstroidBuilder(MANAGER).string_build( - 
textwrap.dedent( - """ - import datetime - def parse(timestr, parserinfo=None, **kwargs): - return datetime.datetime() - """ - ) - ) - - -register_module_extender(MANAGER, "dateutil.parser", dateutil_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_fstrings.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_fstrings.py deleted file mode 100644 index 7d8c7b68..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_fstrings.py +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (c) 2017 Claudiu Popa - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER -import collections -import sys - -import astroid - - -def _clone_node_with_lineno(node, parent, lineno): - cls = node.__class__ - other_fields = node._other_fields - _astroid_fields = node._astroid_fields - init_params = {"lineno": lineno, "col_offset": node.col_offset, "parent": parent} - postinit_params = {param: getattr(node, param) for param in _astroid_fields} - if other_fields: - init_params.update({param: getattr(node, param) for param in other_fields}) - new_node = cls(**init_params) - if hasattr(node, "postinit") and _astroid_fields: - for param, child in postinit_params.items(): - if child and not isinstance(child, collections.Sequence): - cloned_child = _clone_node_with_lineno( - node=child, lineno=new_node.lineno, parent=new_node - ) - postinit_params[param] = cloned_child - new_node.postinit(**postinit_params) - return new_node - - -def _transform_formatted_value(node): - if node.value and node.value.lineno == 1: - if node.lineno != node.value.lineno: - new_node = astroid.FormattedValue( - lineno=node.lineno, col_offset=node.col_offset, parent=node.parent - ) - new_value = _clone_node_with_lineno( - node=node.value, lineno=node.lineno, parent=new_node - ) - new_node.postinit(value=new_value, format_spec=node.format_spec) - return new_node - - -if sys.version_info[:2] >= (3, 6): - # TODO: this fix tries to *patch* http://bugs.python.org/issue29051 - # The problem is that FormattedValue.value, which is a Name node, - # has wrong line numbers, usually 1. This creates problems for pylint, - # which expects correct line numbers for things such as message control. - astroid.MANAGER.register_transform( - astroid.FormattedValue, _transform_formatted_value - ) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_functools.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_functools.py deleted file mode 100644 index 8b594efe..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_functools.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) 2016, 2018 Claudiu Popa -# Copyright (c) 2018 Bryce Guinta - -"""Astroid hooks for understanding functools library module.""" -from functools import partial -from itertools import chain - -import astroid -from astroid import arguments -from astroid import BoundMethod -from astroid import extract_node -from astroid import helpers -from astroid.interpreter import objectmodel -from astroid import MANAGER -from astroid import objects - - -LRU_CACHE = "functools.lru_cache" - - -class LruWrappedModel(objectmodel.FunctionModel): - """Special attribute model for functions decorated with functools.lru_cache. - - The said decorators patches at decoration time some functions onto - the decorated function. 
- """ - - @property - def attr___wrapped__(self): - return self._instance - - @property - def attr_cache_info(self): - cache_info = extract_node( - """ - from functools import _CacheInfo - _CacheInfo(0, 0, 0, 0) - """ - ) - - class CacheInfoBoundMethod(BoundMethod): - def infer_call_result(self, caller, context=None): - yield helpers.safe_infer(cache_info) - - return CacheInfoBoundMethod(proxy=self._instance, bound=self._instance) - - @property - def attr_cache_clear(self): - node = extract_node("""def cache_clear(self): pass""") - return BoundMethod(proxy=node, bound=self._instance.parent.scope()) - - -def _transform_lru_cache(node, context=None): - # TODO: this is not ideal, since the node should be immutable, - # but due to https://github.com/PyCQA/astroid/issues/354, - # there's not much we can do now. - # Replacing the node would work partially, because, - # in pylint, the old node would still be available, leading - # to spurious false positives. - node.special_attributes = LruWrappedModel()(node) - return - - -def _functools_partial_inference(node, context=None): - call = arguments.CallSite.from_call(node) - number_of_positional = len(call.positional_arguments) - if number_of_positional < 1: - raise astroid.UseInferenceDefault( - "functools.partial takes at least one argument" - ) - if number_of_positional == 1 and not call.keyword_arguments: - raise astroid.UseInferenceDefault( - "functools.partial needs at least to have some filled arguments" - ) - - partial_function = call.positional_arguments[0] - try: - inferred_wrapped_function = next(partial_function.infer(context=context)) - except astroid.InferenceError as exc: - raise astroid.UseInferenceDefault from exc - if inferred_wrapped_function is astroid.Uninferable: - raise astroid.UseInferenceDefault("Cannot infer the wrapped function") - if not isinstance(inferred_wrapped_function, astroid.FunctionDef): - raise astroid.UseInferenceDefault("The wrapped function is not a function") - - # Determine if the passed keywords into the callsite are supported - # by the wrapped function. 
- function_parameters = chain( - inferred_wrapped_function.args.args or (), - inferred_wrapped_function.args.posonlyargs or (), - inferred_wrapped_function.args.kwonlyargs or (), - ) - parameter_names = set( - param.name - for param in function_parameters - if isinstance(param, astroid.AssignName) - ) - if set(call.keyword_arguments) - parameter_names: - raise astroid.UseInferenceDefault( - "wrapped function received unknown parameters" - ) - - partial_function = objects.PartialFunction( - call, - name=inferred_wrapped_function.name, - doc=inferred_wrapped_function.doc, - lineno=inferred_wrapped_function.lineno, - col_offset=inferred_wrapped_function.col_offset, - parent=inferred_wrapped_function.parent, - ) - partial_function.postinit( - args=inferred_wrapped_function.args, - body=inferred_wrapped_function.body, - decorators=inferred_wrapped_function.decorators, - returns=inferred_wrapped_function.returns, - type_comment_returns=inferred_wrapped_function.type_comment_returns, - type_comment_args=inferred_wrapped_function.type_comment_args, - ) - return iter((partial_function,)) - - -def _looks_like_lru_cache(node): - """Check if the given function node is decorated with lru_cache.""" - if not node.decorators: - return False - for decorator in node.decorators.nodes: - if not isinstance(decorator, astroid.Call): - continue - if _looks_like_functools_member(decorator, "lru_cache"): - return True - return False - - -def _looks_like_functools_member(node, member): - """Check if the given Call node is a functools.partial call""" - if isinstance(node.func, astroid.Name): - return node.func.name == member - elif isinstance(node.func, astroid.Attribute): - return ( - node.func.attrname == member - and isinstance(node.func.expr, astroid.Name) - and node.func.expr.name == "functools" - ) - - -_looks_like_partial = partial(_looks_like_functools_member, member="partial") - - -MANAGER.register_transform( - astroid.FunctionDef, _transform_lru_cache, _looks_like_lru_cache -) - - -MANAGER.register_transform( - astroid.Call, - astroid.inference_tip(_functools_partial_inference), - _looks_like_partial, -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_gi.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_gi.py deleted file mode 100644 index 09706107..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_gi.py +++ /dev/null @@ -1,220 +0,0 @@ -# Copyright (c) 2013-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2014 Google, Inc. -# Copyright (c) 2014 Cole Robinson -# Copyright (c) 2015-2016 Claudiu Popa -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 David Shea -# Copyright (c) 2016 Jakub Wilk -# Copyright (c) 2016 Giuseppe Scrivano - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Astroid hooks for the Python 2 GObject introspection bindings. - -Helps with understanding everything imported from 'gi.repository' -""" - -import inspect -import itertools -import sys -import re -import warnings - -from astroid import MANAGER, AstroidBuildingError, nodes -from astroid.builder import AstroidBuilder - - -_inspected_modules = {} - -_identifier_re = r"^[A-Za-z_]\w*$" - - -def _gi_build_stub(parent): - """ - Inspect the passed module recursively and build stubs for functions, - classes, etc. 
- """ - classes = {} - functions = {} - constants = {} - methods = {} - for name in dir(parent): - if name.startswith("__"): - continue - - # Check if this is a valid name in python - if not re.match(_identifier_re, name): - continue - - try: - obj = getattr(parent, name) - except: - continue - - if inspect.isclass(obj): - classes[name] = obj - elif inspect.isfunction(obj) or inspect.isbuiltin(obj): - functions[name] = obj - elif inspect.ismethod(obj) or inspect.ismethoddescriptor(obj): - methods[name] = obj - elif ( - str(obj).startswith(", ) - # Only accept function calls with two constant arguments - if len(node.args) != 2: - return False - - if not all(isinstance(arg, nodes.Const) for arg in node.args): - return False - - func = node.func - if isinstance(func, nodes.Attribute): - if func.attrname != "require_version": - return False - if isinstance(func.expr, nodes.Name) and func.expr.name == "gi": - return True - - return False - - if isinstance(func, nodes.Name): - return func.name == "require_version" - - return False - - -def _register_require_version(node): - # Load the gi.require_version locally - try: - import gi - - gi.require_version(node.args[0].value, node.args[1].value) - except Exception: - pass - - return node - - -MANAGER.register_failed_import_hook(_import_gi_module) -MANAGER.register_transform( - nodes.Call, _register_require_version, _looks_like_require_version -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_hashlib.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_hashlib.py deleted file mode 100644 index 98ae774d..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_hashlib.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright (c) 2016, 2018 Claudiu Popa -# Copyright (c) 2018 Ioana Tagirta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER -import sys - -import six - -import astroid - -PY36 = sys.version_info >= (3, 6) - - -def _hashlib_transform(): - signature = "value=''" - template = """ - class %(name)s(object): - def __init__(self, %(signature)s): pass - def digest(self): - return %(digest)s - def copy(self): - return self - def update(self, value): pass - def hexdigest(self): - return '' - @property - def name(self): - return %(name)r - @property - def block_size(self): - return 1 - @property - def digest_size(self): - return 1 - """ - algorithms_with_signature = dict.fromkeys( - ["md5", "sha1", "sha224", "sha256", "sha384", "sha512"], signature - ) - if PY36: - blake2b_signature = "data=b'', *, digest_size=64, key=b'', salt=b'', \ - person=b'', fanout=1, depth=1, leaf_size=0, node_offset=0, \ - node_depth=0, inner_size=0, last_node=False" - blake2s_signature = "data=b'', *, digest_size=32, key=b'', salt=b'', \ - person=b'', fanout=1, depth=1, leaf_size=0, node_offset=0, \ - node_depth=0, inner_size=0, last_node=False" - new_algorithms = dict.fromkeys( - ["sha3_224", "sha3_256", "sha3_384", "sha3_512", "shake_128", "shake_256"], - signature, - ) - algorithms_with_signature.update(new_algorithms) - algorithms_with_signature.update( - {"blake2b": blake2b_signature, "blake2s": blake2s_signature} - ) - classes = "".join( - template - % { - "name": hashfunc, - "digest": 'b""' if six.PY3 else '""', - "signature": signature, - } - for hashfunc, signature in algorithms_with_signature.items() - ) - return astroid.parse(classes) - - -astroid.register_module_extender(astroid.MANAGER, "hashlib", _hashlib_transform) diff 
--git a/venv/lib/python3.8/site-packages/astroid/brain/brain_http.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_http.py deleted file mode 100644 index a3aa8141..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_http.py +++ /dev/null @@ -1,201 +0,0 @@ -# Copyright (c) 2018 Claudiu Popa - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Astroid brain hints for some of the `http` module.""" -import textwrap - -import astroid -from astroid.builder import AstroidBuilder - - -def _http_transform(): - code = textwrap.dedent( - """ - from collections import namedtuple - _HTTPStatus = namedtuple('_HTTPStatus', 'value phrase description') - - class HTTPStatus: - - # informational - CONTINUE = _HTTPStatus(100, 'Continue', 'Request received, please continue') - SWITCHING_PROTOCOLS = _HTTPStatus(101, 'Switching Protocols', - 'Switching to new protocol; obey Upgrade header') - PROCESSING = _HTTPStatus(102, 'Processing', '') - OK = _HTTPStatus(200, 'OK', 'Request fulfilled, document follows') - CREATED = _HTTPStatus(201, 'Created', 'Document created, URL follows') - ACCEPTED = _HTTPStatus(202, 'Accepted', - 'Request accepted, processing continues off-line') - NON_AUTHORITATIVE_INFORMATION = _HTTPStatus(203, - 'Non-Authoritative Information', 'Request fulfilled from cache') - NO_CONTENT = _HTTPStatus(204, 'No Content', 'Request fulfilled, nothing follows') - RESET_CONTENT =_HTTPStatus(205, 'Reset Content', 'Clear input form for further input') - PARTIAL_CONTENT = _HTTPStatus(206, 'Partial Content', 'Partial content follows') - MULTI_STATUS = _HTTPStatus(207, 'Multi-Status', '') - ALREADY_REPORTED = _HTTPStatus(208, 'Already Reported', '') - IM_USED = _HTTPStatus(226, 'IM Used', '') - MULTIPLE_CHOICES = _HTTPStatus(300, 'Multiple Choices', - 'Object has several resources -- see URI list') - MOVED_PERMANENTLY = _HTTPStatus(301, 'Moved Permanently', - 'Object moved permanently -- see URI list') - FOUND = _HTTPStatus(302, 'Found', 'Object moved temporarily -- see URI list') - SEE_OTHER = _HTTPStatus(303, 'See Other', 'Object moved -- see Method and URL list') - NOT_MODIFIED = _HTTPStatus(304, 'Not Modified', - 'Document has not changed since given time') - USE_PROXY = _HTTPStatus(305, 'Use Proxy', - 'You must use proxy specified in Location to access this resource') - TEMPORARY_REDIRECT = _HTTPStatus(307, 'Temporary Redirect', - 'Object moved temporarily -- see URI list') - PERMANENT_REDIRECT = _HTTPStatus(308, 'Permanent Redirect', - 'Object moved permanently -- see URI list') - BAD_REQUEST = _HTTPStatus(400, 'Bad Request', - 'Bad request syntax or unsupported method') - UNAUTHORIZED = _HTTPStatus(401, 'Unauthorized', - 'No permission -- see authorization schemes') - PAYMENT_REQUIRED = _HTTPStatus(402, 'Payment Required', - 'No payment -- see charging schemes') - FORBIDDEN = _HTTPStatus(403, 'Forbidden', - 'Request forbidden -- authorization will not help') - NOT_FOUND = _HTTPStatus(404, 'Not Found', - 'Nothing matches the given URI') - METHOD_NOT_ALLOWED = _HTTPStatus(405, 'Method Not Allowed', - 'Specified method is invalid for this resource') - NOT_ACCEPTABLE = _HTTPStatus(406, 'Not Acceptable', - 'URI not available in preferred format') - PROXY_AUTHENTICATION_REQUIRED = _HTTPStatus(407, - 'Proxy Authentication Required', - 'You must authenticate with this proxy before proceeding') - REQUEST_TIMEOUT = _HTTPStatus(408, 'Request Timeout', - 'Request timed out; 
try again later') - CONFLICT = _HTTPStatus(409, 'Conflict', 'Request conflict') - GONE = _HTTPStatus(410, 'Gone', - 'URI no longer exists and has been permanently removed') - LENGTH_REQUIRED = _HTTPStatus(411, 'Length Required', - 'Client must specify Content-Length') - PRECONDITION_FAILED = _HTTPStatus(412, 'Precondition Failed', - 'Precondition in headers is false') - REQUEST_ENTITY_TOO_LARGE = _HTTPStatus(413, 'Request Entity Too Large', - 'Entity is too large') - REQUEST_URI_TOO_LONG = _HTTPStatus(414, 'Request-URI Too Long', - 'URI is too long') - UNSUPPORTED_MEDIA_TYPE = _HTTPStatus(415, 'Unsupported Media Type', - 'Entity body in unsupported format') - REQUESTED_RANGE_NOT_SATISFIABLE = _HTTPStatus(416, - 'Requested Range Not Satisfiable', - 'Cannot satisfy request range') - EXPECTATION_FAILED = _HTTPStatus(417, 'Expectation Failed', - 'Expect condition could not be satisfied') - MISDIRECTED_REQUEST = _HTTPStatus(421, 'Misdirected Request', - 'Server is not able to produce a response') - UNPROCESSABLE_ENTITY = _HTTPStatus(422, 'Unprocessable Entity') - LOCKED = _HTTPStatus(423, 'Locked') - FAILED_DEPENDENCY = _HTTPStatus(424, 'Failed Dependency') - UPGRADE_REQUIRED = _HTTPStatus(426, 'Upgrade Required') - PRECONDITION_REQUIRED = _HTTPStatus(428, 'Precondition Required', - 'The origin server requires the request to be conditional') - TOO_MANY_REQUESTS = _HTTPStatus(429, 'Too Many Requests', - 'The user has sent too many requests in ' - 'a given amount of time ("rate limiting")') - REQUEST_HEADER_FIELDS_TOO_LARGE = _HTTPStatus(431, - 'Request Header Fields Too Large', - 'The server is unwilling to process the request because its header ' - 'fields are too large') - UNAVAILABLE_FOR_LEGAL_REASONS = _HTTPStatus(451, - 'Unavailable For Legal Reasons', - 'The server is denying access to the ' - 'resource as a consequence of a legal demand') - INTERNAL_SERVER_ERROR = _HTTPStatus(500, 'Internal Server Error', - 'Server got itself in trouble') - NOT_IMPLEMENTED = _HTTPStatus(501, 'Not Implemented', - 'Server does not support this operation') - BAD_GATEWAY = _HTTPStatus(502, 'Bad Gateway', - 'Invalid responses from another server/proxy') - SERVICE_UNAVAILABLE = _HTTPStatus(503, 'Service Unavailable', - 'The server cannot process the request due to a high load') - GATEWAY_TIMEOUT = _HTTPStatus(504, 'Gateway Timeout', - 'The gateway server did not receive a timely response') - HTTP_VERSION_NOT_SUPPORTED = _HTTPStatus(505, 'HTTP Version Not Supported', - 'Cannot fulfill request') - VARIANT_ALSO_NEGOTIATES = _HTTPStatus(506, 'Variant Also Negotiates') - INSUFFICIENT_STORAGE = _HTTPStatus(507, 'Insufficient Storage') - LOOP_DETECTED = _HTTPStatus(508, 'Loop Detected') - NOT_EXTENDED = _HTTPStatus(510, 'Not Extended') - NETWORK_AUTHENTICATION_REQUIRED = _HTTPStatus(511, - 'Network Authentication Required', - 'The client needs to authenticate to gain network access') - """ - ) - return AstroidBuilder(astroid.MANAGER).string_build(code) - - -def _http_client_transform(): - return AstroidBuilder(astroid.MANAGER).string_build( - textwrap.dedent( - """ - from http import HTTPStatus - - CONTINUE = HTTPStatus.CONTINUE - SWITCHING_PROTOCOLS = HTTPStatus.SWITCHING_PROTOCOLS - PROCESSING = HTTPStatus.PROCESSING - OK = HTTPStatus.OK - CREATED = HTTPStatus.CREATED - ACCEPTED = HTTPStatus.ACCEPTED - NON_AUTHORITATIVE_INFORMATION = HTTPStatus.NON_AUTHORITATIVE_INFORMATION - NO_CONTENT = HTTPStatus.NO_CONTENT - RESET_CONTENT = HTTPStatus.RESET_CONTENT - PARTIAL_CONTENT = HTTPStatus.PARTIAL_CONTENT - MULTI_STATUS = 
HTTPStatus.MULTI_STATUS - ALREADY_REPORTED = HTTPStatus.ALREADY_REPORTED - IM_USED = HTTPStatus.IM_USED - MULTIPLE_CHOICES = HTTPStatus.MULTIPLE_CHOICES - MOVED_PERMANENTLY = HTTPStatus.MOVED_PERMANENTLY - FOUND = HTTPStatus.FOUND - SEE_OTHER = HTTPStatus.SEE_OTHER - NOT_MODIFIED = HTTPStatus.NOT_MODIFIED - USE_PROXY = HTTPStatus.USE_PROXY - TEMPORARY_REDIRECT = HTTPStatus.TEMPORARY_REDIRECT - PERMANENT_REDIRECT = HTTPStatus.PERMANENT_REDIRECT - BAD_REQUEST = HTTPStatus.BAD_REQUEST - UNAUTHORIZED = HTTPStatus.UNAUTHORIZED - PAYMENT_REQUIRED = HTTPStatus.PAYMENT_REQUIRED - FORBIDDEN = HTTPStatus.FORBIDDEN - NOT_FOUND = HTTPStatus.NOT_FOUND - METHOD_NOT_ALLOWED = HTTPStatus.METHOD_NOT_ALLOWED - NOT_ACCEPTABLE = HTTPStatus.NOT_ACCEPTABLE - PROXY_AUTHENTICATION_REQUIRED = HTTPStatus.PROXY_AUTHENTICATION_REQUIRED - REQUEST_TIMEOUT = HTTPStatus.REQUEST_TIMEOUT - CONFLICT = HTTPStatus.CONFLICT - GONE = HTTPStatus.GONE - LENGTH_REQUIRED = HTTPStatus.LENGTH_REQUIRED - PRECONDITION_FAILED = HTTPStatus.PRECONDITION_FAILED - REQUEST_ENTITY_TOO_LARGE = HTTPStatus.REQUEST_ENTITY_TOO_LARGE - REQUEST_URI_TOO_LONG = HTTPStatus.REQUEST_URI_TOO_LONG - UNSUPPORTED_MEDIA_TYPE = HTTPStatus.UNSUPPORTED_MEDIA_TYPE - REQUESTED_RANGE_NOT_SATISFIABLE = HTTPStatus.REQUESTED_RANGE_NOT_SATISFIABLE - EXPECTATION_FAILED = HTTPStatus.EXPECTATION_FAILED - UNPROCESSABLE_ENTITY = HTTPStatus.UNPROCESSABLE_ENTITY - LOCKED = HTTPStatus.LOCKED - FAILED_DEPENDENCY = HTTPStatus.FAILED_DEPENDENCY - UPGRADE_REQUIRED = HTTPStatus.UPGRADE_REQUIRED - PRECONDITION_REQUIRED = HTTPStatus.PRECONDITION_REQUIRED - TOO_MANY_REQUESTS = HTTPStatus.TOO_MANY_REQUESTS - REQUEST_HEADER_FIELDS_TOO_LARGE = HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE - INTERNAL_SERVER_ERROR = HTTPStatus.INTERNAL_SERVER_ERROR - NOT_IMPLEMENTED = HTTPStatus.NOT_IMPLEMENTED - BAD_GATEWAY = HTTPStatus.BAD_GATEWAY - SERVICE_UNAVAILABLE = HTTPStatus.SERVICE_UNAVAILABLE - GATEWAY_TIMEOUT = HTTPStatus.GATEWAY_TIMEOUT - HTTP_VERSION_NOT_SUPPORTED = HTTPStatus.HTTP_VERSION_NOT_SUPPORTED - VARIANT_ALSO_NEGOTIATES = HTTPStatus.VARIANT_ALSO_NEGOTIATES - INSUFFICIENT_STORAGE = HTTPStatus.INSUFFICIENT_STORAGE - LOOP_DETECTED = HTTPStatus.LOOP_DETECTED - NOT_EXTENDED = HTTPStatus.NOT_EXTENDED - NETWORK_AUTHENTICATION_REQUIRED = HTTPStatus.NETWORK_AUTHENTICATION_REQUIRED - """ - ) - ) - - -astroid.register_module_extender(astroid.MANAGER, "http", _http_transform) -astroid.register_module_extender(astroid.MANAGER, "http.client", _http_client_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_io.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_io.py deleted file mode 100644 index 4c689225..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_io.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (c) 2016 Claudiu Popa - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Astroid brain hints for some of the _io C objects.""" - -import astroid - - -BUFFERED = {"BufferedWriter", "BufferedReader"} -TextIOWrapper = "TextIOWrapper" -FileIO = "FileIO" -BufferedWriter = "BufferedWriter" - - -def _generic_io_transform(node, name, cls): - """Transform the given name, by adding the given *class* as a member of the node.""" - - io_module = astroid.MANAGER.ast_from_module_name("_io") - attribute_object = io_module[cls] - instance = attribute_object.instantiate_class() - node.locals[name] = [instance] - - -def _transform_text_io_wrapper(node): 
- # This is not always correct, since it can vary with the type of the descriptor, - # being stdout, stderr or stdin. But we cannot get access to the name of the - # stream, which is why we are using the BufferedWriter class as a default - # value - return _generic_io_transform(node, name="buffer", cls=BufferedWriter) - - -def _transform_buffered(node): - return _generic_io_transform(node, name="raw", cls=FileIO) - - -astroid.MANAGER.register_transform( - astroid.ClassDef, _transform_buffered, lambda node: node.name in BUFFERED -) -astroid.MANAGER.register_transform( - astroid.ClassDef, - _transform_text_io_wrapper, - lambda node: node.name == TextIOWrapper, -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_mechanize.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_mechanize.py deleted file mode 100644 index 93f282ec..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_mechanize.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2012-2013 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2014 Google, Inc. -# Copyright (c) 2015-2016 Claudiu Popa -# Copyright (c) 2016 Ceridwen - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -from astroid import MANAGER, register_module_extender -from astroid.builder import AstroidBuilder - - -def mechanize_transform(): - return AstroidBuilder(MANAGER).string_build( - """ - -class Browser(object): - def open(self, url, data=None, timeout=None): - return None - def open_novisit(self, url, data=None, timeout=None): - return None - def open_local_file(self, filename): - return None - -""" - ) - - -register_module_extender(MANAGER, "mechanize", mechanize_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_multiprocessing.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_multiprocessing.py deleted file mode 100644 index 71256ee3..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_multiprocessing.py +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright (c) 2016 Claudiu Popa - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -import sys - -import astroid -from astroid import exceptions - - -def _multiprocessing_transform(): - module = astroid.parse( - """ - from multiprocessing.managers import SyncManager - def Manager(): - return SyncManager() - """ - ) - # Multiprocessing uses a getattr lookup inside contexts, - # in order to get the attributes they need. Since it's extremely - # dynamic, we use this approach to fake it. - node = astroid.parse( - """ - from multiprocessing.context import DefaultContext, BaseContext - default = DefaultContext() - base = BaseContext() - """ - ) - try: - context = next(node["default"].infer()) - base = next(node["base"].infer()) - except exceptions.InferenceError: - return module - - for node in (context, base): - for key, value in node.locals.items(): - if key.startswith("_"): - continue - - value = value[0] - if isinstance(value, astroid.FunctionDef): - # We need to rebound this, since otherwise - # it will have an extra argument (self). 
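Wrapping the FunctionDef in astroid.BoundMethod on the next line is what drops the implicit self from the looked-up callable. The net effect of the whole transform, sketched under the assumption of a reasonably recent astroid (pytype() is astroid's API for an inferred instance's qualified type):

    import astroid

    call = astroid.extract_node('''
    import multiprocessing
    multiprocessing.Manager()  #@
    ''')

    inferred = next(call.infer())
    # expected, per the mock above: multiprocessing.managers.SyncManager
    print(inferred.pytype())
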
- value = astroid.BoundMethod(value, node) - module[key] = value - return module - - -def _multiprocessing_managers_transform(): - return astroid.parse( - """ - import array - import threading - import multiprocessing.pool as pool - - import six - - class Namespace(object): - pass - - class Value(object): - def __init__(self, typecode, value, lock=True): - self._typecode = typecode - self._value = value - def get(self): - return self._value - def set(self, value): - self._value = value - def __repr__(self): - return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value) - value = property(get, set) - - def Array(typecode, sequence, lock=True): - return array.array(typecode, sequence) - - class SyncManager(object): - Queue = JoinableQueue = six.moves.queue.Queue - Event = threading.Event - RLock = threading.RLock - BoundedSemaphore = threading.BoundedSemaphore - Condition = threading.Condition - Barrier = threading.Barrier - Pool = pool.Pool - list = list - dict = dict - Value = Value - Array = Array - Namespace = Namespace - __enter__ = lambda self: self - __exit__ = lambda *args: args - - def start(self, initializer=None, initargs=None): - pass - def shutdown(self): - pass - """ - ) - - -astroid.register_module_extender( - astroid.MANAGER, "multiprocessing.managers", _multiprocessing_managers_transform -) -astroid.register_module_extender( - astroid.MANAGER, "multiprocessing", _multiprocessing_transform -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_namedtuple_enum.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_namedtuple_enum.py deleted file mode 100644 index de240671..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_namedtuple_enum.py +++ /dev/null @@ -1,449 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2012-2015 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2013-2014 Google, Inc. 
-# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014 Eevee (Alex Munroe) -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 Dmitry Pribysh -# Copyright (c) 2015 David Shea -# Copyright (c) 2015 Philip Lorenz -# Copyright (c) 2016 Jakub Wilk -# Copyright (c) 2016 Mateusz Bysiek -# Copyright (c) 2017 Hugo -# Copyright (c) 2017 Łukasz Rogalski - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Astroid hooks for the Python standard library.""" - -import functools -import keyword -from textwrap import dedent - -from astroid import MANAGER, UseInferenceDefault, inference_tip, InferenceError -from astroid import arguments -from astroid import exceptions -from astroid import nodes -from astroid.builder import AstroidBuilder, extract_node -from astroid import util - - -TYPING_NAMEDTUPLE_BASENAMES = {"NamedTuple", "typing.NamedTuple"} -ENUM_BASE_NAMES = { - "Enum", - "IntEnum", - "enum.Enum", - "enum.IntEnum", - "IntFlag", - "enum.IntFlag", -} - - -def _infer_first(node, context): - if node is util.Uninferable: - raise UseInferenceDefault - try: - value = next(node.infer(context=context)) - if value is util.Uninferable: - raise UseInferenceDefault() - else: - return value - except StopIteration: - raise InferenceError() - - -def _find_func_form_arguments(node, context): - def _extract_namedtuple_arg_or_keyword(position, key_name=None): - - if len(args) > position: - return _infer_first(args[position], context) - if key_name and key_name in found_keywords: - return _infer_first(found_keywords[key_name], context) - - args = node.args - keywords = node.keywords - found_keywords = ( - {keyword.arg: keyword.value for keyword in keywords} if keywords else {} - ) - - name = _extract_namedtuple_arg_or_keyword(position=0, key_name="typename") - names = _extract_namedtuple_arg_or_keyword(position=1, key_name="field_names") - if name and names: - return name.value, names - - raise UseInferenceDefault() - - -def infer_func_form(node, base_type, context=None, enum=False): - """Specific inference function for namedtuple or Python 3 enum. """ - # node is a Call node, class name as first argument and generated class - # attributes as second argument - - # namedtuple or enums list of attributes can be a list of strings or a - # whitespace-separate string - try: - name, names = _find_func_form_arguments(node, context) - try: - attributes = names.value.replace(",", " ").split() - except AttributeError: - if not enum: - attributes = [ - _infer_first(const, context).value for const in names.elts - ] - else: - # Enums supports either iterator of (name, value) pairs - # or mappings. - if hasattr(names, "items") and isinstance(names.items, list): - attributes = [ - _infer_first(const[0], context).value - for const in names.items - if isinstance(const[0], nodes.Const) - ] - elif hasattr(names, "elts"): - # Enums can support either ["a", "b", "c"] - # or [("a", 1), ("b", 2), ...], but they can't - # be mixed. 
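For reference, these are the functional-API spellings the branch below has to tell apart; this is plain stdlib enum and runs as-is:

    from enum import Enum

    # whitespace-separated names in one string
    C1 = Enum("Color", "RED GREEN BLUE")
    # a plain sequence of names
    C2 = Enum("Color", ["RED", "GREEN", "BLUE"])
    # a sequence of (name, value) pairs -- the nodes.Tuple case below
    C3 = Enum("Color", [("RED", 1), ("GREEN", 2)])
    # a mapping, handled earlier via the names.items check
    C4 = Enum("Color", {"RED": 1, "GREEN": 2})

    assert C1.RED.value == 1 and C3.GREEN.value == 2
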
- if all(isinstance(const, nodes.Tuple) for const in names.elts): - attributes = [ - _infer_first(const.elts[0], context).value - for const in names.elts - if isinstance(const, nodes.Tuple) - ] - else: - attributes = [ - _infer_first(const, context).value for const in names.elts - ] - else: - raise AttributeError - if not attributes: - raise AttributeError - except (AttributeError, exceptions.InferenceError): - raise UseInferenceDefault() - - # If we can't infer the name of the class, don't crash, up to this point - # we know it is a namedtuple anyway. - name = name or "Uninferable" - # we want to return a Class node instance with proper attributes set - class_node = nodes.ClassDef(name, "docstring") - class_node.parent = node.parent - # set base class=tuple - class_node.bases.append(base_type) - # XXX add __init__(*attributes) method - for attr in attributes: - fake_node = nodes.EmptyNode() - fake_node.parent = class_node - fake_node.attrname = attr - class_node.instance_attrs[attr] = [fake_node] - return class_node, name, attributes - - -def _has_namedtuple_base(node): - """Predicate for class inference tip - - :type node: ClassDef - :rtype: bool - """ - return set(node.basenames) & TYPING_NAMEDTUPLE_BASENAMES - - -def _looks_like(node, name): - func = node.func - if isinstance(func, nodes.Attribute): - return func.attrname == name - if isinstance(func, nodes.Name): - return func.name == name - return False - - -_looks_like_namedtuple = functools.partial(_looks_like, name="namedtuple") -_looks_like_enum = functools.partial(_looks_like, name="Enum") -_looks_like_typing_namedtuple = functools.partial(_looks_like, name="NamedTuple") - - -def infer_named_tuple(node, context=None): - """Specific inference function for namedtuple Call node""" - tuple_base_name = nodes.Name(name="tuple", parent=node.root()) - class_node, name, attributes = infer_func_form( - node, tuple_base_name, context=context - ) - call_site = arguments.CallSite.from_call(node) - func = next(extract_node("import collections; collections.namedtuple").infer()) - try: - rename = next(call_site.infer_argument(func, "rename", context)).bool_value() - except InferenceError: - rename = False - - if rename: - attributes = _get_renamed_namedtuple_attributes(attributes) - - replace_args = ", ".join("{arg}=None".format(arg=arg) for arg in attributes) - field_def = ( - " {name} = property(lambda self: self[{index:d}], " - "doc='Alias for field number {index:d}')" - ) - field_defs = "\n".join( - field_def.format(name=name, index=index) - for index, name in enumerate(attributes) - ) - fake = AstroidBuilder(MANAGER).string_build( - """ -class %(name)s(tuple): - __slots__ = () - _fields = %(fields)r - def _asdict(self): - return self.__dict__ - @classmethod - def _make(cls, iterable, new=tuple.__new__, len=len): - return new(cls, iterable) - def _replace(self, %(replace_args)s): - return self - def __getnewargs__(self): - return tuple(self) -%(field_defs)s - """ - % { - "name": name, - "fields": attributes, - "field_defs": field_defs, - "replace_args": replace_args, - } - ) - class_node.locals["_asdict"] = fake.body[0].locals["_asdict"] - class_node.locals["_make"] = fake.body[0].locals["_make"] - class_node.locals["_replace"] = fake.body[0].locals["_replace"] - class_node.locals["_fields"] = fake.body[0].locals["_fields"] - for attr in attributes: - class_node.locals[attr] = fake.body[0].locals[attr] - # we use UseInferenceDefault, we can't be a generator so return an iterator - return iter([class_node]) - - -def 
_get_renamed_namedtuple_attributes(field_names): - names = list(field_names) - seen = set() - for i, name in enumerate(field_names): - if ( - not all(c.isalnum() or c == "_" for c in name) - or keyword.iskeyword(name) - or not name - or name[0].isdigit() - or name.startswith("_") - or name in seen - ): - names[i] = "_%d" % i - seen.add(name) - return tuple(names) - - -def infer_enum(node, context=None): - """ Specific inference function for enum Call node. """ - enum_meta = extract_node( - """ - class EnumMeta(object): - 'docstring' - def __call__(self, node): - class EnumAttribute(object): - name = '' - value = 0 - return EnumAttribute() - def __iter__(self): - class EnumAttribute(object): - name = '' - value = 0 - return [EnumAttribute()] - def __reversed__(self): - class EnumAttribute(object): - name = '' - value = 0 - return (EnumAttribute, ) - def __next__(self): - return next(iter(self)) - def __getitem__(self, attr): - class Value(object): - @property - def name(self): - return '' - @property - def value(self): - return attr - - return Value() - __members__ = [''] - """ - ) - class_node = infer_func_form(node, enum_meta, context=context, enum=True)[0] - return iter([class_node.instantiate_class()]) - - -INT_FLAG_ADDITION_METHODS = """ - def __or__(self, other): - return {name}(self.value | other.value) - def __and__(self, other): - return {name}(self.value & other.value) - def __xor__(self, other): - return {name}(self.value ^ other.value) - def __add__(self, other): - return {name}(self.value + other.value) - def __div__(self, other): - return {name}(self.value / other.value) - def __invert__(self): - return {name}(~self.value) - def __mul__(self, other): - return {name}(self.value * other.value) -""" - - -def infer_enum_class(node): - """ Specific inference for enums. """ - for basename in node.basenames: - # TODO: doesn't handle subclasses yet. This implementation - # is a hack to support enums. - if basename not in ENUM_BASE_NAMES: - continue - if node.root().name == "enum": - # Skip if the class is directly from enum module. - break - for local, values in node.locals.items(): - if any(not isinstance(value, nodes.AssignName) for value in values): - continue - - targets = [] - stmt = values[0].statement() - if isinstance(stmt, nodes.Assign): - if isinstance(stmt.targets[0], nodes.Tuple): - targets = stmt.targets[0].itered() - else: - targets = stmt.targets - elif isinstance(stmt, nodes.AnnAssign): - targets = [stmt.target] - - inferred_return_value = None - if isinstance(stmt, nodes.Assign): - if isinstance(stmt.value, nodes.Const): - if isinstance(stmt.value.value, str): - inferred_return_value = repr(stmt.value.value) - else: - inferred_return_value = stmt.value.value - else: - inferred_return_value = stmt.value.as_string() - - new_targets = [] - for target in targets: - # Replace all the assignments with our mocked class. - classdef = dedent( - """ - class {name}({types}): - @property - def value(self): - return {return_value} - @property - def name(self): - return "{name}" - """.format( - name=target.name, - types=", ".join(node.basenames), - return_value=inferred_return_value, - ) - ) - if "IntFlag" in basename: - # Alright, we need to add some additional methods. 
- # Unfortunately we still can't infer the resulting objects as - # Enum members, but once we'll be able to do that, the following - # should result in some nice symbolic execution - classdef += INT_FLAG_ADDITION_METHODS.format(name=target.name) - - fake = AstroidBuilder(MANAGER).string_build(classdef)[target.name] - fake.parent = target.parent - for method in node.mymethods(): - fake.locals[method.name] = [method] - new_targets.append(fake.instantiate_class()) - node.locals[local] = new_targets - break - return node - - -def infer_typing_namedtuple_class(class_node, context=None): - """Infer a subclass of typing.NamedTuple""" - # Check if it has the corresponding bases - annassigns_fields = [ - annassign.target.name - for annassign in class_node.body - if isinstance(annassign, nodes.AnnAssign) - ] - code = dedent( - """ - from collections import namedtuple - namedtuple({typename!r}, {fields!r}) - """ - ).format(typename=class_node.name, fields=",".join(annassigns_fields)) - node = extract_node(code) - generated_class_node = next(infer_named_tuple(node, context)) - for method in class_node.mymethods(): - generated_class_node.locals[method.name] = [method] - - for assign in class_node.body: - if not isinstance(assign, nodes.Assign): - continue - - for target in assign.targets: - attr = target.name - generated_class_node.locals[attr] = class_node.locals[attr] - - return iter((generated_class_node,)) - - -def infer_typing_namedtuple(node, context=None): - """Infer a typing.NamedTuple(...) call.""" - # This is essentially a namedtuple with different arguments - # so we extract the args and infer a named tuple. - try: - func = next(node.func.infer()) - except InferenceError: - raise UseInferenceDefault - - if func.qname() != "typing.NamedTuple": - raise UseInferenceDefault - - if len(node.args) != 2: - raise UseInferenceDefault - - if not isinstance(node.args[1], (nodes.List, nodes.Tuple)): - raise UseInferenceDefault - - names = [] - for elt in node.args[1].elts: - if not isinstance(elt, (nodes.List, nodes.Tuple)): - raise UseInferenceDefault - if len(elt.elts) != 2: - raise UseInferenceDefault - names.append(elt.elts[0].as_string()) - - typename = node.args[0].as_string() - if names: - field_names = "({},)".format(",".join(names)) - else: - field_names = "''" - node = extract_node( - "namedtuple({typename}, {fields})".format(typename=typename, fields=field_names) - ) - return infer_named_tuple(node, context) - - -MANAGER.register_transform( - nodes.Call, inference_tip(infer_named_tuple), _looks_like_namedtuple -) -MANAGER.register_transform(nodes.Call, inference_tip(infer_enum), _looks_like_enum) -MANAGER.register_transform( - nodes.ClassDef, - infer_enum_class, - predicate=lambda cls: any( - basename for basename in cls.basenames if basename in ENUM_BASE_NAMES - ), -) -MANAGER.register_transform( - nodes.ClassDef, inference_tip(infer_typing_namedtuple_class), _has_namedtuple_base -) -MANAGER.register_transform( - nodes.Call, inference_tip(infer_typing_namedtuple), _looks_like_typing_namedtuple -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_nose.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_nose.py deleted file mode 100644 index 7b12d760..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_nose.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright (c) 2015-2016 Claudiu Popa -# Copyright (c) 2016 Ceridwen - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: 
https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -"""Hooks for nose library.""" - -import re -import textwrap - -import astroid -import astroid.builder - -_BUILDER = astroid.builder.AstroidBuilder(astroid.MANAGER) - - -def _pep8(name, caps=re.compile("([A-Z])")): - return caps.sub(lambda m: "_" + m.groups()[0].lower(), name) - - -def _nose_tools_functions(): - """Get an iterator of names and bound methods.""" - module = _BUILDER.string_build( - textwrap.dedent( - """ - import unittest - - class Test(unittest.TestCase): - pass - a = Test() - """ - ) - ) - try: - case = next(module["a"].infer()) - except astroid.InferenceError: - return - for method in case.methods(): - if method.name.startswith("assert") and "_" not in method.name: - pep8_name = _pep8(method.name) - yield pep8_name, astroid.BoundMethod(method, case) - if method.name == "assertEqual": - # nose also exports assert_equals. - yield "assert_equals", astroid.BoundMethod(method, case) - - -def _nose_tools_transform(node): - for method_name, method in _nose_tools_functions(): - node.locals[method_name] = [method] - - -def _nose_tools_trivial_transform(): - """Custom transform for the nose.tools module.""" - stub = _BUILDER.string_build("""__all__ = []""") - all_entries = ["ok_", "eq_"] - - for pep8_name, method in _nose_tools_functions(): - all_entries.append(pep8_name) - stub[pep8_name] = method - - # Update the __all__ variable, since nose.tools - # does this manually with .append. - all_assign = stub["__all__"].parent - all_object = astroid.List(all_entries) - all_object.parent = all_assign - all_assign.value = all_object - return stub - - -astroid.register_module_extender( - astroid.MANAGER, "nose.tools.trivial", _nose_tools_trivial_transform -) -astroid.MANAGER.register_transform( - astroid.Module, _nose_tools_transform, lambda n: n.name == "nose.tools" -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_fromnumeric.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_fromnumeric.py deleted file mode 100644 index 43b30e4d..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_fromnumeric.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright (c) 2018-2019 hippo91 - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -"""Astroid hooks for numpy.core.fromnumeric module.""" - -import astroid - - -def numpy_core_fromnumeric_transform(): - return astroid.parse( - """ - def sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None): - return numpy.ndarray([0, 0]) - """ - ) - - -astroid.register_module_extender( - astroid.MANAGER, "numpy.core.fromnumeric", numpy_core_fromnumeric_transform -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_function_base.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_function_base.py deleted file mode 100644 index 05a73d96..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_function_base.py +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2018-2019 hippo91 - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -"""Astroid hooks for numpy.core.function_base module.""" - -import functools -import astroid -from brain_numpy_utils import looks_like_numpy_member, infer_numpy_member - - -METHODS_TO_BE_INFERRED = { - 
"linspace": """def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0): - return numpy.ndarray([0, 0])""", - "logspace": """def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0): - return numpy.ndarray([0, 0])""", - "geomspace": """def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): - return numpy.ndarray([0, 0])""", -} - -for func_name, func_src in METHODS_TO_BE_INFERRED.items(): - inference_function = functools.partial(infer_numpy_member, func_src) - astroid.MANAGER.register_transform( - astroid.Attribute, - astroid.inference_tip(inference_function), - functools.partial(looks_like_numpy_member, func_name), - ) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_multiarray.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_multiarray.py deleted file mode 100644 index 3032acc9..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_multiarray.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright (c) 2018-2019 hippo91 - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -"""Astroid hooks for numpy.core.multiarray module.""" - -import functools -import astroid -from brain_numpy_utils import looks_like_numpy_member, infer_numpy_member - - -def numpy_core_multiarray_transform(): - return astroid.parse( - """ - # different functions defined in multiarray.py - def inner(a, b): - return numpy.ndarray([0, 0]) - - def vdot(a, b): - return numpy.ndarray([0, 0]) - """ - ) - - -astroid.register_module_extender( - astroid.MANAGER, "numpy.core.multiarray", numpy_core_multiarray_transform -) - - -METHODS_TO_BE_INFERRED = { - "array": """def array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0): - return numpy.ndarray([0, 0])""", - "dot": """def dot(a, b, out=None): - return numpy.ndarray([0, 0])""", - "empty_like": """def empty_like(a, dtype=None, order='K', subok=True): - return numpy.ndarray((0, 0))""", - "concatenate": """def concatenate(arrays, axis=None, out=None): - return numpy.ndarray((0, 0))""", - "where": """def where(condition, x=None, y=None): - return numpy.ndarray([0, 0])""", - "empty": """def empty(shape, dtype=float, order='C'): - return numpy.ndarray([0, 0])""", - "zeros": """def zeros(shape, dtype=float, order='C'): - return numpy.ndarray([0, 0])""", -} - -for method_name, function_src in METHODS_TO_BE_INFERRED.items(): - inference_function = functools.partial(infer_numpy_member, function_src) - astroid.MANAGER.register_transform( - astroid.Attribute, - astroid.inference_tip(inference_function), - functools.partial(looks_like_numpy_member, method_name), - ) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_numeric.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_numeric.py deleted file mode 100644 index ba43c941..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_numeric.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2018-2019 hippo91 - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -"""Astroid hooks for numpy.core.numeric module.""" - -import functools -import astroid -from brain_numpy_utils import looks_like_numpy_member, infer_numpy_member - - -def numpy_core_numeric_transform(): - return astroid.parse( - """ - # different 
functions defined in numeric.py - import numpy - def zeros_like(a, dtype=None, order='K', subok=True): return numpy.ndarray((0, 0)) - def ones_like(a, dtype=None, order='K', subok=True): return numpy.ndarray((0, 0)) - def full_like(a, fill_value, dtype=None, order='K', subok=True): return numpy.ndarray((0, 0)) - """ - ) - - -astroid.register_module_extender( - astroid.MANAGER, "numpy.core.numeric", numpy_core_numeric_transform -) - - -METHODS_TO_BE_INFERRED = { - "ones": """def ones(shape, dtype=None, order='C'): - return numpy.ndarray([0, 0])""" -} - - -for method_name, function_src in METHODS_TO_BE_INFERRED.items(): - inference_function = functools.partial(infer_numpy_member, function_src) - astroid.MANAGER.register_transform( - astroid.Attribute, - astroid.inference_tip(inference_function), - functools.partial(looks_like_numpy_member, method_name), - ) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_numerictypes.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_numerictypes.py deleted file mode 100644 index 42021fae..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_numerictypes.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright (c) 2018-2019 hippo91 - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -# TODO(hippo91) : correct the methods signature. - -"""Astroid hooks for numpy.core.numerictypes module.""" - -import astroid - - -def numpy_core_numerictypes_transform(): - return astroid.parse( - """ - # different types defined in numerictypes.py - class generic(object): - def __init__(self, value): - self.T = None - self.base = None - self.data = None - self.dtype = None - self.flags = None - self.flat = None - self.imag = None - self.itemsize = None - self.nbytes = None - self.ndim = None - self.real = None - self.size = None - self.strides = None - - def all(self): return uninferable - def any(self): return uninferable - def argmax(self): return uninferable - def argmin(self): return uninferable - def argsort(self): return uninferable - def astype(self): return uninferable - def base(self): return uninferable - def byteswap(self): return uninferable - def choose(self): return uninferable - def clip(self): return uninferable - def compress(self): return uninferable - def conj(self): return uninferable - def conjugate(self): return uninferable - def copy(self): return uninferable - def cumprod(self): return uninferable - def cumsum(self): return uninferable - def data(self): return uninferable - def diagonal(self): return uninferable - def dtype(self): return uninferable - def dump(self): return uninferable - def dumps(self): return uninferable - def fill(self): return uninferable - def flags(self): return uninferable - def flat(self): return uninferable - def flatten(self): return uninferable - def getfield(self): return uninferable - def imag(self): return uninferable - def item(self): return uninferable - def itemset(self): return uninferable - def itemsize(self): return uninferable - def max(self): return uninferable - def mean(self): return uninferable - def min(self): return uninferable - def nbytes(self): return uninferable - def ndim(self): return uninferable - def newbyteorder(self): return uninferable - def nonzero(self): return uninferable - def prod(self): return uninferable - def ptp(self): return uninferable - def put(self): return uninferable - def ravel(self): return uninferable - def 
real(self): return uninferable - def repeat(self): return uninferable - def reshape(self): return uninferable - def resize(self): return uninferable - def round(self): return uninferable - def searchsorted(self): return uninferable - def setfield(self): return uninferable - def setflags(self): return uninferable - def shape(self): return uninferable - def size(self): return uninferable - def sort(self): return uninferable - def squeeze(self): return uninferable - def std(self): return uninferable - def strides(self): return uninferable - def sum(self): return uninferable - def swapaxes(self): return uninferable - def take(self): return uninferable - def tobytes(self): return uninferable - def tofile(self): return uninferable - def tolist(self): return uninferable - def tostring(self): return uninferable - def trace(self): return uninferable - def transpose(self): return uninferable - def var(self): return uninferable - def view(self): return uninferable - - - class dtype(object): - def __init__(self, obj, align=False, copy=False): - self.alignment = None - self.base = None - self.byteorder = None - self.char = None - self.descr = None - self.fields = None - self.flags = None - self.hasobject = None - self.isalignedstruct = None - self.isbuiltin = None - self.isnative = None - self.itemsize = None - self.kind = None - self.metadata = None - self.name = None - self.names = None - self.num = None - self.shape = None - self.str = None - self.subdtype = None - self.type = None - - def newbyteorder(self, new_order='S'): return uninferable - def __neg__(self): return uninferable - - class busdaycalendar(object): - def __init__(self, weekmask='1111100', holidays=None): - self.holidays = None - self.weekmask = None - - class flexible(generic): pass - class bool_(generic): pass - class number(generic): - def __neg__(self): return uninferable - class datetime64(generic): - def __init__(self, nb, unit=None): pass - - - class void(flexible): - def __init__(self, *args, **kwargs): - self.base = None - self.dtype = None - self.flags = None - def getfield(self): return uninferable - def setfield(self): return uninferable - - - class character(flexible): pass - - - class integer(number): - def __init__(self, value): - self.denominator = None - self.numerator = None - - - class inexact(number): pass - - - class str_(str, character): - def maketrans(self, x, y=None, z=None): return uninferable - - - class bytes_(bytes, character): - def fromhex(self, string): return uninferable - def maketrans(self, frm, to): return uninferable - - - class signedinteger(integer): pass - - - class unsignedinteger(integer): pass - - - class complexfloating(inexact): pass - - - class floating(inexact): pass - - - class float64(floating, float): - def fromhex(self, string): return uninferable - - - class uint64(unsignedinteger): pass - class complex64(complexfloating): pass - class int16(signedinteger): pass - class float96(floating): pass - class int8(signedinteger): pass - class uint32(unsignedinteger): pass - class uint8(unsignedinteger): pass - class _typedict(dict): pass - class complex192(complexfloating): pass - class timedelta64(signedinteger): - def __init__(self, nb, unit=None): pass - class int32(signedinteger): pass - class uint16(unsignedinteger): pass - class float32(floating): pass - class complex128(complexfloating, complex): pass - class float16(floating): pass - class int64(signedinteger): pass - - buffer_type = memoryview - bool8 = bool_ - byte = int8 - bytes0 = bytes_ - cdouble = complex128 - cfloat = 
complex128 - clongdouble = complex192 - clongfloat = complex192 - complex_ = complex128 - csingle = complex64 - double = float64 - float_ = float64 - half = float16 - int0 = int32 - int_ = int32 - intc = int32 - intp = int32 - long = int32 - longcomplex = complex192 - longdouble = float96 - longfloat = float96 - longlong = int64 - object0 = object_ - object_ = object_ - short = int16 - single = float32 - singlecomplex = complex64 - str0 = str_ - string_ = bytes_ - ubyte = uint8 - uint = uint32 - uint0 = uint32 - uintc = uint32 - uintp = uint32 - ulonglong = uint64 - unicode = str_ - unicode_ = str_ - ushort = uint16 - void0 = void - """ - ) - - -astroid.register_module_extender( - astroid.MANAGER, "numpy.core.numerictypes", numpy_core_numerictypes_transform -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_umath.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_umath.py deleted file mode 100644 index 459d38c7..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_core_umath.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright (c) 2018-2019 hippo91 - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -"""Astroid hooks for numpy.core.umath module.""" - -import astroid - - -def numpy_core_umath_transform(): - ufunc_optional_keyword_arguments = ( - """out=None, where=True, casting='same_kind', order='K', """ - """dtype=None, subok=True""" - ) - return astroid.parse( - """ - # Constants - e = 2.718281828459045 - euler_gamma = 0.5772156649015329 - - # No arg functions - def geterrobj(): return [] - - # One arg functions - def seterrobj(errobj): return None - - # One arg functions with optional kwargs - def arccos(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def arccosh(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def arcsin(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def arcsinh(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def arctan(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def arctanh(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def cbrt(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def conj(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def conjugate(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def cosh(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def deg2rad(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def degrees(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def exp2(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def expm1(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def fabs(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def frexp(x, {opt_args:s}): return (numpy.ndarray((0, 0)), numpy.ndarray((0, 0))) - def isfinite(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def isinf(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def log(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def log1p(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def log2(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def logical_not(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def modf(x, {opt_args:s}): return (numpy.ndarray((0, 0)), numpy.ndarray((0, 0))) - def negative(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def rad2deg(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def radians(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def reciprocal(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def rint(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def sign(x, 
{opt_args:s}): return numpy.ndarray((0, 0)) - def signbit(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def sinh(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def spacing(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def square(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def tan(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def tanh(x, {opt_args:s}): return numpy.ndarray((0, 0)) - def trunc(x, {opt_args:s}): return numpy.ndarray((0, 0)) - - # Two args functions with optional kwargs - def bitwise_and(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def bitwise_or(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def bitwise_xor(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def copysign(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def divide(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def equal(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def floor_divide(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def fmax(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def fmin(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def fmod(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def greater(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def hypot(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def ldexp(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def left_shift(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def less(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def logaddexp(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def logaddexp2(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def logical_and(x1, x2, {opt_args:s}): return numpy.ndarray([0, 0]) - def logical_or(x1, x2, {opt_args:s}): return numpy.ndarray([0, 0]) - def logical_xor(x1, x2, {opt_args:s}): return numpy.ndarray([0, 0]) - def maximum(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def minimum(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def nextafter(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def not_equal(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def power(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def remainder(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def right_shift(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def subtract(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - def true_divide(x1, x2, {opt_args:s}): return numpy.ndarray((0, 0)) - """.format( - opt_args=ufunc_optional_keyword_arguments - ) - ) - - -astroid.register_module_extender( - astroid.MANAGER, "numpy.core.umath", numpy_core_umath_transform -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_ndarray.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_ndarray.py deleted file mode 100644 index 8c231a30..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_ndarray.py +++ /dev/null @@ -1,153 +0,0 @@ -# Copyright (c) 2015-2016, 2018 Claudiu Popa -# Copyright (c) 2016 Ceridwen -# Copyright (c) 2017-2018 hippo91 - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -"""Astroid hooks for numpy ndarray class.""" - -import functools -import astroid - - -def infer_numpy_ndarray(node, context=None): - ndarray = """ - class ndarray(object): - def __init__(self, shape, dtype=float, buffer=None, offset=0, - strides=None, order=None): - self.T = None - self.base = None - self.ctypes = None - self.data = 
None - self.dtype = None - self.flags = None - self.flat = None - self.imag = None - self.itemsize = None - self.nbytes = None - self.ndim = None - self.real = None - self.shape = None - self.size = None - self.strides = None - - def __abs__(self): return numpy.ndarray([0, 0]) - def __add__(self, value): return numpy.ndarray([0, 0]) - def __and__(self, value): return numpy.ndarray([0, 0]) - def __array__(self, dtype=None): return numpy.ndarray([0, 0]) - def __array_wrap__(self, obj): return numpy.ndarray([0, 0]) - def __contains__(self, key): return True - def __copy__(self): return numpy.ndarray([0, 0]) - def __deepcopy__(self, memo): return numpy.ndarray([0, 0]) - def __divmod__(self, value): return (numpy.ndarray([0, 0]), numpy.ndarray([0, 0])) - def __eq__(self, value): return numpy.ndarray([0, 0]) - def __float__(self): return 0. - def __floordiv__(self): return numpy.ndarray([0, 0]) - def __ge__(self, value): return numpy.ndarray([0, 0]) - def __getitem__(self, key): return uninferable - def __gt__(self, value): return numpy.ndarray([0, 0]) - def __iadd__(self, value): return numpy.ndarray([0, 0]) - def __iand__(self, value): return numpy.ndarray([0, 0]) - def __ifloordiv__(self, value): return numpy.ndarray([0, 0]) - def __ilshift__(self, value): return numpy.ndarray([0, 0]) - def __imod__(self, value): return numpy.ndarray([0, 0]) - def __imul__(self, value): return numpy.ndarray([0, 0]) - def __int__(self): return 0 - def __invert__(self): return numpy.ndarray([0, 0]) - def __ior__(self, value): return numpy.ndarray([0, 0]) - def __ipow__(self, value): return numpy.ndarray([0, 0]) - def __irshift__(self, value): return numpy.ndarray([0, 0]) - def __isub__(self, value): return numpy.ndarray([0, 0]) - def __itruediv__(self, value): return numpy.ndarray([0, 0]) - def __ixor__(self, value): return numpy.ndarray([0, 0]) - def __le__(self, value): return numpy.ndarray([0, 0]) - def __len__(self): return 1 - def __lshift__(self, value): return numpy.ndarray([0, 0]) - def __lt__(self, value): return numpy.ndarray([0, 0]) - def __matmul__(self, value): return numpy.ndarray([0, 0]) - def __mod__(self, value): return numpy.ndarray([0, 0]) - def __mul__(self, value): return numpy.ndarray([0, 0]) - def __ne__(self, value): return numpy.ndarray([0, 0]) - def __neg__(self): return numpy.ndarray([0, 0]) - def __or__(self): return numpy.ndarray([0, 0]) - def __pos__(self): return numpy.ndarray([0, 0]) - def __pow__(self): return numpy.ndarray([0, 0]) - def __repr__(self): return str() - def __rshift__(self): return numpy.ndarray([0, 0]) - def __setitem__(self, key, value): return uninferable - def __str__(self): return str() - def __sub__(self, value): return numpy.ndarray([0, 0]) - def __truediv__(self, value): return numpy.ndarray([0, 0]) - def __xor__(self, value): return numpy.ndarray([0, 0]) - def all(self, axis=None, out=None, keepdims=False): return np.ndarray([0, 0]) - def any(self, axis=None, out=None, keepdims=False): return np.ndarray([0, 0]) - def argmax(self, axis=None, out=None): return np.ndarray([0, 0]) - def argmin(self, axis=None, out=None): return np.ndarray([0, 0]) - def argpartition(self, kth, axis=-1, kind='introselect', order=None): return np.ndarray([0, 0]) - def argsort(self, axis=-1, kind='quicksort', order=None): return np.ndarray([0, 0]) - def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): return np.ndarray([0, 0]) - def byteswap(self, inplace=False): return np.ndarray([0, 0]) - def choose(self, choices, out=None, mode='raise'): return 
np.ndarray([0, 0]) - def clip(self, min=None, max=None, out=None): return np.ndarray([0, 0]) - def compress(self, condition, axis=None, out=None): return np.ndarray([0, 0]) - def conj(self): return np.ndarray([0, 0]) - def conjugate(self): return np.ndarray([0, 0]) - def copy(self, order='C'): return np.ndarray([0, 0]) - def cumprod(self, axis=None, dtype=None, out=None): return np.ndarray([0, 0]) - def cumsum(self, axis=None, dtype=None, out=None): return np.ndarray([0, 0]) - def diagonal(self, offset=0, axis1=0, axis2=1): return np.ndarray([0, 0]) - def dot(self, b, out=None): return np.ndarray([0, 0]) - def dump(self, file): return None - def dumps(self): return str() - def fill(self, value): return None - def flatten(self, order='C'): return np.ndarray([0, 0]) - def getfield(self, dtype, offset=0): return np.ndarray([0, 0]) - def item(self, *args): return uninferable - def itemset(self, *args): return None - def max(self, axis=None, out=None): return np.ndarray([0, 0]) - def mean(self, axis=None, dtype=None, out=None, keepdims=False): return np.ndarray([0, 0]) - def min(self, axis=None, out=None, keepdims=False): return np.ndarray([0, 0]) - def newbyteorder(self, new_order='S'): return np.ndarray([0, 0]) - def nonzero(self): return (1,) - def partition(self, kth, axis=-1, kind='introselect', order=None): return None - def prod(self, axis=None, dtype=None, out=None, keepdims=False): return np.ndarray([0, 0]) - def ptp(self, axis=None, out=None): return np.ndarray([0, 0]) - def put(self, indices, values, mode='raise'): return None - def ravel(self, order='C'): return np.ndarray([0, 0]) - def repeat(self, repeats, axis=None): return np.ndarray([0, 0]) - def reshape(self, shape, order='C'): return np.ndarray([0, 0]) - def resize(self, new_shape, refcheck=True): return None - def round(self, decimals=0, out=None): return np.ndarray([0, 0]) - def searchsorted(self, v, side='left', sorter=None): return np.ndarray([0, 0]) - def setfield(self, val, dtype, offset=0): return None - def setflags(self, write=None, align=None, uic=None): return None - def sort(self, axis=-1, kind='quicksort', order=None): return None - def squeeze(self, axis=None): return np.ndarray([0, 0]) - def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return np.ndarray([0, 0]) - def sum(self, axis=None, dtype=None, out=None, keepdims=False): return np.ndarray([0, 0]) - def swapaxes(self, axis1, axis2): return np.ndarray([0, 0]) - def take(self, indices, axis=None, out=None, mode='raise'): return np.ndarray([0, 0]) - def tobytes(self, order='C'): return b'' - def tofile(self, fid, sep="", format="%s"): return None - def tolist(self, ): return [] - def tostring(self, order='C'): return b'' - def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): return np.ndarray([0, 0]) - def transpose(self, *axes): return np.ndarray([0, 0]) - def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): return np.ndarray([0, 0]) - def view(self, dtype=None, type=None): return np.ndarray([0, 0]) - """ - node = astroid.extract_node(ndarray) - return node.infer(context=context) - - -def _looks_like_numpy_ndarray(node): - return isinstance(node, astroid.Attribute) and node.attrname == "ndarray" - - -astroid.MANAGER.register_transform( - astroid.Attribute, - astroid.inference_tip(infer_numpy_ndarray), - _looks_like_numpy_ndarray, -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_random_mtrand.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_random_mtrand.py deleted 
file mode 100644 index 772bfc4e..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_random_mtrand.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2018-2019 hippo91 - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -# TODO(hippo91) : correct the functions return types -"""Astroid hooks for numpy.random.mtrand module.""" - -import astroid - - -def numpy_random_mtrand_transform(): - return astroid.parse( - """ - def beta(a, b, size=None): return uninferable - def binomial(n, p, size=None): return uninferable - def bytes(length): return uninferable - def chisquare(df, size=None): return uninferable - def choice(a, size=None, replace=True, p=None): return uninferable - def dirichlet(alpha, size=None): return uninferable - def exponential(scale=1.0, size=None): return uninferable - def f(dfnum, dfden, size=None): return uninferable - def gamma(shape, scale=1.0, size=None): return uninferable - def geometric(p, size=None): return uninferable - def get_state(): return uninferable - def gumbel(loc=0.0, scale=1.0, size=None): return uninferable - def hypergeometric(ngood, nbad, nsample, size=None): return uninferable - def laplace(loc=0.0, scale=1.0, size=None): return uninferable - def logistic(loc=0.0, scale=1.0, size=None): return uninferable - def lognormal(mean=0.0, sigma=1.0, size=None): return uninferable - def logseries(p, size=None): return uninferable - def multinomial(n, pvals, size=None): return uninferable - def multivariate_normal(mean, cov, size=None): return uninferable - def negative_binomial(n, p, size=None): return uninferable - def noncentral_chisquare(df, nonc, size=None): return uninferable - def noncentral_f(dfnum, dfden, nonc, size=None): return uninferable - def normal(loc=0.0, scale=1.0, size=None): return uninferable - def pareto(a, size=None): return uninferable - def permutation(x): return uninferable - def poisson(lam=1.0, size=None): return uninferable - def power(a, size=None): return uninferable - def rand(*args): return uninferable - def randint(low, high=None, size=None, dtype='l'): - import numpy - return numpy.ndarray((1,1)) - def randn(*args): return uninferable - def random_integers(low, high=None, size=None): return uninferable - def random_sample(size=None): return uninferable - def rayleigh(scale=1.0, size=None): return uninferable - def seed(seed=None): return uninferable - def set_state(state): return uninferable - def shuffle(x): return uninferable - def standard_cauchy(size=None): return uninferable - def standard_exponential(size=None): return uninferable - def standard_gamma(shape, size=None): return uninferable - def standard_normal(size=None): return uninferable - def standard_t(df, size=None): return uninferable - def triangular(left, mode, right, size=None): return uninferable - def uniform(low=0.0, high=1.0, size=None): return uninferable - def vonmises(mu, kappa, size=None): return uninferable - def wald(mean, scale, size=None): return uninferable - def weibull(a, size=None): return uninferable - def zipf(a, size=None): return uninferable - """ - ) - - -astroid.register_module_extender( - astroid.MANAGER, "numpy.random.mtrand", numpy_random_mtrand_transform -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_utils.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_utils.py deleted file mode 100644 index 2bad01ee..00000000 --- 
a/venv/lib/python3.8/site-packages/astroid/brain/brain_numpy_utils.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2018-2019 hippo91 - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -"""Different utilities for the numpy brains""" - - -import astroid - - -def infer_numpy_member(src, node, context=None): - node = astroid.extract_node(src) - return node.infer(context=context) - - -def _is_a_numpy_module(node: astroid.node_classes.Name) -> bool: - """ - Returns True if the node is a representation of a numpy module. - - For example in : - import numpy as np - x = np.linspace(1, 2) - The node is a representation of the numpy module. - - :param node: node to test - :return: True if the node is a representation of the numpy module. - """ - module_nickname = node.name - potential_import_target = [ - x for x in node.lookup(module_nickname)[1] if isinstance(x, astroid.Import) - ] - for target in potential_import_target: - if ("numpy", module_nickname) in target.names: - return True - return False - - -def looks_like_numpy_member( - member_name: str, node: astroid.node_classes.NodeNG -) -> bool: - """ - Returns True if the node is a member of numpy whose - name is member_name. - - :param member_name: name of the member - :param node: node to test - :return: True if the node is a member of numpy - """ - return ( - isinstance(node, astroid.Attribute) - and node.attrname == member_name - and isinstance(node.expr, astroid.Name) - and _is_a_numpy_module(node.expr) - ) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_pkg_resources.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_pkg_resources.py deleted file mode 100644 index 25e76495..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_pkg_resources.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2016, 2018 Claudiu Popa -# Copyright (c) 2016 Ceridwen - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -import astroid -from astroid import parse -from astroid import inference_tip -from astroid import register_module_extender -from astroid import MANAGER - - -def pkg_resources_transform(): - return parse( - """ -def require(*requirements): - return pkg_resources.working_set.require(*requirements) - -def run_script(requires, script_name): - return pkg_resources.working_set.run_script(requires, script_name) - -def iter_entry_points(group, name=None): - return pkg_resources.working_set.iter_entry_points(group, name) - -def resource_exists(package_or_requirement, resource_name): - return get_provider(package_or_requirement).has_resource(resource_name) - -def resource_isdir(package_or_requirement, resource_name): - return get_provider(package_or_requirement).resource_isdir( - resource_name) - -def resource_filename(package_or_requirement, resource_name): - return get_provider(package_or_requirement).get_resource_filename( - self, resource_name) - -def resource_stream(package_or_requirement, resource_name): - return get_provider(package_or_requirement).get_resource_stream( - self, resource_name) - -def resource_string(package_or_requirement, resource_name): - return get_provider(package_or_requirement).get_resource_string( - self, resource_name) - -def resource_listdir(package_or_requirement, resource_name): - return get_provider(package_or_requirement).resource_listdir( - 
resource_name) - -def extraction_error(): - pass - -def get_cache_path(archive_name, names=()): - extract_path = self.extraction_path or get_default_cache() - target_path = os.path.join(extract_path, archive_name+'-tmp', *names) - return target_path - -def postprocess(tempname, filename): - pass - -def set_extraction_path(path): - pass - -def cleanup_resources(force=False): - pass - -def get_distribution(dist): - return Distribution(dist) - -_namespace_packages = {} -""" - ) - - -register_module_extender(MANAGER, "pkg_resources", pkg_resources_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_pytest.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_pytest.py deleted file mode 100644 index d7e3ac8a..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_pytest.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) 2014-2016 Claudiu Popa -# Copyright (c) 2014 Jeff Quast -# Copyright (c) 2014 Google, Inc. -# Copyright (c) 2016 Florian Bruhin -# Copyright (c) 2016 Ceridwen - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Astroid hooks for pytest.""" -from __future__ import absolute_import -from astroid import MANAGER, register_module_extender -from astroid.builder import AstroidBuilder - - -def pytest_transform(): - return AstroidBuilder(MANAGER).string_build( - """ - -try: - import _pytest.mark - import _pytest.recwarn - import _pytest.runner - import _pytest.python - import _pytest.skipping - import _pytest.assertion -except ImportError: - pass -else: - deprecated_call = _pytest.recwarn.deprecated_call - warns = _pytest.recwarn.warns - - exit = _pytest.runner.exit - fail = _pytest.runner.fail - skip = _pytest.runner.skip - importorskip = _pytest.runner.importorskip - - xfail = _pytest.skipping.xfail - mark = _pytest.mark.MarkGenerator() - raises = _pytest.python.raises - - # New in pytest 3.0 - try: - approx = _pytest.python.approx - register_assert_rewrite = _pytest.assertion.register_assert_rewrite - except AttributeError: - pass - - -# Moved in pytest 3.0 - -try: - import _pytest.freeze_support - freeze_includes = _pytest.freeze_support.freeze_includes -except ImportError: - try: - import _pytest.genscript - freeze_includes = _pytest.genscript.freeze_includes - except ImportError: - pass - -try: - import _pytest.debugging - set_trace = _pytest.debugging.pytestPDB().set_trace -except ImportError: - try: - import _pytest.pdb - set_trace = _pytest.pdb.pytestPDB().set_trace - except ImportError: - pass - -try: - import _pytest.fixtures - fixture = _pytest.fixtures.fixture - yield_fixture = _pytest.fixtures.yield_fixture -except ImportError: - try: - import _pytest.python - fixture = _pytest.python.fixture - yield_fixture = _pytest.python.yield_fixture - except ImportError: - pass -""" - ) - - -register_module_extender(MANAGER, "pytest", pytest_transform) -register_module_extender(MANAGER, "py.test", pytest_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_qt.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_qt.py deleted file mode 100644 index 8679d140..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_qt.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright (c) 2015-2016 Claudiu Popa -# Copyright (c) 2016 Ceridwen -# Copyright (c) 2017 Roy Wright -# Copyright (c) 2018 Ashley Whetter - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For 
details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Astroid hooks for the PyQT library.""" - -from astroid import MANAGER, register_module_extender -from astroid.builder import AstroidBuilder -from astroid import nodes -from astroid import parse - - -def _looks_like_signal(node, signal_name="pyqtSignal"): - if "__class__" in node.instance_attrs: - try: - cls = node.instance_attrs["__class__"][0] - return cls.name == signal_name - except AttributeError: - # return False if the cls does not have a name attribute - pass - return False - - -def transform_pyqt_signal(node): - module = parse( - """ - class pyqtSignal(object): - def connect(self, slot, type=None, no_receiver_check=False): - pass - def disconnect(self, slot): - pass - def emit(self, *args): - pass - """ - ) - signal_cls = module["pyqtSignal"] - node.instance_attrs["emit"] = signal_cls["emit"] - node.instance_attrs["disconnect"] = signal_cls["disconnect"] - node.instance_attrs["connect"] = signal_cls["connect"] - - -def transform_pyside_signal(node): - module = parse( - """ - class NotPySideSignal(object): - def connect(self, receiver, type=None): - pass - def disconnect(self, receiver): - pass - def emit(self, *args): - pass - """ - ) - signal_cls = module["NotPySideSignal"] - node.instance_attrs["connect"] = signal_cls["connect"] - node.instance_attrs["disconnect"] = signal_cls["disconnect"] - node.instance_attrs["emit"] = signal_cls["emit"] - - -def pyqt4_qtcore_transform(): - return AstroidBuilder(MANAGER).string_build( - """ - -def SIGNAL(signal_name): pass - -class QObject(object): - def emit(self, signal): pass -""" - ) - - -register_module_extender(MANAGER, "PyQt4.QtCore", pyqt4_qtcore_transform) -MANAGER.register_transform(nodes.FunctionDef, transform_pyqt_signal, _looks_like_signal) -MANAGER.register_transform( - nodes.ClassDef, - transform_pyside_signal, - lambda node: node.qname() in ("PySide.QtCore.Signal", "PySide2.QtCore.Signal"), -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_random.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_random.py deleted file mode 100644 index 5ec858a1..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_random.py +++ /dev/null @@ -1,75 +0,0 @@ -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER -import random - -import astroid -from astroid import helpers -from astroid import MANAGER - - -ACCEPTED_ITERABLES_FOR_SAMPLE = (astroid.List, astroid.Set, astroid.Tuple) - - -def _clone_node_with_lineno(node, parent, lineno): - cls = node.__class__ - other_fields = node._other_fields - _astroid_fields = node._astroid_fields - init_params = {"lineno": lineno, "col_offset": node.col_offset, "parent": parent} - postinit_params = {param: getattr(node, param) for param in _astroid_fields} - if other_fields: - init_params.update({param: getattr(node, param) for param in other_fields}) - new_node = cls(**init_params) - if hasattr(node, "postinit") and _astroid_fields: - new_node.postinit(**postinit_params) - return new_node - - -def infer_random_sample(node, context=None): - if len(node.args) != 2: - raise astroid.UseInferenceDefault - - length = node.args[1] - if not isinstance(length, astroid.Const): - raise astroid.UseInferenceDefault - if not isinstance(length.value, int): - raise astroid.UseInferenceDefault - - inferred_sequence = helpers.safe_infer(node.args[0], context=context) - if not inferred_sequence: - raise 
astroid.UseInferenceDefault - - if not isinstance(inferred_sequence, ACCEPTED_ITERABLES_FOR_SAMPLE): - raise astroid.UseInferenceDefault - - if length.value > len(inferred_sequence.elts): - # In this case, this will raise a ValueError - raise astroid.UseInferenceDefault - - try: - elts = random.sample(inferred_sequence.elts, length.value) - except ValueError: - raise astroid.UseInferenceDefault - - new_node = astroid.List( - lineno=node.lineno, col_offset=node.col_offset, parent=node.scope() - ) - new_elts = [ - _clone_node_with_lineno(elt, parent=new_node, lineno=new_node.lineno) - for elt in elts - ] - new_node.postinit(new_elts) - return iter((new_node,)) - - -def _looks_like_random_sample(node): - func = node.func - if isinstance(func, astroid.Attribute): - return func.attrname == "sample" - if isinstance(func, astroid.Name): - return func.name == "sample" - return False - - -MANAGER.register_transform( - astroid.Call, astroid.inference_tip(infer_random_sample), _looks_like_random_sample -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_re.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_re.py deleted file mode 100644 index c7ee51a5..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_re.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER -import sys -import astroid - -PY36 = sys.version_info >= (3, 6) - -if PY36: - # Since Python 3.6 there is the RegexFlag enum - # where every entry will be exposed via updating globals() - - def _re_transform(): - return astroid.parse( - """ - import sre_compile - ASCII = sre_compile.SRE_FLAG_ASCII - IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE - LOCALE = sre_compile.SRE_FLAG_LOCALE - UNICODE = sre_compile.SRE_FLAG_UNICODE - MULTILINE = sre_compile.SRE_FLAG_MULTILINE - DOTALL = sre_compile.SRE_FLAG_DOTALL - VERBOSE = sre_compile.SRE_FLAG_VERBOSE - A = ASCII - I = IGNORECASE - L = LOCALE - U = UNICODE - M = MULTILINE - S = DOTALL - X = VERBOSE - TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE - T = TEMPLATE - DEBUG = sre_compile.SRE_FLAG_DEBUG - """ - ) - - astroid.register_module_extender(astroid.MANAGER, "re", _re_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_six.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_six.py deleted file mode 100644 index b342fbf5..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_six.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright (c) 2014-2016, 2018 Claudiu Popa -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2018 Bryce Guinta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -"""Astroid hooks for six module.""" - -from textwrap import dedent - -from astroid import MANAGER, register_module_extender -from astroid.builder import AstroidBuilder -from astroid.exceptions import ( - AstroidBuildingError, - InferenceError, - AttributeInferenceError, -) -from astroid import nodes - - -SIX_ADD_METACLASS = "six.add_metaclass" - - -def _indent(text, prefix, predicate=None): - """Adds 'prefix' to the beginning of selected lines in 'text'. - - If 'predicate' is provided, 'prefix' will only be added to the lines - where 'predicate(line)' is True. 
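
A minimal sketch of the inference-tip idiom used throughout these brain modules, assuming the astroid 2.x API visible in this diff; the `answer` call name is hypothetical:

    import astroid

    def _looks_like_answer_call(node):
        # Cheap syntactic gate: only fire on calls spelled `answer(...)`.
        func = node.func
        return isinstance(func, astroid.Name) and func.name == "answer"

    def _infer_answer_call(node, context=None):
        # Hand astroid a ready-made node to use as the call's inferred value.
        return iter([astroid.Const(42)])

    astroid.MANAGER.register_transform(
        astroid.Call,
        astroid.inference_tip(_infer_answer_call),
        _looks_like_answer_call,
    )

The predicate keeps the transform cheap; the tip itself may raise astroid.UseInferenceDefault to fall back to normal inference, as infer_random_sample does above.
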
If 'predicate' is not provided, - it will default to adding 'prefix' to all non-empty lines that do not - consist solely of whitespace characters. - """ - if predicate is None: - predicate = lambda line: line.strip() - - def prefixed_lines(): - for line in text.splitlines(True): - yield prefix + line if predicate(line) else line - - return "".join(prefixed_lines()) - - -_IMPORTS = """ -import _io -cStringIO = _io.StringIO -filter = filter -from itertools import filterfalse -input = input -from sys import intern -map = map -range = range -from imp import reload as reload_module -from functools import reduce -from shlex import quote as shlex_quote -from io import StringIO -from collections import UserDict, UserList, UserString -xrange = range -zip = zip -from itertools import zip_longest -import builtins -import configparser -import copyreg -import _dummy_thread -import http.cookiejar as http_cookiejar -import http.cookies as http_cookies -import html.entities as html_entities -import html.parser as html_parser -import http.client as http_client -import http.server as http_server -BaseHTTPServer = CGIHTTPServer = SimpleHTTPServer = http.server -import pickle as cPickle -import queue -import reprlib -import socketserver -import _thread -import winreg -import xmlrpc.server as xmlrpc_server -import xmlrpc.client as xmlrpc_client -import urllib.robotparser as urllib_robotparser -import email.mime.multipart as email_mime_multipart -import email.mime.nonmultipart as email_mime_nonmultipart -import email.mime.text as email_mime_text -import email.mime.base as email_mime_base -import urllib.parse as urllib_parse -import urllib.error as urllib_error -import tkinter -import tkinter.dialog as tkinter_dialog -import tkinter.filedialog as tkinter_filedialog -import tkinter.scrolledtext as tkinter_scrolledtext -import tkinter.simpledialog as tkinder_simpledialog -import tkinter.tix as tkinter_tix -import tkinter.ttk as tkinter_ttk -import tkinter.constants as tkinter_constants -import tkinter.dnd as tkinter_dnd -import tkinter.colorchooser as tkinter_colorchooser -import tkinter.commondialog as tkinter_commondialog -import tkinter.filedialog as tkinter_tkfiledialog -import tkinter.font as tkinter_font -import tkinter.messagebox as tkinter_messagebox -import urllib -import urllib.request as urllib_request -import urllib.robotparser as urllib_robotparser -import urllib.parse as urllib_parse -import urllib.error as urllib_error -""" - - -def six_moves_transform(): - code = dedent( - """ - class Moves(object): - {} - moves = Moves() - """ - ).format(_indent(_IMPORTS, " ")) - module = AstroidBuilder(MANAGER).string_build(code) - module.name = "six.moves" - return module - - -def _six_fail_hook(modname): - """Fix six.moves imports due to the dynamic nature of this - class. 
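
The other registration idiom in these files is the module extender: a function that parses a string of stub code and returns a module whose top-level names astroid merges into the real module's locals. A minimal sketch under the same API; `mylib` and `helper` are hypothetical names:

    import astroid

    def _mylib_transform():
        # Names defined here are merged into the locals of the real
        # `mylib` module whenever astroid loads it.
        return astroid.parse('''
        def helper(value):
            return value
        ''')

    astroid.register_module_extender(astroid.MANAGER, "mylib", _mylib_transform)
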
- - Construct a pseudo-module which contains all the necessary imports - for six - - :param modname: Name of failed module - :type modname: str - - :return: An astroid module - :rtype: nodes.Module - """ - - attribute_of = modname != "six.moves" and modname.startswith("six.moves") - if modname != "six.moves" and not attribute_of: - raise AstroidBuildingError(modname=modname) - module = AstroidBuilder(MANAGER).string_build(_IMPORTS) - module.name = "six.moves" - if attribute_of: - # Facilitate import of submodules in Moves - start_index = len(module.name) - attribute = modname[start_index:].lstrip(".").replace(".", "_") - try: - import_attr = module.getattr(attribute)[0] - except AttributeInferenceError: - raise AstroidBuildingError(modname=modname) - if isinstance(import_attr, nodes.Import): - submodule = MANAGER.ast_from_module_name(import_attr.names[0][0]) - return submodule - # Let dummy submodule imports pass through - # This will cause an Uninferable result, which is okay - return module - - -def _looks_like_decorated_with_six_add_metaclass(node): - if not node.decorators: - return False - - for decorator in node.decorators.nodes: - if not isinstance(decorator, nodes.Call): - continue - if decorator.func.as_string() == SIX_ADD_METACLASS: - return True - return False - - -def transform_six_add_metaclass(node): - """Check if the given class node is decorated with *six.add_metaclass* - - If so, inject its argument as the metaclass of the underlying class. - """ - if not node.decorators: - return - - for decorator in node.decorators.nodes: - if not isinstance(decorator, nodes.Call): - continue - - try: - func = next(decorator.func.infer()) - except InferenceError: - continue - if func.qname() == SIX_ADD_METACLASS and decorator.args: - metaclass = decorator.args[0] - node._metaclass = metaclass - return node - - -register_module_extender(MANAGER, "six", six_moves_transform) -register_module_extender( - MANAGER, "requests.packages.urllib3.packages.six", six_moves_transform -) -MANAGER.register_failed_import_hook(_six_fail_hook) -MANAGER.register_transform( - nodes.ClassDef, - transform_six_add_metaclass, - _looks_like_decorated_with_six_add_metaclass, -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_ssl.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_ssl.py deleted file mode 100644 index 893d8a2f..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_ssl.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright (c) 2016 Claudiu Popa -# Copyright (c) 2016 Ceridwen - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Astroid hooks for the ssl library.""" - -from astroid import MANAGER, register_module_extender -from astroid.builder import AstroidBuilder -from astroid import nodes -from astroid import parse - - -def ssl_transform(): - return parse( - """ - from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION - from _ssl import _SSLContext, MemoryBIO - from _ssl import ( - SSLError, SSLZeroReturnError, SSLWantReadError, SSLWantWriteError, - SSLSyscallError, SSLEOFError, - ) - from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED - from _ssl import txt2obj as _txt2obj, nid2obj as _nid2obj - from _ssl import RAND_status, RAND_add, RAND_bytes, RAND_pseudo_bytes - try: - from _ssl import RAND_egd - except ImportError: - # LibreSSL does not provide RAND_egd - pass - from _ssl import (OP_ALL, OP_CIPHER_SERVER_PREFERENCE, - 
OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3, - OP_NO_TLSv1, OP_NO_TLSv1_1, OP_NO_TLSv1_2, - OP_SINGLE_DH_USE, OP_SINGLE_ECDH_USE) - - from _ssl import (ALERT_DESCRIPTION_ACCESS_DENIED, ALERT_DESCRIPTION_BAD_CERTIFICATE, - ALERT_DESCRIPTION_BAD_CERTIFICATE_HASH_VALUE, - ALERT_DESCRIPTION_BAD_CERTIFICATE_STATUS_RESPONSE, - ALERT_DESCRIPTION_BAD_RECORD_MAC, - ALERT_DESCRIPTION_CERTIFICATE_EXPIRED, - ALERT_DESCRIPTION_CERTIFICATE_REVOKED, - ALERT_DESCRIPTION_CERTIFICATE_UNKNOWN, - ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE, - ALERT_DESCRIPTION_CLOSE_NOTIFY, ALERT_DESCRIPTION_DECODE_ERROR, - ALERT_DESCRIPTION_DECOMPRESSION_FAILURE, - ALERT_DESCRIPTION_DECRYPT_ERROR, - ALERT_DESCRIPTION_HANDSHAKE_FAILURE, - ALERT_DESCRIPTION_ILLEGAL_PARAMETER, - ALERT_DESCRIPTION_INSUFFICIENT_SECURITY, - ALERT_DESCRIPTION_INTERNAL_ERROR, - ALERT_DESCRIPTION_NO_RENEGOTIATION, - ALERT_DESCRIPTION_PROTOCOL_VERSION, - ALERT_DESCRIPTION_RECORD_OVERFLOW, - ALERT_DESCRIPTION_UNEXPECTED_MESSAGE, - ALERT_DESCRIPTION_UNKNOWN_CA, - ALERT_DESCRIPTION_UNKNOWN_PSK_IDENTITY, - ALERT_DESCRIPTION_UNRECOGNIZED_NAME, - ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE, - ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION, - ALERT_DESCRIPTION_USER_CANCELLED) - from _ssl import (SSL_ERROR_EOF, SSL_ERROR_INVALID_ERROR_CODE, SSL_ERROR_SSL, - SSL_ERROR_SYSCALL, SSL_ERROR_WANT_CONNECT, SSL_ERROR_WANT_READ, - SSL_ERROR_WANT_WRITE, SSL_ERROR_WANT_X509_LOOKUP, SSL_ERROR_ZERO_RETURN) - from _ssl import VERIFY_CRL_CHECK_CHAIN, VERIFY_CRL_CHECK_LEAF, VERIFY_DEFAULT, VERIFY_X509_STRICT - from _ssl import HAS_SNI, HAS_ECDH, HAS_NPN, HAS_ALPN - from _ssl import _OPENSSL_API_VERSION - from _ssl import PROTOCOL_SSLv23, PROTOCOL_TLSv1, PROTOCOL_TLSv1_1, PROTOCOL_TLSv1_2 - from _ssl import PROTOCOL_TLS, PROTOCOL_TLS_CLIENT, PROTOCOL_TLS_SERVER - """ - ) - - -register_module_extender(MANAGER, "ssl", ssl_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_subprocess.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_subprocess.py deleted file mode 100644 index c14dc55a..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_subprocess.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) 2016-2017 Claudiu Popa -# Copyright (c) 2017 Hugo -# Copyright (c) 2018 Bryce Guinta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -import sys -import textwrap - -import astroid - - -PY37 = sys.version_info >= (3, 7) -PY36 = sys.version_info >= (3, 6) - - -def _subprocess_transform(): - communicate = (bytes("string", "ascii"), bytes("string", "ascii")) - communicate_signature = "def communicate(self, input=None, timeout=None)" - if PY37: - init = """ - def __init__(self, args, bufsize=0, executable=None, - stdin=None, stdout=None, stderr=None, - preexec_fn=None, close_fds=False, shell=False, - cwd=None, env=None, universal_newlines=False, - startupinfo=None, creationflags=0, restore_signals=True, - start_new_session=False, pass_fds=(), *, - encoding=None, errors=None, text=None): - pass - """ - elif PY36: - init = """ - def __init__(self, args, bufsize=0, executable=None, - stdin=None, stdout=None, stderr=None, - preexec_fn=None, close_fds=False, shell=False, - cwd=None, env=None, universal_newlines=False, - startupinfo=None, creationflags=0, restore_signals=True, - start_new_session=False, pass_fds=(), *, - encoding=None, errors=None): - pass - """ - else: - init = """ - def __init__(self, args, bufsize=0, executable=None, - 
stdin=None, stdout=None, stderr=None, - preexec_fn=None, close_fds=False, shell=False, - cwd=None, env=None, universal_newlines=False, - startupinfo=None, creationflags=0, restore_signals=True, - start_new_session=False, pass_fds=()): - pass - """ - wait_signature = "def wait(self, timeout=None)" - ctx_manager = """ - def __enter__(self): return self - def __exit__(self, *args): pass - """ - py3_args = "args = []" - code = textwrap.dedent( - """ - def check_output( - args, *, - stdin=None, - stderr=None, - shell=False, - cwd=None, - encoding=None, - errors=None, - universal_newlines=False, - timeout=None, - env=None - ): - - if universal_newlines: - return "" - return b"" - class Popen(object): - returncode = pid = 0 - stdin = stdout = stderr = file() - %(py3_args)s - - %(communicate_signature)s: - return %(communicate)r - %(wait_signature)s: - return self.returncode - def poll(self): - return self.returncode - def send_signal(self, signal): - pass - def terminate(self): - pass - def kill(self): - pass - %(ctx_manager)s - """ - % { - "communicate": communicate, - "communicate_signature": communicate_signature, - "wait_signature": wait_signature, - "ctx_manager": ctx_manager, - "py3_args": py3_args, - } - ) - - init_lines = textwrap.dedent(init).splitlines() - indented_init = "\n".join(" " * 4 + line for line in init_lines) - code += indented_init - return astroid.parse(code) - - -astroid.register_module_extender(astroid.MANAGER, "subprocess", _subprocess_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_threading.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_threading.py deleted file mode 100644 index dffa55ad..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_threading.py +++ /dev/null @@ -1,31 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2016 Claudiu Popa -# Copyright (c) 2017 Łukasz Rogalski - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER -import astroid - - -def _thread_transform(): - return astroid.parse( - """ - class lock(object): - def acquire(self, blocking=True, timeout=-1): - pass - def release(self): - pass - def __enter__(self): - return True - def __exit__(self, *args): - pass - def locked(self): - return False - - def Lock(): - return lock() - """ - ) - - -astroid.register_module_extender(astroid.MANAGER, "threading", _thread_transform) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_typing.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_typing.py deleted file mode 100644 index 9ff72274..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_typing.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2017-2018 Claudiu Popa -# Copyright (c) 2017 Łukasz Rogalski -# Copyright (c) 2017 David Euresti -# Copyright (c) 2018 Bryce Guinta - -"""Astroid hooks for typing.py support.""" -import typing - -from astroid import ( - MANAGER, - UseInferenceDefault, - extract_node, - inference_tip, - nodes, - InferenceError, -) - - -TYPING_NAMEDTUPLE_BASENAMES = {"NamedTuple", "typing.NamedTuple"} -TYPING_TYPEVARS = {"TypeVar", "NewType"} -TYPING_TYPEVARS_QUALIFIED = {"typing.TypeVar", "typing.NewType"} -TYPING_TYPE_TEMPLATE = """ -class Meta(type): - def __getitem__(self, item): - return self - - @property - def __args__(self): - return () - -class {0}(metaclass=Meta): - pass -""" -TYPING_MEMBERS = set(typing.__all__) - - -def 
looks_like_typing_typevar_or_newtype(node): - func = node.func - if isinstance(func, nodes.Attribute): - return func.attrname in TYPING_TYPEVARS - if isinstance(func, nodes.Name): - return func.name in TYPING_TYPEVARS - return False - - -def infer_typing_typevar_or_newtype(node, context=None): - """Infer a typing.TypeVar(...) or typing.NewType(...) call""" - try: - func = next(node.func.infer(context=context)) - except InferenceError as exc: - raise UseInferenceDefault from exc - - if func.qname() not in TYPING_TYPEVARS_QUALIFIED: - raise UseInferenceDefault - if not node.args: - raise UseInferenceDefault - - typename = node.args[0].as_string().strip("'") - node = extract_node(TYPING_TYPE_TEMPLATE.format(typename)) - return node.infer(context=context) - - -def _looks_like_typing_subscript(node): - """Try to figure out if a Subscript node *might* be a typing-related subscript""" - if isinstance(node, nodes.Name): - return node.name in TYPING_MEMBERS - elif isinstance(node, nodes.Attribute): - return node.attrname in TYPING_MEMBERS - elif isinstance(node, nodes.Subscript): - return _looks_like_typing_subscript(node.value) - return False - - -def infer_typing_attr(node, context=None): - """Infer a typing.X[...] subscript""" - try: - value = next(node.value.infer()) - except InferenceError as exc: - raise UseInferenceDefault from exc - - if not value.qname().startswith("typing."): - raise UseInferenceDefault - - node = extract_node(TYPING_TYPE_TEMPLATE.format(value.qname().split(".")[-1])) - return node.infer(context=context) - - -MANAGER.register_transform( - nodes.Call, - inference_tip(infer_typing_typevar_or_newtype), - looks_like_typing_typevar_or_newtype, -) -MANAGER.register_transform( - nodes.Subscript, inference_tip(infer_typing_attr), _looks_like_typing_subscript -) diff --git a/venv/lib/python3.8/site-packages/astroid/brain/brain_uuid.py b/venv/lib/python3.8/site-packages/astroid/brain/brain_uuid.py deleted file mode 100644 index 8bda631d..00000000 --- a/venv/lib/python3.8/site-packages/astroid/brain/brain_uuid.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright (c) 2017 Claudiu Popa - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Astroid hooks for the UUID module.""" - - -from astroid import MANAGER -from astroid import nodes - - -def _patch_uuid_class(node): - # The .int member is patched using __dict__ - node.locals["int"] = [nodes.Const(0, parent=node)] - - -MANAGER.register_transform( - nodes.ClassDef, _patch_uuid_class, lambda node: node.qname() == "uuid.UUID" -) diff --git a/venv/lib/python3.8/site-packages/astroid/builder.py b/venv/lib/python3.8/site-packages/astroid/builder.py deleted file mode 100644 index ac71093d..00000000 --- a/venv/lib/python3.8/site-packages/astroid/builder.py +++ /dev/null @@ -1,435 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2006-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2013 Phil Schaf -# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014-2015 Google, Inc. 
-# Copyright (c) 2014 Alexander Presnyakov -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2016 Derek Gustafson -# Copyright (c) 2017 Łukasz Rogalski -# Copyright (c) 2018 Anthony Sottile - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""The AstroidBuilder makes astroid from living object and / or from _ast - -The builder is not thread safe and can't be used to parse different sources -at the same time. -""" - -import os -import textwrap -from tokenize import detect_encoding - -from astroid._ast import _parse -from astroid import bases -from astroid import exceptions -from astroid import manager -from astroid import modutils -from astroid import raw_building -from astroid import rebuilder -from astroid import nodes -from astroid import util - -# The name of the transient function that is used to -# wrap expressions to be extracted when calling -# extract_node. -_TRANSIENT_FUNCTION = "__" - -# The comment used to select a statement to be extracted -# when calling extract_node. -_STATEMENT_SELECTOR = "#@" - -MANAGER = manager.AstroidManager() - - -def open_source_file(filename): - with open(filename, "rb") as byte_stream: - encoding = detect_encoding(byte_stream.readline)[0] - stream = open(filename, "r", newline=None, encoding=encoding) - data = stream.read() - return stream, encoding, data - - -def _can_assign_attr(node, attrname): - try: - slots = node.slots() - except NotImplementedError: - pass - else: - if slots and attrname not in {slot.value for slot in slots}: - return False - return True - - -class AstroidBuilder(raw_building.InspectBuilder): - """Class for building an astroid tree from source code or from a live module. - - The param *manager* specifies the manager class which should be used. - If no manager is given, then the default one will be used. The - param *apply_transforms* determines if the transforms should be - applied after the tree was built from source or from a live object, - by default being True. - """ - - # pylint: disable=redefined-outer-name - def __init__(self, manager=None, apply_transforms=True): - super(AstroidBuilder, self).__init__() - self._manager = manager or MANAGER - self._apply_transforms = apply_transforms - - def module_build(self, module, modname=None): - """Build an astroid from a living module instance.""" - node = None - path = getattr(module, "__file__", None) - if path is not None: - path_, ext = os.path.splitext(modutils._path_from_filename(path)) - if ext in (".py", ".pyc", ".pyo") and os.path.exists(path_ + ".py"): - node = self.file_build(path_ + ".py", modname) - if node is None: - # this is a built-in module - # get a partial representation by introspection - node = self.inspect_build(module, modname=modname, path=path) - if self._apply_transforms: - # We have to handle transformation by ourselves since the - # rebuilder isn't called for builtin nodes - node = self._manager.visit_transforms(node) - return node - - def file_build(self, path, modname=None): - """Build astroid from a source code file (i.e. 
from an ast) - - *path* is expected to be a python source file - """ - try: - stream, encoding, data = open_source_file(path) - except IOError as exc: - raise exceptions.AstroidBuildingError( - "Unable to load file {path}:\n{error}", - modname=modname, - path=path, - error=exc, - ) from exc - except (SyntaxError, LookupError) as exc: - raise exceptions.AstroidSyntaxError( - "Python 3 encoding specification error or unknown encoding:\n" - "{error}", - modname=modname, - path=path, - error=exc, - ) from exc - except UnicodeError as exc: # wrong encoding - # detect_encoding returns utf-8 if no encoding specified - raise exceptions.AstroidBuildingError( - "Wrong or no encoding specified for {filename}.", filename=path - ) from exc - with stream: - # get module name if necessary - if modname is None: - try: - modname = ".".join(modutils.modpath_from_file(path)) - except ImportError: - modname = os.path.splitext(os.path.basename(path))[0] - # build astroid representation - module = self._data_build(data, modname, path) - return self._post_build(module, encoding) - - def string_build(self, data, modname="", path=None): - """Build astroid from source code string.""" - module = self._data_build(data, modname, path) - module.file_bytes = data.encode("utf-8") - return self._post_build(module, "utf-8") - - def _post_build(self, module, encoding): - """Handles encoding and delayed nodes after a module has been built""" - module.file_encoding = encoding - self._manager.cache_module(module) - # post tree building steps after we stored the module in the cache: - for from_node in module._import_from_nodes: - if from_node.modname == "__future__": - for symbol, _ in from_node.names: - module.future_imports.add(symbol) - self.add_from_names_to_locals(from_node) - # handle delayed assattr nodes - for delayed in module._delayed_assattr: - self.delayed_assattr(delayed) - - # Visit the transforms - if self._apply_transforms: - module = self._manager.visit_transforms(module) - return module - - def _data_build(self, data, modname, path): - """Build tree node from data and add some informations""" - try: - node = _parse(data + "\n") - except (TypeError, ValueError, SyntaxError) as exc: - raise exceptions.AstroidSyntaxError( - "Parsing Python code failed:\n{error}", - source=data, - modname=modname, - path=path, - error=exc, - ) from exc - if path is not None: - node_file = os.path.abspath(path) - else: - node_file = "" - if modname.endswith(".__init__"): - modname = modname[:-9] - package = True - else: - package = ( - path is not None - and os.path.splitext(os.path.basename(path))[0] == "__init__" - ) - builder = rebuilder.TreeRebuilder(self._manager) - module = builder.visit_module(node, modname, node_file, package) - module._import_from_nodes = builder._import_from_nodes - module._delayed_assattr = builder._delayed_assattr - return module - - def add_from_names_to_locals(self, node): - """Store imported names to the locals - - Resort the locals if coming from a delayed node - """ - _key_func = lambda node: node.fromlineno - - def sort_locals(my_list): - my_list.sort(key=_key_func) - - for (name, asname) in node.names: - if name == "*": - try: - imported = node.do_import_module() - except exceptions.AstroidBuildingError: - continue - for name in imported.public_names(): - node.parent.set_local(name, node) - sort_locals(node.parent.scope().locals[name]) - else: - node.parent.set_local(asname or name, node) - sort_locals(node.parent.scope().locals[asname or name]) - - def delayed_assattr(self, node): - """Visit a 
AssAttr node - - This adds the name to locals and handles member definitions. - """ - try: - frame = node.frame() - for inferred in node.expr.infer(): - if inferred is util.Uninferable: - continue - try: - if inferred.__class__ is bases.Instance: - inferred = inferred._proxied - iattrs = inferred.instance_attrs - if not _can_assign_attr(inferred, node.attrname): - continue - elif isinstance(inferred, bases.Instance): - # Const, Tuple, ... we may be wrong, or we may not, but - # anyway we don't want to pollute builtin's namespace - continue - elif inferred.is_function: - iattrs = inferred.instance_attrs - else: - iattrs = inferred.locals - except AttributeError: - # XXX log error - continue - values = iattrs.setdefault(node.attrname, []) - if node in values: - continue - # get assign in __init__ first XXX useful ? - if ( - frame.name == "__init__" - and values - and values[0].frame().name != "__init__" - ): - values.insert(0, node) - else: - values.append(node) - except exceptions.InferenceError: - pass - - -def build_namespace_package_module(name, path): - return nodes.Module(name, doc="", path=path, package=True) - - -def parse(code, module_name="", path=None, apply_transforms=True): - """Parses a source string in order to obtain an astroid AST from it - - :param str code: The code for the module. - :param str module_name: The name for the module, if any - :param str path: The path for the module - :param bool apply_transforms: - Apply the transforms to the given code. Set it to False if you - don't want the default transforms to be applied. - """ - code = textwrap.dedent(code) - builder = AstroidBuilder(manager=MANAGER, apply_transforms=apply_transforms) - return builder.string_build(code, modname=module_name, path=path) - - -def _extract_expressions(node): - """Find expressions in a call to _TRANSIENT_FUNCTION and extract them. - - The function walks the AST recursively to search for expressions that - are wrapped into a call to _TRANSIENT_FUNCTION. If it finds such an - expression, it completely removes the function call node from the tree, - replacing it by the wrapped expression inside the parent. - - :param node: An astroid node. - :type node: astroid.bases.NodeNG - :yields: The sequence of wrapped expressions found on the - modified tree. - """ - if ( - isinstance(node, nodes.Call) - and isinstance(node.func, nodes.Name) - and node.func.name == _TRANSIENT_FUNCTION - ): - real_expr = node.args[0] - real_expr.parent = node.parent - # Search for node in all _astroid_fields (the fields checked when - # get_children is called) of its parent. Some of those fields may - # be lists or tuples, in which case the elements need to be checked. - # When we find it, replace it by real_expr, so that the AST looks - # like no call to _TRANSIENT_FUNCTION ever took place. - for name in node.parent._astroid_fields: - child = getattr(node.parent, name) - if isinstance(child, (list, tuple)): - for idx, compound_child in enumerate(child): - if compound_child is node: - child[idx] = real_expr - elif child is node: - setattr(node.parent, name, real_expr) - yield real_expr - else: - for child in node.get_children(): - yield from _extract_expressions(child) - - -def _find_statement_by_line(node, line): - """Extracts the statement on a specific line from an AST. - - If the line number of node matches line, it will be returned; - otherwise its children are iterated and the function is called - recursively. - - :param node: An astroid node.
- :type node: astroid.bases.NodeNG - :param line: The line number of the statement to extract. - :type line: int - :returns: The statement on the line, or None if no statement for the line - can be found. - :rtype: astroid.bases.NodeNG or None - """ - if isinstance(node, (nodes.ClassDef, nodes.FunctionDef)): - # This is an inaccuracy in the AST: the nodes that can be - # decorated do not carry explicit information on which line - # the actual definition (class/def) starts, but .fromlineno seems to - # be close enough. - node_line = node.fromlineno - else: - node_line = node.lineno - - if node_line == line: - return node - - for child in node.get_children(): - result = _find_statement_by_line(child, line) - if result: - return result - - return None - - -def extract_node(code, module_name=""): - """Parses some Python code as a module and extracts a designated AST node. - - Statements: - To extract one or more statement nodes, append #@ to the end of the line - - Examples: - >>> def x(): - >>> def y(): - >>> return 1 #@ - - The return statement will be extracted. - - >>> class X(object): - >>> def meth(self): #@ - >>> pass - - The function object 'meth' will be extracted. - - Expressions: - To extract arbitrary expressions, surround them with the fake - function call __(...). After parsing, the surrounded expression - will be returned and the whole AST (accessible via the returned - node's parent attribute) will look like the function call was - never there in the first place. - - Examples: - >>> a = __(1) - - The const node will be extracted. - - >>> def x(d=__(foo.bar)): pass - - The node containing the default argument will be extracted. - - >>> def foo(a, b): - >>> return 0 < __(len(a)) < b - - The node containing the function call 'len' will be extracted. - - If no statements or expressions are selected, the last toplevel - statement will be returned. - - If the selected statement is a discard statement (i.e. an expression - turned into a statement), the wrapped expression is returned instead. - - For convenience, singleton lists are unpacked. - - :param str code: A piece of Python code that is parsed as - a module. Will be passed through textwrap.dedent first. - :param str module_name: The name of the module. - :returns: The designated node from the parse tree, or a list of nodes. - :rtype: astroid.bases.NodeNG, or a list of nodes. - """ - - def _extract(node): - if isinstance(node, nodes.Expr): - return node.value - - return node - - requested_lines = [] - for idx, line in enumerate(code.splitlines()): - if line.strip().endswith(_STATEMENT_SELECTOR): - requested_lines.append(idx + 1) - - tree = parse(code, module_name=module_name) - if not tree.body: - raise ValueError("Empty tree, cannot extract from it") - - extracted = [] - if requested_lines: - extracted = [_find_statement_by_line(tree, line) for line in requested_lines] - - # Modifies the tree.
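# Editorial note (added in review, not part of the deleted upstream source):
# _extract_expressions is a generator consumed eagerly by the extend() call
# below, and it works by side effect as well: every __(...) wrapper it finds
# is spliced out of the tree, so the parents of the yielded expressions no
# longer contain any call to _TRANSIENT_FUNCTION.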
- extracted.extend(_extract_expressions(tree)) - - if not extracted: - extracted.append(tree.body[-1]) - - extracted = [_extract(node) for node in extracted] - if len(extracted) == 1: - return extracted[0] - return extracted diff --git a/venv/lib/python3.8/site-packages/astroid/context.py b/venv/lib/python3.8/site-packages/astroid/context.py deleted file mode 100644 index 70a92088..00000000 --- a/venv/lib/python3.8/site-packages/astroid/context.py +++ /dev/null @@ -1,179 +0,0 @@ -# Copyright (c) 2015-2016, 2018 Claudiu Popa -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2018 Bryce Guinta -# Copyright (c) 2018 Nick Drozd - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Various context related utilities, including inference and call contexts.""" -import contextlib -import pprint -from typing import Optional - - -class InferenceContext: - """Provide context for inference - - Store already inferred nodes to save time - Account for already visited nodes to stop infinite recursion - """ - - __slots__ = ( - "path", - "lookupname", - "callcontext", - "boundnode", - "inferred", - "extra_context", - ) - - def __init__(self, path=None, inferred=None): - self.path = path or set() - """ - :type: set(tuple(NodeNG, optional(str))) - - Path of visited nodes and their lookupname - - Currently this key is ``(node, context.lookupname)`` - """ - self.lookupname = None - """ - :type: optional[str] - - The original name of the node - - e.g. - foo = 1 - The inference of 'foo' is nodes.Const(1) but the lookup name is 'foo' - """ - self.callcontext = None - """ - :type: optional[CallContext] - - The call arguments and keywords for the given context - """ - self.boundnode = None - """ - :type: optional[NodeNG] - - The bound node of the given context - - e.g. the bound node of object.__new__(cls) is the object node - """ - self.inferred = inferred or {} - """ - :type: dict(seq, seq) - - Inferred node contexts to their mapped results - Currently the key is ``(node, lookupname, callcontext, boundnode)`` - and the value is a tuple of the inferred results - """ - self.extra_context = {} - """ - :type: dict(NodeNG, Context) - - Context that needs to be passed down through call stacks - for call arguments - """ - - def push(self, node): - """Push node into inference path - - :return: True if node is already in context path else False - :rtype: bool - - Allows one to see if the given node has already - been looked at for this inference context""" - name = self.lookupname - if (node, name) in self.path: - return True - - self.path.add((node, name)) - return False - - def clone(self): - """Clone inference path - - For example, each side of a binary operation (BinOp) - starts with the same context but diverges as each side is inferred, - so the InferenceContext will need to be cloned""" - # XXX copy lookupname/callcontext ?
- clone = InferenceContext(self.path, inferred=self.inferred) - clone.callcontext = self.callcontext - clone.boundnode = self.boundnode - clone.extra_context = self.extra_context - return clone - - def cache_generator(self, key, generator): - """Cache result of generator into dictionary - - Used to cache inference results""" - results = [] - for result in generator: - results.append(result) - yield result - - self.inferred[key] = tuple(results) - - @contextlib.contextmanager - def restore_path(self): - path = set(self.path) - yield - self.path = path - - def __str__(self): - state = ( - "%s=%s" - % (field, pprint.pformat(getattr(self, field), width=80 - len(field))) - for field in self.__slots__ - ) - return "%s(%s)" % (type(self).__name__, ",\n ".join(state)) - - -class CallContext: - """Holds information for a call site.""" - - __slots__ = ("args", "keywords") - - def __init__(self, args, keywords=None): - """ - :param List[NodeNG] args: Call positional arguments - :param Union[List[nodes.Keyword], None] keywords: Call keywords - """ - self.args = args - if keywords: - keywords = [(arg.arg, arg.value) for arg in keywords] - else: - keywords = [] - self.keywords = keywords - - -def copy_context(context: Optional[InferenceContext]) -> InferenceContext: - """Clone a context if given, or return a fresh context""" - if context is not None: - return context.clone() - - return InferenceContext() - - -def bind_context_to_node(context, node): - """Give a context a boundnode - to retrieve the correct function name or attribute value - from further inference. - - Do not use an existing context since the boundnode could then - be incorrectly propagated higher up in the call stack. - - :param context: Context to use - :type context: Optional(context) - - :param node: Node to do name lookups from - :type node: NodeNG - - :returns: A new context - :rtype: InferenceContext - """ - context = copy_context(context) - context.boundnode = node - return context diff --git a/venv/lib/python3.8/site-packages/astroid/decorators.py b/venv/lib/python3.8/site-packages/astroid/decorators.py deleted file mode 100644 index 14487570..00000000 --- a/venv/lib/python3.8/site-packages/astroid/decorators.py +++ /dev/null @@ -1,141 +0,0 @@ -# Copyright (c) 2015-2016, 2018 Claudiu Popa -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 Florian Bruhin -# Copyright (c) 2016 Derek Gustafson -# Copyright (c) 2018 Nick Drozd -# Copyright (c) 2018 Ashley Whetter -# Copyright (c) 2018 HoverHell -# Copyright (c) 2018 Bryce Guinta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -""" A few useful function/method decorators.""" - -import functools - -import wrapt - -from astroid import context as contextmod -from astroid import exceptions -from astroid import util - - -@wrapt.decorator -def cached(func, instance, args, kwargs): - """Simple decorator to cache result of method calls without args.""" - cache = getattr(instance, "__cache", None) - if cache is None: - instance.__cache = cache = {} - try: - return cache[func] - except KeyError: - cache[func] = result = func(*args, **kwargs) - return result - - -class cachedproperty: - """ Provides a cached property equivalent to the stacking of - @cached and @property, but more efficient. - - After first usage, the <property name> becomes part of the object's - __dict__. Doing: - - del obj.<property name> - - empties the cache. - - Idea taken from the pyramid_ framework and the mercurial_ project.
- - .. _pyramid: http://pypi.python.org/pypi/pyramid - .. _mercurial: http://pypi.python.org/pypi/Mercurial - """ - - __slots__ = ("wrapped",) - - def __init__(self, wrapped): - try: - wrapped.__name__ - except AttributeError as exc: - raise TypeError("%s must have a __name__ attribute" % wrapped) from exc - self.wrapped = wrapped - - @property - def __doc__(self): - doc = getattr(self.wrapped, "__doc__", None) - return "<wrapped by the cachedproperty decorator>%s" % ( - "\n%s" % doc if doc else "" - ) - - def __get__(self, inst, objtype=None): - if inst is None: - return self - val = self.wrapped(inst) - setattr(inst, self.wrapped.__name__, val) - return val - - -def path_wrapper(func): - """return the given infer function wrapped to handle the path - - Used to stop inference if the node has already been looked - at for a given `InferenceContext` to prevent infinite recursion - """ - - @functools.wraps(func) - def wrapped(node, context=None, _func=func, **kwargs): - """wrapper function handling context""" - if context is None: - context = contextmod.InferenceContext() - if context.push(node): - return None - - yielded = set() - generator = _func(node, context, **kwargs) - try: - while True: - res = next(generator) - # unproxy only true instance, not const, tuple, dict... - if res.__class__.__name__ == "Instance": - ares = res._proxied - else: - ares = res - if ares not in yielded: - yield res - yielded.add(ares) - except StopIteration as error: - if error.args: - return error.args[0] - return None - - return wrapped - - -@wrapt.decorator -def yes_if_nothing_inferred(func, instance, args, kwargs): - generator = func(*args, **kwargs) - - try: - yield next(generator) - except StopIteration: - # generator is empty - yield util.Uninferable - return - - yield from generator - - -@wrapt.decorator -def raise_if_nothing_inferred(func, instance, args, kwargs): - generator = func(*args, **kwargs) - - try: - yield next(generator) - except StopIteration as error: - # generator is empty - if error.args: - # pylint: disable=not-a-mapping - raise exceptions.InferenceError(**error.args[0]) - raise exceptions.InferenceError( - "StopIteration raised without any error information." - ) - - yield from generator diff --git a/venv/lib/python3.8/site-packages/astroid/exceptions.py b/venv/lib/python3.8/site-packages/astroid/exceptions.py deleted file mode 100644 index 7e9d655e..00000000 --- a/venv/lib/python3.8/site-packages/astroid/exceptions.py +++ /dev/null @@ -1,230 +0,0 @@ -# Copyright (c) 2007, 2009-2010, 2013 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2014 Google, Inc. -# Copyright (c) 2015-2018 Claudiu Popa -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2016 Derek Gustafson -# Copyright (c) 2018 Bryce Guinta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""this module contains exceptions used in the astroid library -""" -from astroid import util - - -class AstroidError(Exception): - """base exception class for all astroid related exceptions - - AstroidError and its subclasses are structured, intended to hold - objects representing state when the exception is thrown. Field - values are passed to the constructor as keyword-only arguments. - Each subclass has its own set of standard fields, but use your - best judgment to decide whether a specific exception instance - needs more or fewer fields for debugging.
Field values may be - used to lazily generate the error message: self.message.format() - will be called with the field names and values supplied as keyword - arguments. - """ - - def __init__(self, message="", **kws): - super(AstroidError, self).__init__(message) - self.message = message - for key, value in kws.items(): - setattr(self, key, value) - - def __str__(self): - return self.message.format(**vars(self)) - - -class AstroidBuildingError(AstroidError): - """exception class when we are unable to build an astroid representation - - Standard attributes: - modname: Name of the module that AST construction failed for. - error: Exception raised during construction. - """ - - def __init__(self, message="Failed to import module {modname}.", **kws): - super(AstroidBuildingError, self).__init__(message, **kws) - - -class AstroidImportError(AstroidBuildingError): - """Exception class used when a module can't be imported by astroid.""" - - -class TooManyLevelsError(AstroidImportError): - """Exception class which is raised when a relative import was beyond the top-level. - - Standard attributes: - level: The level which was attempted. - name: the name of the module on which the relative import was attempted. - """ - - level = None - name = None - - def __init__( - self, - message="Relative import with too many levels " "({level}) for module {name!r}", - **kws - ): - super(TooManyLevelsError, self).__init__(message, **kws) - - -class AstroidSyntaxError(AstroidBuildingError): - """Exception class used when a module can't be parsed.""" - - -class NoDefault(AstroidError): - """raised by function's `default_value` method when an argument has - no default value - - Standard attributes: - func: Function node. - name: Name of argument without a default. - """ - - func = None - name = None - - def __init__(self, message="{func!r} has no default for {name!r}.", **kws): - super(NoDefault, self).__init__(message, **kws) - - -class ResolveError(AstroidError): - """Base class of astroid resolution/inference error. - - ResolveError is not intended to be raised. - - Standard attributes: - context: InferenceContext object. - """ - - context = None - - -class MroError(ResolveError): - """Error raised when there is a problem with method resolution of a class. - - Standard attributes: - mros: A sequence of sequences containing ClassDef nodes. - cls: ClassDef node whose MRO resolution failed. - context: InferenceContext object. - """ - - mros = () - cls = None - - def __str__(self): - mro_names = ", ".join( - "({})".format(", ".join(b.name for b in m)) for m in self.mros - ) - return self.message.format(mros=mro_names, cls=self.cls) - - -class DuplicateBasesError(MroError): - """Error raised when there are duplicate bases in the same class bases.""" - - -class InconsistentMroError(MroError): - """Error raised when a class's MRO is inconsistent.""" - - -class SuperError(ResolveError): - """Error raised when there is a problem with a *super* call. - - Standard attributes: - *super_*: The Super instance that raised the exception. - context: InferenceContext object. - """ - - super_ = None - - def __str__(self): - return self.message.format(**vars(self.super_)) - - -class InferenceError(ResolveError): - """raised when we are unable to infer a node - - Standard attributes: - node: The node inference was called on. - context: InferenceContext object. 
- """ - - node = None - context = None - - def __init__(self, message="Inference failed for {node!r}.", **kws): - super(InferenceError, self).__init__(message, **kws) - - -# Why does this inherit from InferenceError rather than ResolveError? -# Changing it causes some inference tests to fail. -class NameInferenceError(InferenceError): - """Raised when a name lookup fails, corresponds to NameError. - - Standard attributes: - name: The name for which lookup failed, as a string. - scope: The node representing the scope in which the lookup occurred. - context: InferenceContext object. - """ - - name = None - scope = None - - def __init__(self, message="{name!r} not found in {scope!r}.", **kws): - super(NameInferenceError, self).__init__(message, **kws) - - -class AttributeInferenceError(ResolveError): - """Raised when an attribute lookup fails, corresponds to AttributeError. - - Standard attributes: - target: The node for which lookup failed. - attribute: The attribute for which lookup failed, as a string. - context: InferenceContext object. - """ - - target = None - attribute = None - - def __init__(self, message="{attribute!r} not found on {target!r}.", **kws): - super(AttributeInferenceError, self).__init__(message, **kws) - - -class UseInferenceDefault(Exception): - """exception to be raised in custom inference function to indicate that it - should go back to the default behaviour - """ - - -class _NonDeducibleTypeHierarchy(Exception): - """Raised when is_subtype / is_supertype can't deduce the relation between two types.""" - - -class AstroidIndexError(AstroidError): - """Raised when an Indexable / Mapping does not have an index / key.""" - - -class AstroidTypeError(AstroidError): - """Raised when a TypeError would be expected in Python code.""" - - -class InferenceOverwriteError(AstroidError): - """Raised when an inference tip is overwritten - - Currently only used for debugging. - """ - - -# Backwards-compatibility aliases -OperationError = util.BadOperationMessage -UnaryOperationError = util.BadUnaryOperationMessage -BinaryOperationError = util.BadBinaryOperationMessage - -SuperArgumentTypeError = SuperError -UnresolvableName = NameInferenceError -NotFoundError = AttributeInferenceError -AstroidBuildingException = AstroidBuildingError diff --git a/venv/lib/python3.8/site-packages/astroid/helpers.py b/venv/lib/python3.8/site-packages/astroid/helpers.py deleted file mode 100644 index be133b38..00000000 --- a/venv/lib/python3.8/site-packages/astroid/helpers.py +++ /dev/null @@ -1,273 +0,0 @@ -# Copyright (c) 2015-2018 Claudiu Popa -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2018 Bryce Guinta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -""" -Various helper utilities. 
-""" - -import builtins as builtins_mod - -from astroid import bases -from astroid import context as contextmod -from astroid import exceptions -from astroid import manager -from astroid import nodes -from astroid import raw_building -from astroid import scoped_nodes -from astroid import util - - -BUILTINS = builtins_mod.__name__ - - -def _build_proxy_class(cls_name, builtins): - proxy = raw_building.build_class(cls_name) - proxy.parent = builtins - return proxy - - -def _function_type(function, builtins): - if isinstance(function, scoped_nodes.Lambda): - if function.root().name == BUILTINS: - cls_name = "builtin_function_or_method" - else: - cls_name = "function" - elif isinstance(function, bases.BoundMethod): - cls_name = "method" - elif isinstance(function, bases.UnboundMethod): - cls_name = "function" - return _build_proxy_class(cls_name, builtins) - - -def _object_type(node, context=None): - astroid_manager = manager.AstroidManager() - builtins = astroid_manager.builtins_module - context = context or contextmod.InferenceContext() - - for inferred in node.infer(context=context): - if isinstance(inferred, scoped_nodes.ClassDef): - if inferred.newstyle: - metaclass = inferred.metaclass(context=context) - if metaclass: - yield metaclass - continue - yield builtins.getattr("type")[0] - elif isinstance(inferred, (scoped_nodes.Lambda, bases.UnboundMethod)): - yield _function_type(inferred, builtins) - elif isinstance(inferred, scoped_nodes.Module): - yield _build_proxy_class("module", builtins) - else: - yield inferred._proxied - - -def object_type(node, context=None): - """Obtain the type of the given node - - This is used to implement the ``type`` builtin, which means that it's - used for inferring type calls, as well as used in a couple of other places - in the inference. - The node will be inferred first, so this function can support all - sorts of objects, as long as they support inference. 
- """ - - try: - types = set(_object_type(node, context)) - except exceptions.InferenceError: - return util.Uninferable - if len(types) > 1 or not types: - return util.Uninferable - return list(types)[0] - - -def _object_type_is_subclass(obj_type, class_or_seq, context=None): - if not isinstance(class_or_seq, (tuple, list)): - class_seq = (class_or_seq,) - else: - class_seq = class_or_seq - - if obj_type is util.Uninferable: - return util.Uninferable - - # Instances are not types - class_seq = [ - item if not isinstance(item, bases.Instance) else util.Uninferable - for item in class_seq - ] - # strict compatibility with issubclass - # issubclass(type, (object, 1)) evaluates to true - # issubclass(object, (1, type)) raises TypeError - for klass in class_seq: - if klass is util.Uninferable: - raise exceptions.AstroidTypeError("arg 2 must be a type or tuple of types") - - for obj_subclass in obj_type.mro(): - if obj_subclass == klass: - return True - return False - - -def object_isinstance(node, class_or_seq, context=None): - """Check if a node 'isinstance' any node in class_or_seq - - :param node: A given node - :param class_or_seq: Union[nodes.NodeNG, Sequence[nodes.NodeNG]] - :rtype: bool - - :raises AstroidTypeError: if the given ``classes_or_seq`` are not types - """ - obj_type = object_type(node, context) - if obj_type is util.Uninferable: - return util.Uninferable - return _object_type_is_subclass(obj_type, class_or_seq, context=context) - - -def object_issubclass(node, class_or_seq, context=None): - """Check if a type is a subclass of any node in class_or_seq - - :param node: A given node - :param class_or_seq: Union[Nodes.NodeNG, Sequence[nodes.NodeNG]] - :rtype: bool - - :raises AstroidTypeError: if the given ``classes_or_seq`` are not types - :raises AstroidError: if the type of the given node cannot be inferred - or its type's mro doesn't work - """ - if not isinstance(node, nodes.ClassDef): - raise TypeError("{node} needs to be a ClassDef node".format(node=node)) - return _object_type_is_subclass(node, class_or_seq, context=context) - - -def safe_infer(node, context=None): - """Return the inferred value for the given node. - - Return None if inference failed or if there is some ambiguity (more than - one node has been inferred). - """ - try: - inferit = node.infer(context=context) - value = next(inferit) - except exceptions.InferenceError: - return None - try: - next(inferit) - return None # None if there is ambiguity on the inferred node - except exceptions.InferenceError: - return None # there is some kind of ambiguity - except StopIteration: - return value - - -def has_known_bases(klass, context=None): - """Return true if all base classes of a class could be inferred.""" - try: - return klass._all_bases_known - except AttributeError: - pass - for base in klass.bases: - result = safe_infer(base, context=context) - # TODO: check for A->B->A->B pattern in class structure too? - if ( - not isinstance(result, scoped_nodes.ClassDef) - or result is klass - or not has_known_bases(result, context=context) - ): - klass._all_bases_known = False - return False - klass._all_bases_known = True - return True - - -def _type_check(type1, type2): - if not all(map(has_known_bases, (type1, type2))): - raise exceptions._NonDeducibleTypeHierarchy - - if not all([type1.newstyle, type2.newstyle]): - return False - try: - return type1 in type2.mro()[:-1] - except exceptions.MroError: - # The MRO is invalid. 
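# Editorial note (added in review, not part of the deleted upstream source):
# _type_check answers subtype questions via the MRO. For hypothetical classes
#     class A: ...
#     class B(A): ...
# is_subtype(B_type, A_type) is True because A appears in B.mro()[:-1].
# When the MRO cannot be computed, or a base class cannot be inferred,
# the helper deliberately raises instead of guessing False: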
- raise exceptions._NonDeducibleTypeHierarchy - - -def is_subtype(type1, type2): - """Check if *type1* is a subtype of *type2*.""" - return _type_check(type1=type2, type2=type1) - - -def is_supertype(type1, type2): - """Check if *type2* is a supertype of *type1*.""" - return _type_check(type1, type2) - - -def class_instance_as_index(node): - """Get the value as an index for the given instance. - - If an instance provides an __index__ method, then it can - be used in some scenarios where an integer is expected, - for instance when multiplying or subscripting a list. - """ - context = contextmod.InferenceContext() - context.callcontext = contextmod.CallContext(args=[node]) - - try: - for inferred in node.igetattr("__index__", context=context): - if not isinstance(inferred, bases.BoundMethod): - continue - - for result in inferred.infer_call_result(node, context=context): - if isinstance(result, nodes.Const) and isinstance(result.value, int): - return result - except exceptions.InferenceError: - pass - return None - - -def object_len(node, context=None): - """Infer length of given node object - - :param node: Node to infer the length of - :type node: Union[nodes.ClassDef, nodes.Instance] - - :raises AstroidTypeError: If an invalid node is returned - from __len__ method or no __len__ method exists - :raises InferenceError: If the given node cannot be inferred - or if multiple nodes are inferred - :returns: Integer length of node - :rtype: int - """ - # pylint: disable=import-outside-toplevel; circular import - from astroid.objects import FrozenSet - - inferred_node = safe_infer(node, context=context) - if inferred_node is None or inferred_node is util.Uninferable: - raise exceptions.InferenceError(node=node) - if isinstance(inferred_node, nodes.Const) and isinstance( - inferred_node.value, (bytes, str) - ): - return len(inferred_node.value) - if isinstance(inferred_node, (nodes.List, nodes.Set, nodes.Tuple, FrozenSet)): - return len(inferred_node.elts) - if isinstance(inferred_node, nodes.Dict): - return len(inferred_node.items) - try: - node_type = object_type(inferred_node, context=context) - len_call = next(node_type.igetattr("__len__", context=context)) - except exceptions.AttributeInferenceError: - # report via node_type: len_call is never bound when igetattr fails - raise exceptions.AstroidTypeError( - "object of type '{}' has no len()".format(node_type.pytype()) - ) - - result_of_len = next(len_call.infer_call_result(node, context)) - if ( - isinstance(result_of_len, nodes.Const) - and result_of_len.pytype() == "builtins.int" - ): - return result_of_len.value - raise exceptions.AstroidTypeError( - "'{}' object cannot be interpreted as an integer".format(result_of_len) - ) diff --git a/venv/lib/python3.8/site-packages/astroid/inference.py b/venv/lib/python3.8/site-packages/astroid/inference.py deleted file mode 100644 index 77c6b1d0..00000000 --- a/venv/lib/python3.8/site-packages/astroid/inference.py +++ /dev/null @@ -1,943 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2006-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2012 FELD Boris -# Copyright (c) 2013-2014 Google, Inc.
-# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014 Eevee (Alex Munroe) -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 Dmitry Pribysh -# Copyright (c) 2016 Jakub Wilk -# Copyright (c) 2017 Michał Masłowski -# Copyright (c) 2017 Calen Pennington -# Copyright (c) 2017 Łukasz Rogalski -# Copyright (c) 2018 Bryce Guinta -# Copyright (c) 2018 Nick Drozd -# Copyright (c) 2018 Ashley Whetter -# Copyright (c) 2018 HoverHell - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""this module contains a set of functions to handle inference on astroid trees -""" - -import functools -import itertools -import operator - -from astroid import bases -from astroid import context as contextmod -from astroid import exceptions -from astroid import decorators -from astroid import helpers -from astroid import manager -from astroid import nodes -from astroid.interpreter import dunder_lookup -from astroid import protocols -from astroid import util - - -MANAGER = manager.AstroidManager() - - -# .infer method ############################################################### - - -def infer_end(self, context=None): - """inference's end for nodes such as Module, ClassDef, FunctionDef, - Const... - - """ - yield self - - -nodes.Module._infer = infer_end -nodes.ClassDef._infer = infer_end -nodes.FunctionDef._infer = infer_end -nodes.Lambda._infer = infer_end -nodes.Const._infer = infer_end -nodes.Slice._infer = infer_end - - -def _infer_sequence_helper(node, context=None): - """Infer all values based on _BaseContainer.elts""" - values = [] - - for elt in node.elts: - if isinstance(elt, nodes.Starred): - starred = helpers.safe_infer(elt.value, context) - if not starred: - raise exceptions.InferenceError(node=node, context=context) - if not hasattr(starred, "elts"): - raise exceptions.InferenceError(node=node, context=context) - values.extend(_infer_sequence_helper(starred)) - elif isinstance(elt, nodes.NamedExpr): - value = helpers.safe_infer(elt.value, context) - if not value: - raise exceptions.InferenceError(node=node, context=context) - values.append(value) - else: - values.append(elt) - return values - - -@decorators.raise_if_nothing_inferred -def infer_sequence(self, context=None): - has_starred_named_expr = any( - isinstance(e, (nodes.Starred, nodes.NamedExpr)) for e in self.elts - ) - if has_starred_named_expr: - values = _infer_sequence_helper(self, context) - new_seq = type(self)( - lineno=self.lineno, col_offset=self.col_offset, parent=self.parent - ) - new_seq.postinit(values) - - yield new_seq - else: - yield self - - -nodes.List._infer = infer_sequence -nodes.Tuple._infer = infer_sequence -nodes.Set._infer = infer_sequence - - -def infer_map(self, context=None): - if not any(isinstance(k, nodes.DictUnpack) for k, _ in self.items): - yield self - else: - items = _infer_map(self, context) - new_seq = type(self)(self.lineno, self.col_offset, self.parent) - new_seq.postinit(list(items.items())) - yield new_seq - - -def _update_with_replacement(lhs_dict, rhs_dict): - """Delete nodes that equate to duplicate keys - - Since an astroid node doesn't 'equal' another node with the same value, - this function uses the as_string method to make sure duplicate keys - don't get through - - Note that both the key and the value are astroid nodes - - Fixes issue with DictUnpack causing duplicate keys - in inferred Dict items - - :param dict(nodes.NodeNG, nodes.NodeNG) lhs_dict: Dictionary to
'merge' nodes into - :param dict(nodes.NodeNG, nodes.NodeNG) rhs_dict: Dictionary with nodes to pull from - :return dict(nodes.NodeNG, nodes.NodeNG): merged dictionary of nodes - """ - combined_dict = itertools.chain(lhs_dict.items(), rhs_dict.items()) - # Overwrite keys which have the same string values - string_map = {key.as_string(): (key, value) for key, value in combined_dict} - # Return to dictionary - return dict(string_map.values()) - - -def _infer_map(node, context): - """Infer all values based on Dict.items""" - values = {} - for name, value in node.items: - if isinstance(name, nodes.DictUnpack): - double_starred = helpers.safe_infer(value, context) - if not double_starred: - raise exceptions.InferenceError - if not isinstance(double_starred, nodes.Dict): - raise exceptions.InferenceError(node=node, context=context) - unpack_items = _infer_map(double_starred, context) - values = _update_with_replacement(values, unpack_items) - else: - key = helpers.safe_infer(name, context=context) - value = helpers.safe_infer(value, context=context) - if any(not elem for elem in (key, value)): - raise exceptions.InferenceError(node=node, context=context) - values = _update_with_replacement(values, {key: value}) - return values - - -nodes.Dict._infer = infer_map - - -def _higher_function_scope(node): - """ Search for the first function which encloses the given - scope. This can be used for looking up in that function's - scope, in case looking up in a lower scope for a particular - name fails. - - :param node: A scope node. - :returns: - ``None``, if no parent function scope was found, - otherwise an instance of :class:`astroid.scoped_nodes.Function`, - which encloses the given node. - """ - current = node - while current.parent and not isinstance(current.parent, nodes.FunctionDef): - current = current.parent - if current and current.parent: - return current.parent - return None - - -def infer_name(self, context=None): - """infer a Name: use name lookup rules""" - frame, stmts = self.lookup(self.name) - if not stmts: - # Try to see if the name is enclosed in a nested function - # and use the higher (first function) scope for searching. 
- parent_function = _higher_function_scope(self.scope()) - if parent_function: - _, stmts = parent_function.lookup(self.name) - - if not stmts: - raise exceptions.NameInferenceError( - name=self.name, scope=self.scope(), context=context - ) - context = contextmod.copy_context(context) - context.lookupname = self.name - return bases._infer_stmts(stmts, context, frame) - - -# pylint: disable=no-value-for-parameter -nodes.Name._infer = decorators.raise_if_nothing_inferred( - decorators.path_wrapper(infer_name) -) -nodes.AssignName.infer_lhs = infer_name # won't work with a path wrapper - - -@decorators.raise_if_nothing_inferred -@decorators.path_wrapper -def infer_call(self, context=None): - """infer a Call node by trying to guess what the function returns""" - callcontext = contextmod.copy_context(context) - callcontext.callcontext = contextmod.CallContext( - args=self.args, keywords=self.keywords - ) - callcontext.boundnode = None - if context is not None: - callcontext.extra_context = _populate_context_lookup(self, context.clone()) - - for callee in self.func.infer(context): - if callee is util.Uninferable: - yield callee - continue - try: - if hasattr(callee, "infer_call_result"): - yield from callee.infer_call_result(caller=self, context=callcontext) - except exceptions.InferenceError: - continue - return dict(node=self, context=context) - - -nodes.Call._infer = infer_call - - -@decorators.raise_if_nothing_inferred -@decorators.path_wrapper -def infer_import(self, context=None, asname=True): - """infer an Import node: return the imported module/object""" - name = context.lookupname - if name is None: - raise exceptions.InferenceError(node=self, context=context) - - try: - if asname: - yield self.do_import_module(self.real_name(name)) - else: - yield self.do_import_module(name) - except exceptions.AstroidBuildingError as exc: - raise exceptions.InferenceError(node=self, context=context) from exc - - -nodes.Import._infer = infer_import - - -@decorators.raise_if_nothing_inferred -@decorators.path_wrapper -def infer_import_from(self, context=None, asname=True): - """infer a ImportFrom node: return the imported module/object""" - name = context.lookupname - if name is None: - raise exceptions.InferenceError(node=self, context=context) - if asname: - name = self.real_name(name) - - try: - module = self.do_import_module() - except exceptions.AstroidBuildingError as exc: - raise exceptions.InferenceError(node=self, context=context) from exc - - try: - context = contextmod.copy_context(context) - context.lookupname = name - stmts = module.getattr(name, ignore_locals=module is self.root()) - return bases._infer_stmts(stmts, context) - except exceptions.AttributeInferenceError as error: - raise exceptions.InferenceError( - error.message, target=self, attribute=name, context=context - ) from error - - -nodes.ImportFrom._infer = infer_import_from - - -def infer_attribute(self, context=None): - """infer an Attribute node by using getattr on the associated object""" - for owner in self.expr.infer(context): - if owner is util.Uninferable: - yield owner - continue - - if context and context.boundnode: - # This handles the situation where the attribute is accessed through a subclass - # of a base class and the attribute is defined at the base class's level, - # by taking in consideration a redefinition in the subclass. 
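# Editorial sketch (added in review, not part of the deleted upstream source)
# of the situation handled below, using hypothetical classes:
#     class Base:
#         attr = 1
#         def get(self):
#             return self.attr   # owner inferred as a Base instance
#     class Sub(Base):
#         attr = 2
# When the access is bound to a Sub instance, context.boundnode is that
# instance; the is_subtype check swaps it in as the owner, so Sub's
# redefinition of 'attr' wins over what Base alone would provide.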
- if isinstance(owner, bases.Instance) and isinstance( - context.boundnode, bases.Instance - ): - try: - if helpers.is_subtype( - helpers.object_type(context.boundnode), - helpers.object_type(owner), - ): - owner = context.boundnode - except exceptions._NonDeducibleTypeHierarchy: - # Can't determine anything useful. - pass - - try: - context.boundnode = owner - yield from owner.igetattr(self.attrname, context) - context.boundnode = None - except (exceptions.AttributeInferenceError, exceptions.InferenceError): - context.boundnode = None - except AttributeError: - # XXX method / function - context.boundnode = None - return dict(node=self, context=context) - - -nodes.Attribute._infer = decorators.raise_if_nothing_inferred( - decorators.path_wrapper(infer_attribute) -) -# won't work with a path wrapper -nodes.AssignAttr.infer_lhs = decorators.raise_if_nothing_inferred(infer_attribute) - - -@decorators.raise_if_nothing_inferred -@decorators.path_wrapper -def infer_global(self, context=None): - if context.lookupname is None: - raise exceptions.InferenceError(node=self, context=context) - try: - return bases._infer_stmts(self.root().getattr(context.lookupname), context) - except exceptions.AttributeInferenceError as error: - raise exceptions.InferenceError( - error.message, target=self, attribute=context.lookupname, context=context - ) from error - - -nodes.Global._infer = infer_global - - -_SUBSCRIPT_SENTINEL = object() - - -@decorators.raise_if_nothing_inferred -def infer_subscript(self, context=None): - """Inference for subscripts - - We're understanding if the index is a Const - or a slice, passing the result of inference - to the value's `getitem` method, which should - handle each supported index type accordingly. - """ - - found_one = False - for value in self.value.infer(context): - if value is util.Uninferable: - yield util.Uninferable - return None - for index in self.slice.infer(context): - if index is util.Uninferable: - yield util.Uninferable - return None - - # Try to deduce the index value. - index_value = _SUBSCRIPT_SENTINEL - if value.__class__ == bases.Instance: - index_value = index - else: - if index.__class__ == bases.Instance: - instance_as_index = helpers.class_instance_as_index(index) - if instance_as_index: - index_value = instance_as_index - else: - index_value = index - if index_value is _SUBSCRIPT_SENTINEL: - raise exceptions.InferenceError(node=self, context=context) - - try: - assigned = value.getitem(index_value, context) - except ( - exceptions.AstroidTypeError, - exceptions.AstroidIndexError, - exceptions.AttributeInferenceError, - AttributeError, - ) as exc: - raise exceptions.InferenceError(node=self, context=context) from exc - - # Prevent inferring if the inferred subscript - # is the same as the original subscripted object. - if self is assigned or assigned is util.Uninferable: - yield util.Uninferable - return None - yield from assigned.infer(context) - found_one = True - - if found_one: - return dict(node=self, context=context) - return None - - -nodes.Subscript._infer = decorators.path_wrapper(infer_subscript) -nodes.Subscript.infer_lhs = infer_subscript - - -@decorators.raise_if_nothing_inferred -@decorators.path_wrapper -def _infer_boolop(self, context=None): - """Infer a boolean operation (and / or / not). - - The function will calculate the boolean operation - for all pairs generated through inference for each component - node. 
- """ - values = self.values - if self.op == "or": - predicate = operator.truth - else: - predicate = operator.not_ - - try: - values = [value.infer(context=context) for value in values] - except exceptions.InferenceError: - yield util.Uninferable - return None - - for pair in itertools.product(*values): - if any(item is util.Uninferable for item in pair): - # Can't infer the final result, just yield Uninferable. - yield util.Uninferable - continue - - bool_values = [item.bool_value() for item in pair] - if any(item is util.Uninferable for item in bool_values): - # Can't infer the final result, just yield Uninferable. - yield util.Uninferable - continue - - # Since the boolean operations are short circuited operations, - # this code yields the first value for which the predicate is True - # and if no value respected the predicate, then the last value will - # be returned (or Uninferable if there was no last value). - # This is conforming to the semantics of `and` and `or`: - # 1 and 0 -> 1 - # 0 and 1 -> 0 - # 1 or 0 -> 1 - # 0 or 1 -> 1 - value = util.Uninferable - for value, bool_value in zip(pair, bool_values): - if predicate(bool_value): - yield value - break - else: - yield value - - return dict(node=self, context=context) - - -nodes.BoolOp._infer = _infer_boolop - - -# UnaryOp, BinOp and AugAssign inferences - - -def _filter_operation_errors(self, infer_callable, context, error): - for result in infer_callable(self, context): - if isinstance(result, error): - # For the sake of .infer(), we don't care about operation - # errors, which is the job of pylint. So return something - # which shows that we can't infer the result. - yield util.Uninferable - else: - yield result - - -def _infer_unaryop(self, context=None): - """Infer what an UnaryOp should return when evaluated.""" - for operand in self.operand.infer(context): - try: - yield operand.infer_unary_op(self.op) - except TypeError as exc: - # The operand doesn't support this operation. - yield util.BadUnaryOperationMessage(operand, self.op, exc) - except AttributeError as exc: - meth = protocols.UNARY_OP_METHOD[self.op] - if meth is None: - # `not node`. Determine node's boolean - # value and negate its result, unless it is - # Uninferable, which will be returned as is. - bool_value = operand.bool_value() - if bool_value is not util.Uninferable: - yield nodes.const_factory(not bool_value) - else: - yield util.Uninferable - else: - if not isinstance(operand, (bases.Instance, nodes.ClassDef)): - # The operation was used on something which - # doesn't support it. - yield util.BadUnaryOperationMessage(operand, self.op, exc) - continue - - try: - try: - methods = dunder_lookup.lookup(operand, meth) - except exceptions.AttributeInferenceError: - yield util.BadUnaryOperationMessage(operand, self.op, exc) - continue - - meth = methods[0] - inferred = next(meth.infer(context=context)) - if inferred is util.Uninferable or not inferred.callable(): - continue - - context = contextmod.copy_context(context) - context.callcontext = contextmod.CallContext(args=[operand]) - call_results = inferred.infer_call_result(self, context=context) - result = next(call_results, None) - if result is None: - # Failed to infer, return the same type. - yield operand - else: - yield result - except exceptions.AttributeInferenceError as exc: - # The unary operation special method was not found. 
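# Editorial note (added in review, not part of the deleted upstream source):
# this branch is reached when the dunder lookup itself fails, e.g. when
# inferring -obj for an instance whose class defines no __neg__; the
# BadUnaryOperationMessage carries enough detail for a checker such as
# pylint to report an invalid unary operand type.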
- yield util.BadUnaryOperationMessage(operand, self.op, exc) - except exceptions.InferenceError: - yield util.Uninferable - - -@decorators.raise_if_nothing_inferred -@decorators.path_wrapper -def infer_unaryop(self, context=None): - """Infer what an UnaryOp should return when evaluated.""" - yield from _filter_operation_errors( - self, _infer_unaryop, context, util.BadUnaryOperationMessage - ) - return dict(node=self, context=context) - - -nodes.UnaryOp._infer_unaryop = _infer_unaryop -nodes.UnaryOp._infer = infer_unaryop - - -def _is_not_implemented(const): - """Check if the given const node is NotImplemented.""" - return isinstance(const, nodes.Const) and const.value is NotImplemented - - -def _invoke_binop_inference(instance, opnode, op, other, context, method_name): - """Invoke binary operation inference on the given instance.""" - methods = dunder_lookup.lookup(instance, method_name) - context = contextmod.bind_context_to_node(context, instance) - method = methods[0] - inferred = next(method.infer(context=context)) - if inferred is util.Uninferable: - raise exceptions.InferenceError - return instance.infer_binary_op(opnode, op, other, context, inferred) - - -def _aug_op(instance, opnode, op, other, context, reverse=False): - """Get an inference callable for an augmented binary operation.""" - method_name = protocols.AUGMENTED_OP_METHOD[op] - return functools.partial( - _invoke_binop_inference, - instance=instance, - op=op, - opnode=opnode, - other=other, - context=context, - method_name=method_name, - ) - - -def _bin_op(instance, opnode, op, other, context, reverse=False): - """Get an inference callable for a normal binary operation. - - If *reverse* is True, then the reflected method will be used instead. - """ - if reverse: - method_name = protocols.REFLECTED_BIN_OP_METHOD[op] - else: - method_name = protocols.BIN_OP_METHOD[op] - return functools.partial( - _invoke_binop_inference, - instance=instance, - op=op, - opnode=opnode, - other=other, - context=context, - method_name=method_name, - ) - - -def _get_binop_contexts(context, left, right): - """Get contexts for binary operations. - - This will return two inference contexts, the first one - for x.__op__(y), the other one for y.__rop__(x), where - only the arguments are inversed. - """ - # The order is important, since the first one should be - # left.__op__(right). - for arg in (right, left): - new_context = context.clone() - new_context.callcontext = contextmod.CallContext(args=[arg]) - new_context.boundnode = None - yield new_context - - -def _same_type(type1, type2): - """Check if type1 is the same as type2.""" - return type1.qname() == type2.qname() - - -def _get_binop_flow( - left, left_type, binary_opnode, right, right_type, context, reverse_context -): - """Get the flow for binary operations. - - The rules are a bit messy: - - * if left and right have the same type, then only one - method will be called, left.__op__(right) - * if left and right are unrelated typewise, then first - left.__op__(right) is tried and if this does not exist - or returns NotImplemented, then right.__rop__(left) is tried. - * if left is a subtype of right, then only left.__op__(right) - is tried. 
- * if left is a supertype of right, then right.__rop__(left) - is first tried and then left.__op__(right) - """ - op = binary_opnode.op - if _same_type(left_type, right_type): - methods = [_bin_op(left, binary_opnode, op, right, context)] - elif helpers.is_subtype(left_type, right_type): - methods = [_bin_op(left, binary_opnode, op, right, context)] - elif helpers.is_supertype(left_type, right_type): - methods = [ - _bin_op(right, binary_opnode, op, left, reverse_context, reverse=True), - _bin_op(left, binary_opnode, op, right, context), - ] - else: - methods = [ - _bin_op(left, binary_opnode, op, right, context), - _bin_op(right, binary_opnode, op, left, reverse_context, reverse=True), - ] - return methods - - -def _get_aug_flow( - left, left_type, aug_opnode, right, right_type, context, reverse_context -): - """Get the flow for augmented binary operations. - - The rules are a bit messy: - - * if left and right have the same type, then left.__augop__(right) - is first tried and then left.__op__(right). - * if left and right are unrelated typewise, then - left.__augop__(right) is tried, then left.__op__(right) - is tried and then right.__rop__(left) is tried. - * if left is a subtype of right, then left.__augop__(right) - is tried and then left.__op__(right). - * if left is a supertype of right, then left.__augop__(right) - is tried, then right.__rop__(left) and then - left.__op__(right) - """ - bin_op = aug_opnode.op.strip("=") - aug_op = aug_opnode.op - if _same_type(left_type, right_type): - methods = [ - _aug_op(left, aug_opnode, aug_op, right, context), - _bin_op(left, aug_opnode, bin_op, right, context), - ] - elif helpers.is_subtype(left_type, right_type): - methods = [ - _aug_op(left, aug_opnode, aug_op, right, context), - _bin_op(left, aug_opnode, bin_op, right, context), - ] - elif helpers.is_supertype(left_type, right_type): - methods = [ - _aug_op(left, aug_opnode, aug_op, right, context), - _bin_op(right, aug_opnode, bin_op, left, reverse_context, reverse=True), - _bin_op(left, aug_opnode, bin_op, right, context), - ] - else: - methods = [ - _aug_op(left, aug_opnode, aug_op, right, context), - _bin_op(left, aug_opnode, bin_op, right, context), - _bin_op(right, aug_opnode, bin_op, left, reverse_context, reverse=True), - ] - return methods - - -def _infer_binary_operation(left, right, binary_opnode, context, flow_factory): - """Infer a binary operation between a left operand and a right operand - - This is used by both normal binary operations and augmented binary - operations, the only difference is the flow factory used. - """ - - context, reverse_context = _get_binop_contexts(context, left, right) - left_type = helpers.object_type(left) - right_type = helpers.object_type(right) - methods = flow_factory( - left, left_type, binary_opnode, right, right_type, context, reverse_context - ) - for method in methods: - try: - results = list(method()) - except AttributeError: - continue - except exceptions.AttributeInferenceError: - continue - except exceptions.InferenceError: - yield util.Uninferable - return - else: - if any(result is util.Uninferable for result in results): - yield util.Uninferable - return - - if all(map(_is_not_implemented, results)): - continue - not_implemented = sum( - 1 for result in results if _is_not_implemented(result) - ) - if not_implemented and not_implemented != len(results): - # Can't infer yet what this is. 
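# Editorial note (added in review, not part of the deleted upstream source):
# a mixed result set, some NotImplemented and some real values, can come
# from a method that returns NotImplemented only for certain operand
# types, e.g. (hypothetical):
#     def __add__(self, other):
#         if isinstance(other, int):
#             return 42
#         return NotImplemented
# With the candidate results disagreeing, yielding Uninferable is safer
# than picking one: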
- yield util.Uninferable - return - - yield from results - return - # The operation doesn't seem to be supported so let the caller know about it - yield util.BadBinaryOperationMessage(left_type, binary_opnode.op, right_type) - - -def _infer_binop(self, context): - """Binary operation inference logic.""" - left = self.left - right = self.right - - # we use two separate contexts for evaluating lhs and rhs because - # 1. evaluating lhs may leave some undesired entries in context.path - # which may not let us infer right value of rhs - context = context or contextmod.InferenceContext() - lhs_context = contextmod.copy_context(context) - rhs_context = contextmod.copy_context(context) - lhs_iter = left.infer(context=lhs_context) - rhs_iter = right.infer(context=rhs_context) - for lhs, rhs in itertools.product(lhs_iter, rhs_iter): - if any(value is util.Uninferable for value in (rhs, lhs)): - # Don't know how to process this. - yield util.Uninferable - return - - try: - yield from _infer_binary_operation(lhs, rhs, self, context, _get_binop_flow) - except exceptions._NonDeducibleTypeHierarchy: - yield util.Uninferable - - -@decorators.yes_if_nothing_inferred -@decorators.path_wrapper -def infer_binop(self, context=None): - return _filter_operation_errors( - self, _infer_binop, context, util.BadBinaryOperationMessage - ) - - -nodes.BinOp._infer_binop = _infer_binop -nodes.BinOp._infer = infer_binop - - -def _infer_augassign(self, context=None): - """Inference logic for augmented binary operations.""" - if context is None: - context = contextmod.InferenceContext() - - rhs_context = context.clone() - - lhs_iter = self.target.infer_lhs(context=context) - rhs_iter = self.value.infer(context=rhs_context) - for lhs, rhs in itertools.product(lhs_iter, rhs_iter): - if any(value is util.Uninferable for value in (rhs, lhs)): - # Don't know how to process this. - yield util.Uninferable - return - - try: - yield from _infer_binary_operation( - left=lhs, - right=rhs, - binary_opnode=self, - context=context, - flow_factory=_get_aug_flow, - ) - except exceptions._NonDeducibleTypeHierarchy: - yield util.Uninferable - - -@decorators.raise_if_nothing_inferred -@decorators.path_wrapper -def infer_augassign(self, context=None): - return _filter_operation_errors( - self, _infer_augassign, context, util.BadBinaryOperationMessage - ) - - -nodes.AugAssign._infer_augassign = _infer_augassign -nodes.AugAssign._infer = infer_augassign - -# End of binary operation inference. 
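
# Illustrative aside (not part of the original module): the flow tables above
# mirror CPython's own operator dispatch. When the right operand's type is a
# proper subclass of the left's and overrides the reflected method, the
# reflected method is tried first. A minimal, self-contained sketch:

class _Base:
    def __add__(self, other):
        return "base.__add__"

class _Derived(_Base):
    def __radd__(self, other):
        return "derived.__radd__"

assert _Base() + _Base() == "base.__add__"          # same type: __add__ only
assert _Base() + _Derived() == "derived.__radd__"   # subclass: __radd__ wins

# Augmented assignment adds one extra step in front: ``x += y`` tries
# x.__iadd__(y) first and only then falls back to the binary flow above.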
- - -@decorators.raise_if_nothing_inferred -def infer_arguments(self, context=None): - name = context.lookupname - if name is None: - raise exceptions.InferenceError(node=self, context=context) - return protocols._arguments_infer_argname(self, name, context) - - -nodes.Arguments._infer = infer_arguments - - -@decorators.raise_if_nothing_inferred -@decorators.path_wrapper -def infer_assign(self, context=None): - """infer a AssignName/AssignAttr: need to inspect the RHS part of the - assign node - """ - stmt = self.statement() - if isinstance(stmt, nodes.AugAssign): - return stmt.infer(context) - - stmts = list(self.assigned_stmts(context=context)) - return bases._infer_stmts(stmts, context) - - -nodes.AssignName._infer = infer_assign -nodes.AssignAttr._infer = infer_assign - - -@decorators.raise_if_nothing_inferred -@decorators.path_wrapper -def infer_empty_node(self, context=None): - if not self.has_underlying_object(): - yield util.Uninferable - else: - try: - yield from MANAGER.infer_ast_from_something(self.object, context=context) - except exceptions.AstroidError: - yield util.Uninferable - - -nodes.EmptyNode._infer = infer_empty_node - - -@decorators.raise_if_nothing_inferred -def infer_index(self, context=None): - return self.value.infer(context) - - -nodes.Index._infer = infer_index - -# TODO: move directly into bases.Instance when the dependency hell -# will be solved. -def instance_getitem(self, index, context=None): - # Rewrap index to Const for this case - new_context = contextmod.bind_context_to_node(context, self) - if not context: - context = new_context - - # Create a new callcontext for providing index as an argument. - new_context.callcontext = contextmod.CallContext(args=[index]) - - method = next(self.igetattr("__getitem__", context=context), None) - if not isinstance(method, bases.BoundMethod): - raise exceptions.InferenceError( - "Could not find __getitem__ for {node!r}.", node=self, context=context - ) - - return next(method.infer_call_result(self, new_context)) - - -bases.Instance.getitem = instance_getitem - - -def _populate_context_lookup(call, context): - # Allows context to be saved for later - # for inference inside a function - context_lookup = {} - if context is None: - return context_lookup - for arg in call.args: - if isinstance(arg, nodes.Starred): - context_lookup[arg.value] = context - else: - context_lookup[arg] = context - keywords = call.keywords if call.keywords is not None else [] - for keyword in keywords: - context_lookup[keyword.value] = context - return context_lookup - - -@decorators.raise_if_nothing_inferred -def infer_ifexp(self, context=None): - """Support IfExp inference - - If we can't infer the truthiness of the condition, we default - to inferring both branches. Otherwise, we infer either branch - depending on the condition. - """ - both_branches = False - # We use two separate contexts for evaluating lhs and rhs because - # evaluating lhs may leave some undesired entries in context.path - # which may not let us infer right value of rhs. 
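    # (This is the same two-context precaution used in _infer_binop above.)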
- - context = context or contextmod.InferenceContext() - lhs_context = contextmod.copy_context(context) - rhs_context = contextmod.copy_context(context) - try: - test = next(self.test.infer(context=context.clone())) - except exceptions.InferenceError: - both_branches = True - else: - if test is not util.Uninferable: - if test.bool_value(): - yield from self.body.infer(context=lhs_context) - else: - yield from self.orelse.infer(context=rhs_context) - else: - both_branches = True - if both_branches: - yield from self.body.infer(context=lhs_context) - yield from self.orelse.infer(context=rhs_context) - - -nodes.IfExp._infer = infer_ifexp diff --git a/venv/lib/python3.8/site-packages/astroid/interpreter/__init__.py b/venv/lib/python3.8/site-packages/astroid/interpreter/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/venv/lib/python3.8/site-packages/astroid/interpreter/_import/__init__.py b/venv/lib/python3.8/site-packages/astroid/interpreter/_import/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/venv/lib/python3.8/site-packages/astroid/interpreter/_import/spec.py b/venv/lib/python3.8/site-packages/astroid/interpreter/_import/spec.py deleted file mode 100644 index 84e093bb..00000000 --- a/venv/lib/python3.8/site-packages/astroid/interpreter/_import/spec.py +++ /dev/null @@ -1,344 +0,0 @@ -# Copyright (c) 2016-2018 Claudiu Popa -# Copyright (c) 2016 Derek Gustafson -# Copyright (c) 2017 Chris Philip -# Copyright (c) 2017 Hugo -# Copyright (c) 2017 ioanatia -# Copyright (c) 2017 Calen Pennington -# Copyright (c) 2018 Nick Drozd - -import abc -import collections -import distutils -import enum -import imp -import os -import sys -import zipimport - -try: - import importlib.machinery - - _HAS_MACHINERY = True -except ImportError: - _HAS_MACHINERY = False - -try: - from functools import lru_cache -except ImportError: - from backports.functools_lru_cache import lru_cache - -from . import util - -ModuleType = enum.Enum( - "ModuleType", - "C_BUILTIN C_EXTENSION PKG_DIRECTORY " - "PY_CODERESOURCE PY_COMPILED PY_FROZEN PY_RESOURCE " - "PY_SOURCE PY_ZIPMODULE PY_NAMESPACE", -) -_ImpTypes = { - imp.C_BUILTIN: ModuleType.C_BUILTIN, - imp.C_EXTENSION: ModuleType.C_EXTENSION, - imp.PKG_DIRECTORY: ModuleType.PKG_DIRECTORY, - imp.PY_COMPILED: ModuleType.PY_COMPILED, - imp.PY_FROZEN: ModuleType.PY_FROZEN, - imp.PY_SOURCE: ModuleType.PY_SOURCE, -} -if hasattr(imp, "PY_RESOURCE"): - _ImpTypes[imp.PY_RESOURCE] = ModuleType.PY_RESOURCE -if hasattr(imp, "PY_CODERESOURCE"): - _ImpTypes[imp.PY_CODERESOURCE] = ModuleType.PY_CODERESOURCE - - -def _imp_type_to_module_type(imp_type): - return _ImpTypes[imp_type] - - -_ModuleSpec = collections.namedtuple( - "_ModuleSpec", "name type location " "origin submodule_search_locations" -) - - -class ModuleSpec(_ModuleSpec): - """Defines a class similar to PEP 420's ModuleSpec - - A module spec defines a name of a module, its type, location - and where submodules can be found, if the module is a package. 
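
    For example (illustrative only), a spec for a regular package could
    look like::

        ModuleSpec(name='mypkg', module_type=ModuleType.PKG_DIRECTORY,
                   location='/path/to/mypkg',
                   submodule_search_locations=['/path/to/mypkg'])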
- """ - - def __new__( - cls, - name, - module_type, - location=None, - origin=None, - submodule_search_locations=None, - ): - return _ModuleSpec.__new__( - cls, - name=name, - type=module_type, - location=location, - origin=origin, - submodule_search_locations=submodule_search_locations, - ) - - -class Finder: - """A finder is a class which knows how to find a particular module.""" - - def __init__(self, path=None): - self._path = path or sys.path - - @abc.abstractmethod - def find_module(self, modname, module_parts, processed, submodule_path): - """Find the given module - - Each finder is responsible for each protocol of finding, as long as - they all return a ModuleSpec. - - :param str modname: The module which needs to be searched. - :param list module_parts: It should be a list of strings, - where each part contributes to the module's - namespace. - :param list processed: What parts from the module parts were processed - so far. - :param list submodule_path: A list of paths where the module - can be looked into. - :returns: A ModuleSpec, describing how and where the module was found, - None, otherwise. - """ - - def contribute_to_path(self, spec, processed): - """Get a list of extra paths where this finder can search.""" - - -class ImpFinder(Finder): - """A finder based on the imp module.""" - - def find_module(self, modname, module_parts, processed, submodule_path): - if submodule_path is not None: - submodule_path = list(submodule_path) - try: - stream, mp_filename, mp_desc = imp.find_module(modname, submodule_path) - except ImportError: - return None - - # Close resources. - if stream: - stream.close() - - return ModuleSpec( - name=modname, - location=mp_filename, - module_type=_imp_type_to_module_type(mp_desc[2]), - ) - - def contribute_to_path(self, spec, processed): - if spec.location is None: - # Builtin. - return None - - if _is_setuptools_namespace(spec.location): - # extend_path is called, search sys.path for module/packages - # of this name see pkgutil.extend_path documentation - path = [ - os.path.join(p, *processed) - for p in sys.path - if os.path.isdir(os.path.join(p, *processed)) - ] - # We already import distutils elsewhere in astroid, - # so if it is the same module, we can use it directly. - elif spec.name == "distutils" and spec.location in distutils.__path__: - # distutils is patched inside virtualenvs to pick up submodules - # from the original Python, not from the virtualenv itself. 
- path = list(distutils.__path__) - else: - path = [spec.location] - return path - - -class ExplicitNamespacePackageFinder(ImpFinder): - """A finder for the explicit namespace packages, generated through pkg_resources.""" - - def find_module(self, modname, module_parts, processed, submodule_path): - if processed: - modname = ".".join(processed + [modname]) - if util.is_namespace(modname) and modname in sys.modules: - submodule_path = sys.modules[modname].__path__ - return ModuleSpec( - name=modname, - location="", - origin="namespace", - module_type=ModuleType.PY_NAMESPACE, - submodule_search_locations=submodule_path, - ) - return None - - def contribute_to_path(self, spec, processed): - return spec.submodule_search_locations - - -class ZipFinder(Finder): - """Finder that knows how to find a module inside zip files.""" - - def __init__(self, path): - super(ZipFinder, self).__init__(path) - self._zipimporters = _precache_zipimporters(path) - - def find_module(self, modname, module_parts, processed, submodule_path): - try: - file_type, filename, path = _search_zip(module_parts, self._zipimporters) - except ImportError: - return None - - return ModuleSpec( - name=modname, - location=filename, - origin="egg", - module_type=file_type, - submodule_search_locations=path, - ) - - -class PathSpecFinder(Finder): - """Finder based on importlib.machinery.PathFinder.""" - - def find_module(self, modname, module_parts, processed, submodule_path): - spec = importlib.machinery.PathFinder.find_spec(modname, path=submodule_path) - if spec: - # origin can be either a string on older Python versions - # or None in case it is a namespace package: - # https://github.com/python/cpython/pull/5481 - is_namespace_pkg = spec.origin in ("namespace", None) - location = spec.origin if not is_namespace_pkg else None - module_type = ModuleType.PY_NAMESPACE if is_namespace_pkg else None - spec = ModuleSpec( - name=spec.name, - location=location, - origin=spec.origin, - module_type=module_type, - submodule_search_locations=list(spec.submodule_search_locations or []), - ) - return spec - - def contribute_to_path(self, spec, processed): - if spec.type == ModuleType.PY_NAMESPACE: - return spec.submodule_search_locations - return None - - -_SPEC_FINDERS = (ImpFinder, ZipFinder) -if _HAS_MACHINERY: - _SPEC_FINDERS += (PathSpecFinder,) -_SPEC_FINDERS += (ExplicitNamespacePackageFinder,) - - -def _is_setuptools_namespace(location): - try: - with open(os.path.join(location, "__init__.py"), "rb") as stream: - data = stream.read(4096) - except IOError: - pass - else: - extend_path = b"pkgutil" in data and b"extend_path" in data - declare_namespace = ( - b"pkg_resources" in data and b"declare_namespace(__name__)" in data - ) - return extend_path or declare_namespace - - -@lru_cache() -def _cached_set_diff(left, right): - result = set(left) - result.difference_update(right) - return result - - -def _precache_zipimporters(path=None): - pic = sys.path_importer_cache - - # When measured, despite having the same complexity (O(n)), - # converting to tuples and then caching the conversion to sets - # and the set difference is faster than converting to sets - # and then only caching the set difference. 
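    # Tuples are also required because ``lru_cache`` only accepts hashable
    # arguments; passing lists here would raise ``TypeError``.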
- - req_paths = tuple(path or sys.path) - cached_paths = tuple(pic) - new_paths = _cached_set_diff(req_paths, cached_paths) - for entry_path in new_paths: - try: - pic[entry_path] = zipimport.zipimporter(entry_path) - except zipimport.ZipImportError: - continue - return pic - - -def _search_zip(modpath, pic): - for filepath, importer in list(pic.items()): - if importer is not None: - found = importer.find_module(modpath[0]) - if found: - if not importer.find_module(os.path.sep.join(modpath)): - raise ImportError( - "No module named %s in %s/%s" - % (".".join(modpath[1:]), filepath, modpath) - ) - # import code; code.interact(local=locals()) - return ( - ModuleType.PY_ZIPMODULE, - os.path.abspath(filepath) + os.path.sep + os.path.sep.join(modpath), - filepath, - ) - raise ImportError("No module named %s" % ".".join(modpath)) - - -def _find_spec_with_path(search_path, modname, module_parts, processed, submodule_path): - finders = [finder(search_path) for finder in _SPEC_FINDERS] - for finder in finders: - spec = finder.find_module(modname, module_parts, processed, submodule_path) - if spec is None: - continue - return finder, spec - - raise ImportError("No module named %s" % ".".join(module_parts)) - - -def find_spec(modpath, path=None): - """Find a spec for the given module. - - :type modpath: list or tuple - :param modpath: - split module's name (i.e name of a module or package split - on '.'), with leading empty strings for explicit relative import - - :type path: list or None - :param path: - optional list of path where the module or package should be - searched (use sys.path if nothing or None is given) - - :rtype: ModuleSpec - :return: A module spec, which describes how the module was - found and where. - """ - _path = path or sys.path - - # Need a copy for not mutating the argument. - modpath = modpath[:] - - submodule_path = None - module_parts = modpath[:] - processed = [] - - while modpath: - modname = modpath.pop(0) - finder, spec = _find_spec_with_path( - _path, modname, module_parts, processed, submodule_path or path - ) - processed.append(modname) - if modpath: - submodule_path = finder.contribute_to_path(spec, processed) - - if spec.type == ModuleType.PKG_DIRECTORY: - spec = spec._replace(submodule_search_locations=submodule_path) - - return spec diff --git a/venv/lib/python3.8/site-packages/astroid/interpreter/_import/util.py b/venv/lib/python3.8/site-packages/astroid/interpreter/_import/util.py deleted file mode 100644 index a917bd3d..00000000 --- a/venv/lib/python3.8/site-packages/astroid/interpreter/_import/util.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright (c) 2016, 2018 Claudiu Popa - -try: - import pkg_resources -except ImportError: - pkg_resources = None - - -def is_namespace(modname): - return pkg_resources is not None and modname in pkg_resources._namespace_packages diff --git a/venv/lib/python3.8/site-packages/astroid/interpreter/dunder_lookup.py b/venv/lib/python3.8/site-packages/astroid/interpreter/dunder_lookup.py deleted file mode 100644 index 0ae9bc90..00000000 --- a/venv/lib/python3.8/site-packages/astroid/interpreter/dunder_lookup.py +++ /dev/null @@ -1,66 +0,0 @@ -# Copyright (c) 2016-2018 Claudiu Popa -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Contains logic for retrieving special methods. - -This implementation does not rely on the dot attribute access -logic, found in ``.getattr()``. 
The difference between these two -is that the dunder methods are looked with the type slots -(you can find more about these here -http://lucumr.pocoo.org/2014/8/16/the-python-i-would-like-to-see/) -As such, the lookup for the special methods is actually simpler than -the dot attribute access. -""" -import itertools - -import astroid -from astroid import exceptions - - -def _lookup_in_mro(node, name): - attrs = node.locals.get(name, []) - - nodes = itertools.chain.from_iterable( - ancestor.locals.get(name, []) for ancestor in node.ancestors(recurs=True) - ) - values = list(itertools.chain(attrs, nodes)) - if not values: - raise exceptions.AttributeInferenceError(attribute=name, target=node) - - return values - - -def lookup(node, name): - """Lookup the given special method name in the given *node* - - If the special method was found, then a list of attributes - will be returned. Otherwise, `astroid.AttributeInferenceError` - is going to be raised. - """ - if isinstance( - node, (astroid.List, astroid.Tuple, astroid.Const, astroid.Dict, astroid.Set) - ): - return _builtin_lookup(node, name) - if isinstance(node, astroid.Instance): - return _lookup_in_mro(node, name) - if isinstance(node, astroid.ClassDef): - return _class_lookup(node, name) - - raise exceptions.AttributeInferenceError(attribute=name, target=node) - - -def _class_lookup(node, name): - metaclass = node.metaclass() - if metaclass is None: - raise exceptions.AttributeInferenceError(attribute=name, target=node) - - return _lookup_in_mro(metaclass, name) - - -def _builtin_lookup(node, name): - values = node.locals.get(name, []) - if not values: - raise exceptions.AttributeInferenceError(attribute=name, target=node) - - return values diff --git a/venv/lib/python3.8/site-packages/astroid/interpreter/objectmodel.py b/venv/lib/python3.8/site-packages/astroid/interpreter/objectmodel.py deleted file mode 100644 index 5e488d9d..00000000 --- a/venv/lib/python3.8/site-packages/astroid/interpreter/objectmodel.py +++ /dev/null @@ -1,738 +0,0 @@ -# Copyright (c) 2016-2018 Claudiu Popa -# Copyright (c) 2016 Derek Gustafson -# Copyright (c) 2017-2018 Bryce Guinta -# Copyright (c) 2017 Ceridwen -# Copyright (c) 2017 Calen Pennington -# Copyright (c) 2018 Nick Drozd -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER -""" -Data object model, as per https://docs.python.org/3/reference/datamodel.html. - -This module describes, at least partially, a data object model for some -of astroid's nodes. The model contains special attributes that nodes such -as functions, classes, modules etc have, such as __doc__, __class__, -__module__ etc, being used when doing attribute lookups over nodes. - -For instance, inferring `obj.__class__` will first trigger an inference -of the `obj` variable. If it was successfully inferred, then an attribute -`__class__ will be looked for in the inferred object. This is the part -where the data model occurs. The model is attached to those nodes -and the lookup mechanism will try to see if attributes such as -`__class__` are defined by the model or not. If they are defined, -the model will be requested to return the corresponding value of that -attribute. Thus the model can be viewed as a special part of the lookup -mechanism. 
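
A rough illustration (assumed public API; exact nodes may vary across
astroid versions)::

    import astroid

    func = astroid.extract_node("def f(): pass")
    # Resolved through FunctionModel below; returns a Const node whose
    # .value is 'f'.
    print(func.special_attributes.lookup("__name__").value)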
-""" - -import itertools -import pprint -import os -import types -from functools import lru_cache - -import astroid -from astroid import context as contextmod -from astroid import exceptions -from astroid import node_classes - - -IMPL_PREFIX = "attr_" - - -def _dunder_dict(instance, attributes): - obj = node_classes.Dict(parent=instance) - - # Convert the keys to node strings - keys = [ - node_classes.Const(value=value, parent=obj) for value in list(attributes.keys()) - ] - - # The original attribute has a list of elements for each key, - # but that is not useful for retrieving the special attribute's value. - # In this case, we're picking the last value from each list. - values = [elem[-1] for elem in attributes.values()] - - obj.postinit(list(zip(keys, values))) - return obj - - -class ObjectModel: - def __init__(self): - self._instance = None - - def __repr__(self): - result = [] - cname = type(self).__name__ - string = "%(cname)s(%(fields)s)" - alignment = len(cname) + 1 - for field in sorted(self.attributes()): - width = 80 - len(field) - alignment - lines = pprint.pformat(field, indent=2, width=width).splitlines(True) - - inner = [lines[0]] - for line in lines[1:]: - inner.append(" " * alignment + line) - result.append(field) - - return string % { - "cname": cname, - "fields": (",\n" + " " * alignment).join(result), - } - - def __call__(self, instance): - self._instance = instance - return self - - def __get__(self, instance, cls=None): - # ObjectModel needs to be a descriptor so that just doing - # `special_attributes = SomeObjectModel` should be enough in the body of a node. - # But at the same time, node.special_attributes should return an object - # which can be used for manipulating the special attributes. That's the reason - # we pass the instance through which it got accessed to ObjectModel.__call__, - # returning itself afterwards, so we can still have access to the - # underlying data model and to the instance for which it got accessed. - return self(instance) - - def __contains__(self, name): - return name in self.attributes() - - @lru_cache(maxsize=None) - def attributes(self): - """Get the attributes which are exported by this object model.""" - return [ - obj[len(IMPL_PREFIX) :] for obj in dir(self) if obj.startswith(IMPL_PREFIX) - ] - - def lookup(self, name): - """Look up the given *name* in the current model - - It should return an AST or an interpreter object, - but if the name is not found, then an AttributeInferenceError will be raised. 
- """ - - if name in self.attributes(): - return getattr(self, IMPL_PREFIX + name) - raise exceptions.AttributeInferenceError(target=self._instance, attribute=name) - - -class ModuleModel(ObjectModel): - def _builtins(self): - builtins_ast_module = astroid.MANAGER.builtins_module - return builtins_ast_module.special_attributes.lookup("__dict__") - - @property - def attr_builtins(self): - return self._builtins() - - @property - def attr___path__(self): - if not self._instance.package: - raise exceptions.AttributeInferenceError( - target=self._instance, attribute="__path__" - ) - - path_objs = [ - node_classes.Const( - value=path - if not path.endswith("__init__.py") - else os.path.dirname(path), - parent=self._instance, - ) - for path in self._instance.path - ] - - container = node_classes.List(parent=self._instance) - container.postinit(path_objs) - - return container - - @property - def attr___name__(self): - return node_classes.Const(value=self._instance.name, parent=self._instance) - - @property - def attr___doc__(self): - return node_classes.Const(value=self._instance.doc, parent=self._instance) - - @property - def attr___file__(self): - return node_classes.Const(value=self._instance.file, parent=self._instance) - - @property - def attr___dict__(self): - return _dunder_dict(self._instance, self._instance.globals) - - @property - def attr___package__(self): - if not self._instance.package: - value = "" - else: - value = self._instance.name - - return node_classes.Const(value=value, parent=self._instance) - - # These are related to the Python 3 implementation of the - # import system, - # https://docs.python.org/3/reference/import.html#import-related-module-attributes - - @property - def attr___spec__(self): - # No handling for now. - return node_classes.Unknown() - - @property - def attr___loader__(self): - # No handling for now. - return node_classes.Unknown() - - @property - def attr___cached__(self): - # No handling for now. 
- return node_classes.Unknown() - - -class FunctionModel(ObjectModel): - @property - def attr___name__(self): - return node_classes.Const(value=self._instance.name, parent=self._instance) - - @property - def attr___doc__(self): - return node_classes.Const(value=self._instance.doc, parent=self._instance) - - @property - def attr___qualname__(self): - return node_classes.Const(value=self._instance.qname(), parent=self._instance) - - @property - def attr___defaults__(self): - func = self._instance - if not func.args.defaults: - return node_classes.Const(value=None, parent=func) - - defaults_obj = node_classes.Tuple(parent=func) - defaults_obj.postinit(func.args.defaults) - return defaults_obj - - @property - def attr___annotations__(self): - obj = node_classes.Dict(parent=self._instance) - - if not self._instance.returns: - returns = None - else: - returns = self._instance.returns - - args = self._instance.args - pair_annotations = itertools.chain( - zip(args.args or [], args.annotations), - zip(args.kwonlyargs, args.kwonlyargs_annotations), - zip(args.posonlyargs or [], args.posonlyargs_annotations), - ) - - annotations = { - arg.name: annotation for (arg, annotation) in pair_annotations if annotation - } - if args.varargannotation: - annotations[args.vararg] = args.varargannotation - if args.kwargannotation: - annotations[args.kwarg] = args.kwargannotation - if returns: - annotations["return"] = returns - - items = [ - (node_classes.Const(key, parent=obj), value) - for (key, value) in annotations.items() - ] - - obj.postinit(items) - return obj - - @property - def attr___dict__(self): - return node_classes.Dict(parent=self._instance) - - attr___globals__ = attr___dict__ - - @property - def attr___kwdefaults__(self): - def _default_args(args, parent): - for arg in args.kwonlyargs: - try: - default = args.default_value(arg.name) - except exceptions.NoDefault: - continue - - name = node_classes.Const(arg.name, parent=parent) - yield name, default - - args = self._instance.args - obj = node_classes.Dict(parent=self._instance) - defaults = dict(_default_args(args, obj)) - - obj.postinit(list(defaults.items())) - return obj - - @property - def attr___module__(self): - return node_classes.Const(self._instance.root().qname()) - - @property - def attr___get__(self): - # pylint: disable=import-outside-toplevel; circular import - from astroid import bases - - func = self._instance - - class DescriptorBoundMethod(bases.BoundMethod): - """Bound method which knows how to understand calling descriptor binding.""" - - def implicit_parameters(self): - # Different than BoundMethod since the signature - # is different. - return 0 - - def infer_call_result(self, caller, context=None): - if len(caller.args) > 2 or len(caller.args) < 1: - raise exceptions.InferenceError( - "Invalid arguments for descriptor binding", - target=self, - context=context, - ) - - context = contextmod.copy_context(context) - cls = next(caller.args[0].infer(context=context)) - - if cls is astroid.Uninferable: - raise exceptions.InferenceError( - "Invalid class inferred", target=self, context=context - ) - - # For some reason func is a Node that the below - # code is not expecting - if isinstance(func, bases.BoundMethod): - yield func - return - - # Rebuild the original value, but with the parent set as the - # class where it will be bound. 
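                # Re-parenting to *cls* makes scope lookups and qname() on
                # the rebuilt function resolve against the class it is being
                # bound to, rather than against its original definition site.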
- new_func = func.__class__( - name=func.name, - doc=func.doc, - lineno=func.lineno, - col_offset=func.col_offset, - parent=cls, - ) - # pylint: disable=no-member - new_func.postinit(func.args, func.body, func.decorators, func.returns) - - # Build a proper bound method that points to our newly built function. - proxy = bases.UnboundMethod(new_func) - yield bases.BoundMethod(proxy=proxy, bound=cls) - - @property - def args(self): - """Overwrite the underlying args to match those of the underlying func - - Usually the underlying *func* is a function/method, as in: - - def test(self): - pass - - This has only the *self* parameter but when we access test.__get__ - we get a new object which has two parameters, *self* and *type*. - """ - nonlocal func - positional_or_keyword_params = func.args.args.copy() - positional_or_keyword_params.append(astroid.AssignName(name="type")) - - positional_only_params = func.args.posonlyargs.copy() - - arguments = astroid.Arguments(parent=func.args.parent) - arguments.postinit( - args=positional_or_keyword_params, - posonlyargs=positional_only_params, - defaults=[], - kwonlyargs=[], - kw_defaults=[], - annotations=[], - ) - return arguments - - return DescriptorBoundMethod(proxy=self._instance, bound=self._instance) - - # These are here just for completion. - @property - def attr___ne__(self): - return node_classes.Unknown() - - attr___subclasshook__ = attr___ne__ - attr___str__ = attr___ne__ - attr___sizeof__ = attr___ne__ - attr___setattr___ = attr___ne__ - attr___repr__ = attr___ne__ - attr___reduce__ = attr___ne__ - attr___reduce_ex__ = attr___ne__ - attr___new__ = attr___ne__ - attr___lt__ = attr___ne__ - attr___eq__ = attr___ne__ - attr___gt__ = attr___ne__ - attr___format__ = attr___ne__ - attr___delattr___ = attr___ne__ - attr___getattribute__ = attr___ne__ - attr___hash__ = attr___ne__ - attr___init__ = attr___ne__ - attr___dir__ = attr___ne__ - attr___call__ = attr___ne__ - attr___class__ = attr___ne__ - attr___closure__ = attr___ne__ - attr___code__ = attr___ne__ - - -class ClassModel(ObjectModel): - @property - def attr___module__(self): - return node_classes.Const(self._instance.root().qname()) - - @property - def attr___name__(self): - return node_classes.Const(self._instance.name) - - @property - def attr___qualname__(self): - return node_classes.Const(self._instance.qname()) - - @property - def attr___doc__(self): - return node_classes.Const(self._instance.doc) - - @property - def attr___mro__(self): - if not self._instance.newstyle: - raise exceptions.AttributeInferenceError( - target=self._instance, attribute="__mro__" - ) - - mro = self._instance.mro() - obj = node_classes.Tuple(parent=self._instance) - obj.postinit(mro) - return obj - - @property - def attr_mro(self): - if not self._instance.newstyle: - raise exceptions.AttributeInferenceError( - target=self._instance, attribute="mro" - ) - - # pylint: disable=import-outside-toplevel; circular import - from astroid import bases - - other_self = self - - # Cls.mro is a method and we need to return one in order to have a proper inference. - # The method we're returning is capable of inferring the underlying MRO though. 
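        # Calling the returned method (i.e. inferring ``SomeClass.mro()``)
        # simply yields the ``__mro__`` tuple computed above.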
- class MroBoundMethod(bases.BoundMethod): - def infer_call_result(self, caller, context=None): - yield other_self.attr___mro__ - - implicit_metaclass = self._instance.implicit_metaclass() - mro_method = implicit_metaclass.locals["mro"][0] - return MroBoundMethod(proxy=mro_method, bound=implicit_metaclass) - - @property - def attr___bases__(self): - obj = node_classes.Tuple() - context = contextmod.InferenceContext() - elts = list(self._instance._inferred_bases(context)) - obj.postinit(elts=elts) - return obj - - @property - def attr___class__(self): - # pylint: disable=import-outside-toplevel; circular import - from astroid import helpers - - return helpers.object_type(self._instance) - - @property - def attr___subclasses__(self): - """Get the subclasses of the underlying class - - This looks only in the current module for retrieving the subclasses, - thus it might miss a couple of them. - """ - # pylint: disable=import-outside-toplevel; circular import - from astroid import bases - from astroid import scoped_nodes - - if not self._instance.newstyle: - raise exceptions.AttributeInferenceError( - target=self._instance, attribute="__subclasses__" - ) - - qname = self._instance.qname() - root = self._instance.root() - classes = [ - cls - for cls in root.nodes_of_class(scoped_nodes.ClassDef) - if cls != self._instance and cls.is_subtype_of(qname) - ] - - obj = node_classes.List(parent=self._instance) - obj.postinit(classes) - - class SubclassesBoundMethod(bases.BoundMethod): - def infer_call_result(self, caller, context=None): - yield obj - - implicit_metaclass = self._instance.implicit_metaclass() - subclasses_method = implicit_metaclass.locals["__subclasses__"][0] - return SubclassesBoundMethod(proxy=subclasses_method, bound=implicit_metaclass) - - @property - def attr___dict__(self): - return node_classes.Dict(parent=self._instance) - - -class SuperModel(ObjectModel): - @property - def attr___thisclass__(self): - return self._instance.mro_pointer - - @property - def attr___self_class__(self): - return self._instance._self_class - - @property - def attr___self__(self): - return self._instance.type - - @property - def attr___class__(self): - return self._instance._proxied - - -class UnboundMethodModel(ObjectModel): - @property - def attr___class__(self): - # pylint: disable=import-outside-toplevel; circular import - from astroid import helpers - - return helpers.object_type(self._instance) - - @property - def attr___func__(self): - return self._instance._proxied - - @property - def attr___self__(self): - return node_classes.Const(value=None, parent=self._instance) - - attr_im_func = attr___func__ - attr_im_class = attr___class__ - attr_im_self = attr___self__ - - -class BoundMethodModel(FunctionModel): - @property - def attr___func__(self): - return self._instance._proxied._proxied - - @property - def attr___self__(self): - return self._instance.bound - - -class GeneratorModel(FunctionModel): - def __new__(cls, *args, **kwargs): - # Append the values from the GeneratorType unto this object. 
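        # Each method found on the astroid 'generator' stub in the builtins
        # module (send, throw, close, ...) is re-exported as an attr_*
        # property, so attribute lookups on generator instances resolve
        # through this model.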
- ret = super(GeneratorModel, cls).__new__(cls, *args, **kwargs) - generator = astroid.MANAGER.builtins_module["generator"] - for name, values in generator.locals.items(): - method = values[0] - patched = lambda cls, meth=method: meth - - setattr(type(ret), IMPL_PREFIX + name, property(patched)) - - return ret - - @property - def attr___name__(self): - return node_classes.Const( - value=self._instance.parent.name, parent=self._instance - ) - - @property - def attr___doc__(self): - return node_classes.Const( - value=self._instance.parent.doc, parent=self._instance - ) - - -class AsyncGeneratorModel(GeneratorModel): - def __new__(cls, *args, **kwargs): - # Append the values from the AGeneratorType unto this object. - ret = super().__new__(cls, *args, **kwargs) - astroid_builtins = astroid.MANAGER.builtins_module - generator = astroid_builtins.get("async_generator") - if generator is None: - # Make it backward compatible. - generator = astroid_builtins.get("generator") - - for name, values in generator.locals.items(): - method = values[0] - patched = lambda cls, meth=method: meth - - setattr(type(ret), IMPL_PREFIX + name, property(patched)) - - return ret - - -class InstanceModel(ObjectModel): - @property - def attr___class__(self): - return self._instance._proxied - - @property - def attr___module__(self): - return node_classes.Const(self._instance.root().qname()) - - @property - def attr___doc__(self): - return node_classes.Const(self._instance.doc) - - @property - def attr___dict__(self): - return _dunder_dict(self._instance, self._instance.instance_attrs) - - -# Exception instances - - -class ExceptionInstanceModel(InstanceModel): - @property - def attr_args(self): - message = node_classes.Const("") - args = node_classes.Tuple(parent=self._instance) - args.postinit((message,)) - return args - - @property - def attr___traceback__(self): - builtins_ast_module = astroid.MANAGER.builtins_module - traceback_type = builtins_ast_module[types.TracebackType.__name__] - return traceback_type.instantiate_class() - - -class SyntaxErrorInstanceModel(ExceptionInstanceModel): - @property - def attr_text(self): - return node_classes.Const("") - - -class OSErrorInstanceModel(ExceptionInstanceModel): - @property - def attr_filename(self): - return node_classes.Const("") - - @property - def attr_errno(self): - return node_classes.Const(0) - - @property - def attr_strerror(self): - return node_classes.Const("") - - attr_filename2 = attr_filename - - -class ImportErrorInstanceModel(ExceptionInstanceModel): - @property - def attr_name(self): - return node_classes.Const("") - - @property - def attr_path(self): - return node_classes.Const("") - - -BUILTIN_EXCEPTIONS = { - "builtins.SyntaxError": SyntaxErrorInstanceModel, - "builtins.ImportError": ImportErrorInstanceModel, - # These are all similar to OSError in terms of attributes - "builtins.OSError": OSErrorInstanceModel, - "builtins.BlockingIOError": OSErrorInstanceModel, - "builtins.BrokenPipeError": OSErrorInstanceModel, - "builtins.ChildProcessError": OSErrorInstanceModel, - "builtins.ConnectionAbortedError": OSErrorInstanceModel, - "builtins.ConnectionError": OSErrorInstanceModel, - "builtins.ConnectionRefusedError": OSErrorInstanceModel, - "builtins.ConnectionResetError": OSErrorInstanceModel, - "builtins.FileExistsError": OSErrorInstanceModel, - "builtins.FileNotFoundError": OSErrorInstanceModel, - "builtins.InterruptedError": OSErrorInstanceModel, - "builtins.IsADirectoryError": OSErrorInstanceModel, - "builtins.NotADirectoryError": 
OSErrorInstanceModel, - "builtins.PermissionError": OSErrorInstanceModel, - "builtins.ProcessLookupError": OSErrorInstanceModel, - "builtins.TimeoutError": OSErrorInstanceModel, -} - - -class DictModel(ObjectModel): - @property - def attr___class__(self): - return self._instance._proxied - - def _generic_dict_attribute(self, obj, name): - """Generate a bound method that can infer the given *obj*.""" - - class DictMethodBoundMethod(astroid.BoundMethod): - def infer_call_result(self, caller, context=None): - yield obj - - meth = next(self._instance._proxied.igetattr(name)) - return DictMethodBoundMethod(proxy=meth, bound=self._instance) - - @property - def attr_items(self): - elems = [] - obj = node_classes.List(parent=self._instance) - for key, value in self._instance.items: - elem = node_classes.Tuple(parent=obj) - elem.postinit((key, value)) - elems.append(elem) - obj.postinit(elts=elems) - - # pylint: disable=import-outside-toplevel; circular import - from astroid import objects - - obj = objects.DictItems(obj) - return self._generic_dict_attribute(obj, "items") - - @property - def attr_keys(self): - keys = [key for (key, _) in self._instance.items] - obj = node_classes.List(parent=self._instance) - obj.postinit(elts=keys) - - # pylint: disable=import-outside-toplevel; circular import - from astroid import objects - - obj = objects.DictKeys(obj) - return self._generic_dict_attribute(obj, "keys") - - @property - def attr_values(self): - - values = [value for (_, value) in self._instance.items] - obj = node_classes.List(parent=self._instance) - obj.postinit(values) - - # pylint: disable=import-outside-toplevel; circular import - from astroid import objects - - obj = objects.DictValues(obj) - return self._generic_dict_attribute(obj, "values") diff --git a/venv/lib/python3.8/site-packages/astroid/manager.py b/venv/lib/python3.8/site-packages/astroid/manager.py deleted file mode 100644 index e5fd0d6b..00000000 --- a/venv/lib/python3.8/site-packages/astroid/manager.py +++ /dev/null @@ -1,337 +0,0 @@ -# Copyright (c) 2006-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014 BioGeek -# Copyright (c) 2014 Google, Inc. -# Copyright (c) 2014 Eevee (Alex Munroe) -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2016 Derek Gustafson -# Copyright (c) 2017 Iva Miholic -# Copyright (c) 2018 Bryce Guinta -# Copyright (c) 2018 Nick Drozd - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""astroid manager: avoid multiple astroid build of a same module when -possible by providing a class responsible to get astroid representation -from various source and using a cache of built modules) -""" - -import os -import zipimport - -from astroid import exceptions -from astroid.interpreter._import import spec -from astroid import modutils -from astroid import transforms - - -ZIP_IMPORT_EXTS = (".zip", ".egg", ".whl") - - -def safe_repr(obj): - try: - return repr(obj) - except Exception: # pylint: disable=broad-except - return "???" - - -class AstroidManager: - """the astroid manager, responsible to build astroid from files - or modules. - - Use the Borg pattern. 
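
    A minimal sketch of that pattern (all instances share one state dict)::

        class Borg:
            _shared_state = {}

            def __init__(self):
                self.__dict__ = self._shared_state

    Constructing AstroidManager() anywhere thus always yields the same
    caches and transforms.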
- """ - - name = "astroid loader" - brain = {} - - def __init__(self): - self.__dict__ = AstroidManager.brain - if not self.__dict__: - # NOTE: cache entries are added by the [re]builder - self.astroid_cache = {} - self._mod_file_cache = {} - self._failed_import_hooks = [] - self.always_load_extensions = False - self.optimize_ast = False - self.extension_package_whitelist = set() - self._transform = transforms.TransformVisitor() - - # Export these APIs for convenience - self.register_transform = self._transform.register_transform - self.unregister_transform = self._transform.unregister_transform - self.max_inferable_values = 100 - - @property - def builtins_module(self): - return self.astroid_cache["builtins"] - - def visit_transforms(self, node): - """Visit the transforms and apply them to the given *node*.""" - return self._transform.visit(node) - - def ast_from_file(self, filepath, modname=None, fallback=True, source=False): - """given a module name, return the astroid object""" - try: - filepath = modutils.get_source_file(filepath, include_no_ext=True) - source = True - except modutils.NoSourceFile: - pass - if modname is None: - try: - modname = ".".join(modutils.modpath_from_file(filepath)) - except ImportError: - modname = filepath - if ( - modname in self.astroid_cache - and self.astroid_cache[modname].file == filepath - ): - return self.astroid_cache[modname] - if source: - # pylint: disable=import-outside-toplevel; circular import - from astroid.builder import AstroidBuilder - - return AstroidBuilder(self).file_build(filepath, modname) - if fallback and modname: - return self.ast_from_module_name(modname) - raise exceptions.AstroidBuildingError( - "Unable to build an AST for {path}.", path=filepath - ) - - def _build_stub_module(self, modname): - # pylint: disable=import-outside-toplevel; circular import - from astroid.builder import AstroidBuilder - - return AstroidBuilder(self).string_build("", modname) - - def _build_namespace_module(self, modname, path): - # pylint: disable=import-outside-toplevel; circular import - from astroid.builder import build_namespace_package_module - - return build_namespace_package_module(modname, path) - - def _can_load_extension(self, modname): - if self.always_load_extensions: - return True - if modutils.is_standard_module(modname): - return True - parts = modname.split(".") - return any( - ".".join(parts[:x]) in self.extension_package_whitelist - for x in range(1, len(parts) + 1) - ) - - def ast_from_module_name(self, modname, context_file=None): - """given a module name, return the astroid object""" - if modname in self.astroid_cache: - return self.astroid_cache[modname] - if modname == "__main__": - return self._build_stub_module(modname) - old_cwd = os.getcwd() - if context_file: - os.chdir(os.path.dirname(context_file)) - try: - found_spec = self.file_from_module_name(modname, context_file) - if found_spec.type == spec.ModuleType.PY_ZIPMODULE: - module = self.zip_import_data(found_spec.location) - if module is not None: - return module - - elif found_spec.type in ( - spec.ModuleType.C_BUILTIN, - spec.ModuleType.C_EXTENSION, - ): - if ( - found_spec.type == spec.ModuleType.C_EXTENSION - and not self._can_load_extension(modname) - ): - return self._build_stub_module(modname) - try: - module = modutils.load_module_from_name(modname) - except Exception as ex: - raise exceptions.AstroidImportError( - "Loading {modname} failed with:\n{error}", - modname=modname, - path=found_spec.location, - ) from ex - return self.ast_from_module(module, modname) 
- - elif found_spec.type == spec.ModuleType.PY_COMPILED: - raise exceptions.AstroidImportError( - "Unable to load compiled module {modname}.", - modname=modname, - path=found_spec.location, - ) - - elif found_spec.type == spec.ModuleType.PY_NAMESPACE: - return self._build_namespace_module( - modname, found_spec.submodule_search_locations - ) - - if found_spec.location is None: - raise exceptions.AstroidImportError( - "Can't find a file for module {modname}.", modname=modname - ) - - return self.ast_from_file(found_spec.location, modname, fallback=False) - except exceptions.AstroidBuildingError as e: - for hook in self._failed_import_hooks: - try: - return hook(modname) - except exceptions.AstroidBuildingError: - pass - raise e - finally: - os.chdir(old_cwd) - - def zip_import_data(self, filepath): - if zipimport is None: - return None - - # pylint: disable=import-outside-toplevel; circular import - from astroid.builder import AstroidBuilder - - builder = AstroidBuilder(self) - for ext in ZIP_IMPORT_EXTS: - try: - eggpath, resource = filepath.rsplit(ext + os.path.sep, 1) - except ValueError: - continue - try: - importer = zipimport.zipimporter(eggpath + ext) - zmodname = resource.replace(os.path.sep, ".") - if importer.is_package(resource): - zmodname = zmodname + ".__init__" - module = builder.string_build( - importer.get_source(resource), zmodname, filepath - ) - return module - except Exception: # pylint: disable=broad-except - continue - return None - - def file_from_module_name(self, modname, contextfile): - try: - value = self._mod_file_cache[(modname, contextfile)] - except KeyError: - try: - value = modutils.file_info_from_modpath( - modname.split("."), context_file=contextfile - ) - except ImportError as ex: - value = exceptions.AstroidImportError( - "Failed to import module {modname} with error:\n{error}.", - modname=modname, - error=ex, - ) - self._mod_file_cache[(modname, contextfile)] = value - if isinstance(value, exceptions.AstroidBuildingError): - raise value - return value - - def ast_from_module(self, module, modname=None): - """given an imported module, return the astroid object""" - modname = modname or module.__name__ - if modname in self.astroid_cache: - return self.astroid_cache[modname] - try: - # some builtin modules don't have __file__ attribute - filepath = module.__file__ - if modutils.is_python_source(filepath): - return self.ast_from_file(filepath, modname) - except AttributeError: - pass - - # pylint: disable=import-outside-toplevel; circular import - from astroid.builder import AstroidBuilder - - return AstroidBuilder(self).module_build(module, modname) - - def ast_from_class(self, klass, modname=None): - """get astroid for the given class""" - if modname is None: - try: - modname = klass.__module__ - except AttributeError as exc: - raise exceptions.AstroidBuildingError( - "Unable to get module for class {class_name}.", - cls=klass, - class_repr=safe_repr(klass), - modname=modname, - ) from exc - modastroid = self.ast_from_module_name(modname) - return modastroid.getattr(klass.__name__)[0] # XXX - - def infer_ast_from_something(self, obj, context=None): - """infer astroid for the given class""" - if hasattr(obj, "__class__") and not isinstance(obj, type): - klass = obj.__class__ - else: - klass = obj - try: - modname = klass.__module__ - except AttributeError as exc: - raise exceptions.AstroidBuildingError( - "Unable to get module for {class_repr}.", - cls=klass, - class_repr=safe_repr(klass), - ) from exc - except Exception as exc: - raise 
exceptions.AstroidImportError( - "Unexpected error while retrieving module for {class_repr}:\n" - "{error}", - cls=klass, - class_repr=safe_repr(klass), - ) from exc - try: - name = klass.__name__ - except AttributeError as exc: - raise exceptions.AstroidBuildingError( - "Unable to get name for {class_repr}:\n", - cls=klass, - class_repr=safe_repr(klass), - ) from exc - except Exception as exc: - raise exceptions.AstroidImportError( - "Unexpected error while retrieving name for {class_repr}:\n" "{error}", - cls=klass, - class_repr=safe_repr(klass), - ) from exc - # take care, on living object __module__ is regularly wrong :( - modastroid = self.ast_from_module_name(modname) - if klass is obj: - for inferred in modastroid.igetattr(name, context): - yield inferred - else: - for inferred in modastroid.igetattr(name, context): - yield inferred.instantiate_class() - - def register_failed_import_hook(self, hook): - """Registers a hook to resolve imports that cannot be found otherwise. - - `hook` must be a function that accepts a single argument `modname` which - contains the name of the module or package that could not be imported. - If `hook` can resolve the import, must return a node of type `astroid.Module`, - otherwise, it must raise `AstroidBuildingError`. - """ - self._failed_import_hooks.append(hook) - - def cache_module(self, module): - """Cache a module if no module with the same name is known yet.""" - self.astroid_cache.setdefault(module.name, module) - - def bootstrap(self): - """Bootstrap the required AST modules needed for the manager to work - - The bootstrap usually involves building the AST for the builtins - module, which is required by the rest of astroid to work correctly. - """ - from astroid import raw_building # pylint: disable=import-outside-toplevel - - raw_building._astroid_bootstrapping() - - def clear_cache(self): - """Clear the underlying cache. Also bootstraps the builtins module.""" - self.astroid_cache.clear() - self.bootstrap() diff --git a/venv/lib/python3.8/site-packages/astroid/mixins.py b/venv/lib/python3.8/site-packages/astroid/mixins.py deleted file mode 100644 index 497a8400..00000000 --- a/venv/lib/python3.8/site-packages/astroid/mixins.py +++ /dev/null @@ -1,160 +0,0 @@ -# Copyright (c) 2010-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2014-2016, 2018 Claudiu Popa -# Copyright (c) 2014 Google, Inc. -# Copyright (c) 2014 Eevee (Alex Munroe) -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 Florian Bruhin -# Copyright (c) 2016 Jakub Wilk -# Copyright (c) 2018 Nick Drozd - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""This module contains some mixins for the different nodes. 
-""" -import itertools - -from astroid import decorators -from astroid import exceptions - - -class BlockRangeMixIn: - """override block range """ - - @decorators.cachedproperty - def blockstart_tolineno(self): - return self.lineno - - def _elsed_block_range(self, lineno, orelse, last=None): - """handle block line numbers range for try/finally, for, if and while - statements - """ - if lineno == self.fromlineno: - return lineno, lineno - if orelse: - if lineno >= orelse[0].fromlineno: - return lineno, orelse[-1].tolineno - return lineno, orelse[0].fromlineno - 1 - return lineno, last or self.tolineno - - -class FilterStmtsMixin: - """Mixin for statement filtering and assignment type""" - - def _get_filtered_stmts(self, _, node, _stmts, mystmt): - """method used in _filter_stmts to get statements and trigger break""" - if self.statement() is mystmt: - # original node's statement is the assignment, only keep - # current node (gen exp, list comp) - return [node], True - return _stmts, False - - def assign_type(self): - return self - - -class AssignTypeMixin: - def assign_type(self): - return self - - def _get_filtered_stmts(self, lookup_node, node, _stmts, mystmt): - """method used in filter_stmts""" - if self is mystmt: - return _stmts, True - if self.statement() is mystmt: - # original node's statement is the assignment, only keep - # current node (gen exp, list comp) - return [node], True - return _stmts, False - - -class ParentAssignTypeMixin(AssignTypeMixin): - def assign_type(self): - return self.parent.assign_type() - - -class ImportFromMixin(FilterStmtsMixin): - """MixIn for From and Import Nodes""" - - def _infer_name(self, frame, name): - return name - - def do_import_module(self, modname=None): - """return the ast for a module whose name is imported by - """ - # handle special case where we are on a package node importing a module - # using the same name as the package, which may end in an infinite loop - # on relative imports - # XXX: no more needed ? - mymodule = self.root() - level = getattr(self, "level", None) # Import as no level - if modname is None: - modname = self.modname - # XXX we should investigate deeper if we really want to check - # importing itself: modname and mymodule.name be relative or absolute - if mymodule.relative_to_absolute_name(modname, level) == mymodule.name: - # FIXME: we used to raise InferenceError here, but why ? - return mymodule - - return mymodule.import_module( - modname, level=level, relative_only=level and level >= 1 - ) - - def real_name(self, asname): - """get name from 'as' name""" - for name, _asname in self.names: - if name == "*": - return asname - if not _asname: - name = name.split(".", 1)[0] - _asname = name - if asname == _asname: - return name - raise exceptions.AttributeInferenceError( - "Could not find original name for {attribute} in {target!r}", - target=self, - attribute=asname, - ) - - -class MultiLineBlockMixin: - """Mixin for nodes with multi-line blocks, e.g. For and FunctionDef. - Note that this does not apply to every node with a `body` field. - For instance, an If node has a multi-line body, but the body of an - IfExpr is not multi-line, and hence cannot contain Return nodes, - Assign nodes, etc. 
- """ - - @decorators.cachedproperty - def _multi_line_blocks(self): - return tuple(getattr(self, field) for field in self._multi_line_block_fields) - - def _get_return_nodes_skip_functions(self): - for block in self._multi_line_blocks: - for child_node in block: - if child_node.is_function: - continue - yield from child_node._get_return_nodes_skip_functions() - - def _get_yield_nodes_skip_lambdas(self): - for block in self._multi_line_blocks: - for child_node in block: - if child_node.is_lambda: - continue - yield from child_node._get_yield_nodes_skip_lambdas() - - @decorators.cached - def _get_assign_nodes(self): - children_assign_nodes = ( - child_node._get_assign_nodes() - for block in self._multi_line_blocks - for child_node in block - ) - return list(itertools.chain.from_iterable(children_assign_nodes)) - - -class NoChildrenMixin: - """Mixin for nodes with no children, e.g. Pass.""" - - def get_children(self): - yield from () diff --git a/venv/lib/python3.8/site-packages/astroid/modutils.py b/venv/lib/python3.8/site-packages/astroid/modutils.py deleted file mode 100644 index 0c009b13..00000000 --- a/venv/lib/python3.8/site-packages/astroid/modutils.py +++ /dev/null @@ -1,698 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014 Google, Inc. -# Copyright (c) 2014 Denis Laxalde -# Copyright (c) 2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2014 Eevee (Alex Munroe) -# Copyright (c) 2015 Florian Bruhin -# Copyright (c) 2015 Radosław Ganczarek -# Copyright (c) 2016 Derek Gustafson -# Copyright (c) 2016 Jakub Wilk -# Copyright (c) 2016 Ceridwen -# Copyright (c) 2018 Mario Corchero -# Copyright (c) 2018 Mario Corchero -# Copyright (c) 2018 Anthony Sottile - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Python modules manipulation utility functions. - -:type PY_SOURCE_EXTS: tuple(str) -:var PY_SOURCE_EXTS: list of possible python source file extension - -:type STD_LIB_DIRS: set of str -:var STD_LIB_DIRS: directories where standard modules are located - -:type BUILTIN_MODULES: dict -:var BUILTIN_MODULES: dictionary with builtin module names has key -""" -import imp -import os -import platform -import sys -import itertools -from distutils.sysconfig import get_python_lib # pylint: disable=import-error - -# pylint: disable=import-error, no-name-in-module -from distutils.errors import DistutilsPlatformError - -# distutils is replaced by virtualenv with a module that does -# weird path manipulations in order to get to the -# real distutils module. - -from .interpreter._import import spec -from .interpreter._import import util - -if sys.platform.startswith("win"): - PY_SOURCE_EXTS = ("py", "pyw") - PY_COMPILED_EXTS = ("dll", "pyd") -else: - PY_SOURCE_EXTS = ("py",) - PY_COMPILED_EXTS = ("so",) - - -try: - # The explicit sys.prefix is to work around a patch in virtualenv that - # replaces the 'real' sys.prefix (i.e. the location of the binary) - # with the prefix from which the virtualenv was created. This throws - # off the detection logic for standard library modules, thus the - # workaround. - STD_LIB_DIRS = { - get_python_lib(standard_lib=True, prefix=sys.prefix), - # Take care of installations where exec_prefix != prefix. 
- get_python_lib(standard_lib=True, prefix=sys.exec_prefix), - get_python_lib(standard_lib=True), - } -# get_python_lib(standard_lib=1) is not available on pypy, set STD_LIB_DIR to -# non-valid path, see https://bugs.pypy.org/issue1164 -except DistutilsPlatformError: - STD_LIB_DIRS = set() - -if os.name == "nt": - STD_LIB_DIRS.add(os.path.join(sys.prefix, "dlls")) - try: - # real_prefix is defined when running inside virtual environments, - # created with the **virtualenv** library. - STD_LIB_DIRS.add(os.path.join(sys.real_prefix, "dlls")) - except AttributeError: - # sys.base_exec_prefix is always defined, but in a virtual environment - # created with the stdlib **venv** module, it points to the original - # installation, if the virtual env is activated. - try: - STD_LIB_DIRS.add(os.path.join(sys.base_exec_prefix, "dlls")) - except AttributeError: - pass - -if platform.python_implementation() == "PyPy": - _root = os.path.join(sys.prefix, "lib_pypy") - STD_LIB_DIRS.add(_root) - try: - # real_prefix is defined when running inside virtualenv. - STD_LIB_DIRS.add(os.path.join(sys.real_prefix, "lib_pypy")) - except AttributeError: - pass - del _root -if os.name == "posix": - # Need the real prefix is we're under a virtualenv, otherwise - # the usual one will do. - try: - prefix = sys.real_prefix - except AttributeError: - prefix = sys.prefix - - def _posix_path(path): - base_python = "python%d.%d" % sys.version_info[:2] - return os.path.join(prefix, path, base_python) - - STD_LIB_DIRS.add(_posix_path("lib")) - if sys.maxsize > 2 ** 32: - # This tries to fix a problem with /usr/lib64 builds, - # where systems are running both 32-bit and 64-bit code - # on the same machine, which reflects into the places where - # standard library could be found. More details can be found - # here http://bugs.python.org/issue1294959. - # An easy reproducing case would be - # https://github.com/PyCQA/pylint/issues/712#issuecomment-163178753 - STD_LIB_DIRS.add(_posix_path("lib64")) - -EXT_LIB_DIRS = {get_python_lib(), get_python_lib(True)} -IS_JYTHON = platform.python_implementation() == "Jython" -BUILTIN_MODULES = dict.fromkeys(sys.builtin_module_names, True) - - -class NoSourceFile(Exception): - """exception raised when we are not able to get a python - source file for a precompiled file - """ - - -def _normalize_path(path): - return os.path.normcase(os.path.abspath(path)) - - -def _canonicalize_path(path): - return os.path.realpath(os.path.expanduser(path)) - - -def _path_from_filename(filename, is_jython=IS_JYTHON): - if not is_jython: - return filename - head, has_pyclass, _ = filename.partition("$py.class") - if has_pyclass: - return head + ".py" - return filename - - -def _handle_blacklist(blacklist, dirnames, filenames): - """remove files/directories in the black list - - dirnames/filenames are usually from os.walk - """ - for norecurs in blacklist: - if norecurs in dirnames: - dirnames.remove(norecurs) - elif norecurs in filenames: - filenames.remove(norecurs) - - -_NORM_PATH_CACHE = {} - - -def _cache_normalize_path(path): - """abspath with caching""" - # _module_file calls abspath on every path in sys.path every time it's - # called; on a larger codebase this easily adds up to half a second just - # assembling path components. This cache alleviates that. 
- try: - return _NORM_PATH_CACHE[path] - except KeyError: - if not path: # don't cache result for '' - return _normalize_path(path) - result = _NORM_PATH_CACHE[path] = _normalize_path(path) - return result - - -def load_module_from_name(dotted_name, path=None, use_sys=True): - """Load a Python module from its name. - - :type dotted_name: str - :param dotted_name: python name of a module or package - - :type path: list or None - :param path: - optional list of path where the module or package should be - searched (use sys.path if nothing or None is given) - - :type use_sys: bool - :param use_sys: - boolean indicating whether the sys.modules dictionary should be - used or not - - - :raise ImportError: if the module or package is not found - - :rtype: module - :return: the loaded module - """ - return load_module_from_modpath(dotted_name.split("."), path, use_sys) - - -def load_module_from_modpath(parts, path=None, use_sys=1): - """Load a python module from its split name. - - :type parts: list(str) or tuple(str) - :param parts: - python name of a module or package split on '.' - - :type path: list or None - :param path: - optional list of path where the module or package should be - searched (use sys.path if nothing or None is given) - - :type use_sys: bool - :param use_sys: - boolean indicating whether the sys.modules dictionary should be used or not - - :raise ImportError: if the module or package is not found - - :rtype: module - :return: the loaded module - """ - if use_sys: - try: - return sys.modules[".".join(parts)] - except KeyError: - pass - modpath = [] - prevmodule = None - for part in parts: - modpath.append(part) - curname = ".".join(modpath) - module = None - if len(modpath) != len(parts): - # even with use_sys=False, should try to get outer packages from sys.modules - module = sys.modules.get(curname) - elif use_sys: - # because it may have been indirectly loaded through a parent - module = sys.modules.get(curname) - if module is None: - mp_file, mp_filename, mp_desc = imp.find_module(part, path) - module = imp.load_module(curname, mp_file, mp_filename, mp_desc) - # mp_file still needs to be closed. - if mp_file: - mp_file.close() - if prevmodule: - setattr(prevmodule, part, module) - _file = getattr(module, "__file__", "") - prevmodule = module - if not _file and util.is_namespace(curname): - continue - if not _file and len(modpath) != len(parts): - raise ImportError("no module in %s" % ".".join(parts[len(modpath) :])) - path = [os.path.dirname(_file)] - return module - - -def load_module_from_file(filepath, path=None, use_sys=True, extrapath=None): - """Load a Python module from it's path. 
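A usage sketch for the loaders above (assuming astroid is installed). With `use_sys=True`, a module that is already imported is returned straight from `sys.modules` without touching `imp`:

    import os
    from astroid.modutils import load_module_from_name

    mod = load_module_from_name("os.path")
    print(mod is os.path)  # should print True: a plain sys.modules cache hit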
- - :type filepath: str - :param filepath: path to the python module or package - - :type path: list or None - :param path: - optional list of path where the module or package should be - searched (use sys.path if nothing or None is given) - - :type use_sys: bool - :param use_sys: - boolean indicating whether the sys.modules dictionary should be - used or not - - - :raise ImportError: if the module or package is not found - - :rtype: module - :return: the loaded module - """ - modpath = modpath_from_file(filepath, extrapath) - return load_module_from_modpath(modpath, path, use_sys) - - -def check_modpath_has_init(path, mod_path): - """check there are some __init__.py all along the way""" - modpath = [] - for part in mod_path: - modpath.append(part) - path = os.path.join(path, part) - if not _has_init(path): - old_namespace = util.is_namespace(".".join(modpath)) - if not old_namespace: - return False - return True - - -def _get_relative_base_path(filename, path_to_check): - """Extracts the relative mod path of the file to import from - - Check if a file is within the passed in path and if so, returns the - relative mod path from the one passed in. - - If the filename is no in path_to_check, returns None - - Note this function will look for both abs and realpath of the file, - this allows to find the relative base path even if the file is a - symlink of a file in the passed in path - - Examples: - _get_relative_base_path("/a/b/c/d.py", "/a/b") -> ["c","d"] - _get_relative_base_path("/a/b/c/d.py", "/dev") -> None - """ - importable_path = None - path_to_check = os.path.normcase(path_to_check) - abs_filename = os.path.abspath(filename) - if os.path.normcase(abs_filename).startswith(path_to_check): - importable_path = abs_filename - - real_filename = os.path.realpath(filename) - if os.path.normcase(real_filename).startswith(path_to_check): - importable_path = real_filename - - if importable_path: - base_path = os.path.splitext(importable_path)[0] - relative_base_path = base_path[len(path_to_check) :] - return [pkg for pkg in relative_base_path.split(os.sep) if pkg] - - return None - - -def modpath_from_file_with_callback(filename, extrapath=None, is_package_cb=None): - filename = os.path.expanduser(_path_from_filename(filename)) - - if extrapath is not None: - for path_ in itertools.chain(map(_canonicalize_path, extrapath), extrapath): - path = os.path.abspath(path_) - if not path: - continue - submodpath = _get_relative_base_path(filename, path) - if not submodpath: - continue - if is_package_cb(path, submodpath[:-1]): - return extrapath[path_].split(".") + submodpath - - for path in itertools.chain(map(_canonicalize_path, sys.path), sys.path): - path = _cache_normalize_path(path) - if not path: - continue - modpath = _get_relative_base_path(filename, path) - if not modpath: - continue - if is_package_cb(path, modpath[:-1]): - return modpath - - raise ImportError( - "Unable to find module for %s in %s" % (filename, ", \n".join(sys.path)) - ) - - -def modpath_from_file(filename, extrapath=None): - """given a file path return the corresponding split module's name - (i.e name of a module or package split on '.') - - :type filename: str - :param filename: file's path for which we want the module's name - - :type extrapath: dict - :param extrapath: - optional extra search path, with path as key and package name for the path - as value. This is usually useful to handle package split in multiple - directories using __path__ trick. 
- - - :raise ImportError: - if the corresponding module's name has not been found - - :rtype: list(str) - :return: the corresponding split module's name - """ - return modpath_from_file_with_callback(filename, extrapath, check_modpath_has_init) - - -def file_from_modpath(modpath, path=None, context_file=None): - return file_info_from_modpath(modpath, path, context_file).location - - -def file_info_from_modpath(modpath, path=None, context_file=None): - """given a mod path (i.e. split module / package name), return the - corresponding file, giving priority to source file over precompiled - file if it exists - - :type modpath: list or tuple - :param modpath: - split module's name (i.e name of a module or package split - on '.') - (this means explicit relative imports that start with dots have - empty strings in this list!) - - :type path: list or None - :param path: - optional list of path where the module or package should be - searched (use sys.path if nothing or None is given) - - :type context_file: str or None - :param context_file: - context file to consider, necessary if the identifier has been - introduced using a relative import unresolvable in the actual - context (i.e. modutils) - - :raise ImportError: if there is no such module in the directory - - :rtype: (str or None, import type) - :return: - the path to the module's file or None if it's an integrated - builtin module such as 'sys' - """ - if context_file is not None: - context = os.path.dirname(context_file) - else: - context = context_file - if modpath[0] == "xml": - # handle _xmlplus - try: - return _spec_from_modpath(["_xmlplus"] + modpath[1:], path, context) - except ImportError: - return _spec_from_modpath(modpath, path, context) - elif modpath == ["os", "path"]: - # FIXME: currently ignoring search_path... - return spec.ModuleSpec( - name="os.path", location=os.path.__file__, module_type=imp.PY_SOURCE - ) - return _spec_from_modpath(modpath, path, context) - - -def get_module_part(dotted_name, context_file=None): - """given a dotted name return the module part of the name : - - >>> get_module_part('astroid.as_string.dump') - 'astroid.as_string' - - :type dotted_name: str - :param dotted_name: full name of the identifier we are interested in - - :type context_file: str or None - :param context_file: - context file to consider, necessary if the identifier has been - introduced using a relative import unresolvable in the actual - context (i.e. modutils) - - - :raise ImportError: if there is no such module in the directory - - :rtype: str or None - :return: - the module part of the name or None if we have not been able at - all to import the given name - - XXX: deprecated, since it doesn't handle package precedence over module - (see #10066) - """ - # os.path trick - if dotted_name.startswith("os.path"): - return "os.path" - parts = dotted_name.split(".") - if context_file is not None: - # first check for builtin module which won't be considered latter - # in that case (path != None) - if parts[0] in BUILTIN_MODULES: - if len(parts) > 2: - raise ImportError(dotted_name) - return parts[0] - # don't use += or insert, we want a new list to be created ! - path = None - starti = 0 - if parts[0] == "": - assert ( - context_file is not None - ), "explicit relative import, but no context_file?" 
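A sketch tying `modpath_from_file` and `file_from_modpath` together (assuming astroid sits in a standard site-packages layout; exact results depend on the interpreter and platform):

    import astroid
    from astroid.modutils import file_from_modpath, modpath_from_file

    print(modpath_from_file(astroid.__file__))  # typically ['astroid', '__init__']
    print(file_from_modpath(["os", "path"]))    # location of the os.path source,
                                                # via the special case above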
- path = [] # prevent resolving the import non-relatively - starti = 1 - while parts[starti] == "": # for all further dots: change context - starti += 1 - context_file = os.path.dirname(context_file) - for i in range(starti, len(parts)): - try: - file_from_modpath( - parts[starti : i + 1], path=path, context_file=context_file - ) - except ImportError: - if i < max(1, len(parts) - 2): - raise - return ".".join(parts[:i]) - return dotted_name - - -def get_module_files(src_directory, blacklist, list_all=False): - """given a package directory return a list of all available python - module's files in the package and its subpackages - - :type src_directory: str - :param src_directory: - path of the directory corresponding to the package - - :type blacklist: list or tuple - :param blacklist: iterable - list of files or directories to ignore. - - :type list_all: bool - :param list_all: - get files from all paths, including ones without __init__.py - - :rtype: list - :return: - the list of all available python module's files in the package and - its subpackages - """ - files = [] - for directory, dirnames, filenames in os.walk(src_directory): - if directory in blacklist: - continue - _handle_blacklist(blacklist, dirnames, filenames) - # check for __init__.py - if not list_all and "__init__.py" not in filenames: - dirnames[:] = () - continue - for filename in filenames: - if _is_python_file(filename): - src = os.path.join(directory, filename) - files.append(src) - return files - - -def get_source_file(filename, include_no_ext=False): - """given a python module's file name return the matching source file - name (the filename will be returned identically if it's already an - absolute path to a python source file...) - - :type filename: str - :param filename: python module's file name - - - :raise NoSourceFile: if no source file exists on the file system - - :rtype: str - :return: the absolute path of the source file if it exists - """ - filename = os.path.abspath(_path_from_filename(filename)) - base, orig_ext = os.path.splitext(filename) - for ext in PY_SOURCE_EXTS: - source_path = "%s.%s" % (base, ext) - if os.path.exists(source_path): - return source_path - if include_no_ext and not orig_ext and os.path.exists(base): - return base - raise NoSourceFile(filename) - - -def is_python_source(filename): - """ - rtype: bool - return: True if the filename is a python source file - """ - return os.path.splitext(filename)[1][1:] in PY_SOURCE_EXTS - - -def is_standard_module(modname, std_path=None): - """try to guess if a module is a standard python module (by default, - see `std_path` parameter's description) - - :type modname: str - :param modname: name of the module we are interested in - - :type std_path: list(str) or tuple(str) - :param std_path: list of path considered has standard - - - :rtype: bool - :return: - true if the module: - - is located on the path listed in one of the directory in `std_path` - - is a built-in module - """ - modname = modname.split(".")[0] - try: - filename = file_from_modpath([modname]) - except ImportError: - # import failed, i'm probably not so wrong by supposing it's - # not standard... 
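A quick sketch of the two lookups defined in this stretch (assuming astroid is installed; `is_standard_module`'s answers depend on where the interpreter's stdlib and site-packages live):

    from astroid.modutils import get_module_part, is_standard_module

    print(get_module_part("os.path.join"))  # "os.path", via the os.path trick above
    print(is_standard_module("json"))       # True: resolved under STD_LIB_DIRS
    print(is_standard_module("astroid"))    # False: resolved under EXT_LIB_DIRS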
- return False - # modules which are not living in a file are considered standard - # (sys and __builtin__ for instance) - if filename is None: - # we assume there are no namespaces in stdlib - return not util.is_namespace(modname) - filename = _normalize_path(filename) - for path in EXT_LIB_DIRS: - if filename.startswith(_cache_normalize_path(path)): - return False - if std_path is None: - std_path = STD_LIB_DIRS - for path in std_path: - if filename.startswith(_cache_normalize_path(path)): - return True - return False - - -def is_relative(modname, from_file): - """return true if the given module name is relative to the given - file name - - :type modname: str - :param modname: name of the module we are interested in - - :type from_file: str - :param from_file: - path of the module from which modname has been imported - - :rtype: bool - :return: - true if the module has been imported relatively to `from_file` - """ - if not os.path.isdir(from_file): - from_file = os.path.dirname(from_file) - if from_file in sys.path: - return False - try: - stream, _, _ = imp.find_module(modname.split(".")[0], [from_file]) - - # Close the stream to avoid ResourceWarnings. - if stream: - stream.close() - return True - except ImportError: - return False - - -# internal only functions ##################################################### - - -def _spec_from_modpath(modpath, path=None, context=None): - """given a mod path (i.e. split module / package name), return the - corresponding spec - - this function is used internally, see `file_from_modpath`'s - documentation for more information - """ - assert modpath - location = None - if context is not None: - try: - found_spec = spec.find_spec(modpath, [context]) - location = found_spec.location - except ImportError: - found_spec = spec.find_spec(modpath, path) - location = found_spec.location - else: - found_spec = spec.find_spec(modpath, path) - if found_spec.type == spec.ModuleType.PY_COMPILED: - try: - location = get_source_file(found_spec.location) - return found_spec._replace( - location=location, type=spec.ModuleType.PY_SOURCE - ) - except NoSourceFile: - return found_spec._replace(location=location) - elif found_spec.type == spec.ModuleType.C_BUILTIN: - # integrated builtin module - return found_spec._replace(location=None) - elif found_spec.type == spec.ModuleType.PKG_DIRECTORY: - location = _has_init(found_spec.location) - return found_spec._replace(location=location, type=spec.ModuleType.PY_SOURCE) - return found_spec - - -def _is_python_file(filename): - """return true if the given filename should be considered as a python file - - .pyc and .pyo are ignored - """ - return filename.endswith((".py", ".so", ".pyd", ".pyw")) - - -def _has_init(directory): - """if the given directory has a valid __init__ file, return its path, - else return None - """ - mod_or_pack = os.path.join(directory, "__init__") - for ext in PY_SOURCE_EXTS + ("pyc", "pyo"): - if os.path.exists(mod_or_pack + "." + ext): - return mod_or_pack + "." + ext - return None - - -def is_namespace(specobj): - return specobj.type == spec.ModuleType.PY_NAMESPACE - - -def is_directory(specobj): - return specobj.type == spec.ModuleType.PKG_DIRECTORY diff --git a/venv/lib/python3.8/site-packages/astroid/node_classes.py b/venv/lib/python3.8/site-packages/astroid/node_classes.py deleted file mode 100644 index 994c96bd..00000000 --- a/venv/lib/python3.8/site-packages/astroid/node_classes.py +++ /dev/null @@ -1,4775 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2009-2011, 2013-2014 LOGILAB S.A. 
(Paris, FRANCE) -# Copyright (c) 2010 Daniel Harding -# Copyright (c) 2012 FELD Boris -# Copyright (c) 2013-2014 Google, Inc. -# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014 Eevee (Alex Munroe) -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 Florian Bruhin -# Copyright (c) 2016-2017 Derek Gustafson -# Copyright (c) 2016 Jared Garst -# Copyright (c) 2016 Jakub Wilk -# Copyright (c) 2016 Dave Baum -# Copyright (c) 2017-2018 Ashley Whetter -# Copyright (c) 2017 Łukasz Rogalski -# Copyright (c) 2017 rr- -# Copyright (c) 2018 Bryce Guinta -# Copyright (c) 2018 brendanator -# Copyright (c) 2018 Nick Drozd -# Copyright (c) 2018 HoverHell - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -# pylint: disable=too-many-lines; https://github.com/PyCQA/astroid/issues/465 - -"""Module for some node classes. More nodes in scoped_nodes.py -""" - -import abc -import builtins as builtins_mod -import itertools -import pprint -import sys -from functools import lru_cache, singledispatch as _singledispatch - -from astroid import as_string -from astroid import bases -from astroid import context as contextmod -from astroid import decorators -from astroid import exceptions -from astroid import manager -from astroid import mixins -from astroid import util - - -BUILTINS = builtins_mod.__name__ -MANAGER = manager.AstroidManager() -PY38 = sys.version_info[:2] >= (3, 8) - - -def _is_const(value): - return isinstance(value, tuple(CONST_CLS)) - - -@decorators.raise_if_nothing_inferred -def unpack_infer(stmt, context=None): - """recursively generate nodes inferred by the given statement. - If the inferred value is a list or a tuple, recurse on the elements - """ - if isinstance(stmt, (List, Tuple)): - for elt in stmt.elts: - if elt is util.Uninferable: - yield elt - continue - yield from unpack_infer(elt, context) - return dict(node=stmt, context=context) - # if inferred is a final node, return it and stop - inferred = next(stmt.infer(context)) - if inferred is stmt: - yield inferred - return dict(node=stmt, context=context) - # else, infer recursively, except Uninferable object that should be returned as is - for inferred in stmt.infer(context): - if inferred is util.Uninferable: - yield inferred - else: - yield from unpack_infer(inferred, context) - - return dict(node=stmt, context=context) - - -def are_exclusive( - stmt1, stmt2, exceptions=None -): # pylint: disable=redefined-outer-name - """return true if the two given statements are mutually exclusive - - `exceptions` may be a list of exception names. If specified, discard If - branches and check one of the statement is in an exception handler catching - one of the given exceptions. 
- - algorithm : - 1) index stmt1's parents - 2) climb among stmt2's parents until we find a common parent - 3) if the common parent is a If or TryExcept statement, look if nodes are - in exclusive branches - """ - # index stmt1's parents - stmt1_parents = {} - children = {} - node = stmt1.parent - previous = stmt1 - while node: - stmt1_parents[node] = 1 - children[node] = previous - previous = node - node = node.parent - # climb among stmt2's parents until we find a common parent - node = stmt2.parent - previous = stmt2 - while node: - if node in stmt1_parents: - # if the common parent is a If or TryExcept statement, look if - # nodes are in exclusive branches - if isinstance(node, If) and exceptions is None: - if ( - node.locate_child(previous)[1] - is not node.locate_child(children[node])[1] - ): - return True - elif isinstance(node, TryExcept): - c2attr, c2node = node.locate_child(previous) - c1attr, c1node = node.locate_child(children[node]) - if c1node is not c2node: - first_in_body_caught_by_handlers = ( - c2attr == "handlers" - and c1attr == "body" - and previous.catch(exceptions) - ) - second_in_body_caught_by_handlers = ( - c2attr == "body" - and c1attr == "handlers" - and children[node].catch(exceptions) - ) - first_in_else_other_in_handlers = ( - c2attr == "handlers" and c1attr == "orelse" - ) - second_in_else_other_in_handlers = ( - c2attr == "orelse" and c1attr == "handlers" - ) - if any( - ( - first_in_body_caught_by_handlers, - second_in_body_caught_by_handlers, - first_in_else_other_in_handlers, - second_in_else_other_in_handlers, - ) - ): - return True - elif c2attr == "handlers" and c1attr == "handlers": - return previous is not children[node] - return False - previous = node - node = node.parent - return False - - -# getitem() helpers. - -_SLICE_SENTINEL = object() - - -def _slice_value(index, context=None): - """Get the value of the given slice index.""" - - if isinstance(index, Const): - if isinstance(index.value, (int, type(None))): - return index.value - elif index is None: - return None - else: - # Try to infer what the index actually is. - # Since we can't return all the possible values, - # we'll stop at the first possible value. - try: - inferred = next(index.infer(context=context)) - except exceptions.InferenceError: - pass - else: - if isinstance(inferred, Const): - if isinstance(inferred.value, (int, type(None))): - return inferred.value - - # Use a sentinel, because None can be a valid - # value that this function can return, - # as it is the case for unspecified bounds. 
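The exclusivity check above can be exercised directly; a minimal sketch (assuming astroid is installed; `#@` marks the nodes `extract_node` should return):

    import astroid
    from astroid.node_classes import are_exclusive

    code = "if cond:\n    x = 1  #@\nelse:\n    x = 2  #@"
    first, second = astroid.extract_node(code)
    # The two assignments sit in opposite branches of the same If:
    print(are_exclusive(first, second))  # should print True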
- return _SLICE_SENTINEL - - -def _infer_slice(node, context=None): - lower = _slice_value(node.lower, context) - upper = _slice_value(node.upper, context) - step = _slice_value(node.step, context) - if all(elem is not _SLICE_SENTINEL for elem in (lower, upper, step)): - return slice(lower, upper, step) - - raise exceptions.AstroidTypeError( - message="Could not infer slice used in subscript", - node=node, - index=node.parent, - context=context, - ) - - -def _container_getitem(instance, elts, index, context=None): - """Get a slice or an item, using the given *index*, for the given sequence.""" - try: - if isinstance(index, Slice): - index_slice = _infer_slice(index, context=context) - new_cls = instance.__class__() - new_cls.elts = elts[index_slice] - new_cls.parent = instance.parent - return new_cls - if isinstance(index, Const): - return elts[index.value] - except IndexError as exc: - raise exceptions.AstroidIndexError( - message="Index {index!s} out of range", - node=instance, - index=index, - context=context, - ) from exc - except TypeError as exc: - raise exceptions.AstroidTypeError( - message="Type error {error!r}", node=instance, index=index, context=context - ) from exc - - raise exceptions.AstroidTypeError("Could not use %s as subscript index" % index) - - -OP_PRECEDENCE = { - op: precedence - for precedence, ops in enumerate( - [ - ["Lambda"], # lambda x: x + 1 - ["IfExp"], # 1 if True else 2 - ["or"], - ["and"], - ["not"], - ["Compare"], # in, not in, is, is not, <, <=, >, >=, !=, == - ["|"], - ["^"], - ["&"], - ["<<", ">>"], - ["+", "-"], - ["*", "@", "/", "//", "%"], - ["UnaryOp"], # +, -, ~ - ["**"], - ["Await"], - ] - ) - for op in ops -} - - -class NodeNG: - """ A node of the new Abstract Syntax Tree (AST). - - This is the base class for all Astroid node classes. - """ - - is_statement = False - """Whether this node indicates a statement. - - :type: bool - """ - optional_assign = False # True for For (and for Comprehension if py <3.0) - """Whether this node optionally assigns a variable. - - This is for loop assignments because loop won't necessarily perform an - assignment if the loop has no iterations. - This is also the case from comprehensions in Python 2. - - :type: bool - """ - is_function = False # True for FunctionDef nodes - """Whether this node indicates a function. - - :type: bool - """ - is_lambda = False - # Attributes below are set by the builder module or by raw factories - lineno = None - """The line that this node appears on in the source code. - - :type: int or None - """ - col_offset = None - """The column that this node appears on in the source code. - - :type: int or None - """ - parent = None - """The parent node in the syntax tree. - - :type: NodeNG or None - """ - _astroid_fields = () - """Node attributes that contain child nodes. - - This is redefined in most concrete classes. - - :type: tuple(str) - """ - _other_fields = () - """Node attributes that do not contain child nodes. - - :type: tuple(str) - """ - _other_other_fields = () - """Attributes that contain AST-dependent fields. - - :type: tuple(str) - """ - # instance specific inference function infer(node, context) - _explicit_inference = None - - def __init__(self, lineno=None, col_offset=None, parent=None): - """ - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. 
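The slice helpers above feed container subscripting; a small end-to-end sketch (assuming astroid is installed and infers list slicing through these helpers):

    import astroid

    node = astroid.extract_node("[1, 2, 3][0:2]")
    # _infer_slice()/_container_getitem() are what make this inference work:
    print(next(node.infer()).as_string())  # should print "[1, 2]"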
- :type parent: NodeNG or None - """ - self.lineno = lineno - self.col_offset = col_offset - self.parent = parent - - def infer(self, context=None, **kwargs): - """Get a generator of the inferred values. - - This is the main entry point to the inference system. - - .. seealso:: :ref:`inference` - - If the instance has some explicit inference function set, it will be - called instead of the default interface. - - :returns: The inferred values. - :rtype: iterable - """ - if context is not None: - context = context.extra_context.get(self, context) - if self._explicit_inference is not None: - # explicit_inference is not bound, give it self explicitly - try: - # pylint: disable=not-callable - return self._explicit_inference(self, context, **kwargs) - except exceptions.UseInferenceDefault: - pass - - if not context: - return self._infer(context, **kwargs) - - key = (self, context.lookupname, context.callcontext, context.boundnode) - if key in context.inferred: - return iter(context.inferred[key]) - - gen = context.cache_generator(key, self._infer(context, **kwargs)) - return util.limit_inference(gen, MANAGER.max_inferable_values) - - def _repr_name(self): - """Get a name for nice representation. - - This is either :attr:`name`, :attr:`attrname`, or the empty string. - - :returns: The nice name. - :rtype: str - """ - names = {"name", "attrname"} - if all(name not in self._astroid_fields for name in names): - return getattr(self, "name", getattr(self, "attrname", "")) - return "" - - def __str__(self): - rname = self._repr_name() - cname = type(self).__name__ - if rname: - string = "%(cname)s.%(rname)s(%(fields)s)" - alignment = len(cname) + len(rname) + 2 - else: - string = "%(cname)s(%(fields)s)" - alignment = len(cname) + 1 - result = [] - for field in self._other_fields + self._astroid_fields: - value = getattr(self, field) - width = 80 - len(field) - alignment - lines = pprint.pformat(value, indent=2, width=width).splitlines(True) - - inner = [lines[0]] - for line in lines[1:]: - inner.append(" " * alignment + line) - result.append("%s=%s" % (field, "".join(inner))) - - return string % { - "cname": cname, - "rname": rname, - "fields": (",\n" + " " * alignment).join(result), - } - - def __repr__(self): - rname = self._repr_name() - if rname: - string = "<%(cname)s.%(rname)s l.%(lineno)s at 0x%(id)x>" - else: - string = "<%(cname)s l.%(lineno)s at 0x%(id)x>" - return string % { - "cname": type(self).__name__, - "rname": rname, - "lineno": self.fromlineno, - "id": id(self), - } - - def accept(self, visitor): - """Visit this node using the given visitor.""" - func = getattr(visitor, "visit_" + self.__class__.__name__.lower()) - return func(self) - - def get_children(self): - """Get the child nodes below this node. - - :returns: The children. - :rtype: iterable(NodeNG) - """ - for field in self._astroid_fields: - attr = getattr(self, field) - if attr is None: - continue - if isinstance(attr, (list, tuple)): - yield from attr - else: - yield attr - - def last_child(self): - """An optimized version of list(get_children())[-1] - - :returns: The last child, or None if no children exist. - :rtype: NodeNG or None - """ - for field in self._astroid_fields[::-1]: - attr = getattr(self, field) - if not attr: # None or empty listy / tuple - continue - if isinstance(attr, (list, tuple)): - return attr[-1] - - return attr - return None - - def parent_of(self, node): - """Check if this node is the parent of the given node. - - :param node: The node to check if it is the child. 
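`infer` is the entry point most callers use; a minimal sketch (assuming astroid is installed):

    import astroid

    node = astroid.extract_node("1 + 2")
    # Inference folds the BinOp into a Const node:
    print(next(node.infer()).value)  # should print 3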
- :type node: NodeNG - - :returns: True if this node is the parent of the given node, - False otherwise. - :rtype: bool - """ - parent = node.parent - while parent is not None: - if self is parent: - return True - parent = parent.parent - return False - - def statement(self): - """The first parent node, including self, marked as statement node. - - :returns: The first parent statement. - :rtype: NodeNG - """ - if self.is_statement: - return self - return self.parent.statement() - - def frame(self): - """The first parent frame node. - - A frame node is a :class:`Module`, :class:`FunctionDef`, - or :class:`ClassDef`. - - :returns: The first parent frame node. - :rtype: Module or FunctionDef or ClassDef - """ - return self.parent.frame() - - def scope(self): - """The first parent node defining a new scope. - - :returns: The first parent scope node. - :rtype: Module or FunctionDef or ClassDef or Lambda or GenExpr - """ - if self.parent: - return self.parent.scope() - return None - - def root(self): - """Return the root node of the syntax tree. - - :returns: The root node. - :rtype: Module - """ - if self.parent: - return self.parent.root() - return self - - def child_sequence(self, child): - """Search for the sequence that contains this child. - - :param child: The child node to search sequences for. - :type child: NodeNG - - :returns: The sequence containing the given child node. - :rtype: iterable(NodeNG) - - :raises AstroidError: If no sequence could be found that contains - the given child. - """ - for field in self._astroid_fields: - node_or_sequence = getattr(self, field) - if node_or_sequence is child: - return [node_or_sequence] - # /!\ compiler.ast Nodes have an __iter__ walking over child nodes - if ( - isinstance(node_or_sequence, (tuple, list)) - and child in node_or_sequence - ): - return node_or_sequence - - msg = "Could not find %s in %s's children" - raise exceptions.AstroidError(msg % (repr(child), repr(self))) - - def locate_child(self, child): - """Find the field of this node that contains the given child. - - :param child: The child node to search fields for. - :type child: NodeNG - - :returns: A tuple of the name of the field that contains the child, - and the sequence or node that contains the child node. - :rtype: tuple(str, iterable(NodeNG) or NodeNG) - - :raises AstroidError: If no field could be found that contains - the given child. - """ - for field in self._astroid_fields: - node_or_sequence = getattr(self, field) - # /!\ compiler.ast Nodes have an __iter__ walking over child nodes - if child is node_or_sequence: - return field, child - if ( - isinstance(node_or_sequence, (tuple, list)) - and child in node_or_sequence - ): - return field, node_or_sequence - msg = "Could not find %s in %s's children" - raise exceptions.AstroidError(msg % (repr(child), repr(self))) - - # FIXME : should we merge child_sequence and locate_child ? locate_child - # is only used in are_exclusive, child_sequence one time in pylint. - - def next_sibling(self): - """The next sibling statement node. - - :returns: The next sibling statement node. - :rtype: NodeNG or None - """ - return self.parent.next_sibling() - - def previous_sibling(self): - """The previous sibling statement. - - :returns: The previous sibling statement node. 
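A short sketch of the tree-navigation helpers in this stretch, `scope` and `root` (assuming astroid is installed):

    import astroid

    ret = astroid.extract_node("def f():\n    return 1  #@")
    print(ret.scope().name)           # "f": the enclosing function frame
    print(type(ret.root()).__name__)  # "Module": the top of the syntax tree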
- :rtype: NodeNG or None - """ - return self.parent.previous_sibling() - - # these are lazy because they're relatively expensive to compute for every - # single node, and they rarely get looked at - - @decorators.cachedproperty - def fromlineno(self): - """The first line that this node appears on in the source code. - - :type: int or None - """ - if self.lineno is None: - return self._fixed_source_line() - - return self.lineno - - @decorators.cachedproperty - def tolineno(self): - """The last line that this node appears on in the source code. - - :type: int or None - """ - if not self._astroid_fields: - # can't have children - lastchild = None - else: - lastchild = self.last_child() - if lastchild is None: - return self.fromlineno - - return lastchild.tolineno - - def _fixed_source_line(self): - """Attempt to find the line that this node appears on. - - We need this method since not all nodes have :attr:`lineno` set. - - :returns: The line number of this node, - or None if this could not be determined. - :rtype: int or None - """ - line = self.lineno - _node = self - try: - while line is None: - _node = next(_node.get_children()) - line = _node.lineno - except StopIteration: - _node = self.parent - while _node and line is None: - line = _node.lineno - _node = _node.parent - return line - - def block_range(self, lineno): - """Get a range from the given line number to where this node ends. - - :param lineno: The line number to start the range at. - :type lineno: int - - :returns: The range of line numbers that this node belongs to, - starting at the given line number. - :rtype: tuple(int, int or None) - """ - return lineno, self.tolineno - - def set_local(self, name, stmt): - """Define that the given name is declared in the given statement node. - - This definition is stored on the parent scope node. - - .. seealso:: :meth:`scope` - - :param name: The name that is being defined. - :type name: str - - :param stmt: The statement that defines the given name. - :type stmt: NodeNG - """ - self.parent.set_local(name, stmt) - - def nodes_of_class(self, klass, skip_klass=None): - """Get the nodes (including this one or below) of the given types. - - :param klass: The types of node to search for. - :type klass: builtins.type or tuple(builtins.type) - - :param skip_klass: The types of node to ignore. This is useful to ignore - subclasses of :attr:`klass`. - :type skip_klass: builtins.type or tuple(builtins.type) - - :returns: The node of the given types. 
- :rtype: iterable(NodeNG) - """ - if isinstance(self, klass): - yield self - - if skip_klass is None: - for child_node in self.get_children(): - yield from child_node.nodes_of_class(klass, skip_klass) - - return - - for child_node in self.get_children(): - if isinstance(child_node, skip_klass): - continue - yield from child_node.nodes_of_class(klass, skip_klass) - - @decorators.cached - def _get_assign_nodes(self): - return [] - - def _get_name_nodes(self): - for child_node in self.get_children(): - yield from child_node._get_name_nodes() - - def _get_return_nodes_skip_functions(self): - yield from () - - def _get_yield_nodes_skip_lambdas(self): - yield from () - - def _infer_name(self, frame, name): - # overridden for ImportFrom, Import, Global, TryExcept and Arguments - pass - - def _infer(self, context=None): - """we don't know how to resolve a statement by default""" - # this method is overridden by most concrete classes - raise exceptions.InferenceError( - "No inference function for {node!r}.", node=self, context=context - ) - - def inferred(self): - """Get a list of the inferred values. - - .. seealso:: :ref:`inference` - - :returns: The inferred values. - :rtype: list - """ - return list(self.infer()) - - def instantiate_class(self): - """Instantiate an instance of the defined class. - - .. note:: - - On anything other than a :class:`ClassDef` this will return self. - - :returns: An instance of the defined class. - :rtype: object - """ - return self - - def has_base(self, node): - """Check if this node inherits from the given type. - - :param node: The node defining the base to look for. - Usually this is a :class:`Name` node. - :type node: NodeNG - """ - return False - - def callable(self): - """Whether this node defines something that is callable. - - :returns: True if this defines something that is callable, - False otherwise. - :rtype: bool - """ - return False - - def eq(self, value): - return False - - def as_string(self): - """Get the source code that this node represents. - - :returns: The source code. - :rtype: str - """ - return as_string.to_code(self) - - def repr_tree( - self, - ids=False, - include_linenos=False, - ast_state=False, - indent=" ", - max_depth=0, - max_width=80, - ): - """Get a string representation of the AST from this node. - - :param ids: If true, includes the ids with the node type names. - :type ids: bool - - :param include_linenos: If true, includes the line numbers and - column offsets. - :type include_linenos: bool - - :param ast_state: If true, includes information derived from - the whole AST like local and global variables. - :type ast_state: bool - - :param indent: A string to use to indent the output string. - :type indent: str - - :param max_depth: If set to a positive integer, won't return - nodes deeper than max_depth in the string. - :type max_depth: int - - :param max_width: Attempt to format the output string to stay - within this number of characters, but can exceed it under some - circumstances. Only positive integer values are valid, the default is 80. - :type max_width: int - - :returns: The string representation of the AST. - :rtype: str - """ - # pylint: disable=too-many-statements - @_singledispatch - def _repr_tree(node, result, done, cur_indent="", depth=1): - """Outputs a representation of a non-tuple/list, non-node that's - contained within an AST, including strings. 
- """ - lines = pprint.pformat( - node, width=max(max_width - len(cur_indent), 1) - ).splitlines(True) - result.append(lines[0]) - result.extend([cur_indent + line for line in lines[1:]]) - return len(lines) != 1 - - # pylint: disable=unused-variable; doesn't understand singledispatch - @_repr_tree.register(tuple) - @_repr_tree.register(list) - def _repr_seq(node, result, done, cur_indent="", depth=1): - """Outputs a representation of a sequence that's contained within an AST.""" - cur_indent += indent - result.append("[") - if not node: - broken = False - elif len(node) == 1: - broken = _repr_tree(node[0], result, done, cur_indent, depth) - elif len(node) == 2: - broken = _repr_tree(node[0], result, done, cur_indent, depth) - if not broken: - result.append(", ") - else: - result.append(",\n") - result.append(cur_indent) - broken = _repr_tree(node[1], result, done, cur_indent, depth) or broken - else: - result.append("\n") - result.append(cur_indent) - for child in node[:-1]: - _repr_tree(child, result, done, cur_indent, depth) - result.append(",\n") - result.append(cur_indent) - _repr_tree(node[-1], result, done, cur_indent, depth) - broken = True - result.append("]") - return broken - - # pylint: disable=unused-variable; doesn't understand singledispatch - @_repr_tree.register(NodeNG) - def _repr_node(node, result, done, cur_indent="", depth=1): - """Outputs a strings representation of an astroid node.""" - if node in done: - result.append( - indent - + " max_depth: - result.append("...") - return False - depth += 1 - cur_indent += indent - if ids: - result.append("%s<0x%x>(\n" % (type(node).__name__, id(node))) - else: - result.append("%s(" % type(node).__name__) - fields = [] - if include_linenos: - fields.extend(("lineno", "col_offset")) - fields.extend(node._other_fields) - fields.extend(node._astroid_fields) - if ast_state: - fields.extend(node._other_other_fields) - if not fields: - broken = False - elif len(fields) == 1: - result.append("%s=" % fields[0]) - broken = _repr_tree( - getattr(node, fields[0]), result, done, cur_indent, depth - ) - else: - result.append("\n") - result.append(cur_indent) - for field in fields[:-1]: - result.append("%s=" % field) - _repr_tree(getattr(node, field), result, done, cur_indent, depth) - result.append(",\n") - result.append(cur_indent) - result.append("%s=" % fields[-1]) - _repr_tree(getattr(node, fields[-1]), result, done, cur_indent, depth) - broken = True - result.append(")") - return broken - - result = [] - _repr_tree(self, result, set()) - return "".join(result) - - def bool_value(self): - """Determine the boolean value of this node. - - The boolean value of a node can have three - possible values: - - * False: For instance, empty data structures, - False, empty strings, instances which return - explicitly False from the __nonzero__ / __bool__ - method. - * True: Most of constructs are True by default: - classes, functions, modules etc - * Uninferable: The inference engine is uncertain of the - node's value. - - :returns: The boolean value of this node. - :rtype: bool or Uninferable - """ - return util.Uninferable - - def op_precedence(self): - # Look up by class name or default to highest precedence - return OP_PRECEDENCE.get(self.__class__.__name__, len(OP_PRECEDENCE)) - - def op_left_associative(self): - # Everything is left associative except `**` and IfExp - return True - - -class Statement(NodeNG): - """Statement node adding a few attributes""" - - is_statement = True - """Whether this node indicates a statement. 
- - :type: bool - """ - - def next_sibling(self): - """The next sibling statement node. - - :returns: The next sibling statement node. - :rtype: NodeNG or None - """ - stmts = self.parent.child_sequence(self) - index = stmts.index(self) - try: - return stmts[index + 1] - except IndexError: - pass - - def previous_sibling(self): - """The previous sibling statement. - - :returns: The previous sibling statement node. - :rtype: NodeNG or None - """ - stmts = self.parent.child_sequence(self) - index = stmts.index(self) - if index >= 1: - return stmts[index - 1] - return None - - -class _BaseContainer( - mixins.ParentAssignTypeMixin, NodeNG, bases.Instance, metaclass=abc.ABCMeta -): - """Base class for Set, FrozenSet, Tuple and List.""" - - _astroid_fields = ("elts",) - - def __init__(self, lineno=None, col_offset=None, parent=None): - """ - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.elts = [] - """The elements in the node. - - :type: list(NodeNG) - """ - - super(_BaseContainer, self).__init__(lineno, col_offset, parent) - - def postinit(self, elts): - """Do some setup after initialisation. - - :param elts: The list of elements the that node contains. - :type elts: list(NodeNG) - """ - self.elts = elts - - @classmethod - def from_elements(cls, elts=None): - """Create a node of this type from the given list of elements. - - :param elts: The list of elements that the node should contain. - :type elts: list(NodeNG) - - :returns: A new node containing the given elements. - :rtype: NodeNG - """ - node = cls() - if elts is None: - node.elts = [] - else: - node.elts = [const_factory(e) if _is_const(e) else e for e in elts] - return node - - def itered(self): - """An iterator over the elements this node contains. - - :returns: The contents of this node. - :rtype: iterable(NodeNG) - """ - return self.elts - - def bool_value(self): - """Determine the boolean value of this node. - - :returns: The boolean value of this node. - :rtype: bool or Uninferable - """ - return bool(self.elts) - - @abc.abstractmethod - def pytype(self): - """Get the name of the type that this node represents. - - :returns: The name of the type. - :rtype: str - """ - - def get_children(self): - yield from self.elts - - -class LookupMixIn: - """Mixin to look up a name in the right scope.""" - - @lru_cache(maxsize=None) - def lookup(self, name): - """Lookup where the given variable is assigned. - - The lookup starts from self's scope. If self is not a frame itself - and the name is found in the inner frame locals, statements will be - filtered to remove ignorable statements according to self's location. - - :param name: The name of the variable to find assignments for. - :type name: str - - :returns: The scope node and the list of assignments associated to the - given name according to the scope where it has been found (locals, - globals or builtin). - :rtype: tuple(str, list(NodeNG)) - """ - return self.scope().scope_lookup(self, name) - - def ilookup(self, name): - """Lookup the inferred values of the given variable. - - :param name: The variable name to find values for. - :type name: str - - :returns: The inferred values of the statements returned from - :meth:`lookup`. 
- :rtype: iterable - """ - frame, stmts = self.lookup(name) - context = contextmod.InferenceContext() - return bases._infer_stmts(stmts, context, frame) - - def _get_filtered_node_statements(self, nodes): - statements = [(node, node.statement()) for node in nodes] - # Next we check if we have ExceptHandlers that are parent - # of the underlying variable, in which case the last one survives - if len(statements) > 1 and all( - isinstance(stmt, ExceptHandler) for _, stmt in statements - ): - statements = [ - (node, stmt) for node, stmt in statements if stmt.parent_of(self) - ] - return statements - - def _filter_stmts(self, stmts, frame, offset): - """Filter the given list of statements to remove ignorable statements. - - If self is not a frame itself and the name is found in the inner - frame locals, statements will be filtered to remove ignorable - statements according to self's location. - - :param stmts: The statements to filter. - :type stmts: list(NodeNG) - - :param frame: The frame that all of the given statements belong to. - :type frame: NodeNG - - :param offset: The line offset to filter statements up to. - :type offset: int - - :returns: The filtered statements. - :rtype: list(NodeNG) - """ - # if offset == -1, my actual frame is not the inner frame but its parent - # - # class A(B): pass - # - # we need this to resolve B correctly - if offset == -1: - myframe = self.frame().parent.frame() - else: - myframe = self.frame() - # If the frame of this node is the same as the statement - # of this node, then the node is part of a class or - # a function definition and the frame of this node should be the - # the upper frame, not the frame of the definition. - # For more information why this is important, - # see Pylint issue #295. - # For example, for 'b', the statement is the same - # as the frame / scope: - # - # def test(b=1): - # ... - - if self.statement() is myframe and myframe.parent: - myframe = myframe.parent.frame() - mystmt = self.statement() - # line filtering if we are in the same frame - # - # take care node may be missing lineno information (this is the case for - # nodes inserted for living objects) - if myframe is frame and mystmt.fromlineno is not None: - assert mystmt.fromlineno is not None, mystmt - mylineno = mystmt.fromlineno + offset - else: - # disabling lineno filtering - mylineno = 0 - - _stmts = [] - _stmt_parents = [] - statements = self._get_filtered_node_statements(stmts) - - for node, stmt in statements: - # line filtering is on and we have reached our location, break - if stmt.fromlineno > mylineno > 0: - break - # Ignore decorators with the same name as the - # decorated function - # Fixes issue #375 - if mystmt is stmt and is_from_decorator(self): - continue - assert hasattr(node, "assign_type"), ( - node, - node.scope(), - node.scope().locals, - ) - assign_type = node.assign_type() - if node.has_base(self): - break - - _stmts, done = assign_type._get_filtered_stmts(self, node, _stmts, mystmt) - if done: - break - - optional_assign = assign_type.optional_assign - if optional_assign and assign_type.parent_of(self): - # we are inside a loop, loop var assignment is hiding previous - # assignment - _stmts = [node] - _stmt_parents = [stmt.parent] - continue - - if isinstance(assign_type, NamedExpr): - _stmts = [node] - continue - - # XXX comment various branches below!!! 
- try: - pindex = _stmt_parents.index(stmt.parent) - except ValueError: - pass - else: - # we got a parent index, this means the currently visited node - # is at the same block level as a previously visited node - if _stmts[pindex].assign_type().parent_of(assign_type): - # both statements are not at the same block level - continue - # if currently visited node is following previously considered - # assignment and both are not exclusive, we can drop the - # previous one. For instance in the following code :: - # - # if a: - # x = 1 - # else: - # x = 2 - # print x - # - # we can't remove neither x = 1 nor x = 2 when looking for 'x' - # of 'print x'; while in the following :: - # - # x = 1 - # x = 2 - # print x - # - # we can remove x = 1 when we see x = 2 - # - # moreover, on loop assignment types, assignment won't - # necessarily be done if the loop has no iteration, so we don't - # want to clear previous assignments if any (hence the test on - # optional_assign) - if not (optional_assign or are_exclusive(_stmts[pindex], node)): - if ( - # In case of partial function node, if the statement is different - # from the origin function then it can be deleted otherwise it should - # remain to be able to correctly infer the call to origin function. - not node.is_function - or node.qname() != "PartialFunction" - or node.name != _stmts[pindex].name - ): - del _stmt_parents[pindex] - del _stmts[pindex] - if isinstance(node, AssignName): - if not optional_assign and stmt.parent is mystmt.parent: - _stmts = [] - _stmt_parents = [] - elif isinstance(node, DelName): - _stmts = [] - _stmt_parents = [] - continue - if not are_exclusive(self, node): - _stmts.append(node) - _stmt_parents.append(stmt.parent) - return _stmts - - -# Name classes - - -class AssignName( - mixins.NoChildrenMixin, LookupMixIn, mixins.ParentAssignTypeMixin, NodeNG -): - """Variation of :class:`ast.Assign` representing assignment to a name. - - An :class:`AssignName` is the name of something that is assigned to. - This includes variables defined in a function signature or in a loop. - - >>> node = astroid.extract_node('variable = range(10)') - >>> node - - >>> list(node.get_children()) - [, ] - >>> list(node.get_children())[0].as_string() - 'variable' - """ - - _other_fields = ("name",) - - def __init__(self, name=None, lineno=None, col_offset=None, parent=None): - """ - :param name: The name that is assigned to. - :type name: str or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.name = name - """The name that is assigned to. - - :type: str or None - """ - - super(AssignName, self).__init__(lineno, col_offset, parent) - - -class DelName( - mixins.NoChildrenMixin, LookupMixIn, mixins.ParentAssignTypeMixin, NodeNG -): - """Variation of :class:`ast.Delete` representing deletion of a name. - - A :class:`DelName` is the name of something that is deleted. - - >>> node = astroid.extract_node("del variable #@") - >>> list(node.get_children()) - [] - >>> list(node.get_children())[0].as_string() - 'variable' - """ - - _other_fields = ("name",) - - def __init__(self, name=None, lineno=None, col_offset=None, parent=None): - """ - :param name: The name that is being deleted. - :type name: str or None - - :param lineno: The line that this node appears on in the source code. 
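The filtering above is what makes shadowed bindings disappear from lookups; the `x = 1` / `x = 2` case from the comments can be checked directly (assuming astroid is installed):

    import astroid

    call = astroid.extract_node("x = 1\nx = 2\nprint(x)  #@")
    name = call.args[0]                # the Name node for x
    _, assignments = name.lookup("x")
    # Only the rebinding survives; x = 1 is dropped as unconditionally shadowed:
    print([a.parent.as_string() for a in assignments])  # should print ['x = 2']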
- :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.name = name - """The name that is being deleted. - - :type: str or None - """ - - super(DelName, self).__init__(lineno, col_offset, parent) - - -class Name(mixins.NoChildrenMixin, LookupMixIn, NodeNG): - """Class representing an :class:`ast.Name` node. - - A :class:`Name` node is something that is named, but not covered by - :class:`AssignName` or :class:`DelName`. - - >>> node = astroid.extract_node('range(10)') - >>> node - - >>> list(node.get_children()) - [, ] - >>> list(node.get_children())[0].as_string() - 'range' - """ - - _other_fields = ("name",) - - def __init__(self, name=None, lineno=None, col_offset=None, parent=None): - """ - :param name: The name that this node refers to. - :type name: str or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.name = name - """The name that this node refers to. - - :type: str or None - """ - - super(Name, self).__init__(lineno, col_offset, parent) - - def _get_name_nodes(self): - yield self - - for child_node in self.get_children(): - yield from child_node._get_name_nodes() - - -class Arguments(mixins.AssignTypeMixin, NodeNG): - """Class representing an :class:`ast.arguments` node. - - An :class:`Arguments` node represents that arguments in a - function definition. - - >>> node = astroid.extract_node('def foo(bar): pass') - >>> node - - >>> node.args - - """ - - # Python 3.4+ uses a different approach regarding annotations, - # each argument is a new class, _ast.arg, which exposes an - # 'annotation' attribute. In astroid though, arguments are exposed - # as is in the Arguments node and the only way to expose annotations - # is by using something similar with Python 3.3: - # - we expose 'varargannotation' and 'kwargannotation' of annotations - # of varargs and kwargs. - # - we expose 'annotation', a list with annotations for - # for each normal argument. If an argument doesn't have an - # annotation, its value will be None. - - _astroid_fields = ( - "args", - "defaults", - "kwonlyargs", - "posonlyargs", - "kw_defaults", - "annotations", - "varargannotation", - "kwargannotation", - "kwonlyargs_annotations", - "type_comment_args", - ) - varargannotation = None - """The type annotation for the variable length arguments. - - :type: NodeNG - """ - kwargannotation = None - """The type annotation for the variable length keyword arguments. - - :type: NodeNG - """ - - _other_fields = ("vararg", "kwarg") - - def __init__(self, vararg=None, kwarg=None, parent=None): - """ - :param vararg: The name of the variable length arguments. - :type vararg: str or None - - :param kwarg: The name of the variable length keyword arguments. - :type kwarg: str or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - super(Arguments, self).__init__(parent=parent) - self.vararg = vararg - """The name of the variable length arguments. - - :type: str or None - """ - - self.kwarg = kwarg - """The name of the variable length keyword arguments. 
-
-        :type: str or None
-        """
-
-        self.args = []
-        """The names of the required arguments.
-
-        :type: list(AssignName)
-        """
-
-        self.defaults = []
-        """The default values for arguments that can be passed positionally.
-
-        :type: list(NodeNG)
-        """
-
-        self.kwonlyargs = []
-        """The keyword arguments that cannot be passed positionally.
-
-        :type: list(AssignName)
-        """
-
-        self.posonlyargs = []
-        """The arguments that can only be passed positionally.
-
-        :type: list(AssignName)
-        """
-
-        self.kw_defaults = []
-        """The default values for keyword arguments that cannot be passed positionally.
-
-        :type: list(NodeNG)
-        """
-
-        self.annotations = []
-        """The type annotations of arguments that can be passed positionally.
-
-        :type: list(NodeNG)
-        """
-
-        self.posonlyargs_annotations = []
-        """The type annotations of arguments that can only be passed positionally.
-
-        :type: list(NodeNG)
-        """
-
-        self.kwonlyargs_annotations = []
-        """The type annotations of arguments that cannot be passed positionally.
-
-        :type: list(NodeNG)
-        """
-
-        self.type_comment_args = []
-        """The type annotation, passed by a type comment, of each argument.
-
-        If an argument does not have a type comment,
-        the value for that argument will be None.
-
-        :type: list(NodeNG or None)
-        """
-
-    # pylint: disable=too-many-arguments
-    def postinit(
-        self,
-        args,
-        defaults,
-        kwonlyargs,
-        kw_defaults,
-        annotations,
-        posonlyargs=None,
-        kwonlyargs_annotations=None,
-        posonlyargs_annotations=None,
-        varargannotation=None,
-        kwargannotation=None,
-        type_comment_args=None,
-    ):
-        """Do some setup after initialisation.
-
-        :param args: The names of the required arguments.
-        :type args: list(AssignName)
-
-        :param defaults: The default values for arguments that can be passed
-            positionally.
-        :type defaults: list(NodeNG)
-
-        :param kwonlyargs: The keyword arguments that cannot be passed
-            positionally.
-        :type kwonlyargs: list(AssignName)
-
-        :param posonlyargs: The arguments that can only be passed
-            positionally.
-        :type posonlyargs: list(AssignName)
-
-        :param kw_defaults: The default values for keyword arguments that
-            cannot be passed positionally.
-        :type kw_defaults: list(NodeNG)
-
-        :param annotations: The type annotations of arguments that can be
-            passed positionally.
-        :type annotations: list(NodeNG)
-
-        :param kwonlyargs_annotations: The type annotations of arguments that
-            cannot be passed positionally. This should always be passed in
-            Python 3.
-        :type kwonlyargs_annotations: list(NodeNG)
-
-        :param posonlyargs_annotations: The type annotations of arguments that
-            can only be passed positionally. This should always be passed in
-            Python 3.
-        :type posonlyargs_annotations: list(NodeNG)
-
-        :param varargannotation: The type annotation for the variable length
-            arguments.
-        :type varargannotation: NodeNG
-
-        :param kwargannotation: The type annotation for the variable length
-            keyword arguments.
-        :type kwargannotation: NodeNG
-
-        :param type_comment_args: The type annotation,
-            passed by a type comment, of each argument.
- :type type_comment_args: list(NodeNG or None) - """ - self.args = args - self.defaults = defaults - self.kwonlyargs = kwonlyargs - self.posonlyargs = posonlyargs - self.kw_defaults = kw_defaults - self.annotations = annotations - self.kwonlyargs_annotations = kwonlyargs_annotations - self.posonlyargs_annotations = posonlyargs_annotations - self.varargannotation = varargannotation - self.kwargannotation = kwargannotation - self.type_comment_args = type_comment_args - - # pylint: disable=too-many-arguments - - def _infer_name(self, frame, name): - if self.parent is frame: - return name - return None - - @decorators.cachedproperty - def fromlineno(self): - """The first line that this node appears on in the source code. - - :type: int or None - """ - lineno = super(Arguments, self).fromlineno - return max(lineno, self.parent.fromlineno or 0) - - def format_args(self): - """Get the arguments formatted as string. - - :returns: The formatted arguments. - :rtype: str - """ - result = [] - positional_only_defaults = [] - positional_or_keyword_defaults = self.defaults - if self.defaults: - args = self.args or [] - positional_or_keyword_defaults = self.defaults[-len(args) :] - positional_only_defaults = self.defaults[: len(self.defaults) - len(args)] - - if self.posonlyargs: - result.append(_format_args(self.posonlyargs, positional_only_defaults)) - result.append("/") - if self.args: - result.append( - _format_args( - self.args, - positional_or_keyword_defaults, - getattr(self, "annotations", None), - ) - ) - if self.vararg: - result.append("*%s" % self.vararg) - if self.kwonlyargs: - if not self.vararg: - result.append("*") - result.append( - _format_args( - self.kwonlyargs, self.kw_defaults, self.kwonlyargs_annotations - ) - ) - if self.kwarg: - result.append("**%s" % self.kwarg) - return ", ".join(result) - - def default_value(self, argname): - """Get the default value for an argument. - - :param argname: The name of the argument to get the default value for. - :type argname: str - - :raises NoDefault: If there is no default value defined for the - given argument. - """ - args = list(itertools.chain((self.posonlyargs or ()), self.args or ())) - index = _find_arg(argname, args)[0] - if index is not None: - idx = index - (len(args) - len(self.defaults)) - if idx >= 0: - return self.defaults[idx] - index = _find_arg(argname, self.kwonlyargs)[0] - if index is not None and self.kw_defaults[index] is not None: - return self.kw_defaults[index] - raise exceptions.NoDefault(func=self.parent, name=argname) - - def is_argument(self, name): - """Check if the given name is defined in the arguments. - - :param name: The name to check for. - :type name: str - - :returns: True if the given name is defined in the arguments, - False otherwise. - :rtype: bool - """ - if name == self.vararg: - return True - if name == self.kwarg: - return True - return ( - self.find_argname(name, rec=True)[1] is not None - or self.kwonlyargs - and _find_arg(name, self.kwonlyargs, rec=True)[1] is not None - ) - - def find_argname(self, argname, rec=False): - """Get the index and :class:`AssignName` node for given name. - - :param argname: The name of the argument to search for. - :type argname: str - - :param rec: Whether or not to include arguments in unpacked tuples - in the search. - :type rec: bool - - :returns: The index and node for the argument. 
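# Illustrative sketch, not part of the deleted file (assuming astroid is
# importable): the format_args and default_value helpers defined above.
import astroid

func = astroid.extract_node("def foo(bar, baz=42, *args, key=None, **kw): pass")
print(func.args.format_args())
# roughly: bar, baz=42, *args, key=None, **kw
print(func.args.default_value("baz").value)  # 42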
- :rtype: tuple(str or None, AssignName or None) - """ - if ( - self.args or self.posonlyargs - ): # self.args may be None in some cases (builtin function) - arguments = itertools.chain(self.posonlyargs or (), self.args or ()) - return _find_arg(argname, arguments, rec) - return None, None - - def get_children(self): - yield from self.posonlyargs or () - yield from self.args or () - - yield from self.defaults - yield from self.kwonlyargs - - for elt in self.kw_defaults: - if elt is not None: - yield elt - - for elt in self.annotations: - if elt is not None: - yield elt - - if self.varargannotation is not None: - yield self.varargannotation - - if self.kwargannotation is not None: - yield self.kwargannotation - - for elt in self.kwonlyargs_annotations: - if elt is not None: - yield elt - - -def _find_arg(argname, args, rec=False): - for i, arg in enumerate(args): - if isinstance(arg, Tuple): - if rec: - found = _find_arg(argname, arg.elts) - if found[0] is not None: - return found - elif arg.name == argname: - return i, arg - return None, None - - -def _format_args(args, defaults=None, annotations=None): - values = [] - if args is None: - return "" - if annotations is None: - annotations = [] - if defaults is not None: - default_offset = len(args) - len(defaults) - packed = itertools.zip_longest(args, annotations) - for i, (arg, annotation) in enumerate(packed): - if isinstance(arg, Tuple): - values.append("(%s)" % _format_args(arg.elts)) - else: - argname = arg.name - default_sep = "=" - if annotation is not None: - argname += ": " + annotation.as_string() - default_sep = " = " - values.append(argname) - - if defaults is not None and i >= default_offset: - if defaults[i - default_offset] is not None: - values[-1] += default_sep + defaults[i - default_offset].as_string() - return ", ".join(values) - - -class AssignAttr(mixins.ParentAssignTypeMixin, NodeNG): - """Variation of :class:`ast.Assign` representing assignment to an attribute. - - >>> node = astroid.extract_node('self.attribute = range(10)') - >>> node - - >>> list(node.get_children()) - [, ] - >>> list(node.get_children())[0].as_string() - 'self.attribute' - """ - - _astroid_fields = ("expr",) - _other_fields = ("attrname",) - expr = None - """What has the attribute that is being assigned to. - - :type: NodeNG or None - """ - - def __init__(self, attrname=None, lineno=None, col_offset=None, parent=None): - """ - :param attrname: The name of the attribute being assigned to. - :type attrname: str or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.attrname = attrname - """The name of the attribute being assigned to. - - :type: str or None - """ - - super(AssignAttr, self).__init__(lineno, col_offset, parent) - - def postinit(self, expr=None): - """Do some setup after initialisation. - - :param expr: What has the attribute that is being assigned to. - :type expr: NodeNG or None - """ - self.expr = expr - - def get_children(self): - yield self.expr - - -class Assert(Statement): - """Class representing an :class:`ast.Assert` node. - - An :class:`Assert` node represents an assert statement. 
-
-    >>> node = astroid.extract_node('assert len(things) == 10, "Not enough things"')
-    >>> node
-
-    """
-
-    _astroid_fields = ("test", "fail")
-    test = None
-    """The test that passes or fails the assertion.
-
-    :type: NodeNG or None
-    """
-    fail = None
-    """The message shown when the assertion fails.
-
-    :type: NodeNG or None
-    """
-
-    def postinit(self, test=None, fail=None):
-        """Do some setup after initialisation.
-
-        :param test: The test that passes or fails the assertion.
-        :type test: NodeNG or None
-
-        :param fail: The message shown when the assertion fails.
-        :type fail: NodeNG or None
-        """
-        self.fail = fail
-        self.test = test
-
-    def get_children(self):
-        yield self.test
-
-        if self.fail is not None:
-            yield self.fail
-
-
-class Assign(mixins.AssignTypeMixin, Statement):
-    """Class representing an :class:`ast.Assign` node.
-
-    An :class:`Assign` is a statement where something is explicitly
-    assigned to.
-
-    >>> node = astroid.extract_node('variable = range(10)')
-    >>> node
-
-    """
-
-    _astroid_fields = ("targets", "value")
-    _other_other_fields = ("type_annotation",)
-    targets = None
-    """What is being assigned to.
-
-    :type: list(NodeNG) or None
-    """
-    value = None
-    """The value being assigned to the variables.
-
-    :type: NodeNG or None
-    """
-    type_annotation = None
-    """If present, this will contain the type annotation passed by a type comment
-
-    :type: NodeNG or None
-    """
-
-    def postinit(self, targets=None, value=None, type_annotation=None):
-        """Do some setup after initialisation.
-
-        :param targets: What is being assigned to.
-        :type targets: list(NodeNG) or None
-
-        :param value: The value being assigned to the variables.
-        :type: NodeNG or None
-        """
-        self.targets = targets
-        self.value = value
-        self.type_annotation = type_annotation
-
-    def get_children(self):
-        yield from self.targets
-
-        yield self.value
-
-    @decorators.cached
-    def _get_assign_nodes(self):
-        return [self] + list(self.value._get_assign_nodes())
-
-    def _get_yield_nodes_skip_lambdas(self):
-        yield from self.value._get_yield_nodes_skip_lambdas()
-
-
-class AnnAssign(mixins.AssignTypeMixin, Statement):
-    """Class representing an :class:`ast.AnnAssign` node.
-
-    An :class:`AnnAssign` is an assignment with a type annotation.
-
-    >>> node = astroid.extract_node('variable: List[int] = range(10)')
-    >>> node
-
-    """
-
-    _astroid_fields = ("target", "annotation", "value")
-    _other_fields = ("simple",)
-    target = None
-    """What is being assigned to.
-
-    :type: NodeNG or None
-    """
-    annotation = None
-    """The type annotation of what is being assigned to.
-
-    :type: NodeNG
-    """
-    value = None
-    """The value being assigned to the variables.
-
-    :type: NodeNG or None
-    """
-    simple = None
-    """Whether :attr:`target` is a pure name or a complex statement.
-
-    :type: int
-    """
-
-    def postinit(self, target, annotation, simple, value=None):
-        """Do some setup after initialisation.
-
-        :param target: What is being assigned to.
-        :type target: NodeNG
-
-        :param annotation: The type annotation of what is being assigned to.
-        :type: NodeNG
-
-        :param simple: Whether :attr:`target` is a pure name
-            or a complex statement.
-        :type simple: int
-
-        :param value: The value being assigned to the variables.
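# Illustrative sketch, not part of the deleted file (assuming astroid is
# importable): the fields that distinguish Assign from AnnAssign.
import astroid

plain, annotated = astroid.extract_node("x = 1 #@\ny: int = 2 #@")
print(plain.targets[0].name, plain.value.value)          # x 1
print(annotated.target.name, annotated.annotation.name)  # y int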
- :type: NodeNG or None - """ - self.target = target - self.annotation = annotation - self.value = value - self.simple = simple - - def get_children(self): - yield self.target - yield self.annotation - - if self.value is not None: - yield self.value - - -class AugAssign(mixins.AssignTypeMixin, Statement): - """Class representing an :class:`ast.AugAssign` node. - - An :class:`AugAssign` is an assignment paired with an operator. - - >>> node = astroid.extract_node('variable += 1') - >>> node - - """ - - _astroid_fields = ("target", "value") - _other_fields = ("op",) - target = None - """What is being assigned to. - - :type: NodeNG or None - """ - value = None - """The value being assigned to the variable. - - :type: NodeNG or None - """ - - def __init__(self, op=None, lineno=None, col_offset=None, parent=None): - """ - :param op: The operator that is being combined with the assignment. - This includes the equals sign. - :type op: str or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.op = op - """The operator that is being combined with the assignment. - - This includes the equals sign. - - :type: str or None - """ - - super(AugAssign, self).__init__(lineno, col_offset, parent) - - def postinit(self, target=None, value=None): - """Do some setup after initialisation. - - :param target: What is being assigned to. - :type target: NodeNG or None - - :param value: The value being assigned to the variable. - :type: NodeNG or None - """ - self.target = target - self.value = value - - # This is set by inference.py - def _infer_augassign(self, context=None): - raise NotImplementedError - - def type_errors(self, context=None): - """Get a list of type errors which can occur during inference. - - Each TypeError is represented by a :class:`BadBinaryOperationMessage` , - which holds the original exception. - - :returns: The list of possible type errors. - :rtype: list(BadBinaryOperationMessage) - """ - try: - results = self._infer_augassign(context=context) - return [ - result - for result in results - if isinstance(result, util.BadBinaryOperationMessage) - ] - except exceptions.InferenceError: - return [] - - def get_children(self): - yield self.target - yield self.value - - -class Repr(NodeNG): - """Class representing an :class:`ast.Repr` node. - - A :class:`Repr` node represents the backtick syntax, - which is a deprecated alias for :func:`repr` removed in Python 3. - - >>> node = astroid.extract_node('`variable`') - >>> node - - """ - - _astroid_fields = ("value",) - value = None - """What is having :func:`repr` called on it. - - :type: NodeNG or None - """ - - def postinit(self, value=None): - """Do some setup after initialisation. - - :param value: What is having :func:`repr` called on it. - :type value: NodeNG or None - """ - self.value = value - - -class BinOp(NodeNG): - """Class representing an :class:`ast.BinOp` node. - - A :class:`BinOp` node is an application of a binary operator. - - >>> node = astroid.extract_node('a + b') - >>> node - - """ - - _astroid_fields = ("left", "right") - _other_fields = ("op",) - left = None - """What is being applied to the operator on the left side. - - :type: NodeNG or None - """ - right = None - """What is being applied to the operator on the right side. 
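# Illustrative sketch, not part of the deleted file (assuming astroid is
# importable): AugAssign.type_errors surfaces inference-detected operand
# mismatches as BadBinaryOperationMessage objects.
import astroid

node = astroid.extract_node("x = 'a'\nx += 1 #@")
for error in node.type_errors():
    print(error)  # e.g. an unsupported operand types message for str += int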
- - :type: NodeNG or None - """ - - def __init__(self, op=None, lineno=None, col_offset=None, parent=None): - """ - :param op: The operator. - :type: str or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.op = op - """The operator. - - :type: str or None - """ - - super(BinOp, self).__init__(lineno, col_offset, parent) - - def postinit(self, left=None, right=None): - """Do some setup after initialisation. - - :param left: What is being applied to the operator on the left side. - :type left: NodeNG or None - - :param right: What is being applied to the operator on the right side. - :type right: NodeNG or None - """ - self.left = left - self.right = right - - # This is set by inference.py - def _infer_binop(self, context=None): - raise NotImplementedError - - def type_errors(self, context=None): - """Get a list of type errors which can occur during inference. - - Each TypeError is represented by a :class:`BadBinaryOperationMessage`, - which holds the original exception. - - :returns: The list of possible type errors. - :rtype: list(BadBinaryOperationMessage) - """ - try: - results = self._infer_binop(context=context) - return [ - result - for result in results - if isinstance(result, util.BadBinaryOperationMessage) - ] - except exceptions.InferenceError: - return [] - - def get_children(self): - yield self.left - yield self.right - - def op_precedence(self): - return OP_PRECEDENCE[self.op] - - def op_left_associative(self): - # 2**3**4 == 2**(3**4) - return self.op != "**" - - -class BoolOp(NodeNG): - """Class representing an :class:`ast.BoolOp` node. - - A :class:`BoolOp` is an application of a boolean operator. - - >>> node = astroid.extract_node('a and b') - >>> node - - """ - - _astroid_fields = ("values",) - _other_fields = ("op",) - values = None - """The values being applied to the operator. - - :type: list(NodeNG) or None - """ - - def __init__(self, op=None, lineno=None, col_offset=None, parent=None): - """ - :param op: The operator. - :type: str or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.op = op - """The operator. - - :type: str or None - """ - - super(BoolOp, self).__init__(lineno, col_offset, parent) - - def postinit(self, values=None): - """Do some setup after initialisation. - - :param values: The values being applied to the operator. - :type values: list(NodeNG) or None - """ - self.values = values - - def get_children(self): - yield from self.values - - def op_precedence(self): - return OP_PRECEDENCE[self.op] - - -class Break(mixins.NoChildrenMixin, Statement): - """Class representing an :class:`ast.Break` node. - - >>> node = astroid.extract_node('break') - >>> node - - """ - - -class Call(NodeNG): - """Class representing an :class:`ast.Call` node. - - A :class:`Call` node is a call to a function, method, etc. - - >>> node = astroid.extract_node('function()') - >>> node - - """ - - _astroid_fields = ("func", "args", "keywords") - func = None - """What is being called. 
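# Illustrative sketch, not part of the deleted file (assuming astroid is
# importable): chained 'and's collapse into a single BoolOp, and ** is the
# one right-associative binary operator, as op_left_associative reports.
import astroid

boolop = astroid.extract_node("a and b and c")
print(len(boolop.values))  # 3

power = astroid.extract_node("2 ** 3 ** 4")
print(power.op_left_associative())  # False: parsed as 2 ** (3 ** 4)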
- - :type: NodeNG or None - """ - args = None - """The positional arguments being given to the call. - - :type: list(NodeNG) or None - """ - keywords = None - """The keyword arguments being given to the call. - - :type: list(NodeNG) or None - """ - - def postinit(self, func=None, args=None, keywords=None): - """Do some setup after initialisation. - - :param func: What is being called. - :type func: NodeNG or None - - :param args: The positional arguments being given to the call. - :type args: list(NodeNG) or None - - :param keywords: The keyword arguments being given to the call. - :type keywords: list(NodeNG) or None - """ - self.func = func - self.args = args - self.keywords = keywords - - @property - def starargs(self): - """The positional arguments that unpack something. - - :type: list(Starred) - """ - args = self.args or [] - return [arg for arg in args if isinstance(arg, Starred)] - - @property - def kwargs(self): - """The keyword arguments that unpack something. - - :type: list(Keyword) - """ - keywords = self.keywords or [] - return [keyword for keyword in keywords if keyword.arg is None] - - def get_children(self): - yield self.func - - yield from self.args - - yield from self.keywords or () - - -class Compare(NodeNG): - """Class representing an :class:`ast.Compare` node. - - A :class:`Compare` node indicates a comparison. - - >>> node = astroid.extract_node('a <= b <= c') - >>> node - - >>> node.ops - [('<=', ), ('<=', )] - """ - - _astroid_fields = ("left", "ops") - left = None - """The value at the left being applied to a comparison operator. - - :type: NodeNG or None - """ - ops = None - """The remainder of the operators and their relevant right hand value. - - :type: list(tuple(str, NodeNG)) or None - """ - - def postinit(self, left=None, ops=None): - """Do some setup after initialisation. - - :param left: The value at the left being applied to a comparison - operator. - :type left: NodeNG or None - - :param ops: The remainder of the operators - and their relevant right hand value. - :type ops: list(tuple(str, NodeNG)) or None - """ - self.left = left - self.ops = ops - - def get_children(self): - """Get the child nodes below this node. - - Overridden to handle the tuple fields and skip returning the operator - strings. - - :returns: The children. - :rtype: iterable(NodeNG) - """ - yield self.left - for _, comparator in self.ops: - yield comparator # we don't want the 'op' - - def last_child(self): - """An optimized version of list(get_children())[-1] - - :returns: The last child. - :rtype: NodeNG - """ - # XXX maybe if self.ops: - return self.ops[-1][1] - # return self.left - - -class Comprehension(NodeNG): - """Class representing an :class:`ast.comprehension` node. - - A :class:`Comprehension` indicates the loop inside any type of - comprehension including generator expressions. - - >>> node = astroid.extract_node('[x for x in some_values]') - >>> list(node.get_children()) - [, ] - >>> list(node.get_children())[1].as_string() - 'for x in some_values' - """ - - _astroid_fields = ("target", "iter", "ifs") - _other_fields = ("is_async",) - target = None - """What is assigned to by the comprehension. - - :type: NodeNG or None - """ - iter = None - """What is iterated over by the comprehension. - - :type: NodeNG or None - """ - ifs = None - """The contents of any if statements that filter the comprehension. - - :type: list(NodeNG) or None - """ - is_async = None - """Whether this is an asynchronous comprehension or not. 
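# Illustrative sketch, not part of the deleted file (assuming astroid is
# importable): the starargs/kwargs properties single out the unpacked
# arguments of a Call.
import astroid

call = astroid.extract_node("function(1, *rest, key=2, **extra)")
print([arg.as_string() for arg in call.starargs])    # ['*rest']
print([kw.value.as_string() for kw in call.kwargs])  # ['extra']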
- - :type: bool or None - """ - - def __init__(self, parent=None): - """ - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - super(Comprehension, self).__init__() - self.parent = parent - - # pylint: disable=redefined-builtin; same name as builtin ast module. - def postinit(self, target=None, iter=None, ifs=None, is_async=None): - """Do some setup after initialisation. - - :param target: What is assigned to by the comprehension. - :type target: NodeNG or None - - :param iter: What is iterated over by the comprehension. - :type iter: NodeNG or None - - :param ifs: The contents of any if statements that filter - the comprehension. - :type ifs: list(NodeNG) or None - - :param is_async: Whether this is an asynchronous comprehension or not. - :type: bool or None - """ - self.target = target - self.iter = iter - self.ifs = ifs - self.is_async = is_async - - optional_assign = True - """Whether this node optionally assigns a variable. - - :type: bool - """ - - def assign_type(self): - """The type of assignment that this node performs. - - :returns: The assignment type. - :rtype: NodeNG - """ - return self - - def _get_filtered_stmts(self, lookup_node, node, stmts, mystmt): - """method used in filter_stmts""" - if self is mystmt: - if isinstance(lookup_node, (Const, Name)): - return [lookup_node], True - - elif self.statement() is mystmt: - # original node's statement is the assignment, only keeps - # current node (gen exp, list comp) - - return [node], True - - return stmts, False - - def get_children(self): - yield self.target - yield self.iter - - yield from self.ifs - - -class Const(mixins.NoChildrenMixin, NodeNG, bases.Instance): - """Class representing any constant including num, str, bool, None, bytes. - - >>> node = astroid.extract_node('(5, "This is a string.", True, None, b"bytes")') - >>> node - - >>> list(node.get_children()) - [, - , - , - , - ] - """ - - _other_fields = ("value",) - - def __init__(self, value, lineno=None, col_offset=None, parent=None): - """ - :param value: The value that the constant represents. - :type value: object - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.value = value - """The value that the constant represents. - - :type: object - """ - - super(Const, self).__init__(lineno, col_offset, parent) - - def __getattr__(self, name): - # This is needed because of Proxy's __getattr__ method. - # Calling object.__new__ on this class without calling - # __init__ would result in an infinite loop otherwise - # since __getattr__ is called when an attribute doesn't - # exist and self._proxied indirectly calls self.value - # and Proxy __getattr__ calls self.value - if name == "value": - raise AttributeError - return super().__getattr__(name) - - def getitem(self, index, context=None): - """Get an item from this node if subscriptable. - - :param index: The node to use as a subscript index. - :type index: Const or Slice - - :raises AstroidTypeError: When the given index cannot be used as a - subscript index, or if this node is not subscriptable. 
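# Illustrative sketch, not part of the deleted file (assuming astroid is
# importable): a Comprehension node carries the target, the iterable and
# the filtering ifs of the loop, mirroring the docstring example above.
import astroid

comp = astroid.extract_node("[x for x in some_values if x > 0]")
loop = list(comp.get_children())[1]  # the Comprehension node
print(loop.target.as_string(), loop.iter.as_string(), len(loop.ifs))
# expected: x some_values 1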
- """ - if isinstance(index, Const): - index_value = index.value - elif isinstance(index, Slice): - index_value = _infer_slice(index, context=context) - - else: - raise exceptions.AstroidTypeError( - "Could not use type {} as subscript index".format(type(index)) - ) - - try: - if isinstance(self.value, (str, bytes)): - return Const(self.value[index_value]) - except IndexError as exc: - raise exceptions.AstroidIndexError( - message="Index {index!r} out of range", - node=self, - index=index, - context=context, - ) from exc - except TypeError as exc: - raise exceptions.AstroidTypeError( - message="Type error {error!r}", node=self, index=index, context=context - ) from exc - - raise exceptions.AstroidTypeError("%r (value=%s)" % (self, self.value)) - - def has_dynamic_getattr(self): - """Check if the node has a custom __getattr__ or __getattribute__. - - :returns: True if the class has a custom - __getattr__ or __getattribute__, False otherwise. - For a :class:`Const` this is always ``False``. - :rtype: bool - """ - return False - - def itered(self): - """An iterator over the elements this node contains. - - :returns: The contents of this node. - :rtype: iterable(str) - - :raises TypeError: If this node does not represent something that is iterable. - """ - if isinstance(self.value, str): - return self.value - raise TypeError() - - def pytype(self): - """Get the name of the type that this node represents. - - :returns: The name of the type. - :rtype: str - """ - return self._proxied.qname() - - def bool_value(self): - """Determine the boolean value of this node. - - :returns: The boolean value of this node. - :rtype: bool - """ - return bool(self.value) - - -class Continue(mixins.NoChildrenMixin, Statement): - """Class representing an :class:`ast.Continue` node. - - >>> node = astroid.extract_node('continue') - >>> node - - """ - - -class Decorators(NodeNG): - """A node representing a list of decorators. - - A :class:`Decorators` is the decorators that are applied to - a method or function. - - >>> node = astroid.extract_node(''' - @property - def my_property(self): - return 3 - ''') - >>> node - - >>> list(node.get_children())[0] - - """ - - _astroid_fields = ("nodes",) - nodes = None - """The decorators that this node contains. - - :type: list(Name or Call) or None - """ - - def postinit(self, nodes): - """Do some setup after initialisation. - - :param nodes: The decorators that this node contains. - :type nodes: list(Name or Call) - """ - self.nodes = nodes - - def scope(self): - """The first parent node defining a new scope. - - :returns: The first parent scope node. - :rtype: Module or FunctionDef or ClassDef or Lambda or GenExpr - """ - # skip the function node to go directly to the upper level scope - return self.parent.parent.scope() - - def get_children(self): - yield from self.nodes - - -class DelAttr(mixins.ParentAssignTypeMixin, NodeNG): - """Variation of :class:`ast.Delete` representing deletion of an attribute. - - >>> node = astroid.extract_node('del self.attr') - >>> node - - >>> list(node.get_children())[0] - - """ - - _astroid_fields = ("expr",) - _other_fields = ("attrname",) - expr = None - """The name that this node represents. - - :type: Name or None - """ - - def __init__(self, attrname=None, lineno=None, col_offset=None, parent=None): - """ - :param attrname: The name of the attribute that is being deleted. - :type attrname: str or None - - :param lineno: The line that this node appears on in the source code. 
-        :type lineno: int or None
-
-        :param col_offset: The column that this node appears on in the
-            source code.
-        :type col_offset: int or None
-
-        :param parent: The parent node in the syntax tree.
-        :type parent: NodeNG or None
-        """
-        self.attrname = attrname
-        """The name of the attribute that is being deleted.
-
-        :type: str or None
-        """
-
-        super(DelAttr, self).__init__(lineno, col_offset, parent)
-
-    def postinit(self, expr=None):
-        """Do some setup after initialisation.
-
-        :param expr: The name that this node represents.
-        :type expr: Name or None
-        """
-        self.expr = expr
-
-    def get_children(self):
-        yield self.expr
-
-
-class Delete(mixins.AssignTypeMixin, Statement):
-    """Class representing an :class:`ast.Delete` node.
-
-    A :class:`Delete` is a ``del`` statement that is deleting something.
-
-    >>> node = astroid.extract_node('del self.attr')
-    >>> node
-
-    """
-
-    _astroid_fields = ("targets",)
-    targets = None
-    """What is being deleted.
-
-    :type: list(NodeNG) or None
-    """
-
-    def postinit(self, targets=None):
-        """Do some setup after initialisation.
-
-        :param targets: What is being deleted.
-        :type targets: list(NodeNG) or None
-        """
-        self.targets = targets
-
-    def get_children(self):
-        yield from self.targets
-
-
-class Dict(NodeNG, bases.Instance):
-    """Class representing an :class:`ast.Dict` node.
-
-    A :class:`Dict` is a dictionary that is created with ``{}`` syntax.
-
-    >>> node = astroid.extract_node('{1: "1"}')
-    >>> node
-
-    """
-
-    _astroid_fields = ("items",)
-
-    def __init__(self, lineno=None, col_offset=None, parent=None):
-        """
-        :param lineno: The line that this node appears on in the source code.
-        :type lineno: int or None
-
-        :param col_offset: The column that this node appears on in the
-            source code.
-        :type col_offset: int or None
-
-        :param parent: The parent node in the syntax tree.
-        :type parent: NodeNG or None
-        """
-        self.items = []
-        """The key-value pairs contained in the dictionary.
-
-        :type: list(tuple(NodeNG, NodeNG))
-        """
-
-        super(Dict, self).__init__(lineno, col_offset, parent)
-
-    def postinit(self, items):
-        """Do some setup after initialisation.
-
-        :param items: The key-value pairs contained in the dictionary.
-        :type items: list(tuple(NodeNG, NodeNG))
-        """
-        self.items = items
-
-    @classmethod
-    def from_elements(cls, items=None):
-        """Create a :class:`Dict` of constants from a live dictionary.
-
-        :param items: The items to store in the node.
-        :type items: dict
-
-        :returns: The created dictionary node.
-        :rtype: Dict
-        """
-        node = cls()
-        if items is None:
-            node.items = []
-        else:
-            node.items = [
-                (const_factory(k), const_factory(v) if _is_const(v) else v)
-                for k, v in items.items()
-                # The keys need to be constants
-                if _is_const(k)
-            ]
-        return node
-
-    def pytype(self):
-        """Get the name of the type that this node represents.
-
-        :returns: The name of the type.
-        :rtype: str
-        """
-        return "%s.dict" % BUILTINS
-
-    def get_children(self):
-        """Get the key and value nodes below this node.
-
-        Children are returned in the order that they are defined in the source
-        code, key first then the value.
-
-        :returns: The children.
-        :rtype: iterable(NodeNG)
-        """
-        for key, value in self.items:
-            yield key
-            yield value
-
-    def last_child(self):
-        """An optimized version of list(get_children())[-1]
-
-        :returns: The last child, or None if no children exist.
-        :rtype: NodeNG or None
-        """
-        if self.items:
-            return self.items[-1][1]
-        return None
-
-    def itered(self):
-        """An iterator over the keys this node contains.
-
-        :returns: The keys of this node.
-        :rtype: iterable(NodeNG)
-        """
-        return [key for (key, _) in self.items]
-
-    def getitem(self, index, context=None):
-        """Get an item from this node.
-
-        :param index: The node to use as a subscript index.
-        :type index: Const or Slice
-
-        :raises AstroidTypeError: When the given index cannot be used as a
-            subscript index, or if this node is not subscriptable.
-        :raises AstroidIndexError: If the given index does not exist in the
-            dictionary.
-        """
-        for key, value in self.items:
-            # TODO(cpopa): no support for overriding yet, {1:2, **{1: 3}}.
-            if isinstance(key, DictUnpack):
-                try:
-                    return value.getitem(index, context)
-                except (exceptions.AstroidTypeError, exceptions.AstroidIndexError):
-                    continue
-            for inferredkey in key.infer(context):
-                if inferredkey is util.Uninferable:
-                    continue
-                if isinstance(inferredkey, Const) and isinstance(index, Const):
-                    if inferredkey.value == index.value:
-                        return value
-
-        raise exceptions.AstroidIndexError(index)
-
-    def bool_value(self):
-        """Determine the boolean value of this node.
-
-        :returns: The boolean value of this node.
-        :rtype: bool
-        """
-        return bool(self.items)
-
-
-class Expr(Statement):
-    """Class representing an :class:`ast.Expr` node.
-
-    An :class:`Expr` is any expression that does not have its value used or
-    stored.
-
-    >>> node = astroid.extract_node('method()')
-    >>> node
-
-    >>> node.parent
-
-    """
-
-    _astroid_fields = ("value",)
-    value = None
-    """What the expression does.
-
-    :type: NodeNG or None
-    """
-
-    def postinit(self, value=None):
-        """Do some setup after initialisation.
-
-        :param value: What the expression does.
-        :type value: NodeNG or None
-        """
-        self.value = value
-
-    def get_children(self):
-        yield self.value
-
-    def _get_yield_nodes_skip_lambdas(self):
-        if not self.value.is_lambda:
-            yield from self.value._get_yield_nodes_skip_lambdas()
-
-
-class Ellipsis(mixins.NoChildrenMixin, NodeNG):  # pylint: disable=redefined-builtin
-    """Class representing an :class:`ast.Ellipsis` node.
-
-    An :class:`Ellipsis` is the ``...`` syntax.
-
-    >>> node = astroid.extract_node('...')
-    >>> node
-
-    """
-
-    def bool_value(self):
-        """Determine the boolean value of this node.
-
-        :returns: The boolean value of this node.
-            For an :class:`Ellipsis` this is always ``True``.
-        :rtype: bool
-        """
-        return True
-
-
-class EmptyNode(mixins.NoChildrenMixin, NodeNG):
-    """Holds an arbitrary object in the :attr:`LocalsDictNodeNG.locals`."""
-
-    object = None
-
-
-class ExceptHandler(mixins.MultiLineBlockMixin, mixins.AssignTypeMixin, Statement):
-    """Class representing an :class:`ast.ExceptHandler` node.
-
-    An :class:`ExceptHandler` is an ``except`` block on a try-except.
-
-    >>> node = astroid.extract_node('''
-        try:
-            do_something()
-        except Exception as error:
-            print("Error!")
-        ''')
-    >>> node
-
-    >>> node.handlers
-    []
-    """
-
-    _astroid_fields = ("type", "name", "body")
-    _multi_line_block_fields = ("body",)
-    type = None
-    """The types that the block handles.
-
-    :type: Tuple or NodeNG or None
-    """
-    name = None
-    """The name that the caught exception is assigned to.
-
-    :type: AssignName or None
-    """
-    body = None
-    """The contents of the block.
-
-    :type: list(NodeNG) or None
-    """
-
-    def get_children(self):
-        if self.type is not None:
-            yield self.type
-
-        if self.name is not None:
-            yield self.name
-
-        yield from self.body
-
-    # pylint: disable=redefined-builtin; had to use the same name as builtin ast module.
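# Illustrative sketch, not part of the deleted file (assuming astroid is
# importable): Dict.getitem infers each key until one matches the index.
import astroid

mapping = astroid.extract_node("{1: 'one', 2: 'two'}")
key = astroid.extract_node("2")
print(mapping.getitem(key).value)  # 'two'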
-    def postinit(self, type=None, name=None, body=None):
-        """Do some setup after initialisation.
-
-        :param type: The types that the block handles.
-        :type type: Tuple or NodeNG or None
-
-        :param name: The name that the caught exception is assigned to.
-        :type name: AssignName or None
-
-        :param body: The contents of the block.
-        :type body: list(NodeNG) or None
-        """
-        self.type = type
-        self.name = name
-        self.body = body
-
-    @decorators.cachedproperty
-    def blockstart_tolineno(self):
-        """The line on which the beginning of this block ends.
-
-        :type: int
-        """
-        if self.name:
-            return self.name.tolineno
-        if self.type:
-            return self.type.tolineno
-        return self.lineno
-
-    def catch(self, exceptions):  # pylint: disable=redefined-outer-name
-        """Check if this node handles any of the given exceptions.
-
-        If ``exceptions`` is empty, this will default to ``True``.
-
-        :param exceptions: The names of the exceptions to check for.
-        :type exceptions: list(str)
-        """
-        if self.type is None or exceptions is None:
-            return True
-        for node in self.type._get_name_nodes():
-            if node.name in exceptions:
-                return True
-        return False
-
-
-class Exec(Statement):
-    """Class representing the ``exec`` statement.
-
-    >>> node = astroid.extract_node('exec "True"')
-    >>> node
-
-    """
-
-    _astroid_fields = ("expr", "globals", "locals")
-    expr = None
-    """The expression to be executed.
-
-    :type: NodeNG or None
-    """
-    globals = None
-    """The globals dictionary to execute with.
-
-    :type: NodeNG or None
-    """
-    locals = None
-    """The locals dictionary to execute with.
-
-    :type: NodeNG or None
-    """
-
-    # pylint: disable=redefined-builtin; had to use the same name as builtin ast module.
-    def postinit(self, expr=None, globals=None, locals=None):
-        """Do some setup after initialisation.
-
-        :param expr: The expression to be executed.
-        :type expr: NodeNG or None
-
-        :param globals: The globals dictionary to execute with.
-        :type globals: NodeNG or None
-
-        :param locals: The locals dictionary to execute with.
-        :type locals: NodeNG or None
-        """
-        self.expr = expr
-        self.globals = globals
-        self.locals = locals
-
-
-class ExtSlice(NodeNG):
-    """Class representing an :class:`ast.ExtSlice` node.
-
-    An :class:`ExtSlice` is a complex slice expression.
-
-    >>> node = astroid.extract_node('l[1:3, 5]')
-    >>> node
-
-    >>> node.slice
-
-    """
-
-    _astroid_fields = ("dims",)
-    dims = None
-    """The simple dimensions that form the complete slice.
-
-    :type: list(NodeNG) or None
-    """
-
-    def postinit(self, dims=None):
-        """Do some setup after initialisation.
-
-        :param dims: The simple dimensions that form the complete slice.
-        :type dims: list(NodeNG) or None
-        """
-        self.dims = dims
-
-
-class For(
-    mixins.MultiLineBlockMixin,
-    mixins.BlockRangeMixIn,
-    mixins.AssignTypeMixin,
-    Statement,
-):
-    """Class representing an :class:`ast.For` node.
-
-    >>> node = astroid.extract_node('for thing in things: print(thing)')
-    >>> node
-
-    """
-
-    _astroid_fields = ("target", "iter", "body", "orelse")
-    _other_other_fields = ("type_annotation",)
-    _multi_line_block_fields = ("body", "orelse")
-    target = None
-    """What the loop assigns to.
-
-    :type: NodeNG or None
-    """
-    iter = None
-    """What the loop iterates over.
-
-    :type: NodeNG or None
-    """
-    body = None
-    """The contents of the body of the loop.
-
-    :type: list(NodeNG) or None
-    """
-    orelse = None
-    """The contents of the ``else`` block of the loop.
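# Illustrative sketch, not part of the deleted file (assuming astroid is
# importable): ExceptHandler.catch matches handled exception names.
import astroid

tryexcept = astroid.extract_node(
    "try:\n    pass\nexcept (ValueError, KeyError):\n    pass"
)
handler = tryexcept.handlers[0]
print(handler.catch(["ValueError"]))  # True
print(handler.catch(["OSError"]))     # False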
- - :type: list(NodeNG) or None - """ - type_annotation = None - """If present, this will contain the type annotation passed by a type comment - - :type: NodeNG or None - """ - - # pylint: disable=redefined-builtin; had to use the same name as builtin ast module. - def postinit( - self, target=None, iter=None, body=None, orelse=None, type_annotation=None - ): - """Do some setup after initialisation. - - :param target: What the loop assigns to. - :type target: NodeNG or None - - :param iter: What the loop iterates over. - :type iter: NodeNG or None - - :param body: The contents of the body of the loop. - :type body: list(NodeNG) or None - - :param orelse: The contents of the ``else`` block of the loop. - :type orelse: list(NodeNG) or None - """ - self.target = target - self.iter = iter - self.body = body - self.orelse = orelse - self.type_annotation = type_annotation - - optional_assign = True - """Whether this node optionally assigns a variable. - - This is always ``True`` for :class:`For` nodes. - - :type: bool - """ - - @decorators.cachedproperty - def blockstart_tolineno(self): - """The line on which the beginning of this block ends. - - :type: int - """ - return self.iter.tolineno - - def get_children(self): - yield self.target - yield self.iter - - yield from self.body - yield from self.orelse - - -class AsyncFor(For): - """Class representing an :class:`ast.AsyncFor` node. - - An :class:`AsyncFor` is an asynchronous :class:`For` built with - the ``async`` keyword. - - >>> node = astroid.extract_node(''' - async def func(things): - async for thing in things: - print(thing) - ''') - >>> node - - >>> node.body[0] - - """ - - -class Await(NodeNG): - """Class representing an :class:`ast.Await` node. - - An :class:`Await` is the ``await`` keyword. - - >>> node = astroid.extract_node(''' - async def func(things): - await other_func() - ''') - >>> node - - >>> node.body[0] - - >>> list(node.body[0].get_children())[0] - - """ - - _astroid_fields = ("value",) - value = None - """What to wait for. - - :type: NodeNG or None - """ - - def postinit(self, value=None): - """Do some setup after initialisation. - - :param value: What to wait for. - :type value: NodeNG or None - """ - self.value = value - - def get_children(self): - yield self.value - - -class ImportFrom(mixins.NoChildrenMixin, mixins.ImportFromMixin, Statement): - """Class representing an :class:`ast.ImportFrom` node. - - >>> node = astroid.extract_node('from my_package import my_module') - >>> node - - """ - - _other_fields = ("modname", "names", "level") - - def __init__( - self, fromname, names, level=0, lineno=None, col_offset=None, parent=None - ): - """ - :param fromname: The module that is being imported from. - :type fromname: str or None - - :param names: What is being imported from the module. - :type names: list(tuple(str, str or None)) - - :param level: The level of relative import. - :type level: int - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.modname = fromname - """The module that is being imported from. - - This is ``None`` for relative imports. - - :type: str or None - """ - - self.names = names - """What is being imported from the module. 
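# Illustrative sketch, not part of the deleted file (assuming astroid is
# importable): the pieces of a For node, whose assignment is optional
# because the loop body may never run.
import astroid

loop = astroid.extract_node("for thing in things:\n    print(thing)")
print(loop.target.as_string(), loop.iter.as_string())  # thing things
print(loop.optional_assign)  # True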
- - Each entry is a :class:`tuple` of the name being imported, - and the alias that the name is assigned to (if any). - - :type: list(tuple(str, str or None)) - """ - - self.level = level - """The level of relative import. - - Essentially this is the number of dots in the import. - This is always 0 for absolute imports. - - :type: int - """ - - super(ImportFrom, self).__init__(lineno, col_offset, parent) - - -class Attribute(NodeNG): - """Class representing an :class:`ast.Attribute` node.""" - - _astroid_fields = ("expr",) - _other_fields = ("attrname",) - expr = None - """The name that this node represents. - - :type: Name or None - """ - - def __init__(self, attrname=None, lineno=None, col_offset=None, parent=None): - """ - :param attrname: The name of the attribute. - :type attrname: str or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.attrname = attrname - """The name of the attribute. - - :type: str or None - """ - - super(Attribute, self).__init__(lineno, col_offset, parent) - - def postinit(self, expr=None): - """Do some setup after initialisation. - - :param expr: The name that this node represents. - :type expr: Name or None - """ - self.expr = expr - - def get_children(self): - yield self.expr - - -class Global(mixins.NoChildrenMixin, Statement): - """Class representing an :class:`ast.Global` node. - - >>> node = astroid.extract_node('global a_global') - >>> node - - """ - - _other_fields = ("names",) - - def __init__(self, names, lineno=None, col_offset=None, parent=None): - """ - :param names: The names being declared as global. - :type names: list(str) - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.names = names - """The names being declared as global. - - :type: list(str) - """ - - super(Global, self).__init__(lineno, col_offset, parent) - - def _infer_name(self, frame, name): - return name - - -class If(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement): - """Class representing an :class:`ast.If` node. - - >>> node = astroid.extract_node('if condition: print(True)') - >>> node - - """ - - _astroid_fields = ("test", "body", "orelse") - _multi_line_block_fields = ("body", "orelse") - test = None - """The condition that the statement tests. - - :type: NodeNG or None - """ - body = None - """The contents of the block. - - :type: list(NodeNG) or None - """ - orelse = None - """The contents of the ``else`` block. - - :type: list(NodeNG) or None - """ - - def postinit(self, test=None, body=None, orelse=None): - """Do some setup after initialisation. - - :param test: The condition that the statement tests. - :type test: NodeNG or None - - :param body: The contents of the block. - :type body: list(NodeNG) or None - - :param orelse: The contents of the ``else`` block. - :type orelse: list(NodeNG) or None - """ - self.test = test - self.body = body - self.orelse = orelse - - @decorators.cachedproperty - def blockstart_tolineno(self): - """The line on which the beginning of this block ends. 
- - :type: int - """ - return self.test.tolineno - - def block_range(self, lineno): - """Get a range from the given line number to where this node ends. - - :param lineno: The line number to start the range at. - :type lineno: int - - :returns: The range of line numbers that this node belongs to, - starting at the given line number. - :rtype: tuple(int, int) - """ - if lineno == self.body[0].fromlineno: - return lineno, lineno - if lineno <= self.body[-1].tolineno: - return lineno, self.body[-1].tolineno - return self._elsed_block_range(lineno, self.orelse, self.body[0].fromlineno - 1) - - def get_children(self): - yield self.test - - yield from self.body - yield from self.orelse - - def has_elif_block(self): - return len(self.orelse) == 1 and isinstance(self.orelse[0], If) - - -class IfExp(NodeNG): - """Class representing an :class:`ast.IfExp` node. - - >>> node = astroid.extract_node('value if condition else other') - >>> node - - """ - - _astroid_fields = ("test", "body", "orelse") - test = None - """The condition that the statement tests. - - :type: NodeNG or None - """ - body = None - """The contents of the block. - - :type: list(NodeNG) or None - """ - orelse = None - """The contents of the ``else`` block. - - :type: list(NodeNG) or None - """ - - def postinit(self, test=None, body=None, orelse=None): - """Do some setup after initialisation. - - :param test: The condition that the statement tests. - :type test: NodeNG or None - - :param body: The contents of the block. - :type body: list(NodeNG) or None - - :param orelse: The contents of the ``else`` block. - :type orelse: list(NodeNG) or None - """ - self.test = test - self.body = body - self.orelse = orelse - - def get_children(self): - yield self.test - yield self.body - yield self.orelse - - def op_left_associative(self): - # `1 if True else 2 if False else 3` is parsed as - # `1 if True else (2 if False else 3)` - return False - - -class Import(mixins.NoChildrenMixin, mixins.ImportFromMixin, Statement): - """Class representing an :class:`ast.Import` node. - - >>> node = astroid.extract_node('import astroid') - >>> node - - """ - - _other_fields = ("names",) - - def __init__(self, names=None, lineno=None, col_offset=None, parent=None): - """ - :param names: The names being imported. - :type names: list(tuple(str, str or None)) or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.names = names - """The names being imported. - - Each entry is a :class:`tuple` of the name being imported, - and the alias that the name is assigned to (if any). - - :type: list(tuple(str, str or None)) or None - """ - - super(Import, self).__init__(lineno, col_offset, parent) - - -class Index(NodeNG): - """Class representing an :class:`ast.Index` node. - - An :class:`Index` is a simple subscript. - - >>> node = astroid.extract_node('things[1]') - >>> node - - >>> node.slice - - """ - - _astroid_fields = ("value",) - value = None - """The value to subscript with. - - :type: NodeNG or None - """ - - def postinit(self, value=None): - """Do some setup after initialisation. - - :param value: The value to subscript with. 
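# Illustrative sketch, not part of the deleted file (assuming astroid is
# importable): an elif is represented as a nested If in orelse, which is
# what has_elif_block detects.
import astroid

node = astroid.extract_node("if a:\n    pass\nelif b:\n    pass")
print(node.has_elif_block())          # True
print(type(node.orelse[0]).__name__)  # If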
-        :type value: NodeNG or None
-        """
-        self.value = value
-
-    def get_children(self):
-        yield self.value
-
-
-class Keyword(NodeNG):
-    """Class representing an :class:`ast.keyword` node.
-
-    >>> node = astroid.extract_node('function(a_kwarg=True)')
-    >>> node
-
-    >>> node.keywords
-    []
-    """
-
-    _astroid_fields = ("value",)
-    _other_fields = ("arg",)
-    value = None
-    """The value being assigned to the keyword argument.
-
-    :type: NodeNG or None
-    """
-
-    def __init__(self, arg=None, lineno=None, col_offset=None, parent=None):
-        """
-        :param arg: The argument being assigned to.
-        :type arg: Name or None
-
-        :param lineno: The line that this node appears on in the source code.
-        :type lineno: int or None
-
-        :param col_offset: The column that this node appears on in the
-            source code.
-        :type col_offset: int or None
-
-        :param parent: The parent node in the syntax tree.
-        :type parent: NodeNG or None
-        """
-        self.arg = arg
-        """The argument being assigned to.
-
-        :type: Name or None
-        """
-
-        super(Keyword, self).__init__(lineno, col_offset, parent)
-
-    def postinit(self, value=None):
-        """Do some setup after initialisation.
-
-        :param value: The value being assigned to the keyword argument.
-        :type value: NodeNG or None
-        """
-        self.value = value
-
-    def get_children(self):
-        yield self.value
-
-
-class List(_BaseContainer):
-    """Class representing an :class:`ast.List` node.
-
-    >>> node = astroid.extract_node('[1, 2, 3]')
-    >>> node
-
-    """
-
-    _other_fields = ("ctx",)
-
-    def __init__(self, ctx=None, lineno=None, col_offset=None, parent=None):
-        """
-        :param ctx: Whether the list is assigned to or loaded from.
-        :type ctx: Context or None
-
-        :param lineno: The line that this node appears on in the source code.
-        :type lineno: int or None
-
-        :param col_offset: The column that this node appears on in the
-            source code.
-        :type col_offset: int or None
-
-        :param parent: The parent node in the syntax tree.
-        :type parent: NodeNG or None
-        """
-        self.ctx = ctx
-        """Whether the list is assigned to or loaded from.
-
-        :type: Context or None
-        """
-
-        super(List, self).__init__(lineno, col_offset, parent)
-
-    def pytype(self):
-        """Get the name of the type that this node represents.
-
-        :returns: The name of the type.
-        :rtype: str
-        """
-        return "%s.list" % BUILTINS
-
-    def getitem(self, index, context=None):
-        """Get an item from this node.
-
-        :param index: The node to use as a subscript index.
-        :type index: Const or Slice
-        """
-        return _container_getitem(self, self.elts, index, context=context)
-
-
-class Nonlocal(mixins.NoChildrenMixin, Statement):
-    """Class representing an :class:`ast.Nonlocal` node.
-
-    >>> node = astroid.extract_node('''
-    def function():
-        nonlocal var
-    ''')
-    >>> node
-
-    >>> node.body[0]
-
-    """
-
-    _other_fields = ("names",)
-
-    def __init__(self, names, lineno=None, col_offset=None, parent=None):
-        """
-        :param names: The names being declared as not local.
-        :type names: list(str)
-
-        :param lineno: The line that this node appears on in the source code.
-        :type lineno: int or None
-
-        :param col_offset: The column that this node appears on in the
-            source code.
-        :type col_offset: int or None
-
-        :param parent: The parent node in the syntax tree.
-        :type parent: NodeNG or None
-        """
-        self.names = names
-        """The names being declared as not local.
- - :type: list(str) - """ - - super(Nonlocal, self).__init__(lineno, col_offset, parent) - - def _infer_name(self, frame, name): - return name - - -class Pass(mixins.NoChildrenMixin, Statement): - """Class representing an :class:`ast.Pass` node. - - >>> node = astroid.extract_node('pass') - >>> node - - """ - - -class Print(Statement): - """Class representing an :class:`ast.Print` node. - - >>> node = astroid.extract_node('print "A message"') - >>> node - - """ - - _astroid_fields = ("dest", "values") - dest = None - """Where to print to. - - :type: NodeNG or None - """ - values = None - """What to print. - - :type: list(NodeNG) or None - """ - - def __init__(self, nl=None, lineno=None, col_offset=None, parent=None): - """ - :param nl: Whether to print a new line. - :type nl: bool or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.nl = nl - """Whether to print a new line. - - :type: bool or None - """ - - super(Print, self).__init__(lineno, col_offset, parent) - - def postinit(self, dest=None, values=None): - """Do some setup after initialisation. - - :param dest: Where to print to. - :type dest: NodeNG or None - - :param values: What to print. - :type values: list(NodeNG) or None - """ - self.dest = dest - self.values = values - - -class Raise(Statement): - """Class representing an :class:`ast.Raise` node. - - >>> node = astroid.extract_node('raise RuntimeError("Something bad happened!")') - >>> node - - """ - - exc = None - """What is being raised. - - :type: NodeNG or None - """ - _astroid_fields = ("exc", "cause") - cause = None - """The exception being used to raise this one. - - :type: NodeNG or None - """ - - def postinit(self, exc=None, cause=None): - """Do some setup after initialisation. - - :param exc: What is being raised. - :type exc: NodeNG or None - - :param cause: The exception being used to raise this one. - :type cause: NodeNG or None - """ - self.exc = exc - self.cause = cause - - def raises_not_implemented(self): - """Check if this node raises a :class:`NotImplementedError`. - - :returns: True if this node raises a :class:`NotImplementedError`, - False otherwise. - :rtype: bool - """ - if not self.exc: - return False - for name in self.exc._get_name_nodes(): - if name.name == "NotImplementedError": - return True - return False - - def get_children(self): - if self.exc is not None: - yield self.exc - - if self.cause is not None: - yield self.cause - - -class Return(Statement): - """Class representing an :class:`ast.Return` node. - - >>> node = astroid.extract_node('return True') - >>> node - - """ - - _astroid_fields = ("value",) - value = None - """The value being returned. - - :type: NodeNG or None - """ - - def postinit(self, value=None): - """Do some setup after initialisation. - - :param value: The value being returned. - :type value: NodeNG or None - """ - self.value = value - - def get_children(self): - if self.value is not None: - yield self.value - - def is_tuple_return(self): - return isinstance(self.value, Tuple) - - def _get_return_nodes_skip_functions(self): - yield self - - -class Set(_BaseContainer): - """Class representing an :class:`ast.Set` node. 
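# Illustrative sketch, not part of the deleted file (assuming astroid is
# importable): Raise.raises_not_implemented walks the name nodes of the
# raised expression.
import astroid

node = astroid.extract_node("raise NotImplementedError('todo')")
print(node.raises_not_implemented())  # True
print(node.exc.as_string())           # NotImplementedError('todo')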
-
-    >>> node = astroid.extract_node('{1, 2, 3}')
-    >>> node
-    <Set.set l.1 at 0x...>
-    """
-
-    def pytype(self):
-        """Get the name of the type that this node represents.
-
-        :returns: The name of the type.
-        :rtype: str
-        """
-        return "%s.set" % BUILTINS
-
-
-class Slice(NodeNG):
-    """Class representing an :class:`ast.Slice` node.
-
-    >>> node = astroid.extract_node('things[1:3]')
-    >>> node
-    <Subscript l.1 at 0x...>
-    >>> node.slice
-    <Slice l.1 at 0x...>
-    """
-
-    _astroid_fields = ("lower", "upper", "step")
-    lower = None
-    """The lower index in the slice.
-
-    :type: NodeNG or None
-    """
-    upper = None
-    """The upper index in the slice.
-
-    :type: NodeNG or None
-    """
-    step = None
-    """The step to take between indexes.
-
-    :type: NodeNG or None
-    """
-
-    def postinit(self, lower=None, upper=None, step=None):
-        """Do some setup after initialisation.
-
-        :param lower: The lower index in the slice.
-        :type lower: NodeNG or None
-
-        :param upper: The upper index in the slice.
-        :type upper: NodeNG or None
-
-        :param step: The step to take between indexes.
-        :type step: NodeNG or None
-        """
-        self.lower = lower
-        self.upper = upper
-        self.step = step
-
-    def _wrap_attribute(self, attr):
-        """Wrap the empty attributes of the Slice in a Const node."""
-        if not attr:
-            const = const_factory(attr)
-            const.parent = self
-            return const
-        return attr
-
-    @decorators.cachedproperty
-    def _proxied(self):
-        builtins = MANAGER.builtins_module
-        return builtins.getattr("slice")[0]
-
-    def pytype(self):
-        """Get the name of the type that this node represents.
-
-        :returns: The name of the type.
-        :rtype: str
-        """
-        return "%s.slice" % BUILTINS
-
-    def igetattr(self, attrname, context=None):
-        """Infer the possible values of the given attribute on the slice.
-
-        :param attrname: The name of the attribute to infer.
-        :type attrname: str
-
-        :returns: The inferred possible values.
-        :rtype: iterable(NodeNG)
-        """
-        if attrname == "start":
-            yield self._wrap_attribute(self.lower)
-        elif attrname == "stop":
-            yield self._wrap_attribute(self.upper)
-        elif attrname == "step":
-            yield self._wrap_attribute(self.step)
-        else:
-            yield from self.getattr(attrname, context=context)
-
-    def getattr(self, attrname, context=None):
-        return self._proxied.getattr(attrname, context)
-
-    def get_children(self):
-        if self.lower is not None:
-            yield self.lower
-
-        if self.upper is not None:
-            yield self.upper
-
-        if self.step is not None:
-            yield self.step
-
-
-class Starred(mixins.ParentAssignTypeMixin, NodeNG):
-    """Class representing an :class:`ast.Starred` node.
-
-    >>> node = astroid.extract_node('*args')
-    >>> node
-    <Starred l.1 at 0x...>
-    """
-
-    _astroid_fields = ("value",)
-    _other_fields = ("ctx",)
-    value = None
-    """What is being unpacked.
-
-    :type: NodeNG or None
-    """
-
-    def __init__(self, ctx=None, lineno=None, col_offset=None, parent=None):
-        """
-        :param ctx: Whether the starred item is assigned to or loaded from.
-        :type ctx: Context or None
-
-        :param lineno: The line that this node appears on in the source code.
-        :type lineno: int or None
-
-        :param col_offset: The column that this node appears on in the
-            source code.
-        :type col_offset: int or None
-
-        :param parent: The parent node in the syntax tree.
-        :type parent: NodeNG or None
-        """
-        self.ctx = ctx
-        """Whether the starred item is assigned to or loaded from.
-
-        :type: Context or None
-        """
-
-        super(Starred, self).__init__(
-            lineno=lineno, col_offset=col_offset, parent=parent
-        )
-
-    def postinit(self, value=None):
-        """Do some setup after initialisation.
-
-        :param value: What is being unpacked.
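
Slice.igetattr above wraps absent bounds in Const(None) so that start/stop/step always infer to something concrete; a minimal sketch, assuming the pre-3.9 AST layout in which Subscript.slice holds a Slice node:

    import astroid

    subscript = astroid.extract_node('things[1:3]')
    slice_node = subscript.slice
    print(next(slice_node.igetattr('start')).value)   # 1
    print(next(slice_node.igetattr('step')).value)    # None (wrapped by _wrap_attribute)
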
- :type value: NodeNG or None - """ - self.value = value - - def get_children(self): - yield self.value - - -class Subscript(NodeNG): - """Class representing an :class:`ast.Subscript` node. - - >>> node = astroid.extract_node('things[1:3]') - >>> node - - """ - - _astroid_fields = ("value", "slice") - _other_fields = ("ctx",) - value = None - """What is being indexed. - - :type: NodeNG or None - """ - slice = None - """The slice being used to lookup. - - :type: NodeNG or None - """ - - def __init__(self, ctx=None, lineno=None, col_offset=None, parent=None): - """ - :param ctx: Whether the subscripted item is assigned to or loaded from. - :type ctx: Context or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.ctx = ctx - """Whether the subscripted item is assigned to or loaded from. - - :type: Context or None - """ - - super(Subscript, self).__init__( - lineno=lineno, col_offset=col_offset, parent=parent - ) - - # pylint: disable=redefined-builtin; had to use the same name as builtin ast module. - def postinit(self, value=None, slice=None): - """Do some setup after initialisation. - - :param value: What is being indexed. - :type value: NodeNG or None - - :param slice: The slice being used to lookup. - :type slice: NodeNG or None - """ - self.value = value - self.slice = slice - - def get_children(self): - yield self.value - yield self.slice - - -class TryExcept(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement): - """Class representing an :class:`ast.TryExcept` node. - - >>> node = astroid.extract_node(''' - try: - do_something() - except Exception as error: - print("Error!") - ''') - >>> node - - """ - - _astroid_fields = ("body", "handlers", "orelse") - _multi_line_block_fields = ("body", "handlers", "orelse") - body = None - """The contents of the block to catch exceptions from. - - :type: list(NodeNG) or None - """ - handlers = None - """The exception handlers. - - :type: list(ExceptHandler) or None - """ - orelse = None - """The contents of the ``else`` block. - - :type: list(NodeNG) or None - """ - - def postinit(self, body=None, handlers=None, orelse=None): - """Do some setup after initialisation. - - :param body: The contents of the block to catch exceptions from. - :type body: list(NodeNG) or None - - :param handlers: The exception handlers. - :type handlers: list(ExceptHandler) or None - - :param orelse: The contents of the ``else`` block. - :type orelse: list(NodeNG) or None - """ - self.body = body - self.handlers = handlers - self.orelse = orelse - - def _infer_name(self, frame, name): - return name - - def block_range(self, lineno): - """Get a range from the given line number to where this node ends. - - :param lineno: The line number to start the range at. - :type lineno: int - - :returns: The range of line numbers that this node belongs to, - starting at the given line number. 
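
The block_range method whose body continues below answers "which physical lines belong to the block containing line N"; a rough usage sketch that deliberately avoids asserting exact line numbers:

    import astroid

    node = astroid.extract_node('''
    try:
        do_something()
    except ValueError:
        handle()
    ''')
    # Returns a (start, end) tuple of line numbers for the block that
    # contains the given line; handler bodies map to their own range.
    print(node.block_range(node.fromlineno))
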
- :rtype: tuple(int, int) - """ - last = None - for exhandler in self.handlers: - if exhandler.type and lineno == exhandler.type.fromlineno: - return lineno, lineno - if exhandler.body[0].fromlineno <= lineno <= exhandler.body[-1].tolineno: - return lineno, exhandler.body[-1].tolineno - if last is None: - last = exhandler.body[0].fromlineno - 1 - return self._elsed_block_range(lineno, self.orelse, last) - - def get_children(self): - yield from self.body - - yield from self.handlers or () - yield from self.orelse or () - - -class TryFinally(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement): - """Class representing an :class:`ast.TryFinally` node. - - >>> node = astroid.extract_node(''' - try: - do_something() - except Exception as error: - print("Error!") - finally: - print("Cleanup!") - ''') - >>> node - - """ - - _astroid_fields = ("body", "finalbody") - _multi_line_block_fields = ("body", "finalbody") - body = None - """The try-except that the finally is attached to. - - :type: list(TryExcept) or None - """ - finalbody = None - """The contents of the ``finally`` block. - - :type: list(NodeNG) or None - """ - - def postinit(self, body=None, finalbody=None): - """Do some setup after initialisation. - - :param body: The try-except that the finally is attached to. - :type body: list(TryExcept) or None - - :param finalbody: The contents of the ``finally`` block. - :type finalbody: list(NodeNG) or None - """ - self.body = body - self.finalbody = finalbody - - def block_range(self, lineno): - """Get a range from the given line number to where this node ends. - - :param lineno: The line number to start the range at. - :type lineno: int - - :returns: The range of line numbers that this node belongs to, - starting at the given line number. - :rtype: tuple(int, int) - """ - child = self.body[0] - # py2.5 try: except: finally: - if ( - isinstance(child, TryExcept) - and child.fromlineno == self.fromlineno - and child.tolineno >= lineno > self.fromlineno - ): - return child.block_range(lineno) - return self._elsed_block_range(lineno, self.finalbody) - - def get_children(self): - yield from self.body - yield from self.finalbody - - -class Tuple(_BaseContainer): - """Class representing an :class:`ast.Tuple` node. - - >>> node = astroid.extract_node('(1, 2, 3)') - >>> node - - """ - - _other_fields = ("ctx",) - - def __init__(self, ctx=None, lineno=None, col_offset=None, parent=None): - """ - :param ctx: Whether the tuple is assigned to or loaded from. - :type ctx: Context or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.ctx = ctx - """Whether the tuple is assigned to or loaded from. - - :type: Context or None - """ - - super(Tuple, self).__init__(lineno, col_offset, parent) - - def pytype(self): - """Get the name of the type that this node represents. - - :returns: The name of the type. - :rtype: str - """ - return "%s.tuple" % BUILTINS - - def getitem(self, index, context=None): - """Get an item from this node. - - :param index: The node to use as a subscript index. - :type index: Const or Slice - """ - return _container_getitem(self, self.elts, index, context=context) - - -class UnaryOp(NodeNG): - """Class representing an :class:`ast.UnaryOp` node. 
- - >>> node = astroid.extract_node('-5') - >>> node - - """ - - _astroid_fields = ("operand",) - _other_fields = ("op",) - operand = None - """What the unary operator is applied to. - - :type: NodeNG or None - """ - - def __init__(self, op=None, lineno=None, col_offset=None, parent=None): - """ - :param op: The operator. - :type: str or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.op = op - """The operator. - - :type: str or None - """ - - super(UnaryOp, self).__init__(lineno, col_offset, parent) - - def postinit(self, operand=None): - """Do some setup after initialisation. - - :param operand: What the unary operator is applied to. - :type operand: NodeNG or None - """ - self.operand = operand - - # This is set by inference.py - def _infer_unaryop(self, context=None): - raise NotImplementedError - - def type_errors(self, context=None): - """Get a list of type errors which can occur during inference. - - Each TypeError is represented by a :class:`BadBinaryOperationMessage`, - which holds the original exception. - - :returns: The list of possible type errors. - :rtype: list(BadBinaryOperationMessage) - """ - try: - results = self._infer_unaryop(context=context) - return [ - result - for result in results - if isinstance(result, util.BadUnaryOperationMessage) - ] - except exceptions.InferenceError: - return [] - - def get_children(self): - yield self.operand - - def op_precedence(self): - if self.op == "not": - return OP_PRECEDENCE[self.op] - - return super().op_precedence() - - -class While(mixins.MultiLineBlockMixin, mixins.BlockRangeMixIn, Statement): - """Class representing an :class:`ast.While` node. - - >>> node = astroid.extract_node(''' - while condition(): - print("True") - ''') - >>> node - - """ - - _astroid_fields = ("test", "body", "orelse") - _multi_line_block_fields = ("body", "orelse") - test = None - """The condition that the loop tests. - - :type: NodeNG or None - """ - body = None - """The contents of the loop. - - :type: list(NodeNG) or None - """ - orelse = None - """The contents of the ``else`` block. - - :type: list(NodeNG) or None - """ - - def postinit(self, test=None, body=None, orelse=None): - """Do some setup after initialisation. - - :param test: The condition that the loop tests. - :type test: NodeNG or None - - :param body: The contents of the loop. - :type body: list(NodeNG) or None - - :param orelse: The contents of the ``else`` block. - :type orelse: list(NodeNG) or None - """ - self.test = test - self.body = body - self.orelse = orelse - - @decorators.cachedproperty - def blockstart_tolineno(self): - """The line on which the beginning of this block ends. - - :type: int - """ - return self.test.tolineno - - def block_range(self, lineno): - """Get a range from the given line number to where this node ends. - - :param lineno: The line number to start the range at. - :type lineno: int - - :returns: The range of line numbers that this node belongs to, - starting at the given line number. 
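
type_errors above collects the inference results that are BadUnaryOperationMessage instances instead of raising; a sketch:

    import astroid

    node = astroid.extract_node('-"not a number"')
    for error in node.type_errors():
        # Each entry wraps the original TypeError detected during inference,
        # e.g. a unary minus applied to a str.
        print(error)
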
- :rtype: tuple(int, int) - """ - return self._elsed_block_range(lineno, self.orelse) - - def get_children(self): - yield self.test - - yield from self.body - yield from self.orelse - - -class With( - mixins.MultiLineBlockMixin, - mixins.BlockRangeMixIn, - mixins.AssignTypeMixin, - Statement, -): - """Class representing an :class:`ast.With` node. - - >>> node = astroid.extract_node(''' - with open(file_path) as file_: - print(file_.read()) - ''') - >>> node - - """ - - _astroid_fields = ("items", "body") - _other_other_fields = ("type_annotation",) - _multi_line_block_fields = ("body",) - items = None - """The pairs of context managers and the names they are assigned to. - - :type: list(tuple(NodeNG, AssignName or None)) or None - """ - body = None - """The contents of the ``with`` block. - - :type: list(NodeNG) or None - """ - type_annotation = None - """If present, this will contain the type annotation passed by a type comment - - :type: NodeNG or None - """ - - def postinit(self, items=None, body=None, type_annotation=None): - """Do some setup after initialisation. - - :param items: The pairs of context managers and the names - they are assigned to. - :type items: list(tuple(NodeNG, AssignName or None)) or None - - :param body: The contents of the ``with`` block. - :type body: list(NodeNG) or None - """ - self.items = items - self.body = body - self.type_annotation = type_annotation - - @decorators.cachedproperty - def blockstart_tolineno(self): - """The line on which the beginning of this block ends. - - :type: int - """ - return self.items[-1][0].tolineno - - def get_children(self): - """Get the child nodes below this node. - - :returns: The children. - :rtype: iterable(NodeNG) - """ - for expr, var in self.items: - yield expr - if var: - yield var - yield from self.body - - -class AsyncWith(With): - """Asynchronous ``with`` built with the ``async`` keyword.""" - - -class Yield(NodeNG): - """Class representing an :class:`ast.Yield` node. - - >>> node = astroid.extract_node('yield True') - >>> node - - """ - - _astroid_fields = ("value",) - value = None - """The value to yield. - - :type: NodeNG or None - """ - - def postinit(self, value=None): - """Do some setup after initialisation. - - :param value: The value to yield. - :type value: NodeNG or None - """ - self.value = value - - def get_children(self): - if self.value is not None: - yield self.value - - def _get_yield_nodes_skip_lambdas(self): - yield self - - -class YieldFrom(Yield): - """Class representing an :class:`ast.YieldFrom` node.""" - - -class DictUnpack(mixins.NoChildrenMixin, NodeNG): - """Represents the unpacking of dicts into dicts using :pep:`448`.""" - - -class FormattedValue(NodeNG): - """Class representing an :class:`ast.FormattedValue` node. - - Represents a :pep:`498` format string. - - >>> node = astroid.extract_node('f"Format {type_}"') - >>> node - - >>> node.values - [, ] - """ - - _astroid_fields = ("value", "format_spec") - value = None - """The value to be formatted into the string. - - :type: NodeNG or None - """ - conversion = None - """The type of formatting to be applied to the value. - - .. seealso:: - :class:`ast.FormattedValue` - - :type: int or None - """ - format_spec = None - """The formatting to be applied to the value. - - .. seealso:: - :class:`ast.FormattedValue` - - :type: JoinedStr or None - """ - - def postinit(self, value, conversion=None, format_spec=None): - """Do some setup after initialisation. - - :param value: The value to be formatted into the string. 
- :type value: NodeNG - - :param conversion: The type of formatting to be applied to the value. - :type conversion: int or None - - :param format_spec: The formatting to be applied to the value. - :type format_spec: JoinedStr or None - """ - self.value = value - self.conversion = conversion - self.format_spec = format_spec - - def get_children(self): - yield self.value - - if self.format_spec is not None: - yield self.format_spec - - -class JoinedStr(NodeNG): - """Represents a list of string expressions to be joined. - - >>> node = astroid.extract_node('f"Format {type_}"') - >>> node - - """ - - _astroid_fields = ("values",) - values = None - """The string expressions to be joined. - - :type: list(FormattedValue or Const) or None - """ - - def postinit(self, values=None): - """Do some setup after initialisation. - - :param value: The string expressions to be joined. - - :type: list(FormattedValue or Const) or None - """ - self.values = values - - def get_children(self): - yield from self.values - - -class NamedExpr(mixins.AssignTypeMixin, NodeNG): - """Represents the assignment from the assignment expression - - >>> module = astroid.parse('if a := 1: pass') - >>> module.body[0].test - - """ - - _astroid_fields = ("target", "value") - target = None - """The assignment target - - :type: Name - """ - value = None - """The value that gets assigned in the expression - - :type: NodeNG - """ - - def postinit(self, target, value): - self.target = target - self.value = value - - -class Unknown(mixins.AssignTypeMixin, NodeNG): - """This node represents a node in a constructed AST where - introspection is not possible. At the moment, it's only used in - the args attribute of FunctionDef nodes where function signature - introspection failed. - """ - - name = "Unknown" - - def qname(self): - return "Unknown" - - def infer(self, context=None, **kwargs): - """Inference on an Unknown node immediately terminates.""" - yield util.Uninferable - - -# constants ############################################################## - -CONST_CLS = { - list: List, - tuple: Tuple, - dict: Dict, - set: Set, - type(None): Const, - type(NotImplemented): Const, -} -if PY38: - CONST_CLS[type(...)] = Const - - -def _update_const_classes(): - """update constant classes, so the keys of CONST_CLS can be reused""" - klasses = (bool, int, float, complex, str, bytes) - for kls in klasses: - CONST_CLS[kls] = Const - - -_update_const_classes() - - -def _two_step_initialization(cls, value): - instance = cls() - instance.postinit(value) - return instance - - -def _dict_initialization(cls, value): - if isinstance(value, dict): - value = tuple(value.items()) - return _two_step_initialization(cls, value) - - -_CONST_CLS_CONSTRUCTORS = { - List: _two_step_initialization, - Tuple: _two_step_initialization, - Dict: _dict_initialization, - Set: _two_step_initialization, - Const: lambda cls, value: cls(value), -} - - -def const_factory(value): - """return an astroid node for a python value""" - # XXX we should probably be stricter here and only consider stuff in - # CONST_CLS or do better treatment: in case where value is not in CONST_CLS, - # we should rather recall the builder on this value than returning an empty - # node (another option being that const_factory shouldn't be called with something - # not in CONST_CLS) - assert not isinstance(value, NodeNG) - - # Hack for ignoring elements of a sequence - # or a mapping, in order to avoid transforming - # each element to an AST. This is fixed in 2.0 - # and this approach is a temporary hack. 
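
The effect of that hack is easiest to see by calling const_factory directly; a minimal sketch:

    from astroid.node_classes import const_factory

    const = const_factory(42)
    print(type(const).__name__, const.value)   # Const 42

    seq = const_factory([1, 2, 3])
    # Container values map to their node class, but (per the hack above)
    # the elements themselves are not converted into child AST nodes.
    print(type(seq).__name__, seq.elts)        # List []
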
- if isinstance(value, (list, set, tuple, dict)): - elts = [] - else: - elts = value - - try: - initializer_cls = CONST_CLS[value.__class__] - initializer = _CONST_CLS_CONSTRUCTORS[initializer_cls] - return initializer(initializer_cls, elts) - except (KeyError, AttributeError): - node = EmptyNode() - node.object = value - return node - - -def is_from_decorator(node): - """Return True if the given node is the child of a decorator""" - parent = node.parent - while parent is not None: - if isinstance(parent, Decorators): - return True - parent = parent.parent - return False diff --git a/venv/lib/python3.8/site-packages/astroid/nodes.py b/venv/lib/python3.8/site-packages/astroid/nodes.py deleted file mode 100644 index bf6911ac..00000000 --- a/venv/lib/python3.8/site-packages/astroid/nodes.py +++ /dev/null @@ -1,175 +0,0 @@ -# Copyright (c) 2006-2011, 2013 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2010 Daniel Harding -# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014 Google, Inc. -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2016 Jared Garst -# Copyright (c) 2017 Ashley Whetter -# Copyright (c) 2017 rr- -# Copyright (c) 2018 Bryce Guinta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Every available node class. - -.. seealso:: - :doc:`ast documentation ` - -All nodes inherit from :class:`~astroid.node_classes.NodeNG`. -""" -# pylint: disable=unused-import,redefined-builtin - -from astroid.node_classes import ( - Arguments, - AssignAttr, - Assert, - Assign, - AnnAssign, - AssignName, - AugAssign, - Repr, - BinOp, - BoolOp, - Break, - Call, - Compare, - Comprehension, - Const, - Continue, - Decorators, - DelAttr, - DelName, - Delete, - Dict, - Expr, - Ellipsis, - EmptyNode, - ExceptHandler, - Exec, - ExtSlice, - For, - ImportFrom, - Attribute, - Global, - If, - IfExp, - Import, - Index, - Keyword, - List, - Name, - NamedExpr, - Nonlocal, - Pass, - Print, - Raise, - Return, - Set, - Slice, - Starred, - Subscript, - TryExcept, - TryFinally, - Tuple, - UnaryOp, - While, - With, - Yield, - YieldFrom, - const_factory, - AsyncFor, - Await, - AsyncWith, - FormattedValue, - JoinedStr, - # Node not present in the builtin ast module. 
- DictUnpack, - Unknown, -) -from astroid.scoped_nodes import ( - Module, - GeneratorExp, - Lambda, - DictComp, - ListComp, - SetComp, - FunctionDef, - ClassDef, - AsyncFunctionDef, -) - - -ALL_NODE_CLASSES = ( - AsyncFunctionDef, - AsyncFor, - AsyncWith, - Await, - Arguments, - AssignAttr, - Assert, - Assign, - AnnAssign, - AssignName, - AugAssign, - Repr, - BinOp, - BoolOp, - Break, - Call, - ClassDef, - Compare, - Comprehension, - Const, - Continue, - Decorators, - DelAttr, - DelName, - Delete, - Dict, - DictComp, - DictUnpack, - Expr, - Ellipsis, - EmptyNode, - ExceptHandler, - Exec, - ExtSlice, - For, - ImportFrom, - FunctionDef, - Attribute, - GeneratorExp, - Global, - If, - IfExp, - Import, - Index, - Keyword, - Lambda, - List, - ListComp, - Name, - NamedExpr, - Nonlocal, - Module, - Pass, - Print, - Raise, - Return, - Set, - SetComp, - Slice, - Starred, - Subscript, - TryExcept, - TryFinally, - Tuple, - UnaryOp, - While, - With, - Yield, - YieldFrom, - FormattedValue, - JoinedStr, -) diff --git a/venv/lib/python3.8/site-packages/astroid/objects.py b/venv/lib/python3.8/site-packages/astroid/objects.py deleted file mode 100644 index 888ca36e..00000000 --- a/venv/lib/python3.8/site-packages/astroid/objects.py +++ /dev/null @@ -1,282 +0,0 @@ -# Copyright (c) 2015-2016, 2018 Claudiu Popa -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 Florian Bruhin -# Copyright (c) 2016 Derek Gustafson -# Copyright (c) 2018 Bryce Guinta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -""" -Inference objects are a way to represent composite AST nodes, -which are used only as inference results, so they can't be found in the -original AST tree. For instance, inferring the following frozenset use, -leads to an inferred FrozenSet: - - Call(func=Name('frozenset'), args=Tuple(...)) -""" - -import builtins - -from astroid import bases -from astroid import decorators -from astroid import exceptions -from astroid import MANAGER -from astroid import node_classes -from astroid import scoped_nodes -from astroid import util - - -BUILTINS = builtins.__name__ -objectmodel = util.lazy_import("interpreter.objectmodel") - - -class FrozenSet(node_classes._BaseContainer): - """class representing a FrozenSet composite node""" - - def pytype(self): - return "%s.frozenset" % BUILTINS - - def _infer(self, context=None): - yield self - - @decorators.cachedproperty - def _proxied(self): # pylint: disable=method-hidden - ast_builtins = MANAGER.builtins_module - return ast_builtins.getattr("frozenset")[0] - - -class Super(node_classes.NodeNG): - """Proxy class over a super call. - - This class offers almost the same behaviour as Python's super, - which is MRO lookups for retrieving attributes from the parents. - - The *mro_pointer* is the place in the MRO from where we should - start looking, not counting it. *mro_type* is the object which - provides the MRO, it can be both a type or an instance. - *self_class* is the class where the super call is, while - *scope* is the function where the super call is. 
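
The FrozenSet composite above only ever appears as an inference result, never in a parsed tree; a probe, assuming astroid's builtin brain transform for frozenset calls is active:

    import astroid

    node = astroid.extract_node('frozenset((1, 2))')
    inferred = next(node.infer())
    print(inferred.pytype())                        # builtins.frozenset
    print([elt.value for elt in inferred.elts])     # [1, 2]
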
- """ - - # pylint: disable=unnecessary-lambda - special_attributes = util.lazy_descriptor(lambda: objectmodel.SuperModel()) - - # pylint: disable=super-init-not-called - def __init__(self, mro_pointer, mro_type, self_class, scope): - self.type = mro_type - self.mro_pointer = mro_pointer - self._class_based = False - self._self_class = self_class - self._scope = scope - - def _infer(self, context=None): - yield self - - def super_mro(self): - """Get the MRO which will be used to lookup attributes in this super.""" - if not isinstance(self.mro_pointer, scoped_nodes.ClassDef): - raise exceptions.SuperError( - "The first argument to super must be a subtype of " - "type, not {mro_pointer}.", - super_=self, - ) - - if isinstance(self.type, scoped_nodes.ClassDef): - # `super(type, type)`, most likely in a class method. - self._class_based = True - mro_type = self.type - else: - mro_type = getattr(self.type, "_proxied", None) - if not isinstance(mro_type, (bases.Instance, scoped_nodes.ClassDef)): - raise exceptions.SuperError( - "The second argument to super must be an " - "instance or subtype of type, not {type}.", - super_=self, - ) - - if not mro_type.newstyle: - raise exceptions.SuperError( - "Unable to call super on old-style classes.", super_=self - ) - - mro = mro_type.mro() - if self.mro_pointer not in mro: - raise exceptions.SuperError( - "The second argument to super must be an " - "instance or subtype of type, not {type}.", - super_=self, - ) - - index = mro.index(self.mro_pointer) - return mro[index + 1 :] - - @decorators.cachedproperty - def _proxied(self): - ast_builtins = MANAGER.builtins_module - return ast_builtins.getattr("super")[0] - - def pytype(self): - return "%s.super" % BUILTINS - - def display_type(self): - return "Super of" - - @property - def name(self): - """Get the name of the MRO pointer.""" - return self.mro_pointer.name - - def qname(self): - return "super" - - def igetattr(self, name, context=None): - """Retrieve the inferred values of the given attribute name.""" - - if name in self.special_attributes: - yield self.special_attributes.lookup(name) - return - - try: - mro = self.super_mro() - # Don't let invalid MROs or invalid super calls - # leak out as is from this function. - except exceptions.SuperError as exc: - raise exceptions.AttributeInferenceError( - ( - "Lookup for {name} on {target!r} because super call {super!r} " - "is invalid." - ), - target=self, - attribute=name, - context=context, - super_=exc.super_, - ) from exc - except exceptions.MroError as exc: - raise exceptions.AttributeInferenceError( - ( - "Lookup for {name} on {target!r} failed because {cls!r} has an " - "invalid MRO." - ), - target=self, - attribute=name, - context=context, - mros=exc.mros, - cls=exc.cls, - ) from exc - found = False - for cls in mro: - if name not in cls.locals: - continue - - found = True - for inferred in bases._infer_stmts([cls[name]], context, frame=self): - if not isinstance(inferred, scoped_nodes.FunctionDef): - yield inferred - continue - - # We can obtain different descriptors from a super depending - # on what we are accessing and where the super call is. - if inferred.type == "classmethod": - yield bases.BoundMethod(inferred, cls) - elif self._scope.type == "classmethod" and inferred.type == "method": - yield inferred - elif self._class_based or inferred.type == "staticmethod": - yield inferred - elif bases._is_property(inferred): - # TODO: support other descriptors as well. 
- try: - yield from inferred.infer_call_result(self, context) - except exceptions.InferenceError: - yield util.Uninferable - else: - yield bases.BoundMethod(inferred, cls) - - if not found: - raise exceptions.AttributeInferenceError( - target=self, attribute=name, context=context - ) - - def getattr(self, name, context=None): - return list(self.igetattr(name, context=context)) - - -class ExceptionInstance(bases.Instance): - """Class for instances of exceptions - - It has special treatment for some of the exceptions's attributes, - which are transformed at runtime into certain concrete objects, such as - the case of .args. - """ - - @decorators.cachedproperty - def special_attributes(self): - qname = self.qname() - instance = objectmodel.BUILTIN_EXCEPTIONS.get( - qname, objectmodel.ExceptionInstanceModel - ) - return instance()(self) - - -class DictInstance(bases.Instance): - """Special kind of instances for dictionaries - - This instance knows the underlying object model of the dictionaries, which means - that methods such as .values or .items can be properly inferred. - """ - - # pylint: disable=unnecessary-lambda - special_attributes = util.lazy_descriptor(lambda: objectmodel.DictModel()) - - -# Custom objects tailored for dictionaries, which are used to -# disambiguate between the types of Python 2 dict's method returns -# and Python 3 (where they return set like objects). -class DictItems(bases.Proxy): - __str__ = node_classes.NodeNG.__str__ - __repr__ = node_classes.NodeNG.__repr__ - - -class DictKeys(bases.Proxy): - __str__ = node_classes.NodeNG.__str__ - __repr__ = node_classes.NodeNG.__repr__ - - -class DictValues(bases.Proxy): - __str__ = node_classes.NodeNG.__str__ - __repr__ = node_classes.NodeNG.__repr__ - - -class PartialFunction(scoped_nodes.FunctionDef): - """A class representing partial function obtained via functools.partial""" - - def __init__( - self, call, name=None, doc=None, lineno=None, col_offset=None, parent=None - ): - super().__init__(name, doc, lineno, col_offset, parent) - self.filled_positionals = len(call.positional_arguments[1:]) - self.filled_args = call.positional_arguments[1:] - self.filled_keywords = call.keyword_arguments - - def infer_call_result(self, caller=None, context=None): - if context: - current_passed_keywords = { - keyword for (keyword, _) in context.callcontext.keywords - } - for keyword, value in self.filled_keywords.items(): - if keyword not in current_passed_keywords: - context.callcontext.keywords.append((keyword, value)) - - call_context_args = context.callcontext.args or [] - context.callcontext.args = self.filled_args + call_context_args - - return super().infer_call_result(caller=caller, context=context) - - def qname(self): - return self.__class__.__name__ - - -# TODO: Hack to solve the circular import problem between node_classes and objects -# This is not needed in 2.0, which has a cleaner design overall -node_classes.Dict.__bases__ = (node_classes.NodeNG, DictInstance) diff --git a/venv/lib/python3.8/site-packages/astroid/protocols.py b/venv/lib/python3.8/site-packages/astroid/protocols.py deleted file mode 100644 index c1825f1d..00000000 --- a/venv/lib/python3.8/site-packages/astroid/protocols.py +++ /dev/null @@ -1,766 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2009-2011, 2013-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014 Google, Inc. 
-# Copyright (c) 2014 Eevee (Alex Munroe) -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 Dmitry Pribysh -# Copyright (c) 2016 Derek Gustafson -# Copyright (c) 2017-2018 Ashley Whetter -# Copyright (c) 2017 Łukasz Rogalski -# Copyright (c) 2017 rr- -# Copyright (c) 2018 Bryce Guinta -# Copyright (c) 2018 Nick Drozd -# Copyright (c) 2018 HoverHell - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""this module contains a set of functions to handle python protocols for nodes -where it makes sense. -""" - -import collections -import operator as operator_mod - -import itertools - -from astroid import Store -from astroid import arguments -from astroid import bases -from astroid import context as contextmod -from astroid import exceptions -from astroid import decorators -from astroid import node_classes -from astroid import helpers -from astroid import nodes -from astroid import util - -raw_building = util.lazy_import("raw_building") -objects = util.lazy_import("objects") - - -def _reflected_name(name): - return "__r" + name[2:] - - -def _augmented_name(name): - return "__i" + name[2:] - - -_CONTEXTLIB_MGR = "contextlib.contextmanager" -BIN_OP_METHOD = { - "+": "__add__", - "-": "__sub__", - "/": "__truediv__", - "//": "__floordiv__", - "*": "__mul__", - "**": "__pow__", - "%": "__mod__", - "&": "__and__", - "|": "__or__", - "^": "__xor__", - "<<": "__lshift__", - ">>": "__rshift__", - "@": "__matmul__", -} - -REFLECTED_BIN_OP_METHOD = { - key: _reflected_name(value) for (key, value) in BIN_OP_METHOD.items() -} -AUGMENTED_OP_METHOD = { - key + "=": _augmented_name(value) for (key, value) in BIN_OP_METHOD.items() -} - -UNARY_OP_METHOD = { - "+": "__pos__", - "-": "__neg__", - "~": "__invert__", - "not": None, # XXX not '__nonzero__' -} -_UNARY_OPERATORS = { - "+": operator_mod.pos, - "-": operator_mod.neg, - "~": operator_mod.invert, - "not": operator_mod.not_, -} - - -def _infer_unary_op(obj, op): - func = _UNARY_OPERATORS[op] - value = func(obj) - return nodes.const_factory(value) - - -nodes.Tuple.infer_unary_op = lambda self, op: _infer_unary_op(tuple(self.elts), op) -nodes.List.infer_unary_op = lambda self, op: _infer_unary_op(self.elts, op) -nodes.Set.infer_unary_op = lambda self, op: _infer_unary_op(set(self.elts), op) -nodes.Const.infer_unary_op = lambda self, op: _infer_unary_op(self.value, op) -nodes.Dict.infer_unary_op = lambda self, op: _infer_unary_op(dict(self.items), op) - -# Binary operations - -BIN_OP_IMPL = { - "+": lambda a, b: a + b, - "-": lambda a, b: a - b, - "/": lambda a, b: a / b, - "//": lambda a, b: a // b, - "*": lambda a, b: a * b, - "**": lambda a, b: a ** b, - "%": lambda a, b: a % b, - "&": lambda a, b: a & b, - "|": lambda a, b: a | b, - "^": lambda a, b: a ^ b, - "<<": lambda a, b: a << b, - ">>": lambda a, b: a >> b, - "@": operator_mod.matmul, -} -for _KEY, _IMPL in list(BIN_OP_IMPL.items()): - BIN_OP_IMPL[_KEY + "="] = _IMPL - - -@decorators.yes_if_nothing_inferred -def const_infer_binary_op(self, opnode, operator, other, context, _): - not_implemented = nodes.Const(NotImplemented) - if isinstance(other, nodes.Const): - try: - impl = BIN_OP_IMPL[operator] - try: - yield nodes.const_factory(impl(self.value, other.value)) - except TypeError: - # ArithmeticError is not enough: float >> float is a TypeError - yield not_implemented - except Exception: # pylint: disable=broad-except - yield util.Uninferable - except TypeError: - yield 
not_implemented - elif isinstance(self.value, str) and operator == "%": - # TODO(cpopa): implement string interpolation later on. - yield util.Uninferable - else: - yield not_implemented - - -nodes.Const.infer_binary_op = const_infer_binary_op - - -def _multiply_seq_by_int(self, opnode, other, context): - node = self.__class__(parent=opnode) - filtered_elts = ( - helpers.safe_infer(elt, context) or util.Uninferable - for elt in self.elts - if elt is not util.Uninferable - ) - node.elts = list(filtered_elts) * other.value - return node - - -def _filter_uninferable_nodes(elts, context): - for elt in elts: - if elt is util.Uninferable: - yield nodes.Unknown() - else: - for inferred in elt.infer(context): - if inferred is not util.Uninferable: - yield inferred - else: - yield nodes.Unknown() - - -@decorators.yes_if_nothing_inferred -def tl_infer_binary_op(self, opnode, operator, other, context, method): - not_implemented = nodes.Const(NotImplemented) - if isinstance(other, self.__class__) and operator == "+": - node = self.__class__(parent=opnode) - node.elts = list( - itertools.chain( - _filter_uninferable_nodes(self.elts, context), - _filter_uninferable_nodes(other.elts, context), - ) - ) - yield node - elif isinstance(other, nodes.Const) and operator == "*": - if not isinstance(other.value, int): - yield not_implemented - return - yield _multiply_seq_by_int(self, opnode, other, context) - elif isinstance(other, bases.Instance) and operator == "*": - # Verify if the instance supports __index__. - as_index = helpers.class_instance_as_index(other) - if not as_index: - yield util.Uninferable - else: - yield _multiply_seq_by_int(self, opnode, as_index, context) - else: - yield not_implemented - - -nodes.Tuple.infer_binary_op = tl_infer_binary_op -nodes.List.infer_binary_op = tl_infer_binary_op - - -@decorators.yes_if_nothing_inferred -def instance_class_infer_binary_op(self, opnode, operator, other, context, method): - return method.infer_call_result(self, context) - - -bases.Instance.infer_binary_op = instance_class_infer_binary_op -nodes.ClassDef.infer_binary_op = instance_class_infer_binary_op - - -# assignment ################################################################## - -"""the assigned_stmts method is responsible to return the assigned statement -(e.g. not inferred) according to the assignment type. - -The `assign_path` argument is used to record the lhs path of the original node. -For instance if we want assigned statements for 'c' in 'a, (b,c)', assign_path -will be [1, 1] once arrived to the Assign node. - -The `context` argument is the current inference context which should be given -to any intermediary inference necessary. 
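
Those binary-operation protocol functions are what the public inference API ends up calling; two minimal probes:

    import astroid

    # const_infer_binary_op: two Consts combined via BIN_OP_IMPL
    print(next(astroid.extract_node('2 ** 8').infer()).value)   # 256

    # tl_infer_binary_op: sequence repetition builds a new List node
    seq = next(astroid.extract_node('[1, 2] * 3').infer())
    print([elt.value for elt in seq.elts])                      # [1, 2, 1, 2, 1, 2]
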
-""" - - -def _resolve_looppart(parts, assign_path, context): - """recursive function to resolve multiple assignments on loops""" - assign_path = assign_path[:] - index = assign_path.pop(0) - for part in parts: - if part is util.Uninferable: - continue - if not hasattr(part, "itered"): - continue - try: - itered = part.itered() - except TypeError: - continue - for stmt in itered: - index_node = nodes.Const(index) - try: - assigned = stmt.getitem(index_node, context) - except ( - AttributeError, - exceptions.AstroidTypeError, - exceptions.AstroidIndexError, - ): - continue - if not assign_path: - # we achieved to resolved the assignment path, - # don't infer the last part - yield assigned - elif assigned is util.Uninferable: - break - else: - # we are not yet on the last part of the path - # search on each possibly inferred value - try: - yield from _resolve_looppart( - assigned.infer(context), assign_path, context - ) - except exceptions.InferenceError: - break - - -@decorators.raise_if_nothing_inferred -def for_assigned_stmts(self, node=None, context=None, assign_path=None): - if isinstance(self, nodes.AsyncFor) or getattr(self, "is_async", False): - # Skip inferring of async code for now - return dict(node=self, unknown=node, assign_path=assign_path, context=context) - if assign_path is None: - for lst in self.iter.infer(context): - if isinstance(lst, (nodes.Tuple, nodes.List)): - yield from lst.elts - else: - yield from _resolve_looppart(self.iter.infer(context), assign_path, context) - return dict(node=self, unknown=node, assign_path=assign_path, context=context) - - -nodes.For.assigned_stmts = for_assigned_stmts -nodes.Comprehension.assigned_stmts = for_assigned_stmts - - -def sequence_assigned_stmts(self, node=None, context=None, assign_path=None): - if assign_path is None: - assign_path = [] - try: - index = self.elts.index(node) - except ValueError as exc: - raise exceptions.InferenceError( - "Tried to retrieve a node {node!r} which does not exist", - node=self, - assign_path=assign_path, - context=context, - ) from exc - - assign_path.insert(0, index) - return self.parent.assigned_stmts( - node=self, context=context, assign_path=assign_path - ) - - -nodes.Tuple.assigned_stmts = sequence_assigned_stmts -nodes.List.assigned_stmts = sequence_assigned_stmts - - -def assend_assigned_stmts(self, node=None, context=None, assign_path=None): - return self.parent.assigned_stmts(node=self, context=context) - - -nodes.AssignName.assigned_stmts = assend_assigned_stmts -nodes.AssignAttr.assigned_stmts = assend_assigned_stmts - - -def _arguments_infer_argname(self, name, context): - # arguments information may be missing, in which case we can't do anything - # more - if not (self.args or self.vararg or self.kwarg): - yield util.Uninferable - return - # first argument of instance/class method - if self.args and getattr(self.args[0], "name", None) == name: - functype = self.parent.type - cls = self.parent.parent.scope() - is_metaclass = isinstance(cls, nodes.ClassDef) and cls.type == "metaclass" - # If this is a metaclass, then the first argument will always - # be the class, not an instance. 
- if is_metaclass or functype == "classmethod": - yield cls - return - if functype == "method": - yield bases.Instance(cls) - return - - if context and context.callcontext: - call_site = arguments.CallSite(context.callcontext, context.extra_context) - yield from call_site.infer_argument(self.parent, name, context) - return - - if name == self.vararg: - vararg = nodes.const_factory(()) - vararg.parent = self - yield vararg - return - if name == self.kwarg: - kwarg = nodes.const_factory({}) - kwarg.parent = self - yield kwarg - return - # if there is a default value, yield it. And then yield Uninferable to reflect - # we can't guess given argument value - try: - context = contextmod.copy_context(context) - yield from self.default_value(name).infer(context) - yield util.Uninferable - except exceptions.NoDefault: - yield util.Uninferable - - -def arguments_assigned_stmts(self, node=None, context=None, assign_path=None): - if context.callcontext: - # reset call context/name - callcontext = context.callcontext - context = contextmod.copy_context(context) - context.callcontext = None - args = arguments.CallSite(callcontext) - return args.infer_argument(self.parent, node.name, context) - return _arguments_infer_argname(self, node.name, context) - - -nodes.Arguments.assigned_stmts = arguments_assigned_stmts - - -@decorators.raise_if_nothing_inferred -def assign_assigned_stmts(self, node=None, context=None, assign_path=None): - if not assign_path: - yield self.value - return None - yield from _resolve_assignment_parts( - self.value.infer(context), assign_path, context - ) - - return dict(node=self, unknown=node, assign_path=assign_path, context=context) - - -def assign_annassigned_stmts(self, node=None, context=None, assign_path=None): - for inferred in assign_assigned_stmts(self, node, context, assign_path): - if inferred is None: - yield util.Uninferable - else: - yield inferred - - -nodes.Assign.assigned_stmts = assign_assigned_stmts -nodes.AnnAssign.assigned_stmts = assign_annassigned_stmts -nodes.AugAssign.assigned_stmts = assign_assigned_stmts - - -def _resolve_assignment_parts(parts, assign_path, context): - """recursive function to resolve multiple assignments""" - assign_path = assign_path[:] - index = assign_path.pop(0) - for part in parts: - assigned = None - if isinstance(part, nodes.Dict): - # A dictionary in an iterating context - try: - assigned, _ = part.items[index] - except IndexError: - return - - elif hasattr(part, "getitem"): - index_node = nodes.Const(index) - try: - assigned = part.getitem(index_node, context) - except (exceptions.AstroidTypeError, exceptions.AstroidIndexError): - return - - if not assigned: - return - - if not assign_path: - # we achieved to resolved the assignment path, don't infer the - # last part - yield assigned - elif assigned is util.Uninferable: - return - else: - # we are not yet on the last part of the path search on each - # possibly inferred value - try: - yield from _resolve_assignment_parts( - assigned.infer(context), assign_path, context - ) - except exceptions.InferenceError: - return - - -@decorators.raise_if_nothing_inferred -def excepthandler_assigned_stmts(self, node=None, context=None, assign_path=None): - for assigned in node_classes.unpack_infer(self.type): - if isinstance(assigned, nodes.ClassDef): - assigned = objects.ExceptionInstance(assigned) - - yield assigned - return dict(node=self, unknown=node, assign_path=assign_path, context=context) - - -nodes.ExceptHandler.assigned_stmts = excepthandler_assigned_stmts - - -def 
_infer_context_manager(self, mgr, context): - inferred = next(mgr.infer(context=context)) - if isinstance(inferred, bases.Generator): - # Check if it is decorated with contextlib.contextmanager. - func = inferred.parent - if not func.decorators: - raise exceptions.InferenceError( - "No decorators found on inferred generator %s", node=func - ) - - for decorator_node in func.decorators.nodes: - decorator = next(decorator_node.infer(context)) - if isinstance(decorator, nodes.FunctionDef): - if decorator.qname() == _CONTEXTLIB_MGR: - break - else: - # It doesn't interest us. - raise exceptions.InferenceError(node=func) - - # Get the first yield point. If it has multiple yields, - # then a RuntimeError will be raised. - - possible_yield_points = func.nodes_of_class(nodes.Yield) - # Ignore yields in nested functions - yield_point = next( - (node for node in possible_yield_points if node.scope() == func), None - ) - if yield_point: - if not yield_point.value: - const = nodes.Const(None) - const.parent = yield_point - const.lineno = yield_point.lineno - yield const - else: - yield from yield_point.value.infer(context=context) - elif isinstance(inferred, bases.Instance): - try: - enter = next(inferred.igetattr("__enter__", context=context)) - except (exceptions.InferenceError, exceptions.AttributeInferenceError): - raise exceptions.InferenceError(node=inferred) - if not isinstance(enter, bases.BoundMethod): - raise exceptions.InferenceError(node=enter) - yield from enter.infer_call_result(self, context) - else: - raise exceptions.InferenceError(node=mgr) - - -@decorators.raise_if_nothing_inferred -def with_assigned_stmts(self, node=None, context=None, assign_path=None): - """Infer names and other nodes from a *with* statement. - - This enables only inference for name binding in a *with* statement. - For instance, in the following code, inferring `func` will return - the `ContextManager` class, not whatever ``__enter__`` returns. - We are doing this intentionally, because we consider that the context - manager result is whatever __enter__ returns and what it is binded - using the ``as`` keyword. - - class ContextManager(object): - def __enter__(self): - return 42 - with ContextManager() as f: - pass - - # ContextManager().infer() will return ContextManager - # f.infer() will return 42. - - Arguments: - self: nodes.With - node: The target of the assignment, `as (a, b)` in `with foo as (a, b)`. - context: Inference context used for caching already inferred objects - assign_path: - A list of indices, where each index specifies what item to fetch from - the inference results. - """ - try: - mgr = next(mgr for (mgr, vars) in self.items if vars == node) - except StopIteration: - return None - if assign_path is None: - yield from _infer_context_manager(self, mgr, context) - else: - for result in _infer_context_manager(self, mgr, context): - # Walk the assign_path and get the item at the final index. 
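
with_assigned_stmts above is what makes name bindings in a with statement inferable; the class from its docstring works as a doctest-style sketch:

    import astroid

    node = astroid.extract_node('''
    class ContextManager(object):
        def __enter__(self):
            return 42
        def __exit__(self, *args):
            return None

    with ContextManager() as f:
        f #@
    ''')
    print(next(node.infer()).value)   # 42 -- whatever __enter__ returns
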
- obj = result - for index in assign_path: - if not hasattr(obj, "elts"): - raise exceptions.InferenceError( - "Wrong type ({targets!r}) for {node!r} assignment", - node=self, - targets=node, - assign_path=assign_path, - context=context, - ) - try: - obj = obj.elts[index] - except IndexError as exc: - raise exceptions.InferenceError( - "Tried to infer a nonexistent target with index {index} " - "in {node!r}.", - node=self, - targets=node, - assign_path=assign_path, - context=context, - ) from exc - except TypeError as exc: - raise exceptions.InferenceError( - "Tried to unpack a non-iterable value " "in {node!r}.", - node=self, - targets=node, - assign_path=assign_path, - context=context, - ) from exc - yield obj - return dict(node=self, unknown=node, assign_path=assign_path, context=context) - - -nodes.With.assigned_stmts = with_assigned_stmts - - -@decorators.raise_if_nothing_inferred -def named_expr_assigned_stmts(self, node, context=None, assign_path=None): - """Infer names and other nodes from an assignment expression""" - if self.target == node: - yield from self.value.infer(context=context) - else: - raise exceptions.InferenceError( - "Cannot infer NamedExpr node {node!r}", - node=self, - assign_path=assign_path, - context=context, - ) - - -nodes.NamedExpr.assigned_stmts = named_expr_assigned_stmts - - -@decorators.yes_if_nothing_inferred -def starred_assigned_stmts(self, node=None, context=None, assign_path=None): - """ - Arguments: - self: nodes.Starred - node: a node related to the current underlying Node. - context: Inference context used for caching already inferred objects - assign_path: - A list of indices, where each index specifies what item to fetch from - the inference results. - """ - # pylint: disable=too-many-locals,too-many-branches,too-many-statements - def _determine_starred_iteration_lookups(starred, target, lookups): - # Determine the lookups for the rhs of the iteration - itered = target.itered() - for index, element in enumerate(itered): - if ( - isinstance(element, nodes.Starred) - and element.value.name == starred.value.name - ): - lookups.append((index, len(itered))) - break - if isinstance(element, nodes.Tuple): - lookups.append((index, len(element.itered()))) - _determine_starred_iteration_lookups(starred, element, lookups) - - stmt = self.statement() - if not isinstance(stmt, (nodes.Assign, nodes.For)): - raise exceptions.InferenceError( - "Statement {stmt!r} enclosing {node!r} " "must be an Assign or For node.", - node=self, - stmt=stmt, - unknown=node, - context=context, - ) - - if context is None: - context = contextmod.InferenceContext() - - if isinstance(stmt, nodes.Assign): - value = stmt.value - lhs = stmt.targets[0] - - if sum(1 for _ in lhs.nodes_of_class(nodes.Starred)) > 1: - raise exceptions.InferenceError( - "Too many starred arguments in the " " assignment targets {lhs!r}.", - node=self, - targets=lhs, - unknown=node, - context=context, - ) - - try: - rhs = next(value.infer(context)) - except exceptions.InferenceError: - yield util.Uninferable - return - if rhs is util.Uninferable or not hasattr(rhs, "itered"): - yield util.Uninferable - return - - try: - elts = collections.deque(rhs.itered()) - except TypeError: - yield util.Uninferable - return - - # Unpack iteratively the values from the rhs of the assignment, - # until the find the starred node. 
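
The two-pass trim described in the comment above (and implemented just below) produces a synthetic List holding only the starred portion of the unpacking; a sketch:

    import astroid

    node = astroid.extract_node('''
    a, *b, c = [1, 2, 3, 4]
    b #@
    ''')
    packed = next(node.infer())
    print(type(packed).__name__)                 # List
    print([elt.value for elt in packed.elts])    # [2, 3]
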
What will remain will - # be the list of values which the Starred node will represent - # This is done in two steps, from left to right to remove - # anything before the starred node and from right to left - # to remove anything after the starred node. - - for index, left_node in enumerate(lhs.elts): - if not isinstance(left_node, nodes.Starred): - if not elts: - break - elts.popleft() - continue - lhs_elts = collections.deque(reversed(lhs.elts[index:])) - for right_node in lhs_elts: - if not isinstance(right_node, nodes.Starred): - if not elts: - break - elts.pop() - continue - # We're done - packed = nodes.List( - ctx=Store, parent=self, lineno=lhs.lineno, col_offset=lhs.col_offset - ) - packed.postinit(elts=elts) - yield packed - break - - if isinstance(stmt, nodes.For): - try: - inferred_iterable = next(stmt.iter.infer(context=context)) - except exceptions.InferenceError: - yield util.Uninferable - return - if inferred_iterable is util.Uninferable or not hasattr( - inferred_iterable, "itered" - ): - yield util.Uninferable - return - try: - itered = inferred_iterable.itered() - except TypeError: - yield util.Uninferable - return - - target = stmt.target - - if not isinstance(target, nodes.Tuple): - raise exceptions.InferenceError( - "Could not make sense of this, the target must be a tuple", - context=context, - ) - - lookups = [] - _determine_starred_iteration_lookups(self, target, lookups) - if not lookups: - raise exceptions.InferenceError( - "Could not make sense of this, needs at least a lookup", context=context - ) - - # Make the last lookup a slice, since that what we want for a Starred node - last_element_index, last_element_length = lookups[-1] - is_starred_last = last_element_index == (last_element_length - 1) - - lookup_slice = slice( - last_element_index, - None if is_starred_last else (last_element_length - last_element_index), - ) - lookups[-1] = lookup_slice - - for element in itered: - - # We probably want to infer the potential values *for each* element in an - # iterable, but we can't infer a list of all values, when only a list of - # step values are expected: - # - # for a, *b in [...]: - # b - # - # *b* should now point to just the elements at that particular iteration step, - # which astroid can't know about. - - found_element = None - for lookup in lookups: - if not hasattr(element, "itered"): - break - if not isinstance(lookup, slice): - # Grab just the index, not the whole length - lookup = lookup[0] - try: - itered_inner_element = element.itered() - element = itered_inner_element[lookup] - except IndexError: - break - except TypeError: - # Most likely the itered() call failed, cannot make sense of this - yield util.Uninferable - return - else: - found_element = element - - unpacked = nodes.List( - ctx=Store, parent=self, lineno=self.lineno, col_offset=self.col_offset - ) - unpacked.postinit(elts=found_element or []) - yield unpacked - return - - yield util.Uninferable - - -nodes.Starred.assigned_stmts = starred_assigned_stmts diff --git a/venv/lib/python3.8/site-packages/astroid/raw_building.py b/venv/lib/python3.8/site-packages/astroid/raw_building.py deleted file mode 100644 index d94f9240..00000000 --- a/venv/lib/python3.8/site-packages/astroid/raw_building.py +++ /dev/null @@ -1,468 +0,0 @@ -# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2012 FELD Boris -# Copyright (c) 2014-2018 Claudiu Popa -# Copyright (c) 2014 Google, Inc. 
-# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 Florian Bruhin -# Copyright (c) 2015 Ovidiu Sabou -# Copyright (c) 2016 Derek Gustafson -# Copyright (c) 2016 Jakub Wilk -# Copyright (c) 2018 Nick Drozd -# Copyright (c) 2018 Bryce Guinta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""this module contains a set of functions to create astroid trees from scratch -(build_* functions) or from living object (object_build_* functions) -""" - -import builtins -import inspect -import os -import sys -import types - -from astroid import bases -from astroid import manager -from astroid import node_classes -from astroid import nodes - - -MANAGER = manager.AstroidManager() -# the keys of CONST_CLS eg python builtin types - -_CONSTANTS = tuple(node_classes.CONST_CLS) -_BUILTINS = vars(builtins) -TYPE_NONE = type(None) -TYPE_NOTIMPLEMENTED = type(NotImplemented) -TYPE_ELLIPSIS = type(...) - - -def _io_discrepancy(member): - # _io module names itself `io`: http://bugs.python.org/issue18602 - member_self = getattr(member, "__self__", None) - return ( - member_self - and inspect.ismodule(member_self) - and member_self.__name__ == "_io" - and member.__module__ == "io" - ) - - -def _attach_local_node(parent, node, name): - node.name = name # needed by add_local_node - parent.add_local_node(node) - - -def _add_dunder_class(func, member): - """Add a __class__ member to the given func node, if we can determine it.""" - python_cls = member.__class__ - cls_name = getattr(python_cls, "__name__", None) - if not cls_name: - return - cls_bases = [ancestor.__name__ for ancestor in python_cls.__bases__] - ast_klass = build_class(cls_name, cls_bases, python_cls.__doc__) - func.instance_attrs["__class__"] = [ast_klass] - - -_marker = object() - - -def attach_dummy_node(node, name, runtime_object=_marker): - """create a dummy node and register it in the locals of the given - node with the specified name - """ - enode = nodes.EmptyNode() - enode.object = runtime_object - _attach_local_node(node, enode, name) - - -def _has_underlying_object(self): - return self.object is not None and self.object is not _marker - - -nodes.EmptyNode.has_underlying_object = _has_underlying_object - - -def attach_const_node(node, name, value): - """create a Const node and register it in the locals of the given - node with the specified name - """ - if name not in node.special_attributes: - _attach_local_node(node, nodes.const_factory(value), name) - - -def attach_import_node(node, modname, membername): - """create a ImportFrom node and register it in the locals of the given - node with the specified name - """ - from_node = nodes.ImportFrom(modname, [(membername, None)]) - _attach_local_node(node, from_node, membername) - - -def build_module(name, doc=None): - """create and initialize an astroid Module node""" - node = nodes.Module(name, doc, pure_python=False) - node.package = False - node.parent = None - return node - - -def build_class(name, basenames=(), doc=None): - """create and initialize an astroid ClassDef node""" - node = nodes.ClassDef(name, doc) - for base in basenames: - basenode = nodes.Name() - basenode.name = base - node.bases.append(basenode) - basenode.parent = node - return node - - -def build_function(name, args=None, posonlyargs=None, defaults=None, doc=None): - """create and initialize an astroid FunctionDef node""" - args, defaults, posonlyargs = args or [], defaults or [], posonlyargs or [] - 
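
The build_* helpers synthesize nodes that have no source text behind them; a minimal sketch using hypothetical module and class names:

    from astroid.raw_building import build_class, build_module

    module = build_module('synthetic_mod', doc='Module built without source')
    klass = build_class('SyntheticError', basenames=('Exception',))
    klass.parent = module
    print(module.name, klass.name)              # synthetic_mod SyntheticError
    print([base.name for base in klass.bases])  # ['Exception']
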
# first argument is now a list of decorators - func = nodes.FunctionDef(name, doc) - func.args = argsnode = nodes.Arguments() - argsnode.args = [] - argsnode.posonlyargs = [] - for arg in args: - argsnode.args.append(nodes.Name()) - argsnode.args[-1].name = arg - argsnode.args[-1].parent = argsnode - for arg in posonlyargs: - argsnode.posonlyargs.append(nodes.Name()) - argsnode.posonlyargs[-1].name = arg - argsnode.posonlyargs[-1].parent = argsnode - argsnode.defaults = [] - for default in defaults: - argsnode.defaults.append(nodes.const_factory(default)) - argsnode.defaults[-1].parent = argsnode - argsnode.kwarg = None - argsnode.vararg = None - argsnode.parent = func - if args: - register_arguments(func) - return func - - -def build_from_import(fromname, names): - """create and initialize an astroid ImportFrom import statement""" - return nodes.ImportFrom(fromname, [(name, None) for name in names]) - - -def register_arguments(func, args=None): - """add given arguments to local - - args is a list that may contains nested lists - (i.e. def func(a, (b, c, d)): ...) - """ - if args is None: - args = func.args.args - if func.args.vararg: - func.set_local(func.args.vararg, func.args) - if func.args.kwarg: - func.set_local(func.args.kwarg, func.args) - for arg in args: - if isinstance(arg, nodes.Name): - func.set_local(arg.name, arg) - else: - register_arguments(func, arg.elts) - - -def object_build_class(node, member, localname): - """create astroid for a living class object""" - basenames = [base.__name__ for base in member.__bases__] - return _base_class_object_build(node, member, basenames, localname=localname) - - -def object_build_function(node, member, localname): - """create astroid for a living function object""" - signature = inspect.signature(member) - args = [] - defaults = [] - posonlyargs = [] - for param_name, param in signature.parameters.items(): - if param.kind == inspect.Parameter.POSITIONAL_ONLY: - posonlyargs.append(param_name) - elif param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD: - args.append(param_name) - elif param.kind == inspect.Parameter.VAR_POSITIONAL: - args.append(param_name) - elif param.kind == inspect.Parameter.VAR_KEYWORD: - args.append(param_name) - if param.default is not inspect._empty: - defaults.append(param.default) - func = build_function( - getattr(member, "__name__", None) or localname, - args, - posonlyargs, - defaults, - member.__doc__, - ) - node.add_local_node(func, localname) - - -def object_build_datadescriptor(node, member, name): - """create astroid for a living data descriptor object""" - return _base_class_object_build(node, member, [], name) - - -def object_build_methoddescriptor(node, member, localname): - """create astroid for a living method descriptor object""" - # FIXME get arguments ? - func = build_function( - getattr(member, "__name__", None) or localname, doc=member.__doc__ - ) - # set node's arguments to None to notice that we have no information, not - # and empty argument list - func.args.args = None - node.add_local_node(func, localname) - _add_dunder_class(func, member) - - -def _base_class_object_build(node, member, basenames, name=None, localname=None): - """create astroid for a living class object, with a given set of base names - (e.g. 
ancestors) - """ - klass = build_class( - name or getattr(member, "__name__", None) or localname, - basenames, - member.__doc__, - ) - klass._newstyle = isinstance(member, type) - node.add_local_node(klass, localname) - try: - # limit the instantiation trick since it's too dangerous - # (such as infinite test execution...) - # this at least resolves common case such as Exception.args, - # OSError.errno - if issubclass(member, Exception): - instdict = member().__dict__ - else: - raise TypeError - except TypeError: - pass - else: - for item_name, obj in instdict.items(): - valnode = nodes.EmptyNode() - valnode.object = obj - valnode.parent = klass - valnode.lineno = 1 - klass.instance_attrs[item_name] = [valnode] - return klass - - -def _build_from_function(node, name, member, module): - # verify this is not an imported function - try: - code = member.__code__ - except AttributeError: - # Some implementations don't provide the code object, - # such as Jython. - code = None - filename = getattr(code, "co_filename", None) - if filename is None: - assert isinstance(member, object) - object_build_methoddescriptor(node, member, name) - elif filename != getattr(module, "__file__", None): - attach_dummy_node(node, name, member) - else: - object_build_function(node, member, name) - - -class InspectBuilder: - """class for building nodes from living object - - this is actually a really minimal representation, including only Module, - FunctionDef and ClassDef nodes and some others as guessed. - """ - - def __init__(self): - self._done = {} - self._module = None - - def inspect_build(self, module, modname=None, path=None): - """build astroid from a living module (i.e. using inspect) - this is used when there is no python source code available (either - because it's a built-in module or because the .py is not available) - """ - self._module = module - if modname is None: - modname = module.__name__ - try: - node = build_module(modname, module.__doc__) - except AttributeError: - # in jython, java modules have no __doc__ (see #109562) - node = build_module(modname) - node.file = node.path = os.path.abspath(path) if path else path - node.name = modname - MANAGER.cache_module(node) - node.package = hasattr(module, "__path__") - self._done = {} - self.object_build(node, module) - return node - - def object_build(self, node, obj): - """recursive method which create a partial ast from real objects - (only function, class, and method are handled) - """ - if obj in self._done: - return self._done[obj] - self._done[obj] = node - for name in dir(obj): - try: - member = getattr(obj, name) - except AttributeError: - # damned ExtensionClass.Base, I know you're there ! 
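The imported-function check in _build_from_function above can be exercised standalone; a minimal sketch (json and functools are only convenient stand-ins):

import functools
import json

def defined_in(func, module):
    # C functions (e.g. functools.reduce) expose no __code__ at all, which
    # is why _build_from_function falls back to a method-descriptor build.
    code = getattr(func, "__code__", None)
    filename = getattr(code, "co_filename", None)
    return filename is not None and filename == getattr(module, "__file__", None)

print(defined_in(json.dumps, json))        # True: defined in json/__init__.py
print(defined_in(json.dumps, functools))   # False: imported from elsewhere
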
- attach_dummy_node(node, name) - continue - if inspect.ismethod(member): - member = member.__func__ - if inspect.isfunction(member): - _build_from_function(node, name, member, self._module) - elif inspect.isbuiltin(member): - if not _io_discrepancy(member) and self.imported_member( - node, member, name - ): - continue - object_build_methoddescriptor(node, member, name) - elif inspect.isclass(member): - if self.imported_member(node, member, name): - continue - if member in self._done: - class_node = self._done[member] - if class_node not in node.locals.get(name, ()): - node.add_local_node(class_node, name) - else: - class_node = object_build_class(node, member, name) - # recursion - self.object_build(class_node, member) - if name == "__class__" and class_node.parent is None: - class_node.parent = self._done[self._module] - elif inspect.ismethoddescriptor(member): - assert isinstance(member, object) - object_build_methoddescriptor(node, member, name) - elif inspect.isdatadescriptor(member): - assert isinstance(member, object) - object_build_datadescriptor(node, member, name) - elif isinstance(member, _CONSTANTS): - attach_const_node(node, name, member) - elif inspect.isroutine(member): - # This should be called for Jython, where some builtin - # methods aren't caught by isbuiltin branch. - _build_from_function(node, name, member, self._module) - else: - # create an empty node so that the name is actually defined - attach_dummy_node(node, name, member) - return None - - def imported_member(self, node, member, name): - """verify this is not an imported class or handle it""" - # /!\ some classes like ExtensionClass doesn't have a __module__ - # attribute ! Also, this may trigger an exception on badly built module - # (see http://www.logilab.org/ticket/57299 for instance) - try: - modname = getattr(member, "__module__", None) - except TypeError: - modname = None - if modname is None: - if name in ("__new__", "__subclasshook__"): - # Python 2.5.1 (r251:54863, Sep 1 2010, 22:03:14) - # >>> print object.__new__.__module__ - # None - modname = builtins.__name__ - else: - attach_dummy_node(node, name, member) - return True - - real_name = {"gtk": "gtk_gtk", "_io": "io"}.get(modname, modname) - - if real_name != self._module.__name__: - # check if it sounds valid and then add an import node, else use a - # dummy node - try: - getattr(sys.modules[modname], name) - except (KeyError, AttributeError): - attach_dummy_node(node, name, member) - else: - attach_import_node(node, modname, name) - return True - return False - - -### astroid bootstrapping ###################################################### - -_CONST_PROXY = {} - -# TODO : find a nicer way to handle this situation; -def _set_proxied(const): - return _CONST_PROXY[const.value.__class__] - - -def _astroid_bootstrapping(): - """astroid bootstrapping the builtins module""" - # this boot strapping is necessary since we need the Const nodes to - # inspect_build builtins, and then we can proxy Const - builder = InspectBuilder() - astroid_builtin = builder.inspect_build(builtins) - - # pylint: disable=redefined-outer-name - for cls, node_cls in node_classes.CONST_CLS.items(): - if cls is TYPE_NONE: - proxy = build_class("NoneType") - proxy.parent = astroid_builtin - elif cls is TYPE_NOTIMPLEMENTED: - proxy = build_class("NotImplementedType") - proxy.parent = astroid_builtin - elif cls is TYPE_ELLIPSIS: - proxy = build_class("Ellipsis") - proxy.parent = astroid_builtin - else: - proxy = astroid_builtin.getattr(cls.__name__)[0] - if cls in (dict, list, 
set, tuple): - node_cls._proxied = proxy - else: - _CONST_PROXY[cls] = proxy - - # Set the builtin module as parent for some builtins. - nodes.Const._proxied = property(_set_proxied) - - _GeneratorType = nodes.ClassDef( - types.GeneratorType.__name__, types.GeneratorType.__doc__ - ) - _GeneratorType.parent = astroid_builtin - bases.Generator._proxied = _GeneratorType - builder.object_build(bases.Generator._proxied, types.GeneratorType) - - if hasattr(types, "AsyncGeneratorType"): - # pylint: disable=no-member; AsyncGeneratorType - _AsyncGeneratorType = nodes.ClassDef( - types.AsyncGeneratorType.__name__, types.AsyncGeneratorType.__doc__ - ) - _AsyncGeneratorType.parent = astroid_builtin - bases.AsyncGenerator._proxied = _AsyncGeneratorType - builder.object_build(bases.AsyncGenerator._proxied, types.AsyncGeneratorType) - builtin_types = ( - types.GetSetDescriptorType, - types.GeneratorType, - types.MemberDescriptorType, - TYPE_NONE, - TYPE_NOTIMPLEMENTED, - types.FunctionType, - types.MethodType, - types.BuiltinFunctionType, - types.ModuleType, - types.TracebackType, - ) - for _type in builtin_types: - if _type.__name__ not in astroid_builtin: - cls = nodes.ClassDef(_type.__name__, _type.__doc__) - cls.parent = astroid_builtin - builder.object_build(cls, _type) - astroid_builtin[_type.__name__] = cls - - -_astroid_bootstrapping() diff --git a/venv/lib/python3.8/site-packages/astroid/rebuilder.py b/venv/lib/python3.8/site-packages/astroid/rebuilder.py deleted file mode 100644 index fb78f7bb..00000000 --- a/venv/lib/python3.8/site-packages/astroid/rebuilder.py +++ /dev/null @@ -1,1090 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2009-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2013-2018 Claudiu Popa -# Copyright (c) 2013-2014 Google, Inc. -# Copyright (c) 2014 Alexander Presnyakov -# Copyright (c) 2014 Eevee (Alex Munroe) -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2016-2017 Derek Gustafson -# Copyright (c) 2016 Jared Garst -# Copyright (c) 2017 Hugo -# Copyright (c) 2017 Łukasz Rogalski -# Copyright (c) 2017 rr- -# Copyright (c) 2018 Nick Drozd -# Copyright (c) 2018 Bryce Guinta - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""this module contains utilities for rebuilding a _ast tree in -order to get a single Astroid representation -""" - -import sys - -import astroid -from astroid._ast import _parse, _get_parser_module, parse_function_type_comment -from astroid import nodes - - -CONST_NAME_TRANSFORMS = {"None": None, "True": True, "False": False} - -REDIRECT = { - "arguments": "Arguments", - "comprehension": "Comprehension", - "ListCompFor": "Comprehension", - "GenExprFor": "Comprehension", - "excepthandler": "ExceptHandler", - "keyword": "Keyword", -} -PY37 = sys.version_info >= (3, 7) -PY38 = sys.version_info >= (3, 8) - - -def _binary_operators_from_module(module): - binary_operators = { - module.Add: "+", - module.BitAnd: "&", - module.BitOr: "|", - module.BitXor: "^", - module.Div: "/", - module.FloorDiv: "//", - module.MatMult: "@", - module.Mod: "%", - module.Mult: "*", - module.Pow: "**", - module.Sub: "-", - module.LShift: "<<", - module.RShift: ">>", - } - return binary_operators - - -def _bool_operators_from_module(module): - return {module.And: "and", module.Or: "or"} - - -def _unary_operators_from_module(module): - return {module.UAdd: "+", module.USub: "-", module.Not: "not", module.Invert: "~"} - - -def 
_compare_operators_from_module(module): - return { - module.Eq: "==", - module.Gt: ">", - module.GtE: ">=", - module.In: "in", - module.Is: "is", - module.IsNot: "is not", - module.Lt: "<", - module.LtE: "<=", - module.NotEq: "!=", - module.NotIn: "not in", - } - - -def _contexts_from_module(module): - return { - module.Load: astroid.Load, - module.Store: astroid.Store, - module.Del: astroid.Del, - module.Param: astroid.Store, - } - - -def _visit_or_none(node, attr, visitor, parent, visit="visit", **kws): - """If the given node has an attribute, visits the attribute, and - otherwise returns None. - - """ - value = getattr(node, attr, None) - if value: - return getattr(visitor, visit)(value, parent, **kws) - - return None - - -class TreeRebuilder: - """Rebuilds the _ast tree to become an Astroid tree""" - - def __init__(self, manager, parse_python_two: bool = False): - self._manager = manager - self._global_names = [] - self._import_from_nodes = [] - self._delayed_assattr = [] - self._visit_meths = {} - - # Configure the right classes for the right module - self._parser_module = _get_parser_module(parse_python_two=parse_python_two) - self._unary_op_classes = _unary_operators_from_module(self._parser_module) - self._cmp_op_classes = _compare_operators_from_module(self._parser_module) - self._bool_op_classes = _bool_operators_from_module(self._parser_module) - self._bin_op_classes = _binary_operators_from_module(self._parser_module) - self._context_classes = _contexts_from_module(self._parser_module) - - def _get_doc(self, node): - try: - if PY37 and hasattr(node, "docstring"): - doc = node.docstring - return node, doc - if node.body and isinstance(node.body[0], self._parser_module.Expr): - - first_value = node.body[0].value - if isinstance(first_value, self._parser_module.Str) or ( - PY38 - and isinstance(first_value, self._parser_module.Constant) - and isinstance(first_value.value, str) - ): - doc = first_value.value if PY38 else first_value.s - node.body = node.body[1:] - return node, doc - except IndexError: - pass # ast built from scratch - return node, None - - def _get_context(self, node): - return self._context_classes.get(type(node.ctx), astroid.Load) - - def visit_module(self, node, modname, modpath, package): - """visit a Module node by returning a fresh instance of it""" - node, doc = self._get_doc(node) - newnode = nodes.Module( - name=modname, - doc=doc, - file=modpath, - path=[modpath], - package=package, - parent=None, - ) - newnode.postinit([self.visit(child, newnode) for child in node.body]) - return newnode - - def visit(self, node, parent): - cls = node.__class__ - if cls in self._visit_meths: - visit_method = self._visit_meths[cls] - else: - cls_name = cls.__name__ - visit_name = "visit_" + REDIRECT.get(cls_name, cls_name).lower() - visit_method = getattr(self, visit_name) - self._visit_meths[cls] = visit_method - return visit_method(node, parent) - - def _save_assignment(self, node, name=None): - """save assignement situation since node.parent is not available yet""" - if self._global_names and node.name in self._global_names[-1]: - node.root().set_local(node.name, node) - else: - node.parent.set_local(node.name, node) - - def visit_arguments(self, node, parent): - """visit an Arguments node by returning a fresh instance of it""" - vararg, kwarg = node.vararg, node.kwarg - newnode = nodes.Arguments( - vararg.arg if vararg else None, kwarg.arg if kwarg else None, parent - ) - args = [self.visit(child, newnode) for child in node.args] - defaults = [self.visit(child, 
newnode) for child in node.defaults] - varargannotation = None - kwargannotation = None - posonlyargs = [] - # change added in 82732 (7c5c678e4164), vararg and kwarg - # are instances of `_ast.arg`, not strings - if vararg: - if node.vararg.annotation: - varargannotation = self.visit(node.vararg.annotation, newnode) - vararg = vararg.arg - if kwarg: - if node.kwarg.annotation: - kwargannotation = self.visit(node.kwarg.annotation, newnode) - kwarg = kwarg.arg - kwonlyargs = [self.visit(child, newnode) for child in node.kwonlyargs] - kw_defaults = [ - self.visit(child, newnode) if child else None for child in node.kw_defaults - ] - annotations = [ - self.visit(arg.annotation, newnode) if arg.annotation else None - for arg in node.args - ] - kwonlyargs_annotations = [ - self.visit(arg.annotation, newnode) if arg.annotation else None - for arg in node.kwonlyargs - ] - - posonlyargs_annotations = [] - if PY38: - posonlyargs = [self.visit(child, newnode) for child in node.posonlyargs] - posonlyargs_annotations = [ - self.visit(arg.annotation, newnode) if arg.annotation else None - for arg in node.posonlyargs - ] - type_comment_args = [ - self.check_type_comment(child, parent=newnode) for child in node.args - ] - - newnode.postinit( - args=args, - defaults=defaults, - kwonlyargs=kwonlyargs, - posonlyargs=posonlyargs, - kw_defaults=kw_defaults, - annotations=annotations, - kwonlyargs_annotations=kwonlyargs_annotations, - posonlyargs_annotations=posonlyargs_annotations, - varargannotation=varargannotation, - kwargannotation=kwargannotation, - type_comment_args=type_comment_args, - ) - # save argument names in locals: - if vararg: - newnode.parent.set_local(vararg, newnode) - if kwarg: - newnode.parent.set_local(kwarg, newnode) - return newnode - - def visit_assert(self, node, parent): - """visit a Assert node by returning a fresh instance of it""" - newnode = nodes.Assert(node.lineno, node.col_offset, parent) - if node.msg: - msg = self.visit(node.msg, newnode) - else: - msg = None - newnode.postinit(self.visit(node.test, newnode), msg) - return newnode - - def check_type_comment(self, node, parent): - type_comment = getattr(node, "type_comment", None) - if not type_comment: - return None - - try: - type_comment_ast = _parse(type_comment) - except SyntaxError: - # Invalid type comment, just skip it. - return None - - type_object = self.visit(type_comment_ast.body[0], parent=parent) - if not isinstance(type_object, nodes.Expr): - return None - - return type_object.value - - def check_function_type_comment(self, node): - type_comment = getattr(node, "type_comment", None) - if not type_comment: - return None - - try: - type_comment_ast = parse_function_type_comment(type_comment) - except SyntaxError: - # Invalid type comment, just skip it. 
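check_type_comment and check_function_type_comment above lean on CPython's type-comment support; astroid's _parse and parse_function_type_comment appear to be thin wrappers over stdlib calls like these (an assumption drawn from their usage here, Python 3.8+):

import ast

src = "def f(a, b):\n    # type: (int, str) -> bool\n    return True\n"
func = ast.parse(src, type_comments=True).body[0]
print(func.type_comment)                    # (int, str) -> bool
sig = ast.parse(func.type_comment, mode="func_type")
print(len(sig.argtypes))                    # 2 argument type expressions
print(ast.dump(sig.returns))                # a Name node for 'bool'
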
- return None - - returns = None - argtypes = [ - self.visit(elem, node) for elem in (type_comment_ast.argtypes or []) - ] - if type_comment_ast.returns: - returns = self.visit(type_comment_ast.returns, node) - - return returns, argtypes - - def visit_assign(self, node, parent): - """visit a Assign node by returning a fresh instance of it""" - newnode = nodes.Assign(node.lineno, node.col_offset, parent) - type_annotation = self.check_type_comment(node, parent=newnode) - newnode.postinit( - targets=[self.visit(child, newnode) for child in node.targets], - value=self.visit(node.value, newnode), - type_annotation=type_annotation, - ) - return newnode - - def visit_assignname(self, node, parent, node_name=None): - """visit a node and return a AssignName node""" - newnode = nodes.AssignName( - node_name, - getattr(node, "lineno", None), - getattr(node, "col_offset", None), - parent, - ) - self._save_assignment(newnode) - return newnode - - def visit_augassign(self, node, parent): - """visit a AugAssign node by returning a fresh instance of it""" - newnode = nodes.AugAssign( - self._bin_op_classes[type(node.op)] + "=", - node.lineno, - node.col_offset, - parent, - ) - newnode.postinit( - self.visit(node.target, newnode), self.visit(node.value, newnode) - ) - return newnode - - def visit_repr(self, node, parent): - """visit a Backquote node by returning a fresh instance of it""" - newnode = nodes.Repr(node.lineno, node.col_offset, parent) - newnode.postinit(self.visit(node.value, newnode)) - return newnode - - def visit_binop(self, node, parent): - """visit a BinOp node by returning a fresh instance of it""" - newnode = nodes.BinOp( - self._bin_op_classes[type(node.op)], node.lineno, node.col_offset, parent - ) - newnode.postinit( - self.visit(node.left, newnode), self.visit(node.right, newnode) - ) - return newnode - - def visit_boolop(self, node, parent): - """visit a BoolOp node by returning a fresh instance of it""" - newnode = nodes.BoolOp( - self._bool_op_classes[type(node.op)], node.lineno, node.col_offset, parent - ) - newnode.postinit([self.visit(child, newnode) for child in node.values]) - return newnode - - def visit_break(self, node, parent): - """visit a Break node by returning a fresh instance of it""" - return nodes.Break( - getattr(node, "lineno", None), getattr(node, "col_offset", None), parent - ) - - def visit_call(self, node, parent): - """visit a CallFunc node by returning a fresh instance of it""" - newnode = nodes.Call(node.lineno, node.col_offset, parent) - starargs = _visit_or_none(node, "starargs", self, newnode) - kwargs = _visit_or_none(node, "kwargs", self, newnode) - args = [self.visit(child, newnode) for child in node.args] - - if node.keywords: - keywords = [self.visit(child, newnode) for child in node.keywords] - else: - keywords = None - if starargs: - new_starargs = nodes.Starred( - col_offset=starargs.col_offset, - lineno=starargs.lineno, - parent=starargs.parent, - ) - new_starargs.postinit(value=starargs) - args.append(new_starargs) - if kwargs: - new_kwargs = nodes.Keyword( - arg=None, - col_offset=kwargs.col_offset, - lineno=kwargs.lineno, - parent=kwargs.parent, - ) - new_kwargs.postinit(value=kwargs) - if keywords: - keywords.append(new_kwargs) - else: - keywords = [new_kwargs] - - newnode.postinit(self.visit(node.func, newnode), args, keywords) - return newnode - - def visit_classdef(self, node, parent, newstyle=None): - """visit a ClassDef node to become astroid""" - node, doc = self._get_doc(node) - newnode = nodes.ClassDef(node.name, doc, node.lineno, 
node.col_offset, parent) - metaclass = None - for keyword in node.keywords: - if keyword.arg == "metaclass": - metaclass = self.visit(keyword, newnode).value - break - if node.decorator_list: - decorators = self.visit_decorators(node, newnode) - else: - decorators = None - newnode.postinit( - [self.visit(child, newnode) for child in node.bases], - [self.visit(child, newnode) for child in node.body], - decorators, - newstyle, - metaclass, - [ - self.visit(kwd, newnode) - for kwd in node.keywords - if kwd.arg != "metaclass" - ], - ) - return newnode - - def visit_const(self, node, parent): - """visit a Const node by returning a fresh instance of it""" - return nodes.Const( - node.value, - getattr(node, "lineno", None), - getattr(node, "col_offset", None), - parent, - ) - - def visit_continue(self, node, parent): - """visit a Continue node by returning a fresh instance of it""" - return nodes.Continue( - getattr(node, "lineno", None), getattr(node, "col_offset", None), parent - ) - - def visit_compare(self, node, parent): - """visit a Compare node by returning a fresh instance of it""" - newnode = nodes.Compare(node.lineno, node.col_offset, parent) - newnode.postinit( - self.visit(node.left, newnode), - [ - (self._cmp_op_classes[op.__class__], self.visit(expr, newnode)) - for (op, expr) in zip(node.ops, node.comparators) - ], - ) - return newnode - - def visit_comprehension(self, node, parent): - """visit a Comprehension node by returning a fresh instance of it""" - newnode = nodes.Comprehension(parent) - newnode.postinit( - self.visit(node.target, newnode), - self.visit(node.iter, newnode), - [self.visit(child, newnode) for child in node.ifs], - getattr(node, "is_async", None), - ) - return newnode - - def visit_decorators(self, node, parent): - """visit a Decorators node by returning a fresh instance of it""" - # /!\ node is actually a _ast.FunctionDef node while - # parent is an astroid.nodes.FunctionDef node - if PY38: - # Set the line number of the first decorator for Python 3.8+. 
- lineno = node.decorator_list[0].lineno - else: - lineno = node.lineno - newnode = nodes.Decorators(lineno, node.col_offset, parent) - newnode.postinit([self.visit(child, newnode) for child in node.decorator_list]) - return newnode - - def visit_delete(self, node, parent): - """visit a Delete node by returning a fresh instance of it""" - newnode = nodes.Delete(node.lineno, node.col_offset, parent) - newnode.postinit([self.visit(child, newnode) for child in node.targets]) - return newnode - - def _visit_dict_items(self, node, parent, newnode): - for key, value in zip(node.keys, node.values): - rebuilt_value = self.visit(value, newnode) - if not key: - # Python 3.5 and extended unpacking - rebuilt_key = nodes.DictUnpack( - rebuilt_value.lineno, rebuilt_value.col_offset, parent - ) - else: - rebuilt_key = self.visit(key, newnode) - yield rebuilt_key, rebuilt_value - - def visit_dict(self, node, parent): - """visit a Dict node by returning a fresh instance of it""" - newnode = nodes.Dict(node.lineno, node.col_offset, parent) - items = list(self._visit_dict_items(node, parent, newnode)) - newnode.postinit(items) - return newnode - - def visit_dictcomp(self, node, parent): - """visit a DictComp node by returning a fresh instance of it""" - newnode = nodes.DictComp(node.lineno, node.col_offset, parent) - newnode.postinit( - self.visit(node.key, newnode), - self.visit(node.value, newnode), - [self.visit(child, newnode) for child in node.generators], - ) - return newnode - - def visit_expr(self, node, parent): - """visit a Expr node by returning a fresh instance of it""" - newnode = nodes.Expr(node.lineno, node.col_offset, parent) - newnode.postinit(self.visit(node.value, newnode)) - return newnode - - # Not used in Python 3.8+. - def visit_ellipsis(self, node, parent): - """visit an Ellipsis node by returning a fresh instance of it""" - return nodes.Ellipsis( - getattr(node, "lineno", None), getattr(node, "col_offset", None), parent - ) - - def visit_emptynode(self, node, parent): - """visit an EmptyNode node by returning a fresh instance of it""" - return nodes.EmptyNode( - getattr(node, "lineno", None), getattr(node, "col_offset", None), parent - ) - - def visit_excepthandler(self, node, parent): - """visit an ExceptHandler node by returning a fresh instance of it""" - newnode = nodes.ExceptHandler(node.lineno, node.col_offset, parent) - # /!\ node.name can be a tuple - newnode.postinit( - _visit_or_none(node, "type", self, newnode), - _visit_or_none(node, "name", self, newnode), - [self.visit(child, newnode) for child in node.body], - ) - return newnode - - def visit_exec(self, node, parent): - """visit an Exec node by returning a fresh instance of it""" - newnode = nodes.Exec(node.lineno, node.col_offset, parent) - newnode.postinit( - self.visit(node.body, newnode), - _visit_or_none(node, "globals", self, newnode), - _visit_or_none(node, "locals", self, newnode), - ) - return newnode - - # Not used in Python 3.8+. 
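_visit_dict_items above treats a None key as a ** unpack; the raw ast shows why (Python 3.5+):

import ast

node = ast.parse("{**base, 'k': 1}").body[0].value
print([k if k is None else ast.dump(k) for k in node.keys])
# first entry is None -> rebuilt as DictUnpack; second is the Constant for 'k'
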
- def visit_extslice(self, node, parent): - """visit an ExtSlice node by returning a fresh instance of it""" - newnode = nodes.ExtSlice(parent=parent) - newnode.postinit([self.visit(dim, newnode) for dim in node.dims]) - return newnode - - def _visit_for(self, cls, node, parent): - """visit a For node by returning a fresh instance of it""" - newnode = cls(node.lineno, node.col_offset, parent) - type_annotation = self.check_type_comment(node, parent=newnode) - newnode.postinit( - target=self.visit(node.target, newnode), - iter=self.visit(node.iter, newnode), - body=[self.visit(child, newnode) for child in node.body], - orelse=[self.visit(child, newnode) for child in node.orelse], - type_annotation=type_annotation, - ) - return newnode - - def visit_for(self, node, parent): - return self._visit_for(nodes.For, node, parent) - - def visit_importfrom(self, node, parent): - """visit an ImportFrom node by returning a fresh instance of it""" - names = [(alias.name, alias.asname) for alias in node.names] - newnode = nodes.ImportFrom( - node.module or "", - names, - node.level or None, - getattr(node, "lineno", None), - getattr(node, "col_offset", None), - parent, - ) - # store From names to add them to locals after building - self._import_from_nodes.append(newnode) - return newnode - - def _visit_functiondef(self, cls, node, parent): - """visit an FunctionDef node to become astroid""" - self._global_names.append({}) - node, doc = self._get_doc(node) - - lineno = node.lineno - if PY38 and node.decorator_list: - # Python 3.8 sets the line number of a decorated function - # to be the actual line number of the function, but the - # previous versions expected the decorator's line number instead. - # We reset the function's line number to that of the - # first decorator to maintain backward compatibility. - # It's not ideal but this discrepancy was baked into - # the framework for *years*. - lineno = node.decorator_list[0].lineno - - newnode = cls(node.name, doc, lineno, node.col_offset, parent) - if node.decorator_list: - decorators = self.visit_decorators(node, newnode) - else: - decorators = None - if node.returns: - returns = self.visit(node.returns, newnode) - else: - returns = None - - type_comment_args = type_comment_returns = None - type_comment_annotation = self.check_function_type_comment(node) - if type_comment_annotation: - type_comment_returns, type_comment_args = type_comment_annotation - newnode.postinit( - args=self.visit(node.args, newnode), - body=[self.visit(child, newnode) for child in node.body], - decorators=decorators, - returns=returns, - type_comment_returns=type_comment_returns, - type_comment_args=type_comment_args, - ) - self._global_names.pop() - return newnode - - def visit_functiondef(self, node, parent): - return self._visit_functiondef(nodes.FunctionDef, node, parent) - - def visit_generatorexp(self, node, parent): - """visit a GeneratorExp node by returning a fresh instance of it""" - newnode = nodes.GeneratorExp(node.lineno, node.col_offset, parent) - newnode.postinit( - self.visit(node.elt, newnode), - [self.visit(child, newnode) for child in node.generators], - ) - return newnode - - def visit_attribute(self, node, parent): - """visit an Attribute node by returning a fresh instance of it""" - context = self._get_context(node) - if context == astroid.Del: - # FIXME : maybe we should reintroduce and visit_delattr ? 
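_visit_functiondef above pins lineno back to the first decorator on 3.8+; the underlying CPython change it compensates for is easy to observe:

import ast

src = "@property\ndef prop(self):\n    return 1\n"
func = ast.parse(src).body[0]
print(func.lineno)  # 2 on Python 3.8+ (the 'def' line); 1 on 3.7 and earlier
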
- # for instance, deactivating assign_ctx - newnode = nodes.DelAttr(node.attr, node.lineno, node.col_offset, parent) - elif context == astroid.Store: - newnode = nodes.AssignAttr(node.attr, node.lineno, node.col_offset, parent) - # Prohibit a local save if we are in an ExceptHandler. - if not isinstance(parent, astroid.ExceptHandler): - self._delayed_assattr.append(newnode) - else: - newnode = nodes.Attribute(node.attr, node.lineno, node.col_offset, parent) - newnode.postinit(self.visit(node.value, newnode)) - return newnode - - def visit_global(self, node, parent): - """visit a Global node to become astroid""" - newnode = nodes.Global( - node.names, - getattr(node, "lineno", None), - getattr(node, "col_offset", None), - parent, - ) - if self._global_names: # global at the module level, no effect - for name in node.names: - self._global_names[-1].setdefault(name, []).append(newnode) - return newnode - - def visit_if(self, node, parent): - """visit an If node by returning a fresh instance of it""" - newnode = nodes.If(node.lineno, node.col_offset, parent) - newnode.postinit( - self.visit(node.test, newnode), - [self.visit(child, newnode) for child in node.body], - [self.visit(child, newnode) for child in node.orelse], - ) - return newnode - - def visit_ifexp(self, node, parent): - """visit a IfExp node by returning a fresh instance of it""" - newnode = nodes.IfExp(node.lineno, node.col_offset, parent) - newnode.postinit( - self.visit(node.test, newnode), - self.visit(node.body, newnode), - self.visit(node.orelse, newnode), - ) - return newnode - - def visit_import(self, node, parent): - """visit a Import node by returning a fresh instance of it""" - names = [(alias.name, alias.asname) for alias in node.names] - newnode = nodes.Import( - names, - getattr(node, "lineno", None), - getattr(node, "col_offset", None), - parent, - ) - # save import names in parent's locals: - for (name, asname) in newnode.names: - name = asname or name - parent.set_local(name.split(".")[0], newnode) - return newnode - - # Not used in Python 3.8+. 
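visit_import above registers name.split(".")[0] in the parent's locals; the alias list from the raw ast shows why that split is needed:

import ast

node = ast.parse("import os.path, sys as system").body[0]
print([(a.name, a.asname) for a in node.names])
# [('os.path', None), ('sys', 'system')]
# 'import os.path' binds only 'os' locally, hence split('.')[0] above
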
- def visit_index(self, node, parent): - """visit a Index node by returning a fresh instance of it""" - newnode = nodes.Index(parent=parent) - newnode.postinit(self.visit(node.value, newnode)) - return newnode - - def visit_keyword(self, node, parent): - """visit a Keyword node by returning a fresh instance of it""" - newnode = nodes.Keyword(node.arg, parent=parent) - newnode.postinit(self.visit(node.value, newnode)) - return newnode - - def visit_lambda(self, node, parent): - """visit a Lambda node by returning a fresh instance of it""" - newnode = nodes.Lambda(node.lineno, node.col_offset, parent) - newnode.postinit(self.visit(node.args, newnode), self.visit(node.body, newnode)) - return newnode - - def visit_list(self, node, parent): - """visit a List node by returning a fresh instance of it""" - context = self._get_context(node) - newnode = nodes.List( - ctx=context, lineno=node.lineno, col_offset=node.col_offset, parent=parent - ) - newnode.postinit([self.visit(child, newnode) for child in node.elts]) - return newnode - - def visit_listcomp(self, node, parent): - """visit a ListComp node by returning a fresh instance of it""" - newnode = nodes.ListComp(node.lineno, node.col_offset, parent) - newnode.postinit( - self.visit(node.elt, newnode), - [self.visit(child, newnode) for child in node.generators], - ) - return newnode - - def visit_name(self, node, parent): - """visit a Name node by returning a fresh instance of it""" - context = self._get_context(node) - # True and False can be assigned to something in py2x, so we have to - # check first the context. - if context == astroid.Del: - newnode = nodes.DelName(node.id, node.lineno, node.col_offset, parent) - elif context == astroid.Store: - newnode = nodes.AssignName(node.id, node.lineno, node.col_offset, parent) - elif node.id in CONST_NAME_TRANSFORMS: - newnode = nodes.Const( - CONST_NAME_TRANSFORMS[node.id], - getattr(node, "lineno", None), - getattr(node, "col_offset", None), - parent, - ) - return newnode - else: - newnode = nodes.Name(node.id, node.lineno, node.col_offset, parent) - # XXX REMOVE me : - if context in (astroid.Del, astroid.Store): # 'Aug' ?? - self._save_assignment(newnode) - return newnode - - def visit_constant(self, node, parent): - """visit a Constant node by returning a fresh instance of Const""" - return nodes.Const( - node.value, - getattr(node, "lineno", None), - getattr(node, "col_offset", None), - parent, - ) - - # Not used in Python 3.8+. - def visit_str(self, node, parent): - """visit a String/Bytes node by returning a fresh instance of Const""" - return nodes.Const( - node.s, - getattr(node, "lineno", None), - getattr(node, "col_offset", None), - parent, - ) - - visit_bytes = visit_str - - # Not used in Python 3.8+. 
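The CONST_NAME_TRANSFORMS branch in visit_name above can only fire for Python 2 sources; on Python 3 these keywords never parse as Name nodes at all:

import ast

print(type(ast.parse("True").body[0].value).__name__)
# 'Constant' on 3.8+ ('NameConstant' on 3.4-3.7): True is a keyword,
# so visit_name never sees it when rebuilding Python 3 code
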
- def visit_num(self, node, parent): - """visit a Num node by returning a fresh instance of Const""" - return nodes.Const( - node.n, - getattr(node, "lineno", None), - getattr(node, "col_offset", None), - parent, - ) - - def visit_pass(self, node, parent): - """visit a Pass node by returning a fresh instance of it""" - return nodes.Pass(node.lineno, node.col_offset, parent) - - def visit_print(self, node, parent): - """visit a Print node by returning a fresh instance of it""" - newnode = nodes.Print(node.nl, node.lineno, node.col_offset, parent) - newnode.postinit( - _visit_or_none(node, "dest", self, newnode), - [self.visit(child, newnode) for child in node.values], - ) - return newnode - - def visit_raise(self, node, parent): - """visit a Raise node by returning a fresh instance of it""" - newnode = nodes.Raise(node.lineno, node.col_offset, parent) - # pylint: disable=too-many-function-args - newnode.postinit( - _visit_or_none(node, "type", self, newnode), - _visit_or_none(node, "inst", self, newnode), - _visit_or_none(node, "tback", self, newnode), - ) - return newnode - - def visit_return(self, node, parent): - """visit a Return node by returning a fresh instance of it""" - newnode = nodes.Return(node.lineno, node.col_offset, parent) - if node.value is not None: - newnode.postinit(self.visit(node.value, newnode)) - return newnode - - def visit_set(self, node, parent): - """visit a Set node by returning a fresh instance of it""" - newnode = nodes.Set(node.lineno, node.col_offset, parent) - newnode.postinit([self.visit(child, newnode) for child in node.elts]) - return newnode - - def visit_setcomp(self, node, parent): - """visit a SetComp node by returning a fresh instance of it""" - newnode = nodes.SetComp(node.lineno, node.col_offset, parent) - newnode.postinit( - self.visit(node.elt, newnode), - [self.visit(child, newnode) for child in node.generators], - ) - return newnode - - def visit_slice(self, node, parent): - """visit a Slice node by returning a fresh instance of it""" - newnode = nodes.Slice(parent=parent) - newnode.postinit( - _visit_or_none(node, "lower", self, newnode), - _visit_or_none(node, "upper", self, newnode), - _visit_or_none(node, "step", self, newnode), - ) - return newnode - - def visit_subscript(self, node, parent): - """visit a Subscript node by returning a fresh instance of it""" - context = self._get_context(node) - newnode = nodes.Subscript( - ctx=context, lineno=node.lineno, col_offset=node.col_offset, parent=parent - ) - newnode.postinit( - self.visit(node.value, newnode), self.visit(node.slice, newnode) - ) - return newnode - - def visit_tryexcept(self, node, parent): - """visit a TryExcept node by returning a fresh instance of it""" - newnode = nodes.TryExcept(node.lineno, node.col_offset, parent) - newnode.postinit( - [self.visit(child, newnode) for child in node.body], - [self.visit(child, newnode) for child in node.handlers], - [self.visit(child, newnode) for child in node.orelse], - ) - return newnode - - def visit_tryfinally(self, node, parent): - """visit a TryFinally node by returning a fresh instance of it""" - newnode = nodes.TryFinally(node.lineno, node.col_offset, parent) - newnode.postinit( - [self.visit(child, newnode) for child in node.body], - [self.visit(n, newnode) for n in node.finalbody], - ) - return newnode - - def visit_tuple(self, node, parent): - """visit a Tuple node by returning a fresh instance of it""" - context = self._get_context(node) - newnode = nodes.Tuple( - ctx=context, lineno=node.lineno, col_offset=node.col_offset, 
parent=parent - ) - newnode.postinit([self.visit(child, newnode) for child in node.elts]) - return newnode - - def visit_unaryop(self, node, parent): - """visit a UnaryOp node by returning a fresh instance of it""" - newnode = nodes.UnaryOp( - self._unary_op_classes[node.op.__class__], - node.lineno, - node.col_offset, - parent, - ) - newnode.postinit(self.visit(node.operand, newnode)) - return newnode - - def visit_while(self, node, parent): - """visit a While node by returning a fresh instance of it""" - newnode = nodes.While(node.lineno, node.col_offset, parent) - newnode.postinit( - self.visit(node.test, newnode), - [self.visit(child, newnode) for child in node.body], - [self.visit(child, newnode) for child in node.orelse], - ) - return newnode - - def visit_with(self, node, parent): - newnode = nodes.With(node.lineno, node.col_offset, parent) - expr = self.visit(node.context_expr, newnode) - if node.optional_vars is not None: - optional_vars = self.visit(node.optional_vars, newnode) - else: - optional_vars = None - - type_annotation = self.check_type_comment(node, parent=newnode) - newnode.postinit( - items=[(expr, optional_vars)], - body=[self.visit(child, newnode) for child in node.body], - type_annotation=type_annotation, - ) - return newnode - - def visit_yield(self, node, parent): - """visit a Yield node by returning a fresh instance of it""" - newnode = nodes.Yield(node.lineno, node.col_offset, parent) - if node.value is not None: - newnode.postinit(self.visit(node.value, newnode)) - return newnode - - -class TreeRebuilder3(TreeRebuilder): - """extend and overwrite TreeRebuilder for python3k""" - - def visit_arg(self, node, parent): - """visit an arg node by returning a fresh AssName instance""" - return self.visit_assignname(node, parent, node.arg) - - # Not used in Python 3.8+. 
- def visit_nameconstant(self, node, parent): - # in Python 3.4 we have NameConstant for True / False / None - return nodes.Const( - node.value, - getattr(node, "lineno", None), - getattr(node, "col_offset", None), - parent, - ) - - def visit_excepthandler(self, node, parent): - """visit an ExceptHandler node by returning a fresh instance of it""" - newnode = nodes.ExceptHandler(node.lineno, node.col_offset, parent) - if node.name: - name = self.visit_assignname(node, newnode, node.name) - else: - name = None - newnode.postinit( - _visit_or_none(node, "type", self, newnode), - name, - [self.visit(child, newnode) for child in node.body], - ) - return newnode - - def visit_nonlocal(self, node, parent): - """visit a Nonlocal node and return a new instance of it""" - return nodes.Nonlocal( - node.names, - getattr(node, "lineno", None), - getattr(node, "col_offset", None), - parent, - ) - - def visit_raise(self, node, parent): - """visit a Raise node by returning a fresh instance of it""" - newnode = nodes.Raise(node.lineno, node.col_offset, parent) - # no traceback; anyway it is not used in Pylint - newnode.postinit( - _visit_or_none(node, "exc", self, newnode), - _visit_or_none(node, "cause", self, newnode), - ) - return newnode - - def visit_starred(self, node, parent): - """visit a Starred node and return a new instance of it""" - context = self._get_context(node) - newnode = nodes.Starred( - ctx=context, lineno=node.lineno, col_offset=node.col_offset, parent=parent - ) - newnode.postinit(self.visit(node.value, newnode)) - return newnode - - def visit_try(self, node, parent): - # python 3.3 introduce a new Try node replacing - # TryFinally/TryExcept nodes - if node.finalbody: - newnode = nodes.TryFinally(node.lineno, node.col_offset, parent) - if node.handlers: - body = [self.visit_tryexcept(node, newnode)] - else: - body = [self.visit(child, newnode) for child in node.body] - newnode.postinit(body, [self.visit(n, newnode) for n in node.finalbody]) - return newnode - if node.handlers: - return self.visit_tryexcept(node, parent) - return None - - def visit_annassign(self, node, parent): - """visit an AnnAssign node by returning a fresh instance of it""" - newnode = nodes.AnnAssign(node.lineno, node.col_offset, parent) - annotation = _visit_or_none(node, "annotation", self, newnode) - newnode.postinit( - target=self.visit(node.target, newnode), - annotation=annotation, - simple=node.simple, - value=_visit_or_none(node, "value", self, newnode), - ) - return newnode - - def _visit_with(self, cls, node, parent): - if "items" not in node._fields: - # python < 3.3 - return super(TreeRebuilder3, self).visit_with(node, parent) - - newnode = cls(node.lineno, node.col_offset, parent) - - def visit_child(child): - expr = self.visit(child.context_expr, newnode) - var = _visit_or_none(child, "optional_vars", self, newnode) - return expr, var - - type_annotation = self.check_type_comment(node, parent=newnode) - newnode.postinit( - items=[visit_child(child) for child in node.items], - body=[self.visit(child, newnode) for child in node.body], - type_annotation=type_annotation, - ) - return newnode - - def visit_with(self, node, parent): - return self._visit_with(nodes.With, node, parent) - - def visit_yieldfrom(self, node, parent): - newnode = nodes.YieldFrom(node.lineno, node.col_offset, parent) - if node.value is not None: - newnode.postinit(self.visit(node.value, newnode)) - return newnode - - def visit_classdef(self, node, parent, newstyle=True): - return super(TreeRebuilder3, self).visit_classdef( - 
node, parent, newstyle=newstyle - ) - - # Async structs added in Python 3.5 - def visit_asyncfunctiondef(self, node, parent): - return self._visit_functiondef(nodes.AsyncFunctionDef, node, parent) - - def visit_asyncfor(self, node, parent): - return self._visit_for(nodes.AsyncFor, node, parent) - - def visit_await(self, node, parent): - newnode = nodes.Await(node.lineno, node.col_offset, parent) - newnode.postinit(value=self.visit(node.value, newnode)) - return newnode - - def visit_asyncwith(self, node, parent): - return self._visit_with(nodes.AsyncWith, node, parent) - - def visit_joinedstr(self, node, parent): - newnode = nodes.JoinedStr(node.lineno, node.col_offset, parent) - newnode.postinit([self.visit(child, newnode) for child in node.values]) - return newnode - - def visit_formattedvalue(self, node, parent): - newnode = nodes.FormattedValue(node.lineno, node.col_offset, parent) - newnode.postinit( - self.visit(node.value, newnode), - node.conversion, - _visit_or_none(node, "format_spec", self, newnode), - ) - return newnode - - def visit_namedexpr(self, node, parent): - newnode = nodes.NamedExpr(node.lineno, node.col_offset, parent) - newnode.postinit( - self.visit(node.target, newnode), self.visit(node.value, newnode) - ) - return newnode - - -TreeRebuilder = TreeRebuilder3 diff --git a/venv/lib/python3.8/site-packages/astroid/scoped_nodes.py b/venv/lib/python3.8/site-packages/astroid/scoped_nodes.py deleted file mode 100644 index d02b6534..00000000 --- a/venv/lib/python3.8/site-packages/astroid/scoped_nodes.py +++ /dev/null @@ -1,2836 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) 2006-2014 LOGILAB S.A. (Paris, FRANCE) -# Copyright (c) 2010 Daniel Harding -# Copyright (c) 2011, 2013-2015 Google, Inc. -# Copyright (c) 2013-2018 Claudiu Popa -# Copyright (c) 2013 Phil Schaf -# Copyright (c) 2014 Eevee (Alex Munroe) -# Copyright (c) 2015-2016 Florian Bruhin -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2015 Rene Zhang -# Copyright (c) 2015 Philip Lorenz -# Copyright (c) 2016-2017 Derek Gustafson -# Copyright (c) 2017-2018 Bryce Guinta -# Copyright (c) 2017-2018 Ashley Whetter -# Copyright (c) 2017 Łukasz Rogalski -# Copyright (c) 2017 David Euresti -# Copyright (c) 2018 Nick Drozd -# Copyright (c) 2018 Anthony Sottile -# Copyright (c) 2018 HoverHell - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -""" -This module contains the classes for "scoped" node, i.e. which are opening a -new local scope in the language definition : Module, ClassDef, FunctionDef (and -Lambda, GeneratorExp, DictComp and SetComp to some extent). -""" - -import builtins -import sys -import io -import itertools -from typing import Optional, List - -from astroid import bases -from astroid import context as contextmod -from astroid import exceptions -from astroid import decorators as decorators_mod -from astroid.interpreter import objectmodel -from astroid.interpreter import dunder_lookup -from astroid import manager -from astroid import mixins -from astroid import node_classes -from astroid import util - - -BUILTINS = builtins.__name__ -ITER_METHODS = ("__iter__", "__getitem__") -EXCEPTION_BASE_CLASSES = frozenset({"Exception", "BaseException"}) -objects = util.lazy_import("objects") - - -def _c3_merge(sequences, cls, context): - """Merges MROs in *sequences* to a single MRO using the C3 algorithm. - - Adapted from http://www.python.org/download/releases/2.3/mro/. 
- - """ - result = [] - while True: - sequences = [s for s in sequences if s] # purge empty sequences - if not sequences: - return result - for s1 in sequences: # find merge candidates among seq heads - candidate = s1[0] - for s2 in sequences: - if candidate in s2[1:]: - candidate = None - break # reject the current head, it appears later - else: - break - if not candidate: - # Show all the remaining bases, which were considered as - # candidates for the next mro sequence. - raise exceptions.InconsistentMroError( - message="Cannot create a consistent method resolution order " - "for MROs {mros} of class {cls!r}.", - mros=sequences, - cls=cls, - context=context, - ) - - result.append(candidate) - # remove the chosen candidate - for seq in sequences: - if seq[0] == candidate: - del seq[0] - return None - - -def clean_duplicates_mro(sequences, cls, context): - for sequence in sequences: - names = [ - (node.lineno, node.qname()) if node.name else None for node in sequence - ] - last_index = dict(map(reversed, enumerate(names))) - if names and names[0] is not None and last_index[names[0]] != 0: - raise exceptions.DuplicateBasesError( - message="Duplicates found in MROs {mros} for {cls!r}.", - mros=sequences, - cls=cls, - context=context, - ) - yield [ - node - for i, (node, name) in enumerate(zip(sequence, names)) - if name is None or last_index[name] == i - ] - - -def function_to_method(n, klass): - if isinstance(n, FunctionDef): - if n.type == "classmethod": - return bases.BoundMethod(n, klass) - if n.type != "staticmethod": - return bases.UnboundMethod(n) - return n - - -MANAGER = manager.AstroidManager() - - -def builtin_lookup(name): - """lookup a name into the builtin module - return the list of matching statements and the astroid for the builtin - module - """ - builtin_astroid = MANAGER.ast_from_module(builtins) - if name == "__dict__": - return builtin_astroid, () - try: - stmts = builtin_astroid.locals[name] - except KeyError: - stmts = () - return builtin_astroid, stmts - - -# TODO move this Mixin to mixins.py; problem: 'FunctionDef' in _scope_lookup -class LocalsDictNodeNG(node_classes.LookupMixIn, node_classes.NodeNG): - """ this class provides locals handling common to Module, FunctionDef - and ClassDef nodes, including a dict like interface for direct access - to locals information - """ - - # attributes below are set by the builder module or by raw factories - - locals = {} - """A map of the name of a local variable to the node defining the local. - - :type: dict(str, NodeNG) - """ - - def qname(self): - """Get the 'qualified' name of the node. - - For example: module.name, module.class.name ... - - :returns: The qualified name. - :rtype: str - """ - # pylint: disable=no-member; github.com/pycqa/astroid/issues/278 - if self.parent is None: - return self.name - return "%s.%s" % (self.parent.frame().qname(), self.name) - - def frame(self): - """The first parent frame node. - - A frame node is a :class:`Module`, :class:`FunctionDef`, - or :class:`ClassDef`. - - :returns: The first parent frame node. - :rtype: Module or FunctionDef or ClassDef - """ - return self - - def scope(self): - """The first parent node defining a new scope. - - :returns: The first parent scope node. 
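A compact standalone rendering of the merge step implemented by _c3_merge above, with a sanity check against CPython's own MRO (the class names are illustrative):

def c3_merge(sequences):
    result = []
    sequences = [list(s) for s in sequences]
    while True:
        sequences = [s for s in sequences if s]      # purge empty sequences
        if not sequences:
            return result
        for s1 in sequences:                         # find a viable head
            candidate = s1[0]
            if not any(candidate in s2[1:] for s2 in sequences):
                break
        else:
            raise TypeError("inconsistent hierarchy")
        result.append(candidate)
        for seq in sequences:                        # pop the chosen head
            if seq[0] is candidate:
                del seq[0]

class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass

merged = c3_merge([[D], list(B.__mro__), list(C.__mro__), [B, C]])
print(merged == list(D.__mro__))   # True
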
- :rtype: Module or FunctionDef or ClassDef or Lambda or GenExpr - """ - return self - - def _scope_lookup(self, node, name, offset=0): - """XXX method for interfacing the scope lookup""" - try: - stmts = node._filter_stmts(self.locals[name], self, offset) - except KeyError: - stmts = () - if stmts: - return self, stmts - if self.parent: # i.e. not Module - # nested scope: if parent scope is a function, that's fine - # else jump to the module - pscope = self.parent.scope() - if not pscope.is_function: - pscope = pscope.root() - return pscope.scope_lookup(node, name) - return builtin_lookup(name) # Module - - def set_local(self, name, stmt): - """Define that the given name is declared in the given statement node. - - .. seealso:: :meth:`scope` - - :param name: The name that is being defined. - :type name: str - - :param stmt: The statement that defines the given name. - :type stmt: NodeNG - """ - # assert not stmt in self.locals.get(name, ()), (self, stmt) - self.locals.setdefault(name, []).append(stmt) - - __setitem__ = set_local - - def _append_node(self, child): - """append a child, linking it in the tree""" - # pylint: disable=no-member; depending by the class - # which uses the current class as a mixin or base class. - # It's rewritten in 2.0, so it makes no sense for now - # to spend development time on it. - self.body.append(child) - child.parent = self - - def add_local_node(self, child_node, name=None): - """Append a child that should alter the locals of this scope node. - - :param child_node: The child node that will alter locals. - :type child_node: NodeNG - - :param name: The name of the local that will be altered by - the given child node. - :type name: str or None - """ - if name != "__class__": - # add __class__ node as a child will cause infinite recursion later! - self._append_node(child_node) - self.set_local(name or child_node.name, child_node) - - def __getitem__(self, item): - """The first node the defines the given local. - - :param item: The name of the locally defined object. - :type item: str - - :raises KeyError: If the name is not defined. - """ - return self.locals[item][0] - - def __iter__(self): - """Iterate over the names of locals defined in this scoped node. - - :returns: The names of the defined locals. - :rtype: iterable(str) - """ - return iter(self.keys()) - - def keys(self): - """The names of locals defined in this scoped node. - - :returns: The names of the defined locals. - :rtype: list(str) - """ - return list(self.locals.keys()) - - def values(self): - """The nodes that define the locals in this scoped node. - - :returns: The nodes that define locals. - :rtype: list(NodeNG) - """ - return [self[key] for key in self.keys()] - - def items(self): - """Get the names of the locals and the node that defines the local. - - :returns: The names of locals and their associated node. - :rtype: list(tuple(str, NodeNG)) - """ - return list(zip(self.keys(), self.values())) - - def __contains__(self, name): - """Check if a local is defined in this scope. - - :param name: The name of the local to check for. - :type name: str - - :returns: True if this node has a local of the given name, - False otherwise. - :rtype: bool - """ - return name in self.locals - - -class Module(LocalsDictNodeNG): - """Class representing an :class:`ast.Module` node. - - >>> node = astroid.extract_node('import astroid') - >>> node - - >>> node.parent - - """ - - _astroid_fields = ("body",) - - fromlineno = 0 - """The first line that this node appears on in the source code. 
- - :type: int or None - """ - lineno = 0 - """The line that this node appears on in the source code. - - :type: int or None - """ - - # attributes below are set by the builder module or by raw factories - - file = None - """The path to the file that this ast has been extracted from. - - This will be ``None`` when the representation has been built from a - built-in module. - - :type: str or None - """ - file_bytes = None - """The string/bytes that this ast was built from. - - :type: str or bytes or None - """ - file_encoding = None - """The encoding of the source file. - - This is used to get unicode out of a source file. - Python 2 only. - - :type: str or None - """ - name = None - """The name of the module. - - :type: str or None - """ - pure_python = None - """Whether the ast was built from source. - - :type: bool or None - """ - package = None - """Whether the node represents a package or a module. - - :type: bool or None - """ - globals = None - """A map of the name of a global variable to the node defining the global. - - :type: dict(str, NodeNG) - """ - - # Future imports - future_imports = None - """The imports from ``__future__``. - - :type: set(str) or None - """ - special_attributes = objectmodel.ModuleModel() - """The names of special attributes that this module has. - - :type: objectmodel.ModuleModel - """ - - # names of module attributes available through the global scope - scope_attrs = {"__name__", "__doc__", "__file__", "__path__", "__package__"} - """The names of module attributes available through the global scope. - - :type: str(str) - """ - - _other_fields = ( - "name", - "doc", - "file", - "path", - "package", - "pure_python", - "future_imports", - ) - _other_other_fields = ("locals", "globals") - - def __init__( - self, - name, - doc, - file=None, - path: Optional[List[str]] = None, - package=None, - parent=None, - pure_python=True, - ): - """ - :param name: The name of the module. - :type name: str - - :param doc: The module docstring. - :type doc: str - - :param file: The path to the file that this ast has been extracted from. - :type file: str or None - - :param path: - :type path: Optional[List[str]] - - :param package: Whether the node represents a package or a module. - :type package: bool or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - - :param pure_python: Whether the ast was built from source. - :type pure_python: bool or None - """ - self.name = name - self.doc = doc - self.file = file - self.path = path - self.package = package - self.parent = parent - self.pure_python = pure_python - self.locals = self.globals = {} - """A map of the name of a local variable to the node defining the local. - - :type: dict(str, NodeNG) - """ - self.body = [] - """The contents of the module. - - :type: list(NodeNG) or None - """ - self.future_imports = set() - - # pylint: enable=redefined-builtin - - def postinit(self, body=None): - """Do some setup after initialisation. - - :param body: The contents of the module. - :type body: list(NodeNG) or None - """ - self.body = body - - def _get_stream(self): - if self.file_bytes is not None: - return io.BytesIO(self.file_bytes) - if self.file is not None: - stream = open(self.file, "rb") - return stream - return None - - def stream(self): - """Get a stream to the underlying file or bytes. - - :type: file or io.BytesIO or None - """ - return self._get_stream() - - def block_range(self, lineno): - """Get a range from where this node starts to where this node ends. 
- - :param lineno: Unused. - :type lineno: int - - :returns: The range of line numbers that this node belongs to. - :rtype: tuple(int, int) - """ - return self.fromlineno, self.tolineno - - def scope_lookup(self, node, name, offset=0): - """Lookup where the given variable is assigned. - - :param node: The node to look for assignments up to. - Any assignments after the given node are ignored. - :type node: NodeNG - - :param name: The name of the variable to find assignments for. - :type name: str - - :param offset: The line offset to filter statements up to. - :type offset: int - - :returns: This scope node and the list of assignments associated to the - given name according to the scope where it has been found (locals, - globals or builtin). - :rtype: tuple(str, list(NodeNG)) - """ - if name in self.scope_attrs and name not in self.locals: - try: - return self, self.getattr(name) - except exceptions.AttributeInferenceError: - return self, () - return self._scope_lookup(node, name, offset) - - def pytype(self): - """Get the name of the type that this node represents. - - :returns: The name of the type. - :rtype: str - """ - return "%s.module" % BUILTINS - - def display_type(self): - """A human readable type of this node. - - :returns: The type of this node. - :rtype: str - """ - return "Module" - - def getattr(self, name, context=None, ignore_locals=False): - result = [] - name_in_locals = name in self.locals - - if name in self.special_attributes and not ignore_locals and not name_in_locals: - result = [self.special_attributes.lookup(name)] - elif not ignore_locals and name_in_locals: - result = self.locals[name] - elif self.package: - try: - result = [self.import_module(name, relative_only=True)] - except (exceptions.AstroidBuildingError, SyntaxError) as exc: - raise exceptions.AttributeInferenceError( - target=self, attribute=name, context=context - ) from exc - result = [n for n in result if not isinstance(n, node_classes.DelName)] - if result: - return result - raise exceptions.AttributeInferenceError( - target=self, attribute=name, context=context - ) - - def igetattr(self, name, context=None): - """Infer the possible values of the given variable. - - :param name: The name of the variable to infer. - :type name: str - - :returns: The inferred possible values. - :rtype: iterable(NodeNG) or None - """ - # set lookup name since this is necessary to infer on import nodes for - # instance - context = contextmod.copy_context(context) - context.lookupname = name - try: - return bases._infer_stmts(self.getattr(name, context), context, frame=self) - except exceptions.AttributeInferenceError as error: - raise exceptions.InferenceError( - error.message, target=self, attribute=name, context=context - ) from error - - def fully_defined(self): - """Check if this module has been build from a .py file. - - If so, the module contains a complete representation, - including the code. - - :returns: True if the module has been built from a .py file. - :rtype: bool - """ - return self.file is not None and self.file.endswith(".py") - - def statement(self): - """The first parent node, including self, marked as statement node. - - :returns: The first parent statement. - :rtype: NodeNG - """ - return self - - def previous_sibling(self): - """The previous sibling statement. - - :returns: The previous sibling statement node. - :rtype: NodeNG or None - """ - - def next_sibling(self): - """The next sibling statement node. - - :returns: The next sibling statement node. 
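``Module.getattr`` prefers real locals, then the special module attributes, and ``igetattr`` layers inference on top. A small sketch of both paths, again assuming astroid is importable (output comments are indicative):

import astroid

module = astroid.parse("value = 41 + 1")

# A defined name: getattr returns the defining node(s),
# igetattr infers the possible runtime values.
print(module.getattr("value"))               # [<AssignName ...>]
print(next(module.igetattr("value")).value)  # 42

# An undefined special attribute falls back to special_attributes,
# which is also how scope_lookup resolves names in scope_attrs.
print(module.getattr("__name__"))            # [<Const ...>]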
- :rtype: NodeNG or None - """ - - _absolute_import_activated = True - - def absolute_import_activated(self): - """Whether :pep:`328` absolute import behaviour has been enabled. - - :returns: True if :pep:`328` has been enabled, False otherwise. - :rtype: bool - """ - return self._absolute_import_activated - - def import_module(self, modname, relative_only=False, level=None): - """Get the ast for a given module as if imported from this module. - - :param modname: The name of the module to "import". - :type modname: str - - :param relative_only: Whether to only consider relative imports. - :type relative_only: bool - - :param level: The level of relative import. - :type level: int or None - - :returns: The imported module ast. - :rtype: NodeNG - """ - if relative_only and level is None: - level = 0 - absmodname = self.relative_to_absolute_name(modname, level) - - try: - return MANAGER.ast_from_module_name(absmodname) - except exceptions.AstroidBuildingError: - # we only want to import a sub module or package of this module, - # skip here - if relative_only: - raise - return MANAGER.ast_from_module_name(modname) - - def relative_to_absolute_name(self, modname, level): - """Get the absolute module name for a relative import. - - The relative import can be implicit or explicit. - - :param modname: The module name to convert. - :type modname: str - - :param level: The level of relative import. - :type level: int - - :returns: The absolute module name. - :rtype: str - - :raises TooManyLevelsError: When the relative import refers to a - module too far above this one. - """ - # XXX this returns non sens when called on an absolute import - # like 'pylint.checkers.astroid.utils' - # XXX doesn't return absolute name if self.name isn't absolute name - if self.absolute_import_activated() and level is None: - return modname - if level: - if self.package: - level = level - 1 - if level and self.name.count(".") < level: - raise exceptions.TooManyLevelsError(level=level, name=self.name) - - package_name = self.name.rsplit(".", level)[0] - elif self.package: - package_name = self.name - else: - package_name = self.name.rsplit(".", 1)[0] - - if package_name: - if not modname: - return package_name - return "%s.%s" % (package_name, modname) - return modname - - def wildcard_import_names(self): - """The list of imported names when this module is 'wildcard imported'. - - It doesn't include the '__builtins__' name which is added by the - current CPython implementation of wildcard imports. - - :returns: The list of imported names. - :rtype: list(str) - """ - # We separate the different steps of lookup in try/excepts - # to avoid catching too many Exceptions - default = [name for name in self.keys() if not name.startswith("_")] - try: - all_values = self["__all__"] - except KeyError: - return default - - try: - explicit = next(all_values.assigned_stmts()) - except exceptions.InferenceError: - return default - except AttributeError: - # not an assignment node - # XXX infer? - return default - - # Try our best to detect the exported name. 
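The level arithmetic in ``relative_to_absolute_name`` above is easiest to see with concrete values; the dotted names below are made up purely for illustration:

import astroid

# Pretend this module is pkg/sub/mod.py: dotted name "pkg.sub.mod",
# a plain module rather than a package.
module = astroid.parse("x = 1")
module.name = "pkg.sub.mod"
module.package = False

# from . import sibling  -> strip one trailing component
print(module.relative_to_absolute_name("sibling", level=1))  # pkg.sub.sibling
# from .. import other   -> strip two
print(module.relative_to_absolute_name("other", level=2))    # pkg.other
# More dots than the module name has levels raises TooManyLevelsError.
try:
    module.relative_to_absolute_name("x", level=4)
except astroid.exceptions.TooManyLevelsError:
    print("relative import goes above the top-level package")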
-        inferred = []
-        try:
-            explicit = next(explicit.infer())
-        except exceptions.InferenceError:
-            return default
-        if not isinstance(explicit, (node_classes.Tuple, node_classes.List)):
-            return default
-
-        str_const = lambda node: (
-            isinstance(node, node_classes.Const) and isinstance(node.value, str)
-        )
-        for node in explicit.elts:
-            if str_const(node):
-                inferred.append(node.value)
-            else:
-                try:
-                    inferred_node = next(node.infer())
-                except exceptions.InferenceError:
-                    continue
-                if str_const(inferred_node):
-                    inferred.append(inferred_node.value)
-        return inferred
-
-    def public_names(self):
-        """The list of the names that are publicly available in this module.
-
-        :returns: The list of public names.
-        :rtype: list(str)
-        """
-        return [name for name in self.keys() if not name.startswith("_")]
-
-    def bool_value(self):
-        """Determine the boolean value of this node.
-
-        :returns: The boolean value of this node.
-            For a :class:`Module` this is always ``True``.
-        :rtype: bool
-        """
-        return True
-
-    def get_children(self):
-        yield from self.body
-
-
-class ComprehensionScope(LocalsDictNodeNG):
-    """Scoping for different types of comprehensions."""
-
-    def frame(self):
-        """The first parent frame node.
-
-        A frame node is a :class:`Module`, :class:`FunctionDef`,
-        or :class:`ClassDef`.
-
-        :returns: The first parent frame node.
-        :rtype: Module or FunctionDef or ClassDef
-        """
-        return self.parent.frame()
-
-    scope_lookup = LocalsDictNodeNG._scope_lookup
-
-
-class GeneratorExp(ComprehensionScope):
-    """Class representing an :class:`ast.GeneratorExp` node.
-
-    >>> node = astroid.extract_node('(thing for thing in things if thing)')
-    >>> node
-
-    """
-
-    _astroid_fields = ("elt", "generators")
-    _other_other_fields = ("locals",)
-    elt = None
-    """The element that forms the output of the expression.
-
-    :type: NodeNG or None
-    """
-    generators = None
-    """The generators that are looped through.
-
-    :type: list(Comprehension) or None
-    """
-
-    def __init__(self, lineno=None, col_offset=None, parent=None):
-        """
-        :param lineno: The line that this node appears on in the source code.
-        :type lineno: int or None
-
-        :param col_offset: The column that this node appears on in the
-            source code.
-        :type col_offset: int or None
-
-        :param parent: The parent node in the syntax tree.
-        :type parent: NodeNG or None
-        """
-        self.locals = {}
-        """A map of the name of a local variable to the node defining the local.
-
-        :type: dict(str, NodeNG)
-        """
-
-        super(GeneratorExp, self).__init__(lineno, col_offset, parent)
-
-    def postinit(self, elt=None, generators=None):
-        """Do some setup after initialisation.
-
-        :param elt: The element that forms the output of the expression.
-        :type elt: NodeNG or None
-
-        :param generators: The generators that are looped through.
-        :type generators: list(Comprehension) or None
-        """
-        self.elt = elt
-        if generators is None:
-            self.generators = []
-        else:
-            self.generators = generators
-
-    def bool_value(self):
-        """Determine the boolean value of this node.
-
-        :returns: The boolean value of this node.
-            For a :class:`GeneratorExp` this is always ``True``.
-        :rtype: bool
-        """
-        return True
-
-    def get_children(self):
-        yield self.elt
-
-        yield from self.generators
-
-
-class DictComp(ComprehensionScope):
-    """Class representing an :class:`ast.DictComp` node.
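Concretely, the ``__all__`` handling above behaves like this sketch (astroid assumed importable, printed lists indicative):

import astroid

module = astroid.parse('''
__all__ = ["exported", "also_exported"]
exported = 1
also_exported = 2
_private = 3
unlisted = 4
''')

# public_names ignores __all__; it only filters leading underscores.
print(sorted(module.public_names()))
# ['also_exported', 'exported', 'unlisted']

# wildcard_import_names honours __all__ when it can be inferred.
print(sorted(module.wildcard_import_names()))
# ['also_exported', 'exported']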
- - >>> node = astroid.extract_node('{k:v for k, v in things if k > v}') - >>> node - - """ - - _astroid_fields = ("key", "value", "generators") - _other_other_fields = ("locals",) - key = None - """What produces the keys. - - :type: NodeNG or None - """ - value = None - """What produces the values. - - :type: NodeNG or None - """ - generators = None - """The generators that are looped through. - - :type: list(Comprehension) or None - """ - - def __init__(self, lineno=None, col_offset=None, parent=None): - """ - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.locals = {} - """A map of the name of a local variable to the node defining the local. - - :type: dict(str, NodeNG) - """ - - super(DictComp, self).__init__(lineno, col_offset, parent) - - def postinit(self, key=None, value=None, generators=None): - """Do some setup after initialisation. - - :param key: What produces the keys. - :type key: NodeNG or None - - :param value: What produces the values. - :type value: NodeNG or None - - :param generators: The generators that are looped through. - :type generators: list(Comprehension) or None - """ - self.key = key - self.value = value - if generators is None: - self.generators = [] - else: - self.generators = generators - - def bool_value(self): - """Determine the boolean value of this node. - - :returns: The boolean value of this node. - For a :class:`DictComp` this is always :class:`Uninferable`. - :rtype: Uninferable - """ - return util.Uninferable - - def get_children(self): - yield self.key - yield self.value - - yield from self.generators - - -class SetComp(ComprehensionScope): - """Class representing an :class:`ast.SetComp` node. - - >>> node = astroid.extract_node('{thing for thing in things if thing}') - >>> node - - """ - - _astroid_fields = ("elt", "generators") - _other_other_fields = ("locals",) - elt = None - """The element that forms the output of the expression. - - :type: NodeNG or None - """ - generators = None - """The generators that are looped through. - - :type: list(Comprehension) or None - """ - - def __init__(self, lineno=None, col_offset=None, parent=None): - """ - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.locals = {} - """A map of the name of a local variable to the node defining the local. - - :type: dict(str, NodeNG) - """ - - super(SetComp, self).__init__(lineno, col_offset, parent) - - def postinit(self, elt=None, generators=None): - """Do some setup after initialisation. - - :param elt: The element that forms the output of the expression. - :type elt: NodeNG or None - - :param generators: The generators that are looped through. - :type generators: list(Comprehension) or None - """ - self.elt = elt - if generators is None: - self.generators = [] - else: - self.generators = generators - - def bool_value(self): - """Determine the boolean value of this node. - - :returns: The boolean value of this node. - For a :class:`SetComp` this is always :class:`Uninferable`. 
-        :rtype: Uninferable
-        """
-        return util.Uninferable
-
-    def get_children(self):
-        yield self.elt
-
-        yield from self.generators
-
-
-class _ListComp(node_classes.NodeNG):
-    """Class representing an :class:`ast.ListComp` node.
-
-    >>> node = astroid.extract_node('[thing for thing in things if thing]')
-    >>> node
-
-    """
-
-    _astroid_fields = ("elt", "generators")
-    elt = None
-    """The element that forms the output of the expression.
-
-    :type: NodeNG or None
-    """
-    generators = None
-    """The generators that are looped through.
-
-    :type: list(Comprehension) or None
-    """
-
-    def postinit(self, elt=None, generators=None):
-        """Do some setup after initialisation.
-
-        :param elt: The element that forms the output of the expression.
-        :type elt: NodeNG or None
-
-        :param generators: The generators that are looped through.
-        :type generators: list(Comprehension) or None
-        """
-        self.elt = elt
-        self.generators = generators
-
-    def bool_value(self):
-        """Determine the boolean value of this node.
-
-        :returns: The boolean value of this node.
-            For a :class:`ListComp` this is always :class:`Uninferable`.
-        :rtype: Uninferable
-        """
-        return util.Uninferable
-
-    def get_children(self):
-        yield self.elt
-
-        yield from self.generators
-
-
-class ListComp(_ListComp, ComprehensionScope):
-    """Class representing an :class:`ast.ListComp` node.
-
-    >>> node = astroid.extract_node('[thing for thing in things if thing]')
-    >>> node
-
-    """
-
-    _other_other_fields = ("locals",)
-
-    def __init__(self, lineno=None, col_offset=None, parent=None):
-        self.locals = {}
-        """A map of the name of a local variable to the node defining it.
-
-        :type: dict(str, NodeNG)
-        """
-
-        super(ListComp, self).__init__(lineno, col_offset, parent)
-
-
-def _infer_decorator_callchain(node):
-    """Detect decorator call chaining and see if the end result is a
-    static or a classmethod.
-    """
-    if not isinstance(node, FunctionDef):
-        return None
-    if not node.parent:
-        return None
-    try:
-        result = next(node.infer_call_result(node.parent))
-    except exceptions.InferenceError:
-        return None
-    if isinstance(result, bases.Instance):
-        result = result._proxied
-    if isinstance(result, ClassDef):
-        if result.is_subtype_of("%s.classmethod" % BUILTINS):
-            return "classmethod"
-        if result.is_subtype_of("%s.staticmethod" % BUILTINS):
-            return "staticmethod"
-    return None
-
-
-class Lambda(mixins.FilterStmtsMixin, LocalsDictNodeNG):
-    """Class representing an :class:`ast.Lambda` node.
-
-    >>> node = astroid.extract_node('lambda arg: arg + 1')
-    >>> node
-    <Lambda.<lambda> l.1 at 0x7f23b2e41518>
-    """
-
-    _astroid_fields = ("args", "body")
-    _other_other_fields = ("locals",)
-    name = "<lambda>"
-    is_lambda = True
-
-    def implicit_parameters(self):
-        return 0
-
-    # function's type, 'function' | 'method' | 'staticmethod' | 'classmethod'
-    @property
-    def type(self):
-        """Whether this is a method or function.
-
-        :returns: 'method' if this is a method, 'function' otherwise.
-        :rtype: str
-        """
-        # pylint: disable=no-member
-        if self.args.args and self.args.args[0].name == "self":
-            if isinstance(self.parent.scope(), ClassDef):
-                return "method"
-        return "function"
-
-    def __init__(self, lineno=None, col_offset=None, parent=None):
-        """
-        :param lineno: The line that this node appears on in the source code.
-        :type lineno: int or None
-
-        :param col_offset: The column that this node appears on in the
-            source code.
-        :type col_offset: int or None
-
-        :param parent: The parent node in the syntax tree.
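The scoping and ``bool_value`` behaviour of the comprehension nodes above can be observed directly; a short sketch, assuming astroid is importable:

import astroid
from astroid import util

genexp = astroid.extract_node("(x for x in y)")
listcomp = astroid.extract_node("[x for x in y]")

# The loop variable is local to the comprehension scope, not the module.
print("x" in listcomp.locals)                     # True

# A generator expression is always truthy; a list's emptiness cannot
# be known statically, hence Uninferable.
print(genexp.bool_value())                        # True
print(listcomp.bool_value() is util.Uninferable)  # True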
- :type parent: NodeNG or None - """ - self.locals = {} - """A map of the name of a local variable to the node defining it. - - :type: dict(str, NodeNG) - """ - - self.args = [] - """The arguments that the function takes. - - :type: Arguments or list - """ - - self.body = [] - """The contents of the function body. - - :type: list(NodeNG) - """ - - super(Lambda, self).__init__(lineno, col_offset, parent) - - def postinit(self, args, body): - """Do some setup after initialisation. - - :param args: The arguments that the function takes. - :type args: Arguments - - :param body: The contents of the function body. - :type body: list(NodeNG) - """ - self.args = args - self.body = body - - def pytype(self): - """Get the name of the type that this node represents. - - :returns: The name of the type. - :rtype: str - """ - if "method" in self.type: - return "%s.instancemethod" % BUILTINS - return "%s.function" % BUILTINS - - def display_type(self): - """A human readable type of this node. - - :returns: The type of this node. - :rtype: str - """ - if "method" in self.type: - return "Method" - return "Function" - - def callable(self): - """Whether this node defines something that is callable. - - :returns: True if this defines something that is callable, - False otherwise. - For a :class:`Lambda` this is always ``True``. - :rtype: bool - """ - return True - - def argnames(self): - """Get the names of each of the arguments. - - :returns: The names of the arguments. - :rtype: list(str) - """ - # pylint: disable=no-member; github.com/pycqa/astroid/issues/291 - # args is in fact redefined later on by postinit. Can't be changed - # to None due to a strong interaction between Lambda and FunctionDef. - - if self.args.args: # maybe None with builtin functions - names = _rec_get_names(self.args.args) - else: - names = [] - if self.args.vararg: - names.append(self.args.vararg) - if self.args.kwarg: - names.append(self.args.kwarg) - return names - - def infer_call_result(self, caller, context=None): - """Infer what the function returns when called. - - :param caller: Unused - :type caller: object - """ - # pylint: disable=no-member; github.com/pycqa/astroid/issues/291 - # args is in fact redefined later on by postinit. Can't be changed - # to None due to a strong interaction between Lambda and FunctionDef. - return self.body.infer(context) - - def scope_lookup(self, node, name, offset=0): - """Lookup where the given names is assigned. - - :param node: The node to look for assignments up to. - Any assignments after the given node are ignored. - :type node: NodeNG - - :param name: The name to find assignments for. - :type name: str - - :param offset: The line offset to filter statements up to. - :type offset: int - - :returns: This scope node and the list of assignments associated to the - given name according to the scope where it has been found (locals, - globals or builtin). - :rtype: tuple(str, list(NodeNG)) - """ - # pylint: disable=no-member; github.com/pycqa/astroid/issues/291 - # args is in fact redefined later on by postinit. Can't be changed - # to None due to a strong interaction between Lambda and FunctionDef. 
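A sketch of the ``Lambda`` accessors defined above (astroid assumed importable; printed values indicative):

import astroid

lam = astroid.extract_node("lambda a, b=1, *rest, **kw: a")
print(lam.argnames())   # ['a', 'b', 'rest', 'kw']
print(lam.pytype())     # 'builtins.function'
print(lam.type)         # 'function'

# With a first argument named `self` inside a class body,
# the `type` property reports a method instead.
cls = astroid.extract_node("class A:\n    f = lambda self: 1")
print(cls.body[0].value.type)  # 'method'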
- - if node in self.args.defaults or node in self.args.kw_defaults: - frame = self.parent.frame() - # line offset to avoid that def func(f=func) resolve the default - # value to the defined function - offset = -1 - else: - # check this is not used in function decorators - frame = self - return frame._scope_lookup(node, name, offset) - - def bool_value(self): - """Determine the boolean value of this node. - - :returns: The boolean value of this node. - For a :class:`Lambda` this is always ``True``. - :rtype: bool - """ - return True - - def get_children(self): - yield self.args - yield self.body - - -class FunctionDef(mixins.MultiLineBlockMixin, node_classes.Statement, Lambda): - """Class representing an :class:`ast.FunctionDef`. - - >>> node = astroid.extract_node(''' - ... def my_func(arg): - ... return arg + 1 - ... ''') - >>> node - - """ - - _astroid_fields = ("decorators", "args", "returns", "body") - _multi_line_block_fields = ("body",) - returns = None - decorators = None - """The decorators that are applied to this method or function. - - :type: Decorators or None - """ - special_attributes = objectmodel.FunctionModel() - """The names of special attributes that this function has. - - :type: objectmodel.FunctionModel - """ - is_function = True - """Whether this node indicates a function. - - For a :class:`FunctionDef` this is always ``True``. - - :type: bool - """ - type_annotation = None - """If present, this will contain the type annotation passed by a type comment - - :type: NodeNG or None - """ - type_comment_args = None - """ - If present, this will contain the type annotation for arguments - passed by a type comment - """ - type_comment_returns = None - """If present, this will contain the return type annotation, passed by a type comment""" - # attributes below are set by the builder module or by raw factories - _other_fields = ("name", "doc") - _other_other_fields = ( - "locals", - "_type", - "type_comment_returns", - "type_comment_args", - ) - _type = None - - def __init__(self, name=None, doc=None, lineno=None, col_offset=None, parent=None): - """ - :param name: The name of the function. - :type name: str or None - - :param doc: The function's docstring. - :type doc: str or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.name = name - """The name of the function. - - :type name: str or None - """ - - self.doc = doc - """The function's docstring. - - :type doc: str or None - """ - - self.instance_attrs = {} - super(FunctionDef, self).__init__(lineno, col_offset, parent) - if parent: - frame = parent.frame() - frame.set_local(name, self) - - # pylint: disable=arguments-differ; different than Lambdas - def postinit( - self, - args, - body, - decorators=None, - returns=None, - type_comment_returns=None, - type_comment_args=None, - ): - """Do some setup after initialisation. - - :param args: The arguments that the function takes. - :type args: Arguments or list - - :param body: The contents of the function body. - :type body: list(NodeNG) - - :param decorators: The decorators that are applied to this - method or function. - :type decorators: Decorators or None - :params type_comment_returns: - The return type annotation passed via a type comment. 
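The ``offset = -1`` special case in ``scope_lookup`` above is what makes a default value resolve to the enclosing binding rather than to the function being defined. A sketch of the effect:

import astroid

module = astroid.parse('''
func = "outer value"
def func(arg=func):  # the default must see the string, not the function
    return arg
''')
default = module.body[1].args.defaults[0]
print(next(default.infer()).value)  # 'outer value'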
- :params type_comment_args: - The args type annotation passed via a type comment. - """ - self.args = args - self.body = body - self.decorators = decorators - self.returns = returns - self.type_comment_returns = type_comment_returns - self.type_comment_args = type_comment_args - - @decorators_mod.cachedproperty - def extra_decorators(self): - """The extra decorators that this function can have. - - Additional decorators are considered when they are used as - assignments, as in ``method = staticmethod(method)``. - The property will return all the callables that are used for - decoration. - - :type: list(NodeNG) - """ - frame = self.parent.frame() - if not isinstance(frame, ClassDef): - return [] - - decorators = [] - for assign in frame._get_assign_nodes(): - if isinstance(assign.value, node_classes.Call) and isinstance( - assign.value.func, node_classes.Name - ): - for assign_node in assign.targets: - if not isinstance(assign_node, node_classes.AssignName): - # Support only `name = callable(name)` - continue - - if assign_node.name != self.name: - # Interested only in the assignment nodes that - # decorates the current method. - continue - try: - meth = frame[self.name] - except KeyError: - continue - else: - # Must be a function and in the same frame as the - # original method. - if ( - isinstance(meth, FunctionDef) - and assign_node.frame() == frame - ): - decorators.append(assign.value) - return decorators - - @decorators_mod.cachedproperty - def type(self): # pylint: disable=invalid-overridden-method - """The function type for this node. - - Possible values are: method, function, staticmethod, classmethod. - - :type: str - """ - builtin_descriptors = {"classmethod", "staticmethod"} - - for decorator in self.extra_decorators: - if decorator.func.name in builtin_descriptors: - return decorator.func.name - - frame = self.parent.frame() - type_name = "function" - if isinstance(frame, ClassDef): - if self.name == "__new__": - return "classmethod" - if sys.version_info >= (3, 6) and self.name == "__init_subclass__": - return "classmethod" - - type_name = "method" - - if not self.decorators: - return type_name - - for node in self.decorators.nodes: - if isinstance(node, node_classes.Name): - if node.name in builtin_descriptors: - return node.name - - if isinstance(node, node_classes.Call): - # Handle the following case: - # @some_decorator(arg1, arg2) - # def func(...) - # - try: - current = next(node.func.infer()) - except exceptions.InferenceError: - continue - _type = _infer_decorator_callchain(current) - if _type is not None: - return _type - - try: - for inferred in node.infer(): - # Check to see if this returns a static or a class method. - _type = _infer_decorator_callchain(inferred) - if _type is not None: - return _type - - if not isinstance(inferred, ClassDef): - continue - for ancestor in inferred.ancestors(): - if not isinstance(ancestor, ClassDef): - continue - if ancestor.is_subtype_of("%s.classmethod" % BUILTINS): - return "classmethod" - if ancestor.is_subtype_of("%s.staticmethod" % BUILTINS): - return "staticmethod" - except exceptions.InferenceError: - pass - return type_name - - @decorators_mod.cachedproperty - def fromlineno(self): - """The first line that this node appears on in the source code. 
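Putting ``extra_decorators`` and the ``type`` property above together, method classification covers both decorator syntax and the legacy assignment idiom the docstring mentions. A sketch:

import astroid

cls = astroid.parse('''
class A:
    def regular(self):
        pass

    @classmethod
    def cm(cls):
        pass

    def legacy(self):
        pass

    legacy = staticmethod(legacy)  # picked up via extra_decorators
''').body[0]

print(cls["regular"].type)  # 'method'
print(cls["cm"].type)       # 'classmethod'
print(cls["legacy"].type)   # 'staticmethod'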
-
-        :type: int or None
-        """
-        # lineno is the line number of the first decorator, we want the def
-        # statement lineno
-        lineno = self.lineno
-        if self.decorators is not None:
-            lineno += sum(
-                node.tolineno - node.lineno + 1 for node in self.decorators.nodes
-            )
-
-        return lineno
-
-    @decorators_mod.cachedproperty
-    def blockstart_tolineno(self):
-        """The line on which the beginning of this block ends.
-
-        :type: int
-        """
-        return self.args.tolineno
-
-    def block_range(self, lineno):
-        """Get a range from the given line number to where this node ends.
-
-        :param lineno: Unused.
-        :type lineno: int
-
-        :returns: The range of line numbers that this node belongs to.
-        :rtype: tuple(int, int)
-        """
-        return self.fromlineno, self.tolineno
-
-    def getattr(self, name, context=None):
-        """this method doesn't look in the instance_attrs dictionary since it's
-        done by an Instance proxy at inference time.
-        """
-        if name in self.instance_attrs:
-            return self.instance_attrs[name]
-        if name in self.special_attributes:
-            return [self.special_attributes.lookup(name)]
-        raise exceptions.AttributeInferenceError(target=self, attribute=name)
-
-    def igetattr(self, name, context=None):
-        """Inferred getattr, which returns an iterator of inferred statements."""
-        try:
-            return bases._infer_stmts(self.getattr(name, context), context, frame=self)
-        except exceptions.AttributeInferenceError as error:
-            raise exceptions.InferenceError(
-                error.message, target=self, attribute=name, context=context
-            ) from error
-
-    def is_method(self):
-        """Check if this function node represents a method.
-
-        :returns: True if this is a method, False otherwise.
-        :rtype: bool
-        """
-        # check we are defined in a ClassDef, because this is usually expected
-        # (e.g. pylint...) when is_method() returns True
-        return self.type != "function" and isinstance(self.parent.frame(), ClassDef)
-
-    @decorators_mod.cached
-    def decoratornames(self):
-        """Get the qualified names of each of the decorators on this function.
-
-        :returns: The names of the decorators.
-        :rtype: set(str)
-        """
-        result = set()
-        decoratornodes = []
-        if self.decorators is not None:
-            decoratornodes += self.decorators.nodes
-        decoratornodes += self.extra_decorators
-        for decnode in decoratornodes:
-            try:
-                for infnode in decnode.infer():
-                    result.add(infnode.qname())
-            except exceptions.InferenceError:
-                continue
-        return result
-
-    def is_bound(self):
-        """Check if the function is bound to an instance or class.
-
-        :returns: True if the function is bound to an instance or class,
-            False otherwise.
-        :rtype: bool
-        """
-        return self.type == "classmethod"
-
-    def is_abstract(self, pass_is_abstract=True):
-        """Check if the method is abstract.
-
-        A method is considered abstract if any of the following is true:
-        * The only statement is 'raise NotImplementedError'
-        * The only statement is 'pass' and pass_is_abstract is True
-        * The method is annotated with abc.abstractproperty/abc.abstractmethod
-
-        :returns: True if the method is abstract, False otherwise.
- :rtype: bool - """ - if self.decorators: - for node in self.decorators.nodes: - try: - inferred = next(node.infer()) - except exceptions.InferenceError: - continue - if inferred and inferred.qname() in ( - "abc.abstractproperty", - "abc.abstractmethod", - ): - return True - - for child_node in self.body: - if isinstance(child_node, node_classes.Raise): - if child_node.raises_not_implemented(): - return True - return pass_is_abstract and isinstance(child_node, node_classes.Pass) - # empty function is the same as function with a single "pass" statement - if pass_is_abstract: - return True - - def is_generator(self): - """Check if this is a generator function. - - :returns: True is this is a generator function, False otherwise. - :rtype: bool - """ - return next(self._get_yield_nodes_skip_lambdas(), False) - - def infer_call_result(self, caller=None, context=None): - """Infer what the function returns when called. - - :returns: What the function returns. - :rtype: iterable(NodeNG or Uninferable) or None - """ - if self.is_generator(): - if isinstance(self, AsyncFunctionDef): - generator_cls = bases.AsyncGenerator - else: - generator_cls = bases.Generator - result = generator_cls(self) - yield result - return - # This is really a gigantic hack to work around metaclass generators - # that return transient class-generating functions. Pylint's AST structure - # cannot handle a base class object that is only used for calling __new__, - # but does not contribute to the inheritance structure itself. We inject - # a fake class into the hierarchy here for several well-known metaclass - # generators, and filter it out later. - if ( - self.name == "with_metaclass" - and len(self.args.args) == 1 - and self.args.vararg is not None - ): - metaclass = next(caller.args[0].infer(context)) - if isinstance(metaclass, ClassDef): - class_bases = [next(arg.infer(context)) for arg in caller.args[1:]] - new_class = ClassDef(name="temporary_class") - new_class.hide = True - new_class.parent = self - new_class.postinit( - bases=[base for base in class_bases if base != util.Uninferable], - body=[], - decorators=[], - metaclass=metaclass, - ) - yield new_class - return - returns = self._get_return_nodes_skip_functions() - - first_return = next(returns, None) - if not first_return: - if self.body and isinstance(self.body[-1], node_classes.Assert): - yield node_classes.Const(None) - return - - raise exceptions.InferenceError( - "The function does not have any return statements" - ) - - for returnnode in itertools.chain((first_return,), returns): - if returnnode.value is None: - yield node_classes.Const(None) - else: - try: - yield from returnnode.value.infer(context) - except exceptions.InferenceError: - yield util.Uninferable - - def bool_value(self): - """Determine the boolean value of this node. - - :returns: The boolean value of this node. - For a :class:`FunctionDef` this is always ``True``. - :rtype: bool - """ - return True - - def get_children(self): - if self.decorators is not None: - yield self.decorators - - yield self.args - - if self.returns is not None: - yield self.returns - - yield from self.body - - def scope_lookup(self, node, name, offset=0): - """Lookup where the given name is assigned.""" - if name == "__class__": - # __class__ is an implicit closure reference created by the compiler - # if any methods in a class body refer to either __class__ or super. - # In our case, we want to be able to look it up in the current scope - # when `__class__` is being used. 
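What ``infer_call_result`` above yields, for a plain function and for a generator (sketch; astroid assumed importable):

import astroid

fn = astroid.extract_node("def answer():\n    return 21 * 2")
print(next(fn.infer_call_result(caller=None)).value)  # 42

gen = astroid.extract_node("def counter():\n    yield 1")
print(gen.is_generator())                        # True
print(next(gen.infer_call_result(caller=None)))  # a Generator proxy object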
- frame = self.parent.frame() - if isinstance(frame, ClassDef): - return self, [frame] - return super().scope_lookup(node, name, offset) - - -class AsyncFunctionDef(FunctionDef): - """Class representing an :class:`ast.FunctionDef` node. - - A :class:`AsyncFunctionDef` is an asynchronous function - created with the `async` keyword. - - >>> node = astroid.extract_node(''' - async def func(things): - async for thing in things: - print(thing) - ''') - >>> node - - >>> node.body[0] - - """ - - -def _rec_get_names(args, names=None): - """return a list of all argument names""" - if names is None: - names = [] - for arg in args: - if isinstance(arg, node_classes.Tuple): - _rec_get_names(arg.elts, names) - else: - names.append(arg.name) - return names - - -def _is_metaclass(klass, seen=None): - """ Return if the given class can be - used as a metaclass. - """ - if klass.name == "type": - return True - if seen is None: - seen = set() - for base in klass.bases: - try: - for baseobj in base.infer(): - baseobj_name = baseobj.qname() - if baseobj_name in seen: - continue - - seen.add(baseobj_name) - if isinstance(baseobj, bases.Instance): - # not abstract - return False - if baseobj is util.Uninferable: - continue - if baseobj is klass: - continue - if not isinstance(baseobj, ClassDef): - continue - if baseobj._type == "metaclass": - return True - if _is_metaclass(baseobj, seen): - return True - except exceptions.InferenceError: - continue - return False - - -def _class_type(klass, ancestors=None): - """return a ClassDef node type to differ metaclass and exception - from 'regular' classes - """ - # XXX we have to store ancestors in case we have an ancestor loop - if klass._type is not None: - return klass._type - if _is_metaclass(klass): - klass._type = "metaclass" - elif klass.name.endswith("Exception"): - klass._type = "exception" - else: - if ancestors is None: - ancestors = set() - klass_name = klass.qname() - if klass_name in ancestors: - # XXX we are in loop ancestors, and have found no type - klass._type = "class" - return "class" - ancestors.add(klass_name) - for base in klass.ancestors(recurs=False): - name = _class_type(base, ancestors) - if name != "class": - if name == "metaclass" and not _is_metaclass(klass): - # don't propagate it if the current class - # can't be a metaclass - continue - klass._type = base.type - break - if klass._type is None: - klass._type = "class" - return klass._type - - -def get_wrapping_class(node): - """Get the class that wraps the given node. - - We consider that a class wraps a node if the class - is a parent for the said node. - - :returns: The class that wraps the given node - :rtype: ClassDef or None - """ - - klass = node.frame() - while klass is not None and not isinstance(klass, ClassDef): - if klass.parent is None: - klass = None - else: - klass = klass.parent.frame() - return klass - - -class ClassDef(mixins.FilterStmtsMixin, LocalsDictNodeNG, node_classes.Statement): - """Class representing an :class:`ast.ClassDef` node. - - >>> node = astroid.extract_node(''' - class Thing: - def my_meth(self, arg): - return arg + self.offset - ''') - >>> node - - """ - - # some of the attributes below are set by the builder module or - # by a raw factories - - # a dictionary of class instances attributes - _astroid_fields = ("decorators", "bases", "body") # name - - decorators = None - """The decorators that are applied to this class. 
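The ``_class_type`` helper above is what backs the ``ClassDef.type`` property; the three possible classifications in action (sketch):

import astroid

module = astroid.parse('''
class Plain:
    pass

class MyMeta(type):
    pass

class MyException(Exception):
    pass
''')
print(module["Plain"].type)        # 'class'
print(module["MyMeta"].type)       # 'metaclass' -- derives from type
print(module["MyException"].type)  # 'exception'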
- - :type: Decorators or None - """ - special_attributes = objectmodel.ClassModel() - """The names of special attributes that this class has. - - :type: objectmodel.ClassModel - """ - - _type = None - _metaclass_hack = False - hide = False - type = property( - _class_type, - doc=( - "The class type for this node.\n\n" - "Possible values are: class, metaclass, exception.\n\n" - ":type: str" - ), - ) - _other_fields = ("name", "doc") - _other_other_fields = ("locals", "_newstyle") - _newstyle = None - - def __init__(self, name=None, doc=None, lineno=None, col_offset=None, parent=None): - """ - :param name: The name of the class. - :type name: str or None - - :param doc: The function's docstring. - :type doc: str or None - - :param lineno: The line that this node appears on in the source code. - :type lineno: int or None - - :param col_offset: The column that this node appears on in the - source code. - :type col_offset: int or None - - :param parent: The parent node in the syntax tree. - :type parent: NodeNG or None - """ - self.instance_attrs = {} - self.locals = {} - """A map of the name of a local variable to the node defining it. - - :type: dict(str, NodeNG) - """ - - self.keywords = [] - """The keywords given to the class definition. - - This is usually for :pep:`3115` style metaclass declaration. - - :type: list(Keyword) or None - """ - - self.bases = [] - """What the class inherits from. - - :type: list(NodeNG) - """ - - self.body = [] - """The contents of the class body. - - :type: list(NodeNG) - """ - - self.name = name - """The name of the class. - - :type name: str or None - """ - - self.doc = doc - """The class' docstring. - - :type doc: str or None - """ - - super(ClassDef, self).__init__(lineno, col_offset, parent) - if parent is not None: - parent.frame().set_local(name, self) - - for local_name, node in self.implicit_locals(): - self.add_local_node(node, local_name) - - def implicit_parameters(self): - return 1 - - def implicit_locals(self): - """Get implicitly defined class definition locals. - - :returns: the the name and Const pair for each local - :rtype: tuple(tuple(str, node_classes.Const), ...) - """ - locals_ = (("__module__", self.special_attributes.attr___module__),) - # __qualname__ is defined in PEP3155 - locals_ += (("__qualname__", self.special_attributes.attr___qualname__),) - return locals_ - - # pylint: disable=redefined-outer-name - def postinit( - self, bases, body, decorators, newstyle=None, metaclass=None, keywords=None - ): - """Do some setup after initialisation. - - :param bases: What the class inherits from. - :type bases: list(NodeNG) - - :param body: The contents of the class body. - :type body: list(NodeNG) - - :param decorators: The decorators that are applied to this class. - :type decorators: Decorators or None - - :param newstyle: Whether this is a new style class or not. - :type newstyle: bool or None - - :param metaclass: The metaclass of this class. - :type metaclass: NodeNG or None - - :param keywords: The keywords given to the class definition. 
- :type keywords: list(Keyword) or None - """ - self.keywords = keywords - self.bases = bases - self.body = body - self.decorators = decorators - if newstyle is not None: - self._newstyle = newstyle - if metaclass is not None: - self._metaclass = metaclass - - def _newstyle_impl(self, context=None): - if context is None: - context = contextmod.InferenceContext() - if self._newstyle is not None: - return self._newstyle - for base in self.ancestors(recurs=False, context=context): - if base._newstyle_impl(context): - self._newstyle = True - break - klass = self.declared_metaclass() - # could be any callable, we'd need to infer the result of klass(name, - # bases, dict). punt if it's not a class node. - if klass is not None and isinstance(klass, ClassDef): - self._newstyle = klass._newstyle_impl(context) - if self._newstyle is None: - self._newstyle = False - return self._newstyle - - _newstyle = None - newstyle = property( - _newstyle_impl, - doc=("Whether this is a new style class or not\n\n" ":type: bool or None"), - ) - - @decorators_mod.cachedproperty - def blockstart_tolineno(self): - """The line on which the beginning of this block ends. - - :type: int - """ - if self.bases: - return self.bases[-1].tolineno - - return self.fromlineno - - def block_range(self, lineno): - """Get a range from the given line number to where this node ends. - - :param lineno: Unused. - :type lineno: int - - :returns: The range of line numbers that this node belongs to, - :rtype: tuple(int, int) - """ - return self.fromlineno, self.tolineno - - def pytype(self): - """Get the name of the type that this node represents. - - :returns: The name of the type. - :rtype: str - """ - if self.newstyle: - return "%s.type" % BUILTINS - return "%s.classobj" % BUILTINS - - def display_type(self): - """A human readable type of this node. - - :returns: The type of this node. - :rtype: str - """ - return "Class" - - def callable(self): - """Whether this node defines something that is callable. - - :returns: True if this defines something that is callable, - False otherwise. - For a :class:`ClassDef` this is always ``True``. - :rtype: bool - """ - return True - - def is_subtype_of(self, type_name, context=None): - """Whether this class is a subtype of the given type. - - :param type_name: The name of the type of check against. - :type type_name: str - - :returns: True if this class is a subtype of the given type, - False otherwise. - :rtype: bool - """ - if self.qname() == type_name: - return True - for anc in self.ancestors(context=context): - if anc.qname() == type_name: - return True - return False - - def _infer_type_call(self, caller, context): - name_node = next(caller.args[0].infer(context)) - if isinstance(name_node, node_classes.Const) and isinstance( - name_node.value, str - ): - name = name_node.value - else: - return util.Uninferable - - result = ClassDef(name, None) - - # Get the bases of the class. - class_bases = next(caller.args[1].infer(context)) - if isinstance(class_bases, (node_classes.Tuple, node_classes.List)): - result.bases = class_bases.itered() - else: - # There is currently no AST node that can represent an 'unknown' - # node (Uninferable is not an AST node), therefore we simply return Uninferable here - # although we know at least the name of the class. 
- return util.Uninferable - - # Get the members of the class - try: - members = next(caller.args[2].infer(context)) - except exceptions.InferenceError: - members = None - - if members and isinstance(members, node_classes.Dict): - for attr, value in members.items: - if isinstance(attr, node_classes.Const) and isinstance(attr.value, str): - result.locals[attr.value] = [value] - - result.parent = caller.parent - return result - - def infer_call_result(self, caller, context=None): - """infer what a class is returning when called""" - if ( - self.is_subtype_of("%s.type" % (BUILTINS,), context) - and len(caller.args) == 3 - ): - result = self._infer_type_call(caller, context) - yield result - return - - dunder_call = None - try: - metaclass = self.metaclass(context=context) - if metaclass is not None: - dunder_call = next(metaclass.igetattr("__call__", context)) - except exceptions.AttributeInferenceError: - pass - - if dunder_call and dunder_call.qname() != "builtins.type.__call__": - # Call type.__call__ if not set metaclass - # (since type is the default metaclass) - context = contextmod.bind_context_to_node(context, self) - yield from dunder_call.infer_call_result(caller, context) - else: - if any(cls.name in EXCEPTION_BASE_CLASSES for cls in self.mro()): - # Subclasses of exceptions can be exception instances - yield objects.ExceptionInstance(self) - else: - yield bases.Instance(self) - - def scope_lookup(self, node, name, offset=0): - """Lookup where the given name is assigned. - - :param node: The node to look for assignments up to. - Any assignments after the given node are ignored. - :type node: NodeNG - - :param name: The name to find assignments for. - :type name: str - - :param offset: The line offset to filter statements up to. - :type offset: int - - :returns: This scope node and the list of assignments associated to the - given name according to the scope where it has been found (locals, - globals or builtin). - :rtype: tuple(str, list(NodeNG)) - """ - # If the name looks like a builtin name, just try to look - # into the upper scope of this class. We might have a - # decorator that it's poorly named after a builtin object - # inside this class. - lookup_upper_frame = ( - isinstance(node.parent, node_classes.Decorators) - and name in MANAGER.builtins_module - ) - if ( - any(node == base or base.parent_of(node) for base in self.bases) - or lookup_upper_frame - ): - # Handle the case where we have either a name - # in the bases of a class, which exists before - # the actual definition or the case where we have - # a Getattr node, with that name. - # - # name = ... - # class A(name): - # def name(self): ... - # - # import name - # class A(name.Name): - # def name(self): ... - - frame = self.parent.frame() - # line offset to avoid that class A(A) resolve the ancestor to - # the defined class - offset = -1 - else: - frame = self - return frame._scope_lookup(node, name, offset) - - @property - def basenames(self): - """The names of the parent classes - - Names are given in the order they appear in the class definition. - - :type: list(str) - """ - return [bnode.as_string() for bnode in self.bases] - - def ancestors(self, recurs=True, context=None): - """Iterate over the base classes in prefixed depth first order. - - :param recurs: Whether to recurse or return direct ancestors only. 
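``_infer_type_call`` and ``infer_call_result`` above are what astroid consults when a call to a class is inferred; a sketch of both paths (results indicative):

import astroid
from astroid import bases

# The three-argument type() call is rebuilt into a ClassDef.
call = astroid.extract_node("type('Dynamic', (object,), {'attr': 42})")
dynamic = next(call.infer())
print(dynamic.name, "attr" in dynamic.locals)  # Dynamic True

# Calling an ordinary class yields an Instance proxy for it.
inst = next(astroid.extract_node("class C:\n    pass\n\nC()").infer())
print(isinstance(inst, bases.Instance))  # True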
- :type recurs: bool - - :returns: The base classes - :rtype: iterable(NodeNG) - """ - # FIXME: should be possible to choose the resolution order - # FIXME: inference make infinite loops possible here - yielded = {self} - if context is None: - context = contextmod.InferenceContext() - if not self.bases and self.qname() != "builtins.object": - yield builtin_lookup("object")[1][0] - return - - for stmt in self.bases: - with context.restore_path(): - try: - for baseobj in stmt.infer(context): - if not isinstance(baseobj, ClassDef): - if isinstance(baseobj, bases.Instance): - baseobj = baseobj._proxied - else: - continue - if not baseobj.hide: - if baseobj in yielded: - continue - yielded.add(baseobj) - yield baseobj - if not recurs: - continue - for grandpa in baseobj.ancestors(recurs=True, context=context): - if grandpa is self: - # This class is the ancestor of itself. - break - if grandpa in yielded: - continue - yielded.add(grandpa) - yield grandpa - except exceptions.InferenceError: - continue - - def local_attr_ancestors(self, name, context=None): - """Iterate over the parents that define the given name. - - :param name: The name to find definitions for. - :type name: str - - :returns: The parents that define the given name. - :rtype: iterable(NodeNG) - """ - # Look up in the mro if we can. This will result in the - # attribute being looked up just as Python does it. - try: - ancestors = self.mro(context)[1:] - except exceptions.MroError: - # Fallback to use ancestors, we can't determine - # a sane MRO. - ancestors = self.ancestors(context=context) - for astroid in ancestors: - if name in astroid: - yield astroid - - def instance_attr_ancestors(self, name, context=None): - """Iterate over the parents that define the given name as an attribute. - - :param name: The name to find definitions for. - :type name: str - - :returns: The parents that define the given name as - an instance attribute. - :rtype: iterable(NodeNG) - """ - for astroid in self.ancestors(context=context): - if name in astroid.instance_attrs: - yield astroid - - def has_base(self, node): - """Whether this class directly inherits from the given node. - - :param node: The node to check for. - :type node: NodeNG - - :returns: True if this class directly inherits from the given node. - :rtype: bool - """ - return node in self.bases - - def local_attr(self, name, context=None): - """Get the list of assign nodes associated to the given name. - - Assignments are looked for in both this class and in parents. - - :returns: The list of assignments to the given name. - :rtype: list(NodeNG) - - :raises AttributeInferenceError: If no attribute with this name - can be found in this class or parent classes. - """ - result = [] - if name in self.locals: - result = self.locals[name] - else: - class_node = next(self.local_attr_ancestors(name, context), None) - if class_node: - result = class_node.locals[name] - result = [n for n in result if not isinstance(n, node_classes.DelAttr)] - if result: - return result - raise exceptions.AttributeInferenceError( - target=self, attribute=name, context=context - ) - - def instance_attr(self, name, context=None): - """Get the list of nodes associated to the given attribute name. - - Assignments are looked for in both this class and in parents. - - :returns: The list of assignments to the given name. - :rtype: list(NodeNG) - - :raises AttributeInferenceError: If no attribute with this name - can be found in this class or parent classes. 
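A sketch of the ancestor-walking helpers above (astroid assumed importable):

import astroid

module = astroid.parse('''
class Base:
    attr = 1

class Mid(Base):
    pass

class Leaf(Mid):
    pass
''')
leaf = module["Leaf"]
print([a.name for a in leaf.ancestors()])  # ['Mid', 'Base', 'object']

# local_attr falls back to local_attr_ancestors when the name
# is not defined on the class itself.
print(leaf.local_attr("attr")[0].parent.as_string())  # attr = 1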
- """ - # Return a copy, so we don't modify self.instance_attrs, - # which could lead to infinite loop. - values = list(self.instance_attrs.get(name, [])) - # get all values from parents - for class_node in self.instance_attr_ancestors(name, context): - values += class_node.instance_attrs[name] - values = [n for n in values if not isinstance(n, node_classes.DelAttr)] - if values: - return values - raise exceptions.AttributeInferenceError( - target=self, attribute=name, context=context - ) - - def instantiate_class(self): - """Get an :class:`Instance` of the :class:`ClassDef` node. - - :returns: An :class:`Instance` of the :class:`ClassDef` node, - or self if this is not possible. - :rtype: Instance or ClassDef - """ - return bases.Instance(self) - - def getattr(self, name, context=None, class_context=True): - """Get an attribute from this class, using Python's attribute semantic. - - This method doesn't look in the :attr:`instance_attrs` dictionary - since it is done by an :class:`Instance` proxy at inference time. - It may return an :class:`Uninferable` object if - the attribute has not been - found, but a ``__getattr__`` or ``__getattribute__`` method is defined. - If ``class_context`` is given, then it is considered that the - attribute is accessed from a class context, - e.g. ClassDef.attribute, otherwise it might have been accessed - from an instance as well. If ``class_context`` is used in that - case, then a lookup in the implicit metaclass and the explicit - metaclass will be done. - - :param name: The attribute to look for. - :type name: str - - :param class_context: Whether the attribute can be accessed statically. - :type class_context: bool - - :returns: The attribute. - :rtype: list(NodeNG) - - :raises AttributeInferenceError: If the attribute cannot be inferred. - """ - values = self.locals.get(name, []) - if name in self.special_attributes and class_context and not values: - result = [self.special_attributes.lookup(name)] - if name == "__bases__": - # Need special treatment, since they are mutable - # and we need to return all the values. - result += values - return result - - # don't modify the list in self.locals! - values = list(values) - for classnode in self.ancestors(recurs=True, context=context): - values += classnode.locals.get(name, []) - - if class_context: - values += self._metaclass_lookup_attribute(name, context) - - if not values: - raise exceptions.AttributeInferenceError( - target=self, attribute=name, context=context - ) - - # Look for AnnAssigns, which are not attributes in the purest sense. 
- for value in values: - if isinstance(value, node_classes.AssignName): - stmt = value.statement() - if isinstance(stmt, node_classes.AnnAssign) and stmt.value is None: - raise exceptions.AttributeInferenceError( - target=self, attribute=name, context=context - ) - return values - - def _metaclass_lookup_attribute(self, name, context): - """Search the given name in the implicit and the explicit metaclass.""" - attrs = set() - implicit_meta = self.implicit_metaclass() - metaclass = self.metaclass() - for cls in {implicit_meta, metaclass}: - if cls and cls != self and isinstance(cls, ClassDef): - cls_attributes = self._get_attribute_from_metaclass(cls, name, context) - attrs.update(set(cls_attributes)) - return attrs - - def _get_attribute_from_metaclass(self, cls, name, context): - try: - attrs = cls.getattr(name, context=context, class_context=True) - except exceptions.AttributeInferenceError: - return - - for attr in bases._infer_stmts(attrs, context, frame=cls): - if not isinstance(attr, FunctionDef): - yield attr - continue - - if bases._is_property(attr): - yield from attr.infer_call_result(self, context) - continue - if attr.type == "classmethod": - # If the method is a classmethod, then it will - # be bound to the metaclass, not to the class - # from where the attribute is retrieved. - # get_wrapping_class could return None, so just - # default to the current class. - frame = get_wrapping_class(attr) or self - yield bases.BoundMethod(attr, frame) - elif attr.type == "staticmethod": - yield attr - else: - yield bases.BoundMethod(attr, self) - - def igetattr(self, name, context=None, class_context=True): - """Infer the possible values of the given variable. - - :param name: The name of the variable to infer. - :type name: str - - :returns: The inferred possible values. - :rtype: iterable(NodeNG or Uninferable) - """ - # set lookup name since this is necessary to infer on import nodes for - # instance - context = contextmod.copy_context(context) - context.lookupname = name - try: - attr = self.getattr(name, context, class_context=class_context)[0] - for inferred in bases._infer_stmts([attr], context, frame=self): - # yield Uninferable object instead of descriptors when necessary - if not isinstance(inferred, node_classes.Const) and isinstance( - inferred, bases.Instance - ): - try: - inferred._proxied.getattr("__get__", context) - except exceptions.AttributeInferenceError: - yield inferred - else: - yield util.Uninferable - else: - yield function_to_method(inferred, self) - except exceptions.AttributeInferenceError as error: - if not name.startswith("__") and self.has_dynamic_getattr(context): - # class handle some dynamic attributes, return a Uninferable object - yield util.Uninferable - else: - raise exceptions.InferenceError( - error.message, target=self, attribute=name, context=context - ) - - def has_dynamic_getattr(self, context=None): - """Check if the class has a custom __getattr__ or __getattribute__. - - If any such method is found and it is not from - builtins, nor from an extension module, then the function - will return True. - - :returns: True if the class has a custom - __getattr__ or __getattribute__, False otherwise. 
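The ``AnnAssign`` filtering above means a bare annotation is not yet a real class attribute; a sketch of the difference:

import astroid

cls = astroid.parse('''
class Config:
    declared_only: int      # annotation without a value
    assigned: int = 3
''').body[0]

print(cls.getattr("assigned"))  # [<AssignName ...>]
try:
    cls.getattr("declared_only")
except astroid.exceptions.AttributeInferenceError:
    print("annotation-only names are not attributes yet")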
-        :rtype: bool
-        """
-
-        def _valid_getattr(node):
-            root = node.root()
-            return root.name != BUILTINS and getattr(root, "pure_python", None)
-
-        try:
-            return _valid_getattr(self.getattr("__getattr__", context)[0])
-        except exceptions.AttributeInferenceError:
-            # if self.newstyle: XXX cause an infinite recursion error
-            try:
-                getattribute = self.getattr("__getattribute__", context)[0]
-                return _valid_getattr(getattribute)
-            except exceptions.AttributeInferenceError:
-                pass
-        return False
-
-    def getitem(self, index, context=None):
-        """Return the inference of a subscript.
-
-        This is basically looking up the method in the metaclass and calling it.
-
-        :returns: The inferred value of a subscript to this class.
-        :rtype: NodeNG
-
-        :raises AstroidTypeError: If this class does not define a
-            ``__getitem__`` method.
-        """
-        try:
-            methods = dunder_lookup.lookup(self, "__getitem__")
-        except exceptions.AttributeInferenceError as exc:
-            raise exceptions.AstroidTypeError(node=self, context=context) from exc
-
-        method = methods[0]
-
-        # Create a new callcontext for providing index as an argument.
-        new_context = contextmod.bind_context_to_node(context, self)
-        new_context.callcontext = contextmod.CallContext(args=[index])
-
-        try:
-            return next(method.infer_call_result(self, new_context))
-        except exceptions.InferenceError:
-            return util.Uninferable
-
-    def methods(self):
-        """Iterate over all of the methods defined in this class and its parents.
-
-        :returns: The methods defined on the class.
-        :rtype: iterable(FunctionDef)
-        """
-        done = {}
-        for astroid in itertools.chain(iter((self,)), self.ancestors()):
-            for meth in astroid.mymethods():
-                if meth.name in done:
-                    continue
-                done[meth.name] = None
-                yield meth
-
-    def mymethods(self):
-        """Iterate over all of the methods defined in this class only.
-
-        :returns: The methods defined on the class.
-        :rtype: iterable(FunctionDef)
-        """
-        for member in self.values():
-            if isinstance(member, FunctionDef):
-                yield member
-
-    def implicit_metaclass(self):
-        """Get the implicit metaclass of the current class.
-
-        For newstyle classes, this will return an instance of builtins.type.
-        For oldstyle classes, it will simply return None, since there's
-        no implicit metaclass there.
-
-        :returns: The metaclass.
-        :rtype: builtins.type or None
-        """
-        if self.newstyle:
-            return builtin_lookup("type")[1][0]
-        return None
-
-    _metaclass = None
-
-    def declared_metaclass(self, context=None):
-        """Return the explicitly declared metaclass for the current class.
-
-        An explicitly declared metaclass is defined
-        either by passing the ``metaclass`` keyword argument
-        in the class definition line (Python 3) or (Python 2) by
-        having a ``__metaclass__`` class attribute, or if there are
-        no explicit bases but there is a global ``__metaclass__`` variable.
-
-        :returns: The metaclass of this class,
-            or None if one could not be found.
- :rtype: NodeNG or None - """ - for base in self.bases: - try: - for baseobj in base.infer(context=context): - if isinstance(baseobj, ClassDef) and baseobj.hide: - self._metaclass = baseobj._metaclass - self._metaclass_hack = True - break - except exceptions.InferenceError: - pass - - if self._metaclass: - # Expects this from Py3k TreeRebuilder - try: - return next( - node - for node in self._metaclass.infer(context=context) - if node is not util.Uninferable - ) - except (exceptions.InferenceError, StopIteration): - return None - - return None - - def _find_metaclass(self, seen=None, context=None): - if seen is None: - seen = set() - seen.add(self) - - klass = self.declared_metaclass(context=context) - if klass is None: - for parent in self.ancestors(context=context): - if parent not in seen: - klass = parent._find_metaclass(seen) - if klass is not None: - break - return klass - - def metaclass(self, context=None): - """Get the metaclass of this class. - - If this class does not define explicitly a metaclass, - then the first defined metaclass in ancestors will be used - instead. - - :returns: The metaclass of this class. - :rtype: NodeNG or None - """ - return self._find_metaclass(context=context) - - def has_metaclass_hack(self): - return self._metaclass_hack - - def _islots(self): - """ Return an iterator with the inferred slots. """ - if "__slots__" not in self.locals: - return None - for slots in self.igetattr("__slots__"): - # check if __slots__ is a valid type - for meth in ITER_METHODS: - try: - slots.getattr(meth) - break - except exceptions.AttributeInferenceError: - continue - else: - continue - - if isinstance(slots, node_classes.Const): - # a string. Ignore the following checks, - # but yield the node, only if it has a value - if slots.value: - yield slots - continue - if not hasattr(slots, "itered"): - # we can't obtain the values, maybe a .deque? - continue - - if isinstance(slots, node_classes.Dict): - values = [item[0] for item in slots.items] - else: - values = slots.itered() - if values is util.Uninferable: - continue - if not values: - # Stop the iteration, because the class - # has an empty list of slots. - return values - - for elt in values: - try: - for inferred in elt.infer(): - if inferred is util.Uninferable: - continue - if not isinstance( - inferred, node_classes.Const - ) or not isinstance(inferred.value, str): - continue - if not inferred.value: - continue - yield inferred - except exceptions.InferenceError: - continue - - return None - - def _slots(self): - if not self.newstyle: - raise NotImplementedError( - "The concept of slots is undefined for old-style classes." - ) - - slots = self._islots() - try: - first = next(slots) - except StopIteration as exc: - # The class doesn't have a __slots__ definition or empty slots. - if exc.args and exc.args[0] not in ("", None): - return exc.args[0] - return None - return [first] + list(slots) - - # Cached, because inferring them all the time is expensive - @decorators_mod.cached - def slots(self): - """Get all the slots for this node. - - :returns: The names of slots for this class. - If the class doesn't define any slot, through the ``__slots__`` - variable, then this function will return a None. - Also, it will return None in the case the slots were not inferred. - :rtype: list(str) or None - """ - - def grouped_slots(): - # Not interested in object, since it can't have slots. 
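- # Editorial sketch of how this public API behaves (hypothetical
- # usage, not upstream astroid code):
- #   import astroid
- #   cls = astroid.extract_node("class A:\n    __slots__ = ('y', 'x')")
- #   [s.value for s in cls.slots()]  # -> ['x', 'y'] (sorted by value)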
- for cls in self.mro()[:-1]: - try: - cls_slots = cls._slots() - except NotImplementedError: - continue - if cls_slots is not None: - yield from cls_slots - else: - yield None - - if not self.newstyle: - raise NotImplementedError( - "The concept of slots is undefined for old-style classes." - ) - - slots = list(grouped_slots()) - if not all(slot is not None for slot in slots): - return None - - return sorted(slots, key=lambda item: item.value) - - def _inferred_bases(self, context=None): - # Similar with .ancestors, but the difference is when one base is inferred, - # only the first object is wanted. That's because - # we aren't interested in superclasses, as in the following - # example: - # - # class SomeSuperClass(object): pass - # class SomeClass(SomeSuperClass): pass - # class Test(SomeClass): pass - # - # Inferring SomeClass from the Test's bases will give - # us both SomeClass and SomeSuperClass, but we are interested - # only in SomeClass. - - if context is None: - context = contextmod.InferenceContext() - if not self.bases and self.qname() != "builtins.object": - yield builtin_lookup("object")[1][0] - return - - for stmt in self.bases: - try: - baseobj = next(stmt.infer(context=context)) - except exceptions.InferenceError: - continue - if isinstance(baseobj, bases.Instance): - baseobj = baseobj._proxied - if not isinstance(baseobj, ClassDef): - continue - if not baseobj.hide: - yield baseobj - else: - yield from baseobj.bases - - def _compute_mro(self, context=None): - inferred_bases = list(self._inferred_bases(context=context)) - bases_mro = [] - for base in inferred_bases: - if base is self: - continue - - try: - mro = base._compute_mro(context=context) - bases_mro.append(mro) - except NotImplementedError: - # Some classes have in their ancestors both newstyle and - # old style classes. For these we can't retrieve the .mro, - # although in Python it's possible, since the class we are - # currently working is in fact new style. - # So, we fallback to ancestors here. - ancestors = list(base.ancestors(context=context)) - bases_mro.append(ancestors) - - unmerged_mro = [[self]] + bases_mro + [inferred_bases] - unmerged_mro = list(clean_duplicates_mro(unmerged_mro, self, context)) - return _c3_merge(unmerged_mro, self, context) - - def mro(self, context=None) -> List["ClassDef"]: - """Get the method resolution order, using C3 linearization. - - :returns: The list of ancestors, sorted by the mro. - :rtype: list(NodeNG) - :raises DuplicateBasesError: Duplicate bases in the same class base - :raises InconsistentMroError: A class' MRO is inconsistent - """ - return self._compute_mro(context=context) - - def bool_value(self): - """Determine the boolean value of this node. - - :returns: The boolean value of this node. - For a :class:`ClassDef` this is always ``True``. - :rtype: bool - """ - return True - - def get_children(self): - if self.decorators is not None: - yield self.decorators - - yield from self.bases - yield from self.body - - @decorators_mod.cached - def _get_assign_nodes(self): - children_assign_nodes = ( - child_node._get_assign_nodes() for child_node in self.body - ) - return list(itertools.chain.from_iterable(children_assign_nodes)) diff --git a/venv/lib/python3.8/site-packages/astroid/test_utils.py b/venv/lib/python3.8/site-packages/astroid/test_utils.py deleted file mode 100644 index 6c965efd..00000000 --- a/venv/lib/python3.8/site-packages/astroid/test_utils.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) 2013-2014 Google, Inc. -# Copyright (c) 2014 LOGILAB S.A. 
(Paris, FRANCE) -# Copyright (c) 2015-2016, 2018 Claudiu Popa -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2016 Jakub Wilk -# Copyright (c) 2018 Anthony Sottile - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -"""Utility functions for test code that uses astroid ASTs as input.""" -import contextlib -import functools -import sys -import warnings - -import pytest - -from astroid import nodes - - -def require_version(minver=None, maxver=None): - """ Compare version of python interpreter to the given one. Skip the test - if older. - """ - - def parse(string, default=None): - string = string or default - try: - return tuple(int(v) for v in string.split(".")) - except ValueError as exc: - raise ValueError( - "{string} is not a correct version : should be X.Y[.Z].".format( - string=string - ) - ) from exc - - def check_require_version(f): - current = sys.version_info[:3] - if parse(minver, "0") < current <= parse(maxver, "4"): - return f - - str_version = ".".join(str(v) for v in sys.version_info) - - @functools.wraps(f) - def new_f(*args, **kwargs): - if minver is not None: - pytest.skip( - "Needs Python > %s. Current version is %s." % (minver, str_version) - ) - elif maxver is not None: - pytest.skip( - "Needs Python <= %s. Current version is %s." % (maxver, str_version) - ) - - return new_f - - return check_require_version - - -def get_name_node(start_from, name, index=0): - return [n for n in start_from.nodes_of_class(nodes.Name) if n.name == name][index] - - -@contextlib.contextmanager -def enable_warning(warning): - warnings.simplefilter("always", warning) - try: - yield - finally: - # Reset it to default value, so it will take - # into account the values from the -W flag. - warnings.simplefilter("default", warning) diff --git a/venv/lib/python3.8/site-packages/astroid/transforms.py b/venv/lib/python3.8/site-packages/astroid/transforms.py deleted file mode 100644 index e5506cc6..00000000 --- a/venv/lib/python3.8/site-packages/astroid/transforms.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright (c) 2015-2016, 2018 Claudiu Popa -# Copyright (c) 2016 Ceridwen -# Copyright (c) 2018 Nick Drozd - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - - -import collections -from functools import lru_cache - - -class TransformVisitor: - """A visitor for handling transforms. - - The standard approach of using it is to call - :meth:`~visit` with an *astroid* module and the class - will take care of the rest, walking the tree and running the - transforms for each encountered node. - """ - - TRANSFORM_MAX_CACHE_SIZE = 10000 - - def __init__(self): - self.transforms = collections.defaultdict(list) - - @lru_cache(maxsize=TRANSFORM_MAX_CACHE_SIZE) - def _transform(self, node): - """Call matching transforms for the given node if any and return the - transformed node. - """ - cls = node.__class__ - if cls not in self.transforms: - # no transform registered for this class of node - return node - - transforms = self.transforms[cls] - for transform_func, predicate in transforms: - if predicate is None or predicate(node): - ret = transform_func(node) - # if the transformation function returns something, it's - # expected to be a replacement for the node - if ret is not None: - node = ret - if ret.__class__ != cls: - # Can no longer apply the rest of the transforms. 
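- # Editorial sketch of the registration API this loop serves
- # (hypothetical names, not upstream code):
- #   visitor = TransformVisitor()
- #   visitor.register_transform(nodes.ClassDef, rewrite_class,
- #                              predicate=lambda n: n.name == "Old")
- #   module = visitor.visit(astroid.parse(source))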
- break - return node - - def _visit(self, node): - if hasattr(node, "_astroid_fields"): - for name in node._astroid_fields: - value = getattr(node, name) - visited = self._visit_generic(value) - if visited != value: - setattr(node, name, visited) - return self._transform(node) - - def _visit_generic(self, node): - if isinstance(node, list): - return [self._visit_generic(child) for child in node] - if isinstance(node, tuple): - return tuple(self._visit_generic(child) for child in node) - if not node or isinstance(node, str): - return node - - return self._visit(node) - - def register_transform(self, node_class, transform, predicate=None): - """Register `transform(node)` function to be applied on the given - astroid's `node_class` if `predicate` is None or returns true - when called with the node as argument. - - The transform function may return a value which is then used to - substitute the original node in the tree. - """ - self.transforms[node_class].append((transform, predicate)) - - def unregister_transform(self, node_class, transform, predicate=None): - """Unregister the given transform.""" - self.transforms[node_class].remove((transform, predicate)) - - def visit(self, module): - """Walk the given astroid *tree* and transform each encountered node - - Only the nodes which have transforms registered will actually - be replaced or changed. - """ - module.body = [self._visit(child) for child in module.body] - return self._transform(module) diff --git a/venv/lib/python3.8/site-packages/astroid/util.py b/venv/lib/python3.8/site-packages/astroid/util.py deleted file mode 100644 index 3ab75615..00000000 --- a/venv/lib/python3.8/site-packages/astroid/util.py +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (c) 2015-2018 Claudiu Popa -# Copyright (c) 2015-2016 Ceridwen -# Copyright (c) 2018 Bryce Guinta -# Copyright (c) 2018 Nick Drozd - -# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html -# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER - -import warnings -from itertools import islice - -import importlib -import lazy_object_proxy - - -def lazy_descriptor(obj): - class DescriptorProxy(lazy_object_proxy.Proxy): - def __get__(self, instance, owner=None): - return self.__class__.__get__(self, instance) - - return DescriptorProxy(obj) - - -def lazy_import(module_name): - return lazy_object_proxy.Proxy( - lambda: importlib.import_module("." + module_name, "astroid") - ) - - -@object.__new__ -class Uninferable: - """Special inference object, which is returned when inference fails.""" - - def __repr__(self): - return "Uninferable" - - __str__ = __repr__ - - def __getattribute__(self, name): - if name == "next": - raise AttributeError("next method should not be called") - if name.startswith("__") and name.endswith("__"): - return object.__getattribute__(self, name) - if name == "accept": - return object.__getattribute__(self, name) - return self - - def __call__(self, *args, **kwargs): - return self - - def __bool__(self): - return False - - __nonzero__ = __bool__ - - def accept(self, visitor): - func = getattr(visitor, "visit_uninferable") - return func(self) - - -class BadOperationMessage: - """Object which describes a TypeError occurred somewhere in the inference chain - - This is not an exception, but a container object which holds the types and - the error which occurred. 
- """ - - -class BadUnaryOperationMessage(BadOperationMessage): - """Object which describes operational failures on UnaryOps.""" - - def __init__(self, operand, op, error): - self.operand = operand - self.op = op - self.error = error - - @property - def _object_type_helper(self): - helpers = lazy_import("helpers") - return helpers.object_type - - def _object_type(self, obj): - # pylint: disable=not-callable; can't infer lazy_import - objtype = self._object_type_helper(obj) - if objtype is Uninferable: - return None - - return objtype - - def __str__(self): - if hasattr(self.operand, "name"): - operand_type = self.operand.name - else: - object_type = self._object_type(self.operand) - if hasattr(object_type, "name"): - operand_type = object_type.name - else: - # Just fallback to as_string - operand_type = object_type.as_string() - - msg = "bad operand type for unary {}: {}" - return msg.format(self.op, operand_type) - - -class BadBinaryOperationMessage(BadOperationMessage): - """Object which describes type errors for BinOps.""" - - def __init__(self, left_type, op, right_type): - self.left_type = left_type - self.right_type = right_type - self.op = op - - def __str__(self): - msg = "unsupported operand type(s) for {}: {!r} and {!r}" - return msg.format(self.op, self.left_type.name, self.right_type.name) - - -def _instancecheck(cls, other): - wrapped = cls.__wrapped__ - other_cls = other.__class__ - is_instance_of = wrapped is other_cls or issubclass(other_cls, wrapped) - warnings.warn( - "%r is deprecated and slated for removal in astroid " - "2.0, use %r instead" % (cls.__class__.__name__, wrapped.__name__), - PendingDeprecationWarning, - stacklevel=2, - ) - return is_instance_of - - -def proxy_alias(alias_name, node_type): - """Get a Proxy from the given name to the given node type.""" - proxy = type( - alias_name, - (lazy_object_proxy.Proxy,), - { - "__class__": object.__dict__["__class__"], - "__instancecheck__": _instancecheck, - }, - ) - return proxy(lambda: node_type) - - -def limit_inference(iterator, size): - """Limit inference amount. - - Limit inference amount to help with performance issues with - exponentially exploding possible results. - - :param iterator: Inference generator to limit - :type iterator: Iterator(NodeNG) - - :param size: Maximum mount of nodes yielded plus an - Uninferable at the end if limit reached - :type size: int - - :yields: A possibly modified generator - :rtype param: Iterable - """ - yield from islice(iterator, size) - has_more = next(iterator, False) - if has_more is not False: - yield Uninferable - return diff --git a/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/INSTALLER b/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e3..00000000 --- a/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/METADATA b/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/METADATA deleted file mode 100644 index a07d53df..00000000 --- a/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/METADATA +++ /dev/null @@ -1,144 +0,0 @@ -Metadata-Version: 2.1 -Name: atomicwrites -Version: 1.3.0 -Summary: Atomic file writes. 
-Home-page: https://github.com/untitaker/python-atomicwrites -Author: Markus Unterwaditzer -Author-email: markus@unterwaditzer.net -License: MIT -Platform: UNKNOWN -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: Implementation :: CPython -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* - -=================== -python-atomicwrites -=================== - -.. image:: https://travis-ci.org/untitaker/python-atomicwrites.svg?branch=master - :target: https://travis-ci.org/untitaker/python-atomicwrites - -.. image:: https://ci.appveyor.com/api/projects/status/vadc4le3c27to59x/branch/master?svg=true - :target: https://ci.appveyor.com/project/untitaker/python-atomicwrites/branch/master - -Atomic file writes. - -.. code-block:: python - - from atomicwrites import atomic_write - - with atomic_write('foo.txt', overwrite=True) as f: - f.write('Hello world.') - # "foo.txt" doesn't exist yet. - - # Now it does. - - -Features that distinguish it from other similar libraries (see `Alternatives and Credit`_): - -- Race-free assertion that the target file doesn't yet exist. This can be - controlled with the ``overwrite`` parameter. - -- Windows support, although not well-tested. The MSDN resources are not very - explicit about which operations are atomic. I'm basing my assumptions off `a - comment - `_ - by `Doug Crook - `_, who appears - to be a Microsoft employee: - - FAQ: Is MoveFileEx atomic - Frequently asked question: Is MoveFileEx atomic if the existing and new - files are both on the same drive? - - The simple answer is "usually, but in some cases it will silently fall-back - to a non-atomic method, so don't count on it". - - The implementation of MoveFileEx looks something like this: [...] - - The problem is if the rename fails, you might end up with a CopyFile, which - is definitely not atomic. - - If you really need atomic-or-nothing, you can try calling - NtSetInformationFile, which is unsupported but is much more likely to be - atomic. - -- Simple high-level API that wraps a very flexible class-based API. - -- Consistent error handling across platforms. - - -How it works -============ - -It uses a temporary file in the same directory as the given path. This ensures -that the temporary file resides on the same filesystem. - -The temporary file will then be atomically moved to the target location: On -POSIX, it will use ``rename`` if files should be overwritten, otherwise a -combination of ``link`` and ``unlink``. On Windows, it uses MoveFileEx_ through -stdlib's ``ctypes`` with the appropriate flags. - -Note that with ``link`` and ``unlink``, there's a timewindow where the file -might be available under two entries in the filesystem: The name of the -temporary file, and the name of the target file. - -Also note that the permissions of the target file may change this way. In some -situations a ``chmod`` can be issued without any concurrency problems, but -since that is not always the case, this library doesn't do it by itself. - -.. 
_MoveFileEx: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365240%28v=vs.85%29.aspx - -fsync ------ - -On POSIX, ``fsync`` is invoked on the temporary file after it is written (to -flush file content and metadata), and on the parent directory after the file is -moved (to flush filename). - -``fsync`` does not take care of disks' internal buffers, but there don't seem -to be any standard POSIX APIs for that. On OS X, ``fcntl`` is used with -``F_FULLFSYNC`` instead of ``fsync`` for that reason. - -On Windows, `_commit `_ -is used, but there are no guarantees about disk internal buffers. - -Alternatives and Credit -======================= - -Atomicwrites is directly inspired by the following libraries (and shares a -minimal amount of code): - -- The Trac project's `utility functions - `_, - also used in `Werkzeug `_ and - `mitsuhiko/python-atomicfile - `_. The idea to use - ``ctypes`` instead of ``PyWin32`` originated there. - -- `abarnert/fatomic `_. Windows support - (based on ``PyWin32``) was originally taken from there. - -Other alternatives to atomicwrites include: - -- `sashka/atomicfile `_. Originally I - considered using that, but at the time it was lacking a lot of features I - needed (Windows support, overwrite-parameter, overriding behavior through - subclassing). - -- The `Boltons library collection `_ - features a class for atomic file writes, which seems to have a very similar - ``overwrite`` parameter. It is lacking Windows support though. - -License -======= - -Licensed under the MIT, see ``LICENSE``. - - diff --git a/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/RECORD b/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/RECORD deleted file mode 100644 index 8476097e..00000000 --- a/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/RECORD +++ /dev/null @@ -1,7 +0,0 @@ -atomicwrites-1.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -atomicwrites-1.3.0.dist-info/METADATA,sha256=T9BgDww1IuYBG4de40vAgvpSbNBrRtNjTBNcukt1HZU,5535 -atomicwrites-1.3.0.dist-info/RECORD,, -atomicwrites-1.3.0.dist-info/WHEEL,sha256=gduuPyBvFJQSQ0zdyxF7k0zynDXbIbvg5ZBHoXum5uk,110 -atomicwrites-1.3.0.dist-info/top_level.txt,sha256=ks64zKVUkrl2ZrrP046CsytXlSGf8gLG-IcoXpNyeoc,13 -atomicwrites/__init__.py,sha256=00DapdQb04-k79KZjzzfwnI1Q8nfsiF5TPeVcqbGVw0,6562 -atomicwrites/__pycache__/__init__.cpython-38.pyc,, diff --git a/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/WHEEL b/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/WHEEL deleted file mode 100644 index 1316c41d..00000000 --- a/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.31.1) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/top_level.txt b/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/top_level.txt deleted file mode 100644 index 5fa5a87d..00000000 --- a/venv/lib/python3.8/site-packages/atomicwrites-1.3.0.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -atomicwrites diff --git a/venv/lib/python3.8/site-packages/atomicwrites/__init__.py b/venv/lib/python3.8/site-packages/atomicwrites/__init__.py deleted file mode 100644 index cfa5c29a..00000000 --- a/venv/lib/python3.8/site-packages/atomicwrites/__init__.py +++ /dev/null @@ -1,216 +0,0 @@ -import contextlib -import io -import os -import sys -import tempfile - -try: - import fcntl -except 
ImportError: - fcntl = None - -__version__ = '1.3.0' - - -PY2 = sys.version_info[0] == 2 - -text_type = unicode if PY2 else str # noqa - - -def _path_to_unicode(x): - if not isinstance(x, text_type): - return x.decode(sys.getfilesystemencoding()) - return x - - -DEFAULT_MODE = "wb" if PY2 else "w" - - -_proper_fsync = os.fsync - - -if sys.platform != 'win32': - if hasattr(fcntl, 'F_FULLFSYNC'): - def _proper_fsync(fd): - # https://lists.apple.com/archives/darwin-dev/2005/Feb/msg00072.html - # https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man2/fsync.2.html - # https://github.com/untitaker/python-atomicwrites/issues/6 - fcntl.fcntl(fd, fcntl.F_FULLFSYNC) - - def _sync_directory(directory): - # Ensure that filenames are written to disk - fd = os.open(directory, 0) - try: - _proper_fsync(fd) - finally: - os.close(fd) - - def _replace_atomic(src, dst): - os.rename(src, dst) - _sync_directory(os.path.normpath(os.path.dirname(dst))) - - def _move_atomic(src, dst): - os.link(src, dst) - os.unlink(src) - - src_dir = os.path.normpath(os.path.dirname(src)) - dst_dir = os.path.normpath(os.path.dirname(dst)) - _sync_directory(dst_dir) - if src_dir != dst_dir: - _sync_directory(src_dir) -else: - from ctypes import windll, WinError - - _MOVEFILE_REPLACE_EXISTING = 0x1 - _MOVEFILE_WRITE_THROUGH = 0x8 - _windows_default_flags = _MOVEFILE_WRITE_THROUGH - - def _handle_errors(rv): - if not rv: - raise WinError() - - def _replace_atomic(src, dst): - _handle_errors(windll.kernel32.MoveFileExW( - _path_to_unicode(src), _path_to_unicode(dst), - _windows_default_flags | _MOVEFILE_REPLACE_EXISTING - )) - - def _move_atomic(src, dst): - _handle_errors(windll.kernel32.MoveFileExW( - _path_to_unicode(src), _path_to_unicode(dst), - _windows_default_flags - )) - - -def replace_atomic(src, dst): - ''' - Move ``src`` to ``dst``. If ``dst`` exists, it will be silently - overwritten. - - Both paths must reside on the same filesystem for the operation to be - atomic. - ''' - return _replace_atomic(src, dst) - - -def move_atomic(src, dst): - ''' - Move ``src`` to ``dst``. There might a timewindow where both filesystem - entries exist. If ``dst`` already exists, :py:exc:`FileExistsError` will be - raised. - - Both paths must reside on the same filesystem for the operation to be - atomic. - ''' - return _move_atomic(src, dst) - - -class AtomicWriter(object): - ''' - A helper class for performing atomic writes. Usage:: - - with AtomicWriter(path).open() as f: - f.write(...) - - :param path: The destination filepath. May or may not exist. - :param mode: The filemode for the temporary file. This defaults to `wb` in - Python 2 and `w` in Python 3. - :param overwrite: If set to false, an error is raised if ``path`` exists. - Errors are only raised after the file has been written to. Either way, - the operation is atomic. - - If you need further control over the exact behavior, you are encouraged to - subclass. - ''' - - def __init__(self, path, mode=DEFAULT_MODE, overwrite=False, - **open_kwargs): - if 'a' in mode: - raise ValueError( - 'Appending to an existing file is not supported, because that ' - 'would involve an expensive `copy`-operation to a temporary ' - 'file. Open the file in normal `w`-mode and copy explicitly ' - 'if that\'s what you\'re after.' 
- ) - if 'x' in mode: - raise ValueError('Use the `overwrite`-parameter instead.') - if 'w' not in mode: - raise ValueError('AtomicWriters can only be written to.') - - self._path = path - self._mode = mode - self._overwrite = overwrite - self._open_kwargs = open_kwargs - - def open(self): - ''' - Open the temporary file. - ''' - return self._open(self.get_fileobject) - - @contextlib.contextmanager - def _open(self, get_fileobject): - f = None # make sure f exists even if get_fileobject() fails - try: - success = False - with get_fileobject(**self._open_kwargs) as f: - yield f - self.sync(f) - self.commit(f) - success = True - finally: - if not success: - try: - self.rollback(f) - except Exception: - pass - - def get_fileobject(self, suffix="", prefix=tempfile.template, dir=None, - **kwargs): - '''Return the temporary file to use.''' - if dir is None: - dir = os.path.normpath(os.path.dirname(self._path)) - descriptor, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, - dir=dir) - # io.open() will take either the descriptor or the name, but we need - # the name later for commit()/replace_atomic() and couldn't find a way - # to get the filename from the descriptor. - os.close(descriptor) - kwargs['mode'] = self._mode - kwargs['file'] = name - return io.open(**kwargs) - - def sync(self, f): - '''responsible for clearing as many file caches as possible before - commit''' - f.flush() - _proper_fsync(f.fileno()) - - def commit(self, f): - '''Move the temporary file to the target location.''' - if self._overwrite: - replace_atomic(f.name, self._path) - else: - move_atomic(f.name, self._path) - - def rollback(self, f): - '''Clean up all temporary resources.''' - os.unlink(f.name) - - -def atomic_write(path, writer_cls=AtomicWriter, **cls_kwargs): - ''' - Simple atomic writes. This wraps :py:class:`AtomicWriter`:: - - with atomic_write(path) as f: - f.write(...) - - :param path: The target path to write to. - :param writer_cls: The writer class to use. This parameter is useful if you - subclassed :py:class:`AtomicWriter` to change some behavior and want to - use that new subclass. - - Additional keyword arguments are passed to the writer class. See - :py:class:`AtomicWriter`. - ''' - return writer_cls(path, **cls_kwargs).open() diff --git a/venv/lib/python3.8/site-packages/attr/__init__.py b/venv/lib/python3.8/site-packages/attr/__init__.py deleted file mode 100644 index 9ff4d47f..00000000 --- a/venv/lib/python3.8/site-packages/attr/__init__.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import absolute_import, division, print_function - -from functools import partial - -from . 
import converters, exceptions, filters, validators -from ._config import get_run_validators, set_run_validators -from ._funcs import asdict, assoc, astuple, evolve, has -from ._make import ( - NOTHING, - Attribute, - Factory, - attrib, - attrs, - fields, - fields_dict, - make_class, - validate, -) -from ._version_info import VersionInfo - - -__version__ = "19.3.0" -__version_info__ = VersionInfo._from_version_string(__version__) - -__title__ = "attrs" -__description__ = "Classes Without Boilerplate" -__url__ = "https://www.attrs.org/" -__uri__ = __url__ -__doc__ = __description__ + " <" + __uri__ + ">" - -__author__ = "Hynek Schlawack" -__email__ = "hs@ox.cx" - -__license__ = "MIT" -__copyright__ = "Copyright (c) 2015 Hynek Schlawack" - - -s = attributes = attrs -ib = attr = attrib -dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) - - -__all__ = [ - "Attribute", - "Factory", - "NOTHING", - "asdict", - "assoc", - "astuple", - "attr", - "attrib", - "attributes", - "attrs", - "converters", - "evolve", - "exceptions", - "fields", - "fields_dict", - "filters", - "get_run_validators", - "has", - "ib", - "make_class", - "s", - "set_run_validators", - "validate", - "validators", -] diff --git a/venv/lib/python3.8/site-packages/attr/__init__.pyi b/venv/lib/python3.8/site-packages/attr/__init__.pyi deleted file mode 100644 index 38f16f06..00000000 --- a/venv/lib/python3.8/site-packages/attr/__init__.pyi +++ /dev/null @@ -1,278 +0,0 @@ -from typing import ( - Any, - Callable, - Dict, - Generic, - List, - Optional, - Sequence, - Mapping, - Tuple, - Type, - TypeVar, - Union, - overload, -) - -# `import X as X` is required to make these public -from . import exceptions as exceptions -from . import filters as filters -from . import converters as converters -from . import validators as validators - -from ._version_info import VersionInfo - -__version__: str -__version_info__: VersionInfo -__title__: str -__description__: str -__url__: str -__uri__: str -__author__: str -__email__: str -__license__: str -__copyright__: str - -_T = TypeVar("_T") -_C = TypeVar("_C", bound=type) - -_ValidatorType = Callable[[Any, Attribute[_T], _T], Any] -_ConverterType = Callable[[Any], _T] -_FilterType = Callable[[Attribute[_T], _T], bool] -_ReprType = Callable[[Any], str] -_ReprArgType = Union[bool, _ReprType] -# FIXME: in reality, if multiple validators are passed they must be in a list or tuple, -# but those are invariant and so would prevent subtypes of _ValidatorType from working -# when passed in a list or tuple. -_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]] - -# _make -- - -NOTHING: object - -# NOTE: Factory lies about its return type to make this possible: `x: List[int] = Factory(list)` -# Work around mypy issue #4554 in the common case by using an overload. -@overload -def Factory(factory: Callable[[], _T]) -> _T: ... -@overload -def Factory( - factory: Union[Callable[[Any], _T], Callable[[], _T]], - takes_self: bool = ..., -) -> _T: ... - -class Attribute(Generic[_T]): - name: str - default: Optional[_T] - validator: Optional[_ValidatorType[_T]] - repr: _ReprArgType - cmp: bool - eq: bool - order: bool - hash: Optional[bool] - init: bool - converter: Optional[_ConverterType[_T]] - metadata: Dict[Any, Any] - type: Optional[Type[_T]] - kw_only: bool - -# NOTE: We had several choices for the annotation to use for type arg: -# 1) Type[_T] -# - Pros: Handles simple cases correctly -# - Cons: Might produce less informative errors in the case of conflicting TypeVars -# e.g. 
`attr.ib(default='bad', type=int)` -# 2) Callable[..., _T] -# - Pros: Better error messages than #1 for conflicting TypeVars -# - Cons: Terrible error messages for validator checks. -# e.g. attr.ib(type=int, validator=validate_str) -# -> error: Cannot infer function type argument -# 3) type (and do all of the work in the mypy plugin) -# - Pros: Simple here, and we could customize the plugin with our own errors. -# - Cons: Would need to write mypy plugin code to handle all the cases. -# We chose option #1. - -# `attr` lies about its return type to make the following possible: -# attr() -> Any -# attr(8) -> int -# attr(validator=) -> Whatever the callable expects. -# This makes this type of assignments possible: -# x: int = attr(8) -# -# This form catches explicit None or no default but with no other arguments returns Any. -@overload -def attrib( - default: None = ..., - validator: None = ..., - repr: _ReprArgType = ..., - cmp: Optional[bool] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: None = ..., - converter: None = ..., - factory: None = ..., - kw_only: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., -) -> Any: ... - -# This form catches an explicit None or no default and infers the type from the other arguments. -@overload -def attrib( - default: None = ..., - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: _ReprArgType = ..., - cmp: Optional[bool] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: Optional[Type[_T]] = ..., - converter: Optional[_ConverterType[_T]] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., -) -> _T: ... - -# This form catches an explicit default argument. -@overload -def attrib( - default: _T, - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: _ReprArgType = ..., - cmp: Optional[bool] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: Optional[Type[_T]] = ..., - converter: Optional[_ConverterType[_T]] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., -) -> _T: ... - -# This form covers type=non-Type: e.g. forward references (str), Any -@overload -def attrib( - default: Optional[_T] = ..., - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: _ReprArgType = ..., - cmp: Optional[bool] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: object = ..., - converter: Optional[_ConverterType[_T]] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., -) -> Any: ... -@overload -def attrs( - maybe_cls: _C, - these: Optional[Dict[str, Any]] = ..., - repr_ns: Optional[str] = ..., - repr: bool = ..., - cmp: Optional[bool] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., -) -> _C: ... 
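-# Editorial note: the overload above models bare use, ``@attr.s`` applied
-# directly to a class (maybe_cls is the class, and the class is returned);
-# the overload below models parenthesized use, ``@attr.s(frozen=True)``
-# (maybe_cls is None, and a Callable[[_C], _C] decorator is returned).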
-@overload -def attrs( - maybe_cls: None = ..., - these: Optional[Dict[str, Any]] = ..., - repr_ns: Optional[str] = ..., - repr: bool = ..., - cmp: Optional[bool] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., -) -> Callable[[_C], _C]: ... - -# TODO: add support for returning NamedTuple from the mypy plugin -class _Fields(Tuple[Attribute[Any], ...]): - def __getattr__(self, name: str) -> Attribute[Any]: ... - -def fields(cls: type) -> _Fields: ... -def fields_dict(cls: type) -> Dict[str, Attribute[Any]]: ... -def validate(inst: Any) -> None: ... - -# TODO: add support for returning a proper attrs class from the mypy plugin -# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', [attr.ib()])` is valid -def make_class( - name: str, - attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]], - bases: Tuple[type, ...] = ..., - repr_ns: Optional[str] = ..., - repr: bool = ..., - cmp: Optional[bool] = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., - eq: Optional[bool] = ..., - order: Optional[bool] = ..., -) -> type: ... - -# _funcs -- - -# TODO: add support for returning TypedDict from the mypy plugin -# FIXME: asdict/astuple do not honor their factory args. waiting on one of these: -# https://github.com/python/mypy/issues/4236 -# https://github.com/python/typing/issues/253 -def asdict( - inst: Any, - recurse: bool = ..., - filter: Optional[_FilterType[Any]] = ..., - dict_factory: Type[Mapping[Any, Any]] = ..., - retain_collection_types: bool = ..., -) -> Dict[str, Any]: ... - -# TODO: add support for returning NamedTuple from the mypy plugin -def astuple( - inst: Any, - recurse: bool = ..., - filter: Optional[_FilterType[Any]] = ..., - tuple_factory: Type[Sequence[Any]] = ..., - retain_collection_types: bool = ..., -) -> Tuple[Any, ...]: ... -def has(cls: type) -> bool: ... -def assoc(inst: _T, **changes: Any) -> _T: ... -def evolve(inst: _T, **changes: Any) -> _T: ... - -# _config -- - -def set_run_validators(run: bool) -> None: ... -def get_run_validators() -> bool: ... 
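-# Editorial sketch of the validator switch (hypothetical class name,
-# not upstream code):
-#   import attr
-#   attr.set_run_validators(False)   # e.g. skip checks on a hot path
-#   try:
-#       obj = SomeAttrsClass(x=untrusted_value)
-#   finally:
-#       attr.set_run_validators(True)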
- -# aliases -- - -s = attributes = attrs -ib = attr = attrib -dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/venv/lib/python3.8/site-packages/attr/_compat.py b/venv/lib/python3.8/site-packages/attr/_compat.py deleted file mode 100644 index a915db8e..00000000 --- a/venv/lib/python3.8/site-packages/attr/_compat.py +++ /dev/null @@ -1,230 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import platform -import sys -import types -import warnings - - -PY2 = sys.version_info[0] == 2 -PYPY = platform.python_implementation() == "PyPy" - - -if PYPY or sys.version_info[:2] >= (3, 6): - ordered_dict = dict -else: - from collections import OrderedDict - - ordered_dict = OrderedDict - - -if PY2: - from UserDict import IterableUserDict - from collections import Mapping, Sequence - - # We 'bundle' isclass instead of using inspect as importing inspect is - # fairly expensive (order of 10-15 ms for a modern machine in 2016) - def isclass(klass): - return isinstance(klass, (type, types.ClassType)) - - # TYPE is used in exceptions, repr(int) is different on Python 2 and 3. - TYPE = "type" - - def iteritems(d): - return d.iteritems() - - # Python 2 is bereft of a read-only dict proxy, so we make one! - class ReadOnlyDict(IterableUserDict): - """ - Best-effort read-only dict wrapper. - """ - - def __setitem__(self, key, val): - # We gently pretend we're a Python 3 mappingproxy. - raise TypeError( - "'mappingproxy' object does not support item assignment" - ) - - def update(self, _): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'update'" - ) - - def __delitem__(self, _): - # We gently pretend we're a Python 3 mappingproxy. - raise TypeError( - "'mappingproxy' object does not support item deletion" - ) - - def clear(self): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'clear'" - ) - - def pop(self, key, default=None): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'pop'" - ) - - def popitem(self): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'popitem'" - ) - - def setdefault(self, key, default=None): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'setdefault'" - ) - - def __repr__(self): - # Override to be identical to the Python 3 version. - return "mappingproxy(" + repr(self.data) + ")" - - def metadata_proxy(d): - res = ReadOnlyDict() - res.data.update(d) # We blocked update, so we have to do it like this. - return res - - def just_warn(*args, **kw): # pragma: nocover - """ - We only warn on Python 3 because we are not aware of any concrete - consequences of not setting the cell on Python 2. - """ - - -else: # Python 3 and later. - from collections.abc import Mapping, Sequence # noqa - - def just_warn(*args, **kw): - """ - We only warn on Python 3 because we are not aware of any concrete - consequences of not setting the cell on Python 2. - """ - warnings.warn( - "Running interpreter doesn't sufficiently support code object " - "introspection. 
Some features like bare super() or accessing " - "__class__ will not work with slotted classes.", - RuntimeWarning, - stacklevel=2, - ) - - def isclass(klass): - return isinstance(klass, type) - - TYPE = "class" - - def iteritems(d): - return d.items() - - def metadata_proxy(d): - return types.MappingProxyType(dict(d)) - - -def make_set_closure_cell(): - """Return a function of two arguments (cell, value) which sets - the value stored in the closure cell `cell` to `value`. - """ - # pypy makes this easy. (It also supports the logic below, but - # why not do the easy/fast thing?) - if PYPY: # pragma: no cover - - def set_closure_cell(cell, value): - cell.__setstate__((value,)) - - return set_closure_cell - - # Otherwise gotta do it the hard way. - - # Create a function that will set its first cellvar to `value`. - def set_first_cellvar_to(value): - x = value - return - - # This function will be eliminated as dead code, but - # not before its reference to `x` forces `x` to be - # represented as a closure cell rather than a local. - def force_x_to_be_a_cell(): # pragma: no cover - return x - - try: - # Extract the code object and make sure our assumptions about - # the closure behavior are correct. - if PY2: - co = set_first_cellvar_to.func_code - else: - co = set_first_cellvar_to.__code__ - if co.co_cellvars != ("x",) or co.co_freevars != (): - raise AssertionError # pragma: no cover - - # Convert this code object to a code object that sets the - # function's first _freevar_ (not cellvar) to the argument. - if sys.version_info >= (3, 8): - # CPython 3.8+ has an incompatible CodeType signature - # (added a posonlyargcount argument) but also added - # CodeType.replace() to do this without counting parameters. - set_first_freevar_code = co.replace( - co_cellvars=co.co_freevars, co_freevars=co.co_cellvars - ) - else: - args = [co.co_argcount] - if not PY2: - args.append(co.co_kwonlyargcount) - args.extend( - [ - co.co_nlocals, - co.co_stacksize, - co.co_flags, - co.co_code, - co.co_consts, - co.co_names, - co.co_varnames, - co.co_filename, - co.co_name, - co.co_firstlineno, - co.co_lnotab, - # These two arguments are reversed: - co.co_cellvars, - co.co_freevars, - ] - ) - set_first_freevar_code = types.CodeType(*args) - - def set_closure_cell(cell, value): - # Create a function using the set_first_freevar_code, - # whose first closure cell is `cell`. Calling it will - # change the value of that cell. - setter = types.FunctionType( - set_first_freevar_code, {}, "setter", (), (cell,) - ) - # And call it to set the cell. - setter(value) - - # Make sure it works on this interpreter: - def make_func_with_cell(): - x = None - - def func(): - return x # pragma: no cover - - return func - - if PY2: - cell = make_func_with_cell().func_closure[0] - else: - cell = make_func_with_cell().__closure__[0] - set_closure_cell(cell, 100) - if cell.cell_contents != 100: - raise AssertionError # pragma: no cover - - except Exception: - return just_warn - else: - return set_closure_cell - - -set_closure_cell = make_set_closure_cell() diff --git a/venv/lib/python3.8/site-packages/attr/_config.py b/venv/lib/python3.8/site-packages/attr/_config.py deleted file mode 100644 index 8ec92096..00000000 --- a/venv/lib/python3.8/site-packages/attr/_config.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import absolute_import, division, print_function - - -__all__ = ["set_run_validators", "get_run_validators"] - -_run_validators = True - - -def set_run_validators(run): - """ - Set whether or not validators are run. 
By default, they are run. - """ - if not isinstance(run, bool): - raise TypeError("'run' must be bool.") - global _run_validators - _run_validators = run - - -def get_run_validators(): - """ - Return whether or not validators are run. - """ - return _run_validators diff --git a/venv/lib/python3.8/site-packages/attr/_funcs.py b/venv/lib/python3.8/site-packages/attr/_funcs.py deleted file mode 100644 index c077e428..00000000 --- a/venv/lib/python3.8/site-packages/attr/_funcs.py +++ /dev/null @@ -1,290 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import copy - -from ._compat import iteritems -from ._make import NOTHING, _obj_setattr, fields -from .exceptions import AttrsAttributeNotFoundError - - -def asdict( - inst, - recurse=True, - filter=None, - dict_factory=dict, - retain_collection_types=False, -): - """ - Return the ``attrs`` attribute values of *inst* as a dict. - - Optionally recurse into other ``attrs``-decorated classes. - - :param inst: Instance of an ``attrs``-decorated class. - :param bool recurse: Recurse into classes that are also - ``attrs``-decorated. - :param callable filter: A callable whose return code determines whether an - attribute or element is included (``True``) or dropped (``False``). Is - called with the `attr.Attribute` as the first argument and the - value as the second argument. - :param callable dict_factory: A callable to produce dictionaries from. For - example, to produce ordered dictionaries instead of normal Python - dictionaries, pass in ``collections.OrderedDict``. - :param bool retain_collection_types: Do not convert to ``list`` when - encountering an attribute whose type is ``tuple`` or ``set``. Only - meaningful if ``recurse`` is ``True``. - - :rtype: return type of *dict_factory* - - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. versionadded:: 16.0.0 *dict_factory* - .. versionadded:: 16.1.0 *retain_collection_types* - """ - attrs = fields(inst.__class__) - rv = dict_factory() - for a in attrs: - v = getattr(inst, a.name) - if filter is not None and not filter(a, v): - continue - if recurse is True: - if has(v.__class__): - rv[a.name] = asdict( - v, True, filter, dict_factory, retain_collection_types - ) - elif isinstance(v, (tuple, list, set)): - cf = v.__class__ if retain_collection_types is True else list - rv[a.name] = cf( - [ - _asdict_anything( - i, filter, dict_factory, retain_collection_types - ) - for i in v - ] - ) - elif isinstance(v, dict): - df = dict_factory - rv[a.name] = df( - ( - _asdict_anything( - kk, filter, df, retain_collection_types - ), - _asdict_anything( - vv, filter, df, retain_collection_types - ), - ) - for kk, vv in iteritems(v) - ) - else: - rv[a.name] = v - else: - rv[a.name] = v - return rv - - -def _asdict_anything(val, filter, dict_factory, retain_collection_types): - """ - ``asdict`` only works on attrs instances, this works on anything. - """ - if getattr(val.__class__, "__attrs_attrs__", None) is not None: - # Attrs class. 
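- # Editorial sketch of the result this recursion produces
- # (hypothetical class, not upstream code):
- #   @attr.s
- #   class Point:
- #       x = attr.ib()
- #       y = attr.ib()
- #   attr.asdict(Point(1, 2))  # -> {"x": 1, "y": 2}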
- rv = asdict(val, True, filter, dict_factory, retain_collection_types) - elif isinstance(val, (tuple, list, set)): - cf = val.__class__ if retain_collection_types is True else list - rv = cf( - [ - _asdict_anything( - i, filter, dict_factory, retain_collection_types - ) - for i in val - ] - ) - elif isinstance(val, dict): - df = dict_factory - rv = df( - ( - _asdict_anything(kk, filter, df, retain_collection_types), - _asdict_anything(vv, filter, df, retain_collection_types), - ) - for kk, vv in iteritems(val) - ) - else: - rv = val - return rv - - -def astuple( - inst, - recurse=True, - filter=None, - tuple_factory=tuple, - retain_collection_types=False, -): - """ - Return the ``attrs`` attribute values of *inst* as a tuple. - - Optionally recurse into other ``attrs``-decorated classes. - - :param inst: Instance of an ``attrs``-decorated class. - :param bool recurse: Recurse into classes that are also - ``attrs``-decorated. - :param callable filter: A callable whose return code determines whether an - attribute or element is included (``True``) or dropped (``False``). Is - called with the `attr.Attribute` as the first argument and the - value as the second argument. - :param callable tuple_factory: A callable to produce tuples from. For - example, to produce lists instead of tuples. - :param bool retain_collection_types: Do not convert to ``list`` - or ``dict`` when encountering an attribute which type is - ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is - ``True``. - - :rtype: return type of *tuple_factory* - - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. versionadded:: 16.2.0 - """ - attrs = fields(inst.__class__) - rv = [] - retain = retain_collection_types # Very long. :/ - for a in attrs: - v = getattr(inst, a.name) - if filter is not None and not filter(a, v): - continue - if recurse is True: - if has(v.__class__): - rv.append( - astuple( - v, - recurse=True, - filter=filter, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - ) - elif isinstance(v, (tuple, list, set)): - cf = v.__class__ if retain is True else list - rv.append( - cf( - [ - astuple( - j, - recurse=True, - filter=filter, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(j.__class__) - else j - for j in v - ] - ) - ) - elif isinstance(v, dict): - df = v.__class__ if retain is True else dict - rv.append( - df( - ( - astuple( - kk, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(kk.__class__) - else kk, - astuple( - vv, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(vv.__class__) - else vv, - ) - for kk, vv in iteritems(v) - ) - ) - else: - rv.append(v) - else: - rv.append(v) - return rv if tuple_factory is list else tuple_factory(rv) - - -def has(cls): - """ - Check whether *cls* is a class with ``attrs`` attributes. - - :param type cls: Class to introspect. - :raise TypeError: If *cls* is not a class. - - :rtype: bool - """ - return getattr(cls, "__attrs_attrs__", None) is not None - - -def assoc(inst, **changes): - """ - Copy *inst* and apply *changes*. - - :param inst: Instance of a class with ``attrs`` attributes. - :param changes: Keyword changes in the new copy. - - :return: A copy of inst with *changes* incorporated. - - :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't - be found on *cls*. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. 
deprecated:: 17.1.0 - Use `evolve` instead. - """ - import warnings - - warnings.warn( - "assoc is deprecated and will be removed after 2018/01.", - DeprecationWarning, - stacklevel=2, - ) - new = copy.copy(inst) - attrs = fields(inst.__class__) - for k, v in iteritems(changes): - a = getattr(attrs, k, NOTHING) - if a is NOTHING: - raise AttrsAttributeNotFoundError( - "{k} is not an attrs attribute on {cl}.".format( - k=k, cl=new.__class__ - ) - ) - _obj_setattr(new, k, v) - return new - - -def evolve(inst, **changes): - """ - Create a new instance, based on *inst* with *changes* applied. - - :param inst: Instance of a class with ``attrs`` attributes. - :param changes: Keyword changes in the new copy. - - :return: A copy of inst with *changes* incorporated. - - :raise TypeError: If *attr_name* couldn't be found in the class - ``__init__``. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. versionadded:: 17.1.0 - """ - cls = inst.__class__ - attrs = fields(cls) - for a in attrs: - if not a.init: - continue - attr_name = a.name # To deal with private attributes. - init_name = attr_name if attr_name[0] != "_" else attr_name[1:] - if init_name not in changes: - changes[init_name] = getattr(inst, attr_name) - return cls(**changes) diff --git a/venv/lib/python3.8/site-packages/attr/_make.py b/venv/lib/python3.8/site-packages/attr/_make.py deleted file mode 100644 index 46f9c54e..00000000 --- a/venv/lib/python3.8/site-packages/attr/_make.py +++ /dev/null @@ -1,2168 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import copy -import linecache -import sys -import threading -import uuid -import warnings - -from operator import itemgetter - -from . import _config -from ._compat import ( - PY2, - isclass, - iteritems, - metadata_proxy, - ordered_dict, - set_closure_cell, -) -from .exceptions import ( - DefaultAlreadySetError, - FrozenInstanceError, - NotAnAttrsClassError, - PythonTooOldError, - UnannotatedAttributeError, -) - - -# This is used at least twice, so cache it here. -_obj_setattr = object.__setattr__ -_init_converter_pat = "__attr_converter_{}" -_init_factory_pat = "__attr_factory_{}" -_tuple_property_pat = ( - " {attr_name} = _attrs_property(_attrs_itemgetter({index}))" -) -_classvar_prefixes = ("typing.ClassVar", "t.ClassVar", "ClassVar") -# we don't use a double-underscore prefix because that triggers -# name mangling when trying to create a slot for the field -# (when slots=True) -_hash_cache_field = "_attrs_cached_hash" - -_empty_metadata_singleton = metadata_proxy({}) - -# Unique object for unequivocal getattr() defaults. -_sentinel = object() - - -class _Nothing(object): - """ - Sentinel class to indicate the lack of a value when ``None`` is ambiguous. - - ``_Nothing`` is a singleton. There is only ever one of it. - """ - - _singleton = None - - def __new__(cls): - if _Nothing._singleton is None: - _Nothing._singleton = super(_Nothing, cls).__new__(cls) - return _Nothing._singleton - - def __repr__(self): - return "NOTHING" - - -NOTHING = _Nothing() -""" -Sentinel to indicate the lack of a value when ``None`` is ambiguous. -""" - - -def attrib( - default=NOTHING, - validator=None, - repr=True, - cmp=None, - hash=None, - init=True, - metadata=None, - type=None, - converter=None, - factory=None, - kw_only=False, - eq=None, - order=None, -): - """ - Create a new attribute on a class. - - .. warning:: - - Does *not* do anything unless the class is also decorated with - `attr.s`! 
-
- :param default: A value that is used if an ``attrs``-generated ``__init__``
- is used and no value is passed while instantiating or the attribute is
- excluded using ``init=False``.
-
- If the value is an instance of `Factory`, its callable will be
- used to construct a new value (useful for mutable data types like lists
- or dicts).
-
- If a default is not set (or set manually to ``attr.NOTHING``), a value
- *must* be supplied when instantiating; otherwise a `TypeError`
- will be raised.
-
- The default can also be set using decorator notation as shown below.
-
- :type default: Any value
-
- :param callable factory: Syntactic sugar for
- ``default=attr.Factory(callable)``.
-
- :param validator: `callable` that is called by ``attrs``-generated
- ``__init__`` methods after the instance has been initialized. They
- receive the initialized instance, the `Attribute`, and the
- passed value.
-
- The return value is *not* inspected so the validator has to throw an
- exception itself.
-
- If a ``list`` is passed, its items are treated as validators and must
- all pass.
-
- Validators can be globally disabled and re-enabled using
- `get_run_validators`.
-
- The validator can also be set using decorator notation as shown below.
-
- :type validator: ``callable`` or a ``list`` of ``callable``\\ s.
-
- :param repr: Include this attribute in the generated ``__repr__``
- method. If ``True``, include the attribute; if ``False``, omit it. By
- default, the built-in ``repr()`` function is used. To override how the
- attribute value is formatted, pass a ``callable`` that takes a single
- value and returns a string. Note that the resulting string is used
- as-is, i.e. it will be used directly *instead* of calling ``repr()``
- (the default).
- :type repr: a ``bool`` or a ``callable`` to use a custom function.
- :param bool eq: If ``True`` (default), include this attribute in the
- generated ``__eq__`` and ``__ne__`` methods that check two instances
- for equality.
- :param bool order: If ``True`` (default), include this attribute in the
- generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods.
- :param bool cmp: Setting to ``True`` is equivalent to setting ``eq=True,
- order=True``. Deprecated in favor of *eq* and *order*.
- :param hash: Include this attribute in the generated ``__hash__``
- method. If ``None`` (default), mirror *eq*'s value. This is the
- correct behavior according to the Python spec. Setting this value to
- anything other than ``None`` is *discouraged*.
- :type hash: ``bool`` or ``None``
- :param bool init: Include this attribute in the generated ``__init__``
- method. It is possible to set this to ``False`` and set a default
- value. In that case this attribute is unconditionally initialized
- with the specified default value or factory.
- :param callable converter: `callable` that is called by
- ``attrs``-generated ``__init__`` methods to convert the attribute's value
- to the desired format. It is given the passed-in value, and the
- returned value will be used as the new value of the attribute. The
- value is converted before being passed to the validator, if any.
- :param metadata: An arbitrary mapping, to be used by third-party
- components. See `extending_metadata`.
- :param type: The type of the attribute. In Python 3.6 or greater, the
- preferred method to specify the type is using a variable annotation
- (see `PEP 526 `_).
- This argument is provided for backward compatibility.
- Regardless of the approach used, the type will be stored on
- ``Attribute.type``.
- - Please note that ``attrs`` doesn't do anything with this metadata by - itself. You can use it as part of your own code or for - `static type checking `. - :param kw_only: Make this attribute keyword-only (Python 3+) - in the generated ``__init__`` (if ``init`` is ``False``, this - parameter is ignored). - - .. versionadded:: 15.2.0 *convert* - .. versionadded:: 16.3.0 *metadata* - .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. - .. versionchanged:: 17.1.0 - *hash* is ``None`` and therefore mirrors *eq* by default. - .. versionadded:: 17.3.0 *type* - .. deprecated:: 17.4.0 *convert* - .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated - *convert* to achieve consistency with other noun-based arguments. - .. versionadded:: 18.1.0 - ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. - .. versionadded:: 18.2.0 *kw_only* - .. versionchanged:: 19.2.0 *convert* keyword argument removed - .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. - .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. - .. versionadded:: 19.2.0 *eq* and *order* - """ - eq, order = _determine_eq_order(cmp, eq, order) - - if hash is not None and hash is not True and hash is not False: - raise TypeError( - "Invalid value for hash. Must be True, False, or None." - ) - - if factory is not None: - if default is not NOTHING: - raise ValueError( - "The `default` and `factory` arguments are mutually " - "exclusive." - ) - if not callable(factory): - raise ValueError("The `factory` argument must be a callable.") - default = Factory(factory) - - if metadata is None: - metadata = {} - - return _CountingAttr( - default=default, - validator=validator, - repr=repr, - cmp=None, - hash=hash, - init=init, - converter=converter, - metadata=metadata, - type=type, - kw_only=kw_only, - eq=eq, - order=order, - ) - - -def _make_attr_tuple_class(cls_name, attr_names): - """ - Create a tuple subclass to hold `Attribute`s for an `attrs` class. - - The subclass is a bare tuple with properties for names. - - class MyClassAttributes(tuple): - __slots__ = () - x = property(itemgetter(0)) - """ - attr_class_name = "{}Attributes".format(cls_name) - attr_class_template = [ - "class {}(tuple):".format(attr_class_name), - " __slots__ = ()", - ] - if attr_names: - for i, attr_name in enumerate(attr_names): - attr_class_template.append( - _tuple_property_pat.format(index=i, attr_name=attr_name) - ) - else: - attr_class_template.append(" pass") - globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} - eval(compile("\n".join(attr_class_template), "", "exec"), globs) - - return globs[attr_class_name] - - -# Tuple class for extracted attributes from a class definition. -# `base_attrs` is a subset of `attrs`. -_Attributes = _make_attr_tuple_class( - "_Attributes", - [ - # all attributes to build dunder methods for - "attrs", - # attributes that have been inherited - "base_attrs", - # map inherited attributes to their originating classes - "base_attrs_map", - ], -) - - -def _is_class_var(annot): - """ - Check whether *annot* is a typing.ClassVar. - - The string comparison hack is used to avoid evaluating all string - annotations which would put attrs-based classes at a performance - disadvantage compared to plain old classes. - """ - return str(annot).startswith(_classvar_prefixes) - - -def _get_annotations(cls): - """ - Get annotations for *cls*. 
- """ - anns = getattr(cls, "__annotations__", None) - if anns is None: - return {} - - # Verify that the annotations aren't merely inherited. - for base_cls in cls.__mro__[1:]: - if anns is getattr(base_cls, "__annotations__", None): - return {} - - return anns - - -def _counter_getter(e): - """ - Key function for sorting to avoid re-creating a lambda for every class. - """ - return e[1].counter - - -def _transform_attrs(cls, these, auto_attribs, kw_only): - """ - Transform all `_CountingAttr`s on a class into `Attribute`s. - - If *these* is passed, use that and don't look for them on the class. - - Return an `_Attributes`. - """ - cd = cls.__dict__ - anns = _get_annotations(cls) - - if these is not None: - ca_list = [(name, ca) for name, ca in iteritems(these)] - - if not isinstance(these, ordered_dict): - ca_list.sort(key=_counter_getter) - elif auto_attribs is True: - ca_names = { - name - for name, attr in cd.items() - if isinstance(attr, _CountingAttr) - } - ca_list = [] - annot_names = set() - for attr_name, type in anns.items(): - if _is_class_var(type): - continue - annot_names.add(attr_name) - a = cd.get(attr_name, NOTHING) - if not isinstance(a, _CountingAttr): - if a is NOTHING: - a = attrib() - else: - a = attrib(default=a) - ca_list.append((attr_name, a)) - - unannotated = ca_names - annot_names - if len(unannotated) > 0: - raise UnannotatedAttributeError( - "The following `attr.ib`s lack a type annotation: " - + ", ".join( - sorted(unannotated, key=lambda n: cd.get(n).counter) - ) - + "." - ) - else: - ca_list = sorted( - ( - (name, attr) - for name, attr in cd.items() - if isinstance(attr, _CountingAttr) - ), - key=lambda e: e[1].counter, - ) - - own_attrs = [ - Attribute.from_counting_attr( - name=attr_name, ca=ca, type=anns.get(attr_name) - ) - for attr_name, ca in ca_list - ] - - base_attrs = [] - base_attr_map = {} # A dictionary of base attrs to their classes. - taken_attr_names = {a.name: a for a in own_attrs} - - # Traverse the MRO and collect attributes. - for base_cls in cls.__mro__[1:-1]: - sub_attrs = getattr(base_cls, "__attrs_attrs__", None) - if sub_attrs is not None: - for a in sub_attrs: - prev_a = taken_attr_names.get(a.name) - # Only add an attribute if it hasn't been defined before. This - # allows for overwriting attribute definitions by subclassing. - if prev_a is None: - base_attrs.append(a) - taken_attr_names[a.name] = a - base_attr_map[a.name] = base_cls - - attr_names = [a.name for a in base_attrs + own_attrs] - - AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) - - if kw_only: - own_attrs = [a._assoc(kw_only=True) for a in own_attrs] - base_attrs = [a._assoc(kw_only=True) for a in base_attrs] - - attrs = AttrsClass(base_attrs + own_attrs) - - # Mandatory vs non-mandatory attr order only matters when they are part of - # the __init__ signature and when they aren't kw_only (which are moved to - # the end and can be mandatory or non-mandatory in any order, as they will - # be specified as keyword args anyway). Check the order of those attrs: - had_default = False - for a in (a for a in attrs if a.init is not False and a.kw_only is False): - if had_default is True and a.default is NOTHING: - raise ValueError( - "No mandatory attributes allowed after an attribute with a " - "default value or factory. 
Attribute in question: %r" % (a,) - ) - - if had_default is False and a.default is not NOTHING: - had_default = True - - return _Attributes((attrs, base_attrs, base_attr_map)) - - -def _frozen_setattrs(self, name, value): - """ - Attached to frozen classes as __setattr__. - """ - raise FrozenInstanceError() - - -def _frozen_delattrs(self, name): - """ - Attached to frozen classes as __delattr__. - """ - raise FrozenInstanceError() - - -class _ClassBuilder(object): - """ - Iteratively build *one* class. - """ - - __slots__ = ( - "_cls", - "_cls_dict", - "_attrs", - "_base_names", - "_attr_names", - "_slots", - "_frozen", - "_weakref_slot", - "_cache_hash", - "_has_post_init", - "_delete_attribs", - "_base_attr_map", - "_is_exc", - ) - - def __init__( - self, - cls, - these, - slots, - frozen, - weakref_slot, - auto_attribs, - kw_only, - cache_hash, - is_exc, - ): - attrs, base_attrs, base_map = _transform_attrs( - cls, these, auto_attribs, kw_only - ) - - self._cls = cls - self._cls_dict = dict(cls.__dict__) if slots else {} - self._attrs = attrs - self._base_names = set(a.name for a in base_attrs) - self._base_attr_map = base_map - self._attr_names = tuple(a.name for a in attrs) - self._slots = slots - self._frozen = frozen or _has_frozen_base_class(cls) - self._weakref_slot = weakref_slot - self._cache_hash = cache_hash - self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) - self._delete_attribs = not bool(these) - self._is_exc = is_exc - - self._cls_dict["__attrs_attrs__"] = self._attrs - - if frozen: - self._cls_dict["__setattr__"] = _frozen_setattrs - self._cls_dict["__delattr__"] = _frozen_delattrs - - def __repr__(self): - return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__) - - def build_class(self): - """ - Finalize class based on the accumulated configuration. - - Builder cannot be used after calling this method. - """ - if self._slots is True: - return self._create_slots_class() - else: - return self._patch_original_class() - - def _patch_original_class(self): - """ - Apply accumulated methods and return the class. - """ - cls = self._cls - base_names = self._base_names - - # Clean class of attribute definitions (`attr.ib()`s). - if self._delete_attribs: - for name in self._attr_names: - if ( - name not in base_names - and getattr(cls, name, _sentinel) is not _sentinel - ): - try: - delattr(cls, name) - except AttributeError: - # This can happen if a base class defines a class - # variable and we want to set an attribute with the - # same name by using only a type annotation. - pass - - # Attach our dunder methods. - for name, value in self._cls_dict.items(): - setattr(cls, name, value) - - # Attach __setstate__. This is necessary to clear the hash code - # cache on deserialization. See issue - # https://github.com/python-attrs/attrs/issues/482 . - # Note that this code only handles setstate for dict classes. - # For slotted classes, see similar code in _create_slots_class . - if self._cache_hash: - existing_set_state_method = getattr(cls, "__setstate__", None) - if existing_set_state_method: - raise NotImplementedError( - "Currently you cannot use hash caching if " - "you specify your own __setstate__ method." - "See https://github.com/python-attrs/attrs/issues/494 ." 
- ) - - def cache_hash_set_state(chss_self, _): - # clear hash code cache - setattr(chss_self, _hash_cache_field, None) - - setattr(cls, "__setstate__", cache_hash_set_state) - - return cls - - def _create_slots_class(self): - """ - Build and return a new class with a `__slots__` attribute. - """ - base_names = self._base_names - cd = { - k: v - for k, v in iteritems(self._cls_dict) - if k not in tuple(self._attr_names) + ("__dict__", "__weakref__") - } - - weakref_inherited = False - - # Traverse the MRO to check for an existing __weakref__. - for base_cls in self._cls.__mro__[1:-1]: - if "__weakref__" in getattr(base_cls, "__dict__", ()): - weakref_inherited = True - break - - names = self._attr_names - if ( - self._weakref_slot - and "__weakref__" not in getattr(self._cls, "__slots__", ()) - and "__weakref__" not in names - and not weakref_inherited - ): - names += ("__weakref__",) - - # We only add the names of attributes that aren't inherited. - # Settings __slots__ to inherited attributes wastes memory. - slot_names = [name for name in names if name not in base_names] - if self._cache_hash: - slot_names.append(_hash_cache_field) - cd["__slots__"] = tuple(slot_names) - - qualname = getattr(self._cls, "__qualname__", None) - if qualname is not None: - cd["__qualname__"] = qualname - - # __weakref__ is not writable. - state_attr_names = tuple( - an for an in self._attr_names if an != "__weakref__" - ) - - def slots_getstate(self): - """ - Automatically created by attrs. - """ - return tuple(getattr(self, name) for name in state_attr_names) - - hash_caching_enabled = self._cache_hash - - def slots_setstate(self, state): - """ - Automatically created by attrs. - """ - __bound_setattr = _obj_setattr.__get__(self, Attribute) - for name, value in zip(state_attr_names, state): - __bound_setattr(name, value) - # Clearing the hash code cache on deserialization is needed - # because hash codes can change from run to run. See issue - # https://github.com/python-attrs/attrs/issues/482 . - # Note that this code only handles setstate for slotted classes. - # For dict classes, see similar code in _patch_original_class . - if hash_caching_enabled: - __bound_setattr(_hash_cache_field, None) - - # slots and frozen require __getstate__/__setstate__ to work - cd["__getstate__"] = slots_getstate - cd["__setstate__"] = slots_setstate - - # Create new class based on old class and our methods. - cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) - - # The following is a fix for - # https://github.com/python-attrs/attrs/issues/102. On Python 3, - # if a method mentions `__class__` or uses the no-arg super(), the - # compiler will bake a reference to the class in the method itself - # as `method.__closure__`. Since we replace the class with a - # clone, we rewrite these references so it keeps working. - for item in cls.__dict__.values(): - if isinstance(item, (classmethod, staticmethod)): - # Class- and staticmethods hide their functions inside. - # These might need to be rewritten as well. - closure_cells = getattr(item.__func__, "__closure__", None) - else: - closure_cells = getattr(item, "__closure__", None) - - if not closure_cells: # Catch None or the empty list. 
- continue - for cell in closure_cells: - if cell.cell_contents is self._cls: - set_closure_cell(cell, cls) - - return cls - - def add_repr(self, ns): - self._cls_dict["__repr__"] = self._add_method_dunders( - _make_repr(self._attrs, ns=ns) - ) - return self - - def add_str(self): - repr = self._cls_dict.get("__repr__") - if repr is None: - raise ValueError( - "__str__ can only be generated if a __repr__ exists." - ) - - def __str__(self): - return self.__repr__() - - self._cls_dict["__str__"] = self._add_method_dunders(__str__) - return self - - def make_unhashable(self): - self._cls_dict["__hash__"] = None - return self - - def add_hash(self): - self._cls_dict["__hash__"] = self._add_method_dunders( - _make_hash( - self._cls, - self._attrs, - frozen=self._frozen, - cache_hash=self._cache_hash, - ) - ) - - return self - - def add_init(self): - self._cls_dict["__init__"] = self._add_method_dunders( - _make_init( - self._cls, - self._attrs, - self._has_post_init, - self._frozen, - self._slots, - self._cache_hash, - self._base_attr_map, - self._is_exc, - ) - ) - - return self - - def add_eq(self): - cd = self._cls_dict - - cd["__eq__"], cd["__ne__"] = ( - self._add_method_dunders(meth) - for meth in _make_eq(self._cls, self._attrs) - ) - - return self - - def add_order(self): - cd = self._cls_dict - - cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( - self._add_method_dunders(meth) - for meth in _make_order(self._cls, self._attrs) - ) - - return self - - def _add_method_dunders(self, method): - """ - Add __module__ and __qualname__ to a *method* if possible. - """ - try: - method.__module__ = self._cls.__module__ - except AttributeError: - pass - - try: - method.__qualname__ = ".".join( - (self._cls.__qualname__, method.__name__) - ) - except AttributeError: - pass - - return method - - -_CMP_DEPRECATION = ( - "The usage of `cmp` is deprecated and will be removed on or after " - "2021-06-01. Please use `eq` and `order` instead." -) - - -def _determine_eq_order(cmp, eq, order): - """ - Validate the combination of *cmp*, *eq*, and *order*. Derive the effective - values of eq and order. - """ - if cmp is not None and any((eq is not None, order is not None)): - raise ValueError("Don't mix `cmp` with `eq' and `order`.") - - # cmp takes precedence due to bw-compatibility. - if cmp is not None: - warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=3) - - return cmp, cmp - - # If left None, equality is on and ordering mirrors equality. - if eq is None: - eq = True - - if order is None: - order = eq - - if eq is False and order is True: - raise ValueError("`order` can only be True if `eq` is True too.") - - return eq, order - - -def attrs( - maybe_cls=None, - these=None, - repr_ns=None, - repr=True, - cmp=None, - hash=None, - init=True, - slots=False, - frozen=False, - weakref_slot=True, - str=False, - auto_attribs=False, - kw_only=False, - cache_hash=False, - auto_exc=False, - eq=None, - order=None, -): - r""" - A class decorator that adds `dunder - `_\ -methods according to the - specified attributes using `attr.ib` or the *these* argument. - - :param these: A dictionary of name to `attr.ib` mappings. This is - useful to avoid the definition of your attributes within the class body - because you can't (e.g. if you want to add ``__repr__`` methods to - Django models) or don't want to. - - If *these* is not ``None``, ``attrs`` will *not* search the class body - for attributes and will *not* remove any attributes from it. 
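Aside (not part of the deleted file): a sketch of the *these* mechanism described above; `Coordinates` is an illustrative name.

import attr

class Coordinates(object):
    pass  # the body stays empty; attributes come from *these*

Coordinates = attr.s(these={"x": attr.ib(), "y": attr.ib()})(Coordinates)

c = Coordinates(1, 2)
assert (c.x, c.y) == (1, 2)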
- - If *these* is an ordered dict (`dict` on Python 3.6+, - `collections.OrderedDict` otherwise), the order is deduced from - the order of the attributes inside *these*. Otherwise the order - of the definition of the attributes is used. - - :type these: `dict` of `str` to `attr.ib` - - :param str repr_ns: When using nested classes, there's no way in Python 2 - to automatically detect that. Therefore it's possible to set the - namespace explicitly for a more meaningful ``repr`` output. - :param bool repr: Create a ``__repr__`` method with a human readable - representation of ``attrs`` attributes. - :param bool str: Create a ``__str__`` method that is identical to - ``__repr__``. This is usually not necessary except for - `Exception`\ s. - :param bool eq: If ``True`` or ``None`` (default), add ``__eq__`` and - ``__ne__`` methods that check two instances for equality. - - They compare the instances as if they were tuples of their ``attrs`` - attributes, but only iff the types of both classes are *identical*! - :type eq: `bool` or `None` - :param bool order: If ``True``, add ``__lt__``, ``__le__``, ``__gt__``, - and ``__ge__`` methods that behave like *eq* above and allow instances - to be ordered. If ``None`` (default), mirror the value of *eq*. - :type order: `bool` or `None` - :param cmp: Setting to ``True`` is equivalent to setting ``eq=True, - order=True``. Deprecated in favor of *eq* and *order*, has precedence - over them for backward-compatibility though. Must not be mixed with - *eq* or *order*. - :type cmp: `bool` or `None` - :param hash: If ``None`` (default), the ``__hash__`` method is generated - according to how *eq* and *frozen* are set. - - 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you. - 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to - None, marking it unhashable (which it is). - 3. If *eq* is False, ``__hash__`` will be left untouched meaning the - ``__hash__`` method of the base class will be used (if base class is - ``object``, this means it will fall back to id-based hashing). - - Although not recommended, you can decide for yourself and force - ``attrs`` to create one (e.g. if the class is immutable even though you - didn't freeze it programmatically) by passing ``True`` or not. Both of - these cases are rather special and should be used carefully. - - See our documentation on `hashing`, Python's documentation on - `object.__hash__`, and the `GitHub issue that led to the default \ - behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more - details. - :type hash: ``bool`` or ``None`` - :param bool init: Create a ``__init__`` method that initializes the - ``attrs`` attributes. Leading underscores are stripped for the - argument name. If a ``__attrs_post_init__`` method exists on the - class, it will be called after the class is fully initialized. - :param bool slots: Create a `slotted class <slots>` that's more - memory-efficient. - :param bool frozen: Make instances immutable after initialization. If - someone attempts to modify a frozen instance, - `attr.exceptions.FrozenInstanceError` is raised. - - Please note: - - 1. This is achieved by installing a custom ``__setattr__`` method - on your class, so you can't implement your own. - - 2. True immutability is impossible in Python. - - 3. This *does* have a minor runtime performance `impact - <how-frozen>` when initializing new instances. In other words: - ``__init__`` is slightly slower with ``frozen=True``. - - 4. If a class is frozen, you cannot modify ``self`` in - ``__attrs_post_init__`` or a self-written ``__init__``.
You can - circumvent that limitation by using - ``object.__setattr__(self, "attribute_name", value)``. - - :param bool weakref_slot: Make instances weak-referenceable. This has no - effect unless ``slots`` is also enabled. - :param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes - (Python 3.6 and later only) from the class body. - - In this case, you **must** annotate every field. If ``attrs`` - encounters a field that is set to an `attr.ib` but lacks a type - annotation, an `attr.exceptions.UnannotatedAttributeError` is - raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't - want to set a type. - - If you assign a value to those attributes (e.g. ``x: int = 42``), that - value becomes the default value like if it were passed using - ``attr.ib(default=42)``. Passing an instance of `Factory` also - works as expected. - - Attributes annotated as `typing.ClassVar`, and attributes that are - neither annotated nor set to an `attr.ib` are **ignored**. - - .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/ - :param bool kw_only: Make all attributes keyword-only (Python 3+) - in the generated ``__init__`` (if ``init`` is ``False``, this - parameter is ignored). - :param bool cache_hash: Ensure that the object's hash code is computed - only once and stored on the object. If this is set to ``True``, - hashing must be either explicitly or implicitly enabled for this - class. If the hash code is cached, avoid any reassignments of - fields involved in hash code computation or mutations of the objects - those fields point to after object creation. If such changes occur, - the behavior of the object's hash code is undefined. - :param bool auto_exc: If the class subclasses `BaseException` - (which implicitly includes any subclass of any exception), the - following happens to behave like a well-behaved Python exceptions - class: - - - the values for *eq*, *order*, and *hash* are ignored and the - instances compare and hash by the instance's ids (N.B. ``attrs`` will - *not* remove existing implementations of ``__hash__`` or the equality - methods. It just won't add own ones.), - - all attributes that are either passed into ``__init__`` or have a - default value are additionally available as a tuple in the ``args`` - attribute, - - the value of *str* is ignored leaving ``__str__`` to base classes. - - .. versionadded:: 16.0.0 *slots* - .. versionadded:: 16.1.0 *frozen* - .. versionadded:: 16.3.0 *str* - .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. - .. versionchanged:: 17.1.0 - *hash* supports ``None`` as value which is also the default now. - .. versionadded:: 17.3.0 *auto_attribs* - .. versionchanged:: 18.1.0 - If *these* is passed, no attributes are deleted from the class body. - .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. - .. versionadded:: 18.2.0 *weakref_slot* - .. deprecated:: 18.2.0 - ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a - `DeprecationWarning` if the classes compared are subclasses of - each other. ``__eq`` and ``__ne__`` never tried to compared subclasses - to each other. - .. versionchanged:: 19.2.0 - ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider - subclasses comparable anymore. - .. versionadded:: 18.2.0 *kw_only* - .. versionadded:: 18.2.0 *cache_hash* - .. versionadded:: 19.1.0 *auto_exc* - .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. - .. 
versionadded:: 19.2.0 *eq* and *order* - """ - eq, order = _determine_eq_order(cmp, eq, order) - - def wrap(cls): - - if getattr(cls, "__class__", None) is None: - raise TypeError("attrs only works with new-style classes.") - - is_exc = auto_exc is True and issubclass(cls, BaseException) - - builder = _ClassBuilder( - cls, - these, - slots, - frozen, - weakref_slot, - auto_attribs, - kw_only, - cache_hash, - is_exc, - ) - - if repr is True: - builder.add_repr(repr_ns) - if str is True: - builder.add_str() - if eq is True and not is_exc: - builder.add_eq() - if order is True and not is_exc: - builder.add_order() - - if hash is not True and hash is not False and hash is not None: - # Can't use `hash in` because 1 == True for example. - raise TypeError( - "Invalid value for hash. Must be True, False, or None." - ) - elif hash is False or (hash is None and eq is False) or is_exc: - # Don't do anything. Should fall back to __object__'s __hash__ - # which is by id. - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " hashing must be either explicitly or implicitly " - "enabled." - ) - elif hash is True or (hash is None and eq is True and frozen is True): - # Build a __hash__ if told so, or if it's safe. - builder.add_hash() - else: - # Raise TypeError on attempts to hash. - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " hashing must be either explicitly or implicitly " - "enabled." - ) - builder.make_unhashable() - - if init is True: - builder.add_init() - else: - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " init must be True." - ) - - return builder.build_class() - - # maybe_cls's type depends on the usage of the decorator. It's a class - # if it's used as `@attrs` but ``None`` if used as `@attrs()`. - if maybe_cls is None: - return wrap - else: - return wrap(maybe_cls) - - -_attrs = attrs -""" -Internal alias so we can use it in functions that take an argument called -*attrs*. -""" - - -if PY2: - - def _has_frozen_base_class(cls): - """ - Check whether *cls* has a frozen ancestor by looking at its - __setattr__. - """ - return ( - getattr(cls.__setattr__, "__module__", None) - == _frozen_setattrs.__module__ - and cls.__setattr__.__name__ == _frozen_setattrs.__name__ - ) - - -else: - - def _has_frozen_base_class(cls): - """ - Check whether *cls* has a frozen ancestor by looking at its - __setattr__. - """ - return cls.__setattr__ == _frozen_setattrs - - -def _attrs_to_tuple(obj, attrs): - """ - Create a tuple of all values of *obj*'s *attrs*. - """ - return tuple(getattr(obj, a.name) for a in attrs) - - -def _generate_unique_filename(cls, func_name): - """ - Create a "filename" suitable for a function being generated. - """ - unique_id = uuid.uuid4() - extra = "" - count = 1 - - while True: - unique_filename = "<attrs generated {0} {1}.{2}{3}>".format( - func_name, - cls.__module__, - getattr(cls, "__qualname__", cls.__name__), - extra, - ) - # To handle concurrency we essentially "reserve" our spot in - # the linecache with a dummy line. The caller can then - # set this value correctly. - cache_line = (1, None, (str(unique_id),), unique_filename) - if ( - linecache.cache.setdefault(unique_filename, cache_line) - == cache_line - ): - return unique_filename - - # Looks like this spot is taken. Try again.
- count += 1 - extra = "-{0}".format(count) - - -def _make_hash(cls, attrs, frozen, cache_hash): - attrs = tuple( - a for a in attrs if a.hash is True or (a.hash is None and a.eq is True) - ) - - tab = " " - - unique_filename = _generate_unique_filename(cls, "hash") - type_hash = hash(unique_filename) - - method_lines = ["def __hash__(self):"] - - def append_hash_computation_lines(prefix, indent): - """ - Generate the code for actually computing the hash code. - Below this will either be returned directly or used to compute - a value which is then cached, depending on the value of cache_hash - """ - method_lines.extend( - [indent + prefix + "hash((", indent + " %d," % (type_hash,)] - ) - - for a in attrs: - method_lines.append(indent + " self.%s," % a.name) - - method_lines.append(indent + " ))") - - if cache_hash: - method_lines.append(tab + "if self.%s is None:" % _hash_cache_field) - if frozen: - append_hash_computation_lines( - "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2 - ) - method_lines.append(tab * 2 + ")") # close __setattr__ - else: - append_hash_computation_lines( - "self.%s = " % _hash_cache_field, tab * 2 - ) - method_lines.append(tab + "return self.%s" % _hash_cache_field) - else: - append_hash_computation_lines("return ", tab) - - script = "\n".join(method_lines) - globs = {} - locs = {} - bytecode = compile(script, unique_filename, "exec") - eval(bytecode, globs, locs) - - # In order of debuggers like PDB being able to step through the code, - # we add a fake linecache entry. - linecache.cache[unique_filename] = ( - len(script), - None, - script.splitlines(True), - unique_filename, - ) - - return locs["__hash__"] - - -def _add_hash(cls, attrs): - """ - Add a hash method to *cls*. - """ - cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False) - return cls - - -def __ne__(self, other): - """ - Check equality and either forward a NotImplemented or return the result - negated. - """ - result = self.__eq__(other) - if result is NotImplemented: - return NotImplemented - - return not result - - -def _make_eq(cls, attrs): - attrs = [a for a in attrs if a.eq] - - unique_filename = _generate_unique_filename(cls, "eq") - lines = [ - "def __eq__(self, other):", - " if other.__class__ is not self.__class__:", - " return NotImplemented", - ] - # We can't just do a big self.x = other.x and... clause due to - # irregularities like nan == nan is false but (nan,) == (nan,) is true. - if attrs: - lines.append(" return (") - others = [" ) == ("] - for a in attrs: - lines.append(" self.%s," % (a.name,)) - others.append(" other.%s," % (a.name,)) - - lines += others + [" )"] - else: - lines.append(" return True") - - script = "\n".join(lines) - globs = {} - locs = {} - bytecode = compile(script, unique_filename, "exec") - eval(bytecode, globs, locs) - - # In order of debuggers like PDB being able to step through the code, - # we add a fake linecache entry. - linecache.cache[unique_filename] = ( - len(script), - None, - script.splitlines(True), - unique_filename, - ) - return locs["__eq__"], __ne__ - - -def _make_order(cls, attrs): - attrs = [a for a in attrs if a.order] - - def attrs_to_tuple(obj): - """ - Save us some typing. - """ - return _attrs_to_tuple(obj, attrs) - - def __lt__(self, other): - """ - Automatically created by attrs. - """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) < attrs_to_tuple(other) - - return NotImplemented - - def __le__(self, other): - """ - Automatically created by attrs. 
- """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) <= attrs_to_tuple(other) - - return NotImplemented - - def __gt__(self, other): - """ - Automatically created by attrs. - """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) > attrs_to_tuple(other) - - return NotImplemented - - def __ge__(self, other): - """ - Automatically created by attrs. - """ - if other.__class__ is self.__class__: - return attrs_to_tuple(self) >= attrs_to_tuple(other) - - return NotImplemented - - return __lt__, __le__, __gt__, __ge__ - - -def _add_eq(cls, attrs=None): - """ - Add equality methods to *cls* with *attrs*. - """ - if attrs is None: - attrs = cls.__attrs_attrs__ - - cls.__eq__, cls.__ne__ = _make_eq(cls, attrs) - - return cls - - -_already_repring = threading.local() - - -def _make_repr(attrs, ns): - """ - Make a repr method that includes relevant *attrs*, adding *ns* to the full - name. - """ - - # Figure out which attributes to include, and which function to use to - # format them. The a.repr value can be either bool or a custom callable. - attr_names_with_reprs = tuple( - (a.name, repr if a.repr is True else a.repr) - for a in attrs - if a.repr is not False - ) - - def __repr__(self): - """ - Automatically created by attrs. - """ - try: - working_set = _already_repring.working_set - except AttributeError: - working_set = set() - _already_repring.working_set = working_set - - if id(self) in working_set: - return "..." - real_cls = self.__class__ - if ns is None: - qualname = getattr(real_cls, "__qualname__", None) - if qualname is not None: - class_name = qualname.rsplit(">.", 1)[-1] - else: - class_name = real_cls.__name__ - else: - class_name = ns + "." + real_cls.__name__ - - # Since 'self' remains on the stack (i.e.: strongly referenced) for the - # duration of this call, it's safe to depend on id(...) stability, and - # not need to track the instance and therefore worry about properties - # like weakref- or hash-ability. - working_set.add(id(self)) - try: - result = [class_name, "("] - first = True - for name, attr_repr in attr_names_with_reprs: - if first: - first = False - else: - result.append(", ") - result.extend( - (name, "=", attr_repr(getattr(self, name, NOTHING))) - ) - return "".join(result) + ")" - finally: - working_set.remove(id(self)) - - return __repr__ - - -def _add_repr(cls, ns=None, attrs=None): - """ - Add a repr method to *cls*. - """ - if attrs is None: - attrs = cls.__attrs_attrs__ - - cls.__repr__ = _make_repr(attrs, ns) - return cls - - -def _make_init( - cls, attrs, post_init, frozen, slots, cache_hash, base_attr_map, is_exc -): - attrs = [a for a in attrs if a.init or a.default is not NOTHING] - - unique_filename = _generate_unique_filename(cls, "init") - - script, globs, annotations = _attrs_to_init_script( - attrs, frozen, slots, post_init, cache_hash, base_attr_map, is_exc - ) - locs = {} - bytecode = compile(script, unique_filename, "exec") - attr_dict = dict((a.name, a) for a in attrs) - globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) - - if frozen is True: - # Save the lookup overhead in __init__ if we need to circumvent - # immutability. - globs["_cached_setattr"] = _obj_setattr - - eval(bytecode, globs, locs) - - # In order of debuggers like PDB being able to step through the code, - # we add a fake linecache entry. 
- linecache.cache[unique_filename] = ( - len(script), - None, - script.splitlines(True), - unique_filename, - ) - - __init__ = locs["__init__"] - __init__.__annotations__ = annotations - - return __init__ - - -def fields(cls): - """ - Return the tuple of ``attrs`` attributes for a class. - - The tuple also allows accessing the fields by their names (see below for - examples). - - :param type cls: Class to introspect. - - :raise TypeError: If *cls* is not a class. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - :rtype: tuple (with name accessors) of `attr.Attribute` - - .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields - by name. - """ - if not isclass(cls): - raise TypeError("Passed object must be a class.") - attrs = getattr(cls, "__attrs_attrs__", None) - if attrs is None: - raise NotAnAttrsClassError( - "{cls!r} is not an attrs-decorated class.".format(cls=cls) - ) - return attrs - - -def fields_dict(cls): - """ - Return an ordered dictionary of ``attrs`` attributes for a class, whose - keys are the attribute names. - - :param type cls: Class to introspect. - - :raise TypeError: If *cls* is not a class. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - :rtype: an ordered dict where keys are attribute names and values are - `attr.Attribute`\\ s. This will be a `dict` if it's - naturally ordered like on Python 3.6+ or an - :class:`~collections.OrderedDict` otherwise. - - .. versionadded:: 18.1.0 - """ - if not isclass(cls): - raise TypeError("Passed object must be a class.") - attrs = getattr(cls, "__attrs_attrs__", None) - if attrs is None: - raise NotAnAttrsClassError( - "{cls!r} is not an attrs-decorated class.".format(cls=cls) - ) - return ordered_dict(((a.name, a) for a in attrs)) - - -def validate(inst): - """ - Validate all attributes on *inst* that have a validator. - - Leaves all exceptions through. - - :param inst: Instance of a class with ``attrs`` attributes. - """ - if _config._run_validators is False: - return - - for a in fields(inst.__class__): - v = a.validator - if v is not None: - v(inst, a, getattr(inst, a.name)) - - -def _is_slot_cls(cls): - return "__slots__" in cls.__dict__ - - -def _is_slot_attr(a_name, base_attr_map): - """ - Check if the attribute name comes from a slot class. - """ - return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) - - -def _attrs_to_init_script( - attrs, frozen, slots, post_init, cache_hash, base_attr_map, is_exc -): - """ - Return a script of an initializer for *attrs* and a dict of globals. - - The globals are expected by the generated script. - - If *frozen* is True, we cannot set the attributes directly so we use - a cached ``object.__setattr__``. - """ - lines = [] - any_slot_ancestors = any( - _is_slot_attr(a.name, base_attr_map) for a in attrs - ) - if frozen is True: - if slots is True: - lines.append( - # Circumvent the __setattr__ descriptor to save one lookup per - # assignment. 
- # Note _setattr will be used again below if cache_hash is True - "_setattr = _cached_setattr.__get__(self, self.__class__)" - ) - - def fmt_setter(attr_name, value_var): - return "_setattr('%(attr_name)s', %(value_var)s)" % { - "attr_name": attr_name, - "value_var": value_var, - } - - def fmt_setter_with_converter(attr_name, value_var): - conv_name = _init_converter_pat.format(attr_name) - return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % { - "attr_name": attr_name, - "value_var": value_var, - "conv": conv_name, - } - - else: - # Dict frozen classes assign directly to __dict__. - # But only if the attribute doesn't come from an ancestor slot - # class. - # Note _inst_dict will be used again below if cache_hash is True - lines.append("_inst_dict = self.__dict__") - if any_slot_ancestors: - lines.append( - # Circumvent the __setattr__ descriptor to save one lookup - # per assignment. - "_setattr = _cached_setattr.__get__(self, self.__class__)" - ) - - def fmt_setter(attr_name, value_var): - if _is_slot_attr(attr_name, base_attr_map): - res = "_setattr('%(attr_name)s', %(value_var)s)" % { - "attr_name": attr_name, - "value_var": value_var, - } - else: - res = "_inst_dict['%(attr_name)s'] = %(value_var)s" % { - "attr_name": attr_name, - "value_var": value_var, - } - return res - - def fmt_setter_with_converter(attr_name, value_var): - conv_name = _init_converter_pat.format(attr_name) - if _is_slot_attr(attr_name, base_attr_map): - tmpl = "_setattr('%(attr_name)s', %(c)s(%(value_var)s))" - else: - tmpl = "_inst_dict['%(attr_name)s'] = %(c)s(%(value_var)s)" - return tmpl % { - "attr_name": attr_name, - "value_var": value_var, - "c": conv_name, - } - - else: - # Not frozen. - def fmt_setter(attr_name, value): - return "self.%(attr_name)s = %(value)s" % { - "attr_name": attr_name, - "value": value, - } - - def fmt_setter_with_converter(attr_name, value_var): - conv_name = _init_converter_pat.format(attr_name) - return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % { - "attr_name": attr_name, - "value_var": value_var, - "conv": conv_name, - } - - args = [] - kw_only_args = [] - attrs_to_validate = [] - - # This is a dictionary of names to validator and converter callables. - # Injecting this into __init__ globals lets us avoid lookups. 
- names_for_globals = {} - annotations = {"return": None} - - for a in attrs: - if a.validator: - attrs_to_validate.append(a) - attr_name = a.name - arg_name = a.name.lstrip("_") - has_factory = isinstance(a.default, Factory) - if has_factory and a.default.takes_self: - maybe_self = "self" - else: - maybe_self = "" - if a.init is False: - if has_factory: - init_factory_name = _init_factory_pat.format(a.name) - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, - init_factory_name + "({0})".format(maybe_self), - ) - ) - conv_name = _init_converter_pat.format(a.name) - names_for_globals[conv_name] = a.converter - else: - lines.append( - fmt_setter( - attr_name, - init_factory_name + "({0})".format(maybe_self), - ) - ) - names_for_globals[init_factory_name] = a.default.factory - else: - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, - "attr_dict['{attr_name}'].default".format( - attr_name=attr_name - ), - ) - ) - conv_name = _init_converter_pat.format(a.name) - names_for_globals[conv_name] = a.converter - else: - lines.append( - fmt_setter( - attr_name, - "attr_dict['{attr_name}'].default".format( - attr_name=attr_name - ), - ) - ) - elif a.default is not NOTHING and not has_factory: - arg = "{arg_name}=attr_dict['{attr_name}'].default".format( - arg_name=arg_name, attr_name=attr_name - ) - if a.kw_only: - kw_only_args.append(arg) - else: - args.append(arg) - if a.converter is not None: - lines.append(fmt_setter_with_converter(attr_name, arg_name)) - names_for_globals[ - _init_converter_pat.format(a.name) - ] = a.converter - else: - lines.append(fmt_setter(attr_name, arg_name)) - elif has_factory: - arg = "{arg_name}=NOTHING".format(arg_name=arg_name) - if a.kw_only: - kw_only_args.append(arg) - else: - args.append(arg) - lines.append( - "if {arg_name} is not NOTHING:".format(arg_name=arg_name) - ) - init_factory_name = _init_factory_pat.format(a.name) - if a.converter is not None: - lines.append( - " " + fmt_setter_with_converter(attr_name, arg_name) - ) - lines.append("else:") - lines.append( - " " - + fmt_setter_with_converter( - attr_name, - init_factory_name + "({0})".format(maybe_self), - ) - ) - names_for_globals[ - _init_converter_pat.format(a.name) - ] = a.converter - else: - lines.append(" " + fmt_setter(attr_name, arg_name)) - lines.append("else:") - lines.append( - " " - + fmt_setter( - attr_name, - init_factory_name + "({0})".format(maybe_self), - ) - ) - names_for_globals[init_factory_name] = a.default.factory - else: - if a.kw_only: - kw_only_args.append(arg_name) - else: - args.append(arg_name) - if a.converter is not None: - lines.append(fmt_setter_with_converter(attr_name, arg_name)) - names_for_globals[ - _init_converter_pat.format(a.name) - ] = a.converter - else: - lines.append(fmt_setter(attr_name, arg_name)) - - if a.init is True and a.converter is None and a.type is not None: - annotations[arg_name] = a.type - - if attrs_to_validate: # we can skip this if there are no validators. 
- names_for_globals["_config"] = _config - lines.append("if _config._run_validators is True:") - for a in attrs_to_validate: - val_name = "__attr_validator_{}".format(a.name) - attr_name = "__attr_{}".format(a.name) - lines.append( - " {}(self, {}, self.{})".format(val_name, attr_name, a.name) - ) - names_for_globals[val_name] = a.validator - names_for_globals[attr_name] = a - if post_init: - lines.append("self.__attrs_post_init__()") - - # because this is set only after __attrs_post_init is called, a crash - # will result if post-init tries to access the hash code. This seemed - # preferable to setting this beforehand, in which case alteration to - # field values during post-init combined with post-init accessing the - # hash code would result in silent bugs. - if cache_hash: - if frozen: - if slots: - # if frozen and slots, then _setattr defined above - init_hash_cache = "_setattr('%s', %s)" - else: - # if frozen and not slots, then _inst_dict defined above - init_hash_cache = "_inst_dict['%s'] = %s" - else: - init_hash_cache = "self.%s = %s" - lines.append(init_hash_cache % (_hash_cache_field, "None")) - - # For exceptions we rely on BaseException.__init__ for proper - # initialization. - if is_exc: - vals = ",".join("self." + a.name for a in attrs if a.init) - - lines.append("BaseException.__init__(self, %s)" % (vals,)) - - args = ", ".join(args) - if kw_only_args: - if PY2: - raise PythonTooOldError( - "Keyword-only arguments only work on Python 3 and later." - ) - - args += "{leading_comma}*, {kw_only_args}".format( - leading_comma=", " if args else "", - kw_only_args=", ".join(kw_only_args), - ) - return ( - """\ -def __init__(self, {args}): - {lines} -""".format( - args=args, lines="\n ".join(lines) if lines else "pass" - ), - names_for_globals, - annotations, - ) - - -class Attribute(object): - """ - *Read-only* representation of an attribute. - - :attribute name: The name of the attribute. - - Plus *all* arguments of `attr.ib` (except for ``factory`` - which is only syntactic sugar for ``default=Factory(...)``. - - For the version history of the fields, see `attr.ib`. - """ - - __slots__ = ( - "name", - "default", - "validator", - "repr", - "eq", - "order", - "hash", - "init", - "metadata", - "type", - "converter", - "kw_only", - ) - - def __init__( - self, - name, - default, - validator, - repr, - cmp, # XXX: unused, remove along with other cmp code. - hash, - init, - metadata=None, - type=None, - converter=None, - kw_only=False, - eq=None, - order=None, - ): - eq, order = _determine_eq_order(cmp, eq, order) - - # Cache this descriptor here to speed things up later. - bound_setattr = _obj_setattr.__get__(self, Attribute) - - # Despite the big red warning, people *do* instantiate `Attribute` - # themselves. - bound_setattr("name", name) - bound_setattr("default", default) - bound_setattr("validator", validator) - bound_setattr("repr", repr) - bound_setattr("eq", eq) - bound_setattr("order", order) - bound_setattr("hash", hash) - bound_setattr("init", init) - bound_setattr("converter", converter) - bound_setattr( - "metadata", - ( - metadata_proxy(metadata) - if metadata - else _empty_metadata_singleton - ), - ) - bound_setattr("type", type) - bound_setattr("kw_only", kw_only) - - def __setattr__(self, name, value): - raise FrozenInstanceError() - - @classmethod - def from_counting_attr(cls, name, ca, type=None): - # type holds the annotated value. 
deal with conflicts: - if type is None: - type = ca.type - elif ca.type is not None: - raise ValueError( - "Type annotation and type argument cannot both be present" - ) - inst_dict = { - k: getattr(ca, k) - for k in Attribute.__slots__ - if k - not in ( - "name", - "validator", - "default", - "type", - ) # exclude methods and deprecated alias - } - return cls( - name=name, - validator=ca._validator, - default=ca._default, - type=type, - cmp=None, - **inst_dict - ) - - @property - def cmp(self): - """ - Simulate the presence of a cmp attribute and warn. - """ - warnings.warn(_CMP_DEPRECATION, DeprecationWarning, stacklevel=2) - - return self.eq and self.order - - # Don't use attr.assoc since fields(Attribute) doesn't work - def _assoc(self, **changes): - """ - Copy *self* and apply *changes*. - """ - new = copy.copy(self) - - new._setattrs(changes.items()) - - return new - - # Don't use _add_pickle since fields(Attribute) doesn't work - def __getstate__(self): - """ - Play nice with pickle. - """ - return tuple( - getattr(self, name) if name != "metadata" else dict(self.metadata) - for name in self.__slots__ - ) - - def __setstate__(self, state): - """ - Play nice with pickle. - """ - self._setattrs(zip(self.__slots__, state)) - - def _setattrs(self, name_values_pairs): - bound_setattr = _obj_setattr.__get__(self, Attribute) - for name, value in name_values_pairs: - if name != "metadata": - bound_setattr(name, value) - else: - bound_setattr( - name, - metadata_proxy(value) - if value - else _empty_metadata_singleton, - ) - - -_a = [ - Attribute( - name=name, - default=NOTHING, - validator=None, - repr=True, - cmp=None, - eq=True, - order=False, - hash=(name != "metadata"), - init=True, - ) - for name in Attribute.__slots__ -] - -Attribute = _add_hash( - _add_eq(_add_repr(Attribute, attrs=_a), attrs=_a), - attrs=[a for a in _a if a.hash], -) - - -class _CountingAttr(object): - """ - Intermediate representation of attributes that uses a counter to preserve - the order in which the attributes have been defined. - - *Internal* data structure of the attrs library. Running into is most - likely the result of a bug like a forgotten `@attr.s` decorator. - """ - - __slots__ = ( - "counter", - "_default", - "repr", - "eq", - "order", - "hash", - "init", - "metadata", - "_validator", - "converter", - "type", - "kw_only", - ) - __attrs_attrs__ = tuple( - Attribute( - name=name, - default=NOTHING, - validator=None, - repr=True, - cmp=None, - hash=True, - init=True, - kw_only=False, - eq=True, - order=False, - ) - for name in ( - "counter", - "_default", - "repr", - "eq", - "order", - "hash", - "init", - ) - ) + ( - Attribute( - name="metadata", - default=None, - validator=None, - repr=True, - cmp=None, - hash=False, - init=True, - kw_only=False, - eq=True, - order=False, - ), - ) - cls_counter = 0 - - def __init__( - self, - default, - validator, - repr, - cmp, # XXX: unused, remove along with cmp - hash, - init, - converter, - metadata, - type, - kw_only, - eq, - order, - ): - _CountingAttr.cls_counter += 1 - self.counter = _CountingAttr.cls_counter - self._default = default - # If validator is a list/tuple, wrap it using helper validator. 
- if validator and isinstance(validator, (list, tuple)): - self._validator = and_(*validator) - else: - self._validator = validator - self.repr = repr - self.eq = eq - self.order = order - self.hash = hash - self.init = init - self.converter = converter - self.metadata = metadata - self.type = type - self.kw_only = kw_only - - def validator(self, meth): - """ - Decorator that adds *meth* to the list of validators. - - Returns *meth* unchanged. - - .. versionadded:: 17.1.0 - """ - if self._validator is None: - self._validator = meth - else: - self._validator = and_(self._validator, meth) - return meth - - def default(self, meth): - """ - Decorator that allows to set the default for an attribute. - - Returns *meth* unchanged. - - :raises DefaultAlreadySetError: If default has been set before. - - .. versionadded:: 17.1.0 - """ - if self._default is not NOTHING: - raise DefaultAlreadySetError() - - self._default = Factory(meth, takes_self=True) - - return meth - - -_CountingAttr = _add_eq(_add_repr(_CountingAttr)) - - -@attrs(slots=True, init=False, hash=True) -class Factory(object): - """ - Stores a factory callable. - - If passed as the default value to `attr.ib`, the factory is used to - generate a new value. - - :param callable factory: A callable that takes either none or exactly one - mandatory positional argument depending on *takes_self*. - :param bool takes_self: Pass the partially initialized instance that is - being initialized as a positional argument. - - .. versionadded:: 17.1.0 *takes_self* - """ - - factory = attrib() - takes_self = attrib() - - def __init__(self, factory, takes_self=False): - """ - `Factory` is part of the default machinery so if we want a default - value here, we have to implement it ourselves. - """ - self.factory = factory - self.takes_self = takes_self - - -def make_class(name, attrs, bases=(object,), **attributes_arguments): - """ - A quick way to create a new class called *name* with *attrs*. - - :param name: The name for the new class. - :type name: str - - :param attrs: A list of names or a dictionary of mappings of names to - attributes. - - If *attrs* is a list or an ordered dict (`dict` on Python 3.6+, - `collections.OrderedDict` otherwise), the order is deduced from - the order of the names or attributes inside *attrs*. Otherwise the - order of the definition of the attributes is used. - :type attrs: `list` or `dict` - - :param tuple bases: Classes that the new class will subclass. - - :param attributes_arguments: Passed unmodified to `attr.s`. - - :return: A new class with *attrs*. - :rtype: type - - .. versionadded:: 17.1.0 *bases* - .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. - """ - if isinstance(attrs, dict): - cls_dict = attrs - elif isinstance(attrs, (list, tuple)): - cls_dict = dict((a, attrib()) for a in attrs) - else: - raise TypeError("attrs argument must be a dict or a list.") - - post_init = cls_dict.pop("__attrs_post_init__", None) - type_ = type( - name, - bases, - {} if post_init is None else {"__attrs_post_init__": post_init}, - ) - # For pickling to work, the __module__ variable needs to be set to the - # frame where the class is created. Bypass this step in environments where - # sys._getframe is not defined (Jython for example) or sys._getframe is not - # defined for arguments greater than 0 (IronPython). 
- try: - type_.__module__ = sys._getframe(1).f_globals.get( - "__name__", "__main__" - ) - except (AttributeError, ValueError): - pass - - # We do it here for proper warnings with meaningful stacklevel. - cmp = attributes_arguments.pop("cmp", None) - attributes_arguments["eq"], attributes_arguments[ - "order" - ] = _determine_eq_order( - cmp, attributes_arguments.get("eq"), attributes_arguments.get("order") - ) - - return _attrs(these=cls_dict, **attributes_arguments)(type_) - - -# These are required by within this module so we define them here and merely -# import into .validators. - - -@attrs(slots=True, hash=True) -class _AndValidator(object): - """ - Compose many validators to a single one. - """ - - _validators = attrib() - - def __call__(self, inst, attr, value): - for v in self._validators: - v(inst, attr, value) - - -def and_(*validators): - """ - A validator that composes multiple validators into one. - - When called on a value, it runs all wrapped validators. - - :param validators: Arbitrary number of validators. - :type validators: callables - - .. versionadded:: 17.1.0 - """ - vals = [] - for validator in validators: - vals.extend( - validator._validators - if isinstance(validator, _AndValidator) - else [validator] - ) - - return _AndValidator(tuple(vals)) diff --git a/venv/lib/python3.8/site-packages/attr/_version_info.py b/venv/lib/python3.8/site-packages/attr/_version_info.py deleted file mode 100644 index 014e78a1..00000000 --- a/venv/lib/python3.8/site-packages/attr/_version_info.py +++ /dev/null @@ -1,85 +0,0 @@ -from __future__ import absolute_import, division, print_function - -from functools import total_ordering - -from ._funcs import astuple -from ._make import attrib, attrs - - -@total_ordering -@attrs(eq=False, order=False, slots=True, frozen=True) -class VersionInfo(object): - """ - A version object that can be compared to tuple of length 1--4: - - >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) - True - >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) - True - >>> vi = attr.VersionInfo(19, 2, 0, "final") - >>> vi < (19, 1, 1) - False - >>> vi < (19,) - False - >>> vi == (19, 2,) - True - >>> vi == (19, 2, 1) - False - - .. versionadded:: 19.2 - """ - - year = attrib(type=int) - minor = attrib(type=int) - micro = attrib(type=int) - releaselevel = attrib(type=str) - - @classmethod - def _from_version_string(cls, s): - """ - Parse *s* and return a _VersionInfo. - """ - v = s.split(".") - if len(v) == 3: - v.append("final") - - return cls( - year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] - ) - - def _ensure_tuple(self, other): - """ - Ensure *other* is a tuple of a valid length. - - Returns a possibly transformed *other* and ourselves as a tuple of - the same length as *other*. - """ - - if self.__class__ is other.__class__: - other = astuple(other) - - if not isinstance(other, tuple): - raise NotImplementedError - - if not (1 <= len(other) <= 4): - raise NotImplementedError - - return astuple(self)[: len(other)], other - - def __eq__(self, other): - try: - us, them = self._ensure_tuple(other) - except NotImplementedError: - return NotImplemented - - return us == them - - def __lt__(self, other): - try: - us, them = self._ensure_tuple(other) - except NotImplementedError: - return NotImplemented - - # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't - # have to do anything special with releaselevel for now. 
- return us < them diff --git a/venv/lib/python3.8/site-packages/attr/_version_info.pyi b/venv/lib/python3.8/site-packages/attr/_version_info.pyi deleted file mode 100644 index 45ced086..00000000 --- a/venv/lib/python3.8/site-packages/attr/_version_info.pyi +++ /dev/null @@ -1,9 +0,0 @@ -class VersionInfo: - @property - def year(self) -> int: ... - @property - def minor(self) -> int: ... - @property - def micro(self) -> int: ... - @property - def releaselevel(self) -> str: ... diff --git a/venv/lib/python3.8/site-packages/attr/converters.py b/venv/lib/python3.8/site-packages/attr/converters.py deleted file mode 100644 index 85928978..00000000 --- a/venv/lib/python3.8/site-packages/attr/converters.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -Commonly useful converters. -""" - -from __future__ import absolute_import, division, print_function - -from ._make import NOTHING, Factory - - -def optional(converter): - """ - A converter that allows an attribute to be optional. An optional attribute - is one which can be set to ``None``. - - :param callable converter: the converter that is used for non-``None`` - values. - - .. versionadded:: 17.1.0 - """ - - def optional_converter(val): - if val is None: - return None - return converter(val) - - return optional_converter - - -def default_if_none(default=NOTHING, factory=None): - """ - A converter that allows to replace ``None`` values by *default* or the - result of *factory*. - - :param default: Value to be used if ``None`` is passed. Passing an instance - of `attr.Factory` is supported, however the ``takes_self`` option - is *not*. - :param callable factory: A callable that takes not parameters whose result - is used if ``None`` is passed. - - :raises TypeError: If **neither** *default* or *factory* is passed. - :raises TypeError: If **both** *default* and *factory* are passed. - :raises ValueError: If an instance of `attr.Factory` is passed with - ``takes_self=True``. - - .. versionadded:: 18.2.0 - """ - if default is NOTHING and factory is None: - raise TypeError("Must pass either `default` or `factory`.") - - if default is not NOTHING and factory is not None: - raise TypeError( - "Must pass either `default` or `factory` but not both." - ) - - if factory is not None: - default = Factory(factory) - - if isinstance(default, Factory): - if default.takes_self: - raise ValueError( - "`takes_self` is not supported by default_if_none." - ) - - def default_if_none_converter(val): - if val is not None: - return val - - return default.factory() - - else: - - def default_if_none_converter(val): - if val is not None: - return val - - return default - - return default_if_none_converter diff --git a/venv/lib/python3.8/site-packages/attr/converters.pyi b/venv/lib/python3.8/site-packages/attr/converters.pyi deleted file mode 100644 index 63b2a386..00000000 --- a/venv/lib/python3.8/site-packages/attr/converters.pyi +++ /dev/null @@ -1,12 +0,0 @@ -from typing import TypeVar, Optional, Callable, overload -from . import _ConverterType - -_T = TypeVar("_T") - -def optional( - converter: _ConverterType[_T] -) -> _ConverterType[Optional[_T]]: ... -@overload -def default_if_none(default: _T) -> _ConverterType[_T]: ... -@overload -def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType[_T]: ... 
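Aside (not part of the deleted file): a sketch wiring the two converters defined above into `attr.ib`; `C` is an illustrative name.

import attr
from attr.converters import default_if_none, optional

@attr.s
class C(object):
    a = attr.ib(converter=optional(int))                  # None passes through
    b = attr.ib(converter=default_if_none(factory=list))  # None becomes []

c = C(a=None, b=None)
assert c.a is None and c.b == []
assert C(a="3", b=[1]).a == 3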
diff --git a/venv/lib/python3.8/site-packages/attr/exceptions.py b/venv/lib/python3.8/site-packages/attr/exceptions.py deleted file mode 100644 index d1b76185..00000000 --- a/venv/lib/python3.8/site-packages/attr/exceptions.py +++ /dev/null @@ -1,74 +0,0 @@ -from __future__ import absolute_import, division, print_function - - -class FrozenInstanceError(AttributeError): - """ - A frozen/immutable instance has been attempted to be modified. - - It mirrors the behavior of ``namedtuples`` by using the same error message - and subclassing `AttributeError`. - - .. versionadded:: 16.1.0 - """ - - msg = "can't set attribute" - args = [msg] - - -class AttrsAttributeNotFoundError(ValueError): - """ - An ``attrs`` function couldn't find an attribute that the user asked for. - - .. versionadded:: 16.2.0 - """ - - -class NotAnAttrsClassError(ValueError): - """ - A non-``attrs`` class has been passed into an ``attrs`` function. - - .. versionadded:: 16.2.0 - """ - - -class DefaultAlreadySetError(RuntimeError): - """ - A default has been set using ``attr.ib()`` and is attempted to be reset - using the decorator. - - .. versionadded:: 17.1.0 - """ - - -class UnannotatedAttributeError(RuntimeError): - """ - A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type - annotation. - - .. versionadded:: 17.3.0 - """ - - -class PythonTooOldError(RuntimeError): - """ - An ``attrs`` feature requiring a more recent python version has been used. - - .. versionadded:: 18.2.0 - """ - - -class NotCallableError(TypeError): - """ - A ``attr.ib()`` requiring a callable has been set with a value - that is not callable. - - .. versionadded:: 19.2.0 - """ - - def __init__(self, msg, value): - super(TypeError, self).__init__(msg, value) - self.msg = msg - self.value = value - - def __str__(self): - return str(self.msg) diff --git a/venv/lib/python3.8/site-packages/attr/exceptions.pyi b/venv/lib/python3.8/site-packages/attr/exceptions.pyi deleted file mode 100644 index 736fde2e..00000000 --- a/venv/lib/python3.8/site-packages/attr/exceptions.pyi +++ /dev/null @@ -1,15 +0,0 @@ -from typing import Any - -class FrozenInstanceError(AttributeError): - msg: str = ... - -class AttrsAttributeNotFoundError(ValueError): ... -class NotAnAttrsClassError(ValueError): ... -class DefaultAlreadySetError(RuntimeError): ... -class UnannotatedAttributeError(RuntimeError): ... -class PythonTooOldError(RuntimeError): ... - -class NotCallableError(TypeError): - msg: str = ... - value: Any = ... - def __init__(self, msg: str, value: Any) -> None: ... diff --git a/venv/lib/python3.8/site-packages/attr/filters.py b/venv/lib/python3.8/site-packages/attr/filters.py deleted file mode 100644 index dc47e8fa..00000000 --- a/venv/lib/python3.8/site-packages/attr/filters.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Commonly useful filters for `attr.asdict`. -""" - -from __future__ import absolute_import, division, print_function - -from ._compat import isclass -from ._make import Attribute - - -def _split_what(what): - """ - Returns a tuple of `frozenset`s of classes and attributes. - """ - return ( - frozenset(cls for cls in what if isclass(cls)), - frozenset(cls for cls in what if isinstance(cls, Attribute)), - ) - - -def include(*what): - """ - Whitelist *what*. - - :param what: What to whitelist. 
- :type what: `list` of `type` or `attr.Attribute`\\ s - - :rtype: `callable` - """ - cls, attrs = _split_what(what) - - def include_(attribute, value): - return value.__class__ in cls or attribute in attrs - - return include_ - - -def exclude(*what): - """ - Blacklist *what*. - - :param what: What to blacklist. - :type what: `list` of classes or `attr.Attribute`\\ s. - - :rtype: `callable` - """ - cls, attrs = _split_what(what) - - def exclude_(attribute, value): - return value.__class__ not in cls and attribute not in attrs - - return exclude_ diff --git a/venv/lib/python3.8/site-packages/attr/filters.pyi b/venv/lib/python3.8/site-packages/attr/filters.pyi deleted file mode 100644 index 68368fe2..00000000 --- a/venv/lib/python3.8/site-packages/attr/filters.pyi +++ /dev/null @@ -1,5 +0,0 @@ -from typing import Union, Any -from . import Attribute, _FilterType - -def include(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ... -def exclude(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ... diff --git a/venv/lib/python3.8/site-packages/attr/py.typed b/venv/lib/python3.8/site-packages/attr/py.typed deleted file mode 100644 index e69de29b..00000000 diff --git a/venv/lib/python3.8/site-packages/attr/validators.py b/venv/lib/python3.8/site-packages/attr/validators.py deleted file mode 100644 index 839d310c..00000000 --- a/venv/lib/python3.8/site-packages/attr/validators.py +++ /dev/null @@ -1,378 +0,0 @@ -""" -Commonly useful validators. -""" - -from __future__ import absolute_import, division, print_function - -import re - -from ._make import _AndValidator, and_, attrib, attrs -from .exceptions import NotCallableError - - -__all__ = [ - "and_", - "deep_iterable", - "deep_mapping", - "in_", - "instance_of", - "is_callable", - "matches_re", - "optional", - "provides", -] - - -@attrs(repr=False, slots=True, hash=True) -class _InstanceOfValidator(object): - type = attrib() - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if not isinstance(value, self.type): - raise TypeError( - "'{name}' must be {type!r} (got {value!r} that is a " - "{actual!r}).".format( - name=attr.name, - type=self.type, - actual=value.__class__, - value=value, - ), - attr, - self.type, - value, - ) - - def __repr__(self): - return "".format( - type=self.type - ) - - -def instance_of(type): - """ - A validator that raises a `TypeError` if the initializer is called - with a wrong type for this particular attribute (checks are performed using - `isinstance` therefore it's also valid to pass a tuple of types). - - :param type: The type to check for. - :type type: type or tuple of types - - :raises TypeError: With a human readable error message, the attribute - (of type `attr.Attribute`), the expected type, and the value it - got. - """ - return _InstanceOfValidator(type) - - -@attrs(repr=False, frozen=True) -class _MatchesReValidator(object): - regex = attrib() - flags = attrib() - match_func = attrib() - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. 
- """ - if not self.match_func(value): - raise ValueError( - "'{name}' must match regex {regex!r}" - " ({value!r} doesn't)".format( - name=attr.name, regex=self.regex.pattern, value=value - ), - attr, - self.regex, - value, - ) - - def __repr__(self): - return "".format( - regex=self.regex - ) - - -def matches_re(regex, flags=0, func=None): - r""" - A validator that raises `ValueError` if the initializer is called - with a string that doesn't match *regex*. - - :param str regex: a regex string to match against - :param int flags: flags that will be passed to the underlying re function - (default 0) - :param callable func: which underlying `re` function to call (options - are `re.fullmatch`, `re.search`, `re.match`, default - is ``None`` which means either `re.fullmatch` or an emulation of - it on Python 2). For performance reasons, they won't be used directly - but on a pre-`re.compile`\ ed pattern. - - .. versionadded:: 19.2.0 - """ - fullmatch = getattr(re, "fullmatch", None) - valid_funcs = (fullmatch, None, re.search, re.match) - if func not in valid_funcs: - raise ValueError( - "'func' must be one of %s." - % ( - ", ".join( - sorted( - e and e.__name__ or "None" for e in set(valid_funcs) - ) - ), - ) - ) - - pattern = re.compile(regex, flags) - if func is re.match: - match_func = pattern.match - elif func is re.search: - match_func = pattern.search - else: - if fullmatch: - match_func = pattern.fullmatch - else: - pattern = re.compile(r"(?:{})\Z".format(regex), flags) - match_func = pattern.match - - return _MatchesReValidator(pattern, flags, match_func) - - -@attrs(repr=False, slots=True, hash=True) -class _ProvidesValidator(object): - interface = attrib() - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if not self.interface.providedBy(value): - raise TypeError( - "'{name}' must provide {interface!r} which {value!r} " - "doesn't.".format( - name=attr.name, interface=self.interface, value=value - ), - attr, - self.interface, - value, - ) - - def __repr__(self): - return "".format( - interface=self.interface - ) - - -def provides(interface): - """ - A validator that raises a `TypeError` if the initializer is called - with an object that does not provide the requested *interface* (checks are - performed using ``interface.providedBy(value)`` (see `zope.interface - `_). - - :param zope.interface.Interface interface: The interface to check for. - - :raises TypeError: With a human readable error message, the attribute - (of type `attr.Attribute`), the expected interface, and the - value it got. - """ - return _ProvidesValidator(interface) - - -@attrs(repr=False, slots=True, hash=True) -class _OptionalValidator(object): - validator = attrib() - - def __call__(self, inst, attr, value): - if value is None: - return - - self.validator(inst, attr, value) - - def __repr__(self): - return "".format( - what=repr(self.validator) - ) - - -def optional(validator): - """ - A validator that makes an attribute optional. An optional attribute is one - which can be set to ``None`` in addition to satisfying the requirements of - the sub-validator. - - :param validator: A validator (or a list of validators) that is used for - non-``None`` values. - :type validator: callable or `list` of callables. - - .. versionadded:: 15.1.0 - .. versionchanged:: 17.1.0 *validator* can be a list of validators. 
- """ - if isinstance(validator, list): - return _OptionalValidator(_AndValidator(validator)) - return _OptionalValidator(validator) - - -@attrs(repr=False, slots=True, hash=True) -class _InValidator(object): - options = attrib() - - def __call__(self, inst, attr, value): - try: - in_options = value in self.options - except TypeError: # e.g. `1 in "abc"` - in_options = False - - if not in_options: - raise ValueError( - "'{name}' must be in {options!r} (got {value!r})".format( - name=attr.name, options=self.options, value=value - ) - ) - - def __repr__(self): - return "".format( - options=self.options - ) - - -def in_(options): - """ - A validator that raises a `ValueError` if the initializer is called - with a value that does not belong in the options provided. The check is - performed using ``value in options``. - - :param options: Allowed options. - :type options: list, tuple, `enum.Enum`, ... - - :raises ValueError: With a human readable error message, the attribute (of - type `attr.Attribute`), the expected options, and the value it - got. - - .. versionadded:: 17.1.0 - """ - return _InValidator(options) - - -@attrs(repr=False, slots=False, hash=True) -class _IsCallableValidator(object): - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if not callable(value): - message = ( - "'{name}' must be callable " - "(got {value!r} that is a {actual!r})." - ) - raise NotCallableError( - msg=message.format( - name=attr.name, value=value, actual=value.__class__ - ), - value=value, - ) - - def __repr__(self): - return "" - - -def is_callable(): - """ - A validator that raises a `attr.exceptions.NotCallableError` if the - initializer is called with a value for this particular attribute - that is not callable. - - .. versionadded:: 19.1.0 - - :raises `attr.exceptions.NotCallableError`: With a human readable error - message containing the attribute (`attr.Attribute`) name, - and the value it got. - """ - return _IsCallableValidator() - - -@attrs(repr=False, slots=True, hash=True) -class _DeepIterable(object): - member_validator = attrib(validator=is_callable()) - iterable_validator = attrib( - default=None, validator=optional(is_callable()) - ) - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if self.iterable_validator is not None: - self.iterable_validator(inst, attr, value) - - for member in value: - self.member_validator(inst, attr, member) - - def __repr__(self): - iterable_identifier = ( - "" - if self.iterable_validator is None - else " {iterable!r}".format(iterable=self.iterable_validator) - ) - return ( - "" - ).format( - iterable_identifier=iterable_identifier, - member=self.member_validator, - ) - - -def deep_iterable(member_validator, iterable_validator=None): - """ - A validator that performs deep validation of an iterable. - - :param member_validator: Validator to apply to iterable members - :param iterable_validator: Validator to apply to iterable itself - (optional) - - .. 
versionadded:: 19.1.0 - - :raises TypeError: if any sub-validators fail - """ - return _DeepIterable(member_validator, iterable_validator) - - -@attrs(repr=False, slots=True, hash=True) -class _DeepMapping(object): - key_validator = attrib(validator=is_callable()) - value_validator = attrib(validator=is_callable()) - mapping_validator = attrib(default=None, validator=optional(is_callable())) - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if self.mapping_validator is not None: - self.mapping_validator(inst, attr, value) - - for key in value: - self.key_validator(inst, attr, key) - self.value_validator(inst, attr, value[key]) - - def __repr__(self): - return ( - "" - ).format(key=self.key_validator, value=self.value_validator) - - -def deep_mapping(key_validator, value_validator, mapping_validator=None): - """ - A validator that performs deep validation of a dictionary. - - :param key_validator: Validator to apply to dictionary keys - :param value_validator: Validator to apply to dictionary values - :param mapping_validator: Validator to apply to top-level mapping - attribute (optional) - - .. versionadded:: 19.1.0 - - :raises TypeError: if any sub-validators fail - """ - return _DeepMapping(key_validator, value_validator, mapping_validator) diff --git a/venv/lib/python3.8/site-packages/attr/validators.pyi b/venv/lib/python3.8/site-packages/attr/validators.pyi deleted file mode 100644 index 9a22abb1..00000000 --- a/venv/lib/python3.8/site-packages/attr/validators.pyi +++ /dev/null @@ -1,66 +0,0 @@ -from typing import ( - Container, - List, - Union, - TypeVar, - Type, - Any, - Optional, - Tuple, - Iterable, - Mapping, - Callable, - Match, - AnyStr, - overload, -) -from . import _ValidatorType - -_T = TypeVar("_T") -_T1 = TypeVar("_T1") -_T2 = TypeVar("_T2") -_T3 = TypeVar("_T3") -_I = TypeVar("_I", bound=Iterable) -_K = TypeVar("_K") -_V = TypeVar("_V") -_M = TypeVar("_M", bound=Mapping) - -# To be more precise on instance_of use some overloads. -# If there are more than 3 items in the tuple then we fall back to Any -@overload -def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ... -@overload -def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ... -@overload -def instance_of( - type: Tuple[Type[_T1], Type[_T2]] -) -> _ValidatorType[Union[_T1, _T2]]: ... -@overload -def instance_of( - type: Tuple[Type[_T1], Type[_T2], Type[_T3]] -) -> _ValidatorType[Union[_T1, _T2, _T3]]: ... -@overload -def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ... -def provides(interface: Any) -> _ValidatorType[Any]: ... -def optional( - validator: Union[_ValidatorType[_T], List[_ValidatorType[_T]]] -) -> _ValidatorType[Optional[_T]]: ... -def in_(options: Container[_T]) -> _ValidatorType[_T]: ... -def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... -def matches_re( - regex: AnyStr, - flags: int = ..., - func: Optional[ - Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]] - ] = ..., -) -> _ValidatorType[AnyStr]: ... -def deep_iterable( - member_validator: _ValidatorType[_T], - iterable_validator: Optional[_ValidatorType[_I]] = ..., -) -> _ValidatorType[_I]: ... -def deep_mapping( - key_validator: _ValidatorType[_K], - value_validator: _ValidatorType[_V], - mapping_validator: Optional[_ValidatorType[_M]] = ..., -) -> _ValidatorType[_M]: ... -def is_callable() -> _ValidatorType[_T]: ... 
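As a quick illustration of how the validators defined above compose on a class, here is a minimal sketch; the `User` class and its field names are invented for the example:

```python
# A minimal sketch, assuming attrs 19.3.0; `User` and its fields are
# illustrative names, not part of the package.
import attr
from attr.validators import deep_iterable, in_, instance_of, matches_re


@attr.s
class User(object):
    name = attr.ib(validator=instance_of(str))
    # Checked with re.fullmatch against the compiled pattern.
    email = attr.ib(validator=matches_re(r"[^@]+@[^@]+"))
    role = attr.ib(validator=in_(["admin", "user"]))
    # Outer check on the list itself, inner check on each member.
    scores = attr.ib(
        validator=deep_iterable(
            member_validator=instance_of(int),
            iterable_validator=instance_of(list),
        )
    )


User("Ada", "ada@example.com", "admin", [1, 2, 3])  # passes
# User("Ada", "not an email", "admin", [])  -> raises ValueError
```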
diff --git a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/INSTALLER b/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/INSTALLER deleted file mode 100644 index a1b589e3..00000000 --- a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/LICENSE b/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/LICENSE deleted file mode 100644 index 7ae3df93..00000000 --- a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Hynek Schlawack - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/METADATA b/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/METADATA deleted file mode 100644 index a1064151..00000000 --- a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/METADATA +++ /dev/null @@ -1,229 +0,0 @@ -Metadata-Version: 2.1 -Name: attrs -Version: 19.3.0 -Summary: Classes Without Boilerplate -Home-page: https://www.attrs.org/ -Author: Hynek Schlawack -Author-email: hs@ox.cx -Maintainer: Hynek Schlawack -Maintainer-email: hs@ox.cx -License: MIT -Project-URL: Documentation, https://www.attrs.org/ -Project-URL: Bug Tracker, https://github.com/python-attrs/attrs/issues -Project-URL: Source Code, https://github.com/python-attrs/attrs -Keywords: class,attribute,boilerplate -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: Natural Language :: English -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* -Description-Content-Type: text/x-rst -Provides-Extra: azure-pipelines -Requires-Dist: coverage 
; extra == 'azure-pipelines' -Requires-Dist: hypothesis ; extra == 'azure-pipelines' -Requires-Dist: pympler ; extra == 'azure-pipelines' -Requires-Dist: pytest (>=4.3.0) ; extra == 'azure-pipelines' -Requires-Dist: six ; extra == 'azure-pipelines' -Requires-Dist: zope.interface ; extra == 'azure-pipelines' -Requires-Dist: pytest-azurepipelines ; extra == 'azure-pipelines' -Provides-Extra: dev -Requires-Dist: coverage ; extra == 'dev' -Requires-Dist: hypothesis ; extra == 'dev' -Requires-Dist: pympler ; extra == 'dev' -Requires-Dist: pytest (>=4.3.0) ; extra == 'dev' -Requires-Dist: six ; extra == 'dev' -Requires-Dist: zope.interface ; extra == 'dev' -Requires-Dist: sphinx ; extra == 'dev' -Requires-Dist: pre-commit ; extra == 'dev' -Provides-Extra: docs -Requires-Dist: sphinx ; extra == 'docs' -Requires-Dist: zope.interface ; extra == 'docs' -Provides-Extra: tests -Requires-Dist: coverage ; extra == 'tests' -Requires-Dist: hypothesis ; extra == 'tests' -Requires-Dist: pympler ; extra == 'tests' -Requires-Dist: pytest (>=4.3.0) ; extra == 'tests' -Requires-Dist: six ; extra == 'tests' -Requires-Dist: zope.interface ; extra == 'tests' - -.. image:: https://www.attrs.org/en/latest/_static/attrs_logo.png - :alt: attrs Logo - -====================================== -``attrs``: Classes Without Boilerplate -====================================== - -.. image:: https://readthedocs.org/projects/attrs/badge/?version=stable - :target: https://www.attrs.org/en/stable/?badge=stable - :alt: Documentation Status - -.. image:: https://attrs.visualstudio.com/attrs/_apis/build/status/python-attrs.attrs?branchName=master - :target: https://attrs.visualstudio.com/attrs/_build/latest?definitionId=1&branchName=master - :alt: CI Status - -.. image:: https://codecov.io/github/python-attrs/attrs/branch/master/graph/badge.svg - :target: https://codecov.io/github/python-attrs/attrs - :alt: Test Coverage - -.. image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/psf/black - :alt: Code style: black - -.. teaser-begin - -``attrs`` is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka `dunder `_ methods). - -Its main goal is to help you to write **concise** and **correct** software without slowing down your code. - -.. -spiel-end- - -For that, it gives you a class decorator and a way to declaratively define the attributes on that class: - -.. -code-begin- - -.. code-block:: pycon - - >>> import attr - - >>> @attr.s - ... class SomeClass(object): - ... a_number = attr.ib(default=42) - ... list_of_numbers = attr.ib(factory=list) - ... - ... def hard_math(self, another_number): - ... 
return self.a_number + sum(self.list_of_numbers) * another_number - - - >>> sc = SomeClass(1, [1, 2, 3]) - >>> sc - SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) - - >>> sc.hard_math(3) - 19 - >>> sc == SomeClass(1, [1, 2, 3]) - True - >>> sc != SomeClass(2, [3, 2, 1]) - True - - >>> attr.asdict(sc) - {'a_number': 1, 'list_of_numbers': [1, 2, 3]} - - >>> SomeClass() - SomeClass(a_number=42, list_of_numbers=[]) - - >>> C = attr.make_class("C", ["a", "b"]) - >>> C("foo", "bar") - C(a='foo', b='bar') - - -After *declaring* your attributes ``attrs`` gives you: - -- a concise and explicit overview of the class's attributes, -- a nice human-readable ``__repr__``, -- a complete set of comparison methods (equality and ordering), -- an initializer, -- and much more, - -*without* writing dull boilerplate code again and again and *without* runtime performance penalties. - -On Python 3.6 and later, you can often even drop the calls to ``attr.ib()`` by using `type annotations `_. - -This gives you the power to use actual classes with actual types in your code instead of confusing ``tuple``\ s or `confusingly behaving `_ ``namedtuple``\ s. -Which in turn encourages you to write *small classes* that do `one thing well `_. -Never again violate the `single responsibility principle `_ just because implementing ``__init__`` et al is a painful drag. - - -.. -testimonials- - -Testimonials -============ - -**Amber Hawkie Brown**, Twisted Release Manager and Computer Owl: - - Writing a fully-functional class using attrs takes me less time than writing this testimonial. - - -**Glyph Lefkowitz**, creator of `Twisted `_, `Automat `_, and other open source software, in `The One Python Library Everyone Needs `_: - - I’m looking forward to being able to program in Python-with-attrs everywhere. - It exerts a subtle, but positive, design influence in all the codebases I’ve seen it used in. - - -**Kenneth Reitz**, creator of `Requests `_ (`on paper no less `_!): - - attrs—classes for humans. I like it. - - -**Łukasz Langa**, creator of `Black `_, prolific Python core developer, and release manager for Python 3.8 and 3.9: - - I'm increasingly digging your attr.ocity. Good job! - - -.. -end- - -.. -project-information- - -Getting Help -============ - -Please use the ``python-attrs`` tag on `StackOverflow `_ to get help. - -Answering questions of your fellow developers is also a great way to help the project! - - -Project Information -=================== - -``attrs`` is released under the `MIT `_ license, -its documentation lives at `Read the Docs `_, -the code on `GitHub `_, -and the latest release on `PyPI `_. -It’s rigorously tested on Python 2.7, 3.4+, and PyPy. - -We collect information on **third-party extensions** in our `wiki `_. -Feel free to browse and add your own! - -If you'd like to contribute to ``attrs`` you're most welcome and we've written `a little guide `_ to get you started! - - -Release Information -=================== - -19.3.0 (2019-10-15) -------------------- - -Changes -^^^^^^^ - -- Fixed ``auto_attribs`` usage when default values cannot be compared directly with ``==``, such as ``numpy`` arrays. - `#585 `_ - -`Full changelog `_. - -Credits -======= - -``attrs`` is written and maintained by `Hynek Schlawack `_. - -The development is kindly supported by `Variomedia AG `_. - -A full list of contributors can be found in `GitHub's overview `_. - -It’s the spiritual successor of `characteristic `_ and aspires to fix some of its clunkiness and unfortunate decisions. 
-Both were inspired by Twisted’s `FancyEqMixin `_ but both are implemented using class decorators because `subclassing is bad for you `_, m’kay? - - diff --git a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/RECORD b/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/RECORD deleted file mode 100644 index 8dafe911..00000000 --- a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/RECORD +++ /dev/null @@ -1,33 +0,0 @@ -attr/__init__.py,sha256=ONaI-ZEGOIC7IDqH2HANgesnOxPE1m0GIRRYPPsXEHk,1349 -attr/__init__.pyi,sha256=fOnMRTF00b5J23PYPF74u66UVhVzzm0KYVxzmVXHPw0,8257 -attr/__pycache__/__init__.cpython-38.pyc,, -attr/__pycache__/_compat.cpython-38.pyc,, -attr/__pycache__/_config.cpython-38.pyc,, -attr/__pycache__/_funcs.cpython-38.pyc,, -attr/__pycache__/_make.cpython-38.pyc,, -attr/__pycache__/_version_info.cpython-38.pyc,, -attr/__pycache__/converters.cpython-38.pyc,, -attr/__pycache__/exceptions.cpython-38.pyc,, -attr/__pycache__/filters.cpython-38.pyc,, -attr/__pycache__/validators.cpython-38.pyc,, -attr/_compat.py,sha256=-pJtdtqgCg0K6rH_BWf3wKuTum58GD-WWPclQQ2SUaU,7326 -attr/_config.py,sha256=_KvW0mQdH2PYjHc0YfIUaV_o2pVfM7ziMEYTxwmEhOA,514 -attr/_funcs.py,sha256=unAJfNGSTOzxyFzkj7Rs3O1bfsQodmXyir9uZKen-vY,9696 -attr/_make.py,sha256=HhjGhFEbnxPKuUb9hFmAjXoQGpekniw1IEF3_Z-vwCc,70807 -attr/_version_info.py,sha256=azMi1lNelb3cJvvYUMXsXVbUANkRzbD5IEiaXVpeVr4,2162 -attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 -attr/converters.py,sha256=5QJRYSXE8G7PW0289y_SPwvvZIcw-nJIuBlfYVdB4BQ,2141 -attr/converters.pyi,sha256=wAhCoOT1MFV8t323rpD87O7bxQ8CYLTPiBQd-29BieI,351 -attr/exceptions.py,sha256=hbhOa3b4W8_mRrbj3FsMTR4Bt5xzbJs5xaFTWn8s6h4,1635 -attr/exceptions.pyi,sha256=4zuaJyl2axxWbqnZgxo_2oTpPNbyowEw3A4hqV5PmAc,458 -attr/filters.py,sha256=weDxwATsa69T_0bPVjiM1fGsciAMQmwhY5G8Jm5BxuI,1098 -attr/filters.pyi,sha256=xDpmKQlFdssgxGa5tsl1ADh_3zwAwAT4vUhd8h-8-Tk,214 -attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -attr/validators.py,sha256=8AsxgdDgh3sGPseiUIMPGcTr6PvaDYfH3AK46tsvs8U,11460 -attr/validators.pyi,sha256=vZgsJqUwrJevh4v_Hd7_RSXqDrBctE6-3AEZ7uYKodo,1868 -attrs-19.3.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -attrs-19.3.0.dist-info/LICENSE,sha256=v2WaKLSSQGAvVrvfSQy-LsUJsVuY-Z17GaUsdA4yeGM,1082 -attrs-19.3.0.dist-info/METADATA,sha256=WmnjYy_TftebL3pewXyGEaD4TZRrLUEHk3frEkAtqL0,9022 -attrs-19.3.0.dist-info/RECORD,, -attrs-19.3.0.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110 -attrs-19.3.0.dist-info/top_level.txt,sha256=tlRYMddkRlKPqJ96wP2_j9uEsmcNHgD2SbuWd4CzGVU,5 diff --git a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/WHEEL b/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/WHEEL deleted file mode 100644 index 8b701e93..00000000 --- a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.6) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/top_level.txt b/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/top_level.txt deleted file mode 100644 index 66a062d8..00000000 --- a/venv/lib/python3.8/site-packages/attrs-19.3.0.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -attr diff --git a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/INSTALLER b/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/INSTALLER deleted file mode 100644 index a1b589e3..00000000 --- 
a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/LICENSE b/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/LICENSE deleted file mode 100644 index 7a9b891f..00000000 --- a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2018 Łukasz Langa - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/METADATA b/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/METADATA deleted file mode 100644 index 643c375f..00000000 --- a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/METADATA +++ /dev/null @@ -1,1544 +0,0 @@ -Metadata-Version: 2.1 -Name: black -Version: 19.10b0 -Summary: The uncompromising code formatter. -Home-page: https://github.com/psf/black -Author: Łukasz Langa -Author-email: lukasz@langa.pl -License: MIT -Keywords: automation formatter yapf autopep8 pyfmt gofmt rustfmt -Platform: UNKNOWN -Classifier: Development Status :: 4 - Beta -Classifier: Environment :: Console -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Programming Language :: Python :: 3 :: Only -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: Software Development :: Quality Assurance -Requires-Python: >=3.6 -Description-Content-Type: text/markdown -Requires-Dist: click (>=6.5) -Requires-Dist: attrs (>=18.1.0) -Requires-Dist: appdirs -Requires-Dist: toml (>=0.9.4) -Requires-Dist: typed-ast (>=1.4.0) -Requires-Dist: regex -Requires-Dist: pathspec (<1,>=0.6) -Provides-Extra: d -Requires-Dist: aiohttp (>=3.3.2) ; extra == 'd' -Requires-Dist: aiohttp-cors ; extra == 'd' - -![Black Logo](https://raw.githubusercontent.com/psf/black/master/docs/_static/logo2-readme.png) - -

The Uncompromising Code Formatter

- -

-[badge row: Build Status, Documentation Status, Coverage Status, License: MIT, PyPI, Downloads, Code style: black] -

- -> “Any color you like.” - -_Black_ is the uncompromising Python code formatter. By using it, you agree to cede -control over minutiae of hand-formatting. In return, _Black_ gives you speed, -determinism, and freedom from `pycodestyle` nagging about formatting. You will save time -and mental energy for more important matters. - -Blackened code looks the same regardless of the project you're reading. Formatting -becomes transparent after a while and you can focus on the content instead. - -_Black_ makes code review faster by producing the smallest diffs possible. - -Try it out now using the [Black Playground](https://black.now.sh). Watch the -[PyCon 2019 talk](https://youtu.be/esZLCuWs_2Y) to learn more. - ---- - -_Contents:_ **[Installation and usage](#installation-and-usage)** | -**[Code style](#the-black-code-style)** | **[pyproject.toml](#pyprojecttoml)** | -**[Editor integration](#editor-integration)** | **[blackd](#blackd)** | -**[Version control integration](#version-control-integration)** | -**[Ignoring unmodified files](#ignoring-unmodified-files)** | **[Used by](#used-by)** | -**[Testimonials](#testimonials)** | **[Show your style](#show-your-style)** | -**[Contributing](#contributing-to-black)** | **[Change Log](#change-log)** | -**[Authors](#authors)** - ---- - -## Installation and usage - -### Installation - -_Black_ can be installed by running `pip install black`. It requires Python 3.6.0+ to -run but you can reformat Python 2 code with it, too. - -### Usage - -To get started right away with sensible defaults: - -``` -black {source_file_or_directory} -``` - -### Command line options - -_Black_ doesn't provide many options. You can list them by running `black --help`: - -```text -black [OPTIONS] [SRC]... - -Options: - -c, --code TEXT Format the code passed in as a string. - -l, --line-length INTEGER How many characters per line to allow. - [default: 88] - -t, --target-version [py27|py33|py34|py35|py36|py37|py38] - Python versions that should be supported by - Black's output. [default: per-file auto- - detection] - --py36 Allow using Python 3.6-only syntax on all - input files. This will put trailing commas - in function signatures and calls also after - *args and **kwargs. Deprecated; use - --target-version instead. [default: per-file - auto-detection] - --pyi Format all input files like typing stubs - regardless of file extension (useful when - piping source on standard input). - -S, --skip-string-normalization - Don't normalize string quotes or prefixes. - --check Don't write the files back, just return the - status. Return code 0 means nothing would - change. Return code 1 means some files - would be reformatted. Return code 123 means - there was an internal error. - --diff Don't write the files back, just output a - diff for each file on stdout. - --fast / --safe If --fast given, skip temporary sanity - checks. [default: --safe] - --include TEXT A regular expression that matches files and - directories that should be included on - recursive searches. An empty value means - all files are included regardless of the - name. Use forward slashes for directories - on all platforms (Windows, too). Exclusions - are calculated first, inclusions later. - [default: \.pyi?$] - --exclude TEXT A regular expression that matches files and - directories that should be excluded on - recursive searches. An empty value means no - paths are excluded. Use forward slashes for - directories on all platforms (Windows, too). - Exclusions are calculated first, inclusions - later. 
[default: /(\.eggs|\.git|\.hg|\.mypy - _cache|\.nox|\.tox|\.venv|_build|buck- - out|build|dist)/] - -q, --quiet Don't emit non-error messages to stderr. - Errors are still emitted, silence those with - 2>/dev/null. - -v, --verbose Also emit messages to stderr about files - that were not changed or were ignored due to - --exclude=. - --version Show the version and exit. - --config PATH Read configuration from PATH. - -h, --help Show this message and exit. -``` - -_Black_ is a well-behaved Unix-style command-line tool: - -- it does nothing if no sources are passed to it; -- it will read from standard input and write to standard output if `-` is used as the - filename; -- it only outputs messages to users on standard error; -- exits with code 0 unless an internal error occurred (or `--check` was used). - -### NOTE: This is a beta product - -_Black_ is already [successfully used](#used-by) by many projects, small and big. It -also sports a decent test suite. However, it is still very new. Things will probably be -wonky for a while. This is made explicit by the "Beta" trove classifier, as well as by -the "b" in the version number. What this means for you is that **until the formatter -becomes stable, you should expect some formatting to change in the future**. That being -said, no drastic stylistic changes are planned, mostly responses to bug reports. - -Also, as a temporary safety measure, _Black_ will check that the reformatted code still -produces a valid AST that is equivalent to the original. This slows it down. If you're -feeling confident, use `--fast`. - -## The _Black_ code style - -_Black_ reformats entire files in place. It is not configurable. It doesn't take -previous formatting into account. It doesn't reformat blocks that start with -`# fmt: off` and end with `# fmt: on`. `# fmt: on/off` have to be on the same level of -indentation. It also recognizes [YAPF](https://github.com/google/yapf)'s block comments -to the same effect, as a courtesy for straddling code. - -### How _Black_ wraps lines - -_Black_ ignores previous formatting and applies uniform horizontal and vertical -whitespace to your code. The rules for horizontal whitespace can be summarized as: do -whatever makes `pycodestyle` happy. The coding style used by _Black_ can be viewed as a -strict subset of PEP 8. - -As for vertical whitespace, _Black_ tries to render one full expression or simple -statement per line. If this fits the allotted line length, great. - -```py3 -# in: - -j = [1, - 2, - 3, -] - -# out: - -j = [1, 2, 3] -``` - -If not, _Black_ will look at the contents of the first outer matching brackets and put -that in a separate indented line. - -```py3 -# in: - -ImportantClass.important_method(exc, limit, lookup_lines, capture_locals, extra_argument) - -# out: - -ImportantClass.important_method( - exc, limit, lookup_lines, capture_locals, extra_argument -) -``` - -If that still doesn't fit the bill, it will decompose the internal expression further -using the same rule, indenting matching brackets every time. If the contents of the -matching brackets pair are comma-separated (like an argument list, or a dict literal, -and so on) then _Black_ will first try to keep them on the same line with the matching -brackets. If that doesn't work, it will put all of them in separate lines. 
- -```py3 -# in: - -def very_important_function(template: str, *variables, file: os.PathLike, engine: str, header: bool = True, debug: bool = False): - """Applies `variables` to the `template` and writes to `file`.""" - with open(file, 'w') as f: - ... - -# out: - -def very_important_function( - template: str, - *variables, - file: os.PathLike, - engine: str, - header: bool = True, - debug: bool = False, -): - """Applies `variables` to the `template` and writes to `file`.""" - with open(file, "w") as f: - ... -``` - -You might have noticed that closing brackets are always dedented and that a trailing -comma is always added. Such formatting produces smaller diffs; when you add or remove an -element, it's always just one line. Also, having the closing bracket dedented provides a -clear delimiter between two distinct sections of the code that otherwise share the same -indentation level (like the arguments list and the docstring in the example above). - -If a data structure literal (tuple, list, set, dict) or a line of "from" imports cannot -fit in the allotted length, it's always split into one element per line. This minimizes -diffs as well as enables readers of code to find which commit introduced a particular -entry. This also makes _Black_ compatible with [isort](https://pypi.org/p/isort/) with -the following configuration. - -
-A compatible `.isort.cfg` - -``` -[settings] -multi_line_output=3 -include_trailing_comma=True -force_grid_wrap=0 -use_parentheses=True -line_length=88 -``` - -The equivalent command line is: - -``` -$ isort --multi-line=3 --trailing-comma --force-grid-wrap=0 --use-parentheses --line-width=88 [ file.py ] -``` - -
- -### Line length - -You probably noticed the peculiar default line length. _Black_ defaults to 88 characters -per line, which happens to be 10% over 80. This number was found to produce -significantly shorter files than sticking with 80 (the most popular), or even 79 (used -by the standard library). In general, -[90-ish seems like the wise choice](https://youtu.be/wf-BqAjZb8M?t=260). - -If you're paid by the line of code you write, you can pass `--line-length` with a lower -number. _Black_ will try to respect that. However, sometimes it won't be able to without -breaking other rules. In those rare cases, auto-formatted code will exceed your allotted -limit. - -You can also increase it, but remember that people with sight disabilities find it -harder to work with line lengths exceeding 100 characters. It also adversely affects -side-by-side diff review on typical screen resolutions. Long lines also make it harder -to present code neatly in documentation or talk slides. - -If you're using Flake8, you can bump `max-line-length` to 88 and forget about it. -Alternatively, use [Bugbear](https://github.com/PyCQA/flake8-bugbear)'s B950 warning -instead of E501 and keep the max line length at 80 which you are probably already using. -You'd do it like this: - -```ini -[flake8] -max-line-length = 80 -... -select = C,E,F,W,B,B950 -ignore = E203, E501, W503 -``` - -You'll find _Black_'s own .flake8 config file is configured like this. Explanation of -why W503 and E203 are disabled can be found further in this documentation. And if you're -curious about the reasoning behind B950, -[Bugbear's documentation](https://github.com/PyCQA/flake8-bugbear#opinionated-warnings) -explains it. The tl;dr is "it's like highway speed limits, we won't bother you if you -overdo it by a few km/h". - -### Empty lines - -_Black_ avoids spurious vertical whitespace. This is in the spirit of PEP 8 which says -that in-function vertical whitespace should only be used sparingly. - -_Black_ will allow single empty lines inside functions, and single and double empty -lines on module level left by the original editors, except when they're within -parenthesized expressions. Since such expressions are always reformatted to fit minimal -space, this whitespace is lost. - -It will also insert proper spacing before and after function definitions. It's one line -before and after inner functions and two lines before and after module-level functions -and classes. _Black_ will not put empty lines between function/class definitions and -standalone comments that immediately precede the given function/class. - -_Black_ will enforce single empty lines between a class-level docstring and the first -following field or method. This conforms to -[PEP 257](https://www.python.org/dev/peps/pep-0257/#multi-line-docstrings). - -_Black_ won't insert empty lines after function docstrings unless that empty line is -required due to an inner function starting immediately after. - -### Trailing commas - -_Black_ will add trailing commas to expressions that are split by comma where each -element is on its own line. This includes function signatures. - -Unnecessary trailing commas are removed if an expression fits in one line. This makes it -1% more likely that your line won't exceed the allotted line length limit. Moreover, in -this scenario, if you added another argument to your call, you'd probably fit it in the -same line anyway. That doesn't make diffs any larger. - -One exception to removing trailing commas is tuple expressions with just one element. 
In -this case _Black_ won't touch the single trailing comma as this would unexpectedly -change the underlying data type. Note that this is also the case when commas are used -while indexing. This is a tuple in disguise: `numpy_array[3, ]`. - -One exception to adding trailing commas is function signatures containing `*`, `*args`, -or `**kwargs`. In this case a trailing comma is only safe to use on Python 3.6. _Black_ -will detect if your file is already 3.6+ only and use trailing commas in this situation. -If you wonder how it knows, it looks for f-strings and existing use of trailing commas -in function signatures that have stars in them. In other words, if you'd like a trailing -comma in this situation and _Black_ didn't recognize it was safe to do so, put it there -manually and _Black_ will keep it. - -### Strings - -_Black_ prefers double quotes (`"` and `"""`) over single quotes (`'` and `'''`). It -will replace the latter with the former as long as it does not result in more backslash -escapes than before. - -_Black_ also standardizes string prefixes, making them always lowercase. On top of that, -if your code is already Python 3.6+ only or it's using the `unicode_literals` future -import, _Black_ will remove `u` from the string prefix as it is meaningless in those -scenarios. - -The main reason to standardize on a single form of quotes is aesthetics. Having one kind -of quotes everywhere reduces reader distraction. It will also enable a future version of -_Black_ to merge consecutive string literals that ended up on the same line (see -[#26](https://github.com/psf/black/issues/26) for details). - -Why settle on double quotes? They anticipate apostrophes in English text. They match the -docstring standard described in -[PEP 257](https://www.python.org/dev/peps/pep-0257/#what-is-a-docstring). An empty -string in double quotes (`""`) is impossible to confuse with a one double-quote -regardless of fonts and syntax highlighting used. On top of this, double quotes for -strings are consistent with C which Python interacts a lot with. - -On certain keyboard layouts like US English, typing single quotes is a bit easier than -double quotes. The latter requires use of the Shift key. My recommendation here is to -keep using whatever is faster to type and let _Black_ handle the transformation. - -If you are adopting _Black_ in a large project with pre-existing string conventions -(like the popular -["single quotes for data, double quotes for human-readable strings"](https://stackoverflow.com/a/56190)), -you can pass `--skip-string-normalization` on the command line. This is meant as an -adoption helper, avoid using this for new projects. - -### Numeric literals - -_Black_ standardizes most numeric literals to use lowercase letters for the syntactic -parts and uppercase letters for the digits themselves: `0xAB` instead of `0XAB` and -`1e10` instead of `1E10`. Python 2 long literals are styled as `2L` instead of `2l` to -avoid confusion between `l` and `1`. - -### Line breaks & binary operators - -_Black_ will break a line before a binary operator when splitting a block of code over -multiple lines. This is so that _Black_ is compliant with the recent changes in the -[PEP 8](https://www.python.org/dev/peps/pep-0008/#should-a-line-break-before-or-after-a-binary-operator) -style guide, which emphasizes that this approach improves readability. - -This behaviour may raise `W503 line break before binary operator` warnings in style -guide enforcement tools like Flake8. 
Since `W503` is not PEP 8 compliant, you should -tell Flake8 to ignore these warnings. - -### Slices - -PEP 8 -[recommends](https://www.python.org/dev/peps/pep-0008/#whitespace-in-expressions-and-statements) -to treat `:` in slices as a binary operator with the lowest priority, and to leave an -equal amount of space on either side, except if a parameter is omitted (e.g. -`ham[1 + 1 :]`). It also states that for extended slices, both `:` operators have to -have the same amount of spacing, except if a parameter is omitted (`ham[1 + 1 ::]`). -_Black_ enforces these rules consistently. - -This behaviour may raise `E203 whitespace before ':'` warnings in style guide -enforcement tools like Flake8. Since `E203` is not PEP 8 compliant, you should tell -Flake8 to ignore these warnings. - -### Parentheses - -Some parentheses are optional in the Python grammar. Any expression can be wrapped in a -pair of parentheses to form an atom. There are a few interesting cases: - -- `if (...):` -- `while (...):` -- `for (...) in (...):` -- `assert (...), (...)` -- `from X import (...)` -- assignments like: - - `target = (...)` - - `target: type = (...)` - - `some, *un, packing = (...)` - - `augmented += (...)` - -In those cases, parentheses are removed when the entire statement fits in one line, or -if the inner expression doesn't have any delimiters to further split on. If there is -only a single delimiter and the expression starts or ends with a bracket, the -parenthesis can also be successfully omitted since the existing bracket pair will -organize the expression neatly anyway. Otherwise, the parentheses are added. - -Please note that _Black_ does not add or remove any additional nested parentheses that -you might want to have for clarity or further code organization. For example those -parentheses are not going to be removed: - -```py3 -return not (this or that) -decision = (maybe.this() and values > 0) or (maybe.that() and values < 0) -``` - -### Call chains - -Some popular APIs, like ORMs, use call chaining. This API style is known as a -[fluent interface](https://en.wikipedia.org/wiki/Fluent_interface). _Black_ formats -those by treating dots that follow a call or an indexing operation like a very low -priority delimiter. It's easier to show the behavior than to explain it. Look at the -example: - -```py3 -def example(session): - result = ( - session.query(models.Customer.id) - .filter( - models.Customer.account_id == account_id, - models.Customer.email == email_address, - ) - .order_by(models.Customer.id.asc()) - .all() - ) -``` - -### Typing stub files - -PEP 484 describes the syntax for type hints in Python. One of the use cases for typing -is providing type annotations for modules which cannot contain them directly (they might -be written in C, or they might be third-party, or their implementation may be overly -dynamic, and so on). - -To solve this, -[stub files with the `.pyi` file extension](https://www.python.org/dev/peps/pep-0484/#stub-files) -can be used to describe typing information for an external module. Those stub files omit -the implementation of classes and functions they describe, instead they only contain the -structure of the file (listing globals, functions, and classes with their members). 
The -recommended code style for those files is more terse than PEP 8: - -- prefer `...` on the same line as the class/function signature; -- avoid vertical whitespace between consecutive module-level functions, names, or - methods and fields within a single class; -- use a single blank line between top-level class definitions, or none if the classes - are very small. - -_Black_ enforces the above rules. There are additional guidelines for formatting `.pyi` -file that are not enforced yet but might be in a future version of the formatter: - -- all function bodies should be empty (contain `...` instead of the body); -- do not use docstrings; -- prefer `...` over `pass`; -- for arguments with a default, use `...` instead of the actual default; -- avoid using string literals in type annotations, stub files support forward references - natively (like Python 3.7 code with `from __future__ import annotations`); -- use variable annotations instead of type comments, even for stubs that target older - versions of Python; -- for arguments that default to `None`, use `Optional[]` explicitly; -- use `float` instead of `Union[int, float]`. - -## pyproject.toml - -_Black_ is able to read project-specific default values for its command line options -from a `pyproject.toml` file. This is especially useful for specifying custom -`--include` and `--exclude` patterns for your project. - -**Pro-tip**: If you're asking yourself "Do I need to configure anything?" the answer is -"No". _Black_ is all about sensible defaults. - -### What on Earth is a `pyproject.toml` file? - -[PEP 518](https://www.python.org/dev/peps/pep-0518/) defines `pyproject.toml` as a -configuration file to store build system requirements for Python projects. With the help -of tools like [Poetry](https://poetry.eustace.io/) or -[Flit](https://flit.readthedocs.io/en/latest/) it can fully replace the need for -`setup.py` and `setup.cfg` files. - -### Where _Black_ looks for the file - -By default _Black_ looks for `pyproject.toml` starting from the common base directory of -all files and directories passed on the command line. If it's not there, it looks in -parent directories. It stops looking when it finds the file, or a `.git` directory, or a -`.hg` directory, or the root of the file system, whichever comes first. - -If you're formatting standard input, _Black_ will look for configuration starting from -the current working directory. - -You can also explicitly specify the path to a particular file that you want with -`--config`. In this situation _Black_ will not look for any other file. - -If you're running with `--verbose`, you will see a blue message if a file was found and -used. - -Please note `blackd` will not use `pyproject.toml` configuration. - -### Configuration format - -As the file extension suggests, `pyproject.toml` is a -[TOML](https://github.com/toml-lang/toml) file. It contains separate sections for -different tools. _Black_ is using the `[tool.black]` section. The option keys are the -same as long names of options on the command line. - -Note that you have to use single-quoted strings in TOML for regular expressions. It's -the equivalent of r-strings in Python. Multiline strings are treated as verbose regular -expressions by Black. Use `[ ]` to denote a significant space character. - -
-Example `pyproject.toml` - -```toml -[tool.black] -line-length = 88 -target-version = ['py37'] -include = '\.pyi?$' -exclude = ''' - -( - /( - \.eggs # exclude a few common directories in the - | \.git # root of the project - | \.hg - | \.mypy_cache - | \.tox - | \.venv - | _build - | buck-out - | build - | dist - )/ - | foo.py # also separately exclude a file named foo.py in - # the root of the project -) -''' -``` - -
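Because the option keys in `[tool.black]` mirror the long command-line names, any tool can read a configuration like the example above with a TOML parser. A minimal sketch using the `toml` package (a declared dependency of this `black` release, per the metadata above); the path and fallback defaults are illustrative, and this is not Black's actual lookup logic:

```python
# A minimal sketch: read [tool.black] with the `toml` package. The path
# and defaults are illustrative; Black's real lookup walks parent
# directories as described above.
import toml

pyproject = toml.load("pyproject.toml")
black_opts = pyproject.get("tool", {}).get("black", {})

line_length = black_opts.get("line-length", 88)        # mirrors --line-length
target_version = black_opts.get("target-version", [])  # mirrors --target-version
print(line_length, target_version)
```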
- -### Lookup hierarchy - -Command-line options have defaults that you can see in `--help`. A `pyproject.toml` can -override those defaults. Finally, options provided by the user on the command line -override both. - -_Black_ will only ever use one `pyproject.toml` file during an entire run. It doesn't -look for multiple files, and doesn't compose configuration from different levels of the -file hierarchy. - -## Editor integration - -### Emacs - -Use [proofit404/blacken](https://github.com/proofit404/blacken) or -[Elpy](https://github.com/jorgenschaefer/elpy). - -### PyCharm/IntelliJ IDEA - -1. Install `black`. - -```console -$ pip install black -``` - -2. Locate your `black` installation folder. - -On macOS / Linux / BSD: - -```console -$ which black -/usr/local/bin/black # possible location -``` - -On Windows: - -```console -$ where black -%LocalAppData%\Programs\Python\Python36-32\Scripts\black.exe # possible location -``` - -3. Open External tools in PyCharm/IntelliJ IDEA - -On macOS: - -`PyCharm -> Preferences -> Tools -> External Tools` - -On Windows / Linux / BSD: - -`File -> Settings -> Tools -> External Tools` - -4. Click the + icon to add a new external tool with the following values: - - - Name: Black - - Description: Black is the uncompromising Python code formatter. - - Program: - - Arguments: `"$FilePath$"` - -5. Format the currently opened file by selecting `Tools -> External Tools -> black`. - - - Alternatively, you can set a keyboard shortcut by navigating to - `Preferences or Settings -> Keymap -> External Tools -> External Tools - Black`. - -6. Optionally, run _Black_ on every file save: - - 1. Make sure you have the - [File Watcher](https://plugins.jetbrains.com/plugin/7177-file-watchers) plugin - installed. - 2. Go to `Preferences or Settings -> Tools -> File Watchers` and click `+` to add a - new watcher: - - Name: Black - - File type: Python - - Scope: Project Files - - Program: - - Arguments: `$FilePath$` - - Output paths to refresh: `$FilePath$` - - Working directory: `$ProjectFileDir$` - - - Uncheck "Auto-save edited files to trigger the watcher" - -### Wing IDE - -Wing supports black via the OS Commands tool, as explained in the Wing documentation on -[pep8 formatting](https://wingware.com/doc/edit/pep8). The detailed procedure is: - -1. Install `black`. - -```console -$ pip install black -``` - -2. Make sure it runs from the command line, e.g. - -```console -$ black --help -``` - -3. In Wing IDE, activate the **OS Commands** panel and define the command **black** to - execute black on the currently selected file: - -- Use the Tools -> OS Commands menu selection -- click on **+** in **OS Commands** -> New: Command line.. - - Title: black - - Command Line: black %s - - I/O Encoding: Use Default - - Key Binding: F1 - - [x] Raise OS Commands when executed - - [x] Auto-save files before execution - - [x] Line mode - -4. Select a file in the editor and press **F1** , or whatever key binding you selected - in step 3, to reformat the file. - -### Vim - -Commands and shortcuts: - -- `:Black` to format the entire file (ranges not supported); -- `:BlackUpgrade` to upgrade _Black_ inside the virtualenv; -- `:BlackVersion` to get the current version of _Black_ inside the virtualenv. 
- -Configuration: - -- `g:black_fast` (defaults to `0`) -- `g:black_linelength` (defaults to `88`) -- `g:black_skip_string_normalization` (defaults to `0`) -- `g:black_virtualenv` (defaults to `~/.vim/black` or `~/.local/share/nvim/black`) - -To install with [vim-plug](https://github.com/junegunn/vim-plug): - -``` -Plug 'psf/black' -``` - -or with [Vundle](https://github.com/VundleVim/Vundle.vim): - -``` -Plugin 'psf/black' -``` - -or you can copy the plugin from -[plugin/black.vim](https://github.com/psf/black/tree/master/plugin/black.vim). - -``` -mkdir -p ~/.vim/pack/python/start/black/plugin -curl https://raw.githubusercontent.com/psf/black/master/plugin/black.vim -o ~/.vim/pack/python/start/black/plugin/black.vim -``` - -Let me know if this requires any changes to work with Vim 8's builtin `packadd`, or -Pathogen, and so on. - -This plugin **requires Vim 7.0+ built with Python 3.6+ support**. It needs Python 3.6 to -be able to run _Black_ inside the Vim process, which is much faster than calling an -external command. - -On first run, the plugin creates its own virtualenv using the right Python version and -automatically installs _Black_. You can upgrade it later by calling `:BlackUpgrade` and -restarting Vim. - -If you need to do anything special to make your virtualenv work and install _Black_ (for -example you want to run a version from master), create a virtualenv manually and point -`g:black_virtualenv` to it. The plugin will use it. - -To run _Black_ on save, add the following line to `.vimrc` or `init.vim`: - -``` -autocmd BufWritePre *.py execute ':Black' -``` - -To run _Black_ on a key press (e.g. F9 below), add this: - -``` -nnoremap <F9> :Black<CR> -``` - -**How to get Vim with Python 3.6?** On Ubuntu 17.10 Vim comes with Python 3.6 by -default. On macOS with Homebrew run: `brew install vim --with-python3`. When building -Vim from source, use: `./configure --enable-python3interp=yes`. There are many guides -online on how to do this. - -### Visual Studio Code - -Use the -[Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python) -([instructions](https://code.visualstudio.com/docs/python/editing#_formatting)). - -### SublimeText 3 - -Use the [sublack plugin](https://github.com/jgirardet/sublack). - -### Jupyter Notebook Magic - -Use [blackcellmagic](https://github.com/csurfer/blackcellmagic). - -### Python Language Server - -If your editor supports the [Language Server Protocol](https://langserver.org/) (Atom, -Sublime Text, Visual Studio Code and many more), you can use the -[Python Language Server](https://github.com/palantir/python-language-server) with the -[pyls-black](https://github.com/rupert/pyls-black) plugin. - -### Atom/Nuclide - -Use [python-black](https://atom.io/packages/python-black). - -### Kakoune - -Add the following hook to your kakrc, then run black with `:format`. - -``` -hook global WinSetOption filetype=python %{ - set-option window formatcmd 'black -q -' -} -``` - -### Other editors - -Other editors will require external contributions. - -Patches welcome! ✨ 🍰 ✨ - -Any tool that can pipe code through _Black_ using its stdio mode will work (just -[use `-` as the file name](https://www.tldp.org/LDP/abs/html/special-chars.html#DASHREF2)). -The formatted code will be returned on stdout (unless `--check` was passed). _Black_ -will still emit messages on stderr, but that shouldn't affect your use case. - -This can be used, for example, with PyCharm's or IntelliJ's -[File Watchers](https://www.jetbrains.com/help/pycharm/file-watchers.html).
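As a minimal illustration of the stdio mode, the following pipes a snippet through _Black_ on the command line; the reformatted code comes back on stdout while status messages go to stderr (the input string here is just an example):

```console
$ echo "print ( 'hello' )" | black -
print("hello")
```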
- -## blackd - -`blackd` is a small HTTP server that exposes _Black_'s functionality over a simple -protocol. The main benefit of using it is to avoid paying the cost of starting up a new -_Black_ process every time you want to blacken a file. - -### Usage - -`blackd` is not packaged alongside _Black_ by default because it has additional -dependencies. You will need to do `pip install black[d]` to install it. - -You can start the server on the default port, binding only to the local interface, by -running `blackd`. You will see a single line mentioning the server's version, and the -host and port it's listening on. `blackd` will then print an access log similar to most -web servers on standard output, merged with any exception traces caused by invalid -formatting requests. - -`blackd` provides even fewer options than _Black_. You can see them by running -`blackd --help`: - -```text -Usage: blackd [OPTIONS] - -Options: - --bind-host TEXT Address to bind the server to. - --bind-port INTEGER Port to listen on - --version Show the version and exit. - -h, --help Show this message and exit. -``` - -There is no official blackd client tool (yet!). You can test that blackd is working -using `curl`: - -``` -blackd --bind-port 9090 & # or let blackd choose a port -curl -s -XPOST "localhost:9090" -d "print('valid')" -``` - -### Protocol - -`blackd` only accepts `POST` requests at the `/` path. The body of the request should -contain the Python source code to be formatted, encoded according to the `charset` field -in the `Content-Type` request header. If no `charset` is specified, `blackd` assumes -`UTF-8`. - -There are a few HTTP headers that control how the source is formatted. These correspond -to command line flags for _Black_. The one exception is `X-Protocol-Version`, which, if -present, must have the value `1`; otherwise the request is rejected with `HTTP 501` (Not -Implemented). - -The headers controlling how code is formatted are: - -- `X-Line-Length`: corresponds to the `--line-length` command line flag. -- `X-Skip-String-Normalization`: corresponds to the `--skip-string-normalization` - command line flag. If present and its value is not the empty string, no string - normalization will be performed. -- `X-Fast-Or-Safe`: if set to `fast`, `blackd` will act as _Black_ does when passed the - `--fast` command line flag. -- `X-Python-Variant`: if set to `pyi`, `blackd` will act as _Black_ does when passed the - `--pyi` command line flag. Otherwise, its value must correspond to a Python version or - a set of comma-separated Python versions, optionally prefixed with `py`. For example, - to request code that is compatible with Python 3.5 and 3.6, set the header to - `py3.5,py3.6`. -- `X-Diff`: corresponds to the `--diff` command line flag. If present, a diff of the - formats will be output. - -If any of these headers are set to invalid values, `blackd` returns an `HTTP 400` error -response, mentioning the name of the problematic header in the message body. - -Apart from the above, `blackd` can produce the following response codes: - -- `HTTP 204`: If the input is already well-formatted. The response body is empty. -- `HTTP 200`: If formatting was needed on the input. The response body contains the - blackened Python code, and the `Content-Type` header is set accordingly. -- `HTTP 400`: If the input contains a syntax error.
Details of the error are returned in - the response body. -- `HTTP 500`: If there was any kind of error while trying to format the input. The - response body contains a textual representation of the error. - -The response headers include an `X-Black-Version` header containing the version of -_Black_. - -## Version control integration - -Use [pre-commit](https://pre-commit.com/). Once you -[have it installed](https://pre-commit.com/#install), add this to the -`.pre-commit-config.yaml` in your repository: - -```yaml -repos: - - repo: https://github.com/psf/black - rev: stable - hooks: - - id: black - language_version: python3.6 -``` - -Then run `pre-commit install` and you're ready to go. - -Avoid using `args` in the hook. Instead, store necessary configuration in -`pyproject.toml` so that editors and command-line usage of Black all behave consistently -for your project. See _Black_'s own [pyproject.toml](/pyproject.toml) for an example. - -If you're already using Python 3.7, switch the `language_version` accordingly. Finally, -`stable` is a tag that is pinned to the latest release on PyPI. If you'd rather run on -master, this is also an option. - -## Ignoring unmodified files - -_Black_ remembers files it has already formatted, unless the `--diff` flag is used or -code is passed via standard input. This information is stored per-user. The exact -location of the file depends on the _Black_ version and the system on which _Black_ is -run. The file is non-portable. The standard location on common operating systems is: - -- Windows: - `C:\\Users\<username>\AppData\Local\black\black\Cache\<version>\cache.<line-length>.<file-mode>.pickle` -- macOS: - `/Users/<username>/Library/Caches/black/<version>/cache.<line-length>.<file-mode>.pickle` -- Linux: - `/home/<username>/.cache/black/<version>/cache.<line-length>.<file-mode>.pickle` - -`file-mode` is an int flag that determines whether the file was formatted as 3.6+ only, -as .pyi, and whether string normalization was omitted. - -To override the location of these files on macOS or Linux, set the environment variable -`XDG_CACHE_HOME` to your preferred location. For example, if you want to put the cache -in the directory you're running _Black_ from, set `XDG_CACHE_HOME=.cache`. _Black_ will -then write the above files to `.cache/black/<version>/`. - -## Used by - -The following notable open-source projects trust _Black_ with enforcing a consistent -code style: pytest, tox, Pyramid, Django Channels, Hypothesis, attrs, SQLAlchemy, -Poetry, PyPA applications (Warehouse, Pipenv, virtualenv), pandas, Pillow, every Datadog -Agent Integration. - -Are we missing anyone? Let us know. - -## Testimonials - -**Dusty Phillips**, -[writer](https://smile.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=dusty+phillips): - -> _Black_ is opinionated so you don't have to be. - -**Hynek Schlawack**, [creator of `attrs`](https://www.attrs.org/), core developer of -Twisted and CPython: - -> An auto-formatter that doesn't suck is all I want for Xmas! - -**Carl Meyer**, [Django](https://www.djangoproject.com/) core developer: - -> At least the name is good. - -**Kenneth Reitz**, creator of [`requests`](http://python-requests.org/) and -[`pipenv`](https://docs.pipenv.org/): - -> This vastly improves the formatting of our code. Thanks a ton! - -## Show your style - -Use the badge in your project's README.md: - -```markdown -[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) -``` - -Using the badge in README.rst: - -``` -..
image:: https://img.shields.io/badge/code%20style-black-000000.svg - :target: https://github.com/psf/black -``` - -Looks like this: -[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) - -## License - -MIT - -## Contributing to _Black_ - -In terms of inspiration, _Black_ is about as configurable as _gofmt_. This is -deliberate. - -Bug reports and fixes are always welcome! However, before you suggest a new feature or -configuration knob, ask yourself why you want it. If it enables better integration with -some workflow, fixes an inconsistency, speeds things up, and so on - go for it! On the -other hand, if your answer is "because I don't like a particular formatting" then you're -not ready to embrace _Black_ yet. Such changes are unlikely to get accepted. You can -still try but prepare to be disappointed. - -More details can be found in [CONTRIBUTING](CONTRIBUTING.md). - -## Change Log - -### 19.10b0 - -- added support for PEP 572 assignment expressions (#711) - -- added support for PEP 570 positional-only arguments (#943) - -- added support for async generators (#593) - -- added support for pre-splitting collections by putting an explicit trailing comma - inside (#826) - -- added `black -c` as a way to format code passed from the command line (#761) - -- --safe now works with Python 2 code (#840) - -- fixed grammar selection for Python 2-specific code (#765) - -- fixed feature detection for trailing commas in function definitions and call sites - (#763) - -- `# fmt: off`/`# fmt: on` comment pairs placed multiple times within the same block of - code now behave correctly (#1005) - -- _Black_ no longer crashes on Windows machines with more than 61 cores (#838) - -- _Black_ no longer crashes on standalone comments prepended with a backslash (#767) - -- _Black_ no longer crashes on `from` ... 
`import` blocks with comments (#829) - -- _Black_ no longer crashes on Python 3.7 on some platform configurations (#494) - -- _Black_ no longer fails on comments in from-imports (#671) - -- _Black_ no longer fails when the file starts with a backslash (#922) - -- _Black_ no longer merges regular comments with type comments (#1027) - -- _Black_ no longer splits long lines that contain type comments (#997) - -- removed unnecessary parentheses around `yield` expressions (#834) - -- added parentheses around long tuples in unpacking assignments (#832) - -- added parentheses around complex powers when they are prefixed by a unary operator - (#646) - -- fixed a bug that led _Black_ to format some code with a line length target of 1 (#762) - -- _Black_ no longer introduces quotes in f-string subexpressions on string boundaries - (#863) - -- if _Black_ puts parentheses around a single expression, it moves comments to the - wrapped expression instead of after the brackets (#872) - -- `blackd` now returns the version of _Black_ in the response headers (#1013) - -- `blackd` can now output the diff of formats on source code when the `X-Diff` header is - provided (#969) - -### 19.3b0 - -- new option `--target-version` to control which Python versions _Black_-formatted code - should target (#618) - -- deprecated `--py36` (use `--target-version=py36` instead) (#724) - -- _Black_ no longer normalizes numeric literals to include `_` separators (#696) - -- long `del` statements are now split into multiple lines (#698) - -- type comments are no longer mangled in function signatures - -- improved performance of formatting deeply nested data structures (#509) - -- _Black_ now properly formats multiple files in parallel on Windows (#632) - -- _Black_ now creates cache files atomically, which allows it to be used in parallel - pipelines (like `xargs -P8`) (#673) - -- _Black_ now correctly indents comments in files that were previously formatted with - tabs (#262) - -- `blackd` now supports CORS (#622) - -### 18.9b0 - -- numeric literals are now formatted by _Black_ (#452, #461, #464, #469): - - - numeric literals are normalized to include `_` separators on Python 3.6+ code - - - added `--skip-numeric-underscore-normalization` to disable the above behavior and - leave numeric underscores as they were in the input - - - code with `_` in numeric literals is recognized as Python 3.6+ - - - most letters in numeric literals are lowercased (e.g., in `1e10`, `0x01`) - - - hexadecimal digits are always uppercased (e.g.
`0xBADC0DE`) - -- added `blackd`, see [its documentation](#blackd) for more info (#349) - -- adjacent string literals are now correctly split into multiple lines (#463) - -- trailing comma is now added to single imports that don't fit on a line (#250) - -- cache is now populated when `--check` is successful for a file, which speeds up - consecutive checks of properly formatted unmodified files (#448) - -- whitespace at the beginning of the file is now removed (#399) - -- fixed mangling [pweave](http://mpastell.com/pweave/) and - [Spyder IDE](https://pythonhosted.org/spyder/) special comments (#532) - -- fixed unstable formatting when unpacking big tuples (#267) - -- fixed parsing of `__future__` imports with renames (#389) - -- fixed scope of `# fmt: off` when directly preceding `yield` and other nodes (#385) - -- fixed formatting of lambda expressions with default arguments (#468) - -- fixed `async for` statements: _Black_ no longer breaks them into separate lines (#372) - -- note: the Vim plugin stopped registering `,=` as a default chord as it turned out to - be a bad idea (#415) - -### 18.6b4 - -- hotfix: don't freeze when multiple comments directly precede `# fmt: off` (#371) - -### 18.6b3 - -- typing stub files (`.pyi`) now have blank lines added after constants (#340) - -- `# fmt: off` and `# fmt: on` are now much more dependable: - - - they now work also within bracket pairs (#329) - - - they now correctly work across function/class boundaries (#335) - - - they now work when an indentation block starts with empty lines or misaligned - comments (#334) - -- made Click not fail on invalid environments; note that Click is right but the - likelihood we'll need to access non-ASCII file paths when dealing with Python source - code is low (#277) - -- fixed improper formatting of f-strings with quotes inside interpolated expressions - (#322) - -- fixed unnecessary slowdown when long list literals were found in a file - -- fixed unnecessary slowdown on AST nodes with very many siblings - -- fixed cannibalizing backslashes during string normalization - -- fixed a crash due to symbolic links pointing outside of the project directory (#338) - -### 18.6b2 - -- added `--config` (#65) - -- added `-h` equivalent to `--help` (#316) - -- fixed improper unmodified file caching when `-S` was used - -- fixed extra space in string unpacking (#305) - -- fixed formatting of empty triple quoted strings (#313) - -- fixed unnecessary slowdown in comment placement calculation on lines without comments - -### 18.6b1 - -- hotfix: don't output human-facing information on stdout (#299) - -- hotfix: don't output cake emoji on non-zero return code (#300) - -### 18.6b0 - -- added `--include` and `--exclude` (#270) - -- added `--skip-string-normalization` (#118) - -- added `--verbose` (#283) - -- the header output in `--diff` now actually conforms to the unified diff spec - -- fixed long trivial assignments being wrapped in unnecessary parentheses (#273) - -- fixed unnecessary parentheses when a line contained multiline strings (#232) - -- fixed stdin handling not working correctly if an old version of Click was used (#276) - -- _Black_ now preserves line endings when formatting a file in place (#258) - -### 18.5b1 - -- added `--pyi` (#249) - -- added `--py36` (#249) - -- Python grammar pickle caches are stored with the formatting caches, making _Black_ - work in environments where site-packages is not user-writable (#192) - -- _Black_ now enforces a PEP 257 empty line after a class-level docstring (and/or - fields)
and the first method - -- fixed invalid code produced when standalone comments were present in a trailer that - was omitted from line splitting on a large expression (#237) - -- fixed optional parentheses being removed within `# fmt: off` sections (#224) - -- fixed invalid code produced when stars in very long imports were incorrectly wrapped - in optional parentheses (#234) - -- fixed unstable formatting when inline comments were moved around in a trailer that was - omitted from line splitting on a large expression (#238) - -- fixed extra empty line between a class declaration and the first method if no class - docstring or fields are present (#219) - -- fixed extra empty line between a function signature and an inner function or inner - class (#196) - -### 18.5b0 - -- call chains are now formatted according to the - [fluent interfaces](https://en.wikipedia.org/wiki/Fluent_interface) style (#67) - -- data structure literals (tuples, lists, dictionaries, and sets) are now also always - exploded like imports when they don't fit in a single line (#152) - -- slices are now formatted according to PEP 8 (#178) - -- parentheses are now also managed automatically on the right-hand side of assignments - and return statements (#140) - -- math operators now use their respective priorities for delimiting multiline - expressions (#148) - -- optional parentheses are now omitted on expressions that start or end with a bracket - and only contain a single operator (#177) - -- empty parentheses in a class definition are now removed (#145, #180) - -- string prefixes are now standardized to lowercase and `u` is removed on Python 3.6+ - only code and Python 2.7+ code with the `unicode_literals` future import (#188, #198, - #199) - -- typing stub files (`.pyi`) are now formatted in a style that is consistent with PEP - 484 (#207, #210) - -- progress when reformatting many files is now reported incrementally - -- fixed trailers (content with brackets) being unnecessarily exploded into their own - lines after a dedented closing bracket (#119) - -- fixed an invalid trailing comma sometimes left in imports (#185) - -- fixed non-deterministic formatting when multiple pairs of removable parentheses were - used (#183) - -- fixed multiline strings being unnecessarily wrapped in optional parentheses in long - assignments (#215) - -- fixed not splitting long from-imports with only a single name - -- fixed Python 3.6+ file discovery by also looking at function calls with unpacking. - This fixed non-deterministic formatting if trailing commas were used both in function - signatures with stars and function calls with stars, but the former would be - reformatted to a single line.
- -- fixed crash on dealing with optional parentheses (#193) - -- fixed "is", "is not", "in", and "not in" not being considered operators for splitting - purposes - -- fixed crash when dead symlinks were encountered - -### 18.4a4 - -- don't populate the cache on `--check` (#175) - -### 18.4a3 - -- added a "cache"; files already reformatted that haven't changed on disk won't be - reformatted again (#109) - -- `--check` and `--diff` are no longer mutually exclusive (#149) - -- generalized star expression handling, including double stars; this fixes - multiplication making expressions "unsafe" for trailing commas (#132) - -- _Black_ no longer enforces putting empty lines behind control flow statements (#90) - -- _Black_ now splits imports like "Mode 3 + trailing comma" of isort (#127) - -- fixed comment indentation when a standalone comment closes a block (#16, #32) - -- fixed standalone comments receiving extra empty lines if immediately preceding a - class, def, or decorator (#56, #154) - -- fixed `--diff` not showing entire path (#130) - -- fixed parsing of complex expressions after star and double stars in function calls - (#2) - -- fixed invalid splitting on comma in lambda arguments (#133) - -- fixed missing splits of ternary expressions (#141) - -### 18.4a2 - -- fixed parsing of unaligned standalone comments (#99, #112) - -- fixed placement of dictionary unpacking inside dictionary literals (#111) - -- Vim plugin now works on Windows, too - -- fixed unstable formatting when encountering unnecessarily escaped quotes in a string - (#120) - -### 18.4a1 - -- added `--quiet` (#78) - -- added automatic parentheses management (#4) - -- added [pre-commit](https://pre-commit.com) integration (#103, #104) - -- fixed reporting on `--check` with multiple files (#101, #102) - -- fixed removing backslash escapes from raw strings (#100, #105) - -### 18.4a0 - -- added `--diff` (#87) - -- add line breaks before all delimiters, except in cases like commas, to better comply - with PEP 8 (#73) - -- standardize string literals to use double quotes (almost) everywhere (#75) - -- fixed handling of standalone comments within nested bracketed expressions; _Black_ - will no longer produce super long lines or put all standalone comments at the end of - the expression (#22) - -- fixed 18.3a4 regression: don't crash and burn on empty lines with trailing whitespace - (#80) - -- fixed 18.3a4 regression: `# yapf: disable` usage as trailing comment would cause - _Black_ to not emit the rest of the file (#95) - -- when CTRL+C is pressed while formatting many files, _Black_ no longer freaks out with - a flurry of asyncio-related exceptions - -- only allow up to two empty lines on module level and only single empty lines within - functions (#74) - -### 18.3a4 - -- `# fmt: off` and `# fmt: on` are implemented (#5) - -- automatic detection of deprecated Python 2 forms of print statements and exec - statements in the formatted file (#49) - -- use proper spaces for complex expressions in default values of typed function - arguments (#60) - -- only return exit code 1 when `--check` is used (#50) - -- don't remove single trailing commas from square bracket indexing (#59) - -- don't omit whitespace if the previous factor leaf wasn't a math operator (#55) - -- omit extra space in kwarg unpacking if it's the first argument (#46) - -- omit extra space in - [Sphinx auto-attribute comments](http://www.sphinx-doc.org/en/stable/ext/autodoc.html#directive-autoattribute) - (#68) - -### 18.3a3 - -- don't remove single empty lines outside of
bracketed expressions (#19) - -- added ability to pipe formatting from stdin to stdout (#25) - -- restored ability to format code with legacy usage of `async` as a name (#20, #42) - -- even better handling of numpy-style array indexing (#33, again) - -### 18.3a2 - -- changed positioning of binary operators to occur at beginning of lines instead of at - the end, following - [a recent change to PEP 8](https://github.com/python/peps/commit/c59c4376ad233a62ca4b3a6060c81368bd21e85b) - (#21) - -- ignore empty bracket pairs while splitting. This avoids very weird-looking - formatting (#34, #35) - -- remove a trailing comma if there is a single argument to a call - -- if top level functions were separated by a comment, don't put four empty lines after - the upper function - -- fixed unstable formatting of newlines with imports - -- fixed unintentional folding of post scriptum standalone comments into last statement - if it was a simple statement (#18, #28) - -- fixed missing space in numpy-style array indexing (#33) - -- fixed spurious space after star-based unary expressions (#31) - -### 18.3a1 - -- added `--check` - -- only put trailing commas in function signatures and calls if it's safe to do so. If - the file is Python 3.6+ it's always safe, otherwise only safe if there are no `*args` - or `**kwargs` used in the signature or call. (#8) - -- fixed invalid spacing of dots in relative imports (#6, #13) - -- fixed invalid splitting after comma on unpacked variables in for-loops (#23) - -- fixed spurious space in parenthesized set expressions (#7) - -- fixed spurious space after opening parentheses and in default arguments (#14, #17) - -- fixed spurious space after unary operators when the operand was a complex expression - (#15) - -### 18.3a0 - -- first published version, Happy 🍰 Day 2018! - -- alpha quality - -- date-versioned (see: https://calver.org/) - -## Authors - -Glued together by [Łukasz Langa](mailto:lukasz@langa.pl). - -Maintained with [Carol Willing](mailto:carolcode@willingconsulting.com), -[Carl Meyer](mailto:carl@oddbird.net), -[Jelle Zijlstra](mailto:jelle.zijlstra@gmail.com), -[Mika Naylor](mailto:mail@autophagy.io), and -[Zsolt Dollenstein](mailto:zsol.zsol@gmail.com). - -Multiple contributions by: - -- [Abdur-Rahmaan Janhangeer](mailto:cryptolabour@gmail.com) -- [Adam Johnson](mailto:me@adamj.eu) -- [Alexander Huynh](mailto:github@grande.coffee) -- [Andrew Thorp](mailto:andrew.thorp.dev@gmail.com) -- [Andrey](mailto:dyuuus@yandex.ru) -- [Andy Freeland](mailto:andy@andyfreeland.net) -- [Anthony Sottile](mailto:asottile@umich.edu) -- [Arjaan Buijk](mailto:arjaan.buijk@gmail.com) -- [Artem Malyshev](mailto:proofit404@gmail.com) -- [Asger Hautop Drewsen](mailto:asgerdrewsen@gmail.com) -- [Augie Fackler](mailto:raf@durin42.com) -- [Aviskar KC](mailto:aviskarkc10@gmail.com) -- [Benjamin Woodruff](mailto:github@benjam.info) -- [Brandt Bucher](mailto:brandtbucher@gmail.com) -- Charles Reid -- [Christian Heimes](mailto:christian@python.org) -- [Chuck Wooters](mailto:chuck.wooters@microsoft.com) -- [Daniel Hahler](mailto:github@thequod.de) -- [Daniel M.
Capella](mailto:polycitizen@gmail.com) -- Daniele Esposti -- dylanjblack -- [Eli Treuherz](mailto:eli@treuherz.com) -- [Florent Thiery](mailto:fthiery@gmail.com) -- hauntsaninja -- Hugo van Kemenade -- [Ivan Katanić](mailto:ivan.katanic@gmail.com) -- [Jason Fried](mailto:me@jasonfried.info) -- [jgirardet](mailto:ijkl@netc.fr) -- [Joe Antonakakis](mailto:jma353@cornell.edu) -- [Jon Dufresne](mailto:jon.dufresne@gmail.com) -- [Jonas Obrist](mailto:ojiidotch@gmail.com) -- [Josh Bode](mailto:joshbode@fastmail.com) -- [Juan Luis Cano Rodríguez](mailto:hello@juanlu.space) -- [Katie McLaughlin](mailto:katie@glasnt.com) -- Lawrence Chan -- [Linus Groh](mailto:mail@linusgroh.de) -- [Luka Sterbic](mailto:luka.sterbic@gmail.com) -- Mariatta -- [Matt VanEseltine](mailto:vaneseltine@gmail.com) -- [Michael Flaxman](mailto:michael.flaxman@gmail.com) -- [Michael J. Sullivan](mailto:sully@msully.net) -- [Michael McClimon](mailto:michael@mcclimon.org) -- [Miguel Gaiowski](mailto:miggaiowski@gmail.com) -- [Mike](mailto:roshi@fedoraproject.org) -- [Min ho Kim](mailto:minho42@gmail.com) -- [Miroslav Shubernetskiy](mailto:miroslav@miki725.com) -- [Neraste](mailto:neraste.herr10@gmail.com) -- [Ofek Lev](mailto:ofekmeister@gmail.com) -- [Osaetin Daniel](mailto:osaetindaniel@gmail.com) -- [Pablo Galindo](mailto:Pablogsal@gmail.com) -- [Peter Bengtsson](mailto:mail@peterbe.com) -- pmacosta -- [Rishikesh Jha](mailto:rishijha424@gmail.com) -- [Stavros Korokithakis](mailto:hi@stavros.io) -- [Stephen Rosen](mailto:sirosen@globus.org) -- [Sunil Kapil](mailto:snlkapil@gmail.com) -- [Thom Lu](mailto:thomas.c.lu@gmail.com) -- [Tom Christie](mailto:tom@tomchristie.com) -- [Tzu-ping Chung](mailto:uranusjr@gmail.com) -- [Utsav Shah](mailto:ukshah2@illinois.edu) -- vezeli -- [Vishwas B Sharma](mailto:sharma.vishwas88@gmail.com) -- [Yngve Høiseth](mailto:yngve@hoiseth.net) -- [Yurii Karabas](mailto:1998uriyyo@gmail.com) - - diff --git a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/RECORD b/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/RECORD deleted file mode 100644 index 0439c207..00000000 --- a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/RECORD +++ /dev/null @@ -1,41 +0,0 @@ -../../../bin/black,sha256=QN7VTr9g0AEz4E3CxB7fYs7FsrL4nKY_UGtUMepWt34,255 -../../../bin/blackd,sha256=dUUFscrhpX91Ne7SP0xeyGEtn5Q_xj3kmVMb-L5Jgxs,256 -__pycache__/_black_version.cpython-38.pyc,, -__pycache__/black.cpython-38.pyc,, -__pycache__/blackd.cpython-38.pyc,, -_black_version.py,sha256=H8LwmF384mJdlAgbczUzY8783HltWp3I6BJNF11SHlU,20 -black-19.10b0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -black-19.10b0.dist-info/LICENSE,sha256=nAQo8MO0d5hQz1vZbhGqqK_HLUqG1KNiI9erouWNbgA,1080 -black-19.10b0.dist-info/METADATA,sha256=UN40nGoVVTSpvLrTBwNsXgZdZIwoKFSrrDDHP6B7-A0,58841 -black-19.10b0.dist-info/RECORD,, -black-19.10b0.dist-info/WHEEL,sha256=nMbBw29sRfQVdx260S14VLTQWKtc4i1BSU4yUopWy7U,93 -black-19.10b0.dist-info/entry_points.txt,sha256=Lj74KvN0AtGUJDQuKubBxdy5y9EUVjVFT8T-gQOvd_A,79 -black-19.10b0.dist-info/top_level.txt,sha256=tgyQeKgXkAm2g853Cl_OKCCIY7iu2im52nMCWRvialU,37 -black.py,sha256=45IF72OgNfF8WpeNHnxV2QGfbCLubV5Xjl55cI65kYs,140161 -blackd.py,sha256=JCxaK4hLkMRwVfZMj8FRpRRYC0172-juKqbN22bISLE,6672 -blib2to3/Grammar.txt,sha256=dPd4EkEzQE9Wqhl16bvlUc3hb3nItlbvBkr1BMp1BOE,9628 -blib2to3/PatternGrammar.txt,sha256=7lul2ztnIqDi--JWDrwciD5yMo75w7TaHHxdHMZJvOM,793 -blib2to3/__init__.py,sha256=9_8wL9Scv8_Cs8HJyJHGvx1vwXErsuvlsAqNZLcJQR0,8 -blib2to3/__pycache__/__init__.cpython-38.pyc,, 
-blib2to3/__pycache__/pygram.cpython-38.pyc,, -blib2to3/__pycache__/pytree.cpython-38.pyc,, -blib2to3/pgen2/__init__.py,sha256=hY6w9QUzvTvRb-MoFfd_q_7ZLt6IUHC2yxWCfsZupQA,143 -blib2to3/pgen2/__pycache__/__init__.cpython-38.pyc,, -blib2to3/pgen2/__pycache__/conv.cpython-38.pyc,, -blib2to3/pgen2/__pycache__/driver.cpython-38.pyc,, -blib2to3/pgen2/__pycache__/grammar.cpython-38.pyc,, -blib2to3/pgen2/__pycache__/literals.cpython-38.pyc,, -blib2to3/pgen2/__pycache__/parse.cpython-38.pyc,, -blib2to3/pgen2/__pycache__/pgen.cpython-38.pyc,, -blib2to3/pgen2/__pycache__/token.cpython-38.pyc,, -blib2to3/pgen2/__pycache__/tokenize.cpython-38.pyc,, -blib2to3/pgen2/conv.py,sha256=8tb8K-6bp9e5TMoF_76s51fTLy_0HCjZSNy9ol8TyIg,9593 -blib2to3/pgen2/driver.py,sha256=bXox0MBdQgLggiAVchFHlG-h42zHrcitRsYJH8MXnl4,7638 -blib2to3/pgen2/grammar.py,sha256=-3Eha6cUDW5HF_EmlsOSSpQ6k3g2PtYNjzhwCmx6lIU,5880 -blib2to3/pgen2/literals.py,sha256=yOwfFCTNzp-zElf_Zpse2J9_yR9gi-pQwd51_ltM4-w,1537 -blib2to3/pgen2/parse.py,sha256=5j7iIHbIpYf3ILk0i9tEoDXq5s8D8gUkKydCKsVxgjg,8011 -blib2to3/pgen2/pgen.py,sha256=w6lyXWpYyQGDEwFc35cwjreD2oJ5OrE6qDEJbL45RP8,13807 -blib2to3/pgen2/token.py,sha256=TvxG_QEebn3YDdpwlCLwQxoyRPegf0tssPT7D9K3Qzs,1281 -blib2to3/pgen2/tokenize.py,sha256=4sQ81VAhGDruIKw0hTArBnzMz_Z3DyPiunWrNQLoiJk,21876 -blib2to3/pygram.py,sha256=mXpQPqHcamFwch0RkyJsb92Wd0kUP3TW7d-u9dWhCGY,2085 -blib2to3/pytree.py,sha256=RWj3IL4U-Ljhkn4laN0C3p7IRdfvT3aIRjTV-x9hK1c,28530 diff --git a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/WHEEL b/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/WHEEL deleted file mode 100644 index 107b2493..00000000 --- a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/WHEEL +++ /dev/null @@ -1,5 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.6) -Root-Is-Purelib: true -Tag: py36-none-any - diff --git a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/entry_points.txt b/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/entry_points.txt deleted file mode 100644 index bc684695..00000000 --- a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/entry_points.txt +++ /dev/null @@ -1,4 +0,0 @@ -[console_scripts] -black = black:patched_main -blackd = blackd:patched_main [d] - diff --git a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/top_level.txt b/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/top_level.txt deleted file mode 100644 index 05a77f43..00000000 --- a/venv/lib/python3.8/site-packages/black-19.10b0.dist-info/top_level.txt +++ /dev/null @@ -1,4 +0,0 @@ -_black_version -black -blackd -blib2to3 diff --git a/venv/lib/python3.8/site-packages/black.py b/venv/lib/python3.8/site-packages/black.py deleted file mode 100644 index a48f6476..00000000 --- a/venv/lib/python3.8/site-packages/black.py +++ /dev/null @@ -1,4139 +0,0 @@ -import ast -import asyncio -from concurrent.futures import Executor, ProcessPoolExecutor -from contextlib import contextmanager -from datetime import datetime -from enum import Enum -from functools import lru_cache, partial, wraps -import io -import itertools -import logging -from multiprocessing import Manager, freeze_support -import os -from pathlib import Path -import pickle -import regex as re -import signal -import sys -import tempfile -import tokenize -import traceback -from typing import ( - Any, - Callable, - Collection, - Dict, - Generator, - Generic, - Iterable, - Iterator, - List, - Optional, - Pattern, - Sequence, - Set, - Tuple, - TypeVar, - Union, - cast, -) - -from appdirs import user_cache_dir 
-from attr import dataclass, evolve, Factory -import click -import toml -from typed_ast import ast3, ast27 -from pathspec import PathSpec - -# lib2to3 fork -from blib2to3.pytree import Node, Leaf, type_repr -from blib2to3 import pygram, pytree -from blib2to3.pgen2 import driver, token -from blib2to3.pgen2.grammar import Grammar -from blib2to3.pgen2.parse import ParseError - -from _black_version import version as __version__ - -DEFAULT_LINE_LENGTH = 88 -DEFAULT_EXCLUDES = r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|\.svn|_build|buck-out|build|dist)/" # noqa: B950 -DEFAULT_INCLUDES = r"\.pyi?$" -CACHE_DIR = Path(user_cache_dir("black", version=__version__)) - - -# types -FileContent = str -Encoding = str -NewLine = str -Depth = int -NodeType = int -LeafID = int -Priority = int -Index = int -LN = Union[Leaf, Node] -SplitFunc = Callable[["Line", Collection["Feature"]], Iterator["Line"]] -Timestamp = float -FileSize = int -CacheInfo = Tuple[Timestamp, FileSize] -Cache = Dict[Path, CacheInfo] -out = partial(click.secho, bold=True, err=True) -err = partial(click.secho, fg="red", err=True) - -pygram.initialize(CACHE_DIR) -syms = pygram.python_symbols - - -class NothingChanged(UserWarning): - """Raised when reformatted code is the same as source.""" - - -class CannotSplit(Exception): - """A readable split that fits the allotted line length is impossible.""" - - -class InvalidInput(ValueError): - """Raised when input source code fails all parse attempts.""" - - -class WriteBack(Enum): - NO = 0 - YES = 1 - DIFF = 2 - CHECK = 3 - - @classmethod - def from_configuration(cls, *, check: bool, diff: bool) -> "WriteBack": - if check and not diff: - return cls.CHECK - - return cls.DIFF if diff else cls.YES - - -class Changed(Enum): - NO = 0 - CACHED = 1 - YES = 2 - - -class TargetVersion(Enum): - PY27 = 2 - PY33 = 3 - PY34 = 4 - PY35 = 5 - PY36 = 6 - PY37 = 7 - PY38 = 8 - - def is_python2(self) -> bool: - return self is TargetVersion.PY27 - - -PY36_VERSIONS = {TargetVersion.PY36, TargetVersion.PY37, TargetVersion.PY38} - - -class Feature(Enum): - # All string literals are unicode - UNICODE_LITERALS = 1 - F_STRINGS = 2 - NUMERIC_UNDERSCORES = 3 - TRAILING_COMMA_IN_CALL = 4 - TRAILING_COMMA_IN_DEF = 5 - # The following two feature-flags are mutually exclusive, and exactly one should be - # set for every version of python. 
- ASYNC_IDENTIFIERS = 6 - ASYNC_KEYWORDS = 7 - ASSIGNMENT_EXPRESSIONS = 8 - POS_ONLY_ARGUMENTS = 9 - - -VERSION_TO_FEATURES: Dict[TargetVersion, Set[Feature]] = { - TargetVersion.PY27: {Feature.ASYNC_IDENTIFIERS}, - TargetVersion.PY33: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS}, - TargetVersion.PY34: {Feature.UNICODE_LITERALS, Feature.ASYNC_IDENTIFIERS}, - TargetVersion.PY35: { - Feature.UNICODE_LITERALS, - Feature.TRAILING_COMMA_IN_CALL, - Feature.ASYNC_IDENTIFIERS, - }, - TargetVersion.PY36: { - Feature.UNICODE_LITERALS, - Feature.F_STRINGS, - Feature.NUMERIC_UNDERSCORES, - Feature.TRAILING_COMMA_IN_CALL, - Feature.TRAILING_COMMA_IN_DEF, - Feature.ASYNC_IDENTIFIERS, - }, - TargetVersion.PY37: { - Feature.UNICODE_LITERALS, - Feature.F_STRINGS, - Feature.NUMERIC_UNDERSCORES, - Feature.TRAILING_COMMA_IN_CALL, - Feature.TRAILING_COMMA_IN_DEF, - Feature.ASYNC_KEYWORDS, - }, - TargetVersion.PY38: { - Feature.UNICODE_LITERALS, - Feature.F_STRINGS, - Feature.NUMERIC_UNDERSCORES, - Feature.TRAILING_COMMA_IN_CALL, - Feature.TRAILING_COMMA_IN_DEF, - Feature.ASYNC_KEYWORDS, - Feature.ASSIGNMENT_EXPRESSIONS, - Feature.POS_ONLY_ARGUMENTS, - }, -} - - -@dataclass -class FileMode: - target_versions: Set[TargetVersion] = Factory(set) - line_length: int = DEFAULT_LINE_LENGTH - string_normalization: bool = True - is_pyi: bool = False - - def get_cache_key(self) -> str: - if self.target_versions: - version_str = ",".join( - str(version.value) - for version in sorted(self.target_versions, key=lambda v: v.value) - ) - else: - version_str = "-" - parts = [ - version_str, - str(self.line_length), - str(int(self.string_normalization)), - str(int(self.is_pyi)), - ] - return ".".join(parts) - - -def supports_feature(target_versions: Set[TargetVersion], feature: Feature) -> bool: - return all(feature in VERSION_TO_FEATURES[version] for version in target_versions) - - -def read_pyproject_toml( - ctx: click.Context, param: click.Parameter, value: Union[str, int, bool, None] -) -> Optional[str]: - """Inject Black configuration from "pyproject.toml" into defaults in `ctx`. - - Returns the path to a successfully found and read configuration file, None - otherwise. - """ - assert not isinstance(value, (int, bool)), "Invalid parameter type passed" - if not value: - root = find_project_root(ctx.params.get("src", ())) - path = root / "pyproject.toml" - if path.is_file(): - value = str(path) - else: - return None - - try: - pyproject_toml = toml.load(value) - config = pyproject_toml.get("tool", {}).get("black", {}) - except (toml.TomlDecodeError, OSError) as e: - raise click.FileError( - filename=value, hint=f"Error reading configuration file: {e}" - ) - - if not config: - return None - - if ctx.default_map is None: - ctx.default_map = {} - ctx.default_map.update( # type: ignore # bad types in .pyi - {k.replace("--", "").replace("-", "_"): v for k, v in config.items()} - ) - return value - - -@click.command(context_settings=dict(help_option_names=["-h", "--help"])) -@click.option("-c", "--code", type=str, help="Format the code passed in as a string.") -@click.option( - "-l", - "--line-length", - type=int, - default=DEFAULT_LINE_LENGTH, - help="How many characters per line to allow.", - show_default=True, -) -@click.option( - "-t", - "--target-version", - type=click.Choice([v.name.lower() for v in TargetVersion]), - callback=lambda c, p, v: [TargetVersion[val.upper()] for val in v], - multiple=True, - help=( - "Python versions that should be supported by Black's output. 
[default: " - "per-file auto-detection]" - ), -) -@click.option( - "--py36", - is_flag=True, - help=( - "Allow using Python 3.6-only syntax on all input files. This will put " - "trailing commas in function signatures and calls also after *args and " - "**kwargs. Deprecated; use --target-version instead. " - "[default: per-file auto-detection]" - ), -) -@click.option( - "--pyi", - is_flag=True, - help=( - "Format all input files like typing stubs regardless of file extension " - "(useful when piping source on standard input)." - ), -) -@click.option( - "-S", - "--skip-string-normalization", - is_flag=True, - help="Don't normalize string quotes or prefixes.", -) -@click.option( - "--check", - is_flag=True, - help=( - "Don't write the files back, just return the status. Return code 0 " - "means nothing would change. Return code 1 means some files would be " - "reformatted. Return code 123 means there was an internal error." - ), -) -@click.option( - "--diff", - is_flag=True, - help="Don't write the files back, just output a diff for each file on stdout.", -) -@click.option( - "--fast/--safe", - is_flag=True, - help="If --fast given, skip temporary sanity checks. [default: --safe]", -) -@click.option( - "--include", - type=str, - default=DEFAULT_INCLUDES, - help=( - "A regular expression that matches files and directories that should be " - "included on recursive searches. An empty value means all files are " - "included regardless of the name. Use forward slashes for directories on " - "all platforms (Windows, too). Exclusions are calculated first, inclusions " - "later." - ), - show_default=True, -) -@click.option( - "--exclude", - type=str, - default=DEFAULT_EXCLUDES, - help=( - "A regular expression that matches files and directories that should be " - "excluded on recursive searches. An empty value means no paths are excluded. " - "Use forward slashes for directories on all platforms (Windows, too). " - "Exclusions are calculated first, inclusions later." - ), - show_default=True, -) -@click.option( - "-q", - "--quiet", - is_flag=True, - help=( - "Don't emit non-error messages to stderr. Errors are still emitted; " - "silence those with 2>/dev/null." - ), -) -@click.option( - "-v", - "--verbose", - is_flag=True, - help=( - "Also emit messages to stderr about files that were not changed or were " - "ignored due to --exclude=." - ), -) -@click.version_option(version=__version__) -@click.argument( - "src", - nargs=-1, - type=click.Path( - exists=True, file_okay=True, dir_okay=True, readable=True, allow_dash=True - ), - is_eager=True, -) -@click.option( - "--config", - type=click.Path( - exists=False, file_okay=True, dir_okay=False, readable=True, allow_dash=False - ), - is_eager=True, - callback=read_pyproject_toml, - help="Read configuration from PATH.", -) -@click.pass_context -def main( - ctx: click.Context, - code: Optional[str], - line_length: int, - target_version: List[TargetVersion], - check: bool, - diff: bool, - fast: bool, - pyi: bool, - py36: bool, - skip_string_normalization: bool, - quiet: bool, - verbose: bool, - include: str, - exclude: str, - src: Tuple[str], - config: Optional[str], -) -> None: - """The uncompromising code formatter.""" - write_back = WriteBack.from_configuration(check=check, diff=diff) - if target_version: - if py36: - err(f"Cannot use both --target-version and --py36") - ctx.exit(2) - else: - versions = set(target_version) - elif py36: - err( - "--py36 is deprecated and will be removed in a future version. " - "Use --target-version py36 instead." 
- ) - versions = PY36_VERSIONS - else: - # We'll autodetect later. - versions = set() - mode = FileMode( - target_versions=versions, - line_length=line_length, - is_pyi=pyi, - string_normalization=not skip_string_normalization, - ) - if config and verbose: - out(f"Using configuration from {config}.", bold=False, fg="blue") - if code is not None: - print(format_str(code, mode=mode)) - ctx.exit(0) - try: - include_regex = re_compile_maybe_verbose(include) - except re.error: - err(f"Invalid regular expression for include given: {include!r}") - ctx.exit(2) - try: - exclude_regex = re_compile_maybe_verbose(exclude) - except re.error: - err(f"Invalid regular expression for exclude given: {exclude!r}") - ctx.exit(2) - report = Report(check=check, quiet=quiet, verbose=verbose) - root = find_project_root(src) - sources: Set[Path] = set() - path_empty(src, quiet, verbose, ctx) - for s in src: - p = Path(s) - if p.is_dir(): - sources.update( - gen_python_files_in_dir( - p, root, include_regex, exclude_regex, report, get_gitignore(root) - ) - ) - elif p.is_file() or s == "-": - # if a file was explicitly given, we don't care about its extension - sources.add(p) - else: - err(f"invalid path: {s}") - if len(sources) == 0: - if verbose or not quiet: - out("No Python files are present to be formatted. Nothing to do 😴") - ctx.exit(0) - - if len(sources) == 1: - reformat_one( - src=sources.pop(), - fast=fast, - write_back=write_back, - mode=mode, - report=report, - ) - else: - reformat_many( - sources=sources, fast=fast, write_back=write_back, mode=mode, report=report - ) - - if verbose or not quiet: - out("Oh no! 💥 💔 💥" if report.return_code else "All done! ✨ 🍰 ✨") - click.secho(str(report), err=True) - ctx.exit(report.return_code) - - -def path_empty(src: Tuple[str], quiet: bool, verbose: bool, ctx: click.Context) -> None: - """ - Exit if there is no `src` provided for formatting - """ - if not src: - if verbose or not quiet: - out("No Path provided. Nothing to do 😴") - ctx.exit(0) - - -def reformat_one( - src: Path, fast: bool, write_back: WriteBack, mode: FileMode, report: "Report" -) -> None: - """Reformat a single file under `src` without spawning child processes. - - `fast`, `write_back`, and `mode` options are passed to - :func:`format_file_in_place` or :func:`format_stdin_to_stdout`. 
- """ - try: - changed = Changed.NO - if not src.is_file() and str(src) == "-": - if format_stdin_to_stdout(fast=fast, write_back=write_back, mode=mode): - changed = Changed.YES - else: - cache: Cache = {} - if write_back != WriteBack.DIFF: - cache = read_cache(mode) - res_src = src.resolve() - if res_src in cache and cache[res_src] == get_cache_info(res_src): - changed = Changed.CACHED - if changed is not Changed.CACHED and format_file_in_place( - src, fast=fast, write_back=write_back, mode=mode - ): - changed = Changed.YES - if (write_back is WriteBack.YES and changed is not Changed.CACHED) or ( - write_back is WriteBack.CHECK and changed is Changed.NO - ): - write_cache(cache, [src], mode) - report.done(src, changed) - except Exception as exc: - report.failed(src, str(exc)) - - -def reformat_many( - sources: Set[Path], - fast: bool, - write_back: WriteBack, - mode: FileMode, - report: "Report", -) -> None: - """Reformat multiple files using a ProcessPoolExecutor.""" - loop = asyncio.get_event_loop() - worker_count = os.cpu_count() - if sys.platform == "win32": - # Work around https://bugs.python.org/issue26903 - worker_count = min(worker_count, 61) - executor = ProcessPoolExecutor(max_workers=worker_count) - try: - loop.run_until_complete( - schedule_formatting( - sources=sources, - fast=fast, - write_back=write_back, - mode=mode, - report=report, - loop=loop, - executor=executor, - ) - ) - finally: - shutdown(loop) - executor.shutdown() - - -async def schedule_formatting( - sources: Set[Path], - fast: bool, - write_back: WriteBack, - mode: FileMode, - report: "Report", - loop: asyncio.AbstractEventLoop, - executor: Executor, -) -> None: - """Run formatting of `sources` in parallel using the provided `executor`. - - (Use ProcessPoolExecutors for actual parallelism.) - - `write_back`, `fast`, and `mode` options are passed to - :func:`format_file_in_place`. - """ - cache: Cache = {} - if write_back != WriteBack.DIFF: - cache = read_cache(mode) - sources, cached = filter_cached(cache, sources) - for src in sorted(cached): - report.done(src, Changed.CACHED) - if not sources: - return - - cancelled = [] - sources_to_cache = [] - lock = None - if write_back == WriteBack.DIFF: - # For diff output, we need locks to ensure we don't interleave output - # from different processes. - manager = Manager() - lock = manager.Lock() - tasks = { - asyncio.ensure_future( - loop.run_in_executor( - executor, format_file_in_place, src, fast, mode, write_back, lock - ) - ): src - for src in sorted(sources) - } - pending: Iterable[asyncio.Future] = tasks.keys() - try: - loop.add_signal_handler(signal.SIGINT, cancel, pending) - loop.add_signal_handler(signal.SIGTERM, cancel, pending) - except NotImplementedError: - # There are no good alternatives for these on Windows. - pass - while pending: - done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) - for task in done: - src = tasks.pop(task) - if task.cancelled(): - cancelled.append(task) - elif task.exception(): - report.failed(src, str(task.exception())) - else: - changed = Changed.YES if task.result() else Changed.NO - # If the file was written back or was successfully checked as - # well-formatted, store this information in the cache. 
- if write_back is WriteBack.YES or ( - write_back is WriteBack.CHECK and changed is Changed.NO - ): - sources_to_cache.append(src) - report.done(src, changed) - if cancelled: - await asyncio.gather(*cancelled, loop=loop, return_exceptions=True) - if sources_to_cache: - write_cache(cache, sources_to_cache, mode) - - -def format_file_in_place( - src: Path, - fast: bool, - mode: FileMode, - write_back: WriteBack = WriteBack.NO, - lock: Any = None, # multiprocessing.Manager().Lock() is some crazy proxy -) -> bool: - """Format file under `src` path. Return True if changed. - - If `write_back` is DIFF, write a diff to stdout. If it is YES, write reformatted - code to the file. - `mode` and `fast` options are passed to :func:`format_file_contents`. - """ - if src.suffix == ".pyi": - mode = evolve(mode, is_pyi=True) - - then = datetime.utcfromtimestamp(src.stat().st_mtime) - with open(src, "rb") as buf: - src_contents, encoding, newline = decode_bytes(buf.read()) - try: - dst_contents = format_file_contents(src_contents, fast=fast, mode=mode) - except NothingChanged: - return False - - if write_back == write_back.YES: - with open(src, "w", encoding=encoding, newline=newline) as f: - f.write(dst_contents) - elif write_back == write_back.DIFF: - now = datetime.utcnow() - src_name = f"{src}\t{then} +0000" - dst_name = f"{src}\t{now} +0000" - diff_contents = diff(src_contents, dst_contents, src_name, dst_name) - - with lock or nullcontext(): - f = io.TextIOWrapper( - sys.stdout.buffer, - encoding=encoding, - newline=newline, - write_through=True, - ) - f.write(diff_contents) - f.detach() - - return True - - -def format_stdin_to_stdout( - fast: bool, *, write_back: WriteBack = WriteBack.NO, mode: FileMode -) -> bool: - """Format file on stdin. Return True if changed. - - If `write_back` is YES, write reformatted code back to stdout. If it is DIFF, - write a diff to stdout. The `mode` argument is passed to - :func:`format_file_contents`. - """ - then = datetime.utcnow() - src, encoding, newline = decode_bytes(sys.stdin.buffer.read()) - dst = src - try: - dst = format_file_contents(src, fast=fast, mode=mode) - return True - - except NothingChanged: - return False - - finally: - f = io.TextIOWrapper( - sys.stdout.buffer, encoding=encoding, newline=newline, write_through=True - ) - if write_back == WriteBack.YES: - f.write(dst) - elif write_back == WriteBack.DIFF: - now = datetime.utcnow() - src_name = f"STDIN\t{then} +0000" - dst_name = f"STDOUT\t{now} +0000" - f.write(diff(src, dst, src_name, dst_name)) - f.detach() - - -def format_file_contents( - src_contents: str, *, fast: bool, mode: FileMode -) -> FileContent: - """Reformat contents a file and return new contents. - - If `fast` is False, additionally confirm that the reformatted code is - valid by calling :func:`assert_equivalent` and :func:`assert_stable` on it. - `mode` is passed to :func:`format_str`. - """ - if src_contents.strip() == "": - raise NothingChanged - - dst_contents = format_str(src_contents, mode=mode) - if src_contents == dst_contents: - raise NothingChanged - - if not fast: - assert_equivalent(src_contents, dst_contents) - assert_stable(src_contents, dst_contents, mode=mode) - return dst_contents - - -def format_str(src_contents: str, *, mode: FileMode) -> FileContent: - """Reformat a string and return new contents. - - `mode` determines formatting options, such as how many characters per line are - allowed. 
- """ - src_node = lib2to3_parse(src_contents.lstrip(), mode.target_versions) - dst_contents = [] - future_imports = get_future_imports(src_node) - if mode.target_versions: - versions = mode.target_versions - else: - versions = detect_target_versions(src_node) - normalize_fmt_off(src_node) - lines = LineGenerator( - remove_u_prefix="unicode_literals" in future_imports - or supports_feature(versions, Feature.UNICODE_LITERALS), - is_pyi=mode.is_pyi, - normalize_strings=mode.string_normalization, - ) - elt = EmptyLineTracker(is_pyi=mode.is_pyi) - empty_line = Line() - after = 0 - split_line_features = { - feature - for feature in {Feature.TRAILING_COMMA_IN_CALL, Feature.TRAILING_COMMA_IN_DEF} - if supports_feature(versions, feature) - } - for current_line in lines.visit(src_node): - for _ in range(after): - dst_contents.append(str(empty_line)) - before, after = elt.maybe_empty_lines(current_line) - for _ in range(before): - dst_contents.append(str(empty_line)) - for line in split_line( - current_line, line_length=mode.line_length, features=split_line_features - ): - dst_contents.append(str(line)) - return "".join(dst_contents) - - -def decode_bytes(src: bytes) -> Tuple[FileContent, Encoding, NewLine]: - """Return a tuple of (decoded_contents, encoding, newline). - - `newline` is either CRLF or LF but `decoded_contents` is decoded with - universal newlines (i.e. only contains LF). - """ - srcbuf = io.BytesIO(src) - encoding, lines = tokenize.detect_encoding(srcbuf.readline) - if not lines: - return "", encoding, "\n" - - newline = "\r\n" if b"\r\n" == lines[0][-2:] else "\n" - srcbuf.seek(0) - with io.TextIOWrapper(srcbuf, encoding) as tiow: - return tiow.read(), encoding, newline - - -def get_grammars(target_versions: Set[TargetVersion]) -> List[Grammar]: - if not target_versions: - # No target_version specified, so try all grammars. - return [ - # Python 3.7+ - pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords, - # Python 3.0-3.6 - pygram.python_grammar_no_print_statement_no_exec_statement, - # Python 2.7 with future print_function import - pygram.python_grammar_no_print_statement, - # Python 2.7 - pygram.python_grammar, - ] - - if all(version.is_python2() for version in target_versions): - # Python 2-only code, so try Python 2 grammars. - return [ - # Python 2.7 with future print_function import - pygram.python_grammar_no_print_statement, - # Python 2.7 - pygram.python_grammar, - ] - - # Python 3-compatible code, so only try Python 3 grammar. 
- grammars = [] - # If we have to parse both, try to parse async as a keyword first - if not supports_feature(target_versions, Feature.ASYNC_IDENTIFIERS): - # Python 3.7+ - grammars.append( - pygram.python_grammar_no_print_statement_no_exec_statement_async_keywords - ) - if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS): - # Python 3.0-3.6 - grammars.append(pygram.python_grammar_no_print_statement_no_exec_statement) - # At least one of the above branches must have been taken, because every Python - # version has exactly one of the two 'ASYNC_*' flags - return grammars - - -def lib2to3_parse(src_txt: str, target_versions: Iterable[TargetVersion] = ()) -> Node: - """Given a string with source, return the lib2to3 Node.""" - if src_txt[-1:] != "\n": - src_txt += "\n" - - for grammar in get_grammars(set(target_versions)): - drv = driver.Driver(grammar, pytree.convert) - try: - result = drv.parse_string(src_txt, True) - break - - except ParseError as pe: - lineno, column = pe.context[1] - lines = src_txt.splitlines() - try: - faulty_line = lines[lineno - 1] - except IndexError: - faulty_line = "" - exc = InvalidInput(f"Cannot parse: {lineno}:{column}: {faulty_line}") - else: - raise exc from None - - if isinstance(result, Leaf): - result = Node(syms.file_input, [result]) - return result - - -def lib2to3_unparse(node: Node) -> str: - """Given a lib2to3 node, return its string representation.""" - code = str(node) - return code - - -T = TypeVar("T") - - -class Visitor(Generic[T]): - """Basic lib2to3 visitor that yields things of type `T` on `visit()`.""" - - def visit(self, node: LN) -> Iterator[T]: - """Main method to visit `node` and its children. - - It tries to find a `visit_*()` method for the given `node.type`, like - `visit_simple_stmt` for Node objects or `visit_INDENT` for Leaf objects. - If no dedicated `visit_*()` method is found, chooses `visit_default()` - instead. - - Then yields objects of type `T` from the selected visitor. - """ - if node.type < 256: - name = token.tok_name[node.type] - else: - name = type_repr(node.type) - yield from getattr(self, f"visit_{name}", self.visit_default)(node) - - def visit_default(self, node: LN) -> Iterator[T]: - """Default `visit_*()` implementation. Recurses to children of `node`.""" - if isinstance(node, Node): - for child in node.children: - yield from self.visit(child) - - -@dataclass -class DebugVisitor(Visitor[T]): - tree_depth: int = 0 - - def visit_default(self, node: LN) -> Iterator[T]: - indent = " " * (2 * self.tree_depth) - if isinstance(node, Node): - _type = type_repr(node.type) - out(f"{indent}{_type}", fg="yellow") - self.tree_depth += 1 - for child in node.children: - yield from self.visit(child) - - self.tree_depth -= 1 - out(f"{indent}/{_type}", fg="yellow", bold=False) - else: - _type = token.tok_name.get(node.type, str(node.type)) - out(f"{indent}{_type}", fg="blue", nl=False) - if node.prefix: - # We don't have to handle prefixes for `Node` objects since - # that delegates to the first child anyway. - out(f" {node.prefix!r}", fg="green", bold=False, nl=False) - out(f" {node.value!r}", fg="blue", bold=False) - - @classmethod - def show(cls, code: Union[str, Leaf, Node]) -> None: - """Pretty-print the lib2to3 AST of a given string of `code`. - - Convenience method for debugging. 
- """ - v: DebugVisitor[None] = DebugVisitor() - if isinstance(code, str): - code = lib2to3_parse(code) - list(v.visit(code)) - - -WHITESPACE = {token.DEDENT, token.INDENT, token.NEWLINE} -STATEMENT = { - syms.if_stmt, - syms.while_stmt, - syms.for_stmt, - syms.try_stmt, - syms.except_clause, - syms.with_stmt, - syms.funcdef, - syms.classdef, -} -STANDALONE_COMMENT = 153 -token.tok_name[STANDALONE_COMMENT] = "STANDALONE_COMMENT" -LOGIC_OPERATORS = {"and", "or"} -COMPARATORS = { - token.LESS, - token.GREATER, - token.EQEQUAL, - token.NOTEQUAL, - token.LESSEQUAL, - token.GREATEREQUAL, -} -MATH_OPERATORS = { - token.VBAR, - token.CIRCUMFLEX, - token.AMPER, - token.LEFTSHIFT, - token.RIGHTSHIFT, - token.PLUS, - token.MINUS, - token.STAR, - token.SLASH, - token.DOUBLESLASH, - token.PERCENT, - token.AT, - token.TILDE, - token.DOUBLESTAR, -} -STARS = {token.STAR, token.DOUBLESTAR} -VARARGS_SPECIALS = STARS | {token.SLASH} -VARARGS_PARENTS = { - syms.arglist, - syms.argument, # double star in arglist - syms.trailer, # single argument to call - syms.typedargslist, - syms.varargslist, # lambdas -} -UNPACKING_PARENTS = { - syms.atom, # single element of a list or set literal - syms.dictsetmaker, - syms.listmaker, - syms.testlist_gexp, - syms.testlist_star_expr, -} -TEST_DESCENDANTS = { - syms.test, - syms.lambdef, - syms.or_test, - syms.and_test, - syms.not_test, - syms.comparison, - syms.star_expr, - syms.expr, - syms.xor_expr, - syms.and_expr, - syms.shift_expr, - syms.arith_expr, - syms.trailer, - syms.term, - syms.power, -} -ASSIGNMENTS = { - "=", - "+=", - "-=", - "*=", - "@=", - "/=", - "%=", - "&=", - "|=", - "^=", - "<<=", - ">>=", - "**=", - "//=", -} -COMPREHENSION_PRIORITY = 20 -COMMA_PRIORITY = 18 -TERNARY_PRIORITY = 16 -LOGIC_PRIORITY = 14 -STRING_PRIORITY = 12 -COMPARATOR_PRIORITY = 10 -MATH_PRIORITIES = { - token.VBAR: 9, - token.CIRCUMFLEX: 8, - token.AMPER: 7, - token.LEFTSHIFT: 6, - token.RIGHTSHIFT: 6, - token.PLUS: 5, - token.MINUS: 5, - token.STAR: 4, - token.SLASH: 4, - token.DOUBLESLASH: 4, - token.PERCENT: 4, - token.AT: 4, - token.TILDE: 3, - token.DOUBLESTAR: 2, -} -DOT_PRIORITY = 1 - - -@dataclass -class BracketTracker: - """Keeps track of brackets on a line.""" - - depth: int = 0 - bracket_match: Dict[Tuple[Depth, NodeType], Leaf] = Factory(dict) - delimiters: Dict[LeafID, Priority] = Factory(dict) - previous: Optional[Leaf] = None - _for_loop_depths: List[int] = Factory(list) - _lambda_argument_depths: List[int] = Factory(list) - - def mark(self, leaf: Leaf) -> None: - """Mark `leaf` with bracket-related metadata. Keep track of delimiters. - - All leaves receive an int `bracket_depth` field that stores how deep - within brackets a given leaf is. 0 means there are no enclosing brackets - that started on this line. - - If a leaf is itself a closing bracket, it receives an `opening_bracket` - field that it forms a pair with. This is a one-directional link to - avoid reference cycles. - - If a leaf is a delimiter (a token on which Black can split the line if - needed) and it's on depth 0, its `id()` is stored in the tracker's - `delimiters` field. 
- """ - if leaf.type == token.COMMENT: - return - - self.maybe_decrement_after_for_loop_variable(leaf) - self.maybe_decrement_after_lambda_arguments(leaf) - if leaf.type in CLOSING_BRACKETS: - self.depth -= 1 - opening_bracket = self.bracket_match.pop((self.depth, leaf.type)) - leaf.opening_bracket = opening_bracket - leaf.bracket_depth = self.depth - if self.depth == 0: - delim = is_split_before_delimiter(leaf, self.previous) - if delim and self.previous is not None: - self.delimiters[id(self.previous)] = delim - else: - delim = is_split_after_delimiter(leaf, self.previous) - if delim: - self.delimiters[id(leaf)] = delim - if leaf.type in OPENING_BRACKETS: - self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf - self.depth += 1 - self.previous = leaf - self.maybe_increment_lambda_arguments(leaf) - self.maybe_increment_for_loop_variable(leaf) - - def any_open_brackets(self) -> bool: - """Return True if there is an yet unmatched open bracket on the line.""" - return bool(self.bracket_match) - - def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority: - """Return the highest priority of a delimiter found on the line. - - Values are consistent with what `is_split_*_delimiter()` return. - Raises ValueError on no delimiters. - """ - return max(v for k, v in self.delimiters.items() if k not in exclude) - - def delimiter_count_with_priority(self, priority: Priority = 0) -> int: - """Return the number of delimiters with the given `priority`. - - If no `priority` is passed, defaults to max priority on the line. - """ - if not self.delimiters: - return 0 - - priority = priority or self.max_delimiter_priority() - return sum(1 for p in self.delimiters.values() if p == priority) - - def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool: - """In a for loop, or comprehension, the variables are often unpacks. - - To avoid splitting on the comma in this situation, increase the depth of - tokens between `for` and `in`. - """ - if leaf.type == token.NAME and leaf.value == "for": - self.depth += 1 - self._for_loop_depths.append(self.depth) - return True - - return False - - def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool: - """See `maybe_increment_for_loop_variable` above for explanation.""" - if ( - self._for_loop_depths - and self._for_loop_depths[-1] == self.depth - and leaf.type == token.NAME - and leaf.value == "in" - ): - self.depth -= 1 - self._for_loop_depths.pop() - return True - - return False - - def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool: - """In a lambda expression, there might be more than one argument. - - To avoid splitting on the comma in this situation, increase the depth of - tokens between `lambda` and `:`. - """ - if leaf.type == token.NAME and leaf.value == "lambda": - self.depth += 1 - self._lambda_argument_depths.append(self.depth) - return True - - return False - - def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool: - """See `maybe_increment_lambda_arguments` above for explanation.""" - if ( - self._lambda_argument_depths - and self._lambda_argument_depths[-1] == self.depth - and leaf.type == token.COLON - ): - self.depth -= 1 - self._lambda_argument_depths.pop() - return True - - return False - - def get_open_lsqb(self) -> Optional[Leaf]: - """Return the most recent opening square bracket (if any).""" - return self.bracket_match.get((self.depth - 1, token.RSQB)) - - -@dataclass -class Line: - """Holds leaves and comments. 
Can be printed with `str(line)`.""" - - depth: int = 0 - leaves: List[Leaf] = Factory(list) - comments: Dict[LeafID, List[Leaf]] = Factory(dict) # keys ordered like `leaves` - bracket_tracker: BracketTracker = Factory(BracketTracker) - inside_brackets: bool = False - should_explode: bool = False - - def append(self, leaf: Leaf, preformatted: bool = False) -> None: - """Add a new `leaf` to the end of the line. - - Unless `preformatted` is True, the `leaf` will receive a new consistent - whitespace prefix and metadata applied by :class:`BracketTracker`. - Trailing commas are maybe removed; unpacked for-loop variables are - demoted from being delimiters. - - Inline comments are put aside. - """ - has_value = leaf.type in BRACKETS or bool(leaf.value.strip()) - if not has_value: - return - - if token.COLON == leaf.type and self.is_class_paren_empty: - del self.leaves[-2:] - if self.leaves and not preformatted: - # Note: at this point leaf.prefix should be empty except for - # imports, for which we only preserve newlines. - leaf.prefix += whitespace( - leaf, complex_subscript=self.is_complex_subscript(leaf) - ) - if self.inside_brackets or not preformatted: - self.bracket_tracker.mark(leaf) - self.maybe_remove_trailing_comma(leaf) - if not self.append_comment(leaf): - self.leaves.append(leaf) - - def append_safe(self, leaf: Leaf, preformatted: bool = False) -> None: - """Like :func:`append()` but disallow invalid standalone comment structure. - - Raises ValueError when any `leaf` is appended after a standalone comment - or when a standalone comment is not the first leaf on the line. - """ - if self.bracket_tracker.depth == 0: - if self.is_comment: - raise ValueError("cannot append to standalone comments") - - if self.leaves and leaf.type == STANDALONE_COMMENT: - raise ValueError( - "cannot append standalone comments to a populated line" - ) - - self.append(leaf, preformatted=preformatted) - - @property - def is_comment(self) -> bool: - """Is this line a standalone comment?""" - return len(self.leaves) == 1 and self.leaves[0].type == STANDALONE_COMMENT - - @property - def is_decorator(self) -> bool: - """Is this line a decorator?""" - return bool(self) and self.leaves[0].type == token.AT - - @property - def is_import(self) -> bool: - """Is this an import line?""" - return bool(self) and is_import(self.leaves[0]) - - @property - def is_class(self) -> bool: - """Is this line a class definition?""" - return ( - bool(self) - and self.leaves[0].type == token.NAME - and self.leaves[0].value == "class" - ) - - @property - def is_stub_class(self) -> bool: - """Is this line a class definition with a body consisting only of "..."?""" - return self.is_class and self.leaves[-3:] == [ - Leaf(token.DOT, ".") for _ in range(3) - ] - - @property - def is_collection_with_optional_trailing_comma(self) -> bool: - """Is this line a collection literal with a trailing comma that's optional? - - Note that the trailing comma in a 1-tuple is not optional. - """ - if not self.leaves or len(self.leaves) < 4: - return False - - # Look for and address a trailing colon. - if self.leaves[-1].type == token.COLON: - closer = self.leaves[-2] - close_index = -2 - else: - closer = self.leaves[-1] - close_index = -1 - if closer.type not in CLOSING_BRACKETS or self.inside_brackets: - return False - - if closer.type == token.RPAR: - # Tuples require an extra check, because if there's only - # one element in the tuple removing the comma unmakes the - # tuple.
- # - # We also check for parens before looking for the trailing - # comma because in some cases (e.g. assigning a dict - # literal) the literal gets wrapped in temporary parens - # during parsing. This case is covered by the - # collections.py test data. - opener = closer.opening_bracket - for _open_index, leaf in enumerate(self.leaves): - if leaf is opener: - break - - else: - # Couldn't find the matching opening paren, play it safe. - return False - - commas = 0 - comma_depth = self.leaves[close_index - 1].bracket_depth - for leaf in self.leaves[_open_index + 1 : close_index]: - if leaf.bracket_depth == comma_depth and leaf.type == token.COMMA: - commas += 1 - if commas > 1: - # We haven't looked yet for the trailing comma because - # we might also have caught noop parens. - return self.leaves[close_index - 1].type == token.COMMA - - elif commas == 1: - return False # it's either a one-tuple or didn't have a trailing comma - - if self.leaves[close_index - 1].type in CLOSING_BRACKETS: - close_index -= 1 - closer = self.leaves[close_index] - if closer.type == token.RPAR: - # TODO: this is a gut feeling. Will we ever see this? - return False - - if self.leaves[close_index - 1].type != token.COMMA: - return False - - return True - - @property - def is_def(self) -> bool: - """Is this a function definition? (Also returns True for async defs.)""" - try: - first_leaf = self.leaves[0] - except IndexError: - return False - - try: - second_leaf: Optional[Leaf] = self.leaves[1] - except IndexError: - second_leaf = None - return (first_leaf.type == token.NAME and first_leaf.value == "def") or ( - first_leaf.type == token.ASYNC - and second_leaf is not None - and second_leaf.type == token.NAME - and second_leaf.value == "def" - ) - - @property - def is_class_paren_empty(self) -> bool: - """Is this a class with no base classes but using parentheses? - - Those are unnecessary and should be removed. - """ - return ( - bool(self) - and len(self.leaves) == 4 - and self.is_class - and self.leaves[2].type == token.LPAR - and self.leaves[2].value == "(" - and self.leaves[3].type == token.RPAR - and self.leaves[3].value == ")" - ) - - @property - def is_triple_quoted_string(self) -> bool: - """Is the line a triple quoted string?""" - return ( - bool(self) - and self.leaves[0].type == token.STRING - and self.leaves[0].value.startswith(('"""', "'''")) - ) - - def contains_standalone_comments(self, depth_limit: int = sys.maxsize) -> bool: - """If so, needs to be split before emitting.""" - for leaf in self.leaves: - if leaf.type == STANDALONE_COMMENT and leaf.bracket_depth <= depth_limit: - return True - - return False - - def contains_uncollapsable_type_comments(self) -> bool: - ignored_ids = set() - try: - last_leaf = self.leaves[-1] - ignored_ids.add(id(last_leaf)) - if last_leaf.type == token.COMMA or ( - last_leaf.type == token.RPAR and not last_leaf.value - ): - # When trailing commas or optional parens are inserted by Black for - # consistency, comments after the previous last element are not moved - # (they don't have to, rendering will still be correct). So we ignore - # trailing commas and invisible parens. - last_leaf = self.leaves[-2] - ignored_ids.add(id(last_leaf)) - except IndexError: - return False - - # A type comment is uncollapsable if it is attached to a leaf - # that isn't at the end of the line (since that could cause it - # to get associated to a different argument) or if there are - # comments before it (since that could cause it to get hidden - # behind a comment).
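A hypothetical input that makes the hazard above concrete: the per-argument type comments below cannot be collapsed onto one line without re-attaching them to the wrong leaf, so the line must stay split (`process`, `count`, and `label` are illustrative names, not from Black's test data):

    def process(
        count,  # type: int
        label,  # type: str
    ):
        # type: (...) -> None
        pass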
- comment_seen = False - for leaf_id, comments in self.comments.items(): - for comment in comments: - if is_type_comment(comment): - if leaf_id not in ignored_ids or comment_seen: - return True - - comment_seen = True - - return False - - def contains_unsplittable_type_ignore(self) -> bool: - if not self.leaves: - return False - - # If a 'type: ignore' is attached to the end of a line, we - # can't split the line, because we can't know which of the - # subexpressions the ignore was meant to apply to. - # - # We only want this to apply to actual physical lines from the - # original source, though: we don't want the presence of a - # 'type: ignore' at the end of a multiline expression to - # justify pushing it all onto one line. Thus we - # (unfortunately) need to check the actual source lines and - # only report an unsplittable 'type: ignore' if this line was - # one line in the original code. - - # Grab the first and last line numbers, skipping generated leaves - first_line = next((l.lineno for l in self.leaves if l.lineno != 0), 0) - last_line = next((l.lineno for l in reversed(self.leaves) if l.lineno != 0), 0) - - if first_line == last_line: - # We look at the last two leaves since a comma or an - # invisible paren could have been added at the end of the - # line. - for node in self.leaves[-2:]: - for comment in self.comments.get(id(node), []): - if is_type_comment(comment, " ignore"): - return True - - return False - - def contains_multiline_strings(self) -> bool: - for leaf in self.leaves: - if is_multiline_string(leaf): - return True - - return False - - def maybe_remove_trailing_comma(self, closing: Leaf) -> bool: - """Remove trailing comma if there is one and it's safe.""" - if not (self.leaves and self.leaves[-1].type == token.COMMA): - return False - - # We remove trailing commas only in the case of importing a - # single name from a module. - if not ( - self.leaves - and self.is_import - and len(self.leaves) > 4 - and self.leaves[-1].type == token.COMMA - and closing.type in CLOSING_BRACKETS - and self.leaves[-4].type == token.NAME - and ( - # regular `from foo import bar,` - self.leaves[-4].value == "import" - # `from foo import (bar as baz,)` - or ( - len(self.leaves) > 6 - and self.leaves[-6].value == "import" - and self.leaves[-3].value == "as" - ) - # `from foo import bar as baz,` - or ( - len(self.leaves) > 5 - and self.leaves[-5].value == "import" - and self.leaves[-3].value == "as" - ) - ) - and closing.type == token.RPAR - ): - return False - - self.remove_trailing_comma() - return True - - def append_comment(self, comment: Leaf) -> bool: - """Add an inline or standalone comment to the line.""" - if ( - comment.type == STANDALONE_COMMENT - and self.bracket_tracker.any_open_brackets() - ): - comment.prefix = "" - return False - - if comment.type != token.COMMENT: - return False - - if not self.leaves: - comment.type = STANDALONE_COMMENT - comment.prefix = "" - return False - - last_leaf = self.leaves[-1] - if ( - last_leaf.type == token.RPAR - and not last_leaf.value - and last_leaf.parent - and len(list(last_leaf.parent.leaves())) <= 3 - and not is_type_comment(comment) - ): - # Comments on optional parens wrapping a single leaf should belong to - # the wrapped node except if it's a type comment. Pinning the comment like - # this avoids unstable formatting caused by comment migration.
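A sketch of the case being pinned here, on hypothetical input: Black may wrap a lone return value in invisible parentheses, leaving an empty-valued RPAR as the line's last leaf, and the trailing comment should stay with the wrapped node:

    return some_value  # stays attached to `some_value`, not the invisible ")"

Without the pinning, the comment could drift between the node and the paren on repeated formatting passes, producing unstable output.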
- if len(self.leaves) < 2: - comment.type = STANDALONE_COMMENT - comment.prefix = "" - return False - - last_leaf = self.leaves[-2] - self.comments.setdefault(id(last_leaf), []).append(comment) - return True - - def comments_after(self, leaf: Leaf) -> List[Leaf]: - """Generate comments that should appear directly after `leaf`.""" - return self.comments.get(id(leaf), []) - - def remove_trailing_comma(self) -> None: - """Remove the trailing comma and move the comments attached to it.""" - trailing_comma = self.leaves.pop() - trailing_comma_comments = self.comments.pop(id(trailing_comma), []) - self.comments.setdefault(id(self.leaves[-1]), []).extend( - trailing_comma_comments - ) - - def is_complex_subscript(self, leaf: Leaf) -> bool: - """Return True iff `leaf` is part of a slice with non-trivial exprs.""" - open_lsqb = self.bracket_tracker.get_open_lsqb() - if open_lsqb is None: - return False - - subscript_start = open_lsqb.next_sibling - - if isinstance(subscript_start, Node): - if subscript_start.type == syms.listmaker: - return False - - if subscript_start.type == syms.subscriptlist: - subscript_start = child_towards(subscript_start, leaf) - return subscript_start is not None and any( - n.type in TEST_DESCENDANTS for n in subscript_start.pre_order() - ) - - def __str__(self) -> str: - """Render the line.""" - if not self: - return "\n" - - indent = " " * self.depth - leaves = iter(self.leaves) - first = next(leaves) - res = f"{first.prefix}{indent}{first.value}" - for leaf in leaves: - res += str(leaf) - for comment in itertools.chain.from_iterable(self.comments.values()): - res += str(comment) - return res + "\n" - - def __bool__(self) -> bool: - """Return True if the line has leaves or comments.""" - return bool(self.leaves or self.comments) - - -@dataclass -class EmptyLineTracker: - """Provides a stateful method that returns the number of potential extra - empty lines needed before and after the currently processed line. - - Note: this tracker works on lines that haven't been split yet. It assumes - the prefix of the first leaf consists of optional newlines. Those newlines - are consumed by `maybe_empty_lines()` and included in the computation. - """ - - is_pyi: bool = False - previous_line: Optional[Line] = None - previous_after: int = 0 - previous_defs: List[int] = Factory(list) - - def maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: - """Return the number of extra empty lines before and after the `current_line`. - - This is for separating `def`, `async def` and `class` with extra empty - lines (two at module level). - """ - before, after = self._maybe_empty_lines(current_line) - before = ( - # Black should not insert empty lines at the beginning - # of the file - 0 - if self.previous_line is None - else before - self.previous_after - ) - self.previous_after = after - self.previous_line = current_line - return before, after - - def _maybe_empty_lines(self, current_line: Line) -> Tuple[int, int]: - max_allowed = 1 - if current_line.depth == 0: - max_allowed = 1 if self.is_pyi else 2 - if current_line.leaves: - # Consume the first leaf's extra newlines.
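The policy the following lines implement, shown as a before/after sketch (usual Black behavior, hypothetical input): runs of blank lines are capped at two at module level and at one inside nested code:

    x = 1




    def f():
        pass

After formatting, the four blank lines above collapse to two; had `def f` been nested inside a class or function, at most one would remain.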
- first_leaf = current_line.leaves[0] - before = first_leaf.prefix.count("\n") - before = min(before, max_allowed) - first_leaf.prefix = "" - else: - before = 0 - depth = current_line.depth - while self.previous_defs and self.previous_defs[-1] >= depth: - self.previous_defs.pop() - if self.is_pyi: - before = 0 if depth else 1 - else: - before = 1 if depth else 2 - if current_line.is_decorator or current_line.is_def or current_line.is_class: - return self._maybe_empty_lines_for_class_or_def(current_line, before) - - if ( - self.previous_line - and self.previous_line.is_import - and not current_line.is_import - and depth == self.previous_line.depth - ): - return (before or 1), 0 - - if ( - self.previous_line - and self.previous_line.is_class - and current_line.is_triple_quoted_string - ): - return before, 1 - - return before, 0 - - def _maybe_empty_lines_for_class_or_def( - self, current_line: Line, before: int - ) -> Tuple[int, int]: - if not current_line.is_decorator: - self.previous_defs.append(current_line.depth) - if self.previous_line is None: - # Don't insert empty lines before the first line in the file. - return 0, 0 - - if self.previous_line.is_decorator: - return 0, 0 - - if self.previous_line.depth < current_line.depth and ( - self.previous_line.is_class or self.previous_line.is_def - ): - return 0, 0 - - if ( - self.previous_line.is_comment - and self.previous_line.depth == current_line.depth - and before == 0 - ): - return 0, 0 - - if self.is_pyi: - if self.previous_line.depth > current_line.depth: - newlines = 1 - elif current_line.is_class or self.previous_line.is_class: - if current_line.is_stub_class and self.previous_line.is_stub_class: - # No blank line between classes with an empty body - newlines = 0 - else: - newlines = 1 - elif current_line.is_def and not self.previous_line.is_def: - # Blank line between a block of functions and a block of non-functions - newlines = 1 - else: - newlines = 0 - else: - newlines = 2 - if current_line.depth and newlines: - newlines -= 1 - return newlines, 0 - - -@dataclass -class LineGenerator(Visitor[Line]): - """Generates reformatted Line objects. Empty lines are not emitted. - - Note: destroys the tree it's visiting by mutating prefixes of its leaves - in ways that will no longer stringify to valid Python code on the tree. - """ - - is_pyi: bool = False - normalize_strings: bool = True - current_line: Line = Factory(Line) - remove_u_prefix: bool = False - - def line(self, indent: int = 0) -> Iterator[Line]: - """Generate a line. - - If the line is empty, only emit if it makes sense. - If the line is too long, split it first and then generate. - - If any lines were generated, set up a new current_line. - """ - if not self.current_line: - self.current_line.depth += indent - return # Line is empty, don't emit. Creating a new one is unnecessary. - - complete_line = self.current_line - self.current_line = Line(depth=complete_line.depth + indent) - yield complete_line - - def visit_default(self, node: LN) -> Iterator[Line]: - """Default `visit_*()` implementation.
Recurses to children of `node`.""" - if isinstance(node, Leaf): - any_open_brackets = self.current_line.bracket_tracker.any_open_brackets() - for comment in generate_comments(node): - if any_open_brackets: - # any comment within brackets is subject to splitting - self.current_line.append(comment) - elif comment.type == token.COMMENT: - # regular trailing comment - self.current_line.append(comment) - yield from self.line() - - else: - # regular standalone comment - yield from self.line() - - self.current_line.append(comment) - yield from self.line() - - normalize_prefix(node, inside_brackets=any_open_brackets) - if self.normalize_strings and node.type == token.STRING: - normalize_string_prefix(node, remove_u_prefix=self.remove_u_prefix) - normalize_string_quotes(node) - if node.type == token.NUMBER: - normalize_numeric_literal(node) - if node.type not in WHITESPACE: - self.current_line.append(node) - yield from super().visit_default(node) - - def visit_INDENT(self, node: Node) -> Iterator[Line]: - """Increase indentation level, maybe yield a line.""" - # In blib2to3 INDENT never holds comments. - yield from self.line(+1) - yield from self.visit_default(node) - - def visit_DEDENT(self, node: Node) -> Iterator[Line]: - """Decrease indentation level, maybe yield a line.""" - # The current line might still wait for trailing comments. At DEDENT time - # there won't be any (they would be prefixes on the preceding NEWLINE). - # Emit the line then. - yield from self.line() - - # While DEDENT has no value, its prefix may contain standalone comments - # that belong to the current indentation level. Get 'em. - yield from self.visit_default(node) - - # Finally, emit the dedent. - yield from self.line(-1) - - def visit_stmt( - self, node: Node, keywords: Set[str], parens: Set[str] - ) -> Iterator[Line]: - """Visit a statement. - - This implementation is shared for `if`, `while`, `for`, `try`, `except`, - `def`, `with`, `class`, `assert` and assignments. - - The relevant Python language `keywords` for a given statement will be - NAME leaves within it. This method puts those on a separate line. - - `parens` holds a set of string leaf values immediately after which - invisible parens should be put.
- """ - normalize_invisible_parens(node, parens_after=parens) - for child in node.children: - if child.type == token.NAME and child.value in keywords: # type: ignore - yield from self.line() - - yield from self.visit(child) - - def visit_suite(self, node: Node) -> Iterator[Line]: - """Visit a suite.""" - if self.is_pyi and is_stub_suite(node): - yield from self.visit(node.children[2]) - else: - yield from self.visit_default(node) - - def visit_simple_stmt(self, node: Node) -> Iterator[Line]: - """Visit a statement without nested statements.""" - is_suite_like = node.parent and node.parent.type in STATEMENT - if is_suite_like: - if self.is_pyi and is_stub_body(node): - yield from self.visit_default(node) - else: - yield from self.line(+1) - yield from self.visit_default(node) - yield from self.line(-1) - - else: - if not self.is_pyi or not node.parent or not is_stub_suite(node.parent): - yield from self.line() - yield from self.visit_default(node) - - def visit_async_stmt(self, node: Node) -> Iterator[Line]: - """Visit `async def`, `async for`, `async with`.""" - yield from self.line() - - children = iter(node.children) - for child in children: - yield from self.visit(child) - - if child.type == token.ASYNC: - break - - internal_stmt = next(children) - for child in internal_stmt.children: - yield from self.visit(child) - - def visit_decorators(self, node: Node) -> Iterator[Line]: - """Visit decorators.""" - for child in node.children: - yield from self.line() - yield from self.visit(child) - - def visit_SEMI(self, leaf: Leaf) -> Iterator[Line]: - """Remove a semicolon and put the other statement on a separate line.""" - yield from self.line() - - def visit_ENDMARKER(self, leaf: Leaf) -> Iterator[Line]: - """End of file. Process outstanding comments and end with a newline.""" - yield from self.visit_default(leaf) - yield from self.line() - - def visit_STANDALONE_COMMENT(self, leaf: Leaf) -> Iterator[Line]: - if not self.current_line.bracket_tracker.any_open_brackets(): - yield from self.line() - yield from self.visit_default(leaf) - - def visit_factor(self, node: Node) -> Iterator[Line]: - """Force parentheses between a unary op and a binary power: - - -2 ** 8 -> -(2 ** 8) - """ - _operator, operand = node.children - if ( - operand.type == syms.power - and len(operand.children) == 3 - and operand.children[1].type == token.DOUBLESTAR - ): - lpar = Leaf(token.LPAR, "(") - rpar = Leaf(token.RPAR, ")") - index = operand.remove() or 0 - node.insert_child(index, Node(syms.atom, [lpar, operand, rpar])) - yield from self.visit_default(node) - - def __attrs_post_init__(self) -> None: - """You are in a twisty little maze of passages.""" - v = self.visit_stmt - Ø: Set[str] = set() - self.visit_assert_stmt = partial(v, keywords={"assert"}, parens={"assert", ","}) - self.visit_if_stmt = partial( - v, keywords={"if", "else", "elif"}, parens={"if", "elif"} - ) - self.visit_while_stmt = partial(v, keywords={"while", "else"}, parens={"while"}) - self.visit_for_stmt = partial(v, keywords={"for", "else"}, parens={"for", "in"}) - self.visit_try_stmt = partial( - v, keywords={"try", "except", "else", "finally"}, parens=Ø - ) - self.visit_except_clause = partial(v, keywords={"except"}, parens=Ø) - self.visit_with_stmt = partial(v, keywords={"with"}, parens=Ø) - self.visit_funcdef = partial(v, keywords={"def"}, parens=Ø) - self.visit_classdef = partial(v, keywords={"class"}, parens=Ø) - self.visit_expr_stmt = partial(v, keywords=Ø, parens=ASSIGNMENTS) - self.visit_return_stmt = partial(v, keywords={"return"}, 
parens={"return"}) - self.visit_import_from = partial(v, keywords=Ø, parens={"import"}) - self.visit_del_stmt = partial(v, keywords=Ø, parens={"del"}) - self.visit_async_funcdef = self.visit_async_stmt - self.visit_decorated = self.visit_decorators - - -IMPLICIT_TUPLE = {syms.testlist, syms.testlist_star_expr, syms.exprlist} -BRACKET = {token.LPAR: token.RPAR, token.LSQB: token.RSQB, token.LBRACE: token.RBRACE} -OPENING_BRACKETS = set(BRACKET.keys()) -CLOSING_BRACKETS = set(BRACKET.values()) -BRACKETS = OPENING_BRACKETS | CLOSING_BRACKETS -ALWAYS_NO_SPACE = CLOSING_BRACKETS | {token.COMMA, STANDALONE_COMMENT} - - -def whitespace(leaf: Leaf, *, complex_subscript: bool) -> str: # noqa: C901 - """Return whitespace prefix if needed for the given `leaf`. - - `complex_subscript` signals whether the given leaf is part of a subscription - which has non-trivial arguments, like arithmetic expressions or function calls. - """ - NO = "" - SPACE = " " - DOUBLESPACE = " " - t = leaf.type - p = leaf.parent - v = leaf.value - if t in ALWAYS_NO_SPACE: - return NO - - if t == token.COMMENT: - return DOUBLESPACE - - assert p is not None, f"INTERNAL ERROR: hand-made leaf without parent: {leaf!r}" - if t == token.COLON and p.type not in { - syms.subscript, - syms.subscriptlist, - syms.sliceop, - }: - return NO - - prev = leaf.prev_sibling - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type in OPENING_BRACKETS: - return NO - - if t == token.COLON: - if prevp.type == token.COLON: - return NO - - elif prevp.type != token.COMMA and not complex_subscript: - return NO - - return SPACE - - if prevp.type == token.EQUAL: - if prevp.parent: - if prevp.parent.type in { - syms.arglist, - syms.argument, - syms.parameters, - syms.varargslist, - }: - return NO - - elif prevp.parent.type == syms.typedargslist: - # A bit hacky: if the equal sign has whitespace, it means we - # previously found it's a typed argument. So, we're using - # that, too. - return prevp.prefix - - elif prevp.type in VARARGS_SPECIALS: - if is_vararg(prevp, within=VARARGS_PARENTS | UNPACKING_PARENTS): - return NO - - elif prevp.type == token.COLON: - if prevp.parent and prevp.parent.type in {syms.subscript, syms.sliceop}: - return SPACE if complex_subscript else NO - - elif ( - prevp.parent - and prevp.parent.type == syms.factor - and prevp.type in MATH_OPERATORS - ): - return NO - - elif ( - prevp.type == token.RIGHTSHIFT - and prevp.parent - and prevp.parent.type == syms.shift_expr - and prevp.prev_sibling - and prevp.prev_sibling.type == token.NAME - and prevp.prev_sibling.value == "print" # type: ignore - ): - # Python 2 print chevron - return NO - - elif prev.type in OPENING_BRACKETS: - return NO - - if p.type in {syms.parameters, syms.arglist}: - # untyped function signatures or calls - if not prev or prev.type != token.COMMA: - return NO - - elif p.type == syms.varargslist: - # lambdas - if prev and prev.type != token.COMMA: - return NO - - elif p.type == syms.typedargslist: - # typed function signatures - if not prev: - return NO - - if t == token.EQUAL: - if prev.type != syms.tname: - return NO - - elif prev.type == token.EQUAL: - # A bit hacky: if the equal sign has whitespace, it means we - # previously found it's a typed argument. So, we're using that, too. 
- return prev.prefix - - elif prev.type != token.COMMA: - return NO - - elif p.type == syms.tname: - # type names - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type != token.COMMA: - return NO - - elif p.type == syms.trailer: - # attributes and calls - if t == token.LPAR or t == token.RPAR: - return NO - - if not prev: - if t == token.DOT: - prevp = preceding_leaf(p) - if not prevp or prevp.type != token.NUMBER: - return NO - - elif t == token.LSQB: - return NO - - elif prev.type != token.COMMA: - return NO - - elif p.type == syms.argument: - # single argument - if t == token.EQUAL: - return NO - - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type == token.LPAR: - return NO - - elif prev.type in {token.EQUAL} | VARARGS_SPECIALS: - return NO - - elif p.type == syms.decorator: - # decorators - return NO - - elif p.type == syms.dotted_name: - if prev: - return NO - - prevp = preceding_leaf(p) - if not prevp or prevp.type == token.AT or prevp.type == token.DOT: - return NO - - elif p.type == syms.classdef: - if t == token.LPAR: - return NO - - if prev and prev.type == token.LPAR: - return NO - - elif p.type in {syms.subscript, syms.sliceop}: - # indexing - if not prev: - assert p.parent is not None, "subscripts are always parented" - if p.parent.type == syms.subscriptlist: - return SPACE - - return NO - - elif not complex_subscript: - return NO - - elif p.type == syms.atom: - if prev and t == token.DOT: - # dots, but not the first one. - return NO - - elif p.type == syms.dictsetmaker: - # dict unpacking - if prev and prev.type == token.DOUBLESTAR: - return NO - - elif p.type in {syms.factor, syms.star_expr}: - # unary ops - if not prev: - prevp = preceding_leaf(p) - if not prevp or prevp.type in OPENING_BRACKETS: - return NO - - prevp_parent = prevp.parent - assert prevp_parent is not None - if prevp.type == token.COLON and prevp_parent.type in { - syms.subscript, - syms.sliceop, - }: - return NO - - elif prevp.type == token.EQUAL and prevp_parent.type == syms.argument: - return NO - - elif t in {token.NAME, token.NUMBER, token.STRING}: - return NO - - elif p.type == syms.import_from: - if t == token.DOT: - if prev and prev.type == token.DOT: - return NO - - elif t == token.NAME: - if v == "import": - return SPACE - - if prev and prev.type == token.DOT: - return NO - - elif p.type == syms.sliceop: - return NO - - return SPACE - - -def preceding_leaf(node: Optional[LN]) -> Optional[Leaf]: - """Return the first leaf that precedes `node`, if any.""" - while node: - res = node.prev_sibling - if res: - if isinstance(res, Leaf): - return res - - try: - return list(res.leaves())[-1] - - except IndexError: - return None - - node = node.parent - return None - - -def child_towards(ancestor: Node, descendant: LN) -> Optional[LN]: - """Return the child of `ancestor` that contains `descendant`.""" - node: Optional[LN] = descendant - while node and node.parent != ancestor: - node = node.parent - return node - - -def container_of(leaf: Leaf) -> LN: - """Return `leaf` or one of its ancestors that is the topmost container of it. - - By "container" we mean a node where `leaf` is the very first child. 
- """ - same_prefix = leaf.prefix - container: LN = leaf - while container: - parent = container.parent - if parent is None: - break - - if parent.children[0].prefix != same_prefix: - break - - if parent.type == syms.file_input: - break - - if parent.prev_sibling is not None and parent.prev_sibling.type in BRACKETS: - break - - container = parent - return container - - -def is_split_after_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: - """Return the priority of the `leaf` delimiter, given a line break after it. - - The delimiter priorities returned here are from those delimiters that would - cause a line break after themselves. - - Higher numbers are higher priority. - """ - if leaf.type == token.COMMA: - return COMMA_PRIORITY - - return 0 - - -def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority: - """Return the priority of the `leaf` delimiter, given a line break before it. - - The delimiter priorities returned here are from those delimiters that would - cause a line break before themselves. - - Higher numbers are higher priority. - """ - if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS): - # * and ** might also be MATH_OPERATORS but in this case they are not. - # Don't treat them as a delimiter. - return 0 - - if ( - leaf.type == token.DOT - and leaf.parent - and leaf.parent.type not in {syms.import_from, syms.dotted_name} - and (previous is None or previous.type in CLOSING_BRACKETS) - ): - return DOT_PRIORITY - - if ( - leaf.type in MATH_OPERATORS - and leaf.parent - and leaf.parent.type not in {syms.factor, syms.star_expr} - ): - return MATH_PRIORITIES[leaf.type] - - if leaf.type in COMPARATORS: - return COMPARATOR_PRIORITY - - if ( - leaf.type == token.STRING - and previous is not None - and previous.type == token.STRING - ): - return STRING_PRIORITY - - if leaf.type not in {token.NAME, token.ASYNC}: - return 0 - - if ( - leaf.value == "for" - and leaf.parent - and leaf.parent.type in {syms.comp_for, syms.old_comp_for} - or leaf.type == token.ASYNC - ): - if ( - not isinstance(leaf.prev_sibling, Leaf) - or leaf.prev_sibling.value != "async" - ): - return COMPREHENSION_PRIORITY - - if ( - leaf.value == "if" - and leaf.parent - and leaf.parent.type in {syms.comp_if, syms.old_comp_if} - ): - return COMPREHENSION_PRIORITY - - if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test: - return TERNARY_PRIORITY - - if leaf.value == "is": - return COMPARATOR_PRIORITY - - if ( - leaf.value == "in" - and leaf.parent - and leaf.parent.type in {syms.comp_op, syms.comparison} - and not ( - previous is not None - and previous.type == token.NAME - and previous.value == "not" - ) - ): - return COMPARATOR_PRIORITY - - if ( - leaf.value == "not" - and leaf.parent - and leaf.parent.type == syms.comp_op - and not ( - previous is not None - and previous.type == token.NAME - and previous.value == "is" - ) - ): - return COMPARATOR_PRIORITY - - if leaf.value in LOGIC_OPERATORS and leaf.parent: - return LOGIC_PRIORITY - - return 0 - - -FMT_OFF = {"# fmt: off", "# fmt:off", "# yapf: disable"} -FMT_ON = {"# fmt: on", "# fmt:on", "# yapf: enable"} - - -def generate_comments(leaf: LN) -> Iterator[Leaf]: - """Clean the prefix of the `leaf` and generate comments from it, if any. - - Comments in lib2to3 are shoved into the whitespace prefix. This happens - in `pgen2/driver.py:Driver.parse_tokens()`. 
This was a brilliant implementation - move because it does away with modifying the grammar to include all the - possible places in which comments can be placed. - - The sad consequence for us though is that comments don't "belong" anywhere. - This is why this function generates simple parentless Leaf objects for - comments. We simply don't know what the correct parent should be. - - No matter though, we can live without this. We really only need to - differentiate between inline and standalone comments. The latter don't - share the line with any code. - - Inline comments are emitted as regular token.COMMENT leaves. Standalone ones - are emitted with a fake STANDALONE_COMMENT token identifier. - """ - for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER): - yield Leaf(pc.type, pc.value, prefix="\n" * pc.newlines) - - -@dataclass -class ProtoComment: - """Describes a piece of syntax that is a comment. - - It's not a :class:`blib2to3.pytree.Leaf` so that: - - * it can be cached (`Leaf` objects should not be reused more than once as - they store their lineno, column, prefix, and parent information); - * `newlines` and `consumed` fields are kept separate from the `value`. This - simplifies handling of special marker comments like ``# fmt: off/on``. - """ - - type: int # token.COMMENT or STANDALONE_COMMENT - value: str # content of the comment - newlines: int # how many newlines before the comment - consumed: int # how many characters of the original leaf's prefix did we consume - - -@lru_cache(maxsize=4096) -def list_comments(prefix: str, *, is_endmarker: bool) -> List[ProtoComment]: - """Return a list of :class:`ProtoComment` objects parsed from the given `prefix`.""" - result: List[ProtoComment] = [] - if not prefix or "#" not in prefix: - return result - - consumed = 0 - nlines = 0 - ignored_lines = 0 - for index, line in enumerate(prefix.split("\n")): - consumed += len(line) + 1 # adding the length of the split '\n' - line = line.lstrip() - if not line: - nlines += 1 - if not line.startswith("#"): - # Escaped newlines outside of a comment are not really newlines at - # all. We treat a single-line comment following an escaped newline - # as a simple trailing comment. - if line.endswith("\\"): - ignored_lines += 1 - continue - - if index == ignored_lines and not is_endmarker: - comment_type = token.COMMENT # simple trailing comment - else: - comment_type = STANDALONE_COMMENT - comment = make_comment(line) - result.append( - ProtoComment( - type=comment_type, value=comment, newlines=nlines, consumed=consumed - ) - ) - nlines = 0 - return result - - -def make_comment(content: str) -> str: - """Return a consistently formatted comment from the given `content` string. - - All comments (except for "##", "#!", "#:", "#'", "#%%") should have a single - space between the hash sign and the content. - - If `content` didn't start with a hash sign, one is provided. - """ - content = content.rstrip() - if not content: - return "#" - - if content[0] == "#": - content = content[1:] - if content and content[0] not in " !:#'%": - content = " " + content - return "#" + content - - -def split_line( - line: Line, - line_length: int, - inner: bool = False, - features: Collection[Feature] = (), -) -> Iterator[Line]: - """Split a `line` into potentially many lines. - - They should fit in the allotted `line_length` but might not be able to. - `inner` signifies that there was a pair of brackets somewhere around the - current `line`, possibly transitively.
This means we can fall back to splitting - by delimiters if the LHS/RHS don't yield any results. - - `features` are syntactical features that may be used in the output. - """ - if line.is_comment: - yield line - return - - line_str = str(line).strip("\n") - - if ( - not line.contains_uncollapsable_type_comments() - and not line.should_explode - and not line.is_collection_with_optional_trailing_comma - and ( - is_line_short_enough(line, line_length=line_length, line_str=line_str) - or line.contains_unsplittable_type_ignore() - ) - ): - yield line - return - - split_funcs: List[SplitFunc] - if line.is_def: - split_funcs = [left_hand_split] - else: - - def rhs(line: Line, features: Collection[Feature]) -> Iterator[Line]: - for omit in generate_trailers_to_omit(line, line_length): - lines = list(right_hand_split(line, line_length, features, omit=omit)) - if is_line_short_enough(lines[0], line_length=line_length): - yield from lines - return - - # All splits failed, best effort split with no omits. - # This mostly happens to multiline strings that are by definition - # reported as not fitting a single line. - # line_length=1 here was historically a bug that somehow became a feature. - # See #762 and #781 for the full story. - yield from right_hand_split(line, line_length=1, features=features) - - if line.inside_brackets: - split_funcs = [delimiter_split, standalone_comment_split, rhs] - else: - split_funcs = [rhs] - for split_func in split_funcs: - # We are accumulating lines in `result` because we might want to abort - # mission and return the original line in the end, or attempt a different - # split altogether. - result: List[Line] = [] - try: - for l in split_func(line, features): - if str(l).strip("\n") == line_str: - raise CannotSplit("Split function returned an unchanged result") - - result.extend( - split_line( - l, line_length=line_length, inner=True, features=features - ) - ) - except CannotSplit: - continue - - else: - yield from result - break - - else: - yield line - - -def left_hand_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: - """Split line into many lines, starting with the first matching bracket pair. - - Note: this usually looks weird, only use this for function definitions. - Prefer RHS otherwise. This is why this function is not symmetrical with - :func:`right_hand_split` which also handles optional parentheses.
- """ - tail_leaves: List[Leaf] = [] - body_leaves: List[Leaf] = [] - head_leaves: List[Leaf] = [] - current_leaves = head_leaves - matching_bracket = None - for leaf in line.leaves: - if ( - current_leaves is body_leaves - and leaf.type in CLOSING_BRACKETS - and leaf.opening_bracket is matching_bracket - ): - current_leaves = tail_leaves if body_leaves else head_leaves - current_leaves.append(leaf) - if current_leaves is head_leaves: - if leaf.type in OPENING_BRACKETS: - matching_bracket = leaf - current_leaves = body_leaves - if not matching_bracket: - raise CannotSplit("No brackets found") - - head = bracket_split_build_line(head_leaves, line, matching_bracket) - body = bracket_split_build_line(body_leaves, line, matching_bracket, is_body=True) - tail = bracket_split_build_line(tail_leaves, line, matching_bracket) - bracket_split_succeeded_or_raise(head, body, tail) - for result in (head, body, tail): - if result: - yield result - - -def right_hand_split( - line: Line, - line_length: int, - features: Collection[Feature] = (), - omit: Collection[LeafID] = (), -) -> Iterator[Line]: - """Split line into many lines, starting with the last matching bracket pair. - - If the split was by optional parentheses, attempt splitting without them, too. - `omit` is a collection of closing bracket IDs that shouldn't be considered for - this split. - - Note: running this function modifies `bracket_depth` on the leaves of `line`. - """ - tail_leaves: List[Leaf] = [] - body_leaves: List[Leaf] = [] - head_leaves: List[Leaf] = [] - current_leaves = tail_leaves - opening_bracket = None - closing_bracket = None - for leaf in reversed(line.leaves): - if current_leaves is body_leaves: - if leaf is opening_bracket: - current_leaves = head_leaves if body_leaves else tail_leaves - current_leaves.append(leaf) - if current_leaves is tail_leaves: - if leaf.type in CLOSING_BRACKETS and id(leaf) not in omit: - opening_bracket = leaf.opening_bracket - closing_bracket = leaf - current_leaves = body_leaves - if not (opening_bracket and closing_bracket and head_leaves): - # If there is no opening or closing_bracket that means the split failed and - # all content is in the tail. Otherwise, if `head_leaves` are empty, it means - # the matching `opening_bracket` wasn't available on `line` anymore. 
- raise CannotSplit("No brackets found") - - tail_leaves.reverse() - body_leaves.reverse() - head_leaves.reverse() - head = bracket_split_build_line(head_leaves, line, opening_bracket) - body = bracket_split_build_line(body_leaves, line, opening_bracket, is_body=True) - tail = bracket_split_build_line(tail_leaves, line, opening_bracket) - bracket_split_succeeded_or_raise(head, body, tail) - if ( - # the body shouldn't be exploded - not body.should_explode - # the opening bracket is an optional paren - and opening_bracket.type == token.LPAR - and not opening_bracket.value - # the closing bracket is an optional paren - and closing_bracket.type == token.RPAR - and not closing_bracket.value - # it's not an import (optional parens are the only thing we can split on - # in this case; attempting a split without them is a waste of time) - and not line.is_import - # there are no standalone comments in the body - and not body.contains_standalone_comments(0) - # and we can actually remove the parens - and can_omit_invisible_parens(body, line_length) - ): - omit = {id(closing_bracket), *omit} - try: - yield from right_hand_split(line, line_length, features=features, omit=omit) - return - - except CannotSplit: - if not ( - can_be_split(body) - or is_line_short_enough(body, line_length=line_length) - ): - raise CannotSplit( - "Splitting failed, body is still too long and can't be split." - ) - - elif head.contains_multiline_strings() or tail.contains_multiline_strings(): - raise CannotSplit( - "The current optional pair of parentheses is bound to fail to " - "satisfy the splitting algorithm because the head or the tail " - "contains multiline strings which by definition never fit one " - "line." - ) - - ensure_visible(opening_bracket) - ensure_visible(closing_bracket) - for result in (head, body, tail): - if result: - yield result - - -def bracket_split_succeeded_or_raise(head: Line, body: Line, tail: Line) -> None: - """Raise :exc:`CannotSplit` if the last left- or right-hand split failed. - - Do nothing otherwise. - - A left- or right-hand split is based on a pair of brackets. Content before - (and including) the opening bracket is left on one line, content inside the - brackets is put on a separate line, and finally content starting with and - following the closing bracket is put on a separate line. - - Those are called `head`, `body`, and `tail`, respectively. If the split - produced the same line (all content in `head`) or ended up with an empty `body` - and the `tail` is just the closing bracket, then it's considered failed. - """ - tail_len = len(str(tail).strip()) - if not body: - if tail_len == 0: - raise CannotSplit("Splitting brackets produced the same line") - - elif tail_len < 3: - raise CannotSplit( - f"Splitting brackets on an empty body to save " - f"{tail_len} characters is not worth it" - ) - - -def bracket_split_build_line( - leaves: List[Leaf], original: Line, opening_bracket: Leaf, *, is_body: bool = False -) -> Line: - """Return a new line with given `leaves` and respective comments from `original`. - - If `is_body` is True, the result line is one-indented inside brackets and as such - has its first leaf's prefix normalized and a trailing comma added when expected. - """ - result = Line(depth=original.depth) - if is_body: - result.inside_brackets = True - result.depth += 1 - if leaves: - # Since body is a new indent level, remove spurious leading whitespace. 
- normalize_prefix(leaves[0], inside_brackets=True) - # Ensure a trailing comma for imports and standalone function arguments, but - # be careful not to add one after any comments or within type annotations. - no_commas = ( - original.is_def - and opening_bracket.value == "(" - and not any(l.type == token.COMMA for l in leaves) - ) - - if original.is_import or no_commas: - for i in range(len(leaves) - 1, -1, -1): - if leaves[i].type == STANDALONE_COMMENT: - continue - - if leaves[i].type != token.COMMA: - leaves.insert(i + 1, Leaf(token.COMMA, ",")) - break - - # Populate the line - for leaf in leaves: - result.append(leaf, preformatted=True) - for comment_after in original.comments_after(leaf): - result.append(comment_after, preformatted=True) - if is_body: - result.should_explode = should_explode(result, opening_bracket) - return result - - -def dont_increase_indentation(split_func: SplitFunc) -> SplitFunc: - """Normalize prefix of the first leaf in every line returned by `split_func`. - - This is a decorator over relevant split functions. - """ - - @wraps(split_func) - def split_wrapper(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: - for l in split_func(line, features): - normalize_prefix(l.leaves[0], inside_brackets=True) - yield l - - return split_wrapper - - -@dont_increase_indentation -def delimiter_split(line: Line, features: Collection[Feature] = ()) -> Iterator[Line]: - """Split according to delimiters of the highest priority. - - If the appropriate Features are given, the split will add trailing commas - also in function signatures and calls that contain `*` and `**`. - """ - try: - last_leaf = line.leaves[-1] - except IndexError: - raise CannotSplit("Line empty") - - bt = line.bracket_tracker - try: - delimiter_priority = bt.max_delimiter_priority(exclude={id(last_leaf)}) - except ValueError: - raise CannotSplit("No delimiters found") - - if delimiter_priority == DOT_PRIORITY: - if bt.delimiter_count_with_priority(delimiter_priority) == 1: - raise CannotSplit("Splitting a single attribute from its owner looks wrong") - - current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) - lowest_depth = sys.maxsize - trailing_comma_safe = True - - def append_to_line(leaf: Leaf) -> Iterator[Line]: - """Append `leaf` to current line or to new line if appending impossible.""" - nonlocal current_line - try: - current_line.append_safe(leaf, preformatted=True) - except ValueError: - yield current_line - - current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) - current_line.append(leaf) - - for leaf in line.leaves: - yield from append_to_line(leaf) - - for comment_after in line.comments_after(leaf): - yield from append_to_line(comment_after) - - lowest_depth = min(lowest_depth, leaf.bracket_depth) - if leaf.bracket_depth == lowest_depth: - if is_vararg(leaf, within={syms.typedargslist}): - trailing_comma_safe = ( - trailing_comma_safe and Feature.TRAILING_COMMA_IN_DEF in features - ) - elif is_vararg(leaf, within={syms.arglist, syms.argument}): - trailing_comma_safe = ( - trailing_comma_safe and Feature.TRAILING_COMMA_IN_CALL in features - ) - - leaf_priority = bt.delimiters.get(id(leaf)) - if leaf_priority == delimiter_priority: - yield current_line - - current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) - if current_line: - if ( - trailing_comma_safe - and delimiter_priority == COMMA_PRIORITY - and current_line.leaves[-1].type != token.COMMA - and current_line.leaves[-1].type != STANDALONE_COMMENT - ): - 
current_line.append(Leaf(token.COMMA, ",")) - yield current_line - - -@dont_increase_indentation -def standalone_comment_split( - line: Line, features: Collection[Feature] = () -) -> Iterator[Line]: - """Split standalone comments from the rest of the line.""" - if not line.contains_standalone_comments(0): - raise CannotSplit("Line does not have any standalone comments") - - current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) - - def append_to_line(leaf: Leaf) -> Iterator[Line]: - """Append `leaf` to current line or to new line if appending impossible.""" - nonlocal current_line - try: - current_line.append_safe(leaf, preformatted=True) - except ValueError: - yield current_line - - current_line = Line(depth=line.depth, inside_brackets=line.inside_brackets) - current_line.append(leaf) - - for leaf in line.leaves: - yield from append_to_line(leaf) - - for comment_after in line.comments_after(leaf): - yield from append_to_line(comment_after) - - if current_line: - yield current_line - - -def is_import(leaf: Leaf) -> bool: - """Return True if the given leaf starts an import statement.""" - p = leaf.parent - t = leaf.type - v = leaf.value - return bool( - t == token.NAME - and ( - (v == "import" and p and p.type == syms.import_name) - or (v == "from" and p and p.type == syms.import_from) - ) - ) - - -def is_type_comment(leaf: Leaf, suffix: str = "") -> bool: - """Return True if the given leaf is a special comment. - Only returns true for type comments for now.""" - t = leaf.type - v = leaf.value - return t in {token.COMMENT, STANDALONE_COMMENT} and v.startswith("# type:" + suffix) - - -def normalize_prefix(leaf: Leaf, *, inside_brackets: bool) -> None: - """Leave existing extra newlines if not `inside_brackets`. Remove everything - else. - - Note: don't use backslashes for formatting or you'll lose your voting rights. - """ - if not inside_brackets: - spl = leaf.prefix.split("#") - if "\\" not in spl[0]: - nl_count = spl[-1].count("\n") - if len(spl) > 1: - nl_count -= 1 - leaf.prefix = "\n" * nl_count - return - - leaf.prefix = "" - - -def normalize_string_prefix(leaf: Leaf, remove_u_prefix: bool = False) -> None: - """Make all string prefixes lowercase. - - If remove_u_prefix is given, also removes any u prefix from the string. - - Note: Mutates its argument. - """ - match = re.match(r"^([furbFURB]*)(.*)$", leaf.value, re.DOTALL) - assert match is not None, f"failed to match string {leaf.value!r}" - orig_prefix = match.group(1) - new_prefix = orig_prefix.lower() - if remove_u_prefix: - new_prefix = new_prefix.replace("u", "") - leaf.value = f"{new_prefix}{match.group(2)}" - - -def normalize_string_quotes(leaf: Leaf) -> None: - """Prefer double quotes but only if it doesn't cause more escaping. - - Adds or removes backslashes as appropriate. Doesn't parse and fix - strings nested in f-strings (yet). - - Note: Mutates its argument. 
- """ - value = leaf.value.lstrip("furbFURB") - if value[:3] == '"""': - return - - elif value[:3] == "'''": - orig_quote = "'''" - new_quote = '"""' - elif value[0] == '"': - orig_quote = '"' - new_quote = "'" - else: - orig_quote = "'" - new_quote = '"' - first_quote_pos = leaf.value.find(orig_quote) - if first_quote_pos == -1: - return # There's an internal error - - prefix = leaf.value[:first_quote_pos] - unescaped_new_quote = re.compile(rf"(([^\\]|^)(\\\\)*){new_quote}") - escaped_new_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}") - escaped_orig_quote = re.compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}") - body = leaf.value[first_quote_pos + len(orig_quote) : -len(orig_quote)] - if "r" in prefix.casefold(): - if unescaped_new_quote.search(body): - # There's at least one unescaped new_quote in this raw string - # so converting is impossible - return - - # Do not introduce or remove backslashes in raw strings - new_body = body - else: - # remove unnecessary escapes - new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body) - if body != new_body: - # Consider the string without unnecessary escapes as the original - body = new_body - leaf.value = f"{prefix}{orig_quote}{body}{orig_quote}" - new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body) - new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body) - if "f" in prefix.casefold(): - matches = re.findall( - r""" - (?:[^{]|^)\{ # start of the string or a non-{ followed by a single { - ([^{].*?) # contents of the brackets except if begins with {{ - \}(?:[^}]|$) # A } followed by end of the string or a non-} - """, - new_body, - re.VERBOSE, - ) - for m in matches: - if "\\" in str(m): - # Do not introduce backslashes in interpolated expressions - return - - if new_quote == '"""' and new_body[-1:] == '"': - # edge case: - new_body = new_body[:-1] + '\\"' - orig_escape_count = body.count("\\") - new_escape_count = new_body.count("\\") - if new_escape_count > orig_escape_count: - return # Do not introduce more escaping - - if new_escape_count == orig_escape_count and orig_quote == '"': - return # Prefer double quotes - - leaf.value = f"{prefix}{new_quote}{new_body}{new_quote}" - - -def normalize_numeric_literal(leaf: Leaf) -> None: - """Normalizes numeric (float, int, and complex) literals. - - All letters used in the representation are normalized to lowercase (except - in Python 2 long literals). - """ - text = leaf.value.lower() - if text.startswith(("0o", "0b")): - # Leave octal and binary literals alone. - pass - elif text.startswith("0x"): - # Change hex literals to upper case. - before, after = text[:2], text[2:] - text = f"{before}{after.upper()}" - elif "e" in text: - before, after = text.split("e") - sign = "" - if after.startswith("-"): - after = after[1:] - sign = "-" - elif after.startswith("+"): - after = after[1:] - before = format_float_or_int_string(before) - text = f"{before}e{sign}{after}" - elif text.endswith(("j", "l")): - number = text[:-1] - suffix = text[-1] - # Capitalize in "2L" because "l" looks too similar to "1". - if suffix == "l": - suffix = "L" - text = f"{format_float_or_int_string(number)}{suffix}" - else: - text = format_float_or_int_string(text) - leaf.value = text - - -def format_float_or_int_string(text: str) -> str: - """Formats a float string like "1.0".""" - if "." 
not in text: - return text - - before, after = text.split(".") - return f"{before or 0}.{after or 0}" - - -def normalize_invisible_parens(node: Node, parens_after: Set[str]) -> None: - """Make existing optional parentheses invisible or create new ones. - - `parens_after` is a set of string leaf values immediately after which parens - should be put. - - Standardizes on visible parentheses for single-element tuples, and keeps - existing visible parentheses for other tuples and generator expressions. - """ - for pc in list_comments(node.prefix, is_endmarker=False): - if pc.value in FMT_OFF: - # This `node` has a prefix with `# fmt: off`, don't mess with parens. - return - check_lpar = False - for index, child in enumerate(list(node.children)): - # Add parentheses around long tuple unpacking in assignments. - if ( - index == 0 - and isinstance(child, Node) - and child.type == syms.testlist_star_expr - ): - check_lpar = True - - if check_lpar: - if is_walrus_assignment(child): - continue - - if child.type == syms.atom: - if maybe_make_parens_invisible_in_atom(child, parent=node): - wrap_in_parentheses(node, child, visible=False) - elif is_one_tuple(child): - wrap_in_parentheses(node, child, visible=True) - elif node.type == syms.import_from: - # "import from" nodes store parentheses directly as part of - # the statement - if child.type == token.LPAR: - # make parentheses invisible - child.value = "" # type: ignore - node.children[-1].value = "" # type: ignore - elif child.type != token.STAR: - # insert invisible parentheses - node.insert_child(index, Leaf(token.LPAR, "")) - node.append_child(Leaf(token.RPAR, "")) - break - - elif not (isinstance(child, Leaf) and is_multiline_string(child)): - wrap_in_parentheses(node, child, visible=False) - - check_lpar = isinstance(child, Leaf) and child.value in parens_after - - -def normalize_fmt_off(node: Node) -> None: - """Convert content between `# fmt: off`/`# fmt: on` into standalone comments.""" - try_again = True - while try_again: - try_again = convert_one_fmt_off_pair(node) - - -def convert_one_fmt_off_pair(node: Node) -> bool: - """Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment. - - Returns True if a pair was converted. - """ - for leaf in node.leaves(): - previous_consumed = 0 - for comment in list_comments(leaf.prefix, is_endmarker=False): - if comment.value in FMT_OFF: - # We only want standalone comments. If there's no previous leaf or - # the previous leaf is indentation, it's a standalone comment in - # disguise. - if comment.type != STANDALONE_COMMENT: - prev = preceding_leaf(leaf) - if prev and prev.type not in WHITESPACE: - continue - - ignored_nodes = list(generate_ignored_nodes(leaf)) - if not ignored_nodes: - continue - - first = ignored_nodes[0] # Can be a container node with the `leaf`. - parent = first.parent - prefix = first.prefix - first.prefix = prefix[comment.consumed :] - hidden_value = ( - comment.value + "\n" + "".join(str(n) for n in ignored_nodes) - ) - if hidden_value.endswith("\n"): - # That happens when one of the `ignored_nodes` ended with a NEWLINE - # leaf (possibly followed by a DEDENT). 
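# [Editor's note] A hedged illustration, not part of the original file: for a
# block like "# fmt: off\nx = [1,  2]", hidden_value at this point would be the
# verbatim text "# fmt: off\nx = [1,  2]", about to be stored in a single
# STANDALONE_COMMENT leaf so the formatter leaves it untouched.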
- hidden_value = hidden_value[:-1] - first_idx = None - for ignored in ignored_nodes: - index = ignored.remove() - if first_idx is None: - first_idx = index - assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)" - assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)" - parent.insert_child( - first_idx, - Leaf( - STANDALONE_COMMENT, - hidden_value, - prefix=prefix[:previous_consumed] + "\n" * comment.newlines, - ), - ) - return True - - previous_consumed = comment.consumed - - return False - - -def generate_ignored_nodes(leaf: Leaf) -> Iterator[LN]: - """Starting from the container of `leaf`, generate all leaves until `# fmt: on`. - - Stops at the end of the block. - """ - container: Optional[LN] = container_of(leaf) - while container is not None and container.type != token.ENDMARKER: - is_fmt_on = False - for comment in list_comments(container.prefix, is_endmarker=False): - if comment.value in FMT_ON: - is_fmt_on = True - elif comment.value in FMT_OFF: - is_fmt_on = False - if is_fmt_on: - return - - yield container - - container = container.next_sibling - - -def maybe_make_parens_invisible_in_atom(node: LN, parent: LN) -> bool: - """If it's safe, make the parens in the atom `node` invisible, recursively. - Additionally, remove repeated, adjacent invisible parens from the atom `node` - as they are redundant. - - Returns whether the node should itself be wrapped in invisible parentheses. - - """ - if ( - node.type != syms.atom - or is_empty_tuple(node) - or is_one_tuple(node) - or (is_yield(node) and parent.type != syms.expr_stmt) - or max_delimiter_priority_in_atom(node) >= COMMA_PRIORITY - ): - return False - - first = node.children[0] - last = node.children[-1] - if first.type == token.LPAR and last.type == token.RPAR: - middle = node.children[1] - # make parentheses invisible - first.value = "" # type: ignore - last.value = "" # type: ignore - maybe_make_parens_invisible_in_atom(middle, parent=parent) - - if is_atom_with_invisible_parens(middle): - # Strip the invisible parens from `middle` by replacing - # it with the child in-between the invisible parens - middle.replace(middle.children[1]) - - return False - - return True - - -def is_atom_with_invisible_parens(node: LN) -> bool: - """Given a `LN`, determines whether it's an atom `node` with invisible - parens. Useful in dedupe-ing and normalizing parens. - """ - if isinstance(node, Leaf) or node.type != syms.atom: - return False - - first, last = node.children[0], node.children[-1] - return ( - isinstance(first, Leaf) - and first.type == token.LPAR - and first.value == "" - and isinstance(last, Leaf) - and last.type == token.RPAR - and last.value == "" - ) - - -def is_empty_tuple(node: LN) -> bool: - """Return True if `node` holds an empty tuple.""" - return ( - node.type == syms.atom - and len(node.children) == 2 - and node.children[0].type == token.LPAR - and node.children[1].type == token.RPAR - ) - - -def unwrap_singleton_parenthesis(node: LN) -> Optional[LN]: - """Returns `wrapped` if `node` is of the shape ( wrapped ). - - Parenthesis can be optional. Returns None otherwise""" - if len(node.children) != 3: - return None - - lpar, wrapped, rpar = node.children - if not (lpar.type == token.LPAR and rpar.type == token.RPAR): - return None - - return wrapped - - -def wrap_in_parentheses(parent: Node, child: LN, *, visible: bool = True) -> None: - """Wrap `child` in parentheses. - - This replaces `child` with an atom holding the parentheses and the old - child. That requires moving the prefix. 
- - If `visible` is False, the leaves will be valueless (and thus invisible). - """ - lpar = Leaf(token.LPAR, "(" if visible else "") - rpar = Leaf(token.RPAR, ")" if visible else "") - prefix = child.prefix - child.prefix = "" - index = child.remove() or 0 - new_child = Node(syms.atom, [lpar, child, rpar]) - new_child.prefix = prefix - parent.insert_child(index, new_child) - - -def is_one_tuple(node: LN) -> bool: - """Return True if `node` holds a tuple with one element, with or without parens.""" - if node.type == syms.atom: - gexp = unwrap_singleton_parenthesis(node) - if gexp is None or gexp.type != syms.testlist_gexp: - return False - - return len(gexp.children) == 2 and gexp.children[1].type == token.COMMA - - return ( - node.type in IMPLICIT_TUPLE - and len(node.children) == 2 - and node.children[1].type == token.COMMA - ) - - -def is_walrus_assignment(node: LN) -> bool: - """Return True iff `node` is of the shape ( test := test )""" - inner = unwrap_singleton_parenthesis(node) - return inner is not None and inner.type == syms.namedexpr_test - - -def is_yield(node: LN) -> bool: - """Return True if `node` holds a `yield` or `yield from` expression.""" - if node.type == syms.yield_expr: - return True - - if node.type == token.NAME and node.value == "yield": # type: ignore - return True - - if node.type != syms.atom: - return False - - if len(node.children) != 3: - return False - - lpar, expr, rpar = node.children - if lpar.type == token.LPAR and rpar.type == token.RPAR: - return is_yield(expr) - - return False - - -def is_vararg(leaf: Leaf, within: Set[NodeType]) -> bool: - """Return True if `leaf` is a star or double star in a vararg or kwarg. - - If `within` includes VARARGS_PARENTS, this applies to function signatures. - If `within` includes UNPACKING_PARENTS, it applies to right hand-side - extended iterable unpacking (PEP 3132) and additional unpacking - generalizations (PEP 448). - """ - if leaf.type not in VARARGS_SPECIALS or not leaf.parent: - return False - - p = leaf.parent - if p.type == syms.star_expr: - # Star expressions are also used as assignment targets in extended - # iterable unpacking (PEP 3132). See what its parent is instead. - if not p.parent: - return False - - p = p.parent - - return p.type in within - - -def is_multiline_string(leaf: Leaf) -> bool: - """Return True if `leaf` is a multiline string that actually spans many lines.""" - value = leaf.value.lstrip("furbFURB") - return value[:3] in {'"""', "'''"} and "\n" in value - - -def is_stub_suite(node: Node) -> bool: - """Return True if `node` is a suite with a stub body.""" - if ( - len(node.children) != 4 - or node.children[0].type != token.NEWLINE - or node.children[1].type != token.INDENT - or node.children[3].type != token.DEDENT - ): - return False - - return is_stub_body(node.children[2]) - - -def is_stub_body(node: LN) -> bool: - """Return True if `node` is a simple statement containing an ellipsis.""" - if not isinstance(node, Node) or node.type != syms.simple_stmt: - return False - - if len(node.children) != 2: - return False - - child = node.children[0] - return ( - child.type == syms.atom - and len(child.children) == 3 - and all(leaf == Leaf(token.DOT, ".") for leaf in child.children) - ) - - -def max_delimiter_priority_in_atom(node: LN) -> Priority: - """Return maximum delimiter priority inside `node`. - - This is specific to atoms with contents contained in a pair of parentheses. - If `node` isn't an atom or there are no enclosing parentheses, returns 0. 
- """ - if node.type != syms.atom: - return 0 - - first = node.children[0] - last = node.children[-1] - if not (first.type == token.LPAR and last.type == token.RPAR): - return 0 - - bt = BracketTracker() - for c in node.children[1:-1]: - if isinstance(c, Leaf): - bt.mark(c) - else: - for leaf in c.leaves(): - bt.mark(leaf) - try: - return bt.max_delimiter_priority() - - except ValueError: - return 0 - - -def ensure_visible(leaf: Leaf) -> None: - """Make sure parentheses are visible. - - They could be invisible as part of some statements (see - :func:`normalize_invisible_parens` and :func:`visit_import_from`). - """ - if leaf.type == token.LPAR: - leaf.value = "(" - elif leaf.type == token.RPAR: - leaf.value = ")" - - -def should_explode(line: Line, opening_bracket: Leaf) -> bool: - """Should `line` immediately be split with `delimiter_split()` after RHS?""" - - if not ( - opening_bracket.parent - and opening_bracket.parent.type in {syms.atom, syms.import_from} - and opening_bracket.value in "[{(" - ): - return False - - try: - last_leaf = line.leaves[-1] - exclude = {id(last_leaf)} if last_leaf.type == token.COMMA else set() - max_priority = line.bracket_tracker.max_delimiter_priority(exclude=exclude) - except (IndexError, ValueError): - return False - - return max_priority == COMMA_PRIORITY - - -def get_features_used(node: Node) -> Set[Feature]: - """Return a set of (relatively) new Python features used in this file. - - Currently looking for: - - f-strings; - - underscores in numeric literals; - - trailing commas after * or ** in function signatures and calls; - - positional only arguments in function signatures and lambdas; - """ - features: Set[Feature] = set() - for n in node.pre_order(): - if n.type == token.STRING: - value_head = n.value[:2] # type: ignore - if value_head in {'f"', 'F"', "f'", "F'", "rf", "fr", "RF", "FR"}: - features.add(Feature.F_STRINGS) - - elif n.type == token.NUMBER: - if "_" in n.value: # type: ignore - features.add(Feature.NUMERIC_UNDERSCORES) - - elif n.type == token.SLASH: - if n.parent and n.parent.type in {syms.typedargslist, syms.arglist}: - features.add(Feature.POS_ONLY_ARGUMENTS) - - elif n.type == token.COLONEQUAL: - features.add(Feature.ASSIGNMENT_EXPRESSIONS) - - elif ( - n.type in {syms.typedargslist, syms.arglist} - and n.children - and n.children[-1].type == token.COMMA - ): - if n.type == syms.typedargslist: - feature = Feature.TRAILING_COMMA_IN_DEF - else: - feature = Feature.TRAILING_COMMA_IN_CALL - - for ch in n.children: - if ch.type in STARS: - features.add(feature) - - if ch.type == syms.argument: - for argch in ch.children: - if argch.type in STARS: - features.add(feature) - - return features - - -def detect_target_versions(node: Node) -> Set[TargetVersion]: - """Detect the version to target based on the nodes used.""" - features = get_features_used(node) - return { - version for version in TargetVersion if features <= VERSION_TO_FEATURES[version] - } - - -def generate_trailers_to_omit(line: Line, line_length: int) -> Iterator[Set[LeafID]]: - """Generate sets of closing bracket IDs that should be omitted in a RHS. - - Brackets can be omitted if the entire trailer up to and including - a preceding closing bracket fits in one line. - - Yielded sets are cumulative (contain results of previous yields, too). First - set is empty. 
- """ - - omit: Set[LeafID] = set() - yield omit - - length = 4 * line.depth - opening_bracket = None - closing_bracket = None - inner_brackets: Set[LeafID] = set() - for index, leaf, leaf_length in enumerate_with_length(line, reversed=True): - length += leaf_length - if length > line_length: - break - - has_inline_comment = leaf_length > len(leaf.value) + len(leaf.prefix) - if leaf.type == STANDALONE_COMMENT or has_inline_comment: - break - - if opening_bracket: - if leaf is opening_bracket: - opening_bracket = None - elif leaf.type in CLOSING_BRACKETS: - inner_brackets.add(id(leaf)) - elif leaf.type in CLOSING_BRACKETS: - if index > 0 and line.leaves[index - 1].type in OPENING_BRACKETS: - # Empty brackets would fail a split so treat them as "inner" - # brackets (e.g. only add them to the `omit` set if another - # pair of brackets was good enough. - inner_brackets.add(id(leaf)) - continue - - if closing_bracket: - omit.add(id(closing_bracket)) - omit.update(inner_brackets) - inner_brackets.clear() - yield omit - - if leaf.value: - opening_bracket = leaf.opening_bracket - closing_bracket = leaf - - -def get_future_imports(node: Node) -> Set[str]: - """Return a set of __future__ imports in the file.""" - imports: Set[str] = set() - - def get_imports_from_children(children: List[LN]) -> Generator[str, None, None]: - for child in children: - if isinstance(child, Leaf): - if child.type == token.NAME: - yield child.value - - elif child.type == syms.import_as_name: - orig_name = child.children[0] - assert isinstance(orig_name, Leaf), "Invalid syntax parsing imports" - assert orig_name.type == token.NAME, "Invalid syntax parsing imports" - yield orig_name.value - - elif child.type == syms.import_as_names: - yield from get_imports_from_children(child.children) - - else: - raise AssertionError("Invalid syntax parsing imports") - - for child in node.children: - if child.type != syms.simple_stmt: - break - - first_child = child.children[0] - if isinstance(first_child, Leaf): - # Continue looking if we see a docstring; otherwise stop. - if ( - len(child.children) == 2 - and first_child.type == token.STRING - and child.children[1].type == token.NEWLINE - ): - continue - - break - - elif first_child.type == syms.import_from: - module_name = first_child.children[1] - if not isinstance(module_name, Leaf) or module_name.value != "__future__": - break - - imports |= set(get_imports_from_children(first_child.children[3:])) - else: - break - - return imports - - -@lru_cache() -def get_gitignore(root: Path) -> PathSpec: - """ Return a PathSpec matching gitignore content if present.""" - gitignore = root / ".gitignore" - lines: List[str] = [] - if gitignore.is_file(): - with gitignore.open() as gf: - lines = gf.readlines() - return PathSpec.from_lines("gitwildmatch", lines) - - -def gen_python_files_in_dir( - path: Path, - root: Path, - include: Pattern[str], - exclude: Pattern[str], - report: "Report", - gitignore: PathSpec, -) -> Iterator[Path]: - """Generate all files under `path` whose paths are not excluded by the - `exclude` regex, but are included by the `include` regex. - - Symbolic links pointing outside of the `root` directory are ignored. - - `report` is where output about exclusions goes. 
- """ - assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}" - for child in path.iterdir(): - # First ignore files matching .gitignore - if gitignore.match_file(child.as_posix()): - report.path_ignored(child, f"matches the .gitignore file content") - continue - - # Then ignore with `exclude` option. - try: - normalized_path = "/" + child.resolve().relative_to(root).as_posix() - except OSError as e: - report.path_ignored(child, f"cannot be read because {e}") - continue - - except ValueError: - if child.is_symlink(): - report.path_ignored( - child, f"is a symbolic link that points outside {root}" - ) - continue - - raise - - if child.is_dir(): - normalized_path += "/" - - exclude_match = exclude.search(normalized_path) - if exclude_match and exclude_match.group(0): - report.path_ignored(child, f"matches the --exclude regular expression") - continue - - if child.is_dir(): - yield from gen_python_files_in_dir( - child, root, include, exclude, report, gitignore - ) - - elif child.is_file(): - include_match = include.search(normalized_path) - if include_match: - yield child - - -@lru_cache() -def find_project_root(srcs: Iterable[str]) -> Path: - """Return a directory containing .git, .hg, or pyproject.toml. - - That directory can be one of the directories passed in `srcs` or their - common parent. - - If no directory in the tree contains a marker that would specify it's the - project root, the root of the file system is returned. - """ - if not srcs: - return Path("/").resolve() - - common_base = min(Path(src).resolve() for src in srcs) - if common_base.is_dir(): - # Append a fake file so `parents` below returns `common_base_dir`, too. - common_base /= "fake-file" - for directory in common_base.parents: - if (directory / ".git").is_dir(): - return directory - - if (directory / ".hg").is_dir(): - return directory - - if (directory / "pyproject.toml").is_file(): - return directory - - return directory - - -@dataclass -class Report: - """Provides a reformatting counter. Can be rendered with `str(report)`.""" - - check: bool = False - quiet: bool = False - verbose: bool = False - change_count: int = 0 - same_count: int = 0 - failure_count: int = 0 - - def done(self, src: Path, changed: Changed) -> None: - """Increment the counter for successful reformatting. Write out a message.""" - if changed is Changed.YES: - reformatted = "would reformat" if self.check else "reformatted" - if self.verbose or not self.quiet: - out(f"{reformatted} {src}") - self.change_count += 1 - else: - if self.verbose: - if changed is Changed.NO: - msg = f"{src} already well formatted, good job." - else: - msg = f"{src} wasn't modified on disk since last run." - out(msg, bold=False) - self.same_count += 1 - - def failed(self, src: Path, message: str) -> None: - """Increment the counter for failed reformatting. Write out a message.""" - err(f"error: cannot format {src}: {message}") - self.failure_count += 1 - - def path_ignored(self, path: Path, message: str) -> None: - if self.verbose: - out(f"{path} ignored: {message}", bold=False) - - @property - def return_code(self) -> int: - """Return the exit code that the app should use. - - This considers the current state of changed files and failures: - - if there were any failures, return 123; - - if any files were changed and --check is being used, return 1; - - otherwise return 0. - """ - # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with - # 126 we have special return codes reserved by the shell. 
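# [Editor's sketch] Hypothetical values, not from the original source:
#   Report(check=True, change_count=2, failure_count=1).return_code == 123
#   Report(check=True, change_count=2).return_code == 1
#   Report().return_code == 0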
- if self.failure_count: - return 123 - - elif self.change_count and self.check: - return 1 - - return 0 - - def __str__(self) -> str: - """Render a color report of the current state. - - Use `click.unstyle` to remove colors. - """ - if self.check: - reformatted = "would be reformatted" - unchanged = "would be left unchanged" - failed = "would fail to reformat" - else: - reformatted = "reformatted" - unchanged = "left unchanged" - failed = "failed to reformat" - report = [] - if self.change_count: - s = "s" if self.change_count > 1 else "" - report.append( - click.style(f"{self.change_count} file{s} {reformatted}", bold=True) - ) - if self.same_count: - s = "s" if self.same_count > 1 else "" - report.append(f"{self.same_count} file{s} {unchanged}") - if self.failure_count: - s = "s" if self.failure_count > 1 else "" - report.append( - click.style(f"{self.failure_count} file{s} {failed}", fg="red") - ) - return ", ".join(report) + "." - - -def parse_ast(src: str) -> Union[ast.AST, ast3.AST, ast27.AST]: - filename = "" - if sys.version_info >= (3, 8): - # TODO: support Python 4+ ;) - for minor_version in range(sys.version_info[1], 4, -1): - try: - return ast.parse(src, filename, feature_version=(3, minor_version)) - except SyntaxError: - continue - else: - for feature_version in (7, 6): - try: - return ast3.parse(src, filename, feature_version=feature_version) - except SyntaxError: - continue - - return ast27.parse(src) - - -def _fixup_ast_constants( - node: Union[ast.AST, ast3.AST, ast27.AST] -) -> Union[ast.AST, ast3.AST, ast27.AST]: - """Map ast nodes deprecated in 3.8 to Constant.""" - if isinstance(node, (ast.Str, ast3.Str, ast27.Str, ast.Bytes, ast3.Bytes)): - return ast.Constant(value=node.s) - - if isinstance(node, (ast.Num, ast3.Num, ast27.Num)): - return ast.Constant(value=node.n) - - if isinstance(node, (ast.NameConstant, ast3.NameConstant)): - return ast.Constant(value=node.value) - - return node - - -def assert_equivalent(src: str, dst: str) -> None: - """Raise AssertionError if `src` and `dst` aren't equivalent.""" - - def _v(node: Union[ast.AST, ast3.AST, ast27.AST], depth: int = 0) -> Iterator[str]: - """Simple visitor generating strings to compare ASTs by content.""" - - node = _fixup_ast_constants(node) - - yield f"{' ' * depth}{node.__class__.__name__}(" - - for field in sorted(node._fields): - # TypeIgnore has only one field 'lineno' which breaks this comparison - type_ignore_classes = (ast3.TypeIgnore, ast27.TypeIgnore) - if sys.version_info >= (3, 8): - type_ignore_classes += (ast.TypeIgnore,) - if isinstance(node, type_ignore_classes): - break - - try: - value = getattr(node, field) - except AttributeError: - continue - - yield f"{' ' * (depth+1)}{field}=" - - if isinstance(value, list): - for item in value: - # Ignore nested tuples within del statements, because we may insert - # parentheses and they change the AST. 
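# [Editor's note] Illustration, not original code: "del a, b" and "del (a, b)"
# both have a single Tuple target, so walking item.elts instead of the Tuple
# node keeps the comparison stable however the target ends up parenthesized.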
- if ( - field == "targets" - and isinstance(node, (ast.Delete, ast3.Delete, ast27.Delete)) - and isinstance(item, (ast.Tuple, ast3.Tuple, ast27.Tuple)) - ): - for item in item.elts: - yield from _v(item, depth + 2) - - elif isinstance(item, (ast.AST, ast3.AST, ast27.AST)): - yield from _v(item, depth + 2) - - elif isinstance(value, (ast.AST, ast3.AST, ast27.AST)): - yield from _v(value, depth + 2) - - else: - yield f"{' ' * (depth+2)}{value!r}, # {value.__class__.__name__}" - - yield f"{' ' * depth}) # /{node.__class__.__name__}" - - try: - src_ast = parse_ast(src) - except Exception as exc: - raise AssertionError( - f"cannot use --safe with this file; failed to parse source file. " - f"AST error message: {exc}" - ) - - try: - dst_ast = parse_ast(dst) - except Exception as exc: - log = dump_to_file("".join(traceback.format_tb(exc.__traceback__)), dst) - raise AssertionError( - f"INTERNAL ERROR: Black produced invalid code: {exc}. " - f"Please report a bug on https://github.com/psf/black/issues. " - f"This invalid output might be helpful: {log}" - ) from None - - src_ast_str = "\n".join(_v(src_ast)) - dst_ast_str = "\n".join(_v(dst_ast)) - if src_ast_str != dst_ast_str: - log = dump_to_file(diff(src_ast_str, dst_ast_str, "src", "dst")) - raise AssertionError( - f"INTERNAL ERROR: Black produced code that is not equivalent to " - f"the source. " - f"Please report a bug on https://github.com/psf/black/issues. " - f"This diff might be helpful: {log}" - ) from None - - -def assert_stable(src: str, dst: str, mode: FileMode) -> None: - """Raise AssertionError if `dst` reformats differently the second time.""" - newdst = format_str(dst, mode=mode) - if dst != newdst: - log = dump_to_file( - diff(src, dst, "source", "first pass"), - diff(dst, newdst, "first pass", "second pass"), - ) - raise AssertionError( - f"INTERNAL ERROR: Black produced different code on the second pass " - f"of the formatter. " - f"Please report a bug on https://github.com/psf/black/issues. " - f"This diff might be helpful: {log}" - ) from None - - -def dump_to_file(*output: str) -> str: - """Dump `output` to a temporary file. Return path to the file.""" - with tempfile.NamedTemporaryFile( - mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8" - ) as f: - for lines in output: - f.write(lines) - if lines and lines[-1] != "\n": - f.write("\n") - return f.name - - -@contextmanager -def nullcontext() -> Iterator[None]: - """Return an empty context manager. - - To be used like `nullcontext` in Python 3.7. - """ - yield - - -def diff(a: str, b: str, a_name: str, b_name: str) -> str: - """Return a unified diff string between strings `a` and `b`.""" - import difflib - - a_lines = [line + "\n" for line in a.split("\n")] - b_lines = [line + "\n" for line in b.split("\n")] - return "".join( - difflib.unified_diff(a_lines, b_lines, fromfile=a_name, tofile=b_name, n=5) - ) - - -def cancel(tasks: Iterable[asyncio.Task]) -> None: - """asyncio signal handler that cancels all `tasks` and reports to stderr.""" - err("Aborted!") - for task in tasks: - task.cancel() - - -def shutdown(loop: asyncio.AbstractEventLoop) -> None: - """Cancel all pending tasks on `loop`, wait for them, and close the loop.""" - try: - if sys.version_info[:2] >= (3, 7): - all_tasks = asyncio.all_tasks - else: - all_tasks = asyncio.Task.all_tasks - # This part is borrowed from asyncio/runners.py in Python 3.7b2. 
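# [Editor's sketch] Intended use, hypothetical and not part of the original:
#   loop = asyncio.new_event_loop()
#   try:
#       loop.run_until_complete(some_coroutine())
#   finally:
#       shutdown(loop)  # cancel stragglers, silence spew, close the loop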
- to_cancel = [task for task in all_tasks(loop) if not task.done()] - if not to_cancel: - return - - for task in to_cancel: - task.cancel() - loop.run_until_complete( - asyncio.gather(*to_cancel, loop=loop, return_exceptions=True) - ) - finally: - # `concurrent.futures.Future` objects cannot be cancelled once they - # are already running. There might be some when the `shutdown()` happened. - # Silence their logger's spew about the event loop being closed. - cf_logger = logging.getLogger("concurrent.futures") - cf_logger.setLevel(logging.CRITICAL) - loop.close() - - -def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str: - """Replace `regex` with `replacement` twice on `original`. - - This is used by string normalization to perform replaces on - overlapping matches. - """ - return regex.sub(replacement, regex.sub(replacement, original)) - - -def re_compile_maybe_verbose(regex: str) -> Pattern[str]: - """Compile a regular expression string in `regex`. - - If it contains newlines, use verbose mode. - """ - if "\n" in regex: - regex = "(?x)" + regex - compiled: Pattern[str] = re.compile(regex) - return compiled - - -def enumerate_reversed(sequence: Sequence[T]) -> Iterator[Tuple[Index, T]]: - """Like `reversed(enumerate(sequence))` if that were possible.""" - index = len(sequence) - 1 - for element in reversed(sequence): - yield (index, element) - index -= 1 - - -def enumerate_with_length( - line: Line, reversed: bool = False -) -> Iterator[Tuple[Index, Leaf, int]]: - """Return an enumeration of leaves with their length. - - Stops prematurely on multiline strings and standalone comments. - """ - op = cast( - Callable[[Sequence[Leaf]], Iterator[Tuple[Index, Leaf]]], - enumerate_reversed if reversed else enumerate, - ) - for index, leaf in op(line.leaves): - length = len(leaf.prefix) + len(leaf.value) - if "\n" in leaf.value: - return # Multiline strings, we can't continue. - - for comment in line.comments_after(leaf): - length += len(comment.value) - - yield index, leaf, length - - -def is_line_short_enough(line: Line, *, line_length: int, line_str: str = "") -> bool: - """Return True if `line` is no longer than `line_length`. - - Uses the provided `line_str` rendering, if any, otherwise computes a new one. - """ - if not line_str: - line_str = str(line).strip("\n") - return ( - len(line_str) <= line_length - and "\n" not in line_str # multiline strings - and not line.contains_standalone_comments() - ) - - -def can_be_split(line: Line) -> bool: - """Return False if the line cannot be split *for sure*. - - This is not an exhaustive search but a cheap heuristic that we can use to - avoid some unfortunate formattings (mostly around wrapping unsplittable code - in unnecessary parentheses). - """ - leaves = line.leaves - if len(leaves) < 2: - return False - - if leaves[0].type == token.STRING and leaves[1].type == token.DOT: - call_count = 0 - dot_count = 0 - next = leaves[-1] - for leaf in leaves[-2::-1]: - if leaf.type in OPENING_BRACKETS: - if next.type not in CLOSING_BRACKETS: - return False - - call_count += 1 - elif leaf.type == token.DOT: - dot_count += 1 - elif leaf.type == token.NAME: - if not (next.type == token.DOT or next.type in OPENING_BRACKETS): - return False - - elif leaf.type not in CLOSING_BRACKETS: - return False - - if dot_count > 1 and call_count > 1: - return False - - return True - - -def can_omit_invisible_parens(line: Line, line_length: int) -> bool: - """Does `line` have a shape safe to reformat without optional parens around it? 
- - Returns True for only a subset of potentially nice looking formattings but - the point is to not return false positives that end up producing lines that - are too long. - """ - bt = line.bracket_tracker - if not bt.delimiters: - # Without delimiters the optional parentheses are useless. - return True - - max_priority = bt.max_delimiter_priority() - if bt.delimiter_count_with_priority(max_priority) > 1: - # With more than one delimiter of a kind the optional parentheses read better. - return False - - if max_priority == DOT_PRIORITY: - # A single stranded method call doesn't require optional parentheses. - return True - - assert len(line.leaves) >= 2, "Stranded delimiter" - - first = line.leaves[0] - second = line.leaves[1] - penultimate = line.leaves[-2] - last = line.leaves[-1] - - # With a single delimiter, omit if the expression starts or ends with - # a bracket. - if first.type in OPENING_BRACKETS and second.type not in CLOSING_BRACKETS: - remainder = False - length = 4 * line.depth - for _index, leaf, leaf_length in enumerate_with_length(line): - if leaf.type in CLOSING_BRACKETS and leaf.opening_bracket is first: - remainder = True - if remainder: - length += leaf_length - if length > line_length: - break - - if leaf.type in OPENING_BRACKETS: - # There are brackets we can further split on. - remainder = False - - else: - # checked the entire string and line length wasn't exceeded - if len(line.leaves) == _index + 1: - return True - - # Note: we are not returning False here because a line might have *both* - # a leading opening bracket and a trailing closing bracket. If the - # opening bracket doesn't match our rule, maybe the closing will. - - if ( - last.type == token.RPAR - or last.type == token.RBRACE - or ( - # don't use indexing for omitting optional parentheses; - # it looks weird - last.type == token.RSQB - and last.parent - and last.parent.type != syms.trailer - ) - ): - if penultimate.type in OPENING_BRACKETS: - # Empty brackets don't help. - return False - - if is_multiline_string(first): - # Additional wrapping of a multiline string in this situation is - # unnecessary. - return True - - length = 4 * line.depth - seen_other_brackets = False - for _index, leaf, leaf_length in enumerate_with_length(line): - length += leaf_length - if leaf is last.opening_bracket: - if seen_other_brackets or length <= line_length: - return True - - elif leaf.type in OPENING_BRACKETS: - # There are brackets we can further split on. - seen_other_brackets = True - - return False - - -def get_cache_file(mode: FileMode) -> Path: - return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle" - - -def read_cache(mode: FileMode) -> Cache: - """Read the cache if it exists and is well formed. - - If it is not well formed, the call to write_cache later should resolve the issue. - """ - cache_file = get_cache_file(mode) - if not cache_file.exists(): - return {} - - with cache_file.open("rb") as fobj: - try: - cache: Cache = pickle.load(fobj) - except (pickle.UnpicklingError, ValueError): - return {} - - return cache - - -def get_cache_info(path: Path) -> CacheInfo: - """Return the information used to check if a file is already formatted or not.""" - stat = path.stat() - return stat.st_mtime, stat.st_size - - -def filter_cached(cache: Cache, sources: Iterable[Path]) -> Tuple[Set[Path], Set[Path]]: - """Split an iterable of paths in `sources` into two sets. - - The first contains paths of files that modified on disk or are not in the - cache. The other contains paths to non-modified files. 
- """ - todo, done = set(), set() - for src in sources: - src = src.resolve() - if cache.get(src) != get_cache_info(src): - todo.add(src) - else: - done.add(src) - return todo, done - - -def write_cache(cache: Cache, sources: Iterable[Path], mode: FileMode) -> None: - """Update the cache file.""" - cache_file = get_cache_file(mode) - try: - CACHE_DIR.mkdir(parents=True, exist_ok=True) - new_cache = {**cache, **{src.resolve(): get_cache_info(src) for src in sources}} - with tempfile.NamedTemporaryFile(dir=str(cache_file.parent), delete=False) as f: - pickle.dump(new_cache, f, protocol=4) - os.replace(f.name, cache_file) - except OSError: - pass - - -def patch_click() -> None: - """Make Click not crash. - - On certain misconfigured environments, Python 3 selects the ASCII encoding as the - default which restricts paths that it can access during the lifetime of the - application. Click refuses to work in this scenario by raising a RuntimeError. - - In case of Black the likelihood that non-ASCII characters are going to be used in - file paths is minimal since it's Python source code. Moreover, this crash was - spurious on Python 3.7 thanks to PEP 538 and PEP 540. - """ - try: - from click import core - from click import _unicodefun # type: ignore - except ModuleNotFoundError: - return - - for module in (core, _unicodefun): - if hasattr(module, "_verify_python3_env"): - module._verify_python3_env = lambda: None - - -def patched_main() -> None: - freeze_support() - patch_click() - main() - - -if __name__ == "__main__": - patched_main() diff --git a/venv/lib/python3.8/site-packages/blackd.py b/venv/lib/python3.8/site-packages/blackd.py deleted file mode 100644 index d79bfe75..00000000 --- a/venv/lib/python3.8/site-packages/blackd.py +++ /dev/null @@ -1,194 +0,0 @@ -import asyncio -from concurrent.futures import Executor, ProcessPoolExecutor -from datetime import datetime -from functools import partial -import logging -from multiprocessing import freeze_support -from typing import Set, Tuple - -from aiohttp import web -import aiohttp_cors -import black -import click - -from _black_version import version as __version__ - -# This is used internally by tests to shut down the server prematurely -_stop_signal = asyncio.Event() - -# Request headers -PROTOCOL_VERSION_HEADER = "X-Protocol-Version" -LINE_LENGTH_HEADER = "X-Line-Length" -PYTHON_VARIANT_HEADER = "X-Python-Variant" -SKIP_STRING_NORMALIZATION_HEADER = "X-Skip-String-Normalization" -FAST_OR_SAFE_HEADER = "X-Fast-Or-Safe" -DIFF_HEADER = "X-Diff" - -BLACK_HEADERS = [ - PROTOCOL_VERSION_HEADER, - LINE_LENGTH_HEADER, - PYTHON_VARIANT_HEADER, - SKIP_STRING_NORMALIZATION_HEADER, - FAST_OR_SAFE_HEADER, - DIFF_HEADER, -] - -# Response headers -BLACK_VERSION_HEADER = "X-Black-Version" - - -class InvalidVariantHeader(Exception): - pass - - -@click.command(context_settings={"help_option_names": ["-h", "--help"]}) -@click.option( - "--bind-host", type=str, help="Address to bind the server to.", default="localhost" -) -@click.option("--bind-port", type=int, help="Port to listen on", default=45484) -@click.version_option(version=black.__version__) -def main(bind_host: str, bind_port: int) -> None: - logging.basicConfig(level=logging.INFO) - app = make_app() - ver = black.__version__ - black.out(f"blackd version {ver} listening on {bind_host} port {bind_port}") - web.run_app(app, host=bind_host, port=bind_port, handle_signals=True, print=None) - - -def make_app() -> web.Application: - app = web.Application() - executor = ProcessPoolExecutor() - - cors = 
aiohttp_cors.setup(app) - resource = cors.add(app.router.add_resource("/")) - cors.add( - resource.add_route("POST", partial(handle, executor=executor)), - { - "*": aiohttp_cors.ResourceOptions( - allow_headers=(*BLACK_HEADERS, "Content-Type"), expose_headers="*" - ) - }, - ) - - return app - - -async def handle(request: web.Request, executor: Executor) -> web.Response: - headers = {BLACK_VERSION_HEADER: __version__} - try: - if request.headers.get(PROTOCOL_VERSION_HEADER, "1") != "1": - return web.Response( - status=501, text="This server only supports protocol version 1" - ) - try: - line_length = int( - request.headers.get(LINE_LENGTH_HEADER, black.DEFAULT_LINE_LENGTH) - ) - except ValueError: - return web.Response(status=400, text="Invalid line length header value") - - if PYTHON_VARIANT_HEADER in request.headers: - value = request.headers[PYTHON_VARIANT_HEADER] - try: - pyi, versions = parse_python_variant_header(value) - except InvalidVariantHeader as e: - return web.Response( - status=400, - text=f"Invalid value for {PYTHON_VARIANT_HEADER}: {e.args[0]}", - ) - else: - pyi = False - versions = set() - - skip_string_normalization = bool( - request.headers.get(SKIP_STRING_NORMALIZATION_HEADER, False) - ) - fast = False - if request.headers.get(FAST_OR_SAFE_HEADER, "safe") == "fast": - fast = True - mode = black.FileMode( - target_versions=versions, - is_pyi=pyi, - line_length=line_length, - string_normalization=not skip_string_normalization, - ) - req_bytes = await request.content.read() - charset = request.charset if request.charset is not None else "utf8" - req_str = req_bytes.decode(charset) - then = datetime.utcnow() - - loop = asyncio.get_event_loop() - formatted_str = await loop.run_in_executor( - executor, partial(black.format_file_contents, req_str, fast=fast, mode=mode) - ) - - # Only output the diff in the HTTP response - only_diff = bool(request.headers.get(DIFF_HEADER, False)) - if only_diff: - now = datetime.utcnow() - src_name = f"In\t{then} +0000" - dst_name = f"Out\t{now} +0000" - loop = asyncio.get_event_loop() - formatted_str = await loop.run_in_executor( - executor, - partial(black.diff, req_str, formatted_str, src_name, dst_name), - ) - - return web.Response( - content_type=request.content_type, - charset=charset, - headers=headers, - text=formatted_str, - ) - except black.NothingChanged: - return web.Response(status=204, headers=headers) - except black.InvalidInput as e: - return web.Response(status=400, headers=headers, text=str(e)) - except Exception as e: - logging.exception("Exception during handling a request") - return web.Response(status=500, headers=headers, text=str(e)) - - -def parse_python_variant_header(value: str) -> Tuple[bool, Set[black.TargetVersion]]: - if value == "pyi": - return True, set() - else: - versions = set() - for version in value.split(","): - if version.startswith("py"): - version = version[len("py") :] - if "." in version: - major_str, *rest = version.split(".") - else: - major_str = version[0] - rest = [version[1:]] if len(version) > 1 else [] - try: - major = int(major_str) - if major not in (2, 3): - raise InvalidVariantHeader("major version must be 2 or 3") - if len(rest) > 0: - minor = int(rest[0]) - if major == 2 and minor != 7: - raise InvalidVariantHeader( - "minor version must be 7 for Python 2" - ) - else: - # Default to lowest supported minor version. 
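# [Editor's examples] Hypothetical header values, not from the original file:
#   "pyi"       -> (True, set())        (handled by the early return above)
#   "py3.7,3.6" -> (False, {TargetVersion.PY37, TargetVersion.PY36})
#   "py3"       -> (False, {TargetVersion.PY33})  (bare major lands here)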
- minor = 7 if major == 2 else 3 - version_str = f"PY{major}{minor}" - if major == 3 and not hasattr(black.TargetVersion, version_str): - raise InvalidVariantHeader(f"3.{minor} is not supported") - versions.add(black.TargetVersion[version_str]) - except (KeyError, ValueError): - raise InvalidVariantHeader("expected e.g. '3.7', 'py3.5'") - return False, versions - - -def patched_main() -> None: - freeze_support() - black.patch_click() - main() - - -if __name__ == "__main__": - patched_main() diff --git a/venv/lib/python3.8/site-packages/blib2to3/Grammar.txt b/venv/lib/python3.8/site-packages/blib2to3/Grammar.txt deleted file mode 100644 index d90710f6..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/Grammar.txt +++ /dev/null @@ -1,215 +0,0 @@ -# Grammar for 2to3. This grammar supports Python 2.x and 3.x. - -# NOTE WELL: You should also follow all the steps listed at -# https://devguide.python.org/grammar/ - -# Start symbols for the grammar: -# file_input is a module or sequence of commands read from an input file; -# single_input is a single interactive statement; -# eval_input is the input for the eval() and input() functions. -# NB: compound_stmt in single_input is followed by extra NEWLINE! -file_input: (NEWLINE | stmt)* ENDMARKER -single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE -eval_input: testlist NEWLINE* ENDMARKER - -decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE -decorators: decorator+ -decorated: decorators (classdef | funcdef | async_funcdef) -async_funcdef: ASYNC funcdef -funcdef: 'def' NAME parameters ['->' test] ':' suite -parameters: '(' [typedargslist] ')' - -# The following definition for typedarglist is equivalent to this set of rules: -# -# arguments = argument (',' argument)* -# argument = tfpdef ['=' test] -# kwargs = '**' tname [','] -# args = '*' [tname] -# kwonly_kwargs = (',' argument)* [',' [kwargs]] -# args_kwonly_kwargs = args kwonly_kwargs | kwargs -# poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]] -# typedargslist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs -# typedarglist = arguments ',' '/' [',' [typedargslist_no_posonly]])|(typedargslist_no_posonly)" -# -# It needs to be fully expanded to allow our LL(1) parser to work on it. - -typedargslist: tfpdef ['=' test] (',' tfpdef ['=' test])* ',' '/' [ - ',' [((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])* - [',' ['**' tname [',']]] | '**' tname [',']) - | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])] - ] | ((tfpdef ['=' test] ',')* ('*' [tname] (',' tname ['=' test])* - [',' ['**' tname [',']]] | '**' tname [',']) - | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) - -tname: NAME [':' test] -tfpdef: tname | '(' tfplist ')' -tfplist: tfpdef (',' tfpdef)* [','] - -# The following definition for varargslist is equivalent to this set of rules: -# -# arguments = argument (',' argument )* -# argument = vfpdef ['=' test] -# kwargs = '**' vname [','] -# args = '*' [vname] -# kwonly_kwargs = (',' argument )* [',' [kwargs]] -# args_kwonly_kwargs = args kwonly_kwargs | kwargs -# poskeyword_args_kwonly_kwargs = arguments [',' [args_kwonly_kwargs]] -# vararglist_no_posonly = poskeyword_args_kwonly_kwargs | args_kwonly_kwargs -# varargslist = arguments ',' '/' [','[(vararglist_no_posonly)]] | (vararglist_no_posonly) -# -# It needs to be fully expanded to allow our LL(1) parser to work on it. 
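# [Editor's note] Illustrative only, not part of the original grammar: the
# expanded rule below accepts lambda signatures such as
#     lambda x, /, y=1, *args, **kwargs: ...
# where "x, /" takes the positional-only branch and the rest exercise the
# args/kwonly/kwargs branches spelled out above.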
- -varargslist: vfpdef ['=' test ](',' vfpdef ['=' test])* ',' '/' [',' [ - ((vfpdef ['=' test] ',')* ('*' [vname] (',' vname ['=' test])* - [',' ['**' vname [',']]] | '**' vname [',']) - | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) - ]] | ((vfpdef ['=' test] ',')* - ('*' [vname] (',' vname ['=' test])* [',' ['**' vname [',']]]| '**' vname [',']) - | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) - -vname: NAME -vfpdef: vname | '(' vfplist ')' -vfplist: vfpdef (',' vfpdef)* [','] - -stmt: simple_stmt | compound_stmt -simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE -small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | - import_stmt | global_stmt | exec_stmt | assert_stmt) -expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) | - ('=' (yield_expr|testlist_star_expr))*) -annassign: ':' test ['=' test] -testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] -augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | - '<<=' | '>>=' | '**=' | '//=') -# For normal and annotated assignments, additional restrictions enforced by the interpreter -print_stmt: 'print' ( [ test (',' test)* [','] ] | - '>>' test [ (',' test)+ [','] ] ) -del_stmt: 'del' exprlist -pass_stmt: 'pass' -flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt -break_stmt: 'break' -continue_stmt: 'continue' -return_stmt: 'return' [testlist] -yield_stmt: yield_expr -raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]] -import_stmt: import_name | import_from -import_name: 'import' dotted_as_names -import_from: ('from' ('.'* dotted_name | '.'+) - 'import' ('*' | '(' import_as_names ')' | import_as_names)) -import_as_name: NAME ['as' NAME] -dotted_as_name: dotted_name ['as' NAME] -import_as_names: import_as_name (',' import_as_name)* [','] -dotted_as_names: dotted_as_name (',' dotted_as_name)* -dotted_name: NAME ('.' 
NAME)* -global_stmt: ('global' | 'nonlocal') NAME (',' NAME)* -exec_stmt: 'exec' expr ['in' test [',' test]] -assert_stmt: 'assert' test [',' test] - -compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt -async_stmt: ASYNC (funcdef | with_stmt | for_stmt) -if_stmt: 'if' namedexpr_test ':' suite ('elif' namedexpr_test ':' suite)* ['else' ':' suite] -while_stmt: 'while' namedexpr_test ':' suite ['else' ':' suite] -for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] -try_stmt: ('try' ':' suite - ((except_clause ':' suite)+ - ['else' ':' suite] - ['finally' ':' suite] | - 'finally' ':' suite)) -with_stmt: 'with' with_item (',' with_item)* ':' suite -with_item: test ['as' expr] -with_var: 'as' expr -# NB compile.c makes sure that the default except clause is last -except_clause: 'except' [test [(',' | 'as') test]] -suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT - -# Backward compatibility cruft to support: -# [ x for x in lambda: True, lambda: False if x() ] -# even while also allowing: -# lambda x: 5 if x else 2 -# (But not a mix of the two) -testlist_safe: old_test [(',' old_test)+ [',']] -old_test: or_test | old_lambdef -old_lambdef: 'lambda' [varargslist] ':' old_test - -namedexpr_test: test [':=' test] -test: or_test ['if' or_test 'else' test] | lambdef -or_test: and_test ('or' and_test)* -and_test: not_test ('and' not_test)* -not_test: 'not' not_test | comparison -comparison: expr (comp_op expr)* -comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' -star_expr: '*' expr -expr: xor_expr ('|' xor_expr)* -xor_expr: and_expr ('^' and_expr)* -and_expr: shift_expr ('&' shift_expr)* -shift_expr: arith_expr (('<<'|'>>') arith_expr)* -arith_expr: term (('+'|'-') term)* -term: factor (('*'|'@'|'/'|'%'|'//') factor)* -factor: ('+'|'-'|'~') factor | power -power: [AWAIT] atom trailer* ['**' factor] -atom: ('(' [yield_expr|testlist_gexp] ')' | - '[' [listmaker] ']' | - '{' [dictsetmaker] '}' | - '`' testlist1 '`' | - NAME | NUMBER | STRING+ | '.' '.' '.') -listmaker: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] ) -testlist_gexp: (namedexpr_test|star_expr) ( old_comp_for | (',' (namedexpr_test|star_expr))* [','] ) -lambdef: 'lambda' [varargslist] ':' test -trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME -subscriptlist: subscript (',' subscript)* [','] -subscript: test | [test] ':' [test] [sliceop] -sliceop: ':' [test] -exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] -testlist: test (',' test)* [','] -dictsetmaker: ( ((test ':' test | '**' expr) - (comp_for | (',' (test ':' test | '**' expr))* [','])) | - ((test | star_expr) - (comp_for | (',' (test | star_expr))* [','])) ) - -classdef: 'class' NAME ['(' [arglist] ')'] ':' suite - -arglist: argument (',' argument)* [','] - -# "test '=' test" is really "keyword '=' test", but we have no such token. -# These need to be in a single rule to avoid grammar that is ambiguous -# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, -# we explicitly match '*' here, too, to give it proper precedence. -# Illegal combinations and orderings are blocked in ast.c: -# multiple (test comp_for) arguments are blocked; keyword unpackings -# that precede iterable unpackings are blocked; etc. 
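# [Editor's note] Illustration, not part of the original grammar: in a call
# such as f(a, *b, key=c, **d) each element matches a different 'argument'
# alternative below; orderings the grammar cannot rule out (e.g. '**d' before
# '*b') are rejected later, in ast.c, as described above.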
-argument: ( test [comp_for] | - test ':=' test | - test '=' test | - '**' test | - '*' test ) - -comp_iter: comp_for | comp_if -comp_for: [ASYNC] 'for' exprlist 'in' or_test [comp_iter] -comp_if: 'if' old_test [comp_iter] - -# As noted above, testlist_safe extends the syntax allowed in list -# comprehensions and generators. We can't use it indiscriminately in all -# derivations using a comp_for-like pattern because the testlist_safe derivation -# contains comma which clashes with trailing comma in arglist. -# -# This was an issue because the parser would not follow the correct derivation -# when parsing syntactically valid Python code. Since testlist_safe was created -# specifically to handle list comprehensions and generator expressions enclosed -# with parentheses, it's safe to only use it in those. That avoids the issue; we -# can parse code like set(x for x in [],). -# -# The syntax supported by this set of rules is not a valid Python 3 syntax, -# hence the prefix "old". -# -# See https://bugs.python.org/issue27494 -old_comp_iter: old_comp_for | old_comp_if -old_comp_for: [ASYNC] 'for' exprlist 'in' testlist_safe [old_comp_iter] -old_comp_if: 'if' old_test [old_comp_iter] - -testlist1: test (',' test)* - -# not used in grammar, but may appear in "node" passed from Parser to Compiler -encoding_decl: NAME - -yield_expr: 'yield' [yield_arg] -yield_arg: 'from' test | testlist diff --git a/venv/lib/python3.8/site-packages/blib2to3/PatternGrammar.txt b/venv/lib/python3.8/site-packages/blib2to3/PatternGrammar.txt deleted file mode 100644 index 36bf8148..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/PatternGrammar.txt +++ /dev/null @@ -1,28 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -# A grammar to describe tree matching patterns. -# Not shown here: -# - 'TOKEN' stands for any token (leaf node) -# - 'any' stands for any node (leaf or interior) -# With 'any' we can still specify the sub-structure. - -# The start symbol is 'Matcher'. - -Matcher: Alternatives ENDMARKER - -Alternatives: Alternative ('|' Alternative)* - -Alternative: (Unit | NegatedUnit)+ - -Unit: [NAME '='] ( STRING [Repeater] - | NAME [Details] [Repeater] - | '(' Alternatives ')' [Repeater] - | '[' Alternatives ']' - ) - -NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')') - -Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}' - -Details: '<' Alternatives '>' diff --git a/venv/lib/python3.8/site-packages/blib2to3/__init__.py b/venv/lib/python3.8/site-packages/blib2to3/__init__.py deleted file mode 100644 index 1bb8bf6d..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# empty diff --git a/venv/lib/python3.8/site-packages/blib2to3/pgen2/__init__.py b/venv/lib/python3.8/site-packages/blib2to3/pgen2/__init__.py deleted file mode 100644 index af390484..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/pgen2/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""The pgen2 package.""" diff --git a/venv/lib/python3.8/site-packages/blib2to3/pgen2/conv.py b/venv/lib/python3.8/site-packages/blib2to3/pgen2/conv.py deleted file mode 100644 index 728b1526..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/pgen2/conv.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. 
-
-"""Convert graminit.[ch] spit out by pgen to Python code.
-
-Pgen is the Python parser generator. It is useful to quickly create a
-parser from a grammar file in Python's grammar notation. But I don't
-want my parsers to be written in C (yet), so I'm translating the
-parsing tables to Python data structures and writing a Python parse
-engine.
-
-Note that the token numbers are constants determined by the standard
-Python tokenizer. The standard token module defines these numbers and
-their names (the names are not used much). The token numbers are
-hardcoded into the Python tokenizer and into pgen. A Python
-implementation of the Python tokenizer is also available, in the
-standard tokenize module.
-
-On the other hand, symbol numbers (representing the grammar's
-non-terminals) are assigned by pgen based on the actual grammar
-input.
-
-Note: this module is pretty much obsolete; the pgen module generates
-equivalent grammar tables directly from the Grammar.txt input file
-without having to invoke the Python pgen C program.
-
-"""
-
-# Python imports
-import regex as re
-
-# Local imports
-from pgen2 import grammar, token
-
-
-class Converter(grammar.Grammar):
-    """Grammar subclass that reads classic pgen output files.
-
-    The run() method reads the tables as produced by the pgen parser
-    generator, typically contained in two C files, graminit.h and
-    graminit.c. The other methods are for internal use only.
-
-    See the base class for more documentation.
-
-    """
-
-    def run(self, graminit_h, graminit_c):
-        """Load the grammar tables from the text files written by pgen."""
-        self.parse_graminit_h(graminit_h)
-        self.parse_graminit_c(graminit_c)
-        self.finish_off()
-
-    def parse_graminit_h(self, filename):
-        """Parse the .h file written by pgen. (Internal)
-
-        This file is a sequence of #define statements defining the
-        nonterminals of the grammar as numbers. We build two tables
-        mapping the numbers to names and back.
-
-        """
-        try:
-            f = open(filename)
-        except OSError as err:
-            print("Can't open %s: %s" % (filename, err))
-            return False
-        self.symbol2number = {}
-        self.number2symbol = {}
-        lineno = 0
-        for line in f:
-            lineno += 1
-            mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
-            if not mo and line.strip():
-                print("%s(%s): can't parse %s" % (filename, lineno, line.strip()))
-            else:
-                symbol, number = mo.groups()
-                number = int(number)
-                assert symbol not in self.symbol2number
-                assert number not in self.number2symbol
-                self.symbol2number[symbol] = number
-                self.number2symbol[number] = symbol
-        return True
-
-    def parse_graminit_c(self, filename):
-        """Parse the .c file written by pgen. (Internal)
-
-        The file looks as follows. The first two lines are always this:
-
-            #include "pgenheaders.h"
-            #include "grammar.h"
-
-        After that come four blocks:
-
-        1) one or more state definitions
-        2) a table defining dfas
-        3) a table defining labels
-        4) a struct defining the grammar
-
-        A state definition has the following form:
-        - one or more arc arrays, each of the form:
-          static arc arcs_<n>_<m>[<k>] = {
-              {<i>, <j>},
-              ...
-          };
-        - followed by a state array, of the form:
-          static state states_<s>[<t>] = {
-              {<k>, arcs_<n>_<m>},
-              ...
-          };
-
-        """
-        try:
-            f = open(filename)
-        except OSError as err:
-            print("Can't open %s: %s" % (filename, err))
-            return False
-        # The code below essentially uses f's iterator-ness!
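# [Editor's note] Annotation, not original code: the recurring pattern below,
#     lineno, line = lineno + 1, next(f)
# advances the file iterator and the line counter in lockstep, so every
# assert can report the exact (lineno, line) pair that failed to parse.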
- lineno = 0 - - # Expect the two #include lines - lineno, line = lineno + 1, next(f) - assert line == '#include "pgenheaders.h"\n', (lineno, line) - lineno, line = lineno + 1, next(f) - assert line == '#include "grammar.h"\n', (lineno, line) - - # Parse the state definitions - lineno, line = lineno + 1, next(f) - allarcs = {} - states = [] - while line.startswith("static arc "): - while line.startswith("static arc "): - mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$", line) - assert mo, (lineno, line) - n, m, k = list(map(int, mo.groups())) - arcs = [] - for _ in range(k): - lineno, line = lineno + 1, next(f) - mo = re.match(r"\s+{(\d+), (\d+)},$", line) - assert mo, (lineno, line) - i, j = list(map(int, mo.groups())) - arcs.append((i, j)) - lineno, line = lineno + 1, next(f) - assert line == "};\n", (lineno, line) - allarcs[(n, m)] = arcs - lineno, line = lineno + 1, next(f) - mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line) - assert mo, (lineno, line) - s, t = list(map(int, mo.groups())) - assert s == len(states), (lineno, line) - state = [] - for _ in range(t): - lineno, line = lineno + 1, next(f) - mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line) - assert mo, (lineno, line) - k, n, m = list(map(int, mo.groups())) - arcs = allarcs[n, m] - assert k == len(arcs), (lineno, line) - state.append(arcs) - states.append(state) - lineno, line = lineno + 1, next(f) - assert line == "};\n", (lineno, line) - lineno, line = lineno + 1, next(f) - self.states = states - - # Parse the dfas - dfas = {} - mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line) - assert mo, (lineno, line) - ndfas = int(mo.group(1)) - for i in range(ndfas): - lineno, line = lineno + 1, next(f) - mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$', line) - assert mo, (lineno, line) - symbol = mo.group(2) - number, x, y, z = list(map(int, mo.group(1, 3, 4, 5))) - assert self.symbol2number[symbol] == number, (lineno, line) - assert self.number2symbol[number] == symbol, (lineno, line) - assert x == 0, (lineno, line) - state = states[z] - assert y == len(state), (lineno, line) - lineno, line = lineno + 1, next(f) - mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line) - assert mo, (lineno, line) - first = {} - rawbitset = eval(mo.group(1)) - for i, c in enumerate(rawbitset): - byte = ord(c) - for j in range(8): - if byte & (1 << j): - first[i * 8 + j] = 1 - dfas[number] = (state, first) - lineno, line = lineno + 1, next(f) - assert line == "};\n", (lineno, line) - self.dfas = dfas - - # Parse the labels - labels = [] - lineno, line = lineno + 1, next(f) - mo = re.match(r"static label labels\[(\d+)\] = {$", line) - assert mo, (lineno, line) - nlabels = int(mo.group(1)) - for i in range(nlabels): - lineno, line = lineno + 1, next(f) - mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line) - assert mo, (lineno, line) - x, y = mo.groups() - x = int(x) - if y == "0": - y = None - else: - y = eval(y) - labels.append((x, y)) - lineno, line = lineno + 1, next(f) - assert line == "};\n", (lineno, line) - self.labels = labels - - # Parse the grammar struct - lineno, line = lineno + 1, next(f) - assert line == "grammar _PyParser_Grammar = {\n", (lineno, line) - lineno, line = lineno + 1, next(f) - mo = re.match(r"\s+(\d+),$", line) - assert mo, (lineno, line) - ndfas = int(mo.group(1)) - assert ndfas == len(self.dfas) - lineno, line = lineno + 1, next(f) - assert line == "\tdfas,\n", (lineno, line) - lineno, line = lineno + 1, next(f) - mo = re.match(r"\s+{(\d+), labels},$", line) - assert mo, (lineno, 
line) - nlabels = int(mo.group(1)) - assert nlabels == len(self.labels), (lineno, line) - lineno, line = lineno + 1, next(f) - mo = re.match(r"\s+(\d+)$", line) - assert mo, (lineno, line) - start = int(mo.group(1)) - assert start in self.number2symbol, (lineno, line) - self.start = start - lineno, line = lineno + 1, next(f) - assert line == "};\n", (lineno, line) - try: - lineno, line = lineno + 1, next(f) - except StopIteration: - pass - else: - assert 0, (lineno, line) - - def finish_off(self): - """Create additional useful structures. (Internal).""" - self.keywords = {} # map from keyword strings to arc labels - self.tokens = {} # map from numeric token values to arc labels - for ilabel, (type, value) in enumerate(self.labels): - if type == token.NAME and value is not None: - self.keywords[value] = ilabel - elif value is None: - self.tokens[type] = ilabel diff --git a/venv/lib/python3.8/site-packages/blib2to3/pgen2/driver.py b/venv/lib/python3.8/site-packages/blib2to3/pgen2/driver.py deleted file mode 100644 index 6ba6b929..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/pgen2/driver.py +++ /dev/null @@ -1,221 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -# Modifications: -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Parser driver. - -This provides a high-level interface to parse a file into a syntax tree. - -""" - -__author__ = "Guido van Rossum " - -__all__ = ["Driver", "load_grammar"] - -# Python imports -import codecs -import io -import os -import logging -import pkgutil -import sys - -# Pgen imports -from . import grammar, parse, token, tokenize, pgen - - -class Driver(object): - def __init__(self, grammar, convert=None, logger=None): - self.grammar = grammar - if logger is None: - logger = logging.getLogger(__name__) - self.logger = logger - self.convert = convert - - def parse_tokens(self, tokens, debug=False): - """Parse a series of tokens and return the syntax tree.""" - # XXX Move the prefix computation into a wrapper around tokenize. 
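Annotation: the point of the prefix bookkeeping that follows is that comments and whitespace are never dropped; they ride along as the prefix of the next significant token, which is what lets the tree reproduce a file byte for byte. A sketch, again assuming blib2to3 is importable:

from blib2to3 import pygram, pytree
from blib2to3.pgen2 import driver

pygram.initialize()
d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
tree = d.parse_string("# leading comment\nx = 1\n")
print(repr(next(tree.leaves()).prefix))  # '# leading comment\n', attached to the first leaf
print(str(tree))                         # reproduces the input exactly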
- p = parse.Parser(self.grammar, self.convert) - p.setup() - lineno = 1 - column = 0 - indent_columns = [] - type = value = start = end = line_text = None - prefix = "" - for quintuple in tokens: - type, value, start, end, line_text = quintuple - if start != (lineno, column): - assert (lineno, column) <= start, ((lineno, column), start) - s_lineno, s_column = start - if lineno < s_lineno: - prefix += "\n" * (s_lineno - lineno) - lineno = s_lineno - column = 0 - if column < s_column: - prefix += line_text[column:s_column] - column = s_column - if type in (tokenize.COMMENT, tokenize.NL): - prefix += value - lineno, column = end - if value.endswith("\n"): - lineno += 1 - column = 0 - continue - if type == token.OP: - type = grammar.opmap[value] - if debug: - self.logger.debug( - "%s %r (prefix=%r)", token.tok_name[type], value, prefix - ) - if type == token.INDENT: - indent_columns.append(len(value)) - _prefix = prefix + value - prefix = "" - value = "" - elif type == token.DEDENT: - _indent_col = indent_columns.pop() - prefix, _prefix = self._partially_consume_prefix(prefix, _indent_col) - if p.addtoken(type, value, (prefix, start)): - if debug: - self.logger.debug("Stop.") - break - prefix = "" - if type in {token.INDENT, token.DEDENT}: - prefix = _prefix - lineno, column = end - if value.endswith("\n"): - lineno += 1 - column = 0 - else: - # We never broke out -- EOF is too soon (how can this happen???) - raise parse.ParseError("incomplete input", type, value, (prefix, start)) - return p.rootnode - - def parse_stream_raw(self, stream, debug=False): - """Parse a stream and return the syntax tree.""" - tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar) - return self.parse_tokens(tokens, debug) - - def parse_stream(self, stream, debug=False): - """Parse a stream and return the syntax tree.""" - return self.parse_stream_raw(stream, debug) - - def parse_file(self, filename, encoding=None, debug=False): - """Parse a file and return the syntax tree.""" - with io.open(filename, "r", encoding=encoding) as stream: - return self.parse_stream(stream, debug) - - def parse_string(self, text, debug=False): - """Parse a string and return the syntax tree.""" - tokens = tokenize.generate_tokens( - io.StringIO(text).readline, grammar=self.grammar - ) - return self.parse_tokens(tokens, debug) - - def _partially_consume_prefix(self, prefix, column): - lines = [] - current_line = "" - current_column = 0 - wait_for_nl = False - for char in prefix: - current_line += char - if wait_for_nl: - if char == "\n": - if current_line.strip() and current_column < column: - res = "".join(lines) - return res, prefix[len(res) :] - - lines.append(current_line) - current_line = "" - current_column = 0 - wait_for_nl = False - elif char in " \t": - current_column += 1 - elif char == "\n": - # unexpected empty line - current_column = 0 - else: - # indent is finished - wait_for_nl = True - return "".join(lines), current_line - - -def _generate_pickle_name(gt, cache_dir=None): - head, tail = os.path.splitext(gt) - if tail == ".txt": - tail = "" - name = head + tail + ".".join(map(str, sys.version_info)) + ".pickle" - if cache_dir: - return os.path.join(cache_dir, os.path.basename(name)) - else: - return name - - -def load_grammar(gt="Grammar.txt", gp=None, save=True, force=False, logger=None): - """Load the grammar (maybe from a pickle).""" - if logger is None: - logger = logging.getLogger(__name__) - gp = _generate_pickle_name(gt) if gp is None else gp - if force or not _newer(gp, gt): - 
logger.info("Generating grammar tables from %s", gt) - g = pgen.generate_grammar(gt) - if save: - logger.info("Writing grammar tables to %s", gp) - try: - g.dump(gp) - except OSError as e: - logger.info("Writing failed: %s", e) - else: - g = grammar.Grammar() - g.load(gp) - return g - - -def _newer(a, b): - """Inquire whether file a was written since file b.""" - if not os.path.exists(a): - return False - if not os.path.exists(b): - return True - return os.path.getmtime(a) >= os.path.getmtime(b) - - -def load_packaged_grammar(package, grammar_source, cache_dir=None): - """Normally, loads a pickled grammar by doing - pkgutil.get_data(package, pickled_grammar) - where *pickled_grammar* is computed from *grammar_source* by adding the - Python version and using a ``.pickle`` extension. - - However, if *grammar_source* is an extant file, load_grammar(grammar_source) - is called instead. This facilitates using a packaged grammar file when needed - but preserves load_grammar's automatic regeneration behavior when possible. - - """ - if os.path.isfile(grammar_source): - gp = _generate_pickle_name(grammar_source, cache_dir) if cache_dir else None - return load_grammar(grammar_source, gp=gp) - pickled_name = _generate_pickle_name(os.path.basename(grammar_source), cache_dir) - data = pkgutil.get_data(package, pickled_name) - g = grammar.Grammar() - g.loads(data) - return g - - -def main(*args): - """Main program, when run as a script: produce grammar pickle files. - - Calls load_grammar for each argument, a path to a grammar text file. - """ - if not args: - args = sys.argv[1:] - logging.basicConfig(level=logging.INFO, stream=sys.stdout, format="%(message)s") - for gt in args: - load_grammar(gt, save=True, force=True) - return True - - -if __name__ == "__main__": - sys.exit(int(not main())) diff --git a/venv/lib/python3.8/site-packages/blib2to3/pgen2/grammar.py b/venv/lib/python3.8/site-packages/blib2to3/pgen2/grammar.py deleted file mode 100644 index d6f0fc2c..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/pgen2/grammar.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""This module defines the data structures used to represent a grammar. - -These are a bit arcane because they are derived from the data -structures used by Python's 'pgen' parser generator. - -There's also a table here mapping operators to their names in the -token module; the Python tokenize module reports all operators as the -fallback token code OP, but the parser needs the actual token code. - -""" - -# Python imports -import os -import pickle -import tempfile - -# Local imports -from . import token - - -class Grammar(object): - """Pgen parsing tables conversion class. - - Once initialized, this class supplies the grammar tables for the - parsing engine implemented by parse.py. The parsing engine - accesses the instance variables directly. The class here does not - provide initialization of the tables; several subclasses exist to - do this (see the conv and pgen modules). - - The load() method reads the tables from a pickle file, which is - much faster than the other ways offered by subclasses. The pickle - file is written by calling dump() (after loading the grammar - tables using a subclass). The report() method prints a readable - representation of the tables to stdout, for debugging. - - The instance variables are as follows: - - symbol2number -- a dict mapping symbol names to numbers. 
Symbol - numbers are always 256 or higher, to distinguish - them from token numbers, which are between 0 and - 255 (inclusive). - - number2symbol -- a dict mapping numbers to symbol names; - these two are each other's inverse. - - states -- a list of DFAs, where each DFA is a list of - states, each state is a list of arcs, and each - arc is a (i, j) pair where i is a label and j is - a state number. The DFA number is the index into - this list. (This name is slightly confusing.) - Final states are represented by a special arc of - the form (0, j) where j is its own state number. - - dfas -- a dict mapping symbol numbers to (DFA, first) - pairs, where DFA is an item from the states list - above, and first is a set of tokens that can - begin this grammar rule (represented by a dict - whose values are always 1). - - labels -- a list of (x, y) pairs where x is either a token - number or a symbol number, and y is either None - or a string; the strings are keywords. The label - number is the index in this list; label numbers - are used to mark state transitions (arcs) in the - DFAs. - - start -- the number of the grammar's start symbol. - - keywords -- a dict mapping keyword strings to arc labels. - - tokens -- a dict mapping token numbers to arc labels. - - """ - - def __init__(self): - self.symbol2number = {} - self.number2symbol = {} - self.states = [] - self.dfas = {} - self.labels = [(0, "EMPTY")] - self.keywords = {} - self.tokens = {} - self.symbol2label = {} - self.start = 256 - # Python 3.7+ parses async as a keyword, not an identifier - self.async_keywords = False - - def dump(self, filename): - """Dump the grammar tables to a pickle file.""" - with tempfile.NamedTemporaryFile( - dir=os.path.dirname(filename), delete=False - ) as f: - pickle.dump(self.__dict__, f, pickle.HIGHEST_PROTOCOL) - os.replace(f.name, filename) - - def load(self, filename): - """Load the grammar tables from a pickle file.""" - with open(filename, "rb") as f: - d = pickle.load(f) - self.__dict__.update(d) - - def loads(self, pkl): - """Load the grammar tables from a pickle bytes object.""" - self.__dict__.update(pickle.loads(pkl)) - - def copy(self): - """ - Copy the grammar. - """ - new = self.__class__() - for dict_attr in ( - "symbol2number", - "number2symbol", - "dfas", - "keywords", - "tokens", - "symbol2label", - ): - setattr(new, dict_attr, getattr(self, dict_attr).copy()) - new.labels = self.labels[:] - new.states = self.states[:] - new.start = self.start - new.async_keywords = self.async_keywords - return new - - def report(self): - """Dump the grammar tables to standard output, for debugging.""" - from pprint import pprint - - print("s2n") - pprint(self.symbol2number) - print("n2s") - pprint(self.number2symbol) - print("states") - pprint(self.states) - print("dfas") - pprint(self.dfas) - print("labels") - pprint(self.labels) - print("start", self.start) - - -# Map from operator to number (since tokenize doesn't do this) - -opmap_raw = """ -( LPAR -) RPAR -[ LSQB -] RSQB -: COLON -, COMMA -; SEMI -+ PLUS -- MINUS -* STAR -/ SLASH -| VBAR -& AMPER -< LESS -> GREATER -= EQUAL -. 
DOT -% PERCENT -` BACKQUOTE -{ LBRACE -} RBRACE -@ AT -@= ATEQUAL -== EQEQUAL -!= NOTEQUAL -<> NOTEQUAL -<= LESSEQUAL ->= GREATEREQUAL -~ TILDE -^ CIRCUMFLEX -<< LEFTSHIFT ->> RIGHTSHIFT -** DOUBLESTAR -+= PLUSEQUAL --= MINEQUAL -*= STAREQUAL -/= SLASHEQUAL -%= PERCENTEQUAL -&= AMPEREQUAL -|= VBAREQUAL -^= CIRCUMFLEXEQUAL -<<= LEFTSHIFTEQUAL ->>= RIGHTSHIFTEQUAL -**= DOUBLESTAREQUAL -// DOUBLESLASH -//= DOUBLESLASHEQUAL --> RARROW -:= COLONEQUAL -""" - -opmap = {} -for line in opmap_raw.splitlines(): - if line: - op, name = line.split() - opmap[op] = getattr(token, name) diff --git a/venv/lib/python3.8/site-packages/blib2to3/pgen2/literals.py b/venv/lib/python3.8/site-packages/blib2to3/pgen2/literals.py deleted file mode 100644 index 93bee528..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/pgen2/literals.py +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Safely evaluate Python string literals without using eval().""" - -import regex as re - -simple_escapes = { - "a": "\a", - "b": "\b", - "f": "\f", - "n": "\n", - "r": "\r", - "t": "\t", - "v": "\v", - "'": "'", - '"': '"', - "\\": "\\", -} - - -def escape(m): - all, tail = m.group(0, 1) - assert all.startswith("\\") - esc = simple_escapes.get(tail) - if esc is not None: - return esc - if tail.startswith("x"): - hexes = tail[1:] - if len(hexes) < 2: - raise ValueError("invalid hex string escape ('\\%s')" % tail) - try: - i = int(hexes, 16) - except ValueError: - raise ValueError("invalid hex string escape ('\\%s')" % tail) from None - else: - try: - i = int(tail, 8) - except ValueError: - raise ValueError("invalid octal string escape ('\\%s')" % tail) from None - return chr(i) - - -def evalString(s): - assert s.startswith("'") or s.startswith('"'), repr(s[:1]) - q = s[0] - if s[:3] == q * 3: - q = q * 3 - assert s.endswith(q), repr(s[-len(q) :]) - assert len(s) >= 2 * len(q) - s = s[len(q) : -len(q)] - return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s) - - -def test(): - for i in range(256): - c = chr(i) - s = repr(c) - e = evalString(s) - if e != c: - print(i, c, s, e) - - -if __name__ == "__main__": - test() diff --git a/venv/lib/python3.8/site-packages/blib2to3/pgen2/parse.py b/venv/lib/python3.8/site-packages/blib2to3/pgen2/parse.py deleted file mode 100644 index 22f14c89..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/pgen2/parse.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Parser engine for the grammar tables generated by pgen. - -The grammar table must be loaded first. - -See Parser/parser.c in the Python distribution for additional info on -how this parsing engine works. - -""" - -# Local imports -from . import token - - -class ParseError(Exception): - """Exception to signal the parser is stuck.""" - - def __init__(self, msg, type, value, context): - Exception.__init__( - self, "%s: type=%r, value=%r, context=%r" % (msg, type, value, context) - ) - self.msg = msg - self.type = type - self.value = value - self.context = context - - -class Parser(object): - """Parser engine. 
- - The proper usage sequence is: - - p = Parser(grammar, [converter]) # create instance - p.setup([start]) # prepare for parsing - : - if p.addtoken(...): # parse a token; may raise ParseError - break - root = p.rootnode # root of abstract syntax tree - - A Parser instance may be reused by calling setup() repeatedly. - - A Parser instance contains state pertaining to the current token - sequence, and should not be used concurrently by different threads - to parse separate token sequences. - - See driver.py for how to get input tokens by tokenizing a file or - string. - - Parsing is complete when addtoken() returns True; the root of the - abstract syntax tree can then be retrieved from the rootnode - instance variable. When a syntax error occurs, addtoken() raises - the ParseError exception. There is no error recovery; the parser - cannot be used after a syntax error was reported (but it can be - reinitialized by calling setup()). - - """ - - def __init__(self, grammar, convert=None): - """Constructor. - - The grammar argument is a grammar.Grammar instance; see the - grammar module for more information. - - The parser is not ready yet for parsing; you must call the - setup() method to get it started. - - The optional convert argument is a function mapping concrete - syntax tree nodes to abstract syntax tree nodes. If not - given, no conversion is done and the syntax tree produced is - the concrete syntax tree. If given, it must be a function of - two arguments, the first being the grammar (a grammar.Grammar - instance), and the second being the concrete syntax tree node - to be converted. The syntax tree is converted from the bottom - up. - - A concrete syntax tree node is a (type, value, context, nodes) - tuple, where type is the node type (a token or symbol number), - value is None for symbols and a string for tokens, context is - None or an opaque value used for error reporting (typically a - (lineno, offset) pair), and nodes is a list of children for - symbols, and None for tokens. - - An abstract syntax tree node may be anything; this is entirely - up to the converter function. - - """ - self.grammar = grammar - self.convert = convert or (lambda grammar, node: node) - - def setup(self, start=None): - """Prepare for parsing. - - This *must* be called before starting to parse. - - The optional argument is an alternative start symbol; it - defaults to the grammar's start symbol. - - You can use a Parser instance to parse any number of programs; - each time you call setup() the parser is reset to an initial - state determined by the (implicit or explicit) start symbol. - - """ - if start is None: - start = self.grammar.start - # Each stack entry is a tuple: (dfa, state, node). - # A node is a tuple: (type, value, context, children), - # where children is a list of nodes or None, and context may be None. 
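Annotation: the usage sequence in the docstring above can be driven by hand. Two wrinkles, both visible in driver.py earlier in this diff: generic OP tokens must be remapped through grammar.opmap before addtoken() sees them, and the context argument is expected by pytree's converter to be a (prefix, position) pair. A runnable sketch, assuming blib2to3 is importable; prefixes are dropped here for brevity, where driver.py does the full bookkeeping:

import io
from blib2to3 import pygram, pytree
from blib2to3.pgen2 import grammar, parse, token, tokenize

pygram.initialize()
p = parse.Parser(pygram.python_grammar, convert=pytree.convert)  # converter builds pytree nodes bottom-up
p.setup()
for type_, value, start, end, line in tokenize.generate_tokens(io.StringIO("x = 1\n").readline):
    if type_ in (tokenize.COMMENT, tokenize.NL):
        continue                      # insignificant tokens never reach addtoken()
    if type_ == token.OP:
        type_ = grammar.opmap[value]  # refine the generic OP code, as driver.py does
    if p.addtoken(type_, value, ("", start)):  # context = (prefix, position); prefix omitted
        break                         # True: ENDMARKER was shifted, parsing is complete
print(repr(str(p.rootnode)))          # 'x=1\n': inter-token whitespace lived in the dropped prefixes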
- newnode = (start, None, None, []) - stackentry = (self.grammar.dfas[start], 0, newnode) - self.stack = [stackentry] - self.rootnode = None - self.used_names = set() # Aliased to self.rootnode.used_names in pop() - - def addtoken(self, type, value, context): - """Add a token; return True iff this is the end of the program.""" - # Map from token to label - ilabel = self.classify(type, value, context) - # Loop until the token is shifted; may raise exceptions - while True: - dfa, state, node = self.stack[-1] - states, first = dfa - arcs = states[state] - # Look for a state with this label - for i, newstate in arcs: - t, v = self.grammar.labels[i] - if ilabel == i: - # Look it up in the list of labels - assert t < 256 - # Shift a token; we're done with it - self.shift(type, value, newstate, context) - # Pop while we are in an accept-only state - state = newstate - while states[state] == [(0, state)]: - self.pop() - if not self.stack: - # Done parsing! - return True - dfa, state, node = self.stack[-1] - states, first = dfa - # Done with this token - return False - elif t >= 256: - # See if it's a symbol and if we're in its first set - itsdfa = self.grammar.dfas[t] - itsstates, itsfirst = itsdfa - if ilabel in itsfirst: - # Push a symbol - self.push(t, self.grammar.dfas[t], newstate, context) - break # To continue the outer while loop - else: - if (0, state) in arcs: - # An accepting state, pop it and try something else - self.pop() - if not self.stack: - # Done parsing, but another token is input - raise ParseError("too much input", type, value, context) - else: - # No success finding a transition - raise ParseError("bad input", type, value, context) - - def classify(self, type, value, context): - """Turn a token into a label. (Internal)""" - if type == token.NAME: - # Keep a listing of all used names - self.used_names.add(value) - # Check for reserved words - ilabel = self.grammar.keywords.get(value) - if ilabel is not None: - return ilabel - ilabel = self.grammar.tokens.get(type) - if ilabel is None: - raise ParseError("bad token", type, value, context) - return ilabel - - def shift(self, type, value, newstate, context): - """Shift a token. (Internal)""" - dfa, state, node = self.stack[-1] - newnode = (type, value, context, None) - newnode = self.convert(self.grammar, newnode) - if newnode is not None: - node[-1].append(newnode) - self.stack[-1] = (dfa, newstate, node) - - def push(self, type, newdfa, newstate, context): - """Push a nonterminal. (Internal)""" - dfa, state, node = self.stack[-1] - newnode = (type, None, context, []) - self.stack[-1] = (dfa, newstate, node) - self.stack.append((newdfa, 0, newnode)) - - def pop(self): - """Pop a nonterminal. (Internal)""" - popdfa, popstate, popnode = self.stack.pop() - newnode = self.convert(self.grammar, popnode) - if newnode is not None: - if self.stack: - dfa, state, node = self.stack[-1] - node[-1].append(newnode) - else: - self.rootnode = newnode - self.rootnode.used_names = self.used_names diff --git a/venv/lib/python3.8/site-packages/blib2to3/pgen2/pgen.py b/venv/lib/python3.8/site-packages/blib2to3/pgen2/pgen.py deleted file mode 100644 index 1da6925e..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/pgen2/pgen.py +++ /dev/null @@ -1,393 +0,0 @@ -# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -# Pgen imports -from . 
import grammar, token, tokenize - - -class PgenGrammar(grammar.Grammar): - pass - - -class ParserGenerator(object): - def __init__(self, filename, stream=None): - close_stream = None - if stream is None: - stream = open(filename) - close_stream = stream.close - self.filename = filename - self.stream = stream - self.generator = tokenize.generate_tokens(stream.readline) - self.gettoken() # Initialize lookahead - self.dfas, self.startsymbol = self.parse() - if close_stream is not None: - close_stream() - self.first = {} # map from symbol name to set of tokens - self.addfirstsets() - - def make_grammar(self): - c = PgenGrammar() - names = list(self.dfas.keys()) - names.sort() - names.remove(self.startsymbol) - names.insert(0, self.startsymbol) - for name in names: - i = 256 + len(c.symbol2number) - c.symbol2number[name] = i - c.number2symbol[i] = name - for name in names: - dfa = self.dfas[name] - states = [] - for state in dfa: - arcs = [] - for label, next in sorted(state.arcs.items()): - arcs.append((self.make_label(c, label), dfa.index(next))) - if state.isfinal: - arcs.append((0, dfa.index(state))) - states.append(arcs) - c.states.append(states) - c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name)) - c.start = c.symbol2number[self.startsymbol] - return c - - def make_first(self, c, name): - rawfirst = self.first[name] - first = {} - for label in sorted(rawfirst): - ilabel = self.make_label(c, label) - ##assert ilabel not in first # XXX failed on <> ... != - first[ilabel] = 1 - return first - - def make_label(self, c, label): - # XXX Maybe this should be a method on a subclass of converter? - ilabel = len(c.labels) - if label[0].isalpha(): - # Either a symbol name or a named token - if label in c.symbol2number: - # A symbol name (a non-terminal) - if label in c.symbol2label: - return c.symbol2label[label] - else: - c.labels.append((c.symbol2number[label], None)) - c.symbol2label[label] = ilabel - return ilabel - else: - # A named token (NAME, NUMBER, STRING) - itoken = getattr(token, label, None) - assert isinstance(itoken, int), label - assert itoken in token.tok_name, label - if itoken in c.tokens: - return c.tokens[itoken] - else: - c.labels.append((itoken, None)) - c.tokens[itoken] = ilabel - return ilabel - else: - # Either a keyword or an operator - assert label[0] in ('"', "'"), label - value = eval(label) - if value[0].isalpha(): - # A keyword - if value in c.keywords: - return c.keywords[value] - else: - c.labels.append((token.NAME, value)) - c.keywords[value] = ilabel - return ilabel - else: - # An operator (any non-numeric token) - itoken = grammar.opmap[value] # Fails if unknown token - if itoken in c.tokens: - return c.tokens[itoken] - else: - c.labels.append((itoken, None)) - c.tokens[itoken] = ilabel - return ilabel - - def addfirstsets(self): - names = list(self.dfas.keys()) - names.sort() - for name in names: - if name not in self.first: - self.calcfirst(name) - # print name, self.first[name].keys() - - def calcfirst(self, name): - dfa = self.dfas[name] - self.first[name] = None # dummy to detect left recursion - state = dfa[0] - totalset = {} - overlapcheck = {} - for label, next in state.arcs.items(): - if label in self.dfas: - if label in self.first: - fset = self.first[label] - if fset is None: - raise ValueError("recursion for rule %r" % name) - else: - self.calcfirst(label) - fset = self.first[label] - totalset.update(fset) - overlapcheck[label] = fset - else: - totalset[label] = 1 - overlapcheck[label] = {label: 1} - inverse = {} - for label, itsfirst 
in overlapcheck.items(): - for symbol in itsfirst: - if symbol in inverse: - raise ValueError( - "rule %s is ambiguous; %s is in the" - " first sets of %s as well as %s" - % (name, symbol, label, inverse[symbol]) - ) - inverse[symbol] = label - self.first[name] = totalset - - def parse(self): - dfas = {} - startsymbol = None - # MSTART: (NEWLINE | RULE)* ENDMARKER - while self.type != token.ENDMARKER: - while self.type == token.NEWLINE: - self.gettoken() - # RULE: NAME ':' RHS NEWLINE - name = self.expect(token.NAME) - self.expect(token.OP, ":") - a, z = self.parse_rhs() - self.expect(token.NEWLINE) - # self.dump_nfa(name, a, z) - dfa = self.make_dfa(a, z) - # self.dump_dfa(name, dfa) - oldlen = len(dfa) - self.simplify_dfa(dfa) - newlen = len(dfa) - dfas[name] = dfa - # print name, oldlen, newlen - if startsymbol is None: - startsymbol = name - return dfas, startsymbol - - def make_dfa(self, start, finish): - # To turn an NFA into a DFA, we define the states of the DFA - # to correspond to *sets* of states of the NFA. Then do some - # state reduction. Let's represent sets as dicts with 1 for - # values. - assert isinstance(start, NFAState) - assert isinstance(finish, NFAState) - - def closure(state): - base = {} - addclosure(state, base) - return base - - def addclosure(state, base): - assert isinstance(state, NFAState) - if state in base: - return - base[state] = 1 - for label, next in state.arcs: - if label is None: - addclosure(next, base) - - states = [DFAState(closure(start), finish)] - for state in states: # NB states grows while we're iterating - arcs = {} - for nfastate in state.nfaset: - for label, next in nfastate.arcs: - if label is not None: - addclosure(next, arcs.setdefault(label, {})) - for label, nfaset in sorted(arcs.items()): - for st in states: - if st.nfaset == nfaset: - break - else: - st = DFAState(nfaset, finish) - states.append(st) - state.addarc(st, label) - return states # List of DFAState instances; first one is start - - def dump_nfa(self, name, start, finish): - print("Dump of NFA for", name) - todo = [start] - for i, state in enumerate(todo): - print(" State", i, state is finish and "(final)" or "") - for label, next in state.arcs: - if next in todo: - j = todo.index(next) - else: - j = len(todo) - todo.append(next) - if label is None: - print(" -> %d" % j) - else: - print(" %s -> %d" % (label, j)) - - def dump_dfa(self, name, dfa): - print("Dump of DFA for", name) - for i, state in enumerate(dfa): - print(" State", i, state.isfinal and "(final)" or "") - for label, next in sorted(state.arcs.items()): - print(" %s -> %d" % (label, dfa.index(next))) - - def simplify_dfa(self, dfa): - # This is not theoretically optimal, but works well enough. - # Algorithm: repeatedly look for two states that have the same - # set of arcs (same labels pointing to the same nodes) and - # unify them, until things stop changing. 
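Annotation: the simplification loop below is a plain fixpoint iteration. A standalone toy version of the same idea (not blib2to3 API, and ignoring finality, which the real DFAState equality also compares):

def unify_equal_states(arcs):
    # arcs: {state_id: {label: target_id}}
    changed = True
    while changed:
        changed = False
        ids = sorted(arcs)
        for a in ids:
            for b in ids:
                if a < b and a in arcs and b in arcs and arcs[a] == arcs[b]:
                    del arcs[b]                # unify b into a ...
                    for m in arcs.values():
                        for lbl, tgt in m.items():
                            if tgt == b:
                                m[lbl] = a     # ... and redirect arcs, like unifystate()
                    changed = True
    return arcs

print(unify_equal_states({0: {"a": 1, "b": 2}, 1: {"x": 3}, 2: {"x": 3}, 3: {}}))
# {0: {'a': 1, 'b': 1}, 1: {'x': 3}, 3: {}}: states 1 and 2 were merged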
- - # dfa is a list of DFAState instances - changes = True - while changes: - changes = False - for i, state_i in enumerate(dfa): - for j in range(i + 1, len(dfa)): - state_j = dfa[j] - if state_i == state_j: - # print " unify", i, j - del dfa[j] - for state in dfa: - state.unifystate(state_j, state_i) - changes = True - break - - def parse_rhs(self): - # RHS: ALT ('|' ALT)* - a, z = self.parse_alt() - if self.value != "|": - return a, z - else: - aa = NFAState() - zz = NFAState() - aa.addarc(a) - z.addarc(zz) - while self.value == "|": - self.gettoken() - a, z = self.parse_alt() - aa.addarc(a) - z.addarc(zz) - return aa, zz - - def parse_alt(self): - # ALT: ITEM+ - a, b = self.parse_item() - while self.value in ("(", "[") or self.type in (token.NAME, token.STRING): - c, d = self.parse_item() - b.addarc(c) - b = d - return a, b - - def parse_item(self): - # ITEM: '[' RHS ']' | ATOM ['+' | '*'] - if self.value == "[": - self.gettoken() - a, z = self.parse_rhs() - self.expect(token.OP, "]") - a.addarc(z) - return a, z - else: - a, z = self.parse_atom() - value = self.value - if value not in ("+", "*"): - return a, z - self.gettoken() - z.addarc(a) - if value == "+": - return a, z - else: - return a, a - - def parse_atom(self): - # ATOM: '(' RHS ')' | NAME | STRING - if self.value == "(": - self.gettoken() - a, z = self.parse_rhs() - self.expect(token.OP, ")") - return a, z - elif self.type in (token.NAME, token.STRING): - a = NFAState() - z = NFAState() - a.addarc(z, self.value) - self.gettoken() - return a, z - else: - self.raise_error( - "expected (...) or NAME or STRING, got %s/%s", self.type, self.value - ) - - def expect(self, type, value=None): - if self.type != type or (value is not None and self.value != value): - self.raise_error( - "expected %s/%s, got %s/%s", type, value, self.type, self.value - ) - value = self.value - self.gettoken() - return value - - def gettoken(self): - tup = next(self.generator) - while tup[0] in (tokenize.COMMENT, tokenize.NL): - tup = next(self.generator) - self.type, self.value, self.begin, self.end, self.line = tup - # print token.tok_name[self.type], repr(self.value) - - def raise_error(self, msg, *args): - if args: - try: - msg = msg % args - except: - msg = " ".join([msg] + list(map(str, args))) - raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line)) - - -class NFAState(object): - def __init__(self): - self.arcs = [] # list of (label, NFAState) pairs - - def addarc(self, next, label=None): - assert label is None or isinstance(label, str) - assert isinstance(next, NFAState) - self.arcs.append((label, next)) - - -class DFAState(object): - def __init__(self, nfaset, final): - assert isinstance(nfaset, dict) - assert isinstance(next(iter(nfaset)), NFAState) - assert isinstance(final, NFAState) - self.nfaset = nfaset - self.isfinal = final in nfaset - self.arcs = {} # map from label to DFAState - - def addarc(self, next, label): - assert isinstance(label, str) - assert label not in self.arcs - assert isinstance(next, DFAState) - self.arcs[label] = next - - def unifystate(self, old, new): - for label, next in self.arcs.items(): - if next is old: - self.arcs[label] = new - - def __eq__(self, other): - # Equality test -- ignore the nfaset instance variable - assert isinstance(other, DFAState) - if self.isfinal != other.isfinal: - return False - # Can't just return self.arcs == other.arcs, because that - # would invoke this method recursively, with cycles... 
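Annotation: pgen.py closes (just below) with the generate_grammar() entry point, which runs the whole tokenize, NFA, DFA, tables pipeline on a grammar file. A sketch with a hypothetical two-rule grammar, assuming blib2to3 is importable:

import os, tempfile
from blib2to3.pgen2 import pgen

src = "start: expr NEWLINE ENDMARKER\nexpr: NAME ('+' NAME)*\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as f:
    f.write(src)
try:
    g = pgen.generate_grammar(f.name)  # tokenize -> NFA -> DFA -> parsing tables
    print(g.symbol2number)             # e.g. {'start': 256, 'expr': 257}
finally:
    os.unlink(f.name)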
- if len(self.arcs) != len(other.arcs): - return False - for label, next in self.arcs.items(): - if next is not other.arcs.get(label): - return False - return True - - __hash__ = None # For Py3 compatibility. - - -def generate_grammar(filename="Grammar.txt"): - p = ParserGenerator(filename) - return p.make_grammar() diff --git a/venv/lib/python3.8/site-packages/blib2to3/pgen2/token.py b/venv/lib/python3.8/site-packages/blib2to3/pgen2/token.py deleted file mode 100644 index 583a8a15..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/pgen2/token.py +++ /dev/null @@ -1,86 +0,0 @@ -"""Token constants (from "token.h").""" - -# Taken from Python (r53757) and modified to include some tokens -# originally monkeypatched in by pgen2.tokenize - -# --start constants-- -ENDMARKER = 0 -NAME = 1 -NUMBER = 2 -STRING = 3 -NEWLINE = 4 -INDENT = 5 -DEDENT = 6 -LPAR = 7 -RPAR = 8 -LSQB = 9 -RSQB = 10 -COLON = 11 -COMMA = 12 -SEMI = 13 -PLUS = 14 -MINUS = 15 -STAR = 16 -SLASH = 17 -VBAR = 18 -AMPER = 19 -LESS = 20 -GREATER = 21 -EQUAL = 22 -DOT = 23 -PERCENT = 24 -BACKQUOTE = 25 -LBRACE = 26 -RBRACE = 27 -EQEQUAL = 28 -NOTEQUAL = 29 -LESSEQUAL = 30 -GREATEREQUAL = 31 -TILDE = 32 -CIRCUMFLEX = 33 -LEFTSHIFT = 34 -RIGHTSHIFT = 35 -DOUBLESTAR = 36 -PLUSEQUAL = 37 -MINEQUAL = 38 -STAREQUAL = 39 -SLASHEQUAL = 40 -PERCENTEQUAL = 41 -AMPEREQUAL = 42 -VBAREQUAL = 43 -CIRCUMFLEXEQUAL = 44 -LEFTSHIFTEQUAL = 45 -RIGHTSHIFTEQUAL = 46 -DOUBLESTAREQUAL = 47 -DOUBLESLASH = 48 -DOUBLESLASHEQUAL = 49 -AT = 50 -ATEQUAL = 51 -OP = 52 -COMMENT = 53 -NL = 54 -RARROW = 55 -AWAIT = 56 -ASYNC = 57 -ERRORTOKEN = 58 -COLONEQUAL = 59 -N_TOKENS = 60 -NT_OFFSET = 256 -# --end constants-- - -tok_name = {} -for _name, _value in list(globals().items()): - if type(_value) is type(0): - tok_name[_value] = _name - - -def ISTERMINAL(x): - return x < NT_OFFSET - - -def ISNONTERMINAL(x): - return x >= NT_OFFSET - - -def ISEOF(x): - return x == ENDMARKER diff --git a/venv/lib/python3.8/site-packages/blib2to3/pgen2/tokenize.py b/venv/lib/python3.8/site-packages/blib2to3/pgen2/tokenize.py deleted file mode 100644 index 8c1c4cfa..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/pgen2/tokenize.py +++ /dev/null @@ -1,652 +0,0 @@ -# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation. -# All rights reserved. - -"""Tokenization help for Python programs. - -generate_tokens(readline) is a generator that breaks a stream of -text into Python tokens. It accepts a readline-like method which is called -repeatedly to get the next line of input (or "" for EOF). It generates -5-tuples with these members: - - the token type (see token.py) - the token (a string) - the starting (row, column) indices of the token (a 2-tuple of ints) - the ending (row, column) indices of the token (a 2-tuple of ints) - the original line (string) - -It is designed to match the working of the Python tokenizer exactly, except -that it produces COMMENT tokens for comments and gives type OP for all -operators - -Older entry points - tokenize_loop(readline, tokeneater) - tokenize(readline, tokeneater=printtoken) -are the same, except instead of generating tokens, tokeneater is a callback -function to which the 5 fields described above are passed as 5 arguments, -each time a new token is found.""" - -__author__ = "Ka-Ping Yee " -__credits__ = "GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro" - -import regex as re -from codecs import BOM_UTF8, lookup -from blib2to3.pgen2.token import * - -from . 
import token - -__all__ = [x for x in dir(token) if x[0] != "_"] + [ - "tokenize", - "generate_tokens", - "untokenize", -] -del token - -try: - bytes -except NameError: - # Support bytes type in Python <= 2.5, so 2to3 turns itself into - # valid Python 3 code. - bytes = str - - -def group(*choices): - return "(" + "|".join(choices) + ")" - - -def any(*choices): - return group(*choices) + "*" - - -def maybe(*choices): - return group(*choices) + "?" - - -def _combinations(*l): - return set(x + y for x in l for y in l + ("",) if x.casefold() != y.casefold()) - - -Whitespace = r"[ \f\t]*" -Comment = r"#[^\r\n]*" -Ignore = Whitespace + any(r"\\\r?\n" + Whitespace) + maybe(Comment) -Name = r"\w+" # this is invalid but it's fine because Name comes after Number in all groups - -Binnumber = r"0[bB]_?[01]+(?:_[01]+)*" -Hexnumber = r"0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?" -Octnumber = r"0[oO]?_?[0-7]+(?:_[0-7]+)*[lL]?" -Decnumber = group(r"[1-9]\d*(?:_\d+)*[lL]?", "0[lL]?") -Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber) -Exponent = r"[eE][-+]?\d+(?:_\d+)*" -Pointfloat = group(r"\d+(?:_\d+)*\.(?:\d+(?:_\d+)*)?", r"\.\d+(?:_\d+)*") + maybe( - Exponent -) -Expfloat = r"\d+(?:_\d+)*" + Exponent -Floatnumber = group(Pointfloat, Expfloat) -Imagnumber = group(r"\d+(?:_\d+)*[jJ]", Floatnumber + r"[jJ]") -Number = group(Imagnumber, Floatnumber, Intnumber) - -# Tail end of ' string. -Single = r"[^'\\]*(?:\\.[^'\\]*)*'" -# Tail end of " string. -Double = r'[^"\\]*(?:\\.[^"\\]*)*"' -# Tail end of ''' string. -Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" -# Tail end of """ string. -Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' -_litprefix = r"(?:[uUrRbBfF]|[rR][fFbB]|[fFbBuU][rR])?" -Triple = group(_litprefix + "'''", _litprefix + '"""') -# Single-line ' or " string. -String = group( - _litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", - _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"', -) - -# Because of leftmost-then-longest match semantics, be sure to put the -# longest operators first (e.g., if = came before ==, == would get -# recognized as two instances of =). -Operator = group( - r"\*\*=?", - r">>=?", - r"<<=?", - r"<>", - r"!=", - r"//=?", - r"->", - r"[+\-*/%&@|^=<>:]=?", - r"~", -) - -Bracket = "[][(){}]" -Special = group(r"\r?\n", r"[:;.,`@]") -Funny = group(Operator, Bracket, Special) - -PlainToken = group(Number, Funny, String, Name) -Token = Ignore + PlainToken - -# First (or only) line of ' or " string. 
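Annotation: the ordering comment above is load-bearing. Alternation in both the stdlib re module and the regex module used here tries branches left to right, so a short operator listed first would split a longer one. A quick illustration:

import re

bad, good = re.compile("=|=="), re.compile("==|=")
print(bad.findall("a == b"))   # ['=', '=']: '=' wins first and splits '=='
print(good.findall("a == b"))  # ['==']: the longest operator, listed first, takes the whole token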
-ContStr = group( - _litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r"\\\r?\n"), - _litprefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r"\\\r?\n"), -) -PseudoExtras = group(r"\\\r?\n", Comment, Triple) -PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name) - -tokenprog = re.compile(Token, re.UNICODE) -pseudoprog = re.compile(PseudoToken, re.UNICODE) -single3prog = re.compile(Single3) -double3prog = re.compile(Double3) - -_strprefixes = ( - _combinations("r", "R", "f", "F") - | _combinations("r", "R", "b", "B") - | {"u", "U", "ur", "uR", "Ur", "UR"} -) - -endprogs = { - "'": re.compile(Single), - '"': re.compile(Double), - "'''": single3prog, - '"""': double3prog, - **{f"{prefix}'''": single3prog for prefix in _strprefixes}, - **{f'{prefix}"""': double3prog for prefix in _strprefixes}, - **{prefix: None for prefix in _strprefixes}, -} - -triple_quoted = ( - {"'''", '"""'} - | {f"{prefix}'''" for prefix in _strprefixes} - | {f'{prefix}"""' for prefix in _strprefixes} -) -single_quoted = ( - {"'", '"'} - | {f"{prefix}'" for prefix in _strprefixes} - | {f'{prefix}"' for prefix in _strprefixes} -) - -tabsize = 8 - - -class TokenError(Exception): - pass - - -class StopTokenizing(Exception): - pass - - -def printtoken(type, token, xxx_todo_changeme, xxx_todo_changeme1, line): # for testing - (srow, scol) = xxx_todo_changeme - (erow, ecol) = xxx_todo_changeme1 - print( - "%d,%d-%d,%d:\t%s\t%s" % (srow, scol, erow, ecol, tok_name[type], repr(token)) - ) - - -def tokenize(readline, tokeneater=printtoken): - """ - The tokenize() function accepts two parameters: one representing the - input stream, and one providing an output mechanism for tokenize(). - - The first parameter, readline, must be a callable object which provides - the same interface as the readline() method of built-in file objects. - Each call to the function should return one line of input as a string. - - The second parameter, tokeneater, must also be a callable object. It is - called once for each token, with five arguments, corresponding to the - tuples generated by generate_tokens(). 
- """ - try: - tokenize_loop(readline, tokeneater) - except StopTokenizing: - pass - - -# backwards compatible interface -def tokenize_loop(readline, tokeneater): - for token_info in generate_tokens(readline): - tokeneater(*token_info) - - -class Untokenizer: - def __init__(self): - self.tokens = [] - self.prev_row = 1 - self.prev_col = 0 - - def add_whitespace(self, start): - row, col = start - assert row <= self.prev_row - col_offset = col - self.prev_col - if col_offset: - self.tokens.append(" " * col_offset) - - def untokenize(self, iterable): - for t in iterable: - if len(t) == 2: - self.compat(t, iterable) - break - tok_type, token, start, end, line = t - self.add_whitespace(start) - self.tokens.append(token) - self.prev_row, self.prev_col = end - if tok_type in (NEWLINE, NL): - self.prev_row += 1 - self.prev_col = 0 - return "".join(self.tokens) - - def compat(self, token, iterable): - startline = False - indents = [] - toks_append = self.tokens.append - toknum, tokval = token - if toknum in (NAME, NUMBER): - tokval += " " - if toknum in (NEWLINE, NL): - startline = True - for tok in iterable: - toknum, tokval = tok[:2] - - if toknum in (NAME, NUMBER, ASYNC, AWAIT): - tokval += " " - - if toknum == INDENT: - indents.append(tokval) - continue - elif toknum == DEDENT: - indents.pop() - continue - elif toknum in (NEWLINE, NL): - startline = True - elif startline and indents: - toks_append(indents[-1]) - startline = False - toks_append(tokval) - - -cookie_re = re.compile(r"^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)", re.ASCII) -blank_re = re.compile(br"^[ \t\f]*(?:[#\r\n]|$)", re.ASCII) - - -def _get_normal_name(orig_enc): - """Imitates get_normal_name in tokenizer.c.""" - # Only care about the first 12 characters. - enc = orig_enc[:12].lower().replace("_", "-") - if enc == "utf-8" or enc.startswith("utf-8-"): - return "utf-8" - if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or enc.startswith( - ("latin-1-", "iso-8859-1-", "iso-latin-1-") - ): - return "iso-8859-1" - return orig_enc - - -def detect_encoding(readline): - """ - The detect_encoding() function is used to detect the encoding that should - be used to decode a Python source file. It requires one argument, readline, - in the same way as the tokenize() generator. - - It will call readline a maximum of twice, and return the encoding used - (as a string) and a list of any lines (left as bytes) it has read - in. - - It detects the encoding from the presence of a utf-8 bom or an encoding - cookie as specified in pep-0263. If both a bom and a cookie are present, but - disagree, a SyntaxError will be raised. If the encoding cookie is an invalid - charset, raise a SyntaxError. Note that if a utf-8 bom is found, - 'utf-8-sig' is returned. - - If no encoding is specified, then the default of 'utf-8' will be returned. 
- """ - bom_found = False - encoding = None - default = "utf-8" - - def read_or_stop(): - try: - return readline() - except StopIteration: - return bytes() - - def find_cookie(line): - try: - line_string = line.decode("ascii") - except UnicodeDecodeError: - return None - match = cookie_re.match(line_string) - if not match: - return None - encoding = _get_normal_name(match.group(1)) - try: - codec = lookup(encoding) - except LookupError: - # This behaviour mimics the Python interpreter - raise SyntaxError("unknown encoding: " + encoding) - - if bom_found: - if codec.name != "utf-8": - # This behaviour mimics the Python interpreter - raise SyntaxError("encoding problem: utf-8") - encoding += "-sig" - return encoding - - first = read_or_stop() - if first.startswith(BOM_UTF8): - bom_found = True - first = first[3:] - default = "utf-8-sig" - if not first: - return default, [] - - encoding = find_cookie(first) - if encoding: - return encoding, [first] - if not blank_re.match(first): - return default, [first] - - second = read_or_stop() - if not second: - return default, [first] - - encoding = find_cookie(second) - if encoding: - return encoding, [first, second] - - return default, [first, second] - - -def untokenize(iterable): - """Transform tokens back into Python source code. - - Each element returned by the iterable must be a token sequence - with at least two elements, a token number and token value. If - only two tokens are passed, the resulting output is poor. - - Round-trip invariant for full input: - Untokenized source will match input source exactly - - Round-trip invariant for limited intput: - # Output text will tokenize the back to the input - t1 = [tok[:2] for tok in generate_tokens(f.readline)] - newcode = untokenize(t1) - readline = iter(newcode.splitlines(1)).next - t2 = [tok[:2] for tokin generate_tokens(readline)] - assert t1 == t2 - """ - ut = Untokenizer() - return ut.untokenize(iterable) - - -def generate_tokens(readline, grammar=None): - """ - The generate_tokens() generator requires one argument, readline, which - must be a callable object which provides the same interface as the - readline() method of built-in file objects. Each call to the function - should return one line of input as a string. Alternately, readline - can be a callable function terminating with StopIteration: - readline = open(myfile).next # Example of alternate readline - - The generator produces 5-tuples with these members: the token type; the - token string; a 2-tuple (srow, scol) of ints specifying the row and - column where the token begins in the source; a 2-tuple (erow, ecol) of - ints specifying the row and column where the token ends in the source; - and the line on which the token was found. The line passed is the - logical line; continuation lines are included. - """ - lnum = parenlev = continued = 0 - numchars = "0123456789" - contstr, needcont = "", 0 - contline = None - indents = [0] - - # If we know we're parsing 3.7+, we can unconditionally parse `async` and - # `await` as keywords. 
- async_keywords = False if grammar is None else grammar.async_keywords - # 'stashed' and 'async_*' are used for async/await parsing - stashed = None - async_def = False - async_def_indent = 0 - async_def_nl = False - - while 1: # loop over lines in stream - try: - line = readline() - except StopIteration: - line = "" - lnum = lnum + 1 - pos, max = 0, len(line) - - if contstr: # continued string - if not line: - raise TokenError("EOF in multi-line string", strstart) - endmatch = endprog.match(line) - if endmatch: - pos = end = endmatch.end(0) - yield ( - STRING, - contstr + line[:end], - strstart, - (lnum, end), - contline + line, - ) - contstr, needcont = "", 0 - contline = None - elif needcont and line[-2:] != "\\\n" and line[-3:] != "\\\r\n": - yield ( - ERRORTOKEN, - contstr + line, - strstart, - (lnum, len(line)), - contline, - ) - contstr = "" - contline = None - continue - else: - contstr = contstr + line - contline = contline + line - continue - - elif parenlev == 0 and not continued: # new statement - if not line: - break - column = 0 - while pos < max: # measure leading whitespace - if line[pos] == " ": - column = column + 1 - elif line[pos] == "\t": - column = (column // tabsize + 1) * tabsize - elif line[pos] == "\f": - column = 0 - else: - break - pos = pos + 1 - if pos == max: - break - - if stashed: - yield stashed - stashed = None - - if line[pos] in "\r\n": # skip blank lines - yield (NL, line[pos:], (lnum, pos), (lnum, len(line)), line) - continue - - if line[pos] == "#": # skip comments - comment_token = line[pos:].rstrip("\r\n") - nl_pos = pos + len(comment_token) - yield ( - COMMENT, - comment_token, - (lnum, pos), - (lnum, pos + len(comment_token)), - line, - ) - yield (NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) - continue - - if column > indents[-1]: # count indents - indents.append(column) - yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) - - while column < indents[-1]: # count dedents - if column not in indents: - raise IndentationError( - "unindent does not match any outer indentation level", - ("", lnum, pos, line), - ) - indents = indents[:-1] - - if async_def and async_def_indent >= indents[-1]: - async_def = False - async_def_nl = False - async_def_indent = 0 - - yield (DEDENT, "", (lnum, pos), (lnum, pos), line) - - if async_def and async_def_nl and async_def_indent >= indents[-1]: - async_def = False - async_def_nl = False - async_def_indent = 0 - - else: # continued statement - if not line: - raise TokenError("EOF in multi-line statement", (lnum, 0)) - continued = 0 - - while pos < max: - pseudomatch = pseudoprog.match(line, pos) - if pseudomatch: # scan for tokens - start, end = pseudomatch.span(1) - spos, epos, pos = (lnum, start), (lnum, end), end - token, initial = line[start:end], line[start] - - if initial in numchars or ( - initial == "." and token != "." 
- ): # ordinary number - yield (NUMBER, token, spos, epos, line) - elif initial in "\r\n": - newline = NEWLINE - if parenlev > 0: - newline = NL - elif async_def: - async_def_nl = True - if stashed: - yield stashed - stashed = None - yield (newline, token, spos, epos, line) - - elif initial == "#": - assert not token.endswith("\n") - if stashed: - yield stashed - stashed = None - yield (COMMENT, token, spos, epos, line) - elif token in triple_quoted: - endprog = endprogs[token] - endmatch = endprog.match(line, pos) - if endmatch: # all on one line - pos = endmatch.end(0) - token = line[start:pos] - if stashed: - yield stashed - stashed = None - yield (STRING, token, spos, (lnum, pos), line) - else: - strstart = (lnum, start) # multiple lines - contstr = line[start:] - contline = line - break - elif ( - initial in single_quoted - or token[:2] in single_quoted - or token[:3] in single_quoted - ): - if token[-1] == "\n": # continued string - strstart = (lnum, start) - endprog = ( - endprogs[initial] - or endprogs[token[1]] - or endprogs[token[2]] - ) - contstr, needcont = line[start:], 1 - contline = line - break - else: # ordinary string - if stashed: - yield stashed - stashed = None - yield (STRING, token, spos, epos, line) - elif initial.isidentifier(): # ordinary name - if token in ("async", "await"): - if async_keywords or async_def: - yield ( - ASYNC if token == "async" else AWAIT, - token, - spos, - epos, - line, - ) - continue - - tok = (NAME, token, spos, epos, line) - if token == "async" and not stashed: - stashed = tok - continue - - if token in ("def", "for"): - if stashed and stashed[0] == NAME and stashed[1] == "async": - - if token == "def": - async_def = True - async_def_indent = indents[-1] - - yield ( - ASYNC, - stashed[1], - stashed[2], - stashed[3], - stashed[4], - ) - stashed = None - - if stashed: - yield stashed - stashed = None - - yield tok - elif initial == "\\": # continued stmt - # This yield is new; needed for better idempotency: - if stashed: - yield stashed - stashed = None - yield (NL, token, spos, (lnum, pos), line) - continued = 1 - else: - if initial in "([{": - parenlev = parenlev + 1 - elif initial in ")]}": - parenlev = parenlev - 1 - if stashed: - yield stashed - stashed = None - yield (OP, token, spos, epos, line) - else: - yield (ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos + 1), line) - pos = pos + 1 - - if stashed: - yield stashed - stashed = None - - for indent in indents[1:]: # pop remaining indent levels - yield (DEDENT, "", (lnum, 0), (lnum, 0), "") - yield (ENDMARKER, "", (lnum, 0), (lnum, 0), "") - - -if __name__ == "__main__": # testing - import sys - - if len(sys.argv) > 1: - tokenize(open(sys.argv[1]).readline) - else: - tokenize(sys.stdin.readline) diff --git a/venv/lib/python3.8/site-packages/blib2to3/pygram.py b/venv/lib/python3.8/site-packages/blib2to3/pygram.py deleted file mode 100644 index 7614af73..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/pygram.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -"""Export the Python grammar and symbols.""" - -# Python imports -import os - -# Local imports -from .pgen2 import token -from .pgen2 import driver - -# The grammar file -_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt") -_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "PatternGrammar.txt") - - -class Symbols(object): - def __init__(self, grammar): - """Initializer. 
- - Creates an attribute for each grammar symbol (nonterminal), - whose value is the symbol's type (an int >= 256). - """ - for name, symbol in grammar.symbol2number.items(): - setattr(self, name, symbol) - - -def initialize(cache_dir=None): - global python_grammar - global python_grammar_no_print_statement - global python_grammar_no_print_statement_no_exec_statement - global python_grammar_no_print_statement_no_exec_statement_async_keywords - global python_symbols - global pattern_grammar - global pattern_symbols - - # Python 2 - python_grammar = driver.load_packaged_grammar("blib2to3", _GRAMMAR_FILE, cache_dir) - - python_symbols = Symbols(python_grammar) - - # Python 2 + from __future__ import print_function - python_grammar_no_print_statement = python_grammar.copy() - del python_grammar_no_print_statement.keywords["print"] - - # Python 3.0-3.6 - python_grammar_no_print_statement_no_exec_statement = python_grammar.copy() - del python_grammar_no_print_statement_no_exec_statement.keywords["print"] - del python_grammar_no_print_statement_no_exec_statement.keywords["exec"] - - # Python 3.7+ - python_grammar_no_print_statement_no_exec_statement_async_keywords = ( - python_grammar_no_print_statement_no_exec_statement.copy() - ) - python_grammar_no_print_statement_no_exec_statement_async_keywords.async_keywords = ( - True - ) - - pattern_grammar = driver.load_packaged_grammar( - "blib2to3", _PATTERN_GRAMMAR_FILE, cache_dir - ) - pattern_symbols = Symbols(pattern_grammar) diff --git a/venv/lib/python3.8/site-packages/blib2to3/pytree.py b/venv/lib/python3.8/site-packages/blib2to3/pytree.py deleted file mode 100644 index 6776491c..00000000 --- a/venv/lib/python3.8/site-packages/blib2to3/pytree.py +++ /dev/null @@ -1,889 +0,0 @@ -# Copyright 2006 Google, Inc. All Rights Reserved. -# Licensed to PSF under a Contributor Agreement. - -""" -Python parse tree definitions. - -This is a very concrete parse tree; we need to keep every token and -even the comments and whitespace between tokens. - -There's also a pattern matching implementation here. -""" - -__author__ = "Guido van Rossum " - -import sys -from io import StringIO - -HUGE = 0x7FFFFFFF # maximum repeat count, default max - -_type_reprs = {} - - -def type_repr(type_num): - global _type_reprs - if not _type_reprs: - from .pygram import python_symbols - - # printing tokens is possible but not as useful - # from .pgen2 import token // token.__dict__.items(): - for name, val in python_symbols.__dict__.items(): - if type(val) == int: - _type_reprs[val] = name - return _type_reprs.setdefault(type_num, type_num) - - -class Base(object): - - """ - Abstract base class for Node and Leaf. - - This provides some default functionality and boilerplate using the - template pattern. - - A node may be a subnode of at most one parent. - """ - - # Default values for instance variables - type = None # int: token number (< 256) or symbol number (>= 256) - parent = None # Parent node pointer, or None - children = () # Tuple of subnodes - was_changed = False - was_checked = False - - def __new__(cls, *args, **kwds): - """Constructor that prevents Base from being instantiated.""" - assert cls is not Base, "Cannot instantiate Base" - return object.__new__(cls) - - def __eq__(self, other): - """ - Compare two nodes for equality. - - This calls the method _eq(). - """ - if self.__class__ is not other.__class__: - return NotImplemented - return self._eq(other) - - __hash__ = None # For Py3 compatibility. - - def _eq(self, other): - """ - Compare two nodes for equality. 
- - This is called by __eq__ and __ne__. It is only called if the two nodes - have the same type. This must be implemented by the concrete subclass. - Nodes should be considered equal if they have the same structure, - ignoring the prefix string and other context information. - """ - raise NotImplementedError - - def clone(self): - """ - Return a cloned (deep) copy of self. - - This must be implemented by the concrete subclass. - """ - raise NotImplementedError - - def post_order(self): - """ - Return a post-order iterator for the tree. - - This must be implemented by the concrete subclass. - """ - raise NotImplementedError - - def pre_order(self): - """ - Return a pre-order iterator for the tree. - - This must be implemented by the concrete subclass. - """ - raise NotImplementedError - - def replace(self, new): - """Replace this node with a new one in the parent.""" - assert self.parent is not None, str(self) - assert new is not None - if not isinstance(new, list): - new = [new] - l_children = [] - found = False - for ch in self.parent.children: - if ch is self: - assert not found, (self.parent.children, self, new) - if new is not None: - l_children.extend(new) - found = True - else: - l_children.append(ch) - assert found, (self.children, self, new) - self.parent.children = l_children - self.parent.changed() - self.parent.invalidate_sibling_maps() - for x in new: - x.parent = self.parent - self.parent = None - - def get_lineno(self): - """Return the line number which generated the invocant node.""" - node = self - while not isinstance(node, Leaf): - if not node.children: - return - node = node.children[0] - return node.lineno - - def changed(self): - if self.was_changed: - return - if self.parent: - self.parent.changed() - self.was_changed = True - - def remove(self): - """ - Remove the node from the tree. Returns the position of the node in its - parent's children before it was removed. - """ - if self.parent: - for i, node in enumerate(self.parent.children): - if node is self: - del self.parent.children[i] - self.parent.changed() - self.parent.invalidate_sibling_maps() - self.parent = None - return i - - @property - def next_sibling(self): - """ - The node immediately following the invocant in their parent's children - list. If the invocant does not have a next sibling, it is None - """ - if self.parent is None: - return None - - if self.parent.next_sibling_map is None: - self.parent.update_sibling_maps() - return self.parent.next_sibling_map[id(self)] - - @property - def prev_sibling(self): - """ - The node immediately preceding the invocant in their parent's children - list. If the invocant does not have a previous sibling, it is None. - """ - if self.parent is None: - return None - - if self.parent.prev_sibling_map is None: - self.parent.update_sibling_maps() - return self.parent.prev_sibling_map[id(self)] - - def leaves(self): - for child in self.children: - yield from child.leaves() - - def depth(self): - if self.parent is None: - return 0 - return 1 + self.parent.depth() - - def get_suffix(self): - """ - Return the string immediately following the invocant node. 
This is - effectively equivalent to node.next_sibling.prefix - """ - next_sib = self.next_sibling - if next_sib is None: - return "" - return next_sib.prefix - - if sys.version_info < (3, 0): - - def __str__(self): - return str(self).encode("ascii") - - -class Node(Base): - - """Concrete implementation for interior nodes.""" - - def __init__(self, type, children, context=None, prefix=None, fixers_applied=None): - """ - Initializer. - - Takes a type constant (a symbol number >= 256), a sequence of - child nodes, and an optional context keyword argument. - - As a side effect, the parent pointers of the children are updated. - """ - assert type >= 256, type - self.type = type - self.children = list(children) - for ch in self.children: - assert ch.parent is None, repr(ch) - ch.parent = self - self.invalidate_sibling_maps() - if prefix is not None: - self.prefix = prefix - if fixers_applied: - self.fixers_applied = fixers_applied[:] - else: - self.fixers_applied = None - - def __repr__(self): - """Return a canonical string representation.""" - return "%s(%s, %r)" % ( - self.__class__.__name__, - type_repr(self.type), - self.children, - ) - - def __unicode__(self): - """ - Return a pretty string representation. - - This reproduces the input source exactly. - """ - return "".join(map(str, self.children)) - - if sys.version_info > (3, 0): - __str__ = __unicode__ - - def _eq(self, other): - """Compare two nodes for equality.""" - return (self.type, self.children) == (other.type, other.children) - - def clone(self): - """Return a cloned (deep) copy of self.""" - return Node( - self.type, - [ch.clone() for ch in self.children], - fixers_applied=self.fixers_applied, - ) - - def post_order(self): - """Return a post-order iterator for the tree.""" - for child in self.children: - yield from child.post_order() - yield self - - def pre_order(self): - """Return a pre-order iterator for the tree.""" - yield self - for child in self.children: - yield from child.pre_order() - - @property - def prefix(self): - """ - The whitespace and comments preceding this node in the input. - """ - if not self.children: - return "" - return self.children[0].prefix - - @prefix.setter - def prefix(self, prefix): - if self.children: - self.children[0].prefix = prefix - - def set_child(self, i, child): - """ - Equivalent to 'node.children[i] = child'. This method also sets the - child's parent attribute appropriately. - """ - child.parent = self - self.children[i].parent = None - self.children[i] = child - self.changed() - self.invalidate_sibling_maps() - - def insert_child(self, i, child): - """ - Equivalent to 'node.children.insert(i, child)'. This method also sets - the child's parent attribute appropriately. - """ - child.parent = self - self.children.insert(i, child) - self.changed() - self.invalidate_sibling_maps() - - def append_child(self, child): - """ - Equivalent to 'node.children.append(child)'. This method also sets the - child's parent attribute appropriately. 
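# [Editor's example] A small round-trip sketch, not in the original diff, of
# the tree API documented above; assumes blib2to3.pytree is importable.
# 256 is the first symbol number, 1 is token.NAME.
from blib2to3.pytree import Node, Leaf

a, b = Leaf(1, "a"), Leaf(1, "b", prefix=" ")
tree = Node(256, [a, b])
assert str(tree) == "a b"           # str() reproduces the source exactly
a.replace(Leaf(1, "c"))             # splice a replacement into the parent
assert str(tree) == "c b"
assert b.prev_sibling.value == "c"  # sibling maps are rebuilt lazily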
- """ - child.parent = self - self.children.append(child) - self.changed() - self.invalidate_sibling_maps() - - def invalidate_sibling_maps(self): - self.prev_sibling_map = None - self.next_sibling_map = None - - def update_sibling_maps(self): - self.prev_sibling_map = _prev = {} - self.next_sibling_map = _next = {} - previous = None - for current in self.children: - _prev[id(current)] = previous - _next[id(previous)] = current - previous = current - _next[id(current)] = None - - -class Leaf(Base): - - """Concrete implementation for leaf nodes.""" - - # Default values for instance variables - _prefix = "" # Whitespace and comments preceding this token in the input - lineno = 0 # Line where this token starts in the input - column = 0 # Column where this token starts in the input - - def __init__(self, type, value, context=None, prefix=None, fixers_applied=[]): - """ - Initializer. - - Takes a type constant (a token number < 256), a string value, and an - optional context keyword argument. - """ - assert 0 <= type < 256, type - if context is not None: - self._prefix, (self.lineno, self.column) = context - self.type = type - self.value = value - if prefix is not None: - self._prefix = prefix - self.fixers_applied = fixers_applied[:] - - def __repr__(self): - """Return a canonical string representation.""" - from .pgen2.token import tok_name - - return "%s(%s, %r)" % ( - self.__class__.__name__, - tok_name.get(self.type, self.type), - self.value, - ) - - def __unicode__(self): - """ - Return a pretty string representation. - - This reproduces the input source exactly. - """ - return self.prefix + str(self.value) - - if sys.version_info > (3, 0): - __str__ = __unicode__ - - def _eq(self, other): - """Compare two nodes for equality.""" - return (self.type, self.value) == (other.type, other.value) - - def clone(self): - """Return a cloned (deep) copy of self.""" - return Leaf( - self.type, - self.value, - (self.prefix, (self.lineno, self.column)), - fixers_applied=self.fixers_applied, - ) - - def leaves(self): - yield self - - def post_order(self): - """Return a post-order iterator for the tree.""" - yield self - - def pre_order(self): - """Return a pre-order iterator for the tree.""" - yield self - - @property - def prefix(self): - """ - The whitespace and comments preceding this token in the input. - """ - return self._prefix - - @prefix.setter - def prefix(self, prefix): - self.changed() - self._prefix = prefix - - -def convert(gr, raw_node): - """ - Convert raw node information to a Node or Leaf instance. - - This is passed to the parser driver which calls it whenever a reduction of a - grammar rule produces a new complete node, so that the tree is build - strictly bottom-up. - """ - type, value, context, children = raw_node - if children or type in gr.number2symbol: - # If there's exactly one child, return that child instead of - # creating a new node. - if len(children) == 1: - return children[0] - return Node(type, children, context=context) - else: - return Leaf(type, value, context=context) - - -class BasePattern(object): - - """ - A pattern is a tree matching pattern. - - It looks for a specific node type (token or symbol), and - optionally for a specific content. - - This is an abstract base class. There are three concrete - subclasses: - - - LeafPattern matches a single leaf node; - - NodePattern matches a single node (usually non-leaf); - - WildcardPattern matches a sequence of nodes of variable length. 
- """ - - # Defaults for instance variables - type = None # Node type (token if < 256, symbol if >= 256) - content = None # Optional content matching pattern - name = None # Optional name used to store match in results dict - - def __new__(cls, *args, **kwds): - """Constructor that prevents BasePattern from being instantiated.""" - assert cls is not BasePattern, "Cannot instantiate BasePattern" - return object.__new__(cls) - - def __repr__(self): - args = [type_repr(self.type), self.content, self.name] - while args and args[-1] is None: - del args[-1] - return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args))) - - def optimize(self): - """ - A subclass can define this as a hook for optimizations. - - Returns either self or another node with the same effect. - """ - return self - - def match(self, node, results=None): - """ - Does this pattern exactly match a node? - - Returns True if it matches, False if not. - - If results is not None, it must be a dict which will be - updated with the nodes matching named subpatterns. - - Default implementation for non-wildcard patterns. - """ - if self.type is not None and node.type != self.type: - return False - if self.content is not None: - r = None - if results is not None: - r = {} - if not self._submatch(node, r): - return False - if r: - results.update(r) - if results is not None and self.name: - results[self.name] = node - return True - - def match_seq(self, nodes, results=None): - """ - Does this pattern exactly match a sequence of nodes? - - Default implementation for non-wildcard patterns. - """ - if len(nodes) != 1: - return False - return self.match(nodes[0], results) - - def generate_matches(self, nodes): - """ - Generator yielding all matches for this pattern. - - Default implementation for non-wildcard patterns. - """ - r = {} - if nodes and self.match(nodes[0], r): - yield 1, r - - -class LeafPattern(BasePattern): - def __init__(self, type=None, content=None, name=None): - """ - Initializer. Takes optional type, content, and name. - - The type, if given must be a token type (< 256). If not given, - this matches any *leaf* node; the content may still be required. - - The content, if given, must be a string. - - If a name is given, the matching node is stored in the results - dict under that key. - """ - if type is not None: - assert 0 <= type < 256, type - if content is not None: - assert isinstance(content, str), repr(content) - self.type = type - self.content = content - self.name = name - - def match(self, node, results=None): - """Override match() to insist on a leaf node.""" - if not isinstance(node, Leaf): - return False - return BasePattern.match(self, node, results) - - def _submatch(self, node, results=None): - """ - Match the pattern's content to the node's children. - - This assumes the node type matches and self.content is not None. - - Returns True if it matches, False if not. - - If results is not None, it must be a dict which will be - updated with the nodes matching named subpatterns. - - When returning False, the results dict may still be updated. - """ - return self.content == node.value - - -class NodePattern(BasePattern): - - wildcards = False - - def __init__(self, type=None, content=None, name=None): - """ - Initializer. Takes optional type, content, and name. - - The type, if given, must be a symbol type (>= 256). If the - type is None this matches *any* single node (leaf or not), - except if content is not None, in which it only matches - non-leaf nodes that also match the content pattern. 
- - The content, if not None, must be a sequence of Patterns that - must match the node's children exactly. If the content is - given, the type must not be None. - - If a name is given, the matching node is stored in the results - dict under that key. - """ - if type is not None: - assert type >= 256, type - if content is not None: - assert not isinstance(content, str), repr(content) - content = list(content) - for i, item in enumerate(content): - assert isinstance(item, BasePattern), (i, item) - if isinstance(item, WildcardPattern): - self.wildcards = True - self.type = type - self.content = content - self.name = name - - def _submatch(self, node, results=None): - """ - Match the pattern's content to the node's children. - - This assumes the node type matches and self.content is not None. - - Returns True if it matches, False if not. - - If results is not None, it must be a dict which will be - updated with the nodes matching named subpatterns. - - When returning False, the results dict may still be updated. - """ - if self.wildcards: - for c, r in generate_matches(self.content, node.children): - if c == len(node.children): - if results is not None: - results.update(r) - return True - return False - if len(self.content) != len(node.children): - return False - for subpattern, child in zip(self.content, node.children): - if not subpattern.match(child, results): - return False - return True - - -class WildcardPattern(BasePattern): - - """ - A wildcard pattern can match zero or more nodes. - - This has all the flexibility needed to implement patterns like: - - .* .+ .? .{m,n} - (a b c | d e | f) - (...)* (...)+ (...)? (...){m,n} - - except it always uses non-greedy matching. - """ - - def __init__(self, content=None, min=0, max=HUGE, name=None): - """ - Initializer. - - Args: - content: optional sequence of subsequences of patterns; - if absent, matches one node; - if present, each subsequence is an alternative [*] - min: optional minimum number of times to match, default 0 - max: optional maximum number of times to match, default HUGE - name: optional name assigned to this match - - [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is - equivalent to (a b c | d e | f g h); if content is None, - this is equivalent to '.' in regular expression terms. - The min and max parameters work as follows: - min=0, max=maxint: .* - min=1, max=maxint: .+ - min=0, max=1: .? - min=1, max=1: . - If content is not None, replace the dot with the parenthesized - list of alternatives, e.g. 
(a b c | d e | f g h)* - """ - assert 0 <= min <= max <= HUGE, (min, max) - if content is not None: - content = tuple(map(tuple, content)) # Protect against alterations - # Check sanity of alternatives - assert len(content), repr(content) # Can't have zero alternatives - for alt in content: - assert len(alt), repr(alt) # Can't have empty alternatives - self.content = content - self.min = min - self.max = max - self.name = name - - def optimize(self): - """Optimize certain stacked wildcard patterns.""" - subpattern = None - if ( - self.content is not None - and len(self.content) == 1 - and len(self.content[0]) == 1 - ): - subpattern = self.content[0][0] - if self.min == 1 and self.max == 1: - if self.content is None: - return NodePattern(name=self.name) - if subpattern is not None and self.name == subpattern.name: - return subpattern.optimize() - if ( - self.min <= 1 - and isinstance(subpattern, WildcardPattern) - and subpattern.min <= 1 - and self.name == subpattern.name - ): - return WildcardPattern( - subpattern.content, - self.min * subpattern.min, - self.max * subpattern.max, - subpattern.name, - ) - return self - - def match(self, node, results=None): - """Does this pattern exactly match a node?""" - return self.match_seq([node], results) - - def match_seq(self, nodes, results=None): - """Does this pattern exactly match a sequence of nodes?""" - for c, r in self.generate_matches(nodes): - if c == len(nodes): - if results is not None: - results.update(r) - if self.name: - results[self.name] = list(nodes) - return True - return False - - def generate_matches(self, nodes): - """ - Generator yielding matches for a sequence of nodes. - - Args: - nodes: sequence of nodes - - Yields: - (count, results) tuples where: - count: the match comprises nodes[:count]; - results: dict containing named submatches. - """ - if self.content is None: - # Shortcut for special case (see __init__.__doc__) - for count in range(self.min, 1 + min(len(nodes), self.max)): - r = {} - if self.name: - r[self.name] = nodes[:count] - yield count, r - elif self.name == "bare_name": - yield self._bare_name_matches(nodes) - else: - # The reason for this is that hitting the recursion limit usually - # results in some ugly messages about how RuntimeErrors are being - # ignored. We only have to do this on CPython, though, because other - # implementations don't have this nasty bug in the first place. - if hasattr(sys, "getrefcount"): - save_stderr = sys.stderr - sys.stderr = StringIO() - try: - for count, r in self._recursive_matches(nodes, 0): - if self.name: - r[self.name] = nodes[:count] - yield count, r - except RuntimeError: - # We fall back to the iterative pattern matching scheme if the recursive - # scheme hits the recursion limit.
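# [Editor's example] A sketch, not in the original diff, of the WildcardPattern
# semantics documented above; assumes blib2to3.pytree is importable and that
# token type 1 is NAME.
from blib2to3.pytree import HUGE, Leaf, LeafPattern, WildcardPattern

nodes = [Leaf(1, "a"), Leaf(1, "b")]
assert WildcardPattern(min=0, max=HUGE).match_seq(nodes)   # '.*': any run of nodes

one_a = WildcardPattern([[LeafPattern(1, "a")]], min=1, max=1, name="hit")
results = {}
assert one_a.match_seq([Leaf(1, "a")], results)
assert [leaf.value for leaf in results["hit"]] == ["a"]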
- for count, r in self._iterative_matches(nodes): - if self.name: - r[self.name] = nodes[:count] - yield count, r - finally: - if hasattr(sys, "getrefcount"): - sys.stderr = save_stderr - - def _iterative_matches(self, nodes): - """Helper to iteratively yield the matches.""" - nodelen = len(nodes) - if 0 >= self.min: - yield 0, {} - - results = [] - # generate matches that use just one alt from self.content - for alt in self.content: - for c, r in generate_matches(alt, nodes): - yield c, r - results.append((c, r)) - - # for each match, iterate down the nodes - while results: - new_results = [] - for c0, r0 in results: - # stop if the entire set of nodes has been matched - if c0 < nodelen and c0 <= self.max: - for alt in self.content: - for c1, r1 in generate_matches(alt, nodes[c0:]): - if c1 > 0: - r = {} - r.update(r0) - r.update(r1) - yield c0 + c1, r - new_results.append((c0 + c1, r)) - results = new_results - - def _bare_name_matches(self, nodes): - """Special optimized matcher for bare_name.""" - count = 0 - r = {} - done = False - max = len(nodes) - while not done and count < max: - done = True - for leaf in self.content: - if leaf[0].match(nodes[count], r): - count += 1 - done = False - break - r[self.name] = nodes[:count] - return count, r - - def _recursive_matches(self, nodes, count): - """Helper to recursively yield the matches.""" - assert self.content is not None - if count >= self.min: - yield 0, {} - if count < self.max: - for alt in self.content: - for c0, r0 in generate_matches(alt, nodes): - for c1, r1 in self._recursive_matches(nodes[c0:], count + 1): - r = {} - r.update(r0) - r.update(r1) - yield c0 + c1, r - - -class NegatedPattern(BasePattern): - def __init__(self, content=None): - """ - Initializer. - - The argument is either a pattern or None. If it is None, this - only matches an empty sequence (effectively '$' in regex - lingo). If it is not None, this matches whenever the argument - pattern doesn't have any matches. - """ - if content is not None: - assert isinstance(content, BasePattern), repr(content) - self.content = content - - def match(self, node): - # We never match a node in its entirety - return False - - def match_seq(self, nodes): - # We only match an empty sequence of nodes in its entirety - return len(nodes) == 0 - - def generate_matches(self, nodes): - if self.content is None: - # Return a match if there is an empty sequence - if len(nodes) == 0: - yield 0, {} - else: - # Return a match if the argument pattern has no matches - for c, r in self.content.generate_matches(nodes): - return - yield 0, {} - - -def generate_matches(patterns, nodes): - """ - Generator yielding matches for a sequence of patterns and nodes. - - Args: - patterns: a sequence of patterns - nodes: a sequence of nodes - - Yields: - (count, results) tuples where: - count: the entire sequence of patterns matches nodes[:count]; - results: dict containing named submatches. 
- """ - if not patterns: - yield 0, {} - else: - p, rest = patterns[0], patterns[1:] - for c0, r0 in p.generate_matches(nodes): - if not rest: - yield c0, r0 - else: - for c1, r1 in generate_matches(rest, nodes[c0:]): - r = {} - r.update(r0) - r.update(r1) - yield c0 + c1, r diff --git a/venv/lib/python3.8/site-packages/click/__init__.py b/venv/lib/python3.8/site-packages/click/__init__.py deleted file mode 100644 index d3c33660..00000000 --- a/venv/lib/python3.8/site-packages/click/__init__.py +++ /dev/null @@ -1,97 +0,0 @@ -# -*- coding: utf-8 -*- -""" -click -~~~~~ - -Click is a simple Python module inspired by the stdlib optparse to make -writing command line scripts fun. Unlike other modules, it's based -around a simple API that does not come with too much magic and is -composable. - -:copyright: © 2014 by the Pallets team. -:license: BSD, see LICENSE.rst for more details. -""" - -# Core classes -from .core import Context, BaseCommand, Command, MultiCommand, Group, \ - CommandCollection, Parameter, Option, Argument - -# Globals -from .globals import get_current_context - -# Decorators -from .decorators import pass_context, pass_obj, make_pass_decorator, \ - command, group, argument, option, confirmation_option, \ - password_option, version_option, help_option - -# Types -from .types import ParamType, File, Path, Choice, IntRange, Tuple, \ - DateTime, STRING, INT, FLOAT, BOOL, UUID, UNPROCESSED, FloatRange - -# Utilities -from .utils import echo, get_binary_stream, get_text_stream, open_file, \ - format_filename, get_app_dir, get_os_args - -# Terminal functions -from .termui import prompt, confirm, get_terminal_size, echo_via_pager, \ - progressbar, clear, style, unstyle, secho, edit, launch, getchar, \ - pause - -# Exceptions -from .exceptions import ClickException, UsageError, BadParameter, \ - FileError, Abort, NoSuchOption, BadOptionUsage, BadArgumentUsage, \ - MissingParameter - -# Formatting -from .formatting import HelpFormatter, wrap_text - -# Parsing -from .parser import OptionParser - - -__all__ = [ - # Core classes - 'Context', 'BaseCommand', 'Command', 'MultiCommand', 'Group', - 'CommandCollection', 'Parameter', 'Option', 'Argument', - - # Globals - 'get_current_context', - - # Decorators - 'pass_context', 'pass_obj', 'make_pass_decorator', 'command', 'group', - 'argument', 'option', 'confirmation_option', 'password_option', - 'version_option', 'help_option', - - # Types - 'ParamType', 'File', 'Path', 'Choice', 'IntRange', 'Tuple', - 'DateTime', 'STRING', 'INT', 'FLOAT', 'BOOL', 'UUID', 'UNPROCESSED', - 'FloatRange', - - # Utilities - 'echo', 'get_binary_stream', 'get_text_stream', 'open_file', - 'format_filename', 'get_app_dir', 'get_os_args', - - # Terminal functions - 'prompt', 'confirm', 'get_terminal_size', 'echo_via_pager', - 'progressbar', 'clear', 'style', 'unstyle', 'secho', 'edit', 'launch', - 'getchar', 'pause', - - # Exceptions - 'ClickException', 'UsageError', 'BadParameter', 'FileError', - 'Abort', 'NoSuchOption', 'BadOptionUsage', 'BadArgumentUsage', - 'MissingParameter', - - # Formatting - 'HelpFormatter', 'wrap_text', - - # Parsing - 'OptionParser', -] - - -# Controls if click should emit the warning about the use of unicode -# literals. 
-disable_unicode_literals_warning = False - - -__version__ = '7.0' diff --git a/venv/lib/python3.8/site-packages/click/_bashcomplete.py b/venv/lib/python3.8/site-packages/click/_bashcomplete.py deleted file mode 100644 index a5f1084c..00000000 --- a/venv/lib/python3.8/site-packages/click/_bashcomplete.py +++ /dev/null @@ -1,293 +0,0 @@ -import copy -import os -import re - -from .utils import echo -from .parser import split_arg_string -from .core import MultiCommand, Option, Argument -from .types import Choice - -try: - from collections import abc -except ImportError: - import collections as abc - -WORDBREAK = '=' - -# Note, only BASH version 4.4 and later have the nosort option. -COMPLETION_SCRIPT_BASH = ''' -%(complete_func)s() { - local IFS=$'\n' - COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\ - COMP_CWORD=$COMP_CWORD \\ - %(autocomplete_var)s=complete $1 ) ) - return 0 -} - -%(complete_func)setup() { - local COMPLETION_OPTIONS="" - local BASH_VERSION_ARR=(${BASH_VERSION//./ }) - # Only BASH version 4.4 and later have the nosort option. - if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] && [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then - COMPLETION_OPTIONS="-o nosort" - fi - - complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s -} - -%(complete_func)setup -''' - -COMPLETION_SCRIPT_ZSH = ''' -%(complete_func)s() { - local -a completions - local -a completions_with_descriptions - local -a response - response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\ - COMP_CWORD=$((CURRENT-1)) \\ - %(autocomplete_var)s=\"complete_zsh\" \\ - %(script_names)s )}") - - for key descr in ${(kv)response}; do - if [[ "$descr" == "_" ]]; then - completions+=("$key") - else - completions_with_descriptions+=("$key":"$descr") - fi - done - - if [ -n "$completions_with_descriptions" ]; then - _describe -V unsorted completions_with_descriptions -U -Q - fi - - if [ -n "$completions" ]; then - compadd -U -V unsorted -Q -a completions - fi - compstate[insert]="automenu" -} - -compdef %(complete_func)s %(script_names)s -''' - -_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]') - - -def get_completion_script(prog_name, complete_var, shell): - cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_')) - script = COMPLETION_SCRIPT_ZSH if shell == 'zsh' else COMPLETION_SCRIPT_BASH - return (script % { - 'complete_func': '_%s_completion' % cf_name, - 'script_names': prog_name, - 'autocomplete_var': complete_var, - }).strip() + ';' - - -def resolve_ctx(cli, prog_name, args): - """ - Parse into a hierarchy of contexts. Contexts are connected through the parent variable. - :param cli: command definition - :param prog_name: the program that is running - :param args: full list of args - :return: the final context/command parsed - """ - ctx = cli.make_context(prog_name, args, resilient_parsing=True) - args = ctx.protected_args + ctx.args - while args: - if isinstance(ctx.command, MultiCommand): - if not ctx.command.chain: - cmd_name, cmd, args = ctx.command.resolve_command(ctx, args) - if cmd is None: - return ctx - ctx = cmd.make_context(cmd_name, args, parent=ctx, - resilient_parsing=True) - args = ctx.protected_args + ctx.args - else: - # Walk chained subcommand contexts saving the last one. 
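# [Editor's example] Emitting the completion snippet built by
# get_completion_script() above; 'mycli' and '_MYCLI_COMPLETE' are
# hypothetical names. A user typically activates it with something like
#   eval "$(_MYCLI_COMPLETE=source mycli)"
print(get_completion_script('mycli', '_MYCLI_COMPLETE', 'bash'))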
- while args: - cmd_name, cmd, args = ctx.command.resolve_command(ctx, args) - if cmd is None: - return ctx - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx, - allow_extra_args=True, - allow_interspersed_args=False, - resilient_parsing=True) - args = sub_ctx.args - ctx = sub_ctx - args = sub_ctx.protected_args + sub_ctx.args - else: - break - return ctx - - -def start_of_option(param_str): - """ - :param param_str: param_str to check - :return: whether or not this is the start of an option declaration (i.e. starts "-" or "--") - """ - return param_str and param_str[:1] == '-' - - -def is_incomplete_option(all_args, cmd_param): - """ - :param all_args: the full original list of args supplied - :param cmd_param: the current command parameter - :return: whether or not the last option declaration (i.e. starts "-" or "--") is incomplete and - corresponds to this cmd_param. In other words whether this cmd_param option can still accept - values - """ - if not isinstance(cmd_param, Option): - return False - if cmd_param.is_flag: - return False - last_option = None - for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])): - if index + 1 > cmd_param.nargs: - break - if start_of_option(arg_str): - last_option = arg_str - - return True if last_option and last_option in cmd_param.opts else False - - -def is_incomplete_argument(current_params, cmd_param): - """ - :param current_params: the current params and values for this argument as already entered - :param cmd_param: the current command parameter - :return: whether or not the last argument is incomplete and corresponds to this cmd_param. In - other words whether or not this cmd_param argument can still accept values - """ - if not isinstance(cmd_param, Argument): - return False - current_param_values = current_params[cmd_param.name] - if current_param_values is None: - return True - if cmd_param.nargs == -1: - return True - if isinstance(current_param_values, abc.Iterable) \ - and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs: - return True - return False - - -def get_user_autocompletions(ctx, args, incomplete, cmd_param): - """ - :param ctx: context associated with the parsed command - :param args: full list of args - :param incomplete: the incomplete text to autocomplete - :param cmd_param: command definition - :return: all the possible user-specified completions for the param - """ - results = [] - if isinstance(cmd_param.type, Choice): - # Choices don't support descriptions. - results = [(c, None) - for c in cmd_param.type.choices if str(c).startswith(incomplete)] - elif cmd_param.autocompletion is not None: - dynamic_completions = cmd_param.autocompletion(ctx=ctx, - args=args, - incomplete=incomplete) - results = [c if isinstance(c, tuple) else (c, None) - for c in dynamic_completions] - return results - - -def get_visible_commands_starting_with(ctx, starts_with): - """ - :param ctx: context associated with the parsed command - :param starts_with: string that visible commands must start with. - :return: all visible (not hidden) commands that start with starts_with. - """ - for c in ctx.command.list_commands(ctx): - if c.startswith(starts_with): - command = ctx.command.get_command(ctx, c) - if not command.hidden: - yield command - - -def add_subcommand_completions(ctx, incomplete, completions_out): - # Add subcommand completions.
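# [Editor's example] Tiny demo, not in the original diff, of the argv
# classification done by start_of_option() above.
assert start_of_option('--count')
assert start_of_option('-c')
assert not start_of_option('42')   # a plain value, not an option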
- if isinstance(ctx.command, MultiCommand): - completions_out.extend( - [(c.name, c.get_short_help_str()) for c in get_visible_commands_starting_with(ctx, incomplete)]) - - # Walk up the context list and add any other completion possibilities from chained commands - while ctx.parent is not None: - ctx = ctx.parent - if isinstance(ctx.command, MultiCommand) and ctx.command.chain: - remaining_commands = [c for c in get_visible_commands_starting_with(ctx, incomplete) - if c.name not in ctx.protected_args] - completions_out.extend([(c.name, c.get_short_help_str()) for c in remaining_commands]) - - -def get_choices(cli, prog_name, args, incomplete): - """ - :param cli: command definition - :param prog_name: the program that is running - :param args: full list of args - :param incomplete: the incomplete text to autocomplete - :return: all the possible completions for the incomplete - """ - all_args = copy.deepcopy(args) - - ctx = resolve_ctx(cli, prog_name, args) - if ctx is None: - return [] - - # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse - # without the '=' - if start_of_option(incomplete) and WORDBREAK in incomplete: - partition_incomplete = incomplete.partition(WORDBREAK) - all_args.append(partition_incomplete[0]) - incomplete = partition_incomplete[2] - elif incomplete == WORDBREAK: - incomplete = '' - - completions = [] - if start_of_option(incomplete): - # completions for partial options - for param in ctx.command.params: - if isinstance(param, Option) and not param.hidden: - param_opts = [param_opt for param_opt in param.opts + - param.secondary_opts if param_opt not in all_args or param.multiple] - completions.extend([(o, param.help) for o in param_opts if o.startswith(incomplete)]) - return completions - # completion for option values from user supplied values - for param in ctx.command.params: - if is_incomplete_option(all_args, param): - return get_user_autocompletions(ctx, all_args, incomplete, param) - # completion for argument values from user supplied values - for param in ctx.command.params: - if is_incomplete_argument(ctx.params, param): - return get_user_autocompletions(ctx, all_args, incomplete, param) - - add_subcommand_completions(ctx, incomplete, completions) - # Sort before returning so that proper ordering can be enforced in custom types. - return sorted(completions) - - -def do_complete(cli, prog_name, include_descriptions): - cwords = split_arg_string(os.environ['COMP_WORDS']) - cword = int(os.environ['COMP_CWORD']) - args = cwords[1:cword] - try: - incomplete = cwords[cword] - except IndexError: - incomplete = '' - - for item in get_choices(cli, prog_name, args, incomplete): - echo(item[0]) - if include_descriptions: - # ZSH has trouble dealing with empty array parameters when returned from commands, so use a well defined character '_' to indicate no description is present. 
- echo(item[1] if item[1] else '_') - - return True - - -def bashcomplete(cli, prog_name, complete_var, complete_instr): - if complete_instr.startswith('source'): - shell = 'zsh' if complete_instr == 'source_zsh' else 'bash' - echo(get_completion_script(prog_name, complete_var, shell)) - return True - elif complete_instr == 'complete' or complete_instr == 'complete_zsh': - return do_complete(cli, prog_name, complete_instr == 'complete_zsh') - return False diff --git a/venv/lib/python3.8/site-packages/click/_compat.py b/venv/lib/python3.8/site-packages/click/_compat.py deleted file mode 100644 index 937e2301..00000000 --- a/venv/lib/python3.8/site-packages/click/_compat.py +++ /dev/null @@ -1,703 +0,0 @@ -import re -import io -import os -import sys -import codecs -from weakref import WeakKeyDictionary - - -PY2 = sys.version_info[0] == 2 -CYGWIN = sys.platform.startswith('cygwin') -# Determine local App Engine environment, per Google's own suggestion -APP_ENGINE = ('APPENGINE_RUNTIME' in os.environ and - 'Development/' in os.environ['SERVER_SOFTWARE']) -WIN = sys.platform.startswith('win') and not APP_ENGINE -DEFAULT_COLUMNS = 80 - - -_ansi_re = re.compile(r'\033\[((?:\d|;)*)([a-zA-Z])') - - -def get_filesystem_encoding(): - return sys.getfilesystemencoding() or sys.getdefaultencoding() - - -def _make_text_stream(stream, encoding, errors, - force_readable=False, force_writable=False): - if encoding is None: - encoding = get_best_encoding(stream) - if errors is None: - errors = 'replace' - return _NonClosingTextIOWrapper(stream, encoding, errors, - line_buffering=True, - force_readable=force_readable, - force_writable=force_writable) - - -def is_ascii_encoding(encoding): - """Checks if a given encoding is ascii.""" - try: - return codecs.lookup(encoding).name == 'ascii' - except LookupError: - return False - - -def get_best_encoding(stream): - """Returns the stream's encoding, falling back to the default encoding; ASCII is upgraded to UTF-8.""" - rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding() - if is_ascii_encoding(rv): - return 'utf-8' - return rv - - -class _NonClosingTextIOWrapper(io.TextIOWrapper): - - def __init__(self, stream, encoding, errors, - force_readable=False, force_writable=False, **extra): - self._stream = stream = _FixupStream(stream, force_readable, - force_writable) - io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra) - - # The io module is a place where the Python 3 text behavior - # was forced upon Python 2, so we need to unbreak - # it to look like Python 2. - if PY2: - def write(self, x): - if isinstance(x, str) or is_bytes(x): - try: - self.flush() - except Exception: - pass - return self.buffer.write(str(x)) - return io.TextIOWrapper.write(self, x) - - def writelines(self, lines): - for line in lines: - self.write(line) - - def __del__(self): - try: - self.detach() - except Exception: - pass - - def isatty(self): - # https://bitbucket.org/pypy/pypy/issue/1803 - return self._stream.isatty() - - -class _FixupStream(object): - """The new io interface needs more from streams than streams - traditionally implement. As such, this fix-up code is necessary in - some circumstances. - - The forcing of the readable and writable flags is there because some tools - put badly patched objects on sys (one such offender being certain versions - of jupyter notebook).
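# [Editor's example] Behavior sketch for the encoding helpers above: a stream
# that reports an ASCII encoding is treated as misconfigured and upgraded.
import io

ascii_stream = io.TextIOWrapper(io.BytesIO(), encoding='ascii')
assert is_ascii_encoding('ascii')
assert get_best_encoding(ascii_stream) == 'utf-8'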
- """ - - def __init__(self, stream, force_readable=False, force_writable=False): - self._stream = stream - self._force_readable = force_readable - self._force_writable = force_writable - - def __getattr__(self, name): - return getattr(self._stream, name) - - def read1(self, size): - f = getattr(self._stream, 'read1', None) - if f is not None: - return f(size) - # We only dispatch to readline instead of read in Python 2 as we - # do not want cause problems with the different implementation - # of line buffering. - if PY2: - return self._stream.readline(size) - return self._stream.read(size) - - def readable(self): - if self._force_readable: - return True - x = getattr(self._stream, 'readable', None) - if x is not None: - return x() - try: - self._stream.read(0) - except Exception: - return False - return True - - def writable(self): - if self._force_writable: - return True - x = getattr(self._stream, 'writable', None) - if x is not None: - return x() - try: - self._stream.write('') - except Exception: - try: - self._stream.write(b'') - except Exception: - return False - return True - - def seekable(self): - x = getattr(self._stream, 'seekable', None) - if x is not None: - return x() - try: - self._stream.seek(self._stream.tell()) - except Exception: - return False - return True - - -if PY2: - text_type = unicode - bytes = str - raw_input = raw_input - string_types = (str, unicode) - int_types = (int, long) - iteritems = lambda x: x.iteritems() - range_type = xrange - - def is_bytes(x): - return isinstance(x, (buffer, bytearray)) - - _identifier_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$') - - # For Windows, we need to force stdout/stdin/stderr to binary if it's - # fetched for that. This obviously is not the most correct way to do - # it as it changes global state. Unfortunately, there does not seem to - # be a clear better way to do it as just reopening the file in binary - # mode does not change anything. - # - # An option would be to do what Python 3 does and to open the file as - # binary only, patch it back to the system, and then use a wrapper - # stream that converts newlines. It's not quite clear what's the - # correct option here. - # - # This code also lives in _winconsole for the fallback to the console - # emulation stream. - # - # There are also Windows environments where the `msvcrt` module is not - # available (which is why we use try-catch instead of the WIN variable - # here), such as the Google App Engine development server on Windows. In - # those cases there is just nothing we can do. 
- def set_binary_mode(f): - return f - - try: - import msvcrt - except ImportError: - pass - else: - def set_binary_mode(f): - try: - fileno = f.fileno() - except Exception: - pass - else: - msvcrt.setmode(fileno, os.O_BINARY) - return f - - try: - import fcntl - except ImportError: - pass - else: - def set_binary_mode(f): - try: - fileno = f.fileno() - except Exception: - pass - else: - flags = fcntl.fcntl(fileno, fcntl.F_GETFL) - fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) - return f - - def isidentifier(x): - return _identifier_re.search(x) is not None - - def get_binary_stdin(): - return set_binary_mode(sys.stdin) - - def get_binary_stdout(): - _wrap_std_stream('stdout') - return set_binary_mode(sys.stdout) - - def get_binary_stderr(): - _wrap_std_stream('stderr') - return set_binary_mode(sys.stderr) - - def get_text_stdin(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdin, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stdin, encoding, errors, - force_readable=True) - - def get_text_stdout(encoding=None, errors=None): - _wrap_std_stream('stdout') - rv = _get_windows_console_stream(sys.stdout, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stdout, encoding, errors, - force_writable=True) - - def get_text_stderr(encoding=None, errors=None): - _wrap_std_stream('stderr') - rv = _get_windows_console_stream(sys.stderr, encoding, errors) - if rv is not None: - return rv - return _make_text_stream(sys.stderr, encoding, errors, - force_writable=True) - - def filename_to_ui(value): - if isinstance(value, bytes): - value = value.decode(get_filesystem_encoding(), 'replace') - return value -else: - import io - text_type = str - raw_input = input - string_types = (str,) - int_types = (int,) - range_type = range - isidentifier = lambda x: x.isidentifier() - iteritems = lambda x: iter(x.items()) - - def is_bytes(x): - return isinstance(x, (bytes, memoryview, bytearray)) - - def _is_binary_reader(stream, default=False): - try: - return isinstance(stream.read(0), bytes) - except Exception: - return default - # This happens in some cases where the stream was already - # closed. In this case, we assume the default. - - def _is_binary_writer(stream, default=False): - try: - stream.write(b'') - except Exception: - try: - stream.write('') - return False - except Exception: - pass - return default - return True - - def _find_binary_reader(stream): - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detaching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_reader(stream, False): - return stream - - buf = getattr(stream, 'buffer', None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. - if buf is not None and _is_binary_reader(buf, True): - return buf - - def _find_binary_writer(stream): - # We need to figure out if the given stream is already binary. - # This can happen because the official docs recommend detatching - # the streams to get binary streams. Some code might do this, so - # we need to deal with this case explicitly. - if _is_binary_writer(stream, False): - return stream - - buf = getattr(stream, 'buffer', None) - - # Same situation here; this time we assume that the buffer is - # actually binary in case it's closed. 
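# [Editor's example] Typical use of the binary-stream helpers above through
# the public click.get_binary_stream() wrapper; standard click usage, not
# part of the original diff.
import click

stdout = click.get_binary_stream('stdout')
stdout.write(b'raw bytes, bypassing any text-mode wrapper\n')
stdout.flush()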
- if buf is not None and _is_binary_writer(buf, True): - return buf - - def _stream_is_misconfigured(stream): - """A stream is misconfigured if its encoding is ASCII.""" - # If the stream does not have an encoding set, we assume it's set - # to ASCII. This appears to happen in certain unittest - # environments. It's not quite clear what the correct behavior is - # but this at least will force Click to recover somehow. - return is_ascii_encoding(getattr(stream, 'encoding', None) or 'ascii') - - def _is_compatible_text_stream(stream, encoding, errors): - stream_encoding = getattr(stream, 'encoding', None) - stream_errors = getattr(stream, 'errors', None) - - # Perfect match. - if stream_encoding == encoding and stream_errors == errors: - return True - - # Otherwise, it's only a compatible stream if we did not ask for - # an encoding. - if encoding is None: - return stream_encoding is not None - - return False - - def _force_correct_text_reader(text_reader, encoding, errors, - force_readable=False): - if _is_binary_reader(text_reader, False): - binary_reader = text_reader - else: - # If there is no target encoding set, we need to verify that the - # reader is not actually misconfigured. - if encoding is None and not _stream_is_misconfigured(text_reader): - return text_reader - - if _is_compatible_text_stream(text_reader, encoding, errors): - return text_reader - - # If the reader has no encoding, we try to find the underlying - # binary reader for it. If that fails because the environment is - # misconfigured, we silently go with the same reader because this - # is too common to happen. In that case, mojibake is better than - # exceptions. - binary_reader = _find_binary_reader(text_reader) - if binary_reader is None: - return text_reader - - # At this point, we default the errors to replace instead of strict - # because nobody handles those errors anyways and at this point - # we're so fundamentally fucked that nothing can repair it. - if errors is None: - errors = 'replace' - return _make_text_stream(binary_reader, encoding, errors, - force_readable=force_readable) - - def _force_correct_text_writer(text_writer, encoding, errors, - force_writable=False): - if _is_binary_writer(text_writer, False): - binary_writer = text_writer - else: - # If there is no target encoding set, we need to verify that the - # writer is not actually misconfigured. - if encoding is None and not _stream_is_misconfigured(text_writer): - return text_writer - - if _is_compatible_text_stream(text_writer, encoding, errors): - return text_writer - - # If the writer has no encoding, we try to find the underlying - # binary writer for it. If that fails because the environment is - # misconfigured, we silently go with the same writer because this - # is too common to happen. In that case, mojibake is better than - # exceptions. - binary_writer = _find_binary_writer(text_writer) - if binary_writer is None: - return text_writer - - # At this point, we default the errors to replace instead of strict - # because nobody handles those errors anyways and at this point - # we're so fundamentally fucked that nothing can repair it. 
- if errors is None: - errors = 'replace' - return _make_text_stream(binary_writer, encoding, errors, - force_writable=force_writable) - - def get_binary_stdin(): - reader = _find_binary_reader(sys.stdin) - if reader is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stdin.') - return reader - - def get_binary_stdout(): - writer = _find_binary_writer(sys.stdout) - if writer is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stdout.') - return writer - - def get_binary_stderr(): - writer = _find_binary_writer(sys.stderr) - if writer is None: - raise RuntimeError('Was not able to determine binary ' - 'stream for sys.stderr.') - return writer - - def get_text_stdin(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdin, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_reader(sys.stdin, encoding, errors, - force_readable=True) - - def get_text_stdout(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stdout, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer(sys.stdout, encoding, errors, - force_writable=True) - - def get_text_stderr(encoding=None, errors=None): - rv = _get_windows_console_stream(sys.stderr, encoding, errors) - if rv is not None: - return rv - return _force_correct_text_writer(sys.stderr, encoding, errors, - force_writable=True) - - def filename_to_ui(value): - if isinstance(value, bytes): - value = value.decode(get_filesystem_encoding(), 'replace') - else: - value = value.encode('utf-8', 'surrogateescape') \ - .decode('utf-8', 'replace') - return value - - -def get_streerror(e, default=None): - if hasattr(e, 'strerror'): - msg = e.strerror - else: - if default is not None: - msg = default - else: - msg = str(e) - if isinstance(msg, bytes): - msg = msg.decode('utf-8', 'replace') - return msg - - -def open_stream(filename, mode='r', encoding=None, errors='strict', - atomic=False): - # Standard streams first. These are simple because they don't need - # special handling for the atomic flag. It's entirely ignored. - if filename == '-': - if any(m in mode for m in ['w', 'a', 'x']): - if 'b' in mode: - return get_binary_stdout(), False - return get_text_stdout(encoding=encoding, errors=errors), False - if 'b' in mode: - return get_binary_stdin(), False - return get_text_stdin(encoding=encoding, errors=errors), False - - # Non-atomic writes directly go out through the regular open functions. - if not atomic: - if encoding is None: - return open(filename, mode), True - return io.open(filename, mode, encoding=encoding, errors=errors), True - - # Some usability stuff for atomic writes - if 'a' in mode: - raise ValueError( - 'Appending to an existing file is not supported, because that ' - 'would involve an expensive `copy`-operation to a temporary ' - 'file. Open the file in normal `w`-mode and copy explicitly ' - 'if that\'s what you\'re after.' - ) - if 'x' in mode: - raise ValueError('Use the `overwrite`-parameter instead.') - if 'w' not in mode: - raise ValueError('Atomic writes only make sense with `w`-mode.') - - # Atomic writes are more complicated. They work by opening a file - # as a proxy in the same folder and then using the fdopen - # functionality to wrap it in a Python file. Then we wrap it in an - # atomic file that moves the file over on close. 
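# [Editor's example] Sketch of the atomic-write path implemented here;
# 'out.txt' is a hypothetical target, and only 'w'-mode is allowed, per the
# ValueErrors above.
f, should_close = open_stream('out.txt', 'w', atomic=True)
try:
    f.write('replaced atomically on close\n')
finally:
    f.close()   # _AtomicFile renames the temp file over out.txt here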
- import tempfile - fd, tmp_filename = tempfile.mkstemp(dir=os.path.dirname(filename), - prefix='.__atomic-write') - - if encoding is not None: - f = io.open(fd, mode, encoding=encoding, errors=errors) - else: - f = os.fdopen(fd, mode) - - return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True - - -# Used in a destructor call, needs extra protection from interpreter cleanup. -if hasattr(os, 'replace'): - _replace = os.replace - _can_replace = True -else: - _replace = os.rename - _can_replace = not WIN - - -class _AtomicFile(object): - - def __init__(self, f, tmp_filename, real_filename): - self._f = f - self._tmp_filename = tmp_filename - self._real_filename = real_filename - self.closed = False - - @property - def name(self): - return self._real_filename - - def close(self, delete=False): - if self.closed: - return - self._f.close() - if not _can_replace: - try: - os.remove(self._real_filename) - except OSError: - pass - _replace(self._tmp_filename, self._real_filename) - self.closed = True - - def __getattr__(self, name): - return getattr(self._f, name) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close(delete=exc_type is not None) - - def __repr__(self): - return repr(self._f) - - -auto_wrap_for_ansi = None -colorama = None -get_winterm_size = None - - -def strip_ansi(value): - return _ansi_re.sub('', value) - - -def should_strip_ansi(stream=None, color=None): - if color is None: - if stream is None: - stream = sys.stdin - return not isatty(stream) - return not color - - -# If we're on Windows, we provide transparent integration through -# colorama. This will make ANSI colors through the echo function -# work automatically. -if WIN: - # Windows has a smaller terminal - DEFAULT_COLUMNS = 79 - - from ._winconsole import _get_windows_console_stream, _wrap_std_stream - - def _get_argv_encoding(): - import locale - return locale.getpreferredencoding() - - if PY2: - def raw_input(prompt=''): - sys.stderr.flush() - if prompt: - stdout = _default_text_stdout() - stdout.write(prompt) - stdin = _default_text_stdin() - return stdin.readline().rstrip('\r\n') - - try: - import colorama - except ImportError: - pass - else: - _ansi_stream_wrappers = WeakKeyDictionary() - - def auto_wrap_for_ansi(stream, color=None): - """This function wraps a stream so that calls through colorama - are issued to the win32 console API to recolor on demand. It - also ensures to reset the colors if a write call is interrupted - to not destroy the console afterwards. 
- """ - try: - cached = _ansi_stream_wrappers.get(stream) - except Exception: - cached = None - if cached is not None: - return cached - strip = should_strip_ansi(stream, color) - ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip) - rv = ansi_wrapper.stream - _write = rv.write - - def _safe_write(s): - try: - return _write(s) - except: - ansi_wrapper.reset_all() - raise - - rv.write = _safe_write - try: - _ansi_stream_wrappers[stream] = rv - except Exception: - pass - return rv - - def get_winterm_size(): - win = colorama.win32.GetConsoleScreenBufferInfo( - colorama.win32.STDOUT).srWindow - return win.Right - win.Left, win.Bottom - win.Top -else: - def _get_argv_encoding(): - return getattr(sys.stdin, 'encoding', None) or get_filesystem_encoding() - - _get_windows_console_stream = lambda *x: None - _wrap_std_stream = lambda *x: None - - -def term_len(x): - return len(strip_ansi(x)) - - -def isatty(stream): - try: - return stream.isatty() - except Exception: - return False - - -def _make_cached_stream_func(src_func, wrapper_func): - cache = WeakKeyDictionary() - def func(): - stream = src_func() - try: - rv = cache.get(stream) - except Exception: - rv = None - if rv is not None: - return rv - rv = wrapper_func() - try: - stream = src_func() # In case wrapper_func() modified the stream - cache[stream] = rv - except Exception: - pass - return rv - return func - - -_default_text_stdin = _make_cached_stream_func( - lambda: sys.stdin, get_text_stdin) -_default_text_stdout = _make_cached_stream_func( - lambda: sys.stdout, get_text_stdout) -_default_text_stderr = _make_cached_stream_func( - lambda: sys.stderr, get_text_stderr) - - -binary_streams = { - 'stdin': get_binary_stdin, - 'stdout': get_binary_stdout, - 'stderr': get_binary_stderr, -} - -text_streams = { - 'stdin': get_text_stdin, - 'stdout': get_text_stdout, - 'stderr': get_text_stderr, -} diff --git a/venv/lib/python3.8/site-packages/click/_termui_impl.py b/venv/lib/python3.8/site-packages/click/_termui_impl.py deleted file mode 100644 index 00a8e5ef..00000000 --- a/venv/lib/python3.8/site-packages/click/_termui_impl.py +++ /dev/null @@ -1,621 +0,0 @@ -# -*- coding: utf-8 -*- -""" -click._termui_impl -~~~~~~~~~~~~~~~~~~ - -This module contains implementations for the termui module. To keep the -import time of Click down, some infrequently used functionality is -placed in this module and only imported as needed. - -:copyright: © 2014 by the Pallets team. -:license: BSD, see LICENSE.rst for more details. 
-""" - -import os -import sys -import time -import math -import contextlib -from ._compat import _default_text_stdout, range_type, PY2, isatty, \ - open_stream, strip_ansi, term_len, get_best_encoding, WIN, int_types, \ - CYGWIN -from .utils import echo -from .exceptions import ClickException - - -if os.name == 'nt': - BEFORE_BAR = '\r' - AFTER_BAR = '\n' -else: - BEFORE_BAR = '\r\033[?25l' - AFTER_BAR = '\033[?25h\n' - - -def _length_hint(obj): - """Returns the length hint of an object.""" - try: - return len(obj) - except (AttributeError, TypeError): - try: - get_hint = type(obj).__length_hint__ - except AttributeError: - return None - try: - hint = get_hint(obj) - except TypeError: - return None - if hint is NotImplemented or \ - not isinstance(hint, int_types) or \ - hint < 0: - return None - return hint - - -class ProgressBar(object): - - def __init__(self, iterable, length=None, fill_char='#', empty_char=' ', - bar_template='%(bar)s', info_sep=' ', show_eta=True, - show_percent=None, show_pos=False, item_show_func=None, - label=None, file=None, color=None, width=30): - self.fill_char = fill_char - self.empty_char = empty_char - self.bar_template = bar_template - self.info_sep = info_sep - self.show_eta = show_eta - self.show_percent = show_percent - self.show_pos = show_pos - self.item_show_func = item_show_func - self.label = label or '' - if file is None: - file = _default_text_stdout() - self.file = file - self.color = color - self.width = width - self.autowidth = width == 0 - - if length is None: - length = _length_hint(iterable) - if iterable is None: - if length is None: - raise TypeError('iterable or length is required') - iterable = range_type(length) - self.iter = iter(iterable) - self.length = length - self.length_known = length is not None - self.pos = 0 - self.avg = [] - self.start = self.last_eta = time.time() - self.eta_known = False - self.finished = False - self.max_width = None - self.entered = False - self.current_item = None - self.is_hidden = not isatty(self.file) - self._last_line = None - self.short_limit = 0.5 - - def __enter__(self): - self.entered = True - self.render_progress() - return self - - def __exit__(self, exc_type, exc_value, tb): - self.render_finish() - - def __iter__(self): - if not self.entered: - raise RuntimeError('You need to use progress bars in a with block.') - self.render_progress() - return self.generator() - - def is_fast(self): - return time.time() - self.start <= self.short_limit - - def render_finish(self): - if self.is_hidden or self.is_fast(): - return - self.file.write(AFTER_BAR) - self.file.flush() - - @property - def pct(self): - if self.finished: - return 1.0 - return min(self.pos / (float(self.length) or 1), 1.0) - - @property - def time_per_iteration(self): - if not self.avg: - return 0.0 - return sum(self.avg) / float(len(self.avg)) - - @property - def eta(self): - if self.length_known and not self.finished: - return self.time_per_iteration * (self.length - self.pos) - return 0.0 - - def format_eta(self): - if self.eta_known: - t = int(self.eta) - seconds = t % 60 - t //= 60 - minutes = t % 60 - t //= 60 - hours = t % 24 - t //= 24 - if t > 0: - days = t - return '%dd %02d:%02d:%02d' % (days, hours, minutes, seconds) - else: - return '%02d:%02d:%02d' % (hours, minutes, seconds) - return '' - - def format_pos(self): - pos = str(self.pos) - if self.length_known: - pos += '/%s' % self.length - return pos - - def format_pct(self): - return ('% 4d%%' % int(self.pct * 100))[1:] - - def format_bar(self): - if self.length_known: - 
bar_length = int(self.pct * self.width) - bar = self.fill_char * bar_length - bar += self.empty_char * (self.width - bar_length) - elif self.finished: - bar = self.fill_char * self.width - else: - bar = list(self.empty_char * (self.width or 1)) - if self.time_per_iteration != 0: - bar[int((math.cos(self.pos * self.time_per_iteration) - / 2.0 + 0.5) * self.width)] = self.fill_char - bar = ''.join(bar) - return bar - - def format_progress_line(self): - show_percent = self.show_percent - - info_bits = [] - if self.length_known and show_percent is None: - show_percent = not self.show_pos - - if self.show_pos: - info_bits.append(self.format_pos()) - if show_percent: - info_bits.append(self.format_pct()) - if self.show_eta and self.eta_known and not self.finished: - info_bits.append(self.format_eta()) - if self.item_show_func is not None: - item_info = self.item_show_func(self.current_item) - if item_info is not None: - info_bits.append(item_info) - - return (self.bar_template % { - 'label': self.label, - 'bar': self.format_bar(), - 'info': self.info_sep.join(info_bits) - }).rstrip() - - def render_progress(self): - from .termui import get_terminal_size - - if self.is_hidden: - return - - buf = [] - # Update width in case the terminal has been resized - if self.autowidth: - old_width = self.width - self.width = 0 - clutter_length = term_len(self.format_progress_line()) - new_width = max(0, get_terminal_size()[0] - clutter_length) - if new_width < old_width: - buf.append(BEFORE_BAR) - buf.append(' ' * self.max_width) - self.max_width = new_width - self.width = new_width - - clear_width = self.width - if self.max_width is not None: - clear_width = self.max_width - - buf.append(BEFORE_BAR) - line = self.format_progress_line() - line_len = term_len(line) - if self.max_width is None or self.max_width < line_len: - self.max_width = line_len - - buf.append(line) - buf.append(' ' * (clear_width - line_len)) - line = ''.join(buf) - # Render the line only if it changed. - - if line != self._last_line and not self.is_fast(): - self._last_line = line - echo(line, file=self.file, color=self.color, nl=False) - self.file.flush() - - def make_step(self, n_steps): - self.pos += n_steps - if self.length_known and self.pos >= self.length: - self.finished = True - - if (time.time() - self.last_eta) < 1.0: - return - - self.last_eta = time.time() - - # self.avg is a rolling list of length <= 7 of steps where steps are - # defined as time elapsed divided by the total progress through - # self.length. - if self.pos: - step = (time.time() - self.start) / self.pos - else: - step = time.time() - self.start - - self.avg = self.avg[-6:] + [step] - - self.eta_known = self.length_known - - def update(self, n_steps): - self.make_step(n_steps) - self.render_progress() - - def finish(self): - self.eta_known = 0 - self.current_item = None - self.finished = True - - def generator(self): - """ - Returns a generator which yields the items added to the bar during - construction, and updates the progress bar *after* the yielded block - returns. 
- """ - if not self.entered: - raise RuntimeError('You need to use progress bars in a with block.') - - if self.is_hidden: - for rv in self.iter: - yield rv - else: - for rv in self.iter: - self.current_item = rv - yield rv - self.update(1) - self.finish() - self.render_progress() - - -def pager(generator, color=None): - """Decide what method to use for paging through text.""" - stdout = _default_text_stdout() - if not isatty(sys.stdin) or not isatty(stdout): - return _nullpager(stdout, generator, color) - pager_cmd = (os.environ.get('PAGER', None) or '').strip() - if pager_cmd: - if WIN: - return _tempfilepager(generator, pager_cmd, color) - return _pipepager(generator, pager_cmd, color) - if os.environ.get('TERM') in ('dumb', 'emacs'): - return _nullpager(stdout, generator, color) - if WIN or sys.platform.startswith('os2'): - return _tempfilepager(generator, 'more <', color) - if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0: - return _pipepager(generator, 'less', color) - - import tempfile - fd, filename = tempfile.mkstemp() - os.close(fd) - try: - if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0: - return _pipepager(generator, 'more', color) - return _nullpager(stdout, generator, color) - finally: - os.unlink(filename) - - -def _pipepager(generator, cmd, color): - """Page through text by feeding it to another program. Invoking a - pager through this might support colors. - """ - import subprocess - env = dict(os.environ) - - # If we're piping to less we might support colors under the - # condition that - cmd_detail = cmd.rsplit('/', 1)[-1].split() - if color is None and cmd_detail[0] == 'less': - less_flags = os.environ.get('LESS', '') + ' '.join(cmd_detail[1:]) - if not less_flags: - env['LESS'] = '-R' - color = True - elif 'r' in less_flags or 'R' in less_flags: - color = True - - c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, - env=env) - encoding = get_best_encoding(c.stdin) - try: - for text in generator: - if not color: - text = strip_ansi(text) - - c.stdin.write(text.encode(encoding, 'replace')) - except (IOError, KeyboardInterrupt): - pass - else: - c.stdin.close() - - # Less doesn't respect ^C, but catches it for its own UI purposes (aborting - # search or other commands inside less). - # - # That means when the user hits ^C, the parent process (click) terminates, - # but less is still alive, paging the output and messing up the terminal. - # - # If the user wants to make the pager exit on ^C, they should set - # `LESS='-K'`. It's not our decision to make. - while True: - try: - c.wait() - except KeyboardInterrupt: - pass - else: - break - - -def _tempfilepager(generator, cmd, color): - """Page through text by invoking a program on a temporary file.""" - import tempfile - filename = tempfile.mktemp() - # TODO: This never terminates if the passed generator never terminates. - text = "".join(generator) - if not color: - text = strip_ansi(text) - encoding = get_best_encoding(sys.stdout) - with open_stream(filename, 'wb')[0] as f: - f.write(text.encode(encoding)) - try: - os.system(cmd + ' "' + filename + '"') - finally: - os.unlink(filename) - - -def _nullpager(stream, generator, color): - """Simply print unformatted text. 
This is the ultimate fallback.""" - for text in generator: - if not color: - text = strip_ansi(text) - stream.write(text) - - -class Editor(object): - - def __init__(self, editor=None, env=None, require_save=True, - extension='.txt'): - self.editor = editor - self.env = env - self.require_save = require_save - self.extension = extension - - def get_editor(self): - if self.editor is not None: - return self.editor - for key in 'VISUAL', 'EDITOR': - rv = os.environ.get(key) - if rv: - return rv - if WIN: - return 'notepad' - for editor in 'vim', 'nano': - if os.system('which %s >/dev/null 2>&1' % editor) == 0: - return editor - return 'vi' - - def edit_file(self, filename): - import subprocess - editor = self.get_editor() - if self.env: - environ = os.environ.copy() - environ.update(self.env) - else: - environ = None - try: - c = subprocess.Popen('%s "%s"' % (editor, filename), - env=environ, shell=True) - exit_code = c.wait() - if exit_code != 0: - raise ClickException('%s: Editing failed!' % editor) - except OSError as e: - raise ClickException('%s: Editing failed: %s' % (editor, e)) - - def edit(self, text): - import tempfile - - text = text or '' - if text and not text.endswith('\n'): - text += '\n' - - fd, name = tempfile.mkstemp(prefix='editor-', suffix=self.extension) - try: - if WIN: - encoding = 'utf-8-sig' - text = text.replace('\n', '\r\n') - else: - encoding = 'utf-8' - text = text.encode(encoding) - - f = os.fdopen(fd, 'wb') - f.write(text) - f.close() - timestamp = os.path.getmtime(name) - - self.edit_file(name) - - if self.require_save \ - and os.path.getmtime(name) == timestamp: - return None - - f = open(name, 'rb') - try: - rv = f.read() - finally: - f.close() - return rv.decode('utf-8-sig').replace('\r\n', '\n') - finally: - os.unlink(name) - - -def open_url(url, wait=False, locate=False): - import subprocess - - def _unquote_file(url): - try: - import urllib - except ImportError: - import urllib - if url.startswith('file://'): - url = urllib.unquote(url[7:]) - return url - - if sys.platform == 'darwin': - args = ['open'] - if wait: - args.append('-W') - if locate: - args.append('-R') - args.append(_unquote_file(url)) - null = open('/dev/null', 'w') - try: - return subprocess.Popen(args, stderr=null).wait() - finally: - null.close() - elif WIN: - if locate: - url = _unquote_file(url) - args = 'explorer /select,"%s"' % _unquote_file( - url.replace('"', '')) - else: - args = 'start %s "" "%s"' % ( - wait and '/WAIT' or '', url.replace('"', '')) - return os.system(args) - elif CYGWIN: - if locate: - url = _unquote_file(url) - args = 'cygstart "%s"' % (os.path.dirname(url).replace('"', '')) - else: - args = 'cygstart %s "%s"' % ( - wait and '-w' or '', url.replace('"', '')) - return os.system(args) - - try: - if locate: - url = os.path.dirname(_unquote_file(url)) or '.' - else: - url = _unquote_file(url) - c = subprocess.Popen(['xdg-open', url]) - if wait: - return c.wait() - return 0 - except OSError: - if url.startswith(('http://', 'https://')) and not locate and not wait: - import webbrowser - webbrowser.open(url) - return 0 - return 1 - - -def _translate_ch_to_exc(ch): - if ch == u'\x03': - raise KeyboardInterrupt() - if ch == u'\x04' and not WIN: # Unix-like, Ctrl+D - raise EOFError() - if ch == u'\x1a' and WIN: # Windows, Ctrl+Z - raise EOFError() - - -if WIN: - import msvcrt - - @contextlib.contextmanager - def raw_terminal(): - yield - - def getchar(echo): - # The function `getch` will return a bytes object corresponding to - # the pressed character. 
Since Windows 10 build 1803, it will also - # return \x00 when called a second time after pressing a regular key. - # - # `getwch` does not share this probably-bugged behavior. Moreover, it - # returns a Unicode object by default, which is what we want. - # - # Either of these functions will return \x00 or \xe0 to indicate - # a special key, and you need to call the same function again to get - # the "rest" of the code. The fun part is that \u00e0 is - # "latin small letter a with grave", so if you type that on a French - # keyboard, you _also_ get a \xe0. - # E.g., consider the Up arrow. This returns \xe0 and then \x48. The - # resulting Unicode string reads as "a with grave" + "capital H". - # This is indistinguishable from when the user actually types - # "a with grave" and then "capital H". - # - # When \xe0 is returned, we assume it's part of a special-key sequence - # and call `getwch` again, but that means that when the user types - # the \u00e0 character, `getchar` doesn't return until a second - # character is typed. - # The alternative is returning immediately, but that would mess up - # cross-platform handling of arrow keys and others that start with - # \xe0. Another option is using `getch`, but then we can't reliably - # read non-ASCII characters, because return values of `getch` are - # limited to the current 8-bit codepage. - # - # Anyway, Click doesn't claim to do this Right(tm), and using `getwch` - # is doing the right thing in more situations than with `getch`. - if echo: - func = msvcrt.getwche - else: - func = msvcrt.getwch - - rv = func() - if rv in (u'\x00', u'\xe0'): - # \x00 and \xe0 are control characters that indicate special key, - # see above. - rv += func() - _translate_ch_to_exc(rv) - return rv -else: - import tty - import termios - - @contextlib.contextmanager - def raw_terminal(): - if not isatty(sys.stdin): - f = open('/dev/tty') - fd = f.fileno() - else: - fd = sys.stdin.fileno() - f = None - try: - old_settings = termios.tcgetattr(fd) - try: - tty.setraw(fd) - yield fd - finally: - termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) - sys.stdout.flush() - if f is not None: - f.close() - except termios.error: - pass - - def getchar(echo): - with raw_terminal() as fd: - ch = os.read(fd, 32) - ch = ch.decode(get_best_encoding(sys.stdin), 'replace') - if echo and isatty(sys.stdout): - sys.stdout.write(ch) - _translate_ch_to_exc(ch) - return ch diff --git a/venv/lib/python3.8/site-packages/click/_textwrap.py b/venv/lib/python3.8/site-packages/click/_textwrap.py deleted file mode 100644 index 7e776031..00000000 --- a/venv/lib/python3.8/site-packages/click/_textwrap.py +++ /dev/null @@ -1,38 +0,0 @@ -import textwrap -from contextlib import contextmanager - - -class TextWrapper(textwrap.TextWrapper): - - def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): - space_left = max(width - cur_len, 1) - - if self.break_long_words: - last = reversed_chunks[-1] - cut = last[:space_left] - res = last[space_left:] - cur_line.append(cut) - reversed_chunks[-1] = res - elif not cur_line: - cur_line.append(reversed_chunks.pop()) - - @contextmanager - def extra_indent(self, indent): - old_initial_indent = self.initial_indent - old_subsequent_indent = self.subsequent_indent - self.initial_indent += indent - self.subsequent_indent += indent - try: - yield - finally: - self.initial_indent = old_initial_indent - self.subsequent_indent = old_subsequent_indent - - def indent_only(self, text): - rv = [] - for idx, line in enumerate(text.splitlines()): - indent = 
self.initial_indent - if idx > 0: - indent = self.subsequent_indent - rv.append(indent + line) - return '\n'.join(rv) diff --git a/venv/lib/python3.8/site-packages/click/_unicodefun.py b/venv/lib/python3.8/site-packages/click/_unicodefun.py deleted file mode 100644 index 620edff3..00000000 --- a/venv/lib/python3.8/site-packages/click/_unicodefun.py +++ /dev/null @@ -1,125 +0,0 @@ -import os -import sys -import codecs - -from ._compat import PY2 - - -# If someone wants to vendor click, we want to ensure the -# correct package is discovered. Ideally we could use a -# relative import here but unfortunately Python does not -# support that. -click = sys.modules[__name__.rsplit('.', 1)[0]] - - -def _find_unicode_literals_frame(): - import __future__ - if not hasattr(sys, '_getframe'): # not all Python implementations have it - return 0 - frm = sys._getframe(1) - idx = 1 - while frm is not None: - if frm.f_globals.get('__name__', '').startswith('click.'): - frm = frm.f_back - idx += 1 - elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag: - return idx - else: - break - return 0 - - -def _check_for_unicode_literals(): - if not __debug__: - return - if not PY2 or click.disable_unicode_literals_warning: - return - bad_frame = _find_unicode_literals_frame() - if bad_frame <= 0: - return - from warnings import warn - warn(Warning('Click detected the use of the unicode_literals ' - '__future__ import. This is heavily discouraged ' - 'because it can introduce subtle bugs in your ' - 'code. You should instead use explicit u"" literals ' - 'for your unicode strings. For more information see ' - 'https://click.palletsprojects.com/python3/'), - stacklevel=bad_frame) - - -def _verify_python3_env(): - """Ensures that the environment is good for unicode on Python 3.""" - if PY2: - return - try: - import locale - fs_enc = codecs.lookup(locale.getpreferredencoding()).name - except Exception: - fs_enc = 'ascii' - if fs_enc != 'ascii': - return - - extra = '' - if os.name == 'posix': - import subprocess - try: - rv = subprocess.Popen(['locale', '-a'], stdout=subprocess.PIPE, - stderr=subprocess.PIPE).communicate()[0] - except OSError: - rv = b'' - good_locales = set() - has_c_utf8 = False - - # Make sure we're operating on text here. - if isinstance(rv, bytes): - rv = rv.decode('ascii', 'replace') - - for line in rv.splitlines(): - locale = line.strip() - if locale.lower().endswith(('.utf-8', '.utf8')): - good_locales.add(locale) - if locale.lower() in ('c.utf8', 'c.utf-8'): - has_c_utf8 = True - - extra += '\n\n' - if not good_locales: - extra += ( - 'Additional information: on this system no suitable UTF-8\n' - 'locales were discovered. This most likely requires resolving\n' - 'by reconfiguring the locale system.' - ) - elif has_c_utf8: - extra += ( - 'This system supports the C.UTF-8 locale which is recommended.\n' - 'You might be able to resolve your issue by exporting the\n' - 'following environment variables:\n\n' - ' export LC_ALL=C.UTF-8\n' - ' export LANG=C.UTF-8' - ) - else: - extra += ( - 'This system lists a couple of UTF-8 supporting locales that\n' - 'you can pick from. 
The following suitable locales were\n'
-            'discovered: %s'
-        ) % ', '.join(sorted(good_locales))
-
-    bad_locale = None
-    for locale in os.environ.get('LC_ALL'), os.environ.get('LANG'):
-        if locale and locale.lower().endswith(('.utf-8', '.utf8')):
-            bad_locale = locale
-        if locale is not None:
-            break
-    if bad_locale is not None:
-        extra += (
-            '\n\nClick discovered that you exported a UTF-8 locale\n'
-            'but the locale system could not pick up from it because\n'
-            'it does not exist. The exported locale is "%s" but it\n'
-            'is not supported'
-        ) % bad_locale
-
-    raise RuntimeError(
-        'Click will abort further execution because Python 3 was'
-        ' configured to use ASCII as encoding for the environment.'
-        ' Consult https://click.palletsprojects.com/en/7.x/python3/ for'
-        ' mitigation steps.' + extra
-    )
diff --git a/venv/lib/python3.8/site-packages/click/_winconsole.py b/venv/lib/python3.8/site-packages/click/_winconsole.py
deleted file mode 100644
index bbb080dd..00000000
--- a/venv/lib/python3.8/site-packages/click/_winconsole.py
+++ /dev/null
@@ -1,307 +0,0 @@
-# -*- coding: utf-8 -*-
-# This module is based on the excellent work by Adam Bartoš who
-# provided a lot of what went into the implementation here in
-# the discussion to issue1602 in the Python bug tracker.
-#
-# There are some general differences in regards to how this works
-# compared to the original patches as we do not need to patch
-# the entire interpreter but just work in our little world of
-# echo and prompt.
-
-import io
-import os
-import sys
-import zlib
-import time
-import ctypes
-import msvcrt
-from ._compat import _NonClosingTextIOWrapper, text_type, PY2
-from ctypes import byref, POINTER, c_int, c_char, c_char_p, \
-     c_void_p, py_object, c_ssize_t, c_ulong, windll, WINFUNCTYPE
-try:
-    from ctypes import pythonapi
-    PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
-    PyBuffer_Release = pythonapi.PyBuffer_Release
-except ImportError:
-    pythonapi = None
-from ctypes.wintypes import LPWSTR, LPCWSTR
-
-
-c_ssize_p = POINTER(c_ssize_t)
-
-kernel32 = windll.kernel32
-GetStdHandle = kernel32.GetStdHandle
-ReadConsoleW = kernel32.ReadConsoleW
-WriteConsoleW = kernel32.WriteConsoleW
-GetLastError = kernel32.GetLastError
-GetCommandLineW = WINFUNCTYPE(LPWSTR)(
-    ('GetCommandLineW', windll.kernel32))
-CommandLineToArgvW = WINFUNCTYPE(
-    POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
-        ('CommandLineToArgvW', windll.shell32))
-
-
-STDIN_HANDLE = GetStdHandle(-10)
-STDOUT_HANDLE = GetStdHandle(-11)
-STDERR_HANDLE = GetStdHandle(-12)
-
-
-PyBUF_SIMPLE = 0
-PyBUF_WRITABLE = 1
-
-ERROR_SUCCESS = 0
-ERROR_NOT_ENOUGH_MEMORY = 8
-ERROR_OPERATION_ABORTED = 995
-
-STDIN_FILENO = 0
-STDOUT_FILENO = 1
-STDERR_FILENO = 2
-
-EOF = b'\x1a'
-MAX_BYTES_WRITTEN = 32767
-
-
-class Py_buffer(ctypes.Structure):
-    _fields_ = [
-        ('buf', c_void_p),
-        ('obj', py_object),
-        ('len', c_ssize_t),
-        ('itemsize', c_ssize_t),
-        ('readonly', c_int),
-        ('ndim', c_int),
-        ('format', c_char_p),
-        ('shape', c_ssize_p),
-        ('strides', c_ssize_p),
-        ('suboffsets', c_ssize_p),
-        ('internal', c_void_p)
-    ]
-
-    if PY2:
-        _fields_.insert(-1, ('smalltable', c_ssize_t * 2))
-
-
-# On PyPy we cannot get buffers so our ability to operate here is
-# severely limited.
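The locale check above reduces to: if Python 3 resolves the preferred encoding to ASCII, abort and point the user at a UTF-8 locale. A minimal standalone sketch (not part of the deleted file) of the same probe; the helper name has_ascii_env is invented here for illustration:

import codecs
import locale

def has_ascii_env():
    # Mirror the probe in _verify_python3_env: resolve the preferred
    # encoding and treat a failed lookup as ASCII.
    try:
        fs_enc = codecs.lookup(locale.getpreferredencoding()).name
    except Exception:
        fs_enc = 'ascii'
    return fs_enc == 'ascii'

if has_ascii_env():
    # The remedy the error message above suggests:
    print('export LC_ALL=C.UTF-8')
    print('export LANG=C.UTF-8')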
-if pythonapi is None:
-    get_buffer = None
-else:
-    def get_buffer(obj, writable=False):
-        buf = Py_buffer()
-        flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
-        PyObject_GetBuffer(py_object(obj), byref(buf), flags)
-        try:
-            buffer_type = c_char * buf.len
-            return buffer_type.from_address(buf.buf)
-        finally:
-            PyBuffer_Release(byref(buf))
-
-
-class _WindowsConsoleRawIOBase(io.RawIOBase):
-
-    def __init__(self, handle):
-        self.handle = handle
-
-    def isatty(self):
-        io.RawIOBase.isatty(self)
-        return True
-
-
-class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
-
-    def readable(self):
-        return True
-
-    def readinto(self, b):
-        bytes_to_be_read = len(b)
-        if not bytes_to_be_read:
-            return 0
-        elif bytes_to_be_read % 2:
-            raise ValueError('cannot read odd number of bytes from '
-                             'UTF-16-LE encoded console')
-
-        buffer = get_buffer(b, writable=True)
-        code_units_to_be_read = bytes_to_be_read // 2
-        code_units_read = c_ulong()
-
-        rv = ReadConsoleW(self.handle, buffer, code_units_to_be_read,
-                          byref(code_units_read), None)
-        if GetLastError() == ERROR_OPERATION_ABORTED:
-            # wait for KeyboardInterrupt
-            time.sleep(0.1)
-        if not rv:
-            raise OSError('Windows error: %s' % GetLastError())
-
-        if buffer[0] == EOF:
-            return 0
-        return 2 * code_units_read.value
-
-
-class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
-
-    def writable(self):
-        return True
-
-    @staticmethod
-    def _get_error_message(errno):
-        if errno == ERROR_SUCCESS:
-            return 'ERROR_SUCCESS'
-        elif errno == ERROR_NOT_ENOUGH_MEMORY:
-            return 'ERROR_NOT_ENOUGH_MEMORY'
-        return 'Windows error %s' % errno
-
-    def write(self, b):
-        bytes_to_be_written = len(b)
-        buf = get_buffer(b)
-        code_units_to_be_written = min(bytes_to_be_written,
-                                       MAX_BYTES_WRITTEN) // 2
-        code_units_written = c_ulong()
-
-        WriteConsoleW(self.handle, buf, code_units_to_be_written,
-                      byref(code_units_written), None)
-        bytes_written = 2 * code_units_written.value
-
-        if bytes_written == 0 and bytes_to_be_written > 0:
-            raise OSError(self._get_error_message(GetLastError()))
-        return bytes_written
-
-
-class ConsoleStream(object):
-
-    def __init__(self, text_stream, byte_stream):
-        self._text_stream = text_stream
-        self.buffer = byte_stream
-
-    @property
-    def name(self):
-        return self.buffer.name
-
-    def write(self, x):
-        if isinstance(x, text_type):
-            return self._text_stream.write(x)
-        try:
-            self.flush()
-        except Exception:
-            pass
-        return self.buffer.write(x)
-
-    def writelines(self, lines):
-        for line in lines:
-            self.write(line)
-
-    def __getattr__(self, name):
-        return getattr(self._text_stream, name)
-
-    def isatty(self):
-        return self.buffer.isatty()
-
-    def __repr__(self):
-        return '<ConsoleStream name=%r encoding=%r>' % (
-            self.name,
-            self.encoding,
-        )
-
-
-class WindowsChunkedWriter(object):
-    """
-    Wraps a stream (such as stdout), acting as a transparent proxy for all
-    attribute access apart from method 'write()' which we wrap to write in
-    limited chunks due to a Windows limitation on binary console streams.
-    """
-    def __init__(self, wrapped):
-        # double-underscore everything to prevent clashes with names of
-        # attributes on the wrapped stream object.
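The double underscore noted in this constructor relies on Python name mangling: self.__wrapped is stored as _WindowsChunkedWriter__wrapped, so attributes proxied through __getattr__ cannot shadow it. A tiny self-contained illustration with a hypothetical Proxy class (not from this file):

import io

class Proxy(object):
    def __init__(self, wrapped):
        self.__wrapped = wrapped  # stored under the mangled name _Proxy__wrapped

    def __getattr__(self, name):
        # Only called for names not found on Proxy itself.
        return getattr(self.__wrapped, name)

p = Proxy(io.StringIO('hello'))
assert p.read() == 'hello'           # proxied through __getattr__
assert '_Proxy__wrapped' in vars(p)  # the mangled storage name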
- self.__wrapped = wrapped - - def __getattr__(self, name): - return getattr(self.__wrapped, name) - - def write(self, text): - total_to_write = len(text) - written = 0 - - while written < total_to_write: - to_write = min(total_to_write - written, MAX_BYTES_WRITTEN) - self.__wrapped.write(text[written:written+to_write]) - written += to_write - - -_wrapped_std_streams = set() - - -def _wrap_std_stream(name): - # Python 2 & Windows 7 and below - if PY2 and sys.getwindowsversion()[:2] <= (6, 1) and name not in _wrapped_std_streams: - setattr(sys, name, WindowsChunkedWriter(getattr(sys, name))) - _wrapped_std_streams.add(name) - - -def _get_text_stdin(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)), - 'utf-16-le', 'strict', line_buffering=True) - return ConsoleStream(text_stream, buffer_stream) - - -def _get_text_stdout(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)), - 'utf-16-le', 'strict', line_buffering=True) - return ConsoleStream(text_stream, buffer_stream) - - -def _get_text_stderr(buffer_stream): - text_stream = _NonClosingTextIOWrapper( - io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)), - 'utf-16-le', 'strict', line_buffering=True) - return ConsoleStream(text_stream, buffer_stream) - - -if PY2: - def _hash_py_argv(): - return zlib.crc32('\x00'.join(sys.argv[1:])) - - _initial_argv_hash = _hash_py_argv() - - def _get_windows_argv(): - argc = c_int(0) - argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc)) - argv = [argv_unicode[i] for i in range(0, argc.value)] - - if not hasattr(sys, 'frozen'): - argv = argv[1:] - while len(argv) > 0: - arg = argv[0] - if not arg.startswith('-') or arg == '-': - break - argv = argv[1:] - if arg.startswith(('-c', '-m')): - break - - return argv[1:] - - -_stream_factories = { - 0: _get_text_stdin, - 1: _get_text_stdout, - 2: _get_text_stderr, -} - - -def _get_windows_console_stream(f, encoding, errors): - if get_buffer is not None and \ - encoding in ('utf-16-le', None) \ - and errors in ('strict', None) and \ - hasattr(f, 'isatty') and f.isatty(): - func = _stream_factories.get(f.fileno()) - if func is not None: - if not PY2: - f = getattr(f, 'buffer', None) - if f is None: - return None - else: - # If we are on Python 2 we need to set the stream that we - # deal with to binary mode as otherwise the exercise if a - # bit moot. The same problems apply as for - # get_binary_stdin and friends from _compat. 
- msvcrt.setmode(f.fileno(), os.O_BINARY) - return func(f) diff --git a/venv/lib/python3.8/site-packages/click/core.py b/venv/lib/python3.8/site-packages/click/core.py deleted file mode 100644 index 7a1e3422..00000000 --- a/venv/lib/python3.8/site-packages/click/core.py +++ /dev/null @@ -1,1856 +0,0 @@ -import errno -import inspect -import os -import sys -from contextlib import contextmanager -from itertools import repeat -from functools import update_wrapper - -from .types import convert_type, IntRange, BOOL -from .utils import PacifyFlushWrapper, make_str, make_default_short_help, \ - echo, get_os_args -from .exceptions import ClickException, UsageError, BadParameter, Abort, \ - MissingParameter, Exit -from .termui import prompt, confirm, style -from .formatting import HelpFormatter, join_options -from .parser import OptionParser, split_opt -from .globals import push_context, pop_context - -from ._compat import PY2, isidentifier, iteritems, string_types -from ._unicodefun import _check_for_unicode_literals, _verify_python3_env - - -_missing = object() - - -SUBCOMMAND_METAVAR = 'COMMAND [ARGS]...' -SUBCOMMANDS_METAVAR = 'COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]...' - -DEPRECATED_HELP_NOTICE = ' (DEPRECATED)' -DEPRECATED_INVOKE_NOTICE = 'DeprecationWarning: ' + \ - 'The command %(name)s is deprecated.' - - -def _maybe_show_deprecated_notice(cmd): - if cmd.deprecated: - echo(style(DEPRECATED_INVOKE_NOTICE % {'name': cmd.name}, fg='red'), err=True) - - -def fast_exit(code): - """Exit without garbage collection, this speeds up exit by about 10ms for - things like bash completion. - """ - sys.stdout.flush() - sys.stderr.flush() - os._exit(code) - - -def _bashcomplete(cmd, prog_name, complete_var=None): - """Internal handler for the bash completion support.""" - if complete_var is None: - complete_var = '_%s_COMPLETE' % (prog_name.replace('-', '_')).upper() - complete_instr = os.environ.get(complete_var) - if not complete_instr: - return - - from ._bashcomplete import bashcomplete - if bashcomplete(cmd, prog_name, complete_var, complete_instr): - fast_exit(1) - - -def _check_multicommand(base_command, cmd_name, cmd, register=False): - if not base_command.chain or not isinstance(cmd, MultiCommand): - return - if register: - hint = 'It is not possible to add multi commands as children to ' \ - 'another multi command that is in chain mode' - else: - hint = 'Found a multi command as subcommand to a multi command ' \ - 'that is in chain mode. This is not supported' - raise RuntimeError('%s. Command "%s" is set to chain and "%s" was ' - 'added as subcommand but it in itself is a ' - 'multi command. ("%s" is a %s within a chained ' - '%s named "%s").' % ( - hint, base_command.name, cmd_name, - cmd_name, cmd.__class__.__name__, - base_command.__class__.__name__, - base_command.name)) - - -def batch(iterable, batch_size): - return list(zip(*repeat(iter(iterable), batch_size))) - - -def invoke_param_callback(callback, ctx, param, value): - code = getattr(callback, '__code__', None) - args = getattr(code, 'co_argcount', 3) - - if args < 3: - # This will become a warning in Click 3.0: - from warnings import warn - warn(Warning('Invoked legacy parameter callback "%s". The new ' - 'signature for such callbacks starting with ' - 'click 2.0 is (ctx, param, value).' - % callback), stacklevel=3) - return callback(ctx, value) - return callback(ctx, param, value) - - -@contextmanager -def augment_usage_errors(ctx, param=None): - """Context manager that attaches extra information to exceptions that - fly. 
- """ - try: - yield - except BadParameter as e: - if e.ctx is None: - e.ctx = ctx - if param is not None and e.param is None: - e.param = param - raise - except UsageError as e: - if e.ctx is None: - e.ctx = ctx - raise - - -def iter_params_for_processing(invocation_order, declaration_order): - """Given a sequence of parameters in the order as should be considered - for processing and an iterable of parameters that exist, this returns - a list in the correct order as they should be processed. - """ - def sort_key(item): - try: - idx = invocation_order.index(item) - except ValueError: - idx = float('inf') - return (not item.is_eager, idx) - - return sorted(declaration_order, key=sort_key) - - -class Context(object): - """The context is a special internal object that holds state relevant - for the script execution at every single level. It's normally invisible - to commands unless they opt-in to getting access to it. - - The context is useful as it can pass internal objects around and can - control special execution features such as reading data from - environment variables. - - A context can be used as context manager in which case it will call - :meth:`close` on teardown. - - .. versionadded:: 2.0 - Added the `resilient_parsing`, `help_option_names`, - `token_normalize_func` parameters. - - .. versionadded:: 3.0 - Added the `allow_extra_args` and `allow_interspersed_args` - parameters. - - .. versionadded:: 4.0 - Added the `color`, `ignore_unknown_options`, and - `max_content_width` parameters. - - :param command: the command class for this context. - :param parent: the parent context. - :param info_name: the info name for this invocation. Generally this - is the most descriptive name for the script or - command. For the toplevel script it is usually - the name of the script, for commands below it it's - the name of the script. - :param obj: an arbitrary object of user data. - :param auto_envvar_prefix: the prefix to use for automatic environment - variables. If this is `None` then reading - from environment variables is disabled. This - does not affect manually set environment - variables which are always read. - :param default_map: a dictionary (like object) with default values - for parameters. - :param terminal_width: the width of the terminal. The default is - inherit from parent context. If no context - defines the terminal width then auto - detection will be applied. - :param max_content_width: the maximum width for content rendered by - Click (this currently only affects help - pages). This defaults to 80 characters if - not overridden. In other words: even if the - terminal is larger than that, Click will not - format things wider than 80 characters by - default. In addition to that, formatters might - add some safety mapping on the right. - :param resilient_parsing: if this flag is enabled then Click will - parse without any interactivity or callback - invocation. Default values will also be - ignored. This is useful for implementing - things such as completion support. - :param allow_extra_args: if this is set to `True` then extra arguments - at the end will not raise an error and will be - kept on the context. The default is to inherit - from the command. - :param allow_interspersed_args: if this is set to `False` then options - and arguments cannot be mixed. The - default is to inherit from the command. - :param ignore_unknown_options: instructs click to ignore options it does - not know and keeps them for later - processing. 
-    :param help_option_names: optionally a list of strings that define how
-                              the default help parameter is named. The
-                              default is ``['--help']``.
-    :param token_normalize_func: an optional function that is used to
-                                 normalize tokens (options, choices,
-                                 etc.). This for instance can be used to
-                                 implement case insensitive behavior.
-    :param color: controls if the terminal supports ANSI colors or not. The
-                  default is autodetection. This is only needed if ANSI
-                  codes are used in texts that Click prints which is by
-                  default not the case. This for instance would affect
-                  help output.
-    """
-
-    def __init__(self, command, parent=None, info_name=None, obj=None,
-                 auto_envvar_prefix=None, default_map=None,
-                 terminal_width=None, max_content_width=None,
-                 resilient_parsing=False, allow_extra_args=None,
-                 allow_interspersed_args=None,
-                 ignore_unknown_options=None, help_option_names=None,
-                 token_normalize_func=None, color=None):
-        #: the parent context or `None` if none exists.
-        self.parent = parent
-        #: the :class:`Command` for this context.
-        self.command = command
-        #: the descriptive information name
-        self.info_name = info_name
-        #: the parsed parameters except if the value is hidden in which
-        #: case it's not remembered.
-        self.params = {}
-        #: the leftover arguments.
-        self.args = []
-        #: protected arguments. These are arguments that are prepended
-        #: to `args` when certain parsing scenarios are encountered but
-        #: must never be propagated to other arguments. This is used
-        #: to implement nested parsing.
-        self.protected_args = []
-        if obj is None and parent is not None:
-            obj = parent.obj
-        #: the user object stored.
-        self.obj = obj
-        self._meta = getattr(parent, 'meta', {})
-
-        #: A dictionary (-like object) with defaults for parameters.
-        if default_map is None \
-                and parent is not None \
-                and parent.default_map is not None:
-            default_map = parent.default_map.get(info_name)
-        self.default_map = default_map
-
-        #: This flag indicates if a subcommand is going to be executed. A
-        #: group callback can use this information to figure out if it's
-        #: being executed directly or because the execution flow passes
-        #: onwards to a subcommand. By default it's None, but it can be
-        #: the name of the subcommand to execute.
-        #:
-        #: If chaining is enabled this will be set to ``'*'`` in case
-        #: any commands are executed. It is however not possible to
-        #: figure out which ones. If you require this knowledge you
-        #: should use a :func:`resultcallback`.
-        self.invoked_subcommand = None
-
-        if terminal_width is None and parent is not None:
-            terminal_width = parent.terminal_width
-        #: The width of the terminal (None is autodetection).
-        self.terminal_width = terminal_width
-
-        if max_content_width is None and parent is not None:
-            max_content_width = parent.max_content_width
-        #: The maximum width of formatted content (None implies a sensible
-        #: default which is 80 for most things).
-        self.max_content_width = max_content_width
-
-        if allow_extra_args is None:
-            allow_extra_args = command.allow_extra_args
-        #: Indicates if the context allows extra args or if it should
-        #: fail on parsing.
-        #:
-        #: .. versionadded:: 3.0
-        self.allow_extra_args = allow_extra_args
-
-        if allow_interspersed_args is None:
-            allow_interspersed_args = command.allow_interspersed_args
-        #: Indicates if the context allows mixing of arguments and
-        #: options or not.
-        #:
-        #: ..
versionadded:: 3.0 - self.allow_interspersed_args = allow_interspersed_args - - if ignore_unknown_options is None: - ignore_unknown_options = command.ignore_unknown_options - #: Instructs click to ignore options that a command does not - #: understand and will store it on the context for later - #: processing. This is primarily useful for situations where you - #: want to call into external programs. Generally this pattern is - #: strongly discouraged because it's not possibly to losslessly - #: forward all arguments. - #: - #: .. versionadded:: 4.0 - self.ignore_unknown_options = ignore_unknown_options - - if help_option_names is None: - if parent is not None: - help_option_names = parent.help_option_names - else: - help_option_names = ['--help'] - - #: The names for the help options. - self.help_option_names = help_option_names - - if token_normalize_func is None and parent is not None: - token_normalize_func = parent.token_normalize_func - - #: An optional normalization function for tokens. This is - #: options, choices, commands etc. - self.token_normalize_func = token_normalize_func - - #: Indicates if resilient parsing is enabled. In that case Click - #: will do its best to not cause any failures and default values - #: will be ignored. Useful for completion. - self.resilient_parsing = resilient_parsing - - # If there is no envvar prefix yet, but the parent has one and - # the command on this level has a name, we can expand the envvar - # prefix automatically. - if auto_envvar_prefix is None: - if parent is not None \ - and parent.auto_envvar_prefix is not None and \ - self.info_name is not None: - auto_envvar_prefix = '%s_%s' % (parent.auto_envvar_prefix, - self.info_name.upper()) - else: - auto_envvar_prefix = auto_envvar_prefix.upper() - self.auto_envvar_prefix = auto_envvar_prefix - - if color is None and parent is not None: - color = parent.color - - #: Controls if styling output is wanted or not. - self.color = color - - self._close_callbacks = [] - self._depth = 0 - - def __enter__(self): - self._depth += 1 - push_context(self) - return self - - def __exit__(self, exc_type, exc_value, tb): - self._depth -= 1 - if self._depth == 0: - self.close() - pop_context() - - @contextmanager - def scope(self, cleanup=True): - """This helper method can be used with the context object to promote - it to the current thread local (see :func:`get_current_context`). - The default behavior of this is to invoke the cleanup functions which - can be disabled by setting `cleanup` to `False`. The cleanup - functions are typically used for things such as closing file handles. - - If the cleanup is intended the context object can also be directly - used as a context manager. - - Example usage:: - - with ctx.scope(): - assert get_current_context() is ctx - - This is equivalent:: - - with ctx: - assert get_current_context() is ctx - - .. versionadded:: 5.0 - - :param cleanup: controls if the cleanup functions should be run or - not. The default is to run these functions. In - some situations the context only wants to be - temporarily pushed in which case this can be disabled. - Nested pushes automatically defer the cleanup. - """ - if not cleanup: - self._depth += 1 - try: - with self as rv: - yield rv - finally: - if not cleanup: - self._depth -= 1 - - @property - def meta(self): - """This is a dictionary which is shared with all the contexts - that are nested. It exists so that click utilities can store some - state here if they need to. 
It is however the responsibility of - that code to manage this dictionary well. - - The keys are supposed to be unique dotted strings. For instance - module paths are a good choice for it. What is stored in there is - irrelevant for the operation of click. However what is important is - that code that places data here adheres to the general semantics of - the system. - - Example usage:: - - LANG_KEY = __name__ + '.lang' - - def set_language(value): - ctx = get_current_context() - ctx.meta[LANG_KEY] = value - - def get_language(): - return get_current_context().meta.get(LANG_KEY, 'en_US') - - .. versionadded:: 5.0 - """ - return self._meta - - def make_formatter(self): - """Creates the formatter for the help and usage output.""" - return HelpFormatter(width=self.terminal_width, - max_width=self.max_content_width) - - def call_on_close(self, f): - """This decorator remembers a function as callback that should be - executed when the context tears down. This is most useful to bind - resource handling to the script execution. For instance, file objects - opened by the :class:`File` type will register their close callbacks - here. - - :param f: the function to execute on teardown. - """ - self._close_callbacks.append(f) - return f - - def close(self): - """Invokes all close callbacks.""" - for cb in self._close_callbacks: - cb() - self._close_callbacks = [] - - @property - def command_path(self): - """The computed command path. This is used for the ``usage`` - information on the help page. It's automatically created by - combining the info names of the chain of contexts to the root. - """ - rv = '' - if self.info_name is not None: - rv = self.info_name - if self.parent is not None: - rv = self.parent.command_path + ' ' + rv - return rv.lstrip() - - def find_root(self): - """Finds the outermost context.""" - node = self - while node.parent is not None: - node = node.parent - return node - - def find_object(self, object_type): - """Finds the closest object of a given type.""" - node = self - while node is not None: - if isinstance(node.obj, object_type): - return node.obj - node = node.parent - - def ensure_object(self, object_type): - """Like :meth:`find_object` but sets the innermost object to a - new instance of `object_type` if it does not exist. - """ - rv = self.find_object(object_type) - if rv is None: - self.obj = rv = object_type() - return rv - - def lookup_default(self, name): - """Looks up the default for a parameter name. This by default - looks into the :attr:`default_map` if available. - """ - if self.default_map is not None: - rv = self.default_map.get(name) - if callable(rv): - rv = rv() - return rv - - def fail(self, message): - """Aborts the execution of the program with a specific error - message. - - :param message: the error message to fail with. - """ - raise UsageError(message, self) - - def abort(self): - """Aborts the script.""" - raise Abort() - - def exit(self, code=0): - """Exits the application with a given exit code.""" - raise Exit(code) - - def get_usage(self): - """Helper method to get formatted usage string for the current - context and command. - """ - return self.command.get_usage(self) - - def get_help(self): - """Helper method to get formatted help page for the current - context and command. - """ - return self.command.get_help(self) - - def invoke(*args, **kwargs): - """Invokes a command callback in exactly the way it expects. There - are two ways to invoke this method: - - 1. 
the first argument can be a callback and all other arguments and - keyword arguments are forwarded directly to the function. - 2. the first argument is a click command object. In that case all - arguments are forwarded as well but proper click parameters - (options and click arguments) must be keyword arguments and Click - will fill in defaults. - - Note that before Click 3.2 keyword arguments were not properly filled - in against the intention of this code and no context was created. For - more information about this change and why it was done in a bugfix - release see :ref:`upgrade-to-3.2`. - """ - self, callback = args[:2] - ctx = self - - # It's also possible to invoke another command which might or - # might not have a callback. In that case we also fill - # in defaults and make a new context for this command. - if isinstance(callback, Command): - other_cmd = callback - callback = other_cmd.callback - ctx = Context(other_cmd, info_name=other_cmd.name, parent=self) - if callback is None: - raise TypeError('The given command does not have a ' - 'callback that can be invoked.') - - for param in other_cmd.params: - if param.name not in kwargs and param.expose_value: - kwargs[param.name] = param.get_default(ctx) - - args = args[2:] - with augment_usage_errors(self): - with ctx: - return callback(*args, **kwargs) - - def forward(*args, **kwargs): - """Similar to :meth:`invoke` but fills in default keyword - arguments from the current context if the other command expects - it. This cannot invoke callbacks directly, only other commands. - """ - self, cmd = args[:2] - - # It's also possible to invoke another command which might or - # might not have a callback. - if not isinstance(cmd, Command): - raise TypeError('Callback is not a command.') - - for param in self.params: - if param not in kwargs: - kwargs[param] = self.params[param] - - return self.invoke(cmd, **kwargs) - - -class BaseCommand(object): - """The base command implements the minimal API contract of commands. - Most code will never use this as it does not implement a lot of useful - functionality but it can act as the direct subclass of alternative - parsing methods that do not depend on the Click parser. - - For instance, this can be used to bridge Click and other systems like - argparse or docopt. - - Because base commands do not implement a lot of the API that other - parts of Click take for granted, they are not supported for all - operations. For instance, they cannot be used with the decorators - usually and they have no built-in callback system. - - .. versionchanged:: 2.0 - Added the `context_settings` parameter. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - """ - #: the default for the :attr:`Context.allow_extra_args` flag. - allow_extra_args = False - #: the default for the :attr:`Context.allow_interspersed_args` flag. - allow_interspersed_args = True - #: the default for the :attr:`Context.ignore_unknown_options` flag. - ignore_unknown_options = False - - def __init__(self, name, context_settings=None): - #: the name the command thinks it has. Upon registering a command - #: on a :class:`Group` the group will default the command name - #: with this information. You should instead use the - #: :class:`Context`\'s :attr:`~Context.info_name` attribute. 
-        self.name = name
-        if context_settings is None:
-            context_settings = {}
-        #: an optional dictionary with defaults passed to the context.
-        self.context_settings = context_settings
-
-    def get_usage(self, ctx):
-        raise NotImplementedError('Base commands cannot get usage')
-
-    def get_help(self, ctx):
-        raise NotImplementedError('Base commands cannot get help')
-
-    def make_context(self, info_name, args, parent=None, **extra):
-        """This function when given an info name and arguments will kick
-        off the parsing and create a new :class:`Context`. It does not
-        invoke the actual command callback though.
-
-        :param info_name: the info name for this invocation. Generally this
-                          is the most descriptive name for the script or
-                          command. For the toplevel script it's usually
-                          the name of the script, for commands below it it's
-                          the name of the command.
-        :param args: the arguments to parse as list of strings.
-        :param parent: the parent context if available.
-        :param extra: extra keyword arguments forwarded to the context
-                      constructor.
-        """
-        for key, value in iteritems(self.context_settings):
-            if key not in extra:
-                extra[key] = value
-        ctx = Context(self, info_name=info_name, parent=parent, **extra)
-        with ctx.scope(cleanup=False):
-            self.parse_args(ctx, args)
-        return ctx
-
-    def parse_args(self, ctx, args):
-        """Given a context and a list of arguments this creates the parser
-        and parses the arguments, then modifies the context as necessary.
-        This is automatically invoked by :meth:`make_context`.
-        """
-        raise NotImplementedError('Base commands do not know how to parse '
-                                  'arguments.')
-
-    def invoke(self, ctx):
-        """Given a context, this invokes the command. The default
-        implementation is raising a not implemented error.
-        """
-        raise NotImplementedError('Base commands are not invokable by default')
-
-    def main(self, args=None, prog_name=None, complete_var=None,
-             standalone_mode=True, **extra):
-        """This is the way to invoke a script with all the bells and
-        whistles as a command line application. This will always terminate
-        the application after a call. If this is not wanted, ``SystemExit``
-        needs to be caught.
-
-        This method is also available by directly calling the instance of
-        a :class:`Command`.
-
-        .. versionadded:: 3.0
-           Added the `standalone_mode` flag to control the standalone mode.
-
-        :param args: the arguments that should be used for parsing. If not
-                     provided, ``sys.argv[1:]`` is used.
-        :param prog_name: the program name that should be used. By default
-                          the program name is constructed by taking the file
-                          name from ``sys.argv[0]``.
-        :param complete_var: the environment variable that controls the
-                             bash completion support. The default is
-                             ``"_<prog_name>_COMPLETE"`` with prog_name in
-                             uppercase.
-        :param standalone_mode: the default behavior is to invoke the script
-                                in standalone mode. Click will then
-                                handle exceptions and convert them into
-                                error messages and the function will never
-                                return but shut down the interpreter. If
-                                this is set to `False` they will be
-                                propagated to the caller and the return
-                                value of this function is the return value
-                                of :meth:`invoke`.
-        :param extra: extra keyword arguments are forwarded to the context
-                      constructor. See :class:`Context` for more information.
-        """
-        # If we are in Python 3, we will verify that the environment is
-        # sane at this point or reject further execution to avoid a
-        # broken script.
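Given the `standalone_mode` semantics documented above, a hedged usage sketch (not part of the diff; it assumes only the behavior the docstring describes):

import click

@click.command()
@click.option('--n', default=1)
def cli(n):
    return n * 2

# With standalone_mode=False, main() propagates exceptions and hands the
# callback's return value back instead of raising SystemExit:
rv = cli.main(['--n', '3'], standalone_mode=False)
assert rv == 6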
- if not PY2: - _verify_python3_env() - else: - _check_for_unicode_literals() - - if args is None: - args = get_os_args() - else: - args = list(args) - - if prog_name is None: - prog_name = make_str(os.path.basename( - sys.argv and sys.argv[0] or __file__)) - - # Hook for the Bash completion. This only activates if the Bash - # completion is actually enabled, otherwise this is quite a fast - # noop. - _bashcomplete(self, prog_name, complete_var) - - try: - try: - with self.make_context(prog_name, args, **extra) as ctx: - rv = self.invoke(ctx) - if not standalone_mode: - return rv - # it's not safe to `ctx.exit(rv)` here! - # note that `rv` may actually contain data like "1" which - # has obvious effects - # more subtle case: `rv=[None, None]` can come out of - # chained commands which all returned `None` -- so it's not - # even always obvious that `rv` indicates success/failure - # by its truthiness/falsiness - ctx.exit() - except (EOFError, KeyboardInterrupt): - echo(file=sys.stderr) - raise Abort() - except ClickException as e: - if not standalone_mode: - raise - e.show() - sys.exit(e.exit_code) - except IOError as e: - if e.errno == errno.EPIPE: - sys.stdout = PacifyFlushWrapper(sys.stdout) - sys.stderr = PacifyFlushWrapper(sys.stderr) - sys.exit(1) - else: - raise - except Exit as e: - if standalone_mode: - sys.exit(e.exit_code) - else: - # in non-standalone mode, return the exit code - # note that this is only reached if `self.invoke` above raises - # an Exit explicitly -- thus bypassing the check there which - # would return its result - # the results of non-standalone execution may therefore be - # somewhat ambiguous: if there are codepaths which lead to - # `ctx.exit(1)` and to `return 1`, the caller won't be able to - # tell the difference between the two - return e.exit_code - except Abort: - if not standalone_mode: - raise - echo('Aborted!', file=sys.stderr) - sys.exit(1) - - def __call__(self, *args, **kwargs): - """Alias for :meth:`main`.""" - return self.main(*args, **kwargs) - - -class Command(BaseCommand): - """Commands are the basic building block of command line interfaces in - Click. A basic command handles command line parsing and might dispatch - more parsing to commands nested below it. - - .. versionchanged:: 2.0 - Added the `context_settings` parameter. - - :param name: the name of the command to use unless a group overrides it. - :param context_settings: an optional dictionary with defaults that are - passed to the context object. - :param callback: the callback to invoke. This is optional. - :param params: the parameters to register with this command. This can - be either :class:`Option` or :class:`Argument` objects. - :param help: the help string to use for this command. - :param epilog: like the help string but it's printed at the end of the - help page after everything else. - :param short_help: the short help to use for this command. This is - shown on the command listing of the parent command. - :param add_help_option: by default each command registers a ``--help`` - option. This can be disabled by this parameter. - :param hidden: hide this command from help outputs. - - :param deprecated: issues a message indicating that - the command is deprecated. 
- """ - - def __init__(self, name, context_settings=None, callback=None, - params=None, help=None, epilog=None, short_help=None, - options_metavar='[OPTIONS]', add_help_option=True, - hidden=False, deprecated=False): - BaseCommand.__init__(self, name, context_settings) - #: the callback to execute when the command fires. This might be - #: `None` in which case nothing happens. - self.callback = callback - #: the list of parameters for this command in the order they - #: should show up in the help page and execute. Eager parameters - #: will automatically be handled before non eager ones. - self.params = params or [] - # if a form feed (page break) is found in the help text, truncate help - # text to the content preceding the first form feed - if help and '\f' in help: - help = help.split('\f', 1)[0] - self.help = help - self.epilog = epilog - self.options_metavar = options_metavar - self.short_help = short_help - self.add_help_option = add_help_option - self.hidden = hidden - self.deprecated = deprecated - - def get_usage(self, ctx): - formatter = ctx.make_formatter() - self.format_usage(ctx, formatter) - return formatter.getvalue().rstrip('\n') - - def get_params(self, ctx): - rv = self.params - help_option = self.get_help_option(ctx) - if help_option is not None: - rv = rv + [help_option] - return rv - - def format_usage(self, ctx, formatter): - """Writes the usage line into the formatter.""" - pieces = self.collect_usage_pieces(ctx) - formatter.write_usage(ctx.command_path, ' '.join(pieces)) - - def collect_usage_pieces(self, ctx): - """Returns all the pieces that go into the usage line and returns - it as a list of strings. - """ - rv = [self.options_metavar] - for param in self.get_params(ctx): - rv.extend(param.get_usage_pieces(ctx)) - return rv - - def get_help_option_names(self, ctx): - """Returns the names for the help option.""" - all_names = set(ctx.help_option_names) - for param in self.params: - all_names.difference_update(param.opts) - all_names.difference_update(param.secondary_opts) - return all_names - - def get_help_option(self, ctx): - """Returns the help option object.""" - help_options = self.get_help_option_names(ctx) - if not help_options or not self.add_help_option: - return - - def show_help(ctx, param, value): - if value and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - return Option(help_options, is_flag=True, - is_eager=True, expose_value=False, - callback=show_help, - help='Show this message and exit.') - - def make_parser(self, ctx): - """Creates the underlying option parser for this command.""" - parser = OptionParser(ctx) - for param in self.get_params(ctx): - param.add_to_parser(parser, ctx) - return parser - - def get_help(self, ctx): - """Formats the help into a string and returns it. This creates a - formatter and will call into the following formatting methods: - """ - formatter = ctx.make_formatter() - self.format_help(ctx, formatter) - return formatter.getvalue().rstrip('\n') - - def get_short_help_str(self, limit=45): - """Gets short help for the command or makes it by shortening the long help string.""" - return self.short_help or self.help and make_default_short_help(self.help, limit) or '' - - def format_help(self, ctx, formatter): - """Writes the help into the formatter if it exists. 
- - This calls into the following methods: - - - :meth:`format_usage` - - :meth:`format_help_text` - - :meth:`format_options` - - :meth:`format_epilog` - """ - self.format_usage(ctx, formatter) - self.format_help_text(ctx, formatter) - self.format_options(ctx, formatter) - self.format_epilog(ctx, formatter) - - def format_help_text(self, ctx, formatter): - """Writes the help text to the formatter if it exists.""" - if self.help: - formatter.write_paragraph() - with formatter.indentation(): - help_text = self.help - if self.deprecated: - help_text += DEPRECATED_HELP_NOTICE - formatter.write_text(help_text) - elif self.deprecated: - formatter.write_paragraph() - with formatter.indentation(): - formatter.write_text(DEPRECATED_HELP_NOTICE) - - def format_options(self, ctx, formatter): - """Writes all the options into the formatter if they exist.""" - opts = [] - for param in self.get_params(ctx): - rv = param.get_help_record(ctx) - if rv is not None: - opts.append(rv) - - if opts: - with formatter.section('Options'): - formatter.write_dl(opts) - - def format_epilog(self, ctx, formatter): - """Writes the epilog into the formatter if it exists.""" - if self.epilog: - formatter.write_paragraph() - with formatter.indentation(): - formatter.write_text(self.epilog) - - def parse_args(self, ctx, args): - parser = self.make_parser(ctx) - opts, args, param_order = parser.parse_args(args=args) - - for param in iter_params_for_processing( - param_order, self.get_params(ctx)): - value, args = param.handle_parse_result(ctx, opts, args) - - if args and not ctx.allow_extra_args and not ctx.resilient_parsing: - ctx.fail('Got unexpected extra argument%s (%s)' - % (len(args) != 1 and 's' or '', - ' '.join(map(make_str, args)))) - - ctx.args = args - return args - - def invoke(self, ctx): - """Given a context, this invokes the attached callback (if it exists) - in the right way. - """ - _maybe_show_deprecated_notice(self) - if self.callback is not None: - return ctx.invoke(self.callback, **ctx.params) - - -class MultiCommand(Command): - """A multi command is the basic implementation of a command that - dispatches to subcommands. The most common version is the - :class:`Group`. - - :param invoke_without_command: this controls how the multi command itself - is invoked. By default it's only invoked - if a subcommand is provided. - :param no_args_is_help: this controls what happens if no arguments are - provided. This option is enabled by default if - `invoke_without_command` is disabled or disabled - if it's enabled. If enabled this will add - ``--help`` as argument if no arguments are - passed. - :param subcommand_metavar: the string that is used in the documentation - to indicate the subcommand place. - :param chain: if this is set to `True` chaining of multiple subcommands - is enabled. This restricts the form of commands in that - they cannot have optional arguments but it allows - multiple commands to be chained together. - :param result_callback: the result callback to attach to this multi - command. 
- """ - allow_extra_args = True - allow_interspersed_args = False - - def __init__(self, name=None, invoke_without_command=False, - no_args_is_help=None, subcommand_metavar=None, - chain=False, result_callback=None, **attrs): - Command.__init__(self, name, **attrs) - if no_args_is_help is None: - no_args_is_help = not invoke_without_command - self.no_args_is_help = no_args_is_help - self.invoke_without_command = invoke_without_command - if subcommand_metavar is None: - if chain: - subcommand_metavar = SUBCOMMANDS_METAVAR - else: - subcommand_metavar = SUBCOMMAND_METAVAR - self.subcommand_metavar = subcommand_metavar - self.chain = chain - #: The result callback that is stored. This can be set or - #: overridden with the :func:`resultcallback` decorator. - self.result_callback = result_callback - - if self.chain: - for param in self.params: - if isinstance(param, Argument) and not param.required: - raise RuntimeError('Multi commands in chain mode cannot ' - 'have optional arguments.') - - def collect_usage_pieces(self, ctx): - rv = Command.collect_usage_pieces(self, ctx) - rv.append(self.subcommand_metavar) - return rv - - def format_options(self, ctx, formatter): - Command.format_options(self, ctx, formatter) - self.format_commands(ctx, formatter) - - def resultcallback(self, replace=False): - """Adds a result callback to the chain command. By default if a - result callback is already registered this will chain them but - this can be disabled with the `replace` parameter. The result - callback is invoked with the return value of the subcommand - (or the list of return values from all subcommands if chaining - is enabled) as well as the parameters as they would be passed - to the main callback. - - Example:: - - @click.group() - @click.option('-i', '--input', default=23) - def cli(input): - return 42 - - @cli.resultcallback() - def process_result(result, input): - return result + input - - .. versionadded:: 3.0 - - :param replace: if set to `True` an already existing result - callback will be removed. - """ - def decorator(f): - old_callback = self.result_callback - if old_callback is None or replace: - self.result_callback = f - return f - def function(__value, *args, **kwargs): - return f(old_callback(__value, *args, **kwargs), - *args, **kwargs) - self.result_callback = rv = update_wrapper(function, f) - return rv - return decorator - - def format_commands(self, ctx, formatter): - """Extra format methods for multi methods that adds all the commands - after the options. - """ - commands = [] - for subcommand in self.list_commands(ctx): - cmd = self.get_command(ctx, subcommand) - # What is this, the tool lied about a command. 
Ignore it - if cmd is None: - continue - if cmd.hidden: - continue - - commands.append((subcommand, cmd)) - - # allow for 3 times the default spacing - if len(commands): - limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands) - - rows = [] - for subcommand, cmd in commands: - help = cmd.get_short_help_str(limit) - rows.append((subcommand, help)) - - if rows: - with formatter.section('Commands'): - formatter.write_dl(rows) - - def parse_args(self, ctx, args): - if not args and self.no_args_is_help and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - - rest = Command.parse_args(self, ctx, args) - if self.chain: - ctx.protected_args = rest - ctx.args = [] - elif rest: - ctx.protected_args, ctx.args = rest[:1], rest[1:] - - return ctx.args - - def invoke(self, ctx): - def _process_result(value): - if self.result_callback is not None: - value = ctx.invoke(self.result_callback, value, - **ctx.params) - return value - - if not ctx.protected_args: - # If we are invoked without command the chain flag controls - # how this happens. If we are not in chain mode, the return - # value here is the return value of the command. - # If however we are in chain mode, the return value is the - # return value of the result processor invoked with an empty - # list (which means that no subcommand actually was executed). - if self.invoke_without_command: - if not self.chain: - return Command.invoke(self, ctx) - with ctx: - Command.invoke(self, ctx) - return _process_result([]) - ctx.fail('Missing command.') - - # Fetch args back out - args = ctx.protected_args + ctx.args - ctx.args = [] - ctx.protected_args = [] - - # If we're not in chain mode, we only allow the invocation of a - # single command but we also inform the current context about the - # name of the command to invoke. - if not self.chain: - # Make sure the context is entered so we do not clean up - # resources until the result processor has worked. - with ctx: - cmd_name, cmd, args = self.resolve_command(ctx, args) - ctx.invoked_subcommand = cmd_name - Command.invoke(self, ctx) - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx) - with sub_ctx: - return _process_result(sub_ctx.command.invoke(sub_ctx)) - - # In chain mode we create the contexts step by step, but after the - # base command has been invoked. Because at that point we do not - # know the subcommands yet, the invoked subcommand attribute is - # set to ``*`` to inform the command that subcommands are executed - # but nothing else. - with ctx: - ctx.invoked_subcommand = args and '*' or None - Command.invoke(self, ctx) - - # Otherwise we make every single context and invoke them in a - # chain. In that case the return value to the result processor - # is the list of all invoked subcommand's results. - contexts = [] - while args: - cmd_name, cmd, args = self.resolve_command(ctx, args) - sub_ctx = cmd.make_context(cmd_name, args, parent=ctx, - allow_extra_args=True, - allow_interspersed_args=False) - contexts.append(sub_ctx) - args, sub_ctx.args = sub_ctx.args, [] - - rv = [] - for sub_ctx in contexts: - with sub_ctx: - rv.append(sub_ctx.command.invoke(sub_ctx)) - return _process_result(rv) - - def resolve_command(self, ctx, args): - cmd_name = make_str(args[0]) - original_cmd_name = cmd_name - - # Get the command - cmd = self.get_command(ctx, cmd_name) - - # If we can't find the command but there is a normalization - # function available, we try with that one. 
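The normalization fallback used by ``resolve_command`` below can be exercised through ``token_normalize_func``; a small sketch (command name hypothetical)::

    import click

    @click.group(context_settings={'token_normalize_func': lambda s: s.lower()})
    def cli():
        pass

    @cli.command()
    def sync():
        click.echo('synced')

    # `cli SYNC` resolves to `sync` because the token is normalized first.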
- if cmd is None and ctx.token_normalize_func is not None: - cmd_name = ctx.token_normalize_func(cmd_name) - cmd = self.get_command(ctx, cmd_name) - - # If we don't find the command we want to show an error message - # to the user that it was not provided. However, there is - # something else we should do: if the first argument looks like - # an option we want to kick off parsing again for arguments to - # resolve things like --help which now should go to the main - # place. - if cmd is None and not ctx.resilient_parsing: - if split_opt(cmd_name)[0]: - self.parse_args(ctx, ctx.args) - ctx.fail('No such command "%s".' % original_cmd_name) - - return cmd_name, cmd, args[1:] - - def get_command(self, ctx, cmd_name): - """Given a context and a command name, this returns a - :class:`Command` object if it exists or returns `None`. - """ - raise NotImplementedError() - - def list_commands(self, ctx): - """Returns a list of subcommand names in the order they should - appear. - """ - return [] - - -class Group(MultiCommand): - """A group allows a command to have subcommands attached. This is the - most common way to implement nesting in Click. - - :param commands: a dictionary of commands. - """ - - def __init__(self, name=None, commands=None, **attrs): - MultiCommand.__init__(self, name, **attrs) - #: the registered subcommands by their exported names. - self.commands = commands or {} - - def add_command(self, cmd, name=None): - """Registers another :class:`Command` with this group. If the name - is not provided, the name of the command is used. - """ - name = name or cmd.name - if name is None: - raise TypeError('Command has no name.') - _check_multicommand(self, name, cmd, register=True) - self.commands[name] = cmd - - def command(self, *args, **kwargs): - """A shortcut decorator for declaring and attaching a command to - the group. This takes the same arguments as :func:`command` but - immediately registers the created command with this instance by - calling into :meth:`add_command`. - """ - def decorator(f): - cmd = command(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - return decorator - - def group(self, *args, **kwargs): - """A shortcut decorator for declaring and attaching a group to - the group. This takes the same arguments as :func:`group` but - immediately registers the created command with this instance by - calling into :meth:`add_command`. - """ - def decorator(f): - cmd = group(*args, **kwargs)(f) - self.add_command(cmd) - return cmd - return decorator - - def get_command(self, ctx, cmd_name): - return self.commands.get(cmd_name) - - def list_commands(self, ctx): - return sorted(self.commands) - - -class CommandCollection(MultiCommand): - """A command collection is a multi command that merges multiple multi - commands together into one. This is a straightforward implementation - that accepts a list of different multi commands as sources and - provides all the commands for each of them. - """ - - def __init__(self, name=None, sources=None, **attrs): - MultiCommand.__init__(self, name, **attrs) - #: The list of registered multi commands. 
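A short sketch of merging groups with :class:`CommandCollection` (group and command names hypothetical)::

    import click

    base = click.Group('base')
    tools = click.Group('tools')

    @base.command()
    def init():
        click.echo('init')

    @tools.command()
    def lint():
        click.echo('lint')

    cli = click.CommandCollection(sources=[base, tools])
    # `cli --help` lists `init` and `lint` merged from both sources.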
- self.sources = sources or [] - - def add_source(self, multi_cmd): - """Adds a new multi command to the chain dispatcher.""" - self.sources.append(multi_cmd) - - def get_command(self, ctx, cmd_name): - for source in self.sources: - rv = source.get_command(ctx, cmd_name) - if rv is not None: - if self.chain: - _check_multicommand(self, cmd_name, rv) - return rv - - def list_commands(self, ctx): - rv = set() - for source in self.sources: - rv.update(source.list_commands(ctx)) - return sorted(rv) - - -class Parameter(object): - r"""A parameter to a command comes in two versions: they are either - :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently - not supported by design as some of the internals for parsing are - intentionally not finalized. - - Some settings are supported by both options and arguments. - - .. versionchanged:: 2.0 - Changed signature for parameter callback to also be passed the - parameter. In Click 2.0, the old callback format will still work, - but it will raise a warning to give you a chance to migrate the - code more easily. - - :param param_decls: the parameter declarations for this option or - argument. This is a list of flags or argument - names. - :param type: the type that should be used. Either a :class:`ParamType` - or a Python type. The latter is converted into the former - automatically if supported. - :param required: controls if this is optional or not. - :param default: the default value if omitted. This can also be a callable, - in which case it's invoked when the default is needed - without any arguments. - :param callback: a callback that should be executed after the parameter - was matched. This is called as ``fn(ctx, param, - value)`` and needs to return the value. Before Click - 2.0, the signature was ``(ctx, value)``. - :param nargs: the number of arguments to match. If not ``1`` the return - value is a tuple instead of a single value. The default for - nargs is ``1`` (except if the type is a tuple, then it's - the arity of the tuple). - :param metavar: how the value is represented in the help page. - :param expose_value: if this is `True` then the value is passed onwards - to the command callback and stored on the context, - otherwise it's skipped. - :param is_eager: eager values are processed before non-eager ones. This - should not be set for arguments or it will invert the - order of processing. - :param envvar: a string or list of strings that are environment variables - that should be checked. - """ - param_type_name = 'parameter' - - def __init__(self, param_decls=None, type=None, required=False, - default=None, callback=None, nargs=None, metavar=None, - expose_value=True, is_eager=False, envvar=None, - autocompletion=None): - self.name, self.opts, self.secondary_opts = \ - self._parse_decls(param_decls or (), expose_value) - - self.type = convert_type(type, default) - - # Default nargs to what the type tells us if we have that - # information available. - if nargs is None: - if self.type.is_composite: - nargs = self.type.arity - else: - nargs = 1 - - self.required = required - self.callback = callback - self.nargs = nargs - self.multiple = False - self.expose_value = expose_value - self.default = default - self.is_eager = is_eager - self.metavar = metavar - self.envvar = envvar - self.autocompletion = autocompletion - - @property - def human_readable_name(self): - """Returns the human readable name of this parameter. This is the - same as the name for options, but the metavar for arguments.
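A sketch of the value sources documented above: the command line wins, then the ``envvar``, then the (possibly callable) default (option and variable names hypothetical)::

    import click

    @click.command()
    @click.option('--workers', type=int, default=lambda: 4,
                  envvar='APP_WORKERS')
    def run(workers):
        click.echo('workers=%d' % workers)

    # Precedence: command line value, then the APP_WORKERS environment
    # variable, then the callable default (invoked with no arguments).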
- """ - return self.name - - def make_metavar(self): - if self.metavar is not None: - return self.metavar - metavar = self.type.get_metavar(self) - if metavar is None: - metavar = self.type.name.upper() - if self.nargs != 1: - metavar += '...' - return metavar - - def get_default(self, ctx): - """Given a context variable this calculates the default value.""" - # Otherwise go with the regular default. - if callable(self.default): - rv = self.default() - else: - rv = self.default - return self.type_cast_value(ctx, rv) - - def add_to_parser(self, parser, ctx): - pass - - def consume_value(self, ctx, opts): - value = opts.get(self.name) - if value is None: - value = self.value_from_envvar(ctx) - if value is None: - value = ctx.lookup_default(self.name) - return value - - def type_cast_value(self, ctx, value): - """Given a value this runs it properly through the type system. - This automatically handles things like `nargs` and `multiple` as - well as composite types. - """ - if self.type.is_composite: - if self.nargs <= 1: - raise TypeError('Attempted to invoke composite type ' - 'but nargs has been set to %s. This is ' - 'not supported; nargs needs to be set to ' - 'a fixed value > 1.' % self.nargs) - if self.multiple: - return tuple(self.type(x or (), self, ctx) for x in value or ()) - return self.type(value or (), self, ctx) - - def _convert(value, level): - if level == 0: - return self.type(value, self, ctx) - return tuple(_convert(x, level - 1) for x in value or ()) - return _convert(value, (self.nargs != 1) + bool(self.multiple)) - - def process_value(self, ctx, value): - """Given a value and context this runs the logic to convert the - value as necessary. - """ - # If the value we were given is None we do nothing. This way - # code that calls this can easily figure out if something was - # not provided. Otherwise it would be converted into an empty - # tuple for multiple invocations which is inconvenient. 
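The nesting performed by ``_convert`` above composes ``nargs`` with ``multiple``; for example (option name hypothetical)::

    import click

    @click.command()
    @click.option('--point', type=int, nargs=2, multiple=True)
    def plot(point):
        # `plot --point 1 2 --point 3 4` prints ((1, 2), (3, 4))
        click.echo(repr(point))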
- if value is not None: - return self.type_cast_value(ctx, value) - - def value_is_missing(self, value): - if value is None: - return True - if (self.nargs != 1 or self.multiple) and value == (): - return True - return False - - def full_process_value(self, ctx, value): - value = self.process_value(ctx, value) - - if value is None and not ctx.resilient_parsing: - value = self.get_default(ctx) - - if self.required and self.value_is_missing(value): - raise MissingParameter(ctx=ctx, param=self) - - return value - - def resolve_envvar_value(self, ctx): - if self.envvar is None: - return - if isinstance(self.envvar, (tuple, list)): - for envvar in self.envvar: - rv = os.environ.get(envvar) - if rv is not None: - return rv - else: - return os.environ.get(self.envvar) - - def value_from_envvar(self, ctx): - rv = self.resolve_envvar_value(ctx) - if rv is not None and self.nargs != 1: - rv = self.type.split_envvar_value(rv) - return rv - - def handle_parse_result(self, ctx, opts, args): - with augment_usage_errors(ctx, param=self): - value = self.consume_value(ctx, opts) - try: - value = self.full_process_value(ctx, value) - except Exception: - if not ctx.resilient_parsing: - raise - value = None - if self.callback is not None: - try: - value = invoke_param_callback( - self.callback, ctx, self, value) - except Exception: - if not ctx.resilient_parsing: - raise - - if self.expose_value: - ctx.params[self.name] = value - return value, args - - def get_help_record(self, ctx): - pass - - def get_usage_pieces(self, ctx): - return [] - - def get_error_hint(self, ctx): - """Get a stringified version of the param for use in error messages to - indicate which param caused the error. - """ - hint_list = self.opts or [self.human_readable_name] - return ' / '.join('"%s"' % x for x in hint_list) - - -class Option(Parameter): - """Options are usually optional values on the command line and - have some extra features that arguments don't have. - - All other parameters are passed onwards to the parameter constructor. - - :param show_default: controls if the default value should be shown on the - help page. Normally, defaults are not shown. If this - value is a string, it shows the string instead of the - value. This is particularly useful for dynamic options. - :param show_envvar: controls if an environment variable should be shown on - the help page. Normally, environment variables - are not shown. - :param prompt: if set to `True` or a non empty string then the user will be - prompted for input. If set to `True` the prompt will be the - option name capitalized. - :param confirmation_prompt: if set then the value will need to be confirmed - if it was prompted for. - :param hide_input: if this is `True` then the input on the prompt will be - hidden from the user. This is useful for password - input. - :param is_flag: forces this option to act as a flag. The default is - auto detection. - :param flag_value: which value should be used for this flag if it's - enabled. This is set to a boolean automatically if - the option string contains a slash to mark two options. - :param multiple: if this is set to `True` then the argument is accepted - multiple times and recorded. This is similar to ``nargs`` - in how it works but supports arbitrary number of - arguments. - :param count: this flag makes an option increment an integer. - :param allow_from_autoenv: if this is enabled then the value of this - parameter will be pulled from an environment - variable in case a prefix is defined on the - context. 
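Several of the flag parameters documented above come together in the common on/off switch; a minimal sketch::

    import click

    @click.command()
    @click.option('--shout/--no-shout', default=False)
    def greet(shout):
        message = 'hello'
        click.echo(message.upper() if shout else message)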
- :param help: the help string. - :param hidden: hide this option from help outputs. - """ - param_type_name = 'option' - - def __init__(self, param_decls=None, show_default=False, - prompt=False, confirmation_prompt=False, - hide_input=False, is_flag=None, flag_value=None, - multiple=False, count=False, allow_from_autoenv=True, - type=None, help=None, hidden=False, show_choices=True, - show_envvar=False, **attrs): - default_is_missing = attrs.get('default', _missing) is _missing - Parameter.__init__(self, param_decls, type=type, **attrs) - - if prompt is True: - prompt_text = self.name.replace('_', ' ').capitalize() - elif prompt is False: - prompt_text = None - else: - prompt_text = prompt - self.prompt = prompt_text - self.confirmation_prompt = confirmation_prompt - self.hide_input = hide_input - self.hidden = hidden - - # Flags - if is_flag is None: - if flag_value is not None: - is_flag = True - else: - is_flag = bool(self.secondary_opts) - if is_flag and default_is_missing: - self.default = False - if flag_value is None: - flag_value = not self.default - self.is_flag = is_flag - self.flag_value = flag_value - if self.is_flag and isinstance(self.flag_value, bool) \ - and type is None: - self.type = BOOL - self.is_bool_flag = True - else: - self.is_bool_flag = False - - # Counting - self.count = count - if count: - if type is None: - self.type = IntRange(min=0) - if default_is_missing: - self.default = 0 - - self.multiple = multiple - self.allow_from_autoenv = allow_from_autoenv - self.help = help - self.show_default = show_default - self.show_choices = show_choices - self.show_envvar = show_envvar - - # Sanity check for stuff we don't support - if __debug__: - if self.nargs < 0: - raise TypeError('Options cannot have nargs < 0') - if self.prompt and self.is_flag and not self.is_bool_flag: - raise TypeError('Cannot prompt for flags that are not bools.') - if not self.is_bool_flag and self.secondary_opts: - raise TypeError('Got secondary option for non boolean flag.') - if self.is_bool_flag and self.hide_input \ - and self.prompt is not None: - raise TypeError('Hidden input does not work with boolean ' - 'flag prompts.') - if self.count: - if self.multiple: - raise TypeError('Options cannot be multiple and count ' - 'at the same time.') - elif self.is_flag: - raise TypeError('Options cannot be count and flags at ' - 'the same time.') - - def _parse_decls(self, decls, expose_value): - opts = [] - secondary_opts = [] - name = None - possible_names = [] - - for decl in decls: - if isidentifier(decl): - if name is not None: - raise TypeError('Name defined twice') - name = decl - else: - split_char = decl[:1] == '/' and ';' or '/' - if split_char in decl: - first, second = decl.split(split_char, 1) - first = first.rstrip() - if first: - possible_names.append(split_opt(first)) - opts.append(first) - second = second.lstrip() - if second: - secondary_opts.append(second.lstrip()) - else: - possible_names.append(split_opt(decl)) - opts.append(decl) - - if name is None and possible_names: - possible_names.sort(key=lambda x: -len(x[0])) # group long options first - name = possible_names[0][1].replace('-', '_').lower() - if not isidentifier(name): - name = None - - if name is None: - if not expose_value: - return None, opts, secondary_opts - raise TypeError('Could not determine name for option') - - if not opts and not secondary_opts: - raise TypeError('No options defined but a name was passed (%s). ' - 'Did you mean to declare an argument instead ' - 'of an option?' 
% name) - - return name, opts, secondary_opts - - def add_to_parser(self, parser, ctx): - kwargs = { - 'dest': self.name, - 'nargs': self.nargs, - 'obj': self, - } - - if self.multiple: - action = 'append' - elif self.count: - action = 'count' - else: - action = 'store' - - if self.is_flag: - kwargs.pop('nargs', None) - if self.is_bool_flag and self.secondary_opts: - parser.add_option(self.opts, action=action + '_const', - const=True, **kwargs) - parser.add_option(self.secondary_opts, action=action + - '_const', const=False, **kwargs) - else: - parser.add_option(self.opts, action=action + '_const', - const=self.flag_value, - **kwargs) - else: - kwargs['action'] = action - parser.add_option(self.opts, **kwargs) - - def get_help_record(self, ctx): - if self.hidden: - return - any_prefix_is_slash = [] - - def _write_opts(opts): - rv, any_slashes = join_options(opts) - if any_slashes: - any_prefix_is_slash[:] = [True] - if not self.is_flag and not self.count: - rv += ' ' + self.make_metavar() - return rv - - rv = [_write_opts(self.opts)] - if self.secondary_opts: - rv.append(_write_opts(self.secondary_opts)) - - help = self.help or '' - extra = [] - if self.show_envvar: - envvar = self.envvar - if envvar is None: - if self.allow_from_autoenv and \ - ctx.auto_envvar_prefix is not None: - envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper()) - if envvar is not None: - extra.append('env var: %s' % ( - ', '.join('%s' % d for d in envvar) - if isinstance(envvar, (list, tuple)) - else envvar, )) - if self.default is not None and self.show_default: - if isinstance(self.show_default, string_types): - default_string = '({})'.format(self.show_default) - elif isinstance(self.default, (list, tuple)): - default_string = ', '.join('%s' % d for d in self.default) - elif inspect.isfunction(self.default): - default_string = "(dynamic)" - else: - default_string = self.default - extra.append('default: {}'.format(default_string)) - - if self.required: - extra.append('required') - if extra: - help = '%s[%s]' % (help and help + ' ' or '', '; '.join(extra)) - - return ((any_prefix_is_slash and '; ' or ' / ').join(rv), help) - - def get_default(self, ctx): - # If we're a non-boolean flag our default is more complex because - # we need to look at all flags in the same group to figure out - # if we're the default one, in which case we return the flag - # value as the default. - if self.is_flag and not self.is_bool_flag: - for param in ctx.command.params: - if param.name == self.name and param.default: - return param.flag_value - return None - return Parameter.get_default(self, ctx) - - def prompt_for_value(self, ctx): - """This is an alternative flow that can be activated in the full - value processing if a value does not exist. It will prompt the - user until a valid value exists and then returns the processed - value as result. - """ - # Calculate the default before prompting anything to be stable. - default = self.get_default(ctx) - - # If this is a prompt for a flag we need to handle this - # differently.
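The non-boolean flag default logic in ``get_default`` above enables feature switches, along the lines of the example in Click's documentation::

    import click

    @click.command()
    @click.option('--upper', 'transform', flag_value='upper', default=True)
    @click.option('--lower', 'transform', flag_value='lower')
    def convert(transform):
        click.echo(getattr('Hello', transform)())

    # With no flag given, `--upper` is the default and "HELLO" is printed.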
- if self.is_bool_flag: - return confirm(self.prompt, default) - - return prompt(self.prompt, default=default, type=self.type, - hide_input=self.hide_input, show_choices=self.show_choices, - confirmation_prompt=self.confirmation_prompt, - value_proc=lambda x: self.process_value(ctx, x)) - - def resolve_envvar_value(self, ctx): - rv = Parameter.resolve_envvar_value(self, ctx) - if rv is not None: - return rv - if self.allow_from_autoenv and \ - ctx.auto_envvar_prefix is not None: - envvar = '%s_%s' % (ctx.auto_envvar_prefix, self.name.upper()) - return os.environ.get(envvar) - - def value_from_envvar(self, ctx): - rv = self.resolve_envvar_value(ctx) - if rv is None: - return None - value_depth = (self.nargs != 1) + bool(self.multiple) - if value_depth > 0 and rv is not None: - rv = self.type.split_envvar_value(rv) - if self.multiple and self.nargs != 1: - rv = batch(rv, self.nargs) - return rv - - def full_process_value(self, ctx, value): - if value is None and self.prompt is not None \ - and not ctx.resilient_parsing: - return self.prompt_for_value(ctx) - return Parameter.full_process_value(self, ctx, value) - - -class Argument(Parameter): - """Arguments are positional parameters to a command. They generally - provide fewer features than options but can have infinite ``nargs`` - and are required by default. - - All parameters are passed onwards to the parameter constructor. - """ - param_type_name = 'argument' - - def __init__(self, param_decls, required=None, **attrs): - if required is None: - if attrs.get('default') is not None: - required = False - else: - required = attrs.get('nargs', 1) > 0 - Parameter.__init__(self, param_decls, required=required, **attrs) - if self.default is not None and self.nargs < 0: - raise TypeError('nargs=-1 in combination with a default value ' - 'is not supported.') - - @property - def human_readable_name(self): - if self.metavar is not None: - return self.metavar - return self.name.upper() - - def make_metavar(self): - if self.metavar is not None: - return self.metavar - var = self.type.get_metavar(self) - if not var: - var = self.name.upper() - if not self.required: - var = '[%s]' % var - if self.nargs != 1: - var += '...' - return var - - def _parse_decls(self, decls, expose_value): - if not decls: - if not expose_value: - return None, [], [] - raise TypeError('Could not determine name for argument') - if len(decls) == 1: - name = arg = decls[0] - name = name.replace('-', '_').lower() - else: - raise TypeError('Arguments take exactly one ' - 'parameter declaration, got %d' % len(decls)) - return name, [arg], [] - - def get_usage_pieces(self, ctx): - return [self.make_metavar()] - - def get_error_hint(self, ctx): - return '"%s"' % self.make_metavar() - - def add_to_parser(self, parser, ctx): - parser.add_argument(dest=self.name, nargs=self.nargs, - obj=self) - - -# Circular dependency between decorators and core -from .decorators import command, group diff --git a/venv/lib/python3.8/site-packages/click/decorators.py b/venv/lib/python3.8/site-packages/click/decorators.py deleted file mode 100644 index c57c5308..00000000 --- a/venv/lib/python3.8/site-packages/click/decorators.py +++ /dev/null @@ -1,311 +0,0 @@ -import sys -import inspect - -from functools import update_wrapper - -from ._compat import iteritems -from ._unicodefun import _check_for_unicode_literals -from .utils import echo -from .globals import get_current_context - - -def pass_context(f): - """Marks a callback as wanting to receive the current context - object as first argument. 
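Variadic arguments as described for :class:`Argument` above (``nargs=-1`` plus a trailing one-value argument; names hypothetical)::

    import click

    @click.command()
    @click.argument('src', nargs=-1)
    @click.argument('dst', nargs=1)
    def copy(src, dst):
        for filename in src:
            click.echo('copy %s -> %s' % (filename, dst))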
- """ - def new_func(*args, **kwargs): - return f(get_current_context(), *args, **kwargs) - return update_wrapper(new_func, f) - - -def pass_obj(f): - """Similar to :func:`pass_context`, but only pass the object on the - context onwards (:attr:`Context.obj`). This is useful if that object - represents the state of a nested system. - """ - def new_func(*args, **kwargs): - return f(get_current_context().obj, *args, **kwargs) - return update_wrapper(new_func, f) - - -def make_pass_decorator(object_type, ensure=False): - """Given an object type this creates a decorator that will work - similar to :func:`pass_obj` but instead of passing the object of the - current context, it will find the innermost context of type - :func:`object_type`. - - This generates a decorator that works roughly like this:: - - from functools import update_wrapper - - def decorator(f): - @pass_context - def new_func(ctx, *args, **kwargs): - obj = ctx.find_object(object_type) - return ctx.invoke(f, obj, *args, **kwargs) - return update_wrapper(new_func, f) - return decorator - - :param object_type: the type of the object to pass. - :param ensure: if set to `True`, a new object will be created and - remembered on the context if it's not there yet. - """ - def decorator(f): - def new_func(*args, **kwargs): - ctx = get_current_context() - if ensure: - obj = ctx.ensure_object(object_type) - else: - obj = ctx.find_object(object_type) - if obj is None: - raise RuntimeError('Managed to invoke callback without a ' - 'context object of type %r existing' - % object_type.__name__) - return ctx.invoke(f, obj, *args, **kwargs) - return update_wrapper(new_func, f) - return decorator - - -def _make_command(f, name, attrs, cls): - if isinstance(f, Command): - raise TypeError('Attempted to convert a callback into a ' - 'command twice.') - try: - params = f.__click_params__ - params.reverse() - del f.__click_params__ - except AttributeError: - params = [] - help = attrs.get('help') - if help is None: - help = inspect.getdoc(f) - if isinstance(help, bytes): - help = help.decode('utf-8') - else: - help = inspect.cleandoc(help) - attrs['help'] = help - _check_for_unicode_literals() - return cls(name=name or f.__name__.lower().replace('_', '-'), - callback=f, params=params, **attrs) - - -def command(name=None, cls=None, **attrs): - r"""Creates a new :class:`Command` and uses the decorated function as - callback. This will also automatically attach all decorated - :func:`option`\s and :func:`argument`\s as parameters to the command. - - The name of the command defaults to the name of the function. If you - want to change that, you can pass the intended name as the first - argument. - - All keyword arguments are forwarded to the underlying command class. - - Once decorated the function turns into a :class:`Command` instance - that can be invoked as a command line utility or be attached to a - command :class:`Group`. - - :param name: the name of the command. This defaults to the function - name with underscores replaced by dashes. - :param cls: the command class to instantiate. This defaults to - :class:`Command`. - """ - if cls is None: - cls = Command - def decorator(f): - cmd = _make_command(f, name, attrs, cls) - cmd.__doc__ = f.__doc__ - return cmd - return decorator - - -def group(name=None, **attrs): - """Creates a new :class:`Group` with a function as callback. This - works otherwise the same as :func:`command` just that the `cls` - parameter is set to :class:`Group`. 
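A sketch combining the pass helpers defined above (``Repo`` is a hypothetical state object)::

    import click

    class Repo(object):
        def __init__(self, home='.'):
            self.home = home

    pass_repo = click.make_pass_decorator(Repo, ensure=True)

    @click.group()
    @click.pass_context
    def cli(ctx):
        ctx.obj = Repo(home='/tmp/repo')

    @cli.command()
    @pass_repo
    def status(repo):
        click.echo('repo at %s' % repo.home)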
- """ - attrs.setdefault('cls', Group) - return command(name, **attrs) - - -def _param_memo(f, param): - if isinstance(f, Command): - f.params.append(param) - else: - if not hasattr(f, '__click_params__'): - f.__click_params__ = [] - f.__click_params__.append(param) - - -def argument(*param_decls, **attrs): - """Attaches an argument to the command. All positional arguments are - passed as parameter declarations to :class:`Argument`; all keyword - arguments are forwarded unchanged (except ``cls``). - This is equivalent to creating an :class:`Argument` instance manually - and attaching it to the :attr:`Command.params` list. - - :param cls: the argument class to instantiate. This defaults to - :class:`Argument`. - """ - def decorator(f): - ArgumentClass = attrs.pop('cls', Argument) - _param_memo(f, ArgumentClass(param_decls, **attrs)) - return f - return decorator - - -def option(*param_decls, **attrs): - """Attaches an option to the command. All positional arguments are - passed as parameter declarations to :class:`Option`; all keyword - arguments are forwarded unchanged (except ``cls``). - This is equivalent to creating an :class:`Option` instance manually - and attaching it to the :attr:`Command.params` list. - - :param cls: the option class to instantiate. This defaults to - :class:`Option`. - """ - def decorator(f): - # Issue 926, copy attrs, so pre-defined options can re-use the same cls= - option_attrs = attrs.copy() - - if 'help' in option_attrs: - option_attrs['help'] = inspect.cleandoc(option_attrs['help']) - OptionClass = option_attrs.pop('cls', Option) - _param_memo(f, OptionClass(param_decls, **option_attrs)) - return f - return decorator - - -def confirmation_option(*param_decls, **attrs): - """Shortcut for confirmation prompts that can be ignored by passing - ``--yes`` as parameter. - - This is equivalent to decorating a function with :func:`option` with - the following parameters:: - - def callback(ctx, param, value): - if not value: - ctx.abort() - - @click.command() - @click.option('--yes', is_flag=True, callback=callback, - expose_value=False, prompt='Do you want to continue?') - def dropdb(): - pass - """ - def decorator(f): - def callback(ctx, param, value): - if not value: - ctx.abort() - attrs.setdefault('is_flag', True) - attrs.setdefault('callback', callback) - attrs.setdefault('expose_value', False) - attrs.setdefault('prompt', 'Do you want to continue?') - attrs.setdefault('help', 'Confirm the action without prompting.') - return option(*(param_decls or ('--yes',)), **attrs)(f) - return decorator - - -def password_option(*param_decls, **attrs): - """Shortcut for password prompts. - - This is equivalent to decorating a function with :func:`option` with - the following parameters:: - - @click.command() - @click.option('--password', prompt=True, confirmation_prompt=True, - hide_input=True) - def changeadmin(password): - pass - """ - def decorator(f): - attrs.setdefault('prompt', True) - attrs.setdefault('confirmation_prompt', True) - attrs.setdefault('hide_input', True) - return option(*(param_decls or ('--password',)), **attrs)(f) - return decorator - - -def version_option(version=None, *param_decls, **attrs): - """Adds a ``--version`` option which immediately ends the program - printing out the version number. This is implemented as an eager - option that prints the version and exits the program in the callback. - - :param version: the version number to show. If not provided Click - attempts an auto discovery via setuptools. 
- :param prog_name: the name of the program (defaults to autodetection) - :param message: custom message to show instead of the default - (``'%(prog)s, version %(version)s'``) - :param others: everything else is forwarded to :func:`option`. - """ - if version is None: - if hasattr(sys, '_getframe'): - module = sys._getframe(1).f_globals.get('__name__') - else: - module = '' - - def decorator(f): - prog_name = attrs.pop('prog_name', None) - message = attrs.pop('message', '%(prog)s, version %(version)s') - - def callback(ctx, param, value): - if not value or ctx.resilient_parsing: - return - prog = prog_name - if prog is None: - prog = ctx.find_root().info_name - ver = version - if ver is None: - try: - import pkg_resources - except ImportError: - pass - else: - for dist in pkg_resources.working_set: - scripts = dist.get_entry_map().get('console_scripts') or {} - for script_name, entry_point in iteritems(scripts): - if entry_point.module_name == module: - ver = dist.version - break - if ver is None: - raise RuntimeError('Could not determine version') - echo(message % { - 'prog': prog, - 'version': ver, - }, color=ctx.color) - ctx.exit() - - attrs.setdefault('is_flag', True) - attrs.setdefault('expose_value', False) - attrs.setdefault('is_eager', True) - attrs.setdefault('help', 'Show the version and exit.') - attrs['callback'] = callback - return option(*(param_decls or ('--version',)), **attrs)(f) - return decorator - - -def help_option(*param_decls, **attrs): - """Adds a ``--help`` option which immediately ends the program - printing out the help page. This is usually unnecessary to add as - this is added by default to all commands unless suppressed. - - Like :func:`version_option`, this is implemented as eager option that - prints in the callback and exits. - - All arguments are forwarded to :func:`option`. 
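A minimal use of ``version_option`` with an explicit version, sidestepping the setuptools discovery described above (names hypothetical)::

    import click

    @click.command()
    @click.version_option(version='1.4.2', prog_name='mytool')
    def cli():
        pass

    # `cli --version` prints "mytool, version 1.4.2" and exits before
    # the command body runs.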
- """ - def decorator(f): - def callback(ctx, param, value): - if value and not ctx.resilient_parsing: - echo(ctx.get_help(), color=ctx.color) - ctx.exit() - attrs.setdefault('is_flag', True) - attrs.setdefault('expose_value', False) - attrs.setdefault('help', 'Show this message and exit.') - attrs.setdefault('is_eager', True) - attrs['callback'] = callback - return option(*(param_decls or ('--help',)), **attrs)(f) - return decorator - - -# Circular dependencies between core and decorators -from .core import Command, Group, Argument, Option diff --git a/venv/lib/python3.8/site-packages/click/exceptions.py b/venv/lib/python3.8/site-packages/click/exceptions.py deleted file mode 100644 index 6fa17658..00000000 --- a/venv/lib/python3.8/site-packages/click/exceptions.py +++ /dev/null @@ -1,235 +0,0 @@ -from ._compat import PY2, filename_to_ui, get_text_stderr -from .utils import echo - - -def _join_param_hints(param_hint): - if isinstance(param_hint, (tuple, list)): - return ' / '.join('"%s"' % x for x in param_hint) - return param_hint - - -class ClickException(Exception): - """An exception that Click can handle and show to the user.""" - - #: The exit code for this exception - exit_code = 1 - - def __init__(self, message): - ctor_msg = message - if PY2: - if ctor_msg is not None: - ctor_msg = ctor_msg.encode('utf-8') - Exception.__init__(self, ctor_msg) - self.message = message - - def format_message(self): - return self.message - - def __str__(self): - return self.message - - if PY2: - __unicode__ = __str__ - - def __str__(self): - return self.message.encode('utf-8') - - def show(self, file=None): - if file is None: - file = get_text_stderr() - echo('Error: %s' % self.format_message(), file=file) - - -class UsageError(ClickException): - """An internal exception that signals a usage error. This typically - aborts any further handling. - - :param message: the error message to display. - :param ctx: optionally the context that caused this error. Click will - fill in the context automatically in some situations. - """ - exit_code = 2 - - def __init__(self, message, ctx=None): - ClickException.__init__(self, message) - self.ctx = ctx - self.cmd = self.ctx and self.ctx.command or None - - def show(self, file=None): - if file is None: - file = get_text_stderr() - color = None - hint = '' - if (self.cmd is not None and - self.cmd.get_help_option(self.ctx) is not None): - hint = ('Try "%s %s" for help.\n' - % (self.ctx.command_path, self.ctx.help_option_names[0])) - if self.ctx is not None: - color = self.ctx.color - echo(self.ctx.get_usage() + '\n%s' % hint, file=file, color=color) - echo('Error: %s' % self.format_message(), file=file, color=color) - - -class BadParameter(UsageError): - """An exception that formats out a standardized error message for a - bad parameter. This is useful when thrown from a callback or type as - Click will attach contextual information to it (for instance, which - parameter it is). - - .. versionadded:: 2.0 - - :param param: the parameter object that caused this error. This can - be left out, and Click will attach this info itself - if possible. - :param param_hint: a string that shows up as parameter name. This - can be used as alternative to `param` in cases - where custom validation should happen. If it is - a string it's used as such, if it's a list then - each item is quoted and separated. 
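Custom errors can reuse the machinery above; a sketch with a hypothetical subclass::

    import click

    class ConfigError(click.ClickException):
        exit_code = 3

    @click.command()
    def deploy():
        raise ConfigError('no configuration file found')

    # Click prints "Error: no configuration file found" to stderr and
    # exits with status 3 instead of showing a traceback.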
- """ - - def __init__(self, message, ctx=None, param=None, - param_hint=None): - UsageError.__init__(self, message, ctx) - self.param = param - self.param_hint = param_hint - - def format_message(self): - if self.param_hint is not None: - param_hint = self.param_hint - elif self.param is not None: - param_hint = self.param.get_error_hint(self.ctx) - else: - return 'Invalid value: %s' % self.message - param_hint = _join_param_hints(param_hint) - - return 'Invalid value for %s: %s' % (param_hint, self.message) - - -class MissingParameter(BadParameter): - """Raised if click required an option or argument but it was not - provided when invoking the script. - - .. versionadded:: 4.0 - - :param param_type: a string that indicates the type of the parameter. - The default is to inherit the parameter type from - the given `param`. Valid values are ``'parameter'``, - ``'option'`` or ``'argument'``. - """ - - def __init__(self, message=None, ctx=None, param=None, - param_hint=None, param_type=None): - BadParameter.__init__(self, message, ctx, param, param_hint) - self.param_type = param_type - - def format_message(self): - if self.param_hint is not None: - param_hint = self.param_hint - elif self.param is not None: - param_hint = self.param.get_error_hint(self.ctx) - else: - param_hint = None - param_hint = _join_param_hints(param_hint) - - param_type = self.param_type - if param_type is None and self.param is not None: - param_type = self.param.param_type_name - - msg = self.message - if self.param is not None: - msg_extra = self.param.type.get_missing_message(self.param) - if msg_extra: - if msg: - msg += '. ' + msg_extra - else: - msg = msg_extra - - return 'Missing %s%s%s%s' % ( - param_type, - param_hint and ' %s' % param_hint or '', - msg and '. ' or '.', - msg or '', - ) - - -class NoSuchOption(UsageError): - """Raised if click attempted to handle an option that does not - exist. - - .. versionadded:: 4.0 - """ - - def __init__(self, option_name, message=None, possibilities=None, - ctx=None): - if message is None: - message = 'no such option: %s' % option_name - UsageError.__init__(self, message, ctx) - self.option_name = option_name - self.possibilities = possibilities - - def format_message(self): - bits = [self.message] - if self.possibilities: - if len(self.possibilities) == 1: - bits.append('Did you mean %s?' % self.possibilities[0]) - else: - possibilities = sorted(self.possibilities) - bits.append('(Possible options: %s)' % ', '.join(possibilities)) - return ' '.join(bits) - - -class BadOptionUsage(UsageError): - """Raised if an option is generally supplied but the use of the option - was incorrect. This is for instance raised if the number of arguments - for an option is not correct. - - .. versionadded:: 4.0 - - :param option_name: the name of the option being used incorrectly. - """ - - def __init__(self, option_name, message, ctx=None): - UsageError.__init__(self, message, ctx) - self.option_name = option_name - - -class BadArgumentUsage(UsageError): - """Raised if an argument is generally supplied but the use of the argument - was incorrect. This is for instance raised if the number of values - for an argument is not correct. - - .. 
versionadded:: 6.0 - """ - - def __init__(self, message, ctx=None): - UsageError.__init__(self, message, ctx) - - -class FileError(ClickException): - """Raised if a file cannot be opened.""" - - def __init__(self, filename, hint=None): - ui_filename = filename_to_ui(filename) - if hint is None: - hint = 'unknown error' - ClickException.__init__(self, hint) - self.ui_filename = ui_filename - self.filename = filename - - def format_message(self): - return 'Could not open file %s: %s' % (self.ui_filename, self.message) - - -class Abort(RuntimeError): - """An internal signalling exception that signals Click to abort.""" - - -class Exit(RuntimeError): - """An exception that indicates that the application should exit with some - status code. - - :param code: the status code to exit with. - """ - def __init__(self, code=0): - self.exit_code = code diff --git a/venv/lib/python3.8/site-packages/click/formatting.py b/venv/lib/python3.8/site-packages/click/formatting.py deleted file mode 100644 index a3d6a4d3..00000000 --- a/venv/lib/python3.8/site-packages/click/formatting.py +++ /dev/null @@ -1,256 +0,0 @@ -from contextlib import contextmanager -from .termui import get_terminal_size -from .parser import split_opt -from ._compat import term_len - - -# Can force a width. This is used by the test system -FORCED_WIDTH = None - - -def measure_table(rows): - widths = {} - for row in rows: - for idx, col in enumerate(row): - widths[idx] = max(widths.get(idx, 0), term_len(col)) - return tuple(y for x, y in sorted(widths.items())) - - -def iter_rows(rows, col_count): - for row in rows: - row = tuple(row) - yield row + ('',) * (col_count - len(row)) - - -def wrap_text(text, width=78, initial_indent='', subsequent_indent='', - preserve_paragraphs=False): - """A helper function that intelligently wraps text. By default, it - assumes that it operates on a single paragraph of text but if the - `preserve_paragraphs` parameter is provided it will intelligently - handle paragraphs (defined by two empty lines). - - If paragraphs are handled, a paragraph can be prefixed with an empty - line containing the ``\\b`` character (``\\x08``) to indicate that - no rewrapping should happen in that block. - - :param text: the text that should be rewrapped. - :param width: the maximum width for the text. - :param initial_indent: the initial indent that should be placed on the - first line as a string. - :param subsequent_indent: the indent string that should be placed on - each consecutive line. - :param preserve_paragraphs: if this flag is set then the wrapping will - intelligently handle paragraphs. 
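The wrapper is exposed as ``click.wrap_text``; a quick illustration of the indent parameters described above::

    import click

    text = ('Click wraps long help texts to the terminal width. '
            'A paragraph prefixed with \\b is kept verbatim.')
    click.echo(click.wrap_text(text, width=40, initial_indent='  ',
                               subsequent_indent='  '))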
- """ - from ._textwrap import TextWrapper - text = text.expandtabs() - wrapper = TextWrapper(width, initial_indent=initial_indent, - subsequent_indent=subsequent_indent, - replace_whitespace=False) - if not preserve_paragraphs: - return wrapper.fill(text) - - p = [] - buf = [] - indent = None - - def _flush_par(): - if not buf: - return - if buf[0].strip() == '\b': - p.append((indent or 0, True, '\n'.join(buf[1:]))) - else: - p.append((indent or 0, False, ' '.join(buf))) - del buf[:] - - for line in text.splitlines(): - if not line: - _flush_par() - indent = None - else: - if indent is None: - orig_len = term_len(line) - line = line.lstrip() - indent = orig_len - term_len(line) - buf.append(line) - _flush_par() - - rv = [] - for indent, raw, text in p: - with wrapper.extra_indent(' ' * indent): - if raw: - rv.append(wrapper.indent_only(text)) - else: - rv.append(wrapper.fill(text)) - - return '\n\n'.join(rv) - - -class HelpFormatter(object): - """This class helps with formatting text-based help pages. It's - usually just needed for very special internal cases, but it's also - exposed so that developers can write their own fancy outputs. - - At present, it always writes into memory. - - :param indent_increment: the additional increment for each level. - :param width: the width for the text. This defaults to the terminal - width clamped to a maximum of 78. - """ - - def __init__(self, indent_increment=2, width=None, max_width=None): - self.indent_increment = indent_increment - if max_width is None: - max_width = 80 - if width is None: - width = FORCED_WIDTH - if width is None: - width = max(min(get_terminal_size()[0], max_width) - 2, 50) - self.width = width - self.current_indent = 0 - self.buffer = [] - - def write(self, string): - """Writes a unicode string into the internal buffer.""" - self.buffer.append(string) - - def indent(self): - """Increases the indentation.""" - self.current_indent += self.indent_increment - - def dedent(self): - """Decreases the indentation.""" - self.current_indent -= self.indent_increment - - def write_usage(self, prog, args='', prefix='Usage: '): - """Writes a usage line into the buffer. - - :param prog: the program name. - :param args: whitespace separated list of arguments. - :param prefix: the prefix for the first line. - """ - usage_prefix = '%*s%s ' % (self.current_indent, prefix, prog) - text_width = self.width - self.current_indent - - if text_width >= (term_len(usage_prefix) + 20): - # The arguments will fit to the right of the prefix. - indent = ' ' * term_len(usage_prefix) - self.write(wrap_text(args, text_width, - initial_indent=usage_prefix, - subsequent_indent=indent)) - else: - # The prefix is too long, put the arguments on the next line. - self.write(usage_prefix) - self.write('\n') - indent = ' ' * (max(self.current_indent, term_len(prefix)) + 4) - self.write(wrap_text(args, text_width, - initial_indent=indent, - subsequent_indent=indent)) - - self.write('\n') - - def write_heading(self, heading): - """Writes a heading into the buffer.""" - self.write('%*s%s:\n' % (self.current_indent, '', heading)) - - def write_paragraph(self): - """Writes a paragraph into the buffer.""" - if self.buffer: - self.write('\n') - - def write_text(self, text): - """Writes re-indented text into the buffer. This rewraps and - preserves paragraphs. 
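A sketch of driving :class:`HelpFormatter` directly (using ``section`` and ``write_dl``, defined just below; program and option names hypothetical)::

    import click

    formatter = click.HelpFormatter(width=60)
    formatter.write_usage('mytool', '[OPTIONS] SRC DST')
    with formatter.section('Options'):
        formatter.write_dl([
            ('--verbose', 'Print more output.'),
            ('--quiet', 'Silence everything.'),
        ])
    click.echo(formatter.getvalue())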
- """ - text_width = max(self.width - self.current_indent, 11) - indent = ' ' * self.current_indent - self.write(wrap_text(text, text_width, - initial_indent=indent, - subsequent_indent=indent, - preserve_paragraphs=True)) - self.write('\n') - - def write_dl(self, rows, col_max=30, col_spacing=2): - """Writes a definition list into the buffer. This is how options - and commands are usually formatted. - - :param rows: a list of two item tuples for the terms and values. - :param col_max: the maximum width of the first column. - :param col_spacing: the number of spaces between the first and - second column. - """ - rows = list(rows) - widths = measure_table(rows) - if len(widths) != 2: - raise TypeError('Expected two columns for definition list') - - first_col = min(widths[0], col_max) + col_spacing - - for first, second in iter_rows(rows, len(widths)): - self.write('%*s%s' % (self.current_indent, '', first)) - if not second: - self.write('\n') - continue - if term_len(first) <= first_col - col_spacing: - self.write(' ' * (first_col - term_len(first))) - else: - self.write('\n') - self.write(' ' * (first_col + self.current_indent)) - - text_width = max(self.width - first_col - 2, 10) - lines = iter(wrap_text(second, text_width).splitlines()) - if lines: - self.write(next(lines) + '\n') - for line in lines: - self.write('%*s%s\n' % ( - first_col + self.current_indent, '', line)) - else: - self.write('\n') - - @contextmanager - def section(self, name): - """Helpful context manager that writes a paragraph, a heading, - and the indents. - - :param name: the section name that is written as heading. - """ - self.write_paragraph() - self.write_heading(name) - self.indent() - try: - yield - finally: - self.dedent() - - @contextmanager - def indentation(self): - """A context manager that increases the indentation.""" - self.indent() - try: - yield - finally: - self.dedent() - - def getvalue(self): - """Returns the buffer contents.""" - return ''.join(self.buffer) - - -def join_options(options): - """Given a list of option strings this joins them in the most appropriate - way and returns them in the form ``(formatted_string, - any_prefix_is_slash)`` where the second item in the tuple is a flag that - indicates if any of the option prefixes was a slash. - """ - rv = [] - any_prefix_is_slash = False - for opt in options: - prefix = split_opt(opt)[0] - if prefix == '/': - any_prefix_is_slash = True - rv.append((len(prefix), opt)) - - rv.sort(key=lambda x: x[0]) - - rv = ', '.join(x[1] for x in rv) - return rv, any_prefix_is_slash diff --git a/venv/lib/python3.8/site-packages/click/globals.py b/venv/lib/python3.8/site-packages/click/globals.py deleted file mode 100644 index 843b594a..00000000 --- a/venv/lib/python3.8/site-packages/click/globals.py +++ /dev/null @@ -1,48 +0,0 @@ -from threading import local - - -_local = local() - - -def get_current_context(silent=False): - """Returns the current click context. This can be used as a way to - access the current context object from anywhere. This is a more implicit - alternative to the :func:`pass_context` decorator. This function is - primarily useful for helpers such as :func:`echo` which might be - interested in changing its behavior based on the current context. - - To push the current context, :meth:`Context.scope` can be used. - - .. versionadded:: 5.0 - - :param silent: is set to `True` the return value is `None` if no context - is available. The default behavior is to raise a - :exc:`RuntimeError`. 
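A sketch of the implicit context lookup described above (helper name hypothetical)::

    import click

    def log(message):
        # Helpers can find the active context without it being passed in.
        ctx = click.get_current_context(silent=True)
        prefix = ctx.info_name if ctx is not None else 'app'
        click.echo('[%s] %s' % (prefix, message))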
- """ - try: - return getattr(_local, 'stack')[-1] - except (AttributeError, IndexError): - if not silent: - raise RuntimeError('There is no active click context.') - - -def push_context(ctx): - """Pushes a new context to the current stack.""" - _local.__dict__.setdefault('stack', []).append(ctx) - - -def pop_context(): - """Removes the top level from the stack.""" - _local.stack.pop() - - -def resolve_color_default(color=None): - """Internal helper to get the default value of the color flag. If a - value is passed it's returned unchanged, otherwise it's looked up from - the current context. - """ - if color is not None: - return color - ctx = get_current_context(silent=True) - if ctx is not None: - return ctx.color diff --git a/venv/lib/python3.8/site-packages/click/parser.py b/venv/lib/python3.8/site-packages/click/parser.py deleted file mode 100644 index 1c3ae9c8..00000000 --- a/venv/lib/python3.8/site-packages/click/parser.py +++ /dev/null @@ -1,427 +0,0 @@ -# -*- coding: utf-8 -*- -""" -click.parser -~~~~~~~~~~~~ - -This module started out as largely a copy-paste from the stdlib's -optparse module with the features removed that we do not need from -optparse because we implement them in Click on a higher level (for -instance type handling, help formatting and a lot more). - -The plan is to remove more and more from here over time. - -The reason this is a different module and not optparse from the stdlib -is that there are differences in 2.x and 3.x about the error messages -generated and optparse in the stdlib uses gettext for no good reason -and might cause us issues. -""" - -import re -from collections import deque -from .exceptions import UsageError, NoSuchOption, BadOptionUsage, \ - BadArgumentUsage - - -def _unpack_args(args, nargs_spec): - """Given an iterable of arguments and an iterable of nargs specifications, - it returns a tuple with all the unpacked arguments at the first index - and all remaining arguments as the second. - - The nargs specification is the number of arguments that should be consumed - or `-1` to indicate that this position should eat up all the remainders. - - Missing items are filled with `None`. - """ - args = deque(args) - nargs_spec = deque(nargs_spec) - rv = [] - spos = None - - def _fetch(c): - try: - if spos is None: - return c.popleft() - else: - return c.pop() - except IndexError: - return None - - while nargs_spec: - nargs = _fetch(nargs_spec) - if nargs == 1: - rv.append(_fetch(args)) - elif nargs > 1: - x = [_fetch(args) for _ in range(nargs)] - # If we're reversed, we're pulling in the arguments in reverse, - # so we need to turn them around. - if spos is not None: - x.reverse() - rv.append(tuple(x)) - elif nargs < 0: - if spos is not None: - raise TypeError('Cannot have two nargs < 0') - spos = len(rv) - rv.append(None) - - # spos is the position of the wildcard (star). If it's not `None`, - # we fill it with the remainder.
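The unpacking rules above can be checked directly; note that ``_unpack_args`` is an internal helper, so this is illustration only::

    from click.parser import _unpack_args

    # One fixed argument, a greedy middle (nargs=-1), one fixed trailer:
    unpacked, leftover = _unpack_args(['a', 'b', 'c', 'd'], [1, -1, 1])
    # unpacked == ('a', ('b', 'c'), 'd'), leftover == []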
- if spos is not None: - rv[spos] = tuple(args) - args = [] - rv[spos + 1:] = reversed(rv[spos + 1:]) - - return tuple(rv), list(args) - - -def _error_opt_args(nargs, opt): - if nargs == 1: - raise BadOptionUsage(opt, '%s option requires an argument' % opt) - raise BadOptionUsage(opt, '%s option requires %d arguments' % (opt, nargs)) - - -def split_opt(opt): - first = opt[:1] - if first.isalnum(): - return '', opt - if opt[1:2] == first: - return opt[:2], opt[2:] - return first, opt[1:] - - -def normalize_opt(opt, ctx): - if ctx is None or ctx.token_normalize_func is None: - return opt - prefix, opt = split_opt(opt) - return prefix + ctx.token_normalize_func(opt) - - -def split_arg_string(string): - """Given an argument string this attempts to split it into small parts.""" - rv = [] - for match in re.finditer(r"('([^'\\]*(?:\\.[^'\\]*)*)'" - r'|"([^"\\]*(?:\\.[^"\\]*)*)"' - r'|\S+)\s*', string, re.S): - arg = match.group().strip() - if arg[:1] == arg[-1:] and arg[:1] in '"\'': - arg = arg[1:-1].encode('ascii', 'backslashreplace') \ - .decode('unicode-escape') - try: - arg = type(string)(arg) - except UnicodeError: - pass - rv.append(arg) - return rv - - -class Option(object): - - def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None): - self._short_opts = [] - self._long_opts = [] - self.prefixes = set() - - for opt in opts: - prefix, value = split_opt(opt) - if not prefix: - raise ValueError('Invalid start character for option (%s)' - % opt) - self.prefixes.add(prefix[0]) - if len(prefix) == 1 and len(value) == 1: - self._short_opts.append(opt) - else: - self._long_opts.append(opt) - self.prefixes.add(prefix) - - if action is None: - action = 'store' - - self.dest = dest - self.action = action - self.nargs = nargs - self.const = const - self.obj = obj - - @property - def takes_value(self): - return self.action in ('store', 'append') - - def process(self, value, state): - if self.action == 'store': - state.opts[self.dest] = value - elif self.action == 'store_const': - state.opts[self.dest] = self.const - elif self.action == 'append': - state.opts.setdefault(self.dest, []).append(value) - elif self.action == 'append_const': - state.opts.setdefault(self.dest, []).append(self.const) - elif self.action == 'count': - state.opts[self.dest] = state.opts.get(self.dest, 0) + 1 - else: - raise ValueError('unknown action %r' % self.action) - state.order.append(self.obj) - - -class Argument(object): - - def __init__(self, dest, nargs=1, obj=None): - self.dest = dest - self.nargs = nargs - self.obj = obj - - def process(self, value, state): - if self.nargs > 1: - holes = sum(1 for x in value if x is None) - if holes == len(value): - value = None - elif holes != 0: - raise BadArgumentUsage('argument %s takes %d values' - % (self.dest, self.nargs)) - state.opts[self.dest] = value - state.order.append(self.obj) - - -class ParsingState(object): - - def __init__(self, rargs): - self.opts = {} - self.largs = [] - self.rargs = rargs - self.order = [] - - -class OptionParser(object): - """The option parser is an internal class that is ultimately used to - parse options and arguments. It's modelled after optparse and brings - a similar but vastly simplified API. It should generally not be used - directly as the high level Click classes wrap it for you. - - It's not nearly as extensible as optparse or argparse as it does not - implement features that are implemented on a higher level (such as - types or defaults). 
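A sketch of driving the internal :class:`OptionParser` by hand (it is normally wrapped by the higher-level classes, as the docstring above notes)::

    from click.parser import OptionParser

    parser = OptionParser()
    parser.add_option(['-n', '--name'], dest='name')
    parser.add_option(['-v'], dest='verbose', action='store_const', const=True)
    parser.add_argument(dest='src', nargs=1)
    opts, largs, order = parser.parse_args(['-v', '--name', 'demo', 'in.txt'])
    # opts == {'verbose': True, 'name': 'demo', 'src': 'in.txt'}; largs == []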
- - :param ctx: optionally the :class:`~click.Context` that this parser - belongs to. - """ - - def __init__(self, ctx=None): - #: The :class:`~click.Context` for this parser. This might be - #: `None` for some advanced use cases. - self.ctx = ctx - #: This controls how the parser deals with interspersed arguments. - #: If this is set to `False`, the parser will stop on the first - #: non-option. Click uses this to implement nested subcommands - #: safely. - self.allow_interspersed_args = True - #: This tells the parser how to deal with unknown options. By - #: default it will error out (which is sensible), but there is a - #: second mode where it will ignore it and continue processing - #: after shifting all the unknown options into the resulting args. - self.ignore_unknown_options = False - if ctx is not None: - self.allow_interspersed_args = ctx.allow_interspersed_args - self.ignore_unknown_options = ctx.ignore_unknown_options - self._short_opt = {} - self._long_opt = {} - self._opt_prefixes = set(['-', '--']) - self._args = [] - - def add_option(self, opts, dest, action=None, nargs=1, const=None, - obj=None): - """Adds a new option named `dest` to the parser. The destination - is not inferred (unlike with optparse) and needs to be explicitly - provided. Action can be any of ``store``, ``store_const``, - ``append``, ``append_const`` or ``count``. - - The `obj` can be used to identify the option in the order list - that is returned from the parser. - """ - if obj is None: - obj = dest - opts = [normalize_opt(opt, self.ctx) for opt in opts] - option = Option(opts, dest, action=action, nargs=nargs, - const=const, obj=obj) - self._opt_prefixes.update(option.prefixes) - for opt in option._short_opts: - self._short_opt[opt] = option - for opt in option._long_opts: - self._long_opt[opt] = option - - def add_argument(self, dest, nargs=1, obj=None): - """Adds a positional argument named `dest` to the parser. - - The `obj` can be used to identify the argument in the order list - that is returned from the parser. - """ - if obj is None: - obj = dest - self._args.append(Argument(dest=dest, nargs=nargs, obj=obj)) - - def parse_args(self, args): - """Parses positional arguments and returns ``(values, args, order)`` - for the parsed options and arguments as well as the leftover - arguments if there are any. The order is a list of objects as they - appear on the command line. If arguments appear multiple times they - will be recorded multiple times as well. - """ - state = ParsingState(args) - try: - self._process_args_for_options(state) - self._process_args_for_args(state) - except UsageError: - if self.ctx is None or not self.ctx.resilient_parsing: - raise - return state.opts, state.largs, state.order - - def _process_args_for_args(self, state): - pargs, args = _unpack_args(state.largs + state.rargs, - [x.nargs for x in self._args]) - - for idx, arg in enumerate(self._args): - arg.process(pargs[idx], state) - - state.largs = args - state.rargs = [] - - def _process_args_for_options(self, state): - while state.rargs: - arg = state.rargs.pop(0) - arglen = len(arg) - # Double dashes are always handled explicitly regardless of what - # prefixes are valid.
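- # For example (an added note): in "prog -v -- --raw", "-v" is parsed - # as an option, while everything after "--" is left as positional - # arguments.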
- if arg == '--': - return - elif arg[:1] in self._opt_prefixes and arglen > 1: - self._process_opts(arg, state) - elif self.allow_interspersed_args: - state.largs.append(arg) - else: - state.rargs.insert(0, arg) - return - - # Say this is the original argument list: - # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)] - # ^ - # (we are about to process arg(i)). - # - # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of - # [arg0, ..., arg(i-1)] (any options and their arguments will have - # been removed from largs). - # - # The while loop will usually consume 1 or more arguments per pass. - # If it consumes 1 (eg. arg is an option that takes no arguments), - # then after _process_arg() is done the situation is: - # - # largs = subset of [arg0, ..., arg(i)] - # rargs = [arg(i+1), ..., arg(N-1)] - # - # If allow_interspersed_args is false, largs will always be - # *empty* -- still a subset of [arg0, ..., arg(i-1)], but - # not a very interesting subset! - - def _match_long_opt(self, opt, explicit_value, state): - if opt not in self._long_opt: - possibilities = [word for word in self._long_opt - if word.startswith(opt)] - raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx) - - option = self._long_opt[opt] - if option.takes_value: - # At this point it's safe to modify rargs by injecting the - # explicit value, because no exception is raised in this - # branch. This means that the inserted value will be fully - # consumed. - if explicit_value is not None: - state.rargs.insert(0, explicit_value) - - nargs = option.nargs - if len(state.rargs) < nargs: - _error_opt_args(nargs, opt) - elif nargs == 1: - value = state.rargs.pop(0) - else: - value = tuple(state.rargs[:nargs]) - del state.rargs[:nargs] - - elif explicit_value is not None: - raise BadOptionUsage(opt, '%s option does not take a value' % opt) - - else: - value = None - - option.process(value, state) - - def _match_short_opt(self, arg, state): - stop = False - i = 1 - prefix = arg[0] - unknown_options = [] - - for ch in arg[1:]: - opt = normalize_opt(prefix + ch, self.ctx) - option = self._short_opt.get(opt) - i += 1 - - if not option: - if self.ignore_unknown_options: - unknown_options.append(ch) - continue - raise NoSuchOption(opt, ctx=self.ctx) - if option.takes_value: - # Any characters left in arg? Pretend they're the - # next arg, and stop consuming characters of arg. - if i < len(arg): - state.rargs.insert(0, arg[i:]) - stop = True - - nargs = option.nargs - if len(state.rargs) < nargs: - _error_opt_args(nargs, opt) - elif nargs == 1: - value = state.rargs.pop(0) - else: - value = tuple(state.rargs[:nargs]) - del state.rargs[:nargs] - - else: - value = None - - option.process(value, state) - - if stop: - break - - # If we got any unknown options, we recombine the remaining option - # characters, re-attach the prefix, and report the result to the - # state as a new larg. This way basic option combining still works - # while unknown options are ignored. - if self.ignore_unknown_options and unknown_options: - state.largs.append(prefix + ''.join(unknown_options)) - - def _process_opts(self, arg, state): - explicit_value = None - # Long option handling happens in two parts. The first part is - # supporting explicitly attached values. In any case, we will try - # to match it as a long option first.
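- # For instance (an added note): "--name=value" is split below into - # long_opt="--name" and explicit_value="value" before matching.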
- if '=' in arg: - long_opt, explicit_value = arg.split('=', 1) - else: - long_opt = arg - norm_long_opt = normalize_opt(long_opt, self.ctx) - - # At this point we will match the (assumed) long option through - # the long option matching code. Note that this allows options - # like "-foo" to be matched as long options. - try: - self._match_long_opt(norm_long_opt, explicit_value, state) - except NoSuchOption: - # At this point the long option matching failed, and we need - # to try with short options. However there is a special rule: - # if we have a two-character option prefix (as in "--foo"), - # we do not dispatch to the short option code and will instead - # raise the no-option error. - if arg[:2] not in self._opt_prefixes: - return self._match_short_opt(arg, state) - if not self.ignore_unknown_options: - raise - state.largs.append(arg) diff --git a/venv/lib/python3.8/site-packages/click/termui.py b/venv/lib/python3.8/site-packages/click/termui.py deleted file mode 100644 index bf9a3aa1..00000000 --- a/venv/lib/python3.8/site-packages/click/termui.py +++ /dev/null @@ -1,606 +0,0 @@ -import os -import sys -import struct -import inspect -import itertools - -from ._compat import raw_input, text_type, string_types, \ - isatty, strip_ansi, get_winterm_size, DEFAULT_COLUMNS, WIN -from .utils import echo -from .exceptions import Abort, UsageError -from .types import convert_type, Choice, Path -from .globals import resolve_color_default - - -# The prompt functions to use. The doc tools currently override these -# functions to customize how they work. -visible_prompt_func = raw_input - -_ansi_colors = { - 'black': 30, - 'red': 31, - 'green': 32, - 'yellow': 33, - 'blue': 34, - 'magenta': 35, - 'cyan': 36, - 'white': 37, - 'reset': 39, - 'bright_black': 90, - 'bright_red': 91, - 'bright_green': 92, - 'bright_yellow': 93, - 'bright_blue': 94, - 'bright_magenta': 95, - 'bright_cyan': 96, - 'bright_white': 97, -} -_ansi_reset_all = '\033[0m' - - -def hidden_prompt_func(prompt): - import getpass - return getpass.getpass(prompt) - - -def _build_prompt(text, suffix, show_default=False, default=None, show_choices=True, type=None): - prompt = text - if type is not None and show_choices and isinstance(type, Choice): - prompt += ' (' + ", ".join(map(str, type.choices)) + ')' - if default is not None and show_default: - prompt = '%s [%s]' % (prompt, default) - return prompt + suffix - - -def prompt(text, default=None, hide_input=False, confirmation_prompt=False, - type=None, value_proc=None, prompt_suffix=': ', show_default=True, - err=False, show_choices=True): - """Prompts a user for input. This is a convenience function that can - be used to prompt a user for input. - - If the user aborts the input by sending an interrupt signal, this - function will catch it and raise a :exc:`Abort` exception. - - .. versionadded:: 7.0 - Added the show_choices parameter. - - .. versionadded:: 6.0 - Added unicode support for cmd.exe on Windows. - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param text: the text to show for the prompt. - :param default: the default value to use if no input happens. If this - is not given it will prompt until it's aborted. - :param hide_input: if this is set to true then the input value will - be hidden. - :param confirmation_prompt: asks for confirmation for the value. - :param type: the type to use to check the value against.
- :param value_proc: if this parameter is provided it's a function that - is invoked instead of the type conversion to - convert a value. - :param prompt_suffix: a suffix that should be added to the prompt. - :param show_default: shows or hides the default value in the prompt. - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``, the same as with echo. - :param show_choices: Show or hide choices if the passed type is a Choice. - For example if type is a Choice of either day or week, - show_choices is true and text is "Group by" then the - prompt will be "Group by (day, week): ". - """ - result = None - - def prompt_func(text): - f = hide_input and hidden_prompt_func or visible_prompt_func - try: - # Write the prompt separately so that we get nice - # coloring through colorama on Windows - echo(text, nl=False, err=err) - return f('') - except (KeyboardInterrupt, EOFError): - # getpass doesn't print a newline if the user aborts input with ^C. - # Allegedly this behavior is inherited from getpass(3). - # A doc bug has been filed at https://bugs.python.org/issue24711 - if hide_input: - echo(None, err=err) - raise Abort() - - if value_proc is None: - value_proc = convert_type(type, default) - - prompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) - - while 1: - while 1: - value = prompt_func(prompt) - if value: - break - elif default is not None: - if isinstance(value_proc, Path): - # validate Path default value (exists, dir_okay etc.) - value = default - break - return default - try: - result = value_proc(value) - except UsageError as e: - echo('Error: %s' % e.message, err=err) - continue - if not confirmation_prompt: - return result - while 1: - value2 = prompt_func('Repeat for confirmation: ') - if value2: - break - if value == value2: - return result - echo('Error: the two entered values do not match', err=err) - - -def confirm(text, default=False, abort=False, prompt_suffix=': ', - show_default=True, err=False): - """Prompts for confirmation (yes/no question). - - If the user aborts the input by sending an interrupt signal this - function will catch it and raise a :exc:`Abort` exception. - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param text: the question to ask. - :param default: the default for the prompt. - :param abort: if this is set to `True` a negative answer aborts the - execution by raising :exc:`Abort`. - :param prompt_suffix: a suffix that should be added to the prompt. - :param show_default: shows or hides the default value in the prompt. - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``, the same as with echo. - """ - prompt = _build_prompt(text, prompt_suffix, show_default, - default and 'Y/n' or 'y/N') - while 1: - try: - # Write the prompt separately so that we get nice - # coloring through colorama on Windows - echo(prompt, nl=False, err=err) - value = visible_prompt_func('').lower().strip() - except (KeyboardInterrupt, EOFError): - raise Abort() - if value in ('y', 'yes'): - rv = True - elif value in ('n', 'no'): - rv = False - elif value == '': - rv = default - else: - echo('Error: invalid input', err=err) - continue - break - if abort and not rv: - raise Abort() - return rv - - -def get_terminal_size(): - """Returns the current size of the terminal as a tuple in the form - ``(width, height)`` in columns and rows.
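- - For example (an added note):: - - width, height = get_terminal_size()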
- """ - # If shutil has get_terminal_size() (Python 3.3 and later) use that - if sys.version_info >= (3, 3): - import shutil - shutil_get_terminal_size = getattr(shutil, 'get_terminal_size', None) - if shutil_get_terminal_size: - sz = shutil_get_terminal_size() - return sz.columns, sz.lines - - # We provide a sensible default for get_winterm_size() when being invoked - # inside a subprocess. Without this, it would not provide a useful input. - if get_winterm_size is not None: - size = get_winterm_size() - if size == (0, 0): - return (79, 24) - else: - return size - - def ioctl_gwinsz(fd): - try: - import fcntl - import termios - cr = struct.unpack( - 'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')) - except Exception: - return - return cr - - cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2) - if not cr: - try: - fd = os.open(os.ctermid(), os.O_RDONLY) - try: - cr = ioctl_gwinsz(fd) - finally: - os.close(fd) - except Exception: - pass - if not cr or not cr[0] or not cr[1]: - cr = (os.environ.get('LINES', 25), - os.environ.get('COLUMNS', DEFAULT_COLUMNS)) - return int(cr[1]), int(cr[0]) - - -def echo_via_pager(text_or_generator, color=None): - """This function takes a text and shows it via an environment specific - pager on stdout. - - .. versionchanged:: 3.0 - Added the `color` flag. - - :param text_or_generator: the text to page, or alternatively, a - generator emitting the text to page. - :param color: controls if the pager supports ANSI colors or not. The - default is autodetection. - """ - color = resolve_color_default(color) - - if inspect.isgeneratorfunction(text_or_generator): - i = text_or_generator() - elif isinstance(text_or_generator, string_types): - i = [text_or_generator] - else: - i = iter(text_or_generator) - - # convert every element of i to a text type if necessary - text_generator = (el if isinstance(el, string_types) else text_type(el) - for el in i) - - from ._termui_impl import pager - return pager(itertools.chain(text_generator, "\n"), color) - - -def progressbar(iterable=None, length=None, label=None, show_eta=True, - show_percent=None, show_pos=False, - item_show_func=None, fill_char='#', empty_char='-', - bar_template='%(label)s [%(bar)s] %(info)s', - info_sep=' ', width=36, file=None, color=None): - """This function creates an iterable context manager that can be used - to iterate over something while showing a progress bar. It will - either iterate over the `iterable` or `length` items (that are counted - up). While iteration happens, this function will print a rendered - progress bar to the given `file` (defaults to stdout) and will attempt - to calculate remaining time and more. By default, this progress bar - will not be rendered if the file is not a terminal. - - The context manager creates the progress bar. When the context - manager is entered the progress bar is already displayed. With every - iteration over the progress bar, the iterable passed to the bar is - advanced and the bar is updated. When the context manager exits, - a newline is printed and the progress bar is finalized on screen. - - No printing must happen or the progress bar will be unintentionally - destroyed. - - Example usage:: - - with progressbar(items) as bar: - for item in bar: - do_something_with(item) - - Alternatively, if no iterable is specified, one can manually update the - progress bar through the `update()` method instead of directly - iterating over the progress bar. 
The update method accepts the number - of steps to increment the bar with:: - - with progressbar(length=chunks.total_bytes) as bar: - for chunk in chunks: - process_chunk(chunk) - bar.update(chunks.bytes) - - .. versionadded:: 2.0 - - .. versionadded:: 4.0 - Added the `color` parameter. Added an `update` method to the - progressbar object. - - :param iterable: an iterable to iterate over. If not provided the length - is required. - :param length: the number of items to iterate over. By default the - progressbar will attempt to ask the iterator about its - length, which might or might not work. If an iterable is - also provided this parameter can be used to override the - length. If an iterable is not provided the progress bar - will iterate over a range of that length. - :param label: the label to show next to the progress bar. - :param show_eta: enables or disables the estimated time display. This is - automatically disabled if the length cannot be - determined. - :param show_percent: enables or disables the percentage display. The - default is `True` if the iterable has a length or - `False` if not. - :param show_pos: enables or disables the absolute position display. The - default is `False`. - :param item_show_func: a function called with the current item which - can return a string to show the current item - next to the progress bar. Note that the current - item can be `None`! - :param fill_char: the character to use to show the filled part of the - progress bar. - :param empty_char: the character to use to show the non-filled part of - the progress bar. - :param bar_template: the format string to use as template for the bar. - The parameters in it are ``label`` for the label, - ``bar`` for the progress bar and ``info`` for the - info section. - :param info_sep: the separator between multiple info items (eta etc.) - :param width: the width of the progress bar in characters, 0 means full - terminal width - :param file: the file to write to. If this is not a terminal then - only the label is printed. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. This is only needed if ANSI - codes are included anywhere in the progress bar output - which is not the case by default. - """ - from ._termui_impl import ProgressBar - color = resolve_color_default(color) - return ProgressBar(iterable=iterable, length=length, show_eta=show_eta, - show_percent=show_percent, show_pos=show_pos, - item_show_func=item_show_func, fill_char=fill_char, - empty_char=empty_char, bar_template=bar_template, - info_sep=info_sep, file=file, label=label, - width=width, color=color) - - -def clear(): - """Clears the terminal screen. This will have the effect of clearing - the whole visible space of the terminal and moving the cursor to the - top left. This does not do anything if not connected to a terminal. - - .. versionadded:: 2.0 - """ - if not isatty(sys.stdout): - return - # If we're on Windows and we don't have colorama available, then we - # clear the screen by shelling out. Otherwise we can use an escape - # sequence. - if WIN: - os.system('cls') - else: - sys.stdout.write('\033[2J\033[1;1H') - - -def style(text, fg=None, bg=None, bold=None, dim=None, underline=None, - blink=None, reverse=None, reset=True): - """Styles a text with ANSI styles and returns the new string. By - default the styling is self contained which means that at the end - of the string a reset code is issued. This can be prevented by - passing ``reset=False``.
- - Examples:: - - click.echo(click.style('Hello World!', fg='green')) - click.echo(click.style('ATTENTION!', blink=True)) - click.echo(click.style('Some things', reverse=True, fg='cyan')) - - Supported color names: - - * ``black`` (might be a gray) - * ``red`` - * ``green`` - * ``yellow`` (might be an orange) - * ``blue`` - * ``magenta`` - * ``cyan`` - * ``white`` (might be light gray) - * ``bright_black`` - * ``bright_red`` - * ``bright_green`` - * ``bright_yellow`` - * ``bright_blue`` - * ``bright_magenta`` - * ``bright_cyan`` - * ``bright_white`` - * ``reset`` (reset the color code only) - - .. versionadded:: 2.0 - - .. versionadded:: 7.0 - Added support for bright colors. - - :param text: the string to style with ansi codes. - :param fg: if provided this will become the foreground color. - :param bg: if provided this will become the background color. - :param bold: if provided this will enable or disable bold mode. - :param dim: if provided this will enable or disable dim mode. This is - badly supported. - :param underline: if provided this will enable or disable underline. - :param blink: if provided this will enable or disable blinking. - :param reverse: if provided this will enable or disable inverse - rendering (foreground becomes background and the - other way round). - :param reset: by default a reset-all code is added at the end of the - string which means that styles do not carry over. This - can be disabled to compose styles. - """ - bits = [] - if fg: - try: - bits.append('\033[%dm' % (_ansi_colors[fg])) - except KeyError: - raise TypeError('Unknown color %r' % fg) - if bg: - try: - bits.append('\033[%dm' % (_ansi_colors[bg] + 10)) - except KeyError: - raise TypeError('Unknown color %r' % bg) - if bold is not None: - bits.append('\033[%dm' % (1 if bold else 22)) - if dim is not None: - bits.append('\033[%dm' % (2 if dim else 22)) - if underline is not None: - bits.append('\033[%dm' % (4 if underline else 24)) - if blink is not None: - bits.append('\033[%dm' % (5 if blink else 25)) - if reverse is not None: - bits.append('\033[%dm' % (7 if reverse else 27)) - bits.append(text) - if reset: - bits.append(_ansi_reset_all) - return ''.join(bits) - - -def unstyle(text): - """Removes ANSI styling information from a string. Usually it's not - necessary to use this function as Click's echo function will - automatically remove styling if necessary. - - .. versionadded:: 2.0 - - :param text: the text to remove style information from. - """ - return strip_ansi(text) - - -def secho(message=None, file=None, nl=True, err=False, color=None, **styles): - """This function combines :func:`echo` and :func:`style` into one - call. As such the following two calls are the same:: - - click.secho('Hello World!', fg='green') - click.echo(click.style('Hello World!', fg='green')) - - All keyword arguments are forwarded to the underlying functions - depending on which one they go with. - - .. versionadded:: 2.0 - """ - if message is not None: - message = style(message, **styles) - return echo(message, file=file, nl=nl, err=err, color=color) - - -def edit(text=None, editor=None, env=None, require_save=True, - extension='.txt', filename=None): - r"""Edits the given text in the defined editor. If an editor is given - (should be the full path to the executable but the regular operating - system search path is used for finding the executable) it overrides - the detected editor. Optionally, some environment variables can be - used. If the editor is closed without changes, `None` is returned. 
In - case a file is edited directly the return value is always `None` and - `require_save` and `extension` are ignored. - - If the editor cannot be opened a :exc:`UsageError` is raised. - - Note for Windows: to simplify cross-platform usage, the newlines are - automatically converted from POSIX to Windows and vice versa. As such, - the message here will have ``\n`` as newline markers. - - :param text: the text to edit. - :param editor: optionally the editor to use. Defaults to automatic - detection. - :param env: environment variables to forward to the editor. - :param require_save: if this is true, then not saving in the editor - will make the return value become `None`. - :param extension: the extension to tell the editor about. This defaults - to `.txt` but changing this might change syntax - highlighting. - :param filename: if provided it will edit this file instead of the - provided text contents. It will not use a temporary - file as an indirection in that case. - """ - from ._termui_impl import Editor - editor = Editor(editor=editor, env=env, require_save=require_save, - extension=extension) - if filename is None: - return editor.edit(text) - editor.edit_file(filename) - - -def launch(url, wait=False, locate=False): - """This function launches the given URL (or filename) in the default - viewer application for this file type. If this is an executable, it - might launch the executable in a new session. The return value is - the exit code of the launched application. Usually, ``0`` indicates - success. - - Examples:: - - click.launch('https://click.palletsprojects.com/') - click.launch('/my/downloaded/file', locate=True) - - .. versionadded:: 2.0 - - :param url: URL or filename of the thing to launch. - :param wait: waits for the program to stop. - :param locate: if this is set to `True` then instead of launching the - application associated with the URL it will attempt to - launch a file manager with the file located. This - might have weird effects if the URL does not point to - the filesystem. - """ - from ._termui_impl import open_url - return open_url(url, wait=wait, locate=locate) - - -# If this is provided, getchar() calls into this instead. This is used -# for unittesting purposes. -_getchar = None - - -def getchar(echo=False): - """Fetches a single character from the terminal and returns it. This - will always return a unicode character and under certain rare - circumstances this might return more than one character. The - situations in which more than one character is returned are when, for - whatever reason, multiple characters end up in the terminal buffer or - standard input was not actually a terminal. - - Note that this will always read from the terminal, even if something - is piped into the standard input. - - Note for Windows: in rare cases when typing non-ASCII characters, this - function might wait for a second character and then return both at once. - This is because certain Unicode characters look like special-key markers. - - .. versionadded:: 2.0 - - :param echo: if set to `True`, the character read will also show up on - the terminal. The default is to not show it. - """ - f = _getchar - if f is None: - from ._termui_impl import getchar as f - return f(echo) - - -def raw_terminal(): - from ._termui_impl import raw_terminal as f - return f() - - -def pause(info='Press any key to continue ...', err=False): - """This command stops execution and waits for the user to press any - key to continue. This is similar to the Windows batch "pause" - command.
If the program is not run through a terminal, this command - will instead do nothing. - - .. versionadded:: 2.0 - - .. versionadded:: 4.0 - Added the `err` parameter. - - :param info: the info string to print before pausing. - :param err: if set to true the message goes to ``stderr`` instead of - ``stdout``, the same as with echo. - """ - if not isatty(sys.stdin) or not isatty(sys.stdout): - return - try: - if info: - echo(info, nl=False, err=err) - try: - getchar() - except (KeyboardInterrupt, EOFError): - pass - finally: - if info: - echo(err=err) diff --git a/venv/lib/python3.8/site-packages/click/testing.py b/venv/lib/python3.8/site-packages/click/testing.py deleted file mode 100644 index 1b2924e0..00000000 --- a/venv/lib/python3.8/site-packages/click/testing.py +++ /dev/null @@ -1,374 +0,0 @@ -import os -import sys -import shutil -import tempfile -import contextlib -import shlex - -from ._compat import iteritems, PY2, string_types - - -# If someone wants to vendor click, we want to ensure the -# correct package is discovered. Ideally we could use a -# relative import here but unfortunately Python does not -# support that. -clickpkg = sys.modules[__name__.rsplit('.', 1)[0]] - - -if PY2: - from cStringIO import StringIO -else: - import io - from ._compat import _find_binary_reader - - -class EchoingStdin(object): - - def __init__(self, input, output): - self._input = input - self._output = output - - def __getattr__(self, x): - return getattr(self._input, x) - - def _echo(self, rv): - self._output.write(rv) - return rv - - def read(self, n=-1): - return self._echo(self._input.read(n)) - - def readline(self, n=-1): - return self._echo(self._input.readline(n)) - - def readlines(self): - return [self._echo(x) for x in self._input.readlines()] - - def __iter__(self): - return iter(self._echo(x) for x in self._input) - - def __repr__(self): - return repr(self._input) - - -def make_input_stream(input, charset): - # Is already an input stream. - if hasattr(input, 'read'): - if PY2: - return input - rv = _find_binary_reader(input) - if rv is not None: - return rv - raise TypeError('Could not find binary reader for input stream.') - - if input is None: - input = b'' - elif not isinstance(input, bytes): - input = input.encode(charset) - if PY2: - return StringIO(input) - return io.BytesIO(input) - - -class Result(object): - """Holds the captured result of an invoked CLI script.""" - - def __init__(self, runner, stdout_bytes, stderr_bytes, exit_code, - exception, exc_info=None): - #: The runner that created the result - self.runner = runner - #: The standard output as bytes. - self.stdout_bytes = stdout_bytes - #: The standard error as bytes, or falsy if not available - self.stderr_bytes = stderr_bytes - #: The exit code as an integer. - self.exit_code = exit_code - #: The exception that happened if one did.
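- #: (An added note: this stays `None` unless the invoked command - #: raised an exception or exited with a non-zero code.)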
- self.exception = exception - #: The traceback - self.exc_info = exc_info - - @property - def output(self): - """The (standard) output as a unicode string.""" - return self.stdout - - @property - def stdout(self): - """The standard output as a unicode string.""" - return self.stdout_bytes.decode(self.runner.charset, 'replace') \ - .replace('\r\n', '\n') - - @property - def stderr(self): - """The standard error as a unicode string.""" - if not self.stderr_bytes: - raise ValueError("stderr not separately captured") - return self.stderr_bytes.decode(self.runner.charset, 'replace') \ - .replace('\r\n', '\n') - - - def __repr__(self): - return '<%s %s>' % ( - type(self).__name__, - self.exception and repr(self.exception) or 'okay', - ) - - -class CliRunner(object): - """The CLI runner provides functionality to invoke a Click command line - script for unittesting purposes in an isolated environment. This only - works in single-threaded systems without any concurrency as it changes the - global interpreter state. - - :param charset: the character set for the input and output data. This is - UTF-8 by default and should not be changed currently as - reporting to Click only works properly on Python 2. - :param env: a dictionary with environment variables for overriding. - :param echo_stdin: if this is set to `True`, then reading from stdin writes - to stdout. This is useful for showing examples in - some circumstances. Note that regular prompts - will automatically echo the input. - :param mix_stderr: if this is set to `False`, then stdout and stderr are - preserved as independent streams. This is useful for - Unix-philosophy apps that have predictable stdout and - noisy stderr, such that each may be measured - independently. - """ - - def __init__(self, charset=None, env=None, echo_stdin=False, - mix_stderr=True): - if charset is None: - charset = 'utf-8' - self.charset = charset - self.env = env or {} - self.echo_stdin = echo_stdin - self.mix_stderr = mix_stderr - - def get_default_prog_name(self, cli): - """Given a command object it will return the default program name - for it. The default is the `name` attribute or ``"root"`` if not - set. - """ - return cli.name or 'root' - - def make_env(self, overrides=None): - """Returns the environment overrides for invoking a script.""" - rv = dict(self.env) - if overrides: - rv.update(overrides) - return rv - - @contextlib.contextmanager - def isolation(self, input=None, env=None, color=False): - """A context manager that sets up the isolation for invoking a - command line tool. This sets up stdin with the given input data - and `os.environ` with the overrides from the given dictionary. - This also rebinds some internals in Click to be mocked (like the - prompt functionality). - - This is automatically done in the :meth:`invoke` method. - - .. versionadded:: 4.0 - The ``color`` parameter was added. - - :param input: the input stream to put into sys.stdin. - :param env: the environment overrides as dictionary. - :param color: whether the output should contain color codes. The - application can still override this explicitly.
- """ - input = make_input_stream(input, self.charset) - - old_stdin = sys.stdin - old_stdout = sys.stdout - old_stderr = sys.stderr - old_forced_width = clickpkg.formatting.FORCED_WIDTH - clickpkg.formatting.FORCED_WIDTH = 80 - - env = self.make_env(env) - - if PY2: - bytes_output = StringIO() - if self.echo_stdin: - input = EchoingStdin(input, bytes_output) - sys.stdout = bytes_output - if not self.mix_stderr: - bytes_error = StringIO() - sys.stderr = bytes_error - else: - bytes_output = io.BytesIO() - if self.echo_stdin: - input = EchoingStdin(input, bytes_output) - input = io.TextIOWrapper(input, encoding=self.charset) - sys.stdout = io.TextIOWrapper( - bytes_output, encoding=self.charset) - if not self.mix_stderr: - bytes_error = io.BytesIO() - sys.stderr = io.TextIOWrapper( - bytes_error, encoding=self.charset) - - if self.mix_stderr: - sys.stderr = sys.stdout - - sys.stdin = input - - def visible_input(prompt=None): - sys.stdout.write(prompt or '') - val = input.readline().rstrip('\r\n') - sys.stdout.write(val + '\n') - sys.stdout.flush() - return val - - def hidden_input(prompt=None): - sys.stdout.write((prompt or '') + '\n') - sys.stdout.flush() - return input.readline().rstrip('\r\n') - - def _getchar(echo): - char = sys.stdin.read(1) - if echo: - sys.stdout.write(char) - sys.stdout.flush() - return char - - default_color = color - - def should_strip_ansi(stream=None, color=None): - if color is None: - return not default_color - return not color - - old_visible_prompt_func = clickpkg.termui.visible_prompt_func - old_hidden_prompt_func = clickpkg.termui.hidden_prompt_func - old__getchar_func = clickpkg.termui._getchar - old_should_strip_ansi = clickpkg.utils.should_strip_ansi - clickpkg.termui.visible_prompt_func = visible_input - clickpkg.termui.hidden_prompt_func = hidden_input - clickpkg.termui._getchar = _getchar - clickpkg.utils.should_strip_ansi = should_strip_ansi - - old_env = {} - try: - for key, value in iteritems(env): - old_env[key] = os.environ.get(key) - if value is None: - try: - del os.environ[key] - except Exception: - pass - else: - os.environ[key] = value - yield (bytes_output, not self.mix_stderr and bytes_error) - finally: - for key, value in iteritems(old_env): - if value is None: - try: - del os.environ[key] - except Exception: - pass - else: - os.environ[key] = value - sys.stdout = old_stdout - sys.stderr = old_stderr - sys.stdin = old_stdin - clickpkg.termui.visible_prompt_func = old_visible_prompt_func - clickpkg.termui.hidden_prompt_func = old_hidden_prompt_func - clickpkg.termui._getchar = old__getchar_func - clickpkg.utils.should_strip_ansi = old_should_strip_ansi - clickpkg.formatting.FORCED_WIDTH = old_forced_width - - def invoke(self, cli, args=None, input=None, env=None, - catch_exceptions=True, color=False, mix_stderr=False, **extra): - """Invokes a command in an isolated environment. The arguments are - forwarded directly to the command line script, the `extra` keyword - arguments are passed to the :meth:`~clickpkg.Command.main` function of - the command. - - This returns a :class:`Result` object. - - .. versionadded:: 3.0 - The ``catch_exceptions`` parameter was added. - - .. versionchanged:: 3.0 - The result object now has an `exc_info` attribute with the - traceback if available. - - .. versionadded:: 4.0 - The ``color`` parameter was added. - - :param cli: the command to invoke - :param args: the arguments to invoke. It may be given as an iterable - or a string. When given as string it will be interpreted - as a Unix shell command. 
More details at - :func:`shlex.split`. - :param input: the input data for `sys.stdin`. - :param env: the environment overrides. - :param catch_exceptions: Whether to catch any other exceptions than - ``SystemExit``. - :param extra: the keyword arguments to pass to :meth:`main`. - :param color: whether the output should contain color codes. The - application can still override this explicitly. - """ - exc_info = None - with self.isolation(input=input, env=env, color=color) as outstreams: - exception = None - exit_code = 0 - - if isinstance(args, string_types): - args = shlex.split(args) - - try: - prog_name = extra.pop("prog_name") - except KeyError: - prog_name = self.get_default_prog_name(cli) - - try: - cli.main(args=args or (), prog_name=prog_name, **extra) - except SystemExit as e: - exc_info = sys.exc_info() - exit_code = e.code - if exit_code is None: - exit_code = 0 - - if exit_code != 0: - exception = e - - if not isinstance(exit_code, int): - sys.stdout.write(str(exit_code)) - sys.stdout.write('\n') - exit_code = 1 - - except Exception as e: - if not catch_exceptions: - raise - exception = e - exit_code = 1 - exc_info = sys.exc_info() - finally: - sys.stdout.flush() - stdout = outstreams[0].getvalue() - stderr = outstreams[1] and outstreams[1].getvalue() - - return Result(runner=self, - stdout_bytes=stdout, - stderr_bytes=stderr, - exit_code=exit_code, - exception=exception, - exc_info=exc_info) - - @contextlib.contextmanager - def isolated_filesystem(self): - """A context manager that creates a temporary folder and changes - the current working directory to it for isolated filesystem tests. - """ - cwd = os.getcwd() - t = tempfile.mkdtemp() - os.chdir(t) - try: - yield t - finally: - os.chdir(cwd) - try: - shutil.rmtree(t) - except (OSError, IOError): - pass diff --git a/venv/lib/python3.8/site-packages/click/types.py b/venv/lib/python3.8/site-packages/click/types.py deleted file mode 100644 index 1f88032f..00000000 --- a/venv/lib/python3.8/site-packages/click/types.py +++ /dev/null @@ -1,668 +0,0 @@ -import os -import stat -from datetime import datetime - -from ._compat import open_stream, text_type, filename_to_ui, \ - get_filesystem_encoding, get_streerror, _get_argv_encoding, PY2 -from .exceptions import BadParameter -from .utils import safecall, LazyFile - - -class ParamType(object): - """Helper for converting values through types. The following is - necessary for a valid type: - - * it needs a name - * it needs to pass through None unchanged - * it needs to convert from a string - * it needs to convert its result type through unchanged - (eg: needs to be idempotent) - * it needs to be able to deal with param and context being `None`. - This can be the case when the object is used with prompt - inputs. - """ - is_composite = False - - #: the descriptive name of this type - name = None - - #: if a list of this type is expected and the value is pulled from a - #: string environment variable, this is what splits it up. `None` - #: means any whitespace. For all parameters the general rule is that - #: whitespace splits them up. The exception are paths and files which - #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on - #: Windows). 
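- #: For example (an added note): with the default `None` splitter, - #: "a b  c".split(None) gives ['a', 'b', 'c'], while File and Path - #: split on os.path.pathsep, so "/x:/y".split(':') gives - #: ['/x', '/y'] on Unix.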
- envvar_list_splitter = None - - def __call__(self, value, param=None, ctx=None): - if value is not None: - return self.convert(value, param, ctx) - - def get_metavar(self, param): - """Returns the metavar default for this param if it provides one.""" - - def get_missing_message(self, param): - """Optionally might return extra information about a missing - parameter. - - .. versionadded:: 2.0 - """ - - def convert(self, value, param, ctx): - """Converts the value. This is not invoked for values that are - `None` (the missing value). - """ - return value - - def split_envvar_value(self, rv): - """Given a value from an environment variable this splits it up - into small chunks depending on the defined envvar list splitter. - - If the splitter is set to `None`, which means that whitespace splits, - then leading and trailing whitespace is ignored. Otherwise, leading - and trailing splitters usually lead to empty items being included. - """ - return (rv or '').split(self.envvar_list_splitter) - - def fail(self, message, param=None, ctx=None): - """Helper method to fail with an invalid value message.""" - raise BadParameter(message, ctx=ctx, param=param) - - -class CompositeParamType(ParamType): - is_composite = True - - @property - def arity(self): - raise NotImplementedError() - - -class FuncParamType(ParamType): - - def __init__(self, func): - self.name = func.__name__ - self.func = func - - def convert(self, value, param, ctx): - try: - return self.func(value) - except ValueError: - try: - value = text_type(value) - except UnicodeError: - value = str(value).decode('utf-8', 'replace') - self.fail(value, param, ctx) - - -class UnprocessedParamType(ParamType): - name = 'text' - - def convert(self, value, param, ctx): - return value - - def __repr__(self): - return 'UNPROCESSED' - - -class StringParamType(ParamType): - name = 'text' - - def convert(self, value, param, ctx): - if isinstance(value, bytes): - enc = _get_argv_encoding() - try: - value = value.decode(enc) - except UnicodeError: - fs_enc = get_filesystem_encoding() - if fs_enc != enc: - try: - value = value.decode(fs_enc) - except UnicodeError: - value = value.decode('utf-8', 'replace') - return value - return value - - def __repr__(self): - return 'STRING' - - -class Choice(ParamType): - """The choice type allows a value to be checked against a fixed set - of supported values. All of these values have to be strings. - - You should only pass a list or tuple of choices. Other iterables - (like generators) may lead to surprising results. - - See :ref:`choice-opts` for an example. - - :param case_sensitive: Set to false to make choices case - insensitive. Defaults to true. - """ - - name = 'choice' - - def __init__(self, choices, case_sensitive=True): - self.choices = choices - self.case_sensitive = case_sensitive - - def get_metavar(self, param): - return '[%s]' % '|'.join(self.choices) - - def get_missing_message(self, param): - return 'Choose from:\n\t%s.' 
% ',\n\t'.join(self.choices) - - def convert(self, value, param, ctx): - # Exact match - if value in self.choices: - return value - - # Match through normalization and case sensitivity - # first do token_normalize_func, then lowercase - # preserve original `value` to produce an accurate message in - # `self.fail` - normed_value = value - normed_choices = self.choices - - if ctx is not None and \ - ctx.token_normalize_func is not None: - normed_value = ctx.token_normalize_func(value) - normed_choices = [ctx.token_normalize_func(choice) for choice in - self.choices] - - if not self.case_sensitive: - normed_value = normed_value.lower() - normed_choices = [choice.lower() for choice in normed_choices] - - if normed_value in normed_choices: - return normed_value - - self.fail('invalid choice: %s. (choose from %s)' % - (value, ', '.join(self.choices)), param, ctx) - - def __repr__(self): - return 'Choice(%r)' % list(self.choices) - - -class DateTime(ParamType): - """The DateTime type converts date strings into `datetime` objects. - - The format strings which are checked are configurable, but default to some - common (non-timezone aware) ISO 8601 formats. - - When specifying *DateTime* formats, you should only pass a list or a tuple. - Other iterables, like generators, may lead to surprising results. - - The format strings are processed using ``datetime.strptime``, and this - consequently defines the format strings which are allowed. - - Parsing is tried using each format, in order, and the first format which - parses successfully is used. - - :param formats: A list or tuple of date format strings, in the order in - which they should be tried. Defaults to - ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``, - ``'%Y-%m-%d %H:%M:%S'``. - """ - name = 'datetime' - - def __init__(self, formats=None): - self.formats = formats or [ - '%Y-%m-%d', - '%Y-%m-%dT%H:%M:%S', - '%Y-%m-%d %H:%M:%S' - ] - - def get_metavar(self, param): - return '[{}]'.format('|'.join(self.formats)) - - def _try_to_convert_date(self, value, format): - try: - return datetime.strptime(value, format) - except ValueError: - return None - - def convert(self, value, param, ctx): - # Exact match - for format in self.formats: - dtime = self._try_to_convert_date(value, format) - if dtime: - return dtime - - self.fail( - 'invalid datetime format: {}. (choose from {})'.format( - value, ', '.join(self.formats))) - - def __repr__(self): - return 'DateTime' - - -class IntParamType(ParamType): - name = 'integer' - - def convert(self, value, param, ctx): - try: - return int(value) - except (ValueError, UnicodeError): - self.fail('%s is not a valid integer' % value, param, ctx) - - def __repr__(self): - return 'INT' - - -class IntRange(IntParamType): - """A parameter that works similar to :data:`click.INT` but restricts - the value to fit into a range. The default behavior is to fail if the - value falls outside the range, but it can also be silently clamped - between the two edges. - - See :ref:`ranges` for an example. 
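- - For instance (an added note), ``IntRange(0, 10, clamp=True)`` converts - the string ``"42"`` to ``10`` instead of failing.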
- """ - name = 'integer range' - - def __init__(self, min=None, max=None, clamp=False): - self.min = min - self.max = max - self.clamp = clamp - - def convert(self, value, param, ctx): - rv = IntParamType.convert(self, value, param, ctx) - if self.clamp: - if self.min is not None and rv < self.min: - return self.min - if self.max is not None and rv > self.max: - return self.max - if self.min is not None and rv < self.min or \ - self.max is not None and rv > self.max: - if self.min is None: - self.fail('%s is bigger than the maximum valid value ' - '%s.' % (rv, self.max), param, ctx) - elif self.max is None: - self.fail('%s is smaller than the minimum valid value ' - '%s.' % (rv, self.min), param, ctx) - else: - self.fail('%s is not in the valid range of %s to %s.' - % (rv, self.min, self.max), param, ctx) - return rv - - def __repr__(self): - return 'IntRange(%r, %r)' % (self.min, self.max) - - -class FloatParamType(ParamType): - name = 'float' - - def convert(self, value, param, ctx): - try: - return float(value) - except (UnicodeError, ValueError): - self.fail('%s is not a valid floating point value' % - value, param, ctx) - - def __repr__(self): - return 'FLOAT' - - -class FloatRange(FloatParamType): - """A parameter that works similar to :data:`click.FLOAT` but restricts - the value to fit into a range. The default behavior is to fail if the - value falls outside the range, but it can also be silently clamped - between the two edges. - - See :ref:`ranges` for an example. - """ - name = 'float range' - - def __init__(self, min=None, max=None, clamp=False): - self.min = min - self.max = max - self.clamp = clamp - - def convert(self, value, param, ctx): - rv = FloatParamType.convert(self, value, param, ctx) - if self.clamp: - if self.min is not None and rv < self.min: - return self.min - if self.max is not None and rv > self.max: - return self.max - if self.min is not None and rv < self.min or \ - self.max is not None and rv > self.max: - if self.min is None: - self.fail('%s is bigger than the maximum valid value ' - '%s.' % (rv, self.max), param, ctx) - elif self.max is None: - self.fail('%s is smaller than the minimum valid value ' - '%s.' % (rv, self.min), param, ctx) - else: - self.fail('%s is not in the valid range of %s to %s.' - % (rv, self.min, self.max), param, ctx) - return rv - - def __repr__(self): - return 'FloatRange(%r, %r)' % (self.min, self.max) - - -class BoolParamType(ParamType): - name = 'boolean' - - def convert(self, value, param, ctx): - if isinstance(value, bool): - return bool(value) - value = value.lower() - if value in ('true', 't', '1', 'yes', 'y'): - return True - elif value in ('false', 'f', '0', 'no', 'n'): - return False - self.fail('%s is not a valid boolean' % value, param, ctx) - - def __repr__(self): - return 'BOOL' - - -class UUIDParameterType(ParamType): - name = 'uuid' - - def convert(self, value, param, ctx): - import uuid - try: - if PY2 and isinstance(value, text_type): - value = value.encode('ascii') - return uuid.UUID(value) - except (UnicodeError, ValueError): - self.fail('%s is not a valid UUID value' % value, param, ctx) - - def __repr__(self): - return 'UUID' - - -class File(ParamType): - """Declares a parameter to be a file for reading or writing. The file - is automatically closed once the context tears down (after the command - finished working). - - Files can be opened for reading or writing. The special value ``-`` - indicates stdin or stdout depending on the mode. 
- - By default, the file is opened for reading text data, but it can also be - opened in binary mode or for writing. The encoding parameter can be used - to force a specific encoding. - - The `lazy` flag controls if the file should be opened immediately or upon - first IO. The default is to be non-lazy for standard input and output - streams as well as files opened for reading, `lazy` otherwise. When opening a - file lazily for reading, it is still opened temporarily for validation, but - will not be held open until first IO. lazy is mainly useful when opening - for writing to avoid creating the file until it is needed. - - Starting with Click 2.0, files can also be opened atomically in which - case all writes go into a separate file in the same folder and upon - completion the file will be moved over to the original location. This - is useful if a file regularly read by other users is modified. - - See :ref:`file-args` for more information. - """ - name = 'filename' - envvar_list_splitter = os.path.pathsep - - def __init__(self, mode='r', encoding=None, errors='strict', lazy=None, - atomic=False): - self.mode = mode - self.encoding = encoding - self.errors = errors - self.lazy = lazy - self.atomic = atomic - - def resolve_lazy_flag(self, value): - if self.lazy is not None: - return self.lazy - if value == '-': - return False - elif 'w' in self.mode: - return True - return False - - def convert(self, value, param, ctx): - try: - if hasattr(value, 'read') or hasattr(value, 'write'): - return value - - lazy = self.resolve_lazy_flag(value) - - if lazy: - f = LazyFile(value, self.mode, self.encoding, self.errors, - atomic=self.atomic) - if ctx is not None: - ctx.call_on_close(f.close_intelligently) - return f - - f, should_close = open_stream(value, self.mode, - self.encoding, self.errors, - atomic=self.atomic) - # If a context is provided, we automatically close the file - # at the end of the context execution (or flush out). If a - # context does not exist, it's the caller's responsibility to - # properly close the file. This for instance happens when the - # type is used with prompts. - if ctx is not None: - if should_close: - ctx.call_on_close(safecall(f.close)) - else: - ctx.call_on_close(safecall(f.flush)) - return f - except (IOError, OSError) as e: - self.fail('Could not open file: %s: %s' % ( - filename_to_ui(value), - get_streerror(e), - ), param, ctx) - - -class Path(ParamType): - """The path type is similar to the :class:`File` type but it performs - different checks. First of all, instead of returning an open file - handle it returns just the filename. Secondly, it can perform various - basic checks about what the file or directory should be. - - .. versionchanged:: 6.0 - `allow_dash` was added. - - :param exists: if set to true, the file or directory needs to exist for - this value to be valid. If this is not required and a - file does indeed not exist, then all further checks are - silently skipped. - :param file_okay: controls if a file is a possible value. - :param dir_okay: controls if a directory is a possible value. - :param writable: if true, a writable check is performed. - :param readable: if true, a readable check is performed. - :param resolve_path: if this is true, then the path is fully resolved - before the value is passed onwards. This means - that it's absolute and symlinks are resolved. It - will not expand a tilde-prefix, as this is - supposed to be done by the shell only. 
- :param allow_dash: If this is set to `True`, a single dash to indicate - standard streams is permitted. - :param path_type: optionally a string type that should be used to - represent the path. The default is `None` which - means the return value will be either bytes or - unicode depending on what makes most sense given the - input data Click deals with. - """ - envvar_list_splitter = os.path.pathsep - - def __init__(self, exists=False, file_okay=True, dir_okay=True, - writable=False, readable=True, resolve_path=False, - allow_dash=False, path_type=None): - self.exists = exists - self.file_okay = file_okay - self.dir_okay = dir_okay - self.writable = writable - self.readable = readable - self.resolve_path = resolve_path - self.allow_dash = allow_dash - self.type = path_type - - if self.file_okay and not self.dir_okay: - self.name = 'file' - self.path_type = 'File' - elif self.dir_okay and not self.file_okay: - self.name = 'directory' - self.path_type = 'Directory' - else: - self.name = 'path' - self.path_type = 'Path' - - def coerce_path_result(self, rv): - if self.type is not None and not isinstance(rv, self.type): - if self.type is text_type: - rv = rv.decode(get_filesystem_encoding()) - else: - rv = rv.encode(get_filesystem_encoding()) - return rv - - def convert(self, value, param, ctx): - rv = value - - is_dash = self.file_okay and self.allow_dash and rv in (b'-', '-') - - if not is_dash: - if self.resolve_path: - rv = os.path.realpath(rv) - - try: - st = os.stat(rv) - except OSError: - if not self.exists: - return self.coerce_path_result(rv) - self.fail('%s "%s" does not exist.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - - if not self.file_okay and stat.S_ISREG(st.st_mode): - self.fail('%s "%s" is a file.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - if not self.dir_okay and stat.S_ISDIR(st.st_mode): - self.fail('%s "%s" is a directory.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - if self.writable and not os.access(value, os.W_OK): - self.fail('%s "%s" is not writable.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - if self.readable and not os.access(value, os.R_OK): - self.fail('%s "%s" is not readable.' % ( - self.path_type, - filename_to_ui(value) - ), param, ctx) - - return self.coerce_path_result(rv) - - -class Tuple(CompositeParamType): - """The default behavior of Click is to apply a type on a value directly. - This works well in most cases, except for when `nargs` is set to a fixed - count and different types should be used for different items. In this - case the :class:`Tuple` type can be used. This type can only be used - if `nargs` is set to a fixed number. - - For more information see :ref:`tuple-type`. - - This can be selected by using a Python tuple literal as a type. - - :param types: a list of types that should be used for the tuple items. - """ - - def __init__(self, types): - self.types = [convert_type(ty) for ty in types] - - @property - def name(self): - return "<" + " ".join(ty.name for ty in self.types) + ">" - - @property - def arity(self): - return len(self.types) - - def convert(self, value, param, ctx): - if len(value) != len(self.types): - raise TypeError('It would appear that nargs is set to conflict ' - 'with the composite type arity.') - return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value)) - - -def convert_type(ty, default=None): - """Converts a callable or python ty into the most appropriate param - ty. 
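- - For example (an added note), convert_type(int) returns INT, while - convert_type(None, default=1.0) guesses FLOAT from the default.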
- """ - guessed_type = False - if ty is None and default is not None: - if isinstance(default, tuple): - ty = tuple(map(type, default)) - else: - ty = type(default) - guessed_type = True - - if isinstance(ty, tuple): - return Tuple(ty) - if isinstance(ty, ParamType): - return ty - if ty is text_type or ty is str or ty is None: - return STRING - if ty is int: - return INT - # Booleans are only okay if not guessed. This is done because for - # flags the default value is actually a bit of a lie in that it - # indicates which of the flags is the one we want. See get_default() - # for more information. - if ty is bool and not guessed_type: - return BOOL - if ty is float: - return FLOAT - if guessed_type: - return STRING - - # Catch a common mistake - if __debug__: - try: - if issubclass(ty, ParamType): - raise AssertionError('Attempted to use an uninstantiated ' - 'parameter type (%s).' % ty) - except TypeError: - pass - return FuncParamType(ty) - - -#: A dummy parameter type that just does nothing. From a user's -#: perspective this appears to just be the same as `STRING` but internally -#: no string conversion takes place. This is necessary to achieve the -#: same bytes/unicode behavior on Python 2/3 in situations where you want -#: to not convert argument types. This is usually useful when working -#: with file paths as they can appear in bytes and unicode. -#: -#: For path related uses the :class:`Path` type is a better choice but -#: there are situations where an unprocessed type is useful which is why -#: it is provided. -#: -#: .. versionadded:: 4.0 -UNPROCESSED = UnprocessedParamType() - -#: A unicode string parameter type which is the implicit default. This -#: can also be selected by using ``str`` as type. -STRING = StringParamType() - -#: An integer parameter. This can also be selected by using ``int`` as -#: type. -INT = IntParamType() - -#: A floating point value parameter. This can also be selected by using -#: ``float`` as type. -FLOAT = FloatParamType() - -#: A boolean parameter. This is the default for boolean flags. This can -#: also be selected by using ``bool`` as a type. -BOOL = BoolParamType() - -#: A UUID parameter.
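-#: For example (an added note), a value like -#: "550e8400-e29b-41d4-a716-446655440000" is converted to the -#: corresponding uuid.UUID instance.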
-
-#: A UUID parameter.
-UUID = UUIDParameterType()
diff --git a/venv/lib/python3.8/site-packages/click/utils.py b/venv/lib/python3.8/site-packages/click/utils.py
deleted file mode 100644
index fc84369f..00000000
--- a/venv/lib/python3.8/site-packages/click/utils.py
+++ /dev/null
@@ -1,440 +0,0 @@
-import os
-import sys
-
-from .globals import resolve_color_default
-
-from ._compat import text_type, open_stream, get_filesystem_encoding, \
-    get_streerror, string_types, PY2, binary_streams, text_streams, \
-    filename_to_ui, auto_wrap_for_ansi, strip_ansi, should_strip_ansi, \
-    _default_text_stdout, _default_text_stderr, is_bytes, WIN
-
-if not PY2:
-    from ._compat import _find_binary_writer
-elif WIN:
-    from ._winconsole import _get_windows_argv, \
-        _hash_py_argv, _initial_argv_hash
-
-
-echo_native_types = string_types + (bytes, bytearray)
-
-
-def _posixify(name):
-    return '-'.join(name.split()).lower()
-
-
-def safecall(func):
-    """Wraps a function so that it swallows exceptions."""
-    def wrapper(*args, **kwargs):
-        try:
-            return func(*args, **kwargs)
-        except Exception:
-            pass
-    return wrapper
-
-
-def make_str(value):
-    """Converts a value into a valid string."""
-    if isinstance(value, bytes):
-        try:
-            return value.decode(get_filesystem_encoding())
-        except UnicodeError:
-            return value.decode('utf-8', 'replace')
-    return text_type(value)
-
-
-def make_default_short_help(help, max_length=45):
-    """Return a condensed version of help string."""
-    words = help.split()
-    total_length = 0
-    result = []
-    done = False
-
-    for word in words:
-        if word[-1:] == '.':
-            done = True
-        new_length = result and 1 + len(word) or len(word)
-        if total_length + new_length > max_length:
-            result.append('...')
-            done = True
-        else:
-            if result:
-                result.append(' ')
-            result.append(word)
-        if done:
-            break
-        total_length += new_length
-
-    return ''.join(result)
-
-
-class LazyFile(object):
-    """A lazy file works like a regular file but does not fully open the
-    file immediately; it performs some basic checks early to see if the
-    filename parameter makes sense. This is useful for safely opening
-    files for writing.
-    """
-
-    def __init__(self, filename, mode='r', encoding=None, errors='strict',
-                 atomic=False):
-        self.name = filename
-        self.mode = mode
-        self.encoding = encoding
-        self.errors = errors
-        self.atomic = atomic
-
-        if filename == '-':
-            self._f, self.should_close = open_stream(filename, mode,
-                                                     encoding, errors)
-        else:
-            if 'r' in mode:
-                # Open and close the file in case we're opening it for
-                # reading so that we can catch at least some errors in
-                # some cases early.
-                open(filename, mode).close()
-            self._f = None
-            self.should_close = True
-
-    def __getattr__(self, name):
-        return getattr(self.open(), name)
-
-    def __repr__(self):
-        if self._f is not None:
-            return repr(self._f)
-        return '<unopened file %r %s>' % (self.name, self.mode)
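    # (Editor's note, illustration only -- not part of the original file.
    # A LazyFile opened for writing touches nothing until first use:
    #
    #     lf = LazyFile('out.txt', mode='w')   # no file created yet
    #     lf.write('hello\n')                  # __getattr__ -> open() -> write
    #     lf.close_intelligently()             # closes only wrapper-opened files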
- """ - if self._f is not None: - return self._f - try: - rv, self.should_close = open_stream(self.name, self.mode, - self.encoding, - self.errors, - atomic=self.atomic) - except (IOError, OSError) as e: - from .exceptions import FileError - raise FileError(self.name, hint=get_streerror(e)) - self._f = rv - return rv - - def close(self): - """Closes the underlying file, no matter what.""" - if self._f is not None: - self._f.close() - - def close_intelligently(self): - """This function only closes the file if it was opened by the lazy - file wrapper. For instance this will never close stdin. - """ - if self.should_close: - self.close() - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - self.close_intelligently() - - def __iter__(self): - self.open() - return iter(self._f) - - -class KeepOpenFile(object): - - def __init__(self, file): - self._file = file - - def __getattr__(self, name): - return getattr(self._file, name) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - pass - - def __repr__(self): - return repr(self._file) - - def __iter__(self): - return iter(self._file) - - -def echo(message=None, file=None, nl=True, err=False, color=None): - """Prints a message plus a newline to the given file or stdout. On - first sight, this looks like the print function, but it has improved - support for handling Unicode and binary data that does not fail no - matter how badly configured the system is. - - Primarily it means that you can print binary data as well as Unicode - data on both 2.x and 3.x to the given file in the most appropriate way - possible. This is a very carefree function in that it will try its - best to not fail. As of Click 6.0 this includes support for unicode - output on the Windows console. - - In addition to that, if `colorama`_ is installed, the echo function will - also support clever handling of ANSI codes. Essentially it will then - do the following: - - - add transparent handling of ANSI color codes on Windows. - - hide ANSI codes automatically if the destination file is not a - terminal. - - .. _colorama: https://pypi.org/project/colorama/ - - .. versionchanged:: 6.0 - As of Click 6.0 the echo function will properly support unicode - output on the windows console. Not that click does not modify - the interpreter in any way which means that `sys.stdout` or the - print statement or function will still not provide unicode support. - - .. versionchanged:: 2.0 - Starting with version 2.0 of Click, the echo function will work - with colorama if it's installed. - - .. versionadded:: 3.0 - The `err` parameter was added. - - .. versionchanged:: 4.0 - Added the `color` flag. - - :param message: the message to print - :param file: the file to write to (defaults to ``stdout``) - :param err: if set to true the file defaults to ``stderr`` instead of - ``stdout``. This is faster and easier than calling - :func:`get_text_stderr` yourself. - :param nl: if set to `True` (the default) a newline is printed afterwards. - :param color: controls if the terminal supports ANSI colors or not. The - default is autodetection. - """ - if file is None: - if err: - file = _default_text_stderr() - else: - file = _default_text_stdout() - - # Convert non bytes/text into the native string type. 
- if message is not None and not isinstance(message, echo_native_types): - message = text_type(message) - - if nl: - message = message or u'' - if isinstance(message, text_type): - message += u'\n' - else: - message += b'\n' - - # If there is a message, and we're in Python 3, and the value looks - # like bytes, we manually need to find the binary stream and write the - # message in there. This is done separately so that most stream - # types will work as you would expect. Eg: you can write to StringIO - # for other cases. - if message and not PY2 and is_bytes(message): - binary_file = _find_binary_writer(file) - if binary_file is not None: - file.flush() - binary_file.write(message) - binary_file.flush() - return - - # ANSI-style support. If there is no message or we are dealing with - # bytes nothing is happening. If we are connected to a file we want - # to strip colors. If we are on windows we either wrap the stream - # to strip the color or we use the colorama support to translate the - # ansi codes to API calls. - if message and not is_bytes(message): - color = resolve_color_default(color) - if should_strip_ansi(file, color): - message = strip_ansi(message) - elif WIN: - if auto_wrap_for_ansi is not None: - file = auto_wrap_for_ansi(file) - elif not color: - message = strip_ansi(message) - - if message: - file.write(message) - file.flush() - - -def get_binary_stream(name): - """Returns a system stream for byte processing. This essentially - returns the stream from the sys module with the given name but it - solves some compatibility issues between different Python versions. - Primarily this function is necessary for getting binary streams on - Python 3. - - :param name: the name of the stream to open. Valid names are ``'stdin'``, - ``'stdout'`` and ``'stderr'`` - """ - opener = binary_streams.get(name) - if opener is None: - raise TypeError('Unknown standard stream %r' % name) - return opener() - - -def get_text_stream(name, encoding=None, errors='strict'): - """Returns a system stream for text processing. This usually returns - a wrapped stream around a binary stream returned from - :func:`get_binary_stream` but it also can take shortcuts on Python 3 - for already correctly configured streams. - - :param name: the name of the stream to open. Valid names are ``'stdin'``, - ``'stdout'`` and ``'stderr'`` - :param encoding: overrides the detected default encoding. - :param errors: overrides the default error mode. - """ - opener = text_streams.get(name) - if opener is None: - raise TypeError('Unknown standard stream %r' % name) - return opener(encoding, errors) - - -def open_file(filename, mode='r', encoding=None, errors='strict', - lazy=False, atomic=False): - """This is similar to how the :class:`File` works but for manual - usage. Files are opened non lazy by default. This can open regular - files as well as stdin/stdout if ``'-'`` is passed. - - If stdin/stdout is returned the stream is wrapped so that the context - manager will not close the stream accidentally. This makes it possible - to always use the function like this without having to worry to - accidentally close a standard stream:: - - with open_file(filename) as f: - ... - - .. versionadded:: 3.0 - - :param filename: the name of the file to open (or ``'-'`` for stdin/stdout). - :param mode: the mode in which to open the file. - :param encoding: the encoding to use. - :param errors: the error handling for this file. - :param lazy: can be flipped to true to open the file lazily. 
-    :param atomic: in atomic mode writes go into a temporary file and it's
-                   moved on close.
-    """
-    if lazy:
-        return LazyFile(filename, mode, encoding, errors, atomic=atomic)
-    f, should_close = open_stream(filename, mode, encoding, errors,
-                                  atomic=atomic)
-    if not should_close:
-        f = KeepOpenFile(f)
-    return f
-
-
-def get_os_args():
-    """This returns the argument part of sys.argv in the most appropriate
-    form for processing. What this means is that this return value is in
-    a format that works for Click to process but does not necessarily
-    correspond well to what's actually standard for the interpreter.
-
-    On most environments the return value is ``sys.argv[1:]`` unchanged.
-    However if you are on Windows and running Python 2 the return value
-    will actually be a list of unicode strings instead because the
-    default behavior on that platform otherwise will not be able to
-    carry all possible values that sys.argv can have.
-
-    .. versionadded:: 6.0
-    """
-    # We can only extract the unicode argv if sys.argv has not been
-    # changed since the startup of the application.
-    if PY2 and WIN and _initial_argv_hash == _hash_py_argv():
-        return _get_windows_argv()
-    return sys.argv[1:]
-
-
-def format_filename(filename, shorten=False):
-    """Formats a filename for user display. The main purpose of this
-    function is to ensure that the filename can be displayed at all. This
-    will decode the filename to unicode if necessary in a way that it will
-    not fail. Optionally, it can shorten the filename to not include the
-    full path to the filename.
-
-    :param filename: formats a filename for UI display. This will also convert
-                     the filename into unicode without failing.
-    :param shorten: this optionally shortens the filename to strip off the
-                    path that leads up to it.
-    """
-    if shorten:
-        filename = os.path.basename(filename)
-    return filename_to_ui(filename)
-
-
-def get_app_dir(app_name, roaming=True, force_posix=False):
-    r"""Returns the config folder for the application. The default behavior
-    is to return whatever is most appropriate for the operating system.
-
-    To give you an idea, for an app called ``"Foo Bar"``, something like
-    the following folders could be returned:
-
-    Mac OS X:
-      ``~/Library/Application Support/Foo Bar``
-    Mac OS X (POSIX):
-      ``~/.foo-bar``
-    Unix:
-      ``~/.config/foo-bar``
-    Unix (POSIX):
-      ``~/.foo-bar``
-    Win XP (roaming):
-      ``C:\Documents and Settings\<user name>\Local Settings\Application Data\Foo Bar``
-    Win XP (not roaming):
-      ``C:\Documents and Settings\<user name>\Application Data\Foo Bar``
-    Win 7 (roaming):
-      ``C:\Users\<user name>\AppData\Roaming\Foo Bar``
-    Win 7 (not roaming):
-      ``C:\Users\<user name>\AppData\Local\Foo Bar``
-
-    .. versionadded:: 2.0
-
-    :param app_name: the application name. This should be properly capitalized
-                     and can contain whitespace.
-    :param roaming: controls if the folder should be roaming or not on Windows.
-                    Has no effect otherwise.
-    :param force_posix: if this is set to `True` then on any POSIX system the
-                        folder will be stored in the home folder with a leading
-                        dot instead of the XDG config home or darwin's
-                        application support folder.
-    """
-    if WIN:
-        key = roaming and 'APPDATA' or 'LOCALAPPDATA'
-        folder = os.environ.get(key)
-        if folder is None:
-            folder = os.path.expanduser('~')
-        return os.path.join(folder, app_name)
-    if force_posix:
-        return os.path.join(os.path.expanduser('~/.'
+ _posixify(app_name))) - if sys.platform == 'darwin': - return os.path.join(os.path.expanduser( - '~/Library/Application Support'), app_name) - return os.path.join( - os.environ.get('XDG_CONFIG_HOME', os.path.expanduser('~/.config')), - _posixify(app_name)) - - -class PacifyFlushWrapper(object): - """This wrapper is used to catch and suppress BrokenPipeErrors resulting - from ``.flush()`` being called on broken pipe during the shutdown/final-GC - of the Python interpreter. Notably ``.flush()`` is always called on - ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any - other cleanup code, and the case where the underlying file is not a broken - pipe, all calls and attributes are proxied. - """ - - def __init__(self, wrapped): - self.wrapped = wrapped - - def flush(self): - try: - self.wrapped.flush() - except IOError as e: - import errno - if e.errno != errno.EPIPE: - raise - - def __getattr__(self, attr): - return getattr(self.wrapped, attr) diff --git a/venv/lib/python3.8/site-packages/diagrams.egg-link b/venv/lib/python3.8/site-packages/diagrams.egg-link deleted file mode 100644 index d2eb6b66..00000000 --- a/venv/lib/python3.8/site-packages/diagrams.egg-link +++ /dev/null @@ -1,2 +0,0 @@ -/home/nicolas/Projects/diagrams -. \ No newline at end of file diff --git a/venv/lib/python3.8/site-packages/easy-install.pth b/venv/lib/python3.8/site-packages/easy-install.pth deleted file mode 100644 index d93dff8b..00000000 --- a/venv/lib/python3.8/site-packages/easy-install.pth +++ /dev/null @@ -1 +0,0 @@ -/home/nicolas/Projects/diagrams diff --git a/venv/lib/python3.8/site-packages/easy_install.py b/venv/lib/python3.8/site-packages/easy_install.py deleted file mode 100644 index d87e9840..00000000 --- a/venv/lib/python3.8/site-packages/easy_install.py +++ /dev/null @@ -1,5 +0,0 @@ -"""Run the EasyInstall command""" - -if __name__ == '__main__': - from setuptools.command.easy_install import main - main() diff --git a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/INSTALLER b/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/INSTALLER deleted file mode 100644 index a1b589e3..00000000 --- a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/LICENSE.txt b/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/LICENSE.txt deleted file mode 100644 index bd1f954b..00000000 --- a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/LICENSE.txt +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013-2019 Sebastian Bank - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/METADATA b/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/METADATA deleted file mode 100644 index ee706da6..00000000 --- a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/METADATA +++ /dev/null @@ -1,196 +0,0 @@ -Metadata-Version: 2.1 -Name: graphviz -Version: 0.13.2 -Summary: Simple Python interface for Graphviz -Home-page: https://github.com/xflr6/graphviz -Author: Sebastian Bank -Author-email: sebastian.bank@uni-leipzig.de -License: MIT -Keywords: graph visualization dot render -Platform: any -Classifier: Development Status :: 4 - Beta -Classifier: Intended Audience :: Developers -Classifier: Intended Audience :: Science/Research -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: 3.8 -Classifier: Topic :: Scientific/Engineering :: Visualization -Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.* -Provides-Extra: dev -Requires-Dist: tox (>=3.0) ; extra == 'dev' -Requires-Dist: flake8 ; extra == 'dev' -Requires-Dist: pep8-naming ; extra == 'dev' -Requires-Dist: wheel ; extra == 'dev' -Requires-Dist: twine ; extra == 'dev' -Provides-Extra: docs -Requires-Dist: sphinx (>=1.7) ; extra == 'docs' -Requires-Dist: sphinx-rtd-theme ; extra == 'docs' -Provides-Extra: test -Requires-Dist: mock (>=2) ; extra == 'test' -Requires-Dist: pytest (!=3.10.0,>=3.4) ; extra == 'test' -Requires-Dist: pytest-mock (>=1.8) ; extra == 'test' -Requires-Dist: pytest-cov ; extra == 'test' - -Graphviz -======== - -|PyPI version| |License| |Supported Python| |Format| |Docs| - -|Travis| |Codecov| - -This package facilitates the creation and rendering of graph descriptions in -the DOT_ language of the Graphviz_ graph drawing software (`master repo`_) from -Python. - -Create a graph object, assemble the graph by adding nodes and edges, and -retrieve its DOT source code string. Save the source code to a file and render -it with the Graphviz installation of your system. - -Use the ``view`` option/method to directly inspect the resulting (PDF, PNG, -SVG, etc.) file with its default application. Graphs can also be rendered -and displayed within `Jupyter notebooks`_ (formerly known as -`IPython notebooks`_, example_) as well as the `Jupyter Qt Console`_. - - -Links ------ - -- GitHub: https://github.com/xflr6/graphviz -- PyPI: https://pypi.org/project/graphviz/ -- Documentation: https://graphviz.readthedocs.io -- Changelog: https://graphviz.readthedocs.io/en/latest/changelog.html -- Issue Tracker: https://github.com/xflr6/graphviz/issues -- Download: https://pypi.org/project/graphviz/#files - - -Installation ------------- - -This package runs under Python 2.7, and 3.5+, use pip_ to install: - -.. code:: bash - - $ pip install graphviz - -To render the generated DOT source code, you also need to install Graphviz -(`download page`_). 
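A quick way to check that both the package and the ``dot`` executable are
reachable (editor's sketch, not part of the original README;
``graphviz.version()`` runs ``dot -V`` under the hood, see
``backend.version()`` later in this diff):

.. code:: python

    >>> import graphviz
    >>> graphviz.version()  # doctest: +SKIP
    (2, 40, 1)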
-
-Make sure that the directory containing the ``dot`` executable is on your
-system's path.
-
-
-Quickstart
-----------
-
-Create a graph object:
-
-.. code:: python
-
-    >>> from graphviz import Digraph
-
-    >>> dot = Digraph(comment='The Round Table')
-
-    >>> dot  #doctest: +ELLIPSIS
-    <graphviz.dot.Digraph object at 0x...>
-
-Add nodes and edges:
-
-.. code:: python
-
-    >>> dot.node('A', 'King Arthur')
-    >>> dot.node('B', 'Sir Bedevere the Wise')
-    >>> dot.node('L', 'Sir Lancelot the Brave')
-
-    >>> dot.edges(['AB', 'AL'])
-    >>> dot.edge('B', 'L', constraint='false')
-
-Check the generated source code:
-
-.. code:: python
-
-    >>> print(dot.source)  # doctest: +NORMALIZE_WHITESPACE
-    // The Round Table
-    digraph {
-        A [label="King Arthur"]
-        B [label="Sir Bedevere the Wise"]
-        L [label="Sir Lancelot the Brave"]
-        A -> B
-        A -> L
-        B -> L [constraint=false]
-    }
-
-Save and render the source code, optionally view the result:
-
-.. code:: python
-
-    >>> dot.render('test-output/round-table.gv', view=True)  # doctest: +SKIP
-    'test-output/round-table.gv.pdf'
-
-.. image:: https://raw.github.com/xflr6/graphviz/master/docs/round-table.png
-   :align: center
-
-
-See also
---------
-
-- pygraphviz_ |--| full-blown interface wrapping the Graphviz C library with SWIG
-- graphviz-python_ |--| official Python bindings (documentation_)
-- pydot_ |--| stable pure-Python approach, requires pyparsing
-
-
-License
--------
-
-This package is distributed under the `MIT license`_.
-
-
-.. _pip: https://pip.readthedocs.io
-.. _Graphviz: https://www.graphviz.org
-.. _master repo: https://gitlab.com/graphviz/graphviz/
-.. _download page: https://www.graphviz.org/download/
-.. _DOT: https://www.graphviz.org/doc/info/lang.html
-.. _Jupyter notebooks: https://jupyter.org
-.. _IPython notebooks: https://ipython.org/notebook.html
-.. _example: https://nbviewer.jupyter.org/github/xflr6/graphviz/blob/master/examples/notebook.ipynb
-.. _Jupyter Qt Console: https://qtconsole.readthedocs.io
-
-.. _pygraphviz: https://pypi.org/project/pygraphviz/
-.. _graphviz-python: https://pypi.org/project/graphviz-python/
-.. _documentation: https://www.graphviz.org/pdf/gv.3python.pdf
-.. _pydot: https://pypi.org/project/pydot/
-
-.. _MIT license: https://opensource.org/licenses/MIT
-
-
-.. |--| unicode:: U+2013
-
-
-.. |PyPI version| image:: https://img.shields.io/pypi/v/graphviz.svg
-   :target: https://pypi.org/project/graphviz/
-   :alt: Latest PyPI Version
-.. |License| image:: https://img.shields.io/pypi/l/graphviz.svg
-   :target: https://pypi.org/project/graphviz/
-   :alt: License
-.. |Supported Python| image:: https://img.shields.io/pypi/pyversions/graphviz.svg
-   :target: https://pypi.org/project/graphviz/
-   :alt: Supported Python Versions
-.. |Format| image:: https://img.shields.io/pypi/format/graphviz.svg
-   :target: https://pypi.org/project/graphviz/
-   :alt: Format
-.. |Docs| image:: https://readthedocs.org/projects/graphviz/badge/?version=stable
-   :target: https://graphviz.readthedocs.io/en/stable/
-   :alt: Readthedocs
-.. |Travis| image:: https://img.shields.io/travis/xflr6/graphviz.svg
-   :target: https://travis-ci.org/xflr6/graphviz
-   :alt: Travis
-..
|Codecov| image:: https://codecov.io/gh/xflr6/graphviz/branch/master/graph/badge.svg - :target: https://codecov.io/gh/xflr6/graphviz - :alt: Codecov - - diff --git a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/RECORD b/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/RECORD deleted file mode 100644 index 3ebc4fb8..00000000 --- a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/RECORD +++ /dev/null @@ -1,20 +0,0 @@ -graphviz-0.13.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -graphviz-0.13.2.dist-info/LICENSE.txt,sha256=Cp8paorgibA0zoH4UIRyKOEit14IYo-qAy4syY_fn9E,1107 -graphviz-0.13.2.dist-info/METADATA,sha256=WpLbRxRv3ybyidif59Y3EvrNB6Fxi_lgtAsR7tDBdTM,6401 -graphviz-0.13.2.dist-info/RECORD,, -graphviz-0.13.2.dist-info/WHEEL,sha256=JZXtYepZFsf4IoivNpnSgKIc4qTHan08DRd56koo_DM,116 -graphviz-0.13.2.dist-info/top_level.txt,sha256=Ee90R0icUEXS0AvIJbqVuNvagnakuhP5Wips0LDkQuw,9 -graphviz/__init__.py,sha256=d6PAMokPlsGL6PtOFKGHe_w7Ny7Xn1LiHGmfmBOTh3Y,1823 -graphviz/__pycache__/__init__.cpython-38.pyc,, -graphviz/__pycache__/_compat.cpython-38.pyc,, -graphviz/__pycache__/backend.cpython-38.pyc,, -graphviz/__pycache__/dot.cpython-38.pyc,, -graphviz/__pycache__/files.cpython-38.pyc,, -graphviz/__pycache__/lang.cpython-38.pyc,, -graphviz/__pycache__/tools.cpython-38.pyc,, -graphviz/_compat.py,sha256=HclJmoLOoBLxKpVRgNc16z4FmhAaeWIHqKDOj2hn3K0,2107 -graphviz/backend.py,sha256=FMhWZptUtcAOGvRI5IQCZNi5omEdl0mIIC1E14WU8xw,9872 -graphviz/dot.py,sha256=J5-QsguPfRv9_4zpzY8Z4JzA74-8Y-PG7Ym-oL22MJQ,10692 -graphviz/files.py,sha256=ud5zJQkMjUkfFWc8jGXFpH5Tk-_u_iQD1bFDNRxU_y8,11906 -graphviz/lang.py,sha256=3Jr7JaULpCiWpFJ4hSDRfYzEGYZyN3Otbx5e6OTqovU,5230 -graphviz/tools.py,sha256=w8T82b6UqiZk9FDcdgHDWK0nV6CbhYd-S-ClliPNuRk,1322 diff --git a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/WHEEL b/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/WHEEL deleted file mode 100644 index b1da2442..00000000 --- a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.6) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/top_level.txt b/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/top_level.txt deleted file mode 100644 index 4d956093..00000000 --- a/venv/lib/python3.8/site-packages/graphviz-0.13.2.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -graphviz diff --git a/venv/lib/python3.8/site-packages/graphviz/__init__.py b/venv/lib/python3.8/site-packages/graphviz/__init__.py deleted file mode 100644 index 0a457442..00000000 --- a/venv/lib/python3.8/site-packages/graphviz/__init__.py +++ /dev/null @@ -1,63 +0,0 @@ -# graphviz - create dot, save, render, view - -"""Assemble DOT source code and render it with Graphviz. 
- ->>> dot = Digraph(comment='The Round Table') - ->>> dot.node('A', 'King Arthur') ->>> dot.node('B', 'Sir Bedevere the Wise') ->>> dot.node('L', 'Sir Lancelot the Brave') - ->>> dot.edges(['AB', 'AL']) - ->>> dot.edge('B', 'L', constraint='false') - ->>> print(dot) #doctest: +NORMALIZE_WHITESPACE -// The Round Table -digraph { - A [label="King Arthur"] - B [label="Sir Bedevere the Wise"] - L [label="Sir Lancelot the Brave"] - A -> B - A -> L - B -> L [constraint=false] -} -""" - -from .dot import Graph, Digraph -from .files import Source -from .lang import escape, nohtml -from .backend import (render, pipe, version, view, - ENGINES, FORMATS, RENDERERS, FORMATTERS, - ExecutableNotFound, RequiredArgumentError) - -__all__ = [ - 'Graph', 'Digraph', - 'Source', - 'escape', 'nohtml', - 'render', 'pipe', 'version', 'view', - 'ENGINES', 'FORMATS', 'RENDERERS', 'FORMATTERS', - 'ExecutableNotFound', 'RequiredArgumentError', -] - -__title__ = 'graphviz' -__version__ = '0.13.2' -__author__ = 'Sebastian Bank ' -__license__ = 'MIT, see LICENSE.txt' -__copyright__ = 'Copyright (c) 2013-2019 Sebastian Bank' - -#: Set of known layout commands used for rendering (``'dot'``, ``'neato'``, ...) -ENGINES = ENGINES - -#: Set of known output formats for rendering (``'pdf'``, ``'png'``, ...) -FORMATS = FORMATS - -#: Set of known output formatters for rendering (``'cairo'``, ``'gd'``, ...) -FORMATTERS = FORMATTERS - -#: Set of known output renderers for rendering (``'cairo'``, ``'gd'``, ...) -RENDERERS = RENDERERS - -ExecutableNotFound = ExecutableNotFound - -RequiredArgumentError = RequiredArgumentError diff --git a/venv/lib/python3.8/site-packages/graphviz/_compat.py b/venv/lib/python3.8/site-packages/graphviz/_compat.py deleted file mode 100644 index bac4cba0..00000000 --- a/venv/lib/python3.8/site-packages/graphviz/_compat.py +++ /dev/null @@ -1,69 +0,0 @@ -# _compat.py - Python 2/3 compatibility - -import os -import sys -import operator -import subprocess - -PY2 = (sys.version_info.major == 2) - - -if PY2: - string_classes = (str, unicode) # needed individually for sublassing - text_type = unicode - - iteritems = operator.methodcaller('iteritems') - - def makedirs(name, mode=0o777, exist_ok=False): - try: - os.makedirs(name, mode) - except OSError: - if not exist_ok or not os.path.isdir(name): - raise - - def stderr_write_bytes(data, flush=False): - """Write data str to sys.stderr (flush if requested).""" - sys.stderr.write(data) - if flush: - sys.stderr.flush() - - def Popen_stderr_devnull(*args, **kwargs): # noqa: N802 - with open(os.devnull, 'w') as f: - return subprocess.Popen(*args, stderr=f, **kwargs) - - class CalledProcessError(subprocess.CalledProcessError): - - def __init__(self, returncode, cmd, output=None, stderr=None): - super(CalledProcessError, self).__init__(returncode, cmd, output) - self.stderr = stderr - - @property # pragma: no cover - def stdout(self): - return self.output - - @stdout.setter # pragma: no cover - def stdout(self, value): - self.output = value - - -else: - string_classes = (str,) - text_type = str - - def iteritems(d): - return iter(d.items()) - - def makedirs(name, mode=0o777, exist_ok=False): # allow os.makedirs mocking - return os.makedirs(name, mode, exist_ok=exist_ok) - - def stderr_write_bytes(data, flush=False): - """Encode data str and write to sys.stderr (flush if requested).""" - encoding = sys.stderr.encoding or sys.getdefaultencoding() - sys.stderr.write(data.decode(encoding)) - if flush: - sys.stderr.flush() - - def Popen_stderr_devnull(*args, **kwargs): # 
noqa: N802 - return subprocess.Popen(*args, stderr=subprocess.DEVNULL, **kwargs) - - CalledProcessError = subprocess.CalledProcessError diff --git a/venv/lib/python3.8/site-packages/graphviz/backend.py b/venv/lib/python3.8/site-packages/graphviz/backend.py deleted file mode 100644 index eec219d8..00000000 --- a/venv/lib/python3.8/site-packages/graphviz/backend.py +++ /dev/null @@ -1,297 +0,0 @@ -# backend.py - execute rendering, open files in viewer - -import os -import re -import errno -import logging -import platform -import subprocess - -from . import _compat - -from . import tools - -__all__ = [ - 'render', 'pipe', 'version', 'view', - 'ENGINES', 'FORMATS', 'RENDERERS', 'FORMATTERS', - 'ExecutableNotFound', 'RequiredArgumentError', -] - -ENGINES = { # http://www.graphviz.org/pdf/dot.1.pdf - 'dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage', -} - -FORMATS = { # http://www.graphviz.org/doc/info/output.html - 'bmp', - 'canon', 'dot', 'gv', 'xdot', 'xdot1.2', 'xdot1.4', - 'cgimage', - 'cmap', - 'eps', - 'exr', - 'fig', - 'gd', 'gd2', - 'gif', - 'gtk', - 'ico', - 'imap', 'cmapx', - 'imap_np', 'cmapx_np', - 'ismap', - 'jp2', - 'jpg', 'jpeg', 'jpe', - 'json', 'json0', 'dot_json', 'xdot_json', # Graphviz 2.40 - 'pct', 'pict', - 'pdf', - 'pic', - 'plain', 'plain-ext', - 'png', - 'pov', - 'ps', - 'ps2', - 'psd', - 'sgi', - 'svg', 'svgz', - 'tga', - 'tif', 'tiff', - 'tk', - 'vml', 'vmlz', - 'vrml', - 'wbmp', - 'webp', - 'xlib', - 'x11', -} - -RENDERERS = { # $ dot -T: - 'cairo', - 'dot', - 'fig', - 'gd', - 'gdiplus', - 'map', - 'pic', - 'pov', - 'ps', - 'svg', - 'tk', - 'vml', - 'vrml', - 'xdot', -} - -FORMATTERS = {'cairo', 'core', 'gd', 'gdiplus', 'gdwbmp', 'xlib'} - -PLATFORM = platform.system().lower() - - -log = logging.getLogger(__name__) - - -class ExecutableNotFound(RuntimeError): - """Exception raised if the Graphviz executable is not found.""" - - _msg = ('failed to execute %r, ' - 'make sure the Graphviz executables are on your systems\' PATH') - - def __init__(self, args): - super(ExecutableNotFound, self).__init__(self._msg % args) - - -class RequiredArgumentError(Exception): - """Exception raised if a required argument is missing.""" - - -class CalledProcessError(_compat.CalledProcessError): - - def __str__(self): - s = super(CalledProcessError, self).__str__() - return '%s [stderr: %r]' % (s, self.stderr) - - -def command(engine, format_, filepath=None, renderer=None, formatter=None): - """Return args list for ``subprocess.Popen`` and name of the rendered file.""" - if formatter is not None and renderer is None: - raise RequiredArgumentError('formatter given without renderer') - - if engine not in ENGINES: - raise ValueError('unknown engine: %r' % engine) - if format_ not in FORMATS: - raise ValueError('unknown format: %r' % format_) - if renderer is not None and renderer not in RENDERERS: - raise ValueError('unknown renderer: %r' % renderer) - if formatter is not None and formatter not in FORMATTERS: - raise ValueError('unknown formatter: %r' % formatter) - - output_format = [f for f in (format_, renderer, formatter) if f is not None] - cmd = [engine, '-T%s' % ':'.join(output_format)] - - if filepath is None: - rendered = None - else: - cmd.extend(['-O', filepath]) - suffix = '.'.join(reversed(output_format)) - rendered = '%s.%s' % (filepath, suffix) - - return cmd, rendered - - -if PLATFORM == 'windows': # pragma: no cover - def get_startupinfo(): - """Return subprocess.STARTUPINFO instance hiding the console window.""" - startupinfo = subprocess.STARTUPINFO() 
- startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - startupinfo.wShowWindow = subprocess.SW_HIDE - return startupinfo -else: - def get_startupinfo(): - """Return None for startupinfo argument of ``subprocess.Popen``.""" - return None - - -def run(cmd, input=None, capture_output=False, check=False, quiet=False, **kwargs): - """Run the command described by cmd and return its (stdout, stderr) tuple.""" - log.debug('run %r', cmd) - if input is not None: - kwargs['stdin'] = subprocess.PIPE - if capture_output: - kwargs['stdout'] = kwargs['stderr'] = subprocess.PIPE - - try: - proc = subprocess.Popen(cmd, startupinfo=get_startupinfo(), **kwargs) - except OSError as e: - if e.errno == errno.ENOENT: - raise ExecutableNotFound(cmd) - else: - raise - - out, err = proc.communicate(input) - - if not quiet and err: - _compat.stderr_write_bytes(err, flush=True) - if check and proc.returncode: - raise CalledProcessError(proc.returncode, cmd, - output=out, stderr=err) - - return out, err - - -def render(engine, format, filepath, renderer=None, formatter=None, quiet=False): - """Render file with Graphviz ``engine`` into ``format``, return result filename. - - Args: - engine: The layout commmand used for rendering (``'dot'``, ``'neato'``, ...). - format: The output format used for rendering (``'pdf'``, ``'png'``, ...). - filepath: Path to the DOT source file to render. - renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...). - formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...). - quiet (bool): Suppress ``stderr`` output from the layout subprocess. - Returns: - The (possibly relative) path of the rendered file. - Raises: - ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known. - graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None. - graphviz.ExecutableNotFound: If the Graphviz executable is not found. - subprocess.CalledProcessError: If the exit status is non-zero. - - The layout command is started from the directory of ``filepath``, so that - references to external files (e.g. ``[image=...]``) can be given as paths - relative to the DOT source file. - """ - dirname, filename = os.path.split(filepath) - cmd, rendered = command(engine, format, filename, renderer, formatter) - if dirname: - cwd = dirname - rendered = os.path.join(dirname, rendered) - else: - cwd = None - run(cmd, capture_output=True, cwd=cwd, check=True, quiet=quiet) - return rendered - - -def pipe(engine, format, data, renderer=None, formatter=None, quiet=False): - """Return ``data`` piped through Graphviz ``engine`` into ``format``. - - Args: - engine: The layout commmand used for rendering (``'dot'``, ``'neato'``, ...). - format: The output format used for rendering (``'pdf'``, ``'png'``, ...). - data: The binary (encoded) DOT source string to render. - renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...). - formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...). - quiet (bool): Suppress ``stderr`` output from the layout subprocess. - Returns: - Binary (encoded) stdout of the layout command. - Raises: - ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known. - graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None. - graphviz.ExecutableNotFound: If the Graphviz executable is not found. - subprocess.CalledProcessError: If the exit status is non-zero. 
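    (Editor's illustration, not part of the original docstring -- typical
    use, assuming a working Graphviz installation; note that ``data`` must
    already be encoded bytes:

        >>> from graphviz.backend import pipe
        >>> pipe('dot', 'svg', b'graph { spam }')  # doctest: +SKIP
        b'<?xml version=...'

    Compare ``File.pipe()`` later in this diff, which encodes the source
    for you.)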
- """ - cmd, _ = command(engine, format, None, renderer, formatter) - out, _ = run(cmd, input=data, capture_output=True, check=True, quiet=quiet) - return out - - -def version(): - """Return the version number tuple from the ``stderr`` output of ``dot -V``. - - Returns: - Two, three, or four ``int`` version ``tuple``. - Raises: - graphviz.ExecutableNotFound: If the Graphviz executable is not found. - subprocess.CalledProcessError: If the exit status is non-zero. - RuntimmeError: If the output cannot be parsed into a version number. - """ - cmd = ['dot', '-V'] - out, _ = run(cmd, check=True, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - - info = out.decode('ascii') - ma = re.search(r'graphviz version (\d+\.\d+(?:\.\d+){,2}) ', info) - if ma is None: - raise RuntimeError('cannot parse %r output: %r' % (cmd, info)) - return tuple(int(d) for d in ma.group(1).split('.')) - - -def view(filepath, quiet=False): - """Open filepath with its default viewing application (platform-specific). - - Args: - filepath: Path to the file to open in viewer. - quiet (bool): Suppress ``stderr`` output from the viewer process - (ineffective on Windows). - Raises: - RuntimeError: If the current platform is not supported. - """ - try: - view_func = getattr(view, PLATFORM) - except AttributeError: - raise RuntimeError('platform %r not supported' % PLATFORM) - view_func(filepath, quiet) - - -@tools.attach(view, 'darwin') -def view_darwin(filepath, quiet): - """Open filepath with its default application (mac).""" - cmd = ['open', filepath] - log.debug('view: %r', cmd) - popen_func = _compat.Popen_stderr_devnull if quiet else subprocess.Popen - popen_func(cmd) - - -@tools.attach(view, 'linux') -@tools.attach(view, 'freebsd') -def view_unixoid(filepath, quiet): - """Open filepath in the user's preferred application (linux, freebsd).""" - cmd = ['xdg-open', filepath] - log.debug('view: %r', cmd) - popen_func = _compat.Popen_stderr_devnull if quiet else subprocess.Popen - popen_func(cmd) - - -@tools.attach(view, 'windows') -def view_windows(filepath, quiet): - """Start filepath with its associated application (windows).""" - # TODO: implement quiet=True - filepath = os.path.normpath(filepath) - log.debug('view: %r', filepath) - os.startfile(filepath) diff --git a/venv/lib/python3.8/site-packages/graphviz/dot.py b/venv/lib/python3.8/site-packages/graphviz/dot.py deleted file mode 100644 index 535ae21d..00000000 --- a/venv/lib/python3.8/site-packages/graphviz/dot.py +++ /dev/null @@ -1,287 +0,0 @@ -# dot.py - create dot code - -r"""Assemble DOT source code objects. - ->>> dot = Graph(comment=u'M\xf8nti Pyth\xf8n ik den H\xf8lie Grailen') - ->>> dot.node(u'M\xf8\xf8se') ->>> dot.node('trained_by', u'trained by') ->>> dot.node('tutte', u'TUTTE HERMSGERVORDENBROTBORDA') - ->>> dot.edge(u'M\xf8\xf8se', 'trained_by') ->>> dot.edge('trained_by', 'tutte') - ->>> dot.node_attr['shape'] = 'rectangle' - ->>> print(dot.source.replace(u'\xf8', '0')) #doctest: +NORMALIZE_WHITESPACE -// M0nti Pyth0n ik den H0lie Grailen -graph { - node [shape=rectangle] - "M00se" - trained_by [label="trained by"] - tutte [label="TUTTE HERMSGERVORDENBROTBORDA"] - "M00se" -- trained_by - trained_by -- tutte -} - ->>> dot.view('test-output/m00se.gv') # doctest: +SKIP -'test-output/m00se.gv.pdf' -""" - -from . import lang -from . 
import files - -__all__ = ['Graph', 'Digraph'] - - -class Dot(files.File): - """Assemble, save, and render DOT source code, open result in viewer.""" - - _comment = '// %s' - _subgraph = 'subgraph %s{' - _subgraph_plain = '%s{' - _node = _attr = '\t%s%s' - _attr_plain = _attr % ('%s', '') - _tail = '}' - - _quote = staticmethod(lang.quote) - _quote_edge = staticmethod(lang.quote_edge) - - _a_list = staticmethod(lang.a_list) - _attr_list = staticmethod(lang.attr_list) - - def __init__(self, name=None, comment=None, - filename=None, directory=None, - format=None, engine=None, encoding=files.ENCODING, - graph_attr=None, node_attr=None, edge_attr=None, body=None, - strict=False): - self.name = name - self.comment = comment - - super(Dot, self).__init__(filename, directory, format, engine, encoding) - - self.graph_attr = dict(graph_attr) if graph_attr is not None else {} - self.node_attr = dict(node_attr) if node_attr is not None else {} - self.edge_attr = dict(edge_attr) if edge_attr is not None else {} - - self.body = list(body) if body is not None else [] - - self.strict = strict - - def _kwargs(self): - result = super(Dot, self)._kwargs() - result.update(name=self.name, - comment=self.comment, - graph_attr=dict(self.graph_attr), - node_attr=dict(self.node_attr), - edge_attr=dict(self.edge_attr), - body=list(self.body), - strict=self.strict) - return result - - def clear(self, keep_attrs=False): - """Reset content to an empty body, clear graph/node/egde_attr mappings. - - Args: - keep_attrs (bool): preserve graph/node/egde_attr mappings - """ - if not keep_attrs: - for a in (self.graph_attr, self.node_attr, self.edge_attr): - a.clear() - del self.body[:] - - def __iter__(self, subgraph=False): - """Yield the DOT source code line by line (as graph or subgraph).""" - if self.comment: - yield self._comment % self.comment - - if subgraph: - if self.strict: - raise ValueError('subgraphs cannot be strict') - head = self._subgraph if self.name else self._subgraph_plain - else: - head = self._head_strict if self.strict else self._head - yield head % (self._quote(self.name) + ' ' if self.name else '') - - for kw in ('graph', 'node', 'edge'): - attrs = getattr(self, '%s_attr' % kw) - if attrs: - yield self._attr % (kw, self._attr_list(None, attrs)) - - for line in self.body: - yield line - - yield self._tail - - def __str__(self): - """The DOT source code as string.""" - return '\n'.join(self) - - source = property(__str__, doc=__str__.__doc__) - - def node(self, name, label=None, _attributes=None, **attrs): - """Create a node. - - Args: - name: Unique identifier for the node inside the source. - label: Caption to be displayed (defaults to the node ``name``). - attrs: Any additional node attributes (must be strings). - """ - name = self._quote(name) - attr_list = self._attr_list(label, attrs, _attributes) - line = self._node % (name, attr_list) - self.body.append(line) - - def edge(self, tail_name, head_name, label=None, _attributes=None, **attrs): - """Create an edge between two nodes. - - Args: - tail_name: Start node identifier. - head_name: End node identifier. - label: Caption to be displayed near the edge. - attrs: Any additional edge attributes (must be strings). - """ - tail_name = self._quote_edge(tail_name) - head_name = self._quote_edge(head_name) - attr_list = self._attr_list(label, attrs, _attributes) - line = self._edge % (tail_name, head_name, attr_list) - self.body.append(line) - - def edges(self, tail_head_iter): - """Create a bunch of edges. 
- - Args: - tail_head_iter: Iterable of ``(tail_name, head_name)`` pairs. - """ - edge = self._edge_plain - quote = self._quote_edge - lines = (edge % (quote(t), quote(h)) for t, h in tail_head_iter) - self.body.extend(lines) - - def attr(self, kw=None, _attributes=None, **attrs): - """Add a general or graph/node/edge attribute statement. - - Args: - kw: Attributes target (``None`` or ``'graph'``, ``'node'``, ``'edge'``). - attrs: Attributes to be set (must be strings, may be empty). - - See the :ref:`usage examples in the User Guide `. - """ - if kw is not None and kw.lower() not in ('graph', 'node', 'edge'): - raise ValueError('attr statement must target graph, node, or edge: ' - '%r' % kw) - if attrs or _attributes: - if kw is None: - a_list = self._a_list(None, attrs, _attributes) - line = self._attr_plain % a_list - else: - attr_list = self._attr_list(None, attrs, _attributes) - line = self._attr % (kw, attr_list) - self.body.append(line) - - def subgraph(self, graph=None, name=None, comment=None, - graph_attr=None, node_attr=None, edge_attr=None, body=None): - """Add the current content of the given sole ``graph`` argument as subgraph \ - or return a context manager returning a new graph instance created \ - with the given (``name``, ``comment``, etc.) arguments whose content is \ - added as subgraph when leaving the context manager's ``with``-block. - - Args: - graph: An instance of the same kind (:class:`.Graph`, :class:`.Digraph`) - as the current graph (sole argument in non-with-block use). - name: Subgraph name (``with``-block use). - comment: Subgraph comment (``with``-block use). - graph_attr: Subgraph-level attribute-value mapping (``with``-block use). - node_attr: Node-level attribute-value mapping (``with``-block use). - edge_attr: Edge-level attribute-value mapping (``with``-block use). - body: Verbatim lines to add to the subgraph ``body`` (``with``-block use). - - See the :ref:`usage examples in the User Guide `. - - .. note:: - If the ``name`` of the subgraph begins with ``'cluster'`` (all lowercase) - the layout engine will treat it as a special cluster subgraph. - """ - if graph is None: - return SubgraphContext(self, {'name': name, - 'comment': comment, - 'graph_attr': graph_attr, - 'node_attr': node_attr, - 'edge_attr': edge_attr, - 'body': body}) - - args = [name, comment, graph_attr, node_attr, edge_attr, body] - if not all(a is None for a in args): - raise ValueError('graph must be sole argument of subgraph()') - - if graph.directed != self.directed: - raise ValueError('%r cannot add subgraph of different kind:' - ' %r' % (self, graph)) - - lines = ['\t' + line for line in graph.__iter__(subgraph=True)] - self.body.extend(lines) - - -class SubgraphContext(object): - """Return a blank instance of the parent and add as subgraph on exit.""" - - def __init__(self, parent, kwargs): - self.parent = parent - self.graph = parent.__class__(**kwargs) - - def __enter__(self): - return self.graph - - def __exit__(self, type_, value, traceback): - if type_ is None: - self.parent.subgraph(self.graph) - - -class Graph(Dot): - """Graph source code in the DOT language. - - Args: - name: Graph name used in the source code. - comment: Comment added to the first line of the source. - filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``). - directory: (Sub)directory for source saving and rendering. - format: Rendering output format (``'pdf'``, ``'png'``, ...). - engine: Layout command used (``'dot'``, ``'neato'``, ...). 
- encoding: Encoding for saving the source. - graph_attr: Mapping of ``(attribute, value)`` pairs for the graph. - node_attr: Mapping of ``(attribute, value)`` pairs set for all nodes. - edge_attr: Mapping of ``(attribute, value)`` pairs set for all edges. - body: Iterable of verbatim lines to add to the graph ``body``. - strict (bool): Rendering should merge multi-edges. - - Note: - All parameters are optional and can be changed under their - corresponding attribute name after instance creation. - """ - - _head = 'graph %s{' - _head_strict = 'strict %s' % _head - _edge = '\t%s -- %s%s' - _edge_plain = _edge % ('%s', '%s', '') - - @property - def directed(self): - """``False``""" - return False - - -class Digraph(Dot): - """Directed graph source code in the DOT language.""" - - if Graph.__doc__ is not None: - __doc__ += Graph.__doc__.partition('.')[2] - - _head = 'digraph %s{' - _head_strict = 'strict %s' % _head - _edge = '\t%s -> %s%s' - _edge_plain = _edge % ('%s', '%s', '') - - @property - def directed(self): - """``True``""" - return True diff --git a/venv/lib/python3.8/site-packages/graphviz/files.py b/venv/lib/python3.8/site-packages/graphviz/files.py deleted file mode 100644 index 36db8a41..00000000 --- a/venv/lib/python3.8/site-packages/graphviz/files.py +++ /dev/null @@ -1,311 +0,0 @@ -# files.py - save, render, view - -"""Save DOT code objects, render with Graphviz dot, and open in viewer.""" - -import os -import io -import codecs -import locale -import logging - -from ._compat import text_type - -from . import backend -from . import tools - -__all__ = ['File', 'Source'] - -ENCODING = 'utf-8' - - -log = logging.getLogger(__name__) - - -class Base(object): - - _format = 'pdf' - _engine = 'dot' - _encoding = ENCODING - - @property - def format(self): - """The output format used for rendering (``'pdf'``, ``'png'``, ...).""" - return self._format - - @format.setter - def format(self, format): - format = format.lower() - if format not in backend.FORMATS: - raise ValueError('unknown format: %r' % format) - self._format = format - - @property - def engine(self): - """The layout commmand used for rendering (``'dot'``, ``'neato'``, ...).""" - return self._engine - - @engine.setter - def engine(self, engine): - engine = engine.lower() - if engine not in backend.ENGINES: - raise ValueError('unknown engine: %r' % engine) - self._engine = engine - - @property - def encoding(self): - """The encoding for the saved source file.""" - return self._encoding - - @encoding.setter - def encoding(self, encoding): - if encoding is None: - encoding = locale.getpreferredencoding() - codecs.lookup(encoding) # raise early - self._encoding = encoding - - def copy(self): - """Return a copied instance of the object. - - Returns: - An independent copy of the current object. 
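    (Editor's illustration, not part of the original docstring -- the
    validating ``format`` setter and ``copy()`` in practice:

        >>> import graphviz
        >>> g = graphviz.Graph()
        >>> g.format = 'PNG'   # lowercased, then checked against backend.FORMATS
        >>> g.copy().format
        'png'
        >>> g.format = 'spam'
        Traceback (most recent call last):
            ...
        ValueError: unknown format: 'spam'
    )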
- """ - kwargs = self._kwargs() - return self.__class__(**kwargs) - - def _kwargs(self): - ns = self.__dict__ - return {a[1:]: ns[a] for a in ('_format', '_engine', '_encoding') - if a in ns} - - -class File(Base): - - directory = '' - - _default_extension = 'gv' - - def __init__(self, filename=None, directory=None, - format=None, engine=None, encoding=ENCODING): - if filename is None: - name = getattr(self, 'name', None) or self.__class__.__name__ - filename = '%s.%s' % (name, self._default_extension) - self.filename = filename - - if directory is not None: - self.directory = directory - - if format is not None: - self.format = format - - if engine is not None: - self.engine = engine - - self.encoding = encoding - - def _kwargs(self): - result = super(File, self)._kwargs() - result['filename'] = self.filename - if 'directory' in self.__dict__: - result['directory'] = self.directory - return result - - def _repr_svg_(self): - return self.pipe(format='svg').decode(self._encoding) - - def pipe(self, format=None, renderer=None, formatter=None, quiet=False): - """Return the source piped through the Graphviz layout command. - - Args: - format: The output format used for rendering (``'pdf'``, ``'png'``, etc.). - renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...). - formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...). - quiet (bool): Suppress ``stderr`` output from the layout subprocess. - Returns: - Binary (encoded) stdout of the layout command. - Raises: - ValueError: If ``format``, ``renderer``, or ``formatter`` are not known. - graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None. - graphviz.ExecutableNotFound: If the Graphviz executable is not found. - subprocess.CalledProcessError: If the exit status is non-zero. - """ - if format is None: - format = self._format - - data = text_type(self.source).encode(self._encoding) - - out = backend.pipe(self._engine, format, data, - renderer=renderer, formatter=formatter, - quiet=quiet) - - return out - - @property - def filepath(self): - return os.path.join(self.directory, self.filename) - - def save(self, filename=None, directory=None): - """Save the DOT source to file. Ensure the file ends with a newline. - - Args: - filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``) - directory: (Sub)directory for source saving and rendering. - Returns: - The (possibly relative) path of the saved source file. - """ - if filename is not None: - self.filename = filename - if directory is not None: - self.directory = directory - - filepath = self.filepath - tools.mkdirs(filepath) - - data = text_type(self.source) - - log.debug('write %d bytes to %r', len(data), filepath) - with io.open(filepath, 'w', encoding=self.encoding) as fd: - fd.write(data) - if not data.endswith(u'\n'): - fd.write(u'\n') - - return filepath - - def render(self, filename=None, directory=None, view=False, cleanup=False, - format=None, renderer=None, formatter=None, - quiet=False, quiet_view=False): - """Save the source to file and render with the Graphviz engine. - - Args: - filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``) - directory: (Sub)directory for source saving and rendering. - view (bool): Open the rendered result with the default application. - cleanup (bool): Delete the source file after rendering. - format: The output format used for rendering (``'pdf'``, ``'png'``, etc.). 
- renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...). - formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...). - quiet (bool): Suppress ``stderr`` output from the layout subprocess. - quiet_view (bool): Suppress ``stderr`` output from the viewer process - (implies ``view=True``, ineffective on Windows). - Returns: - The (possibly relative) path of the rendered file. - Raises: - ValueError: If ``format``, ``renderer``, or ``formatter`` are not known. - graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None. - graphviz.ExecutableNotFound: If the Graphviz executable is not found. - subprocess.CalledProcessError: If the exit status is non-zero. - RuntimeError: If viewer opening is requested but not supported. - - The layout command is started from the directory of ``filepath``, so that - references to external files (e.g. ``[image=...]``) can be given as paths - relative to the DOT source file. - """ - filepath = self.save(filename, directory) - - if format is None: - format = self._format - - rendered = backend.render(self._engine, format, filepath, - renderer=renderer, formatter=formatter, - quiet=quiet) - - if cleanup: - log.debug('delete %r', filepath) - os.remove(filepath) - - if quiet_view or view: - self._view(rendered, self._format, quiet_view) - - return rendered - - def view(self, filename=None, directory=None, cleanup=False, - quiet=False, quiet_view=False): - """Save the source to file, open the rendered result in a viewer. - - Args: - filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``) - directory: (Sub)directory for source saving and rendering. - cleanup (bool): Delete the source file after rendering. - quiet (bool): Suppress ``stderr`` output from the layout subprocess. - quiet_view (bool): Suppress ``stderr`` output from the viewer process - (ineffective on Windows). - Returns: - The (possibly relative) path of the rendered file. - Raises: - graphviz.ExecutableNotFound: If the Graphviz executable is not found. - subprocess.CalledProcessError: If the exit status is non-zero. - RuntimeError: If opening the viewer is not supported. - - Short-cut method for calling :meth:`.render` with ``view=True``. - """ - return self.render(filename=filename, directory=directory, - view=True, cleanup=cleanup, - quiet=quiet, quiet_view=quiet_view) - - def _view(self, filepath, format, quiet): - """Start the right viewer based on file format and platform.""" - methodnames = [ - '_view_%s_%s' % (format, backend.PLATFORM), - '_view_%s' % backend.PLATFORM, - ] - for name in methodnames: - view_method = getattr(self, name, None) - if view_method is not None: - break - else: - raise RuntimeError('%r has no built-in viewer support for %r' - ' on %r platform' % (self.__class__, format, - backend.PLATFORM)) - view_method(filepath, quiet) - - _view_darwin = staticmethod(backend.view.darwin) - _view_freebsd = staticmethod(backend.view.freebsd) - _view_linux = staticmethod(backend.view.linux) - _view_windows = staticmethod(backend.view.windows) - - -class Source(File): - """Verbatim DOT source code string to be rendered by Graphviz. - - Args: - source: The verbatim DOT source code string. - filename: Filename for saving the source (defaults to ``'Source.gv'``). - directory: (Sub)directory for source saving and rendering. - format: Rendering output format (``'pdf'``, ``'png'``, ...). - engine: Layout command used (``'dot'``, ``'neato'``, ...). - encoding: Encoding for saving the source. 
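    (Editor's illustration, not part of the original docstring; the
    filename below is made up for the example:

        >>> from graphviz import Source
        >>> src = Source('digraph { a -> b }')
        >>> src.render('test-output/pair.gv')  # doctest: +SKIP
        'test-output/pair.gv.pdf'
    )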
- - Note: - All parameters except ``source`` are optional. All of them can be changed - under their corresponding attribute name after instance creation. - """ - - @classmethod - def from_file(cls, filename, directory=None, - format=None, engine=None, encoding=ENCODING): - """Return an instance with the source string read from the given file. - - Args: - filename: Filename for loading/saving the source. - directory: (Sub)directory for source loading/saving and rendering. - format: Rendering output format (``'pdf'``, ``'png'``, ...). - engine: Layout command used (``'dot'``, ``'neato'``, ...). - encoding: Encoding for loading/saving the source. - """ - filepath = os.path.join(directory or '', filename) - if encoding is None: - encoding = locale.getpreferredencoding() - log.debug('read %r with encoding %r', filepath, encoding) - with io.open(filepath, encoding=encoding) as fd: - source = fd.read() - return cls(source, filename, directory, format, engine, encoding) - - def __init__(self, source, filename=None, directory=None, - format=None, engine=None, encoding=ENCODING): - super(Source, self).__init__(filename, directory, - format, engine, encoding) - self.source = source #: The verbatim DOT source code string. - - def _kwargs(self): - result = super(Source, self)._kwargs() - result['source'] = self.source - return result diff --git a/venv/lib/python3.8/site-packages/graphviz/lang.py b/venv/lib/python3.8/site-packages/graphviz/lang.py deleted file mode 100644 index ee1de545..00000000 --- a/venv/lib/python3.8/site-packages/graphviz/lang.py +++ /dev/null @@ -1,185 +0,0 @@ -# lang.py - dot language creation helpers - -"""Quote strings to be valid DOT identifiers, assemble attribute lists.""" - -import re -import collections -import functools - -from . import _compat - -from . import tools - -__all__ = ['quote', 'quote_edge', 'a_list', 'attr_list', 'escape', 'nohtml'] - -# https://www.graphviz.org/doc/info/lang.html -# https://www.graphviz.org/doc/info/attrs.html#k:escString - -HTML_STRING = re.compile(r'<.*>$', re.DOTALL) - -ID = re.compile(r'([a-zA-Z_][a-zA-Z0-9_]*|-?(\.[0-9]+|[0-9]+(\.[0-9]*)?))$') - -KEYWORDS = {'node', 'edge', 'graph', 'digraph', 'subgraph', 'strict'} - -COMPASS = {'n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw', 'c', '_'} # TODO - -ESCAPE_UNESCAPED_QUOTES = functools.partial(re.compile(r'(?!\\)"').sub, r'\\"') - - -def quote(identifier, - is_html_string=HTML_STRING.match, - is_valid_id=ID.match, dot_keywords=KEYWORDS, - escape_unescaped_quotes=ESCAPE_UNESCAPED_QUOTES): - r"""Return DOT identifier from string, quote if needed. - - >>> quote('') - '""' - - >>> quote('spam') - 'spam' - - >>> quote('spam spam') - '"spam spam"' - - >>> quote('-4.2') - '-4.2' - - >>> quote('.42') - '.42' - - >>> quote('<spam>') - '<spam>' - - >>> quote(nohtml('<>')) - '"<>"' - - >>> print(quote('"')) - "\"" - - >>> print(quote('\\"')) - "\\"" - """ - if is_html_string(identifier) and not isinstance(identifier, NoHtml): - pass - elif not is_valid_id(identifier) or identifier.lower() in dot_keywords: - return '"%s"' % escape_unescaped_quotes(identifier) - return identifier - - -def quote_edge(identifier): - """Return DOT edge statement node_id from string, quote if needed. 
- - >>> quote_edge('spam') - 'spam' - - >>> quote_edge('spam spam:eggs eggs') - '"spam spam":"eggs eggs"' - - >>> quote_edge('spam:eggs:s') - 'spam:eggs:s' - """ - node, _, rest = identifier.partition(':') - parts = [quote(node)] - if rest: - port, _, compass = rest.partition(':') - parts.append(quote(port)) - if compass: - parts.append(compass) - return ':'.join(parts) - - -def a_list(label=None, kwargs=None, attributes=None): - """Return assembled DOT a_list string. - - >>> a_list('spam', {'spam': None, 'ham': 'ham ham', 'eggs': ''}) - 'label=spam eggs="" ham="ham ham"' - """ - result = ['label=%s' % quote(label)] if label is not None else [] - if kwargs: - items = ['%s=%s' % (quote(k), quote(v)) - for k, v in tools.mapping_items(kwargs) if v is not None] - result.extend(items) - if attributes: - if hasattr(attributes, 'items'): - attributes = tools.mapping_items(attributes) - items = ['%s=%s' % (quote(k), quote(v)) - for k, v in attributes if v is not None] - result.extend(items) - return ' '.join(result) - - -def attr_list(label=None, kwargs=None, attributes=None): - """Return assembled DOT attribute list string. - - Sorts ``kwargs`` and ``attributes`` if they are plain dicts (to avoid - unpredictable order from hash randomization in Python 3 versions). - - >>> attr_list() - '' - - >>> attr_list('spam spam', kwargs={'eggs': 'eggs', 'ham': 'ham ham'}) - ' [label="spam spam" eggs=eggs ham="ham ham"]' - - >>> attr_list(kwargs={'spam': None, 'eggs': ''}) - ' [eggs=""]' - """ - content = a_list(label, kwargs, attributes) - if not content: - return '' - return ' [%s]' % content - - -def escape(s): - r"""Return ``s`` as literal disabling special meaning of backslashes and ``'<...>'``. - - see also https://www.graphviz.org/doc/info/attrs.html#k:escString - - Args: - s: String in which backslashes and ``'<...>'`` should be treated as literal. - Raises: - TypeError: If ``s`` is not a ``str`` on Python 3, or a ``str``/``unicode`` on Python 2. - - >>> print(escape(r'\l')) - \\l - """ - return nohtml(s.replace('\\', '\\\\')) - - -class NoHtml(object): - """Mixin for string subclasses disabling fall-through of ``'<...>'``.""" - - __slots__ = () - - _doc = "%s subclass that does not treat ``'<...>'`` as DOT HTML string." - - @classmethod - def _subcls(cls, other): - name = '%s_%s' % (cls.__name__, other.__name__) - bases = (other, cls) - ns = {'__doc__': cls._doc % other.__name__} - return type(name, bases, ns) - - -NOHTML = collections.OrderedDict((c, NoHtml._subcls(c)) for c in _compat.string_classes) - - -def nohtml(s): - """Return copy of ``s`` that will not treat ``'<...>'`` as DOT HTML string in quoting. - - Args: - s: String in which leading ``'<'`` and trailing ``'>'`` should be treated as literal. - Raises: - TypeError: If ``s`` is not a ``str`` on Python 3, or a ``str``/``unicode`` on Python 2. - - >>> quote('<>-*-<>') - '<>-*-<>' - - >>> quote(nohtml('<>-*-<>')) - '"<>-*-<>"' - """ - try: - subcls = NOHTML[type(s)] - except KeyError: - raise TypeError('%r does not have one of the required types:' - ' %r' % (s, list(NOHTML))) - return subcls(s) diff --git a/venv/lib/python3.8/site-packages/graphviz/tools.py b/venv/lib/python3.8/site-packages/graphviz/tools.py deleted file mode 100644 index 2ea121bf..00000000 --- a/venv/lib/python3.8/site-packages/graphviz/tools.py +++ /dev/null @@ -1,47 +0,0 @@ -# tools.py - generic helpers - -import os - -from . 
import _compat - -__all__ = ['attach', 'mkdirs', 'mapping_items'] - - -def attach(object, name): - """Return a decorator doing ``setattr(object, name)`` with its argument. - - >>> spam = type('Spam', (object,), {})() - >>> @attach(spam, 'eggs') - ... def func(): - ... pass - >>> spam.eggs # doctest: +ELLIPSIS - - """ - def decorator(func): - setattr(object, name, func) - return func - return decorator - - -def mkdirs(filename, mode=0o777): - """Recursively create directories up to the path of ``filename`` as needed.""" - dirname = os.path.dirname(filename) - if not dirname: - return - _compat.makedirs(dirname, mode=mode, exist_ok=True) - - -def mapping_items(mapping): - """Return an iterator over the ``mapping`` items, sort if it's a plain dict. - - >>> list(mapping_items({'spam': 0, 'ham': 1, 'eggs': 2})) - [('eggs', 2), ('ham', 1), ('spam', 0)] - - >>> from collections import OrderedDict - >>> list(mapping_items(OrderedDict(enumerate(['spam', 'ham', 'eggs'])))) - [(0, 'spam'), (1, 'ham'), (2, 'eggs')] - """ - result = _compat.iteritems(mapping) - if type(mapping) is dict: - result = iter(sorted(result)) - return result diff --git a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/INSTALLER b/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/INSTALLER deleted file mode 100644 index a1b589e3..00000000 --- a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/LICENSE b/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/LICENSE deleted file mode 100755 index b5083a50..00000000 --- a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Timothy Edmund Crosley - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/METADATA b/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/METADATA deleted file mode 100644 index fbc7f6c4..00000000 --- a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/METADATA +++ /dev/null @@ -1,697 +0,0 @@ -Metadata-Version: 2.1 -Name: isort -Version: 4.3.21 -Summary: A Python utility / library to sort Python imports. 
-Home-page: https://github.com/timothycrosley/isort -Author: Timothy Crosley -Author-email: timothy.crosley@gmail.com -License: MIT -Keywords: Refactor,Python,Python2,Python3,Refactoring,Imports,Sort,Clean -Platform: UNKNOWN -Classifier: Development Status :: 6 - Mature -Classifier: Intended Audience :: Developers -Classifier: Natural Language :: English -Classifier: Environment :: Console -Classifier: License :: OSI Approved :: MIT License -Classifier: Programming Language :: Python -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Programming Language :: Python :: 3.7 -Classifier: Programming Language :: Python :: Implementation :: CPython -Classifier: Programming Language :: Python :: Implementation :: PyPy -Classifier: Topic :: Software Development :: Libraries -Classifier: Topic :: Utilities -Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.* -Requires-Dist: futures ; python_version < "3.2" -Requires-Dist: backports.functools-lru-cache ; python_version < "3.2" -Provides-Extra: pipfile -Requires-Dist: pipreqs ; extra == 'pipfile' -Requires-Dist: requirementslib ; extra == 'pipfile' -Provides-Extra: pyproject -Requires-Dist: toml ; extra == 'pyproject' -Provides-Extra: requirements -Requires-Dist: pipreqs ; extra == 'requirements' -Requires-Dist: pip-api ; extra == 'requirements' -Provides-Extra: xdg_home -Requires-Dist: appdirs (>=1.4.0) ; extra == 'xdg_home' - -.. image:: https://raw.github.com/timothycrosley/isort/master/logo.png - :alt: isort - -######## - -.. image:: https://badge.fury.io/py/isort.svg - :target: https://badge.fury.io/py/isort - :alt: PyPI version - -.. image:: https://travis-ci.org/timothycrosley/isort.svg?branch=master - :target: https://travis-ci.org/timothycrosley/isort - :alt: Build Status - - -.. image:: https://coveralls.io/repos/timothycrosley/isort/badge.svg?branch=release%2F2.6.0&service=github - :target: https://coveralls.io/github/timothycrosley/isort?branch=release%2F2.6.0 - :alt: Coverage - -.. image:: https://img.shields.io/github/license/mashape/apistatus.svg - :target: https://pypi.org/project/hug/ - :alt: License - -.. image:: https://badges.gitter.im/Join%20Chat.svg - :alt: Join the chat at https://gitter.im/timothycrosley/isort - :target: https://gitter.im/timothycrosley/isort?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge - -.. image:: https://pepy.tech/badge/isort - :alt: Downloads - :target: https://pepy.tech/project/isort - -isort your python imports for you so you don't have to. - -isort is a Python utility / library to sort imports alphabetically, and automatically separated into sections. -It provides a command line utility, Python library and `plugins for various editors `_ to quickly sort all your imports. -It currently cleanly supports Python 2.7 and 3.4+ without any dependencies. - -.. image:: https://raw.github.com/timothycrosley/isort/develop/example.gif - :alt: Example Usage - -Before isort: - -.. 
code-block:: python
-
-    from my_lib import Object
-
-    print("Hey")
-
-    import os
-
-    from my_lib import Object3
-
-    from my_lib import Object2
-
-    import sys
-
-    from third_party import lib15, lib1, lib2, lib3, lib4, lib5, lib6, lib7, lib8, lib9, lib10, lib11, lib12, lib13, lib14
-
-    import sys
-
-    from __future__ import absolute_import
-
-    from third_party import lib3
-
-    print("yo")
-
-After isort:
-
-.. code-block:: python
-
-    from __future__ import absolute_import
-
-    import os
-    import sys
-
-    from third_party import (lib1, lib2, lib3, lib4, lib5, lib6, lib7, lib8,
-                             lib9, lib10, lib11, lib12, lib13, lib14, lib15)
-
-    from my_lib import Object, Object2, Object3
-
-    print("Hey")
-    print("yo")
-
-Installing isort
-================
-
-Installing isort is as simple as:
-
-.. code-block:: bash
-
-    pip install isort
-
-Install isort with requirements.txt support:
-
-.. code-block:: bash
-
-    pip install isort[requirements]
-
-Install isort with Pipfile support:
-
-.. code-block:: bash
-
-    pip install isort[pipfile]
-
-Install isort with support for both formats:
-
-.. code-block:: bash
-
-    pip install isort[requirements,pipfile]
-
-Using isort
-===========
-
-**From the command line**:
-
-.. code-block:: bash
-
-    isort mypythonfile.py mypythonfile2.py
-
-or recursively:
-
-.. code-block:: bash
-
-    isort -rc .
-
-*which is equivalent to:*
-
-.. code-block:: bash
-
-    isort **/*.py
-
-or to see the proposed changes without applying them:
-
-.. code-block:: bash
-
-    isort mypythonfile.py --diff
-
-Finally, to atomically run isort against a project, applying changes only if they don't introduce syntax errors, do:
-
-.. code-block:: bash
-
-    isort -rc --atomic .
-
-(Note: this is disabled by default, as it keeps isort from being able to run against code written using a different version of Python.)
-
-**From within Python**:
-
-.. code-block:: python
-
-    from isort import SortImports
-
-    SortImports("pythonfile.py")
-
-or:
-
-.. code-block:: python
-
-    from isort import SortImports
-
-    new_contents = SortImports(file_contents=old_contents).output
-
-**From within Kate:**
-
-.. code-block:: bash
-
-    ctrl+[
-
-or:
-
-.. code-block:: bash
-
-    menu > Python > Sort Imports
-
-Installing isort's Kate plugin
-==============================
-
-For KDE 4.13+ / Pate 2.0+:
-
-.. code-block:: bash
-
-    wget https://raw.github.com/timothycrosley/isort/master/kate_plugin/isort_plugin.py --output-document ~/.kde/share/apps/kate/pate/isort_plugin.py
-    wget https://raw.github.com/timothycrosley/isort/master/kate_plugin/isort_plugin_ui.rc --output-document ~/.kde/share/apps/kate/pate/isort_plugin_ui.rc
-    wget https://raw.github.com/timothycrosley/isort/master/kate_plugin/katepart_isort.desktop --output-document ~/.kde/share/kde4/services/katepart_isort.desktop
-
-For all older versions:
-
-.. code-block:: bash
-
-    wget https://raw.github.com/timothycrosley/isort/master/kate_plugin/isort_plugin_old.py --output-document ~/.kde/share/apps/kate/pate/isort_plugin.py
-
-You will then need to restart Kate and enable Python Plugins as well as the isort plugin itself.
-
-Installing isort for your preferred text editor
-===============================================
-
-Several plugins have been written that enable you to use isort from within a variety of text editors.
-You can find a full list of them `on the isort wiki `_.
-Additionally, I will enthusiastically accept pull requests that include plugins for other text editors,
-and I will add documentation for them as I am notified.
-
-How does isort work?
-====================
-
-isort parses specified files for global-level import lines (imports outside of try / except blocks, functions, etc.)
-and puts them all at the top of the file, grouped together by the type of import:
-
-- Future
-- Python Standard Library
-- Third Party
-- Current Python Project
-- Explicitly Local (. before import, as in: ``from . import x``)
-- Custom Separate Sections (defined by the forced_separate list in the configuration file)
-- Custom Sections (defined by the sections list in the configuration file)
-
-Inside each section the imports are sorted alphabetically. isort automatically removes duplicate Python imports,
-and wraps long from imports to the specified line length (defaults to 79).
-
-When will isort not work?
-=========================
-
-isort will not help if you need a try / except block in the middle of your top-level imports, or if
-your import order is directly linked to precedence.
-
-For example: a common practice in Django settings files is importing * from various settings files to form
-a new settings file. In this case, if any of the imports change order, you are changing the settings definition itself.
-
-However, you can configure isort to skip over just these files - or even to force certain imports to the top.
-
-Configuring isort
-=================
-
-If you find the default isort settings do not work well for your project, isort provides several ways to adjust
-the behavior.
-
-To configure isort for a single user, create a ``~/.isort.cfg`` or ``$XDG_CONFIG_HOME/isort.cfg`` file:
-
-.. code-block:: ini
-
-    [settings]
-    line_length=120
-    force_to_top=file1.py,file2.py
-    skip=file3.py,file4.py
-    known_future_library=future,pies
-    known_standard_library=std,std2
-    known_third_party=randomthirdparty
-    known_first_party=mylib1,mylib2
-    indent='    '
-    multi_line_output=3
-    length_sort=1
-    forced_separate=django.contrib,django.utils
-    default_section=FIRSTPARTY
-    no_lines_before=LOCALFOLDER
-
-Additionally, you can specify project-level configuration simply by placing a ``.isort.cfg`` file at the root of your
-project. isort will look up to 25 directories up from the file it is run against to find a project-specific configuration.
-
-Or, if you prefer, you can add an ``isort`` or ``tool:isort`` section to your project's ``setup.cfg`` or ``tox.ini`` file with any desired settings.
-
-You can also add your desired settings under a ``[tool.isort]`` section in your ``pyproject.toml`` file.
-
-You can then override any of these settings by using command line arguments, or by passing in override values to the
-SortImports class.
-
-Finally, as of version 3.0, isort supports editorconfig files using the standard syntax defined here:
-https://editorconfig.org/
-
-This means you can place any standard isort configuration parameters within a .editorconfig file under the ``*.py`` section
-and they will be honored.
-
-For a full list of isort settings and their meanings `take a look at the isort wiki `_.
-
-Multi line output modes
-=======================
-
-You will notice above the "multi_line_output" setting. This setting defines how from imports wrap when they extend
-past the line_length limit, and it has the following possible settings:
-
-**0 - Grid**
-
-.. code-block:: python
-
-    from third_party import (lib1, lib2, lib3,
-                             lib4, lib5, ...)
-
-**1 - Vertical**
-
-.. code-block:: python
-
-    from third_party import (lib1,
-                             lib2,
-                             lib3,
-                             lib4,
-                             lib5,
-                             ...)
-
-**2 - Hanging Indent**
-
-.. code-block:: python
-
-    from third_party import \
-        lib1, lib2, lib3, \
-        lib4, lib5, lib6
-
-**3 - Vertical Hanging Indent**
-
-.. code-block:: python
-
-    from third_party import (
-        lib1,
-        lib2,
-        lib3,
-        lib4,
-    )
-
-**4 - Hanging Grid**
-
-.. code-block:: python
-
-    from third_party import (
-        lib1, lib2, lib3, lib4,
-        lib5, ...)
-
-**5 - Hanging Grid Grouped**
-
-.. code-block:: python
-
-    from third_party import (
-        lib1, lib2, lib3, lib4,
-        lib5, ...
-    )
-
-**6 - Hanging Grid Grouped, No Trailing Comma**
-
-In mode 5, isort leaves a single extra space to maintain consistency of output when a comma is added at the end.
-Mode 6 is the same, except that no extra space is maintained, so lines may end up one character longer.
-You can enforce a trailing comma by using this in conjunction with ``-tc`` or ``trailing_comma: True``.
-
-.. code-block:: python
-
-    from third_party import (
-        lib1, lib2, lib3, lib4,
-        lib5
-    )
-
-**7 - NOQA**
-
-.. code-block:: python
-
-    from third_party import lib1, lib2, lib3, ... # NOQA
-
-Alternatively, you can set ``force_single_line`` to ``True`` (``-sl`` on the command line) and every import will appear on its
-own line:
-
-.. code-block:: python
-
-    from third_party import lib1
-    from third_party import lib2
-    from third_party import lib3
-    ...
-
-Note: to change how constant indents appear, simply change the indent property using one of the following accepted formats:
-
-* Number of spaces you would like. For example: 4 would cause standard 4 space indentation.
-* Tab
-* A verbatim string with quotes around it.
-
-For example:
-
-.. code-block:: python
-
-    "    "
-
-is equivalent to 4.
-
-For the import styles that use parentheses, you can control whether or not to
-include a trailing comma after the last import with the ``include_trailing_comma``
-option (defaults to ``False``).
-
-Intelligently Balanced Multi-line Imports
-=========================================
-
-As of isort 3.1.0, support for balanced multi-line imports has been added.
-With this enabled, isort will dynamically change the import length to the one that produces the most balanced grid,
-while staying below the maximum import length defined.
-
-Example:
-
-.. code-block:: python
-
-    from __future__ import (absolute_import, division,
-                            print_function, unicode_literals)
-
-Will be produced instead of:
-
-.. code-block:: python
-
-    from __future__ import (absolute_import, division, print_function,
-                            unicode_literals)
-
-To enable this, set ``balanced_wrapping`` to ``True`` in your config or pass the ``-e`` option into the command line utility.
-
-Custom Sections and Ordering
-============================
-
-You can change the section order with the ``sections`` option from the default of:
-
-.. code-block:: ini
-
-    FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
-
-to your preference:
-
-.. code-block:: ini
-
-    sections=FUTURE,STDLIB,FIRSTPARTY,THIRDPARTY,LOCALFOLDER
-
-You also can define your own sections and their order.
-
-Example:
-
-.. code-block:: ini
-
-    known_django=django
-    known_pandas=pandas,numpy
-    sections=FUTURE,STDLIB,DJANGO,THIRDPARTY,PANDAS,FIRSTPARTY,LOCALFOLDER
-
-would create two new sections with the specified known modules.
-
-The ``no_lines_before`` option will prevent the listed sections from being split from the previous section by an empty line.
-
-Example:
-
-.. code-block:: ini
-
-    sections=FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
-    no_lines_before=LOCALFOLDER
-
-would produce a section with both FIRSTPARTY and LOCALFOLDER modules combined.
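-For instance, with the hypothetical ``known_django``/``known_pandas`` configuration above, sorted output would be grouped
-roughly as follows (a sketch; every module name other than ``django``, ``numpy``, and ``pandas`` is illustrative):
-
-.. code-block:: python
-
-    from __future__ import absolute_import  # FUTURE
-
-    import os  # STDLIB
-
-    import django.conf  # DJANGO (matched by known_django)
-
-    import requests  # THIRDPARTY (any other installed package)
-
-    import numpy  # PANDAS (matched by known_pandas)
-    import pandas
-
-    import mylib1  # FIRSTPARTY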
-
-Auto-comment import sections
-============================
-
-Some projects prefer to have import sections uniquely titled to aid in identifying the sections quickly
-when visually scanning. isort can automate this as well. To do this, simply set the ``import_heading_{section_name}``
-setting for each section you wish to have auto-commented to the desired comment.
-
-For example:
-
-.. code-block:: ini
-
-    import_heading_stdlib=Standard Library
-    import_heading_firstparty=My Stuff
-
-would lead to output looking like the following:
-
-.. code-block:: python
-
-    # Standard Library
-    import os
-    import sys
-
-    import django.settings
-
-    # My Stuff
-    import myproject.test
-
-Ordering by import length
-=========================
-
-isort also makes it easy to sort your imports by length, simply by setting the ``length_sort`` option to ``True``.
-This will result in the following output style:
-
-.. code-block:: python
-
-    from evn.util import (
-        Pool,
-        Dict,
-        Options,
-        Constant,
-        DecayDict,
-        UnexpectedCodePath,
-    )
-
-It is also possible to opt in to sorting imports by length for only specific
-sections by using ``length_sort_`` followed by the section name as a
-configuration item, e.g.::
-
-    length_sort_stdlib=1
-
-Skip processing of imports (outside of configuration)
-=====================================================
-
-To make isort ignore a single import, simply add a comment at the end of the import line containing the text ``isort:skip``:
-
-.. code-block:: python
-
-    import module # isort:skip
-
-or:
-
-.. code-block:: python
-
-    from xyz import (abc,  # isort:skip
-                     yo,
-                     hey)
-
-To make isort skip an entire file, simply add ``isort:skip_file`` to the module's docstring:
-
-.. code-block:: python
-
-    """ my_module.py
-        Best module ever
-
-        isort:skip_file
-    """
-
-    import b
-    import a
-
-Adding an import to multiple files
-==================================
-
-isort makes it easy to add an import statement across multiple files, while being assured it's correctly placed.
-
-From the command line:
-
-.. code-block:: bash
-
-    isort -a "from __future__ import print_function" *.py
-
-from within Kate:
-
-.. code-block::
-
-    ctrl+]
-
-or:
-
-.. code-block::
-
-    menu > Python > Add Import
-
-Removing an import from multiple files
-======================================
-
-isort also makes it easy to remove an import from multiple files, without having to be concerned with how it was originally
-formatted.
-
-From the command line:
-
-.. code-block:: bash
-
-    isort -rm "os.system" *.py
-
-from within Kate:
-
-.. code-block::
-
-    ctrl+shift+]
-
-or:
-
-.. code-block::
-
-    menu > Python > Remove Import
-
-Using isort to verify code
-==========================
-
-The ``--check-only`` option
----------------------------
-
-isort can also be used to verify that code is correctly formatted by running it with ``-c``.
-Any files that contain incorrectly sorted and/or formatted imports will be output to ``stderr``.
-
-.. code-block:: bash
-
-    isort **/*.py -c -vb
-
-    SUCCESS: /home/timothy/Projects/Open_Source/isort/isort_kate_plugin.py Everything Looks Good!
-    ERROR: /home/timothy/Projects/Open_Source/isort/isort/isort.py Imports are incorrectly sorted.
-
-One great place this can be used is with a pre-commit git hook, such as this one by @acdha:
-
-https://gist.github.com/acdha/8717683
-
-This can help to ensure a certain level of code quality throughout a project.
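-The same check can also be driven from Python, which is handy in a custom CI script. Below is a minimal sketch using the
-``SortImports`` API shown earlier (the file list is hypothetical):
-
-.. code-block:: python
-
-    import sys
-
-    from isort import SortImports
-
-    # Hypothetical list of files to verify; a real script might glob for these.
-    files_to_check = ["project/app.py", "project/utils.py"]
-
-    # check=True reports incorrectly sorted imports without rewriting the files.
-    errors = sum(
-        1 for path in files_to_check
-        if SortImports(path, check=True).incorrectly_sorted
-    )
-
-    # Exit non-zero so a CI job or hook fails when any file needs sorting.
-    sys.exit(1 if errors else 0)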
-
-
-Git hook
---------
-
-isort provides a hook function that can be integrated into your Git pre-commit script to check
-Python code before committing.
-
-To cause the commit to fail if there are isort errors (strict mode), include the following in
-``.git/hooks/pre-commit``:
-
-.. code-block:: python
-
-    #!/usr/bin/env python
-    import sys
-    from isort.hooks import git_hook
-
-    sys.exit(git_hook(strict=True, modify=True))
-
-If you just want to display warnings but allow the commit to happen anyway, call ``git_hook`` without
-the ``strict`` parameter. If you want to display warnings but not also fix the code, call ``git_hook`` without
-the ``modify`` parameter.
-
-Setuptools integration
-----------------------
-
-Upon installation, isort enables a ``setuptools`` command that checks the Python files
-declared by your project.
-
-Running ``python setup.py isort`` on the command line will check the files
-listed in your ``py_modules`` and ``packages``. If any warning is found,
-the command will exit with an error code:
-
-.. code-block:: bash
-
-    $ python setup.py isort
-
-Also, to allow users to use the command without having to install
-isort themselves, add isort to the ``setup_requires`` of your ``setup()`` like so:
-
-.. code-block:: python
-
-    setup(
-        name="project",
-        packages=["project"],
-
-        setup_requires=[
-            "isort"
-        ]
-    )
-
-
-Why isort?
-==========
-
-isort simply stands for import sort. It was originally called "sortImports"; however, I got tired of typing the extra
-characters and came to the realization that camelCase is not Pythonic.
-
-I wrote isort because, in an organization I used to work in, the manager came in one day and decided all code must
-have alphabetically sorted imports. The code base was huge, and he meant for us to do it by hand. However, being a
-programmer, I'm too lazy to spend 8 hours mindlessly performing a function, but not too lazy to spend 16
-hours automating it. I was given permission to open source sortImports, and here we are :)
-
---------------------------------------------
-
-Thanks and I hope you find isort useful!
- -~Timothy Crosley - - diff --git a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/RECORD b/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/RECORD deleted file mode 100644 index 67adf5fd..00000000 --- a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/RECORD +++ /dev/null @@ -1,30 +0,0 @@ -../../../bin/isort,sha256=sa07W3U-_T9YemBDnQCMWEt_FIn2e9RyBMiatFf6nrU,244 -isort-4.3.21.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -isort-4.3.21.dist-info/LICENSE,sha256=BjKUABw9Uj26y6ud1UrCKZgnVsyvWSylMkCysM3YIGU,1089 -isort-4.3.21.dist-info/METADATA,sha256=8fY1DuLOn_UnCH58A8AcsCUZpYWeLCsQF-n-GIXlxOM,19749 -isort-4.3.21.dist-info/RECORD,, -isort-4.3.21.dist-info/WHEEL,sha256=h_aVn5OB2IERUjMbi2pucmR_zzWJtk303YXvhh60NJ8,110 -isort-4.3.21.dist-info/entry_points.txt,sha256=2M99eSFpnteDm3ekW8jya2a3A0-vFntKdT1fP93Tyks,148 -isort-4.3.21.dist-info/top_level.txt,sha256=mrBLoVpJnQWBbnMOSdzkjN1E9Z-e3Zd-MRlo88bstUk,6 -isort/__init__.py,sha256=_DTTMASePJCqsKnRJPf_YgFv5ZJOHg1uPLkrQHcqA2c,1380 -isort/__main__.py,sha256=9tThPqyOnY86bHaxJ0WciTd-rfKN3O-PQoNGBO2xhio,205 -isort/__pycache__/__init__.cpython-38.pyc,, -isort/__pycache__/__main__.cpython-38.pyc,, -isort/__pycache__/finders.cpython-38.pyc,, -isort/__pycache__/hooks.cpython-38.pyc,, -isort/__pycache__/isort.cpython-38.pyc,, -isort/__pycache__/main.cpython-38.pyc,, -isort/__pycache__/natural.cpython-38.pyc,, -isort/__pycache__/pie_slice.cpython-38.pyc,, -isort/__pycache__/pylama_isort.cpython-38.pyc,, -isort/__pycache__/settings.cpython-38.pyc,, -isort/__pycache__/utils.cpython-38.pyc,, -isort/finders.py,sha256=0w39ygCFuOv40OHixflrOv_Xna8K78usa5ySwS9GkWE,13185 -isort/hooks.py,sha256=GcyPMF7raq3B1z4ubbzIWoMiY5DePDni0Nct5U87uMQ,3230 -isort/isort.py,sha256=krLW0QgwnVjUD3hYTpQmWkMa5TDEZxx6AbX80vlVNoA,53910 -isort/main.py,sha256=rS7dAu_0T-8Jxi3sDWu00IOjt4j0r3vJi6bXZn6RnQg,21974 -isort/natural.py,sha256=hlcWsGKfIUC4Atjp5IIqDCmg1madY6ave9oNiTGjJ0Q,1794 -isort/pie_slice.py,sha256=hO6l1XocvGAd8XTR8526r-G7XIncUQB53_DHQ4AZEYI,5612 -isort/pylama_isort.py,sha256=wF6NOEVuibme0l-5pH9pCW1j4vGaFamuwll494TnzDI,785 -isort/settings.py,sha256=4_Jf-9GaBy9fi6UJctLqesIAMAegWekRIJdJmH5TBNE,17452 -isort/utils.py,sha256=KtazEoeX9XmtcrUGP6xl5lBX7Ye2N08ACGaWxiGcIaE,1344 diff --git a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/WHEEL b/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/WHEEL deleted file mode 100644 index 78e6f69d..00000000 --- a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.33.4) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/entry_points.txt b/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/entry_points.txt deleted file mode 100644 index 3a77a18c..00000000 --- a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/entry_points.txt +++ /dev/null @@ -1,9 +0,0 @@ -[console_scripts] -isort = isort.main:main - -[distutils.commands] -isort = isort.main:ISortCommand - -[pylama.linter] -isort = isort.pylama_isort:Linter - diff --git a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/top_level.txt b/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/top_level.txt deleted file mode 100644 index 2a792435..00000000 --- a/venv/lib/python3.8/site-packages/isort-4.3.21.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -isort diff --git a/venv/lib/python3.8/site-packages/isort/__init__.py 
b/venv/lib/python3.8/site-packages/isort/__init__.py deleted file mode 100644 index 9a0a0731..00000000 --- a/venv/lib/python3.8/site-packages/isort/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -"""__init__.py. - -Defines the isort module to include the SortImports utility class as well as any defined settings. - -Copyright (C) 2013 Timothy Edmund Crosley - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -""" - -from __future__ import absolute_import, division, print_function, unicode_literals - -from . import settings # noqa: F401 -from .isort import SortImports # noqa: F401 - -__version__ = "4.3.21" diff --git a/venv/lib/python3.8/site-packages/isort/__main__.py b/venv/lib/python3.8/site-packages/isort/__main__.py deleted file mode 100644 index 91cc154d..00000000 --- a/venv/lib/python3.8/site-packages/isort/__main__.py +++ /dev/null @@ -1,9 +0,0 @@ -from __future__ import absolute_import - -from isort.pie_slice import apply_changes_to_python_environment - -apply_changes_to_python_environment() - -from isort.main import main # noqa: E402 isort:skip - -main() diff --git a/venv/lib/python3.8/site-packages/isort/finders.py b/venv/lib/python3.8/site-packages/isort/finders.py deleted file mode 100644 index 225bd121..00000000 --- a/venv/lib/python3.8/site-packages/isort/finders.py +++ /dev/null @@ -1,382 +0,0 @@ -"""Finders try to find right section for passed module name -""" -from __future__ import absolute_import, division, print_function, unicode_literals - -import inspect -import os -import os.path -import re -import sys -import sysconfig -from fnmatch import fnmatch -from glob import glob - -from .pie_slice import PY2 -from .utils import chdir, exists_case_sensitive - -try: - from pipreqs import pipreqs -except ImportError: - pipreqs = None - -try: - from pip_api import parse_requirements -except ImportError: - parse_requirements = None - -try: - from requirementslib import Pipfile -except ImportError: - Pipfile = None - -try: - from functools import lru_cache -except ImportError: - from backports.functools_lru_cache import lru_cache - - -KNOWN_SECTION_MAPPING = { - 'STDLIB': 'STANDARD_LIBRARY', - 'FUTURE': 'FUTURE_LIBRARY', - 'FIRSTPARTY': 'FIRST_PARTY', - 'THIRDPARTY': 'THIRD_PARTY', -} - - -class BaseFinder(object): - def __init__(self, config, sections): - self.config = config - self.sections = sections - - -class ForcedSeparateFinder(BaseFinder): - def find(self, module_name): - for forced_separate in self.config['forced_separate']: - # Ensure all forced_separate patterns will match to end of string - path_glob = 
forced_separate - if not forced_separate.endswith('*'): - path_glob = '%s*' % forced_separate - - if fnmatch(module_name, path_glob) or fnmatch(module_name, '.' + path_glob): - return forced_separate - - -class LocalFinder(BaseFinder): - def find(self, module_name): - if module_name.startswith("."): - return self.sections.LOCALFOLDER - - -class KnownPatternFinder(BaseFinder): - def __init__(self, config, sections): - super(KnownPatternFinder, self).__init__(config, sections) - - self.known_patterns = [] - for placement in reversed(self.sections): - known_placement = KNOWN_SECTION_MAPPING.get(placement, placement) - config_key = 'known_{0}'.format(known_placement.lower()) - known_patterns = self.config.get(config_key, []) - known_patterns = [ - pattern - for known_pattern in known_patterns - for pattern in self._parse_known_pattern(known_pattern) - ] - for known_pattern in known_patterns: - regexp = '^' + known_pattern.replace('*', '.*').replace('?', '.?') + '$' - self.known_patterns.append((re.compile(regexp), placement)) - - @staticmethod - def _is_package(path): - """ - Evaluates if path is a python package - """ - if PY2: - return os.path.exists(os.path.join(path, '__init__.py')) - else: - return os.path.isdir(path) - - def _parse_known_pattern(self, pattern): - """ - Expand pattern if identified as a directory and return found sub packages - """ - if pattern.endswith(os.path.sep): - patterns = [ - filename - for filename in os.listdir(pattern) - if self._is_package(os.path.join(pattern, filename)) - ] - else: - patterns = [pattern] - - return patterns - - def find(self, module_name): - # Try to find most specific placement instruction match (if any) - parts = module_name.split('.') - module_names_to_check = ('.'.join(parts[:first_k]) for first_k in range(len(parts), 0, -1)) - for module_name_to_check in module_names_to_check: - for pattern, placement in self.known_patterns: - if pattern.match(module_name_to_check): - return placement - - -class PathFinder(BaseFinder): - def __init__(self, config, sections): - super(PathFinder, self).__init__(config, sections) - - # restore the original import path (i.e. 
not the path to bin/isort) - self.paths = [os.getcwd()] - - # virtual env - self.virtual_env = self.config.get('virtual_env') or os.environ.get('VIRTUAL_ENV') - if self.virtual_env: - self.virtual_env = os.path.realpath(self.virtual_env) - self.virtual_env_src = False - if self.virtual_env: - self.virtual_env_src = '{0}/src/'.format(self.virtual_env) - for path in glob('{0}/lib/python*/site-packages'.format(self.virtual_env)): - if path not in self.paths: - self.paths.append(path) - for path in glob('{0}/lib/python*/*/site-packages'.format(self.virtual_env)): - if path not in self.paths: - self.paths.append(path) - for path in glob('{0}/src/*'.format(self.virtual_env)): - if os.path.isdir(path): - self.paths.append(path) - - # conda - self.conda_env = self.config.get('conda_env') or os.environ.get('CONDA_PREFIX') - if self.conda_env: - self.conda_env = os.path.realpath(self.conda_env) - for path in glob('{0}/lib/python*/site-packages'.format(self.conda_env)): - if path not in self.paths: - self.paths.append(path) - for path in glob('{0}/lib/python*/*/site-packages'.format(self.conda_env)): - if path not in self.paths: - self.paths.append(path) - - # handle case-insensitive paths on windows - self.stdlib_lib_prefix = os.path.normcase(sysconfig.get_paths()['stdlib']) - if self.stdlib_lib_prefix not in self.paths: - self.paths.append(self.stdlib_lib_prefix) - - # handle compiled libraries - self.ext_suffix = sysconfig.get_config_var("EXT_SUFFIX") or ".so" - - # add system paths - for path in sys.path[1:]: - if path not in self.paths: - self.paths.append(path) - - def find(self, module_name): - for prefix in self.paths: - package_path = "/".join((prefix, module_name.split(".")[0])) - is_module = (exists_case_sensitive(package_path + ".py") or - exists_case_sensitive(package_path + ".so") or - exists_case_sensitive(package_path + self.ext_suffix) or - exists_case_sensitive(package_path + "/__init__.py")) - is_package = exists_case_sensitive(package_path) and os.path.isdir(package_path) - if is_module or is_package: - if 'site-packages' in prefix: - return self.sections.THIRDPARTY - if 'dist-packages' in prefix: - return self.sections.THIRDPARTY - if self.virtual_env and self.virtual_env_src in prefix: - return self.sections.THIRDPARTY - if self.conda_env and self.conda_env in prefix: - return self.sections.THIRDPARTY - if os.path.normcase(prefix).startswith(self.stdlib_lib_prefix): - return self.sections.STDLIB - return self.config['default_section'] - - -class ReqsBaseFinder(BaseFinder): - def __init__(self, config, sections, path='.'): - super(ReqsBaseFinder, self).__init__(config, sections) - self.path = path - if self.enabled: - self.mapping = self._load_mapping() - self.names = self._load_names() - - @staticmethod - def _load_mapping(): - """Return list of mappings `package_name -> module_name` - - Example: - django-haystack -> haystack - """ - if not pipreqs: - return - path = os.path.dirname(inspect.getfile(pipreqs)) - path = os.path.join(path, 'mapping') - with open(path) as f: - # pypi_name: import_name - return dict(line.strip().split(":")[::-1] for line in f) - - def _load_names(self): - """Return list of thirdparty modules from requirements - """ - names = [] - for path in self._get_files(): - for name in self._get_names(path): - names.append(self._normalize_name(name)) - return names - - @staticmethod - def _get_parents(path): - prev = '' - while path != prev: - prev = path - yield path - path = os.path.dirname(path) - - def _get_files(self): - """Return paths to all requirements 
files - """ - path = os.path.abspath(self.path) - if os.path.isfile(path): - path = os.path.dirname(path) - - for path in self._get_parents(path): - for file_path in self._get_files_from_dir(path): - yield file_path - - def _normalize_name(self, name): - """Convert package name to module name - - Examples: - Django -> django - django-haystack -> haystack - Flask-RESTFul -> flask_restful - """ - if self.mapping: - name = self.mapping.get(name, name) - return name.lower().replace('-', '_') - - def find(self, module_name): - # required lib not installed yet - if not self.enabled: - return - - module_name, _sep, _submodules = module_name.partition('.') - module_name = module_name.lower() - if not module_name: - return - - for name in self.names: - if module_name == name: - return self.sections.THIRDPARTY - - -class RequirementsFinder(ReqsBaseFinder): - exts = ('.txt', '.in') - enabled = bool(parse_requirements) - - def _get_files_from_dir(self, path): - """Return paths to requirements files from passed dir. - """ - return RequirementsFinder._get_files_from_dir_cached(path) - - @classmethod - @lru_cache(maxsize=16) - def _get_files_from_dir_cached(cls, path): - result = [] - - for fname in os.listdir(path): - if 'requirements' not in fname: - continue - full_path = os.path.join(path, fname) - - # *requirements*/*.{txt,in} - if os.path.isdir(full_path): - for subfile_name in os.listdir(path): - for ext in cls.exts: - if subfile_name.endswith(ext): - result.append(os.path.join(path, subfile_name)) - continue - - # *requirements*.{txt,in} - if os.path.isfile(full_path): - for ext in cls.exts: - if fname.endswith(ext): - result.append(full_path) - break - - return result - - def _get_names(self, path): - """Load required packages from path to requirements file - """ - return RequirementsFinder._get_names_cached(path) - - @classmethod - @lru_cache(maxsize=16) - def _get_names_cached(cls, path): - results = [] - - with chdir(os.path.dirname(path)): - requirements = parse_requirements(path) - for req in requirements.values(): - if req.name: - results.append(req.name) - - return results - - -class PipfileFinder(ReqsBaseFinder): - enabled = bool(Pipfile) - - def _get_names(self, path): - with chdir(path): - project = Pipfile.load(path) - for req in project.packages: - yield req.name - - def _get_files_from_dir(self, path): - if 'Pipfile' in os.listdir(path): - yield path - - -class DefaultFinder(BaseFinder): - def find(self, module_name): - return self.config['default_section'] - - -class FindersManager(object): - finders = ( - ForcedSeparateFinder, - LocalFinder, - KnownPatternFinder, - PathFinder, - PipfileFinder, - RequirementsFinder, - DefaultFinder, - ) - - def __init__(self, config, sections, finders=None): - self.verbose = config.get('verbose', False) - - finders = self.finders if finders is None else finders - self.finders = [] - for finder in finders: - try: - self.finders.append(finder(config, sections)) - except Exception as exception: - # if one finder fails to instantiate isort can continue using the rest - if self.verbose: - print('{} encountered an error ({}) during instantiation and cannot be used'.format(finder.__name__, - str(exception))) - self.finders = tuple(self.finders) - - def find(self, module_name): - for finder in self.finders: - try: - section = finder.find(module_name) - except Exception as exception: - # isort has to be able to keep trying to identify the correct import section even if one approach fails - if self.verbose: - print('{} encountered an error ({}) while trying 
to identify the {} module'.format(finder.__name__, - str(exception), - module_name)) - if section is not None: - return section diff --git a/venv/lib/python3.8/site-packages/isort/hooks.py b/venv/lib/python3.8/site-packages/isort/hooks.py deleted file mode 100644 index 16a16e1f..00000000 --- a/venv/lib/python3.8/site-packages/isort/hooks.py +++ /dev/null @@ -1,91 +0,0 @@ -"""isort.py. - -Defines a git hook to allow pre-commit warnings and errors about import order. - -usage: - exit_code = git_hook(strict=True|False, modify=True|False) - -Copyright (C) 2015 Helen Sherwood-Taylor - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -""" -import subprocess - -from isort import SortImports - - -def get_output(command): - """ - Run a command and return raw output - - :param str command: the command to run - :returns: the stdout output of the command - """ - return subprocess.check_output(command.split()) - - -def get_lines(command): - """ - Run a command and return lines of output - - :param str command: the command to run - :returns: list of whitespace-stripped lines output by command - """ - stdout = get_output(command) - return [line.strip().decode('utf-8') for line in stdout.splitlines()] - - -def git_hook(strict=False, modify=False): - """ - Git pre-commit hook to check staged files for isort errors - - :param bool strict - if True, return number of errors on exit, - causing the hook to fail. If False, return zero so it will - just act as a warning. - :param bool modify - if True, fix the sources if they are not - sorted properly. If False, only report result without - modifying anything. - - :return number of errors if in strict mode, 0 otherwise. 
- """ - - # Get list of files modified and staged - diff_cmd = "git diff-index --cached --name-only --diff-filter=ACMRTUXB HEAD" - files_modified = get_lines(diff_cmd) - - errors = 0 - for filename in files_modified: - if filename.endswith('.py'): - # Get the staged contents of the file - staged_cmd = "git show :%s" % filename - staged_contents = get_output(staged_cmd) - - sort = SortImports( - file_path=filename, - file_contents=staged_contents.decode(), - check=True - ) - - if sort.incorrectly_sorted: - errors += 1 - if modify: - SortImports( - file_path=filename, - file_contents=staged_contents.decode(), - check=False, - ) - - return errors if strict else 0 diff --git a/venv/lib/python3.8/site-packages/isort/isort.py b/venv/lib/python3.8/site-packages/isort/isort.py deleted file mode 100644 index 245e53f1..00000000 --- a/venv/lib/python3.8/site-packages/isort/isort.py +++ /dev/null @@ -1,1060 +0,0 @@ -"""isort.py. - -Exposes a simple library to sort through imports within Python code - -usage: - SortImports(file_name) -or: - sorted = SortImports(file_contents=file_contents).output - -Copyright (C) 2013 Timothy Edmund Crosley - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -""" -from __future__ import absolute_import, division, print_function, unicode_literals - -import codecs -import copy -import io -import itertools -import os -import re -import sys -from collections import OrderedDict, namedtuple -from datetime import datetime -from difflib import unified_diff - -from . import settings -from .finders import FindersManager -from .natural import nsorted -from .pie_slice import input - - -class SortImports(object): - incorrectly_sorted = False - skipped = False - - def __init__(self, file_path=None, file_contents=None, file_=None, write_to_stdout=False, check=False, - show_diff=False, settings_path=None, ask_to_apply=False, run_path='', check_skip=True, - extension=None, **setting_overrides): - if not settings_path and file_path: - settings_path = os.path.dirname(os.path.abspath(file_path)) - settings_path = settings_path or os.getcwd() - - self.config = settings.from_path(settings_path).copy() - for key, value in setting_overrides.items(): - access_key = key.replace('not_', '').lower() - # The sections config needs to retain order and can't be converted to a set. 
- if access_key != 'sections' and type(self.config.get(access_key)) in (list, tuple): - if key.startswith('not_'): - self.config[access_key] = list(set(self.config[access_key]).difference(value)) - else: - self.config[access_key] = list(set(self.config[access_key]).union(value)) - else: - self.config[key] = value - - if self.config['force_alphabetical_sort']: - self.config.update({'force_alphabetical_sort_within_sections': True, - 'no_sections': True, - 'lines_between_types': 1, - 'from_first': True}) - - indent = str(self.config['indent']) - if indent.isdigit(): - indent = " " * int(indent) - else: - indent = indent.strip("'").strip('"') - if indent.lower() == "tab": - indent = "\t" - self.config['indent'] = indent - - self.config['comment_prefix'] = self.config['comment_prefix'].strip("'").strip('"') - - self.place_imports = {} - self.import_placements = {} - self.remove_imports = [self._format_simplified(removal) for removal in self.config['remove_imports']] - self.add_imports = [self._format_natural(addition) for addition in self.config['add_imports']] - self._section_comments = ["# " + value for key, value in self.config.items() if - key.startswith('import_heading') and value] - - self.file_encoding = 'utf-8' - file_name = file_path - self.file_path = file_path or "" - if file_path: - file_path = os.path.abspath(file_path) - if check_skip: - if run_path and file_path.startswith(run_path): - file_name = file_path.replace(run_path, '', 1) - else: - file_name = file_path - run_path = '' - - if settings.should_skip(file_name, self.config, run_path): - self.skipped = True - if self.config['verbose']: - print("WARNING: {0} was skipped as it's listed in 'skip' setting" - " or matches a glob in 'skip_glob' setting".format(file_path)) - file_contents = None - if not self.skipped and not file_contents: - with io.open(file_path, 'rb') as f: - file_encoding = coding_check(f) - with io.open(file_path, encoding=file_encoding, newline='') as file_to_import_sort: - try: - file_contents = file_to_import_sort.read() - self.file_path = file_path - self.file_encoding = file_encoding - encoding_success = True - except UnicodeDecodeError: - encoding_success = False - - if not encoding_success: - with io.open(file_path, newline='') as file_to_import_sort: - try: - file_contents = file_to_import_sort.read() - self.file_path = file_path - self.file_encoding = file_to_import_sort.encoding - except UnicodeDecodeError: - encoding_success = False - file_contents = None - self.skipped = True - if self.config['verbose']: - print("WARNING: {} was skipped as it couldn't be opened with the given " - "{} encoding or {} fallback encoding".format(file_path, - self.file_encoding, - file_to_import_sort.encoding)) - elif file_: - try: - file_.seek(0) - self.file_encoding = coding_check(file_) - file_.seek(0) - except (io.UnsupportedOperation, IOError): - pass - reader = codecs.getreader(self.file_encoding) - file_contents = reader(file_).read() - - # try to decode file_contents - if file_contents: - try: - basestring - # python 2 - need_decode = (str, bytes) - except NameError: - # python 3 - need_decode = bytes - - if isinstance(file_contents, need_decode): - file_contents = file_contents.decode(coding_check(file_contents.splitlines())) - - if file_contents is None or ("isort:" + "skip_file") in file_contents: - self.skipped = True - self.output = None - if write_to_stdout and file_contents: - sys.stdout.write(file_contents) - return - - if self.config['line_ending']: - self.line_separator = self.config['line_ending'] - 
else: - if '\r\n' in file_contents: - self.line_separator = '\r\n' - elif '\r' in file_contents: - self.line_separator = '\r' - else: - self.line_separator = '\n' - self.in_lines = file_contents.split(self.line_separator) - self.original_length = len(self.in_lines) - if (self.original_length > 1 or self.in_lines[:1] not in ([], [""])) or self.config['force_adds']: - for add_import in self.add_imports: - self.in_lines.append(add_import) - self.number_of_lines = len(self.in_lines) - - if not extension: - self.extension = file_name.split('.')[-1] if file_name else "py" - else: - self.extension = extension - - self.out_lines = [] - self.comments = {'from': {}, 'straight': {}, 'nested': {}, 'above': {'straight': {}, 'from': {}}} - self.imports = OrderedDict() - self.as_map = {} - - section_names = self.config['sections'] - self.sections = namedtuple('Sections', section_names)(*[name for name in section_names]) - for section in itertools.chain(self.sections, self.config['forced_separate']): - self.imports[section] = {'straight': OrderedDict(), 'from': OrderedDict()} - - self.finder = FindersManager(config=self.config, sections=self.sections) - - self.index = 0 - self.import_index = -1 - self._first_comment_index_start = -1 - self._first_comment_index_end = -1 - self._parse() - if self.import_index != -1: - self._add_formatted_imports() - self.length_change = len(self.out_lines) - self.original_length - while self.out_lines and self.out_lines[-1].strip() == "": - self.out_lines.pop(-1) - self.out_lines.append("") - self.output = self.line_separator.join(self.out_lines) - if self.config['atomic']: - try: - compile(self._strip_top_comments(self.out_lines, self.line_separator), self.file_path, 'exec', 0, 1) - except SyntaxError: - self.output = file_contents - self.incorrectly_sorted = True - try: - compile(self._strip_top_comments(self.in_lines, self.line_separator), self.file_path, 'exec', 0, 1) - print("ERROR: {0} isort would have introduced syntax errors, please report to the project!". - format(self.file_path)) - except SyntaxError: - print("ERROR: {0} File contains syntax errors.".format(self.file_path)) - - return - if check: - check_output = self.output - check_against = file_contents - if self.config['ignore_whitespace']: - check_output = check_output.replace(self.line_separator, "").replace(" ", "").replace("\x0c", "") - check_against = check_against.replace(self.line_separator, "").replace(" ", "").replace("\x0c", "") - - if check_output.strip() == check_against.strip(): - if self.config['verbose']: - print("SUCCESS: {0} Everything Looks Good!".format(self.file_path)) - return - - print("ERROR: {0} Imports are incorrectly sorted.".format(self.file_path)) - self.incorrectly_sorted = True - if show_diff or self.config['show_diff']: - self._show_diff(file_contents) - elif write_to_stdout: - if sys.version_info[0] < 3: - self.output = self.output.encode(self.file_encoding) - sys.stdout.write(self.output) - elif file_name and not check: - if self.output == file_contents: - return - - if ask_to_apply: - self._show_diff(file_contents) - answer = None - while answer not in ('yes', 'y', 'no', 'n', 'quit', 'q'): - answer = input("Apply suggested changes to '{0}' [y/n/q]? 
".format(self.file_path)).lower() - if answer in ('no', 'n'): - return - if answer in ('quit', 'q'): - sys.exit(1) - with io.open(self.file_path, encoding=self.file_encoding, mode='w', newline='') as output_file: - if not self.config['quiet']: - print("Fixing {0}".format(self.file_path)) - output_file.write(self.output) - - @property - def correctly_sorted(self): - return not self.incorrectly_sorted - - def _show_diff(self, file_contents): - for line in unified_diff( - file_contents.splitlines(1), - self.output.splitlines(1), - fromfile=self.file_path + ':before', - tofile=self.file_path + ':after', - fromfiledate=str(datetime.fromtimestamp(os.path.getmtime(self.file_path)) - if self.file_path else datetime.now()), - tofiledate=str(datetime.now()) - ): - sys.stdout.write(line) - - @staticmethod - def _strip_top_comments(lines, line_separator): - """Strips # comments that exist at the top of the given lines""" - lines = copy.copy(lines) - while lines and lines[0].startswith("#"): - lines = lines[1:] - return line_separator.join(lines) - - def place_module(self, module_name): - """Tries to determine if a module is a python std import, third party import, or project code: - - if it can't determine - it assumes it is project code - - """ - return self.finder.find(module_name) - - def _get_line(self): - """Returns the current line from the file while incrementing the index.""" - line = self.in_lines[self.index] - self.index += 1 - return line - - @staticmethod - def _import_type(line): - """If the current line is an import line it will return its type (from or straight)""" - if "isort:skip" in line: - return - elif line.startswith('import '): - return "straight" - elif line.startswith('from '): - return "from" - - def _at_end(self): - """returns True if we are at the end of the file.""" - return self.index == self.number_of_lines - - @staticmethod - def _module_key(module_name, config, sub_imports=False, ignore_case=False, section_name=None): - match = re.match(r'^(\.+)\s*(.*)', module_name) - if match: - sep = ' ' if config['reverse_relative'] else '_' - module_name = sep.join(match.groups()) - - prefix = "" - if ignore_case: - module_name = str(module_name).lower() - else: - module_name = str(module_name) - - if sub_imports and config['order_by_type']: - if module_name.isupper() and len(module_name) > 1: - prefix = "A" - elif module_name[0:1].isupper(): - prefix = "B" - else: - prefix = "C" - if not config['case_sensitive']: - module_name = module_name.lower() - if section_name is None or 'length_sort_' + str(section_name).lower() not in config: - length_sort = config['length_sort'] - else: - length_sort = config['length_sort_' + str(section_name).lower()] - return "{0}{1}{2}".format(module_name in config['force_to_top'] and "A" or "B", prefix, - length_sort and (str(len(module_name)) + ":" + module_name) or module_name) - - def _add_comments(self, comments, original_string=""): - """ - Returns a string with comments added if ignore_comments is not set. - """ - - if not self.config['ignore_comments']: - return comments and "{0}{1} {2}".format(self._strip_comments(original_string)[0], - self.config['comment_prefix'], - "; ".join(comments)) or original_string - - return comments and self._strip_comments(original_string)[0] - - def _wrap(self, line): - """ - Returns an import wrapped to the specified line-length, if possible. 
- """ - wrap_mode = self.config['multi_line_output'] - if len(line) > self.config['line_length'] and wrap_mode != settings.WrapModes.NOQA: - line_without_comment = line - comment = None - if '#' in line: - line_without_comment, comment = line.split('#', 1) - for splitter in ("import ", ".", "as "): - exp = r"\b" + re.escape(splitter) + r"\b" - if re.search(exp, line_without_comment) and not line_without_comment.strip().startswith(splitter): - line_parts = re.split(exp, line_without_comment) - if comment: - line_parts[-1] = '{0}#{1}'.format(line_parts[-1], comment) - next_line = [] - while (len(line) + 2) > (self.config['wrap_length'] or self.config['line_length']) and line_parts: - next_line.append(line_parts.pop()) - line = splitter.join(line_parts) - if not line: - line = next_line.pop() - - cont_line = self._wrap(self.config['indent'] + splitter.join(next_line).lstrip()) - if self.config['use_parentheses']: - if splitter == "as ": - output = "{0}{1}{2}".format(line, splitter, cont_line.lstrip()) - else: - output = "{0}{1}({2}{3}{4}{5})".format( - line, splitter, self.line_separator, cont_line, - "," if self.config['include_trailing_comma'] else "", - self.line_separator if wrap_mode in (settings.WrapModes.VERTICAL_HANGING_INDENT, - settings.WrapModes.VERTICAL_GRID_GROUPED) - else "") - lines = output.split(self.line_separator) - if self.config['comment_prefix'] in lines[-1] and lines[-1].endswith(')'): - line, comment = lines[-1].split(self.config['comment_prefix'], 1) - lines[-1] = line + ')' + self.config['comment_prefix'] + comment[:-1] - return self.line_separator.join(lines) - return "{0}{1}\\{2}{3}".format(line, splitter, self.line_separator, cont_line) - elif len(line) > self.config['line_length'] and wrap_mode == settings.WrapModes.NOQA: - if "# NOQA" not in line: - return "{0}{1} NOQA".format(line, self.config['comment_prefix']) - - return line - - def _add_straight_imports(self, straight_modules, section, section_output): - for module in straight_modules: - if module in self.remove_imports: - continue - - if module in self.as_map: - import_definition = '' - if self.config['keep_direct_and_as_imports']: - import_definition = "import {0}\n".format(module) - import_definition += "import {0} as {1}".format(module, self.as_map[module]) - else: - import_definition = "import {0}".format(module) - - comments_above = self.comments['above']['straight'].pop(module, None) - if comments_above: - section_output.extend(comments_above) - section_output.append(self._add_comments(self.comments['straight'].get(module), import_definition)) - - def _add_from_imports(self, from_modules, section, section_output, ignore_case): - for module in from_modules: - if module in self.remove_imports: - continue - - import_start = "from {0} import ".format(module) - from_imports = list(self.imports[section]['from'][module]) - if not self.config['no_inline_sort'] or self.config['force_single_line']: - from_imports = nsorted(from_imports, key=lambda key: self._module_key(key, self.config, True, ignore_case, section_name=section)) - if self.remove_imports: - from_imports = [line for line in from_imports if not "{0}.{1}".format(module, line) in - self.remove_imports] - - sub_modules = ['{0}.{1}'.format(module, from_import) for from_import in from_imports] - as_imports = { - from_import: "{0} as {1}".format(from_import, self.as_map[sub_module]) - for from_import, sub_module in zip(from_imports, sub_modules) - if sub_module in self.as_map - } - if self.config['combine_as_imports'] and not ("*" in from_imports and 
self.config['combine_star']): - for from_import in copy.copy(from_imports): - if from_import in as_imports: - from_imports[from_imports.index(from_import)] = as_imports.pop(from_import) - - while from_imports: - comments = self.comments['from'].pop(module, ()) - if "*" in from_imports and self.config['combine_star']: - import_statement = self._wrap(self._add_comments(comments, "{0}*".format(import_start))) - from_imports = None - elif self.config['force_single_line']: - import_statements = [] - while from_imports: - from_import = from_imports.pop(0) - if from_import in as_imports: - from_comments = self.comments['straight'].get('{}.{}'.format(module, from_import)) - import_statements.append(self._add_comments(from_comments, - self._wrap(import_start + as_imports[from_import]))) - continue - single_import_line = self._add_comments(comments, import_start + from_import) - comment = self.comments['nested'].get(module, {}).pop(from_import, None) - if comment: - single_import_line += "{0} {1}".format(comments and ";" or self.config['comment_prefix'], - comment) - import_statements.append(self._wrap(single_import_line)) - comments = None - import_statement = self.line_separator.join(import_statements) - else: - while from_imports and from_imports[0] in as_imports: - from_import = from_imports.pop(0) - from_comments = self.comments['straight'].get('{}.{}'.format(module, from_import)) - above_comments = self.comments['above']['from'].pop(module, None) - if above_comments: - section_output.extend(above_comments) - - section_output.append(self._add_comments(from_comments, - self._wrap(import_start + as_imports[from_import]))) - - star_import = False - if "*" in from_imports: - section_output.append(self._add_comments(comments, "{0}*".format(import_start))) - from_imports.remove('*') - star_import = True - comments = None - - for from_import in copy.copy(from_imports): - if from_import in as_imports: - continue - comment = self.comments['nested'].get(module, {}).pop(from_import, None) - if comment: - single_import_line = self._add_comments(comments, import_start + from_import) - single_import_line += "{0} {1}".format(comments and ";" or self.config['comment_prefix'], - comment) - above_comments = self.comments['above']['from'].pop(module, None) - if above_comments: - section_output.extend(above_comments) - section_output.append(self._wrap(single_import_line)) - from_imports.remove(from_import) - comments = None - - from_import_section = [] - while from_imports and from_imports[0] not in as_imports: - from_import_section.append(from_imports.pop(0)) - if star_import: - import_statement = import_start + (", ").join(from_import_section) - else: - import_statement = self._add_comments(comments, import_start + (", ").join(from_import_section)) - if not from_import_section: - import_statement = "" - - do_multiline_reformat = False - - force_grid_wrap = self.config['force_grid_wrap'] - if force_grid_wrap and len(from_import_section) >= force_grid_wrap: - do_multiline_reformat = True - - if len(import_statement) > self.config['line_length'] and len(from_import_section) > 1: - do_multiline_reformat = True - - # If line too long AND have imports AND we are NOT using GRID or VERTICAL wrap modes - if (len(import_statement) > self.config['line_length'] and len(from_import_section) > 0 and - self.config['multi_line_output'] not in (1, 0)): - do_multiline_reformat = True - - if do_multiline_reformat: - import_statement = self._multi_line_reformat(import_start, from_import_section, comments) - if 
self.config['multi_line_output'] == 0: - self.config['multi_line_output'] = 4 - try: - other_import_statement = self._multi_line_reformat(import_start, from_import_section, comments) - if (max(len(x) - for x in import_statement.split('\n')) > self.config['line_length']): - import_statement = other_import_statement - finally: - self.config['multi_line_output'] = 0 - if not do_multiline_reformat and len(import_statement) > self.config['line_length']: - import_statement = self._wrap(import_statement) - - if import_statement: - above_comments = self.comments['above']['from'].pop(module, None) - if above_comments: - section_output.extend(above_comments) - section_output.append(import_statement) - - def _multi_line_reformat(self, import_start, from_imports, comments): - output_mode = settings.WrapModes._fields[self.config['multi_line_output']].lower() - formatter = getattr(self, "_output_" + output_mode, self._output_grid) - dynamic_indent = " " * (len(import_start) + 1) - indent = self.config['indent'] - line_length = self.config['wrap_length'] or self.config['line_length'] - import_statement = formatter(import_start, copy.copy(from_imports), - dynamic_indent, indent, line_length, comments) - if self.config['balanced_wrapping']: - lines = import_statement.split(self.line_separator) - line_count = len(lines) - if len(lines) > 1: - minimum_length = min(len(line) for line in lines[:-1]) - else: - minimum_length = 0 - new_import_statement = import_statement - while (len(lines[-1]) < minimum_length and - len(lines) == line_count and line_length > 10): - import_statement = new_import_statement - line_length -= 1 - new_import_statement = formatter(import_start, copy.copy(from_imports), - dynamic_indent, indent, line_length, comments) - lines = new_import_statement.split(self.line_separator) - if import_statement.count(self.line_separator) == 0: - return self._wrap(import_statement) - return import_statement - - def _add_formatted_imports(self): - """Adds the imports back to the file. 
- - (at the index of the first import) sorted alphabetically and split between groups - - """ - sort_ignore_case = self.config['force_alphabetical_sort_within_sections'] - sections = itertools.chain(self.sections, self.config['forced_separate']) - - if self.config['no_sections']: - self.imports['no_sections'] = {'straight': [], 'from': {}} - for section in sections: - self.imports['no_sections']['straight'].extend(self.imports[section].get('straight', [])) - self.imports['no_sections']['from'].update(self.imports[section].get('from', {})) - sections = ('no_sections', ) - - output = [] - pending_lines_before = False - for section in sections: - straight_modules = self.imports[section]['straight'] - straight_modules = nsorted(straight_modules, key=lambda key: self._module_key(key, self.config, section_name=section)) - from_modules = self.imports[section]['from'] - from_modules = nsorted(from_modules, key=lambda key: self._module_key(key, self.config, section_name=section)) - - section_output = [] - if self.config['from_first']: - self._add_from_imports(from_modules, section, section_output, sort_ignore_case) - if self.config['lines_between_types'] and from_modules and straight_modules: - section_output.extend([''] * self.config['lines_between_types']) - self._add_straight_imports(straight_modules, section, section_output) - else: - self._add_straight_imports(straight_modules, section, section_output) - if self.config['lines_between_types'] and from_modules and straight_modules: - section_output.extend([''] * self.config['lines_between_types']) - self._add_from_imports(from_modules, section, section_output, sort_ignore_case) - - if self.config['force_sort_within_sections']: - def by_module(line): - section = 'B' - if line.startswith('#'): - return 'AA' - - line = re.sub('^from ', '', line) - line = re.sub('^import ', '', line) - if line.split(' ')[0] in self.config['force_to_top']: - section = 'A' - if not self.config['order_by_type']: - line = line.lower() - return '{0}{1}'.format(section, line) - section_output = nsorted(section_output, key=by_module) - - section_name = section - no_lines_before = section_name in self.config['no_lines_before'] - - if section_output: - if section_name in self.place_imports: - self.place_imports[section_name] = section_output - continue - - section_title = self.config.get('import_heading_' + str(section_name).lower(), '') - if section_title: - section_comment = "# {0}".format(section_title) - if section_comment not in self.out_lines[0:1] and section_comment not in self.in_lines[0:1]: - section_output.insert(0, section_comment) - - if pending_lines_before or not no_lines_before: - output += ([''] * self.config['lines_between_sections']) - - output += section_output - - pending_lines_before = False - else: - pending_lines_before = pending_lines_before or not no_lines_before - - while output and output[-1].strip() == '': - output.pop() - while output and output[0].strip() == '': - output.pop(0) - - output_at = 0 - if self.import_index < self.original_length: - output_at = self.import_index - elif self._first_comment_index_end != -1 and self._first_comment_index_start <= 2: - output_at = self._first_comment_index_end - self.out_lines[output_at:0] = output - - imports_tail = output_at + len(output) - while [character.strip() for character in self.out_lines[imports_tail: imports_tail + 1]] == [""]: - self.out_lines.pop(imports_tail) - - if len(self.out_lines) > imports_tail: - next_construct = "" - self._in_quote = False - tail = self.out_lines[imports_tail:] - - 
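-            # Scan the lines that follow the inserted imports for the next meaningful
-            # construct, skipping leading comments and anything inside a multi-line
-            # string; whether that construct is a def/class/decorator determines how
-            # many blank lines are placed after the import block below.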
for index, line in enumerate(tail): - in_quote = self._in_quote - if not self._skip_line(line) and line.strip(): - if line.strip().startswith("#") and len(tail) > (index + 1) and tail[index + 1].strip(): - continue - next_construct = line - break - elif not in_quote: - parts = line.split() - if len(parts) >= 3 and parts[1] == '=' and "'" not in parts[0] and '"' not in parts[0]: - next_construct = line - break - - if self.config['lines_after_imports'] != -1: - self.out_lines[imports_tail:0] = ["" for line in range(self.config['lines_after_imports'])] - elif self.extension != "pyi" and (next_construct.startswith("def ") or - next_construct.startswith("class ") or - next_construct.startswith("@") or - next_construct.startswith("async def")): - self.out_lines[imports_tail:0] = ["", ""] - else: - self.out_lines[imports_tail:0] = [""] - - if self.place_imports: - new_out_lines = [] - for index, line in enumerate(self.out_lines): - new_out_lines.append(line) - if line in self.import_placements: - new_out_lines.extend(self.place_imports[self.import_placements[line]]) - if len(self.out_lines) <= index or self.out_lines[index + 1].strip() != "": - new_out_lines.append("") - self.out_lines = new_out_lines - - def _output_grid(self, statement, imports, white_space, indent, line_length, comments): - statement += "(" + imports.pop(0) - while imports: - next_import = imports.pop(0) - next_statement = self._add_comments(comments, statement + ", " + next_import) - if len(next_statement.split(self.line_separator)[-1]) + 1 > line_length: - lines = ['{0}{1}'.format(white_space, next_import.split(" ")[0])] - for part in next_import.split(" ")[1:]: - new_line = '{0} {1}'.format(lines[-1], part) - if len(new_line) + 1 > line_length: - lines.append('{0}{1}'.format(white_space, part)) - else: - lines[-1] = new_line - next_import = self.line_separator.join(lines) - statement = (self._add_comments(comments, "{0},".format(statement)) + - "{0}{1}".format(self.line_separator, next_import)) - comments = None - else: - statement += ", " + next_import - return statement + ("," if self.config['include_trailing_comma'] else "") + ")" - - def _output_vertical(self, statement, imports, white_space, indent, line_length, comments): - first_import = self._add_comments(comments, imports.pop(0) + ",") + self.line_separator + white_space - return "{0}({1}{2}{3})".format( - statement, - first_import, - ("," + self.line_separator + white_space).join(imports), - "," if self.config['include_trailing_comma'] else "", - ) - - def _output_hanging_indent(self, statement, imports, white_space, indent, line_length, comments): - statement += imports.pop(0) - while imports: - next_import = imports.pop(0) - next_statement = self._add_comments(comments, statement + ", " + next_import) - if len(next_statement.split(self.line_separator)[-1]) + 3 > line_length: - next_statement = (self._add_comments(comments, "{0}, \\".format(statement)) + - "{0}{1}{2}".format(self.line_separator, indent, next_import)) - comments = None - statement = next_statement - return statement - - def _output_vertical_hanging_indent(self, statement, imports, white_space, indent, line_length, comments): - return "{0}({1}{2}{3}{4}{5}{2})".format( - statement, - self._add_comments(comments), - self.line_separator, - indent, - ("," + self.line_separator + indent).join(imports), - "," if self.config['include_trailing_comma'] else "", - ) - - def _output_vertical_grid_common(self, statement, imports, white_space, indent, line_length, comments, - need_trailing_char): - statement += 
self._add_comments(comments, "(") + self.line_separator + indent + imports.pop(0) - while imports: - next_import = imports.pop(0) - next_statement = "{0}, {1}".format(statement, next_import) - current_line_length = len(next_statement.split(self.line_separator)[-1]) - if imports or need_trailing_char: - # If we have more imports we need to account for a comma after this import - # We might also need to account for a closing ) we're going to add. - current_line_length += 1 - if current_line_length > line_length: - next_statement = "{0},{1}{2}{3}".format(statement, self.line_separator, indent, next_import) - statement = next_statement - if self.config['include_trailing_comma']: - statement += ',' - return statement - - def _output_vertical_grid(self, statement, imports, white_space, indent, line_length, comments): - return self._output_vertical_grid_common(statement, imports, white_space, indent, line_length, comments, - True) + ")" - - def _output_vertical_grid_grouped(self, statement, imports, white_space, indent, line_length, comments): - return self._output_vertical_grid_common(statement, imports, white_space, indent, line_length, comments, - True) + self.line_separator + ")" - - def _output_vertical_grid_grouped_no_comma(self, statement, imports, white_space, indent, line_length, comments): - return self._output_vertical_grid_common(statement, imports, white_space, indent, line_length, comments, - False) + self.line_separator + ")" - - def _output_noqa(self, statement, imports, white_space, indent, line_length, comments): - retval = '{0}{1}'.format(statement, ', '.join(imports)) - comment_str = ' '.join(comments) - if comments: - if len(retval) + len(self.config['comment_prefix']) + 1 + len(comment_str) <= line_length: - return '{0}{1} {2}'.format(retval, self.config['comment_prefix'], comment_str) - else: - if len(retval) <= line_length: - return retval - if comments: - if "NOQA" in comments: - return '{0}{1} {2}'.format(retval, self.config['comment_prefix'], comment_str) - else: - return '{0}{1} NOQA {2}'.format(retval, self.config['comment_prefix'], comment_str) - else: - return '{0}{1} NOQA'.format(retval, self.config['comment_prefix']) - - @staticmethod - def _strip_comments(line, comments=None): - """Removes comments from import line.""" - if comments is None: - comments = [] - - new_comments = False - comment_start = line.find("#") - if comment_start != -1: - comments.append(line[comment_start + 1:].strip()) - new_comments = True - line = line[:comment_start] - - return line, comments, new_comments - - @staticmethod - def _format_simplified(import_line): - import_line = import_line.strip() - if import_line.startswith("from "): - import_line = import_line.replace("from ", "") - import_line = import_line.replace(" import ", ".") - elif import_line.startswith("import "): - import_line = import_line.replace("import ", "") - - return import_line - - @staticmethod - def _format_natural(import_line): - import_line = import_line.strip() - if not import_line.startswith("from ") and not import_line.startswith("import "): - if "." 
not in import_line: - return "import {0}".format(import_line) - parts = import_line.split(".") - end = parts.pop(-1) - return "from {0} import {1}".format(".".join(parts), end) - - return import_line - - def _skip_line(self, line): - skip_line = self._in_quote - if self.index == 1 and line.startswith("#"): - self._in_top_comment = True - return True - elif self._in_top_comment: - if not line.startswith("#") or line in self._section_comments: - self._in_top_comment = False - self._first_comment_index_end = self.index - 1 - - if '"' in line or "'" in line: - index = 0 - if self._first_comment_index_start == -1 and (line.startswith('"') or line.startswith("'")): - self._first_comment_index_start = self.index - while index < len(line): - if line[index] == "\\": - index += 1 - elif self._in_quote: - if line[index:index + len(self._in_quote)] == self._in_quote: - self._in_quote = False - if self._first_comment_index_end < self._first_comment_index_start: - self._first_comment_index_end = self.index - elif line[index] in ("'", '"'): - long_quote = line[index:index + 3] - if long_quote in ('"""', "'''"): - self._in_quote = long_quote - index += 2 - else: - self._in_quote = line[index] - elif line[index] == "#": - break - index += 1 - - return skip_line or self._in_quote or self._in_top_comment - - def _strip_syntax(self, import_string): - import_string = import_string.replace("_import", "[[i]]") - for remove_syntax in ['\\', '(', ')', ',']: - import_string = import_string.replace(remove_syntax, " ") - import_list = import_string.split() - for key in ('from', 'import'): - if key in import_list: - import_list.remove(key) - import_string = ' '.join(import_list) - import_string = import_string.replace("[[i]]", "_import") - return import_string.replace("{ ", "{|").replace(" }", "|}") - - def _parse(self): - """Parses a python file taking out and categorizing imports.""" - self._in_quote = False - self._in_top_comment = False - while not self._at_end(): - raw_line = line = self._get_line() - line = line.replace("from.import ", "from . import ") - line = line.replace("\t", " ").replace('import*', 'import *') - line = line.replace(" .import ", " . 
import ") - statement_index = self.index - skip_line = self._skip_line(line) - - if line in self._section_comments and not skip_line: - if self.import_index == -1: - self.import_index = self.index - 1 - continue - - if "isort:imports-" in line and line.startswith("#"): - section = line.split("isort:imports-")[-1].split()[0].upper() - self.place_imports[section] = [] - self.import_placements[line] = section - - if ";" in line: - for part in (part.strip() for part in line.split(";")): - if part and not part.startswith("from ") and not part.startswith("import "): - skip_line = True - - import_type = self._import_type(line) - if not import_type or skip_line: - self.out_lines.append(raw_line) - continue - - for line in (line.strip() for line in line.split(";")): - import_type = self._import_type(line) - if not import_type: - self.out_lines.append(line) - continue - - if self.import_index == -1: - self.import_index = self.index - 1 - nested_comments = {} - import_string, comments, new_comments = self._strip_comments(line) - stripped_line = [part for part in self._strip_syntax(import_string).strip().split(" ") if part] - if import_type == "from" and len(stripped_line) == 2 and stripped_line[1] != "*" and new_comments: - nested_comments[stripped_line[-1]] = comments[0] - - if "(" in line.split("#")[0] and not self._at_end(): - while not line.strip().endswith(")") and not self._at_end(): - line, comments, new_comments = self._strip_comments(self._get_line(), comments) - stripped_line = self._strip_syntax(line).strip() - if import_type == "from" and stripped_line and " " not in stripped_line and new_comments: - nested_comments[stripped_line] = comments[-1] - import_string += self.line_separator + line - else: - while line.strip().endswith("\\"): - line, comments, new_comments = self._strip_comments(self._get_line(), comments) - - # Still need to check for parentheses after an escaped line - if "(" in line.split("#")[0] and ")" not in line.split("#")[0] and not self._at_end(): - stripped_line = self._strip_syntax(line).strip() - if import_type == "from" and stripped_line and " " not in stripped_line and new_comments: - nested_comments[stripped_line] = comments[-1] - import_string += self.line_separator + line - - while not line.strip().endswith(")") and not self._at_end(): - line, comments, new_comments = self._strip_comments(self._get_line(), comments) - stripped_line = self._strip_syntax(line).strip() - if import_type == "from" and stripped_line and " " not in stripped_line and new_comments: - nested_comments[stripped_line] = comments[-1] - import_string += self.line_separator + line - - stripped_line = self._strip_syntax(line).strip() - if import_type == "from" and stripped_line and " " not in stripped_line and new_comments: - nested_comments[stripped_line] = comments[-1] - if import_string.strip().endswith(" import") or line.strip().startswith("import "): - import_string += self.line_separator + line - else: - import_string = import_string.rstrip().rstrip("\\") + " " + line.lstrip() - - if import_type == "from": - import_string = import_string.replace("import(", "import (") - parts = import_string.split(" import ") - from_import = parts[0].split(" ") - import_string = " import ".join([from_import[0] + " " + "".join(from_import[1:])] + parts[1:]) - - imports = [item.replace("{|", "{ ").replace("|}", " }") for item in - self._strip_syntax(import_string).split()] - if "as" in imports and (imports.index('as') + 1) < len(imports): - while "as" in imports: - index = imports.index('as') - if import_type 
== "from": - module = imports[0] + "." + imports[index - 1] - self.as_map[module] = imports[index + 1] - else: - module = imports[index - 1] - self.as_map[module] = imports[index + 1] - if not self.config['combine_as_imports']: - self.comments['straight'][module] = comments - comments = [] - del imports[index:index + 2] - if import_type == "from": - import_from = imports.pop(0) - placed_module = self.place_module(import_from) - if self.config['verbose']: - print("from-type place_module for %s returned %s" % (import_from, placed_module)) - if placed_module == '': - print( - "WARNING: could not place module {0} of line {1} --" - " Do you need to define a default section?".format(import_from, line) - ) - root = self.imports[placed_module][import_type] - for import_name in imports: - associated_comment = nested_comments.get(import_name) - if associated_comment: - self.comments['nested'].setdefault(import_from, {})[import_name] = associated_comment - comments.pop(comments.index(associated_comment)) - if comments: - self.comments['from'].setdefault(import_from, []).extend(comments) - - if len(self.out_lines) > max(self.import_index, self._first_comment_index_end + 1, 1) - 1: - last = self.out_lines and self.out_lines[-1].rstrip() or "" - while (last.startswith("#") and not last.endswith('"""') and not last.endswith("'''") and - 'isort:imports-' not in last): - self.comments['above']['from'].setdefault(import_from, []).insert(0, self.out_lines.pop(-1)) - if len(self.out_lines) > max(self.import_index - 1, self._first_comment_index_end + 1, 1) - 1: - last = self.out_lines[-1].rstrip() - else: - last = "" - if statement_index - 1 == self.import_index: - self.import_index -= len(self.comments['above']['from'].get(import_from, [])) - - if import_from not in root: - root[import_from] = OrderedDict() - root[import_from].update((module, None) for module in imports) - else: - for module in imports: - if comments: - self.comments['straight'][module] = comments - comments = None - - if len(self.out_lines) > max(self.import_index, self._first_comment_index_end + 1, 1) - 1: - - last = self.out_lines and self.out_lines[-1].rstrip() or "" - while (last.startswith("#") and not last.endswith('"""') and not last.endswith("'''") and - 'isort:imports-' not in last): - self.comments['above']['straight'].setdefault(module, []).insert(0, - self.out_lines.pop(-1)) - if len(self.out_lines) > 0 and len(self.out_lines) != self._first_comment_index_end: - last = self.out_lines[-1].rstrip() - else: - last = "" - if self.index - 1 == self.import_index: - self.import_index -= len(self.comments['above']['straight'].get(module, [])) - placed_module = self.place_module(module) - if self.config['verbose']: - print("else-type place_module for %s returned %s" % (module, placed_module)) - if placed_module == '': - print( - "WARNING: could not place module {0} of line {1} --" - " Do you need to define a default section?".format(import_from, line) - ) - self.imports[placed_module][import_type][module] = None - - -def coding_check(lines, default='utf-8'): - - # see https://www.python.org/dev/peps/pep-0263/ - pattern = re.compile(br'coding[:=]\s*([-\w.]+)') - - for line_number, line in enumerate(lines, 1): - groups = re.findall(pattern, line) - if groups: - return groups[0].decode('ascii') - if line_number > 2: - break - - return default diff --git a/venv/lib/python3.8/site-packages/isort/natural.py b/venv/lib/python3.8/site-packages/isort/natural.py deleted file mode 100644 index c02b42c3..00000000 --- 
a/venv/lib/python3.8/site-packages/isort/natural.py +++ /dev/null @@ -1,47 +0,0 @@ -"""isort/natural.py. - -Enables sorting strings that contain numbers naturally - -usage: - natural.nsorted(list) - -Copyright (C) 2013 Timothy Edmund Crosley - -Implementation originally from @HappyLeapSecond stack overflow user in response to: - https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -""" -import re - - -def _atoi(text): - return int(text) if text.isdigit() else text - - -def _natural_keys(text): - return [_atoi(c) for c in re.split(r'(\d+)', text)] - - -def nsorted(to_sort, key=None): - """Returns a naturally sorted list""" - if key is None: - key_callback = _natural_keys - else: - def key_callback(item): - return _natural_keys(key(item)) - - return sorted(to_sort, key=key_callback) diff --git a/venv/lib/python3.8/site-packages/isort/pie_slice.py b/venv/lib/python3.8/site-packages/isort/pie_slice.py deleted file mode 100644 index 569ea76e..00000000 --- a/venv/lib/python3.8/site-packages/isort/pie_slice.py +++ /dev/null @@ -1,154 +0,0 @@ -"""pie_slice/overrides.py. - -Overrides Python syntax to conform to the Python3 version as much as possible using a '*' import - -Copyright (C) 2013 Timothy Edmund Crosley - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. 
- -""" -from __future__ import absolute_import - -import collections -import sys - -__version__ = "1.1.0" - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -VERSION = sys.version_info - -__all__ = ['PY2', 'PY3', 'lru_cache', 'apply_changes_to_python_environment'] - - -if PY3: - input = input - - def apply_changes_to_python_environment(): - pass -else: - input = raw_input # noqa: F821 - - python_environment_changes_applied = False - - import sys - stdout = sys.stdout - stderr = sys.stderr - - def apply_changes_to_python_environment(): - global python_environment_changes_applied - if python_environment_changes_applied or sys.getdefaultencoding() == 'utf-8': - python_environment_changes_applied = True - return - - try: - reload(sys) - sys.stdout = stdout - sys.stderr = stderr - sys.setdefaultencoding('utf-8') - except NameError: # Python 3 - sys.exit('This should not happen!') - - python_environment_changes_applied = True - - -if sys.version_info < (3, 2): - try: - from threading import Lock - except ImportError: - from dummy_threading import Lock - - from functools import wraps - - _CacheInfo = collections.namedtuple("CacheInfo", "hits misses maxsize currsize") - - def lru_cache(maxsize=100): - """Least-recently-used cache decorator. - Taking from: https://github.com/MiCHiLU/python-functools32/blob/master/functools32/functools32.py - with slight modifications. - If *maxsize* is set to None, the LRU features are disabled and the cache - can grow without bound. - Arguments to the cached function must be hashable. - View the cache statistics named tuple (hits, misses, maxsize, currsize) with - f.cache_info(). Clear the cache and statistics with f.cache_clear(). - Access the underlying function with f.__wrapped__. - See: https://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used - - """ - def decorating_function(user_function, tuple=tuple, sorted=sorted, len=len, KeyError=KeyError): - hits, misses = [0], [0] - kwd_mark = (object(),) # separates positional and keyword args - lock = Lock() - - if maxsize is None: - CACHE = {} - - @wraps(user_function) - def wrapper(*args, **kwds): - key = args - if kwds: - key += kwd_mark + tuple(sorted(kwds.items())) - try: - result = CACHE[key] - hits[0] += 1 - return result - except KeyError: - pass - result = user_function(*args, **kwds) - CACHE[key] = result - misses[0] += 1 - return result - else: - CACHE = collections.OrderedDict() - - @wraps(user_function) - def wrapper(*args, **kwds): - key = args - if kwds: - key += kwd_mark + tuple(sorted(kwds.items())) - with lock: - cached = CACHE.get(key, None) - if cached: - del CACHE[key] - CACHE[key] = cached - hits[0] += 1 - return cached - result = user_function(*args, **kwds) - with lock: - CACHE[key] = result # record recent use of this key - misses[0] += 1 - while len(CACHE) > maxsize: - CACHE.popitem(last=False) - return result - - def cache_info(): - """Report CACHE statistics.""" - with lock: - return _CacheInfo(hits[0], misses[0], maxsize, len(CACHE)) - - def cache_clear(): - """Clear the CACHE and CACHE statistics.""" - with lock: - CACHE.clear() - hits[0] = misses[0] = 0 - - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear - return wrapper - - return decorating_function - -else: - from functools import lru_cache diff --git a/venv/lib/python3.8/site-packages/isort/pylama_isort.py b/venv/lib/python3.8/site-packages/isort/pylama_isort.py deleted file mode 100644 index 6fa235f9..00000000 --- a/venv/lib/python3.8/site-packages/isort/pylama_isort.py +++ /dev/null @@ 
-1,29 +0,0 @@ -import os -import sys - -from pylama.lint import Linter as BaseLinter - -from .isort import SortImports - - -class Linter(BaseLinter): - - def allow(self, path): - """Determine if this path should be linted.""" - return path.endswith('.py') - - def run(self, path, **meta): - """Lint the file. Return an array of error dicts if appropriate.""" - with open(os.devnull, 'w') as devnull: - # Suppress isort messages - sys.stdout = devnull - - if SortImports(path, check=True).incorrectly_sorted: - return [{ - 'lnum': 0, - 'col': 0, - 'text': 'Incorrectly sorted imports.', - 'type': 'ISORT' - }] - else: - return [] diff --git a/venv/lib/python3.8/site-packages/isort/settings.py b/venv/lib/python3.8/site-packages/isort/settings.py deleted file mode 100644 index a69471e8..00000000 --- a/venv/lib/python3.8/site-packages/isort/settings.py +++ /dev/null @@ -1,356 +0,0 @@ -"""isort/settings.py. - -Defines how the default settings for isort should be loaded - -(First from the default setting dictionary at the top of the file, then overridden by any settings - in ~/.isort.cfg or $XDG_CONFIG_HOME/isort.cfg if there are any) - -Copyright (C) 2013 Timothy Edmund Crosley - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and -to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or -substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED -TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -""" -from __future__ import absolute_import, division, print_function, unicode_literals - -import fnmatch -import io -import os -import posixpath -import re -import sys -import warnings -from collections import namedtuple -from distutils.util import strtobool - -from .pie_slice import lru_cache -from .utils import difference, union - -try: - import configparser -except ImportError: - import ConfigParser as configparser - -try: - import toml -except ImportError: - toml = False - -try: - import appdirs - if appdirs.system == 'darwin': - appdirs.system = 'linux2' -except ImportError: - appdirs = None - -MAX_CONFIG_SEARCH_DEPTH = 25 # The number of parent directories isort will look for a config file within -DEFAULT_SECTIONS = ('FUTURE', 'STDLIB', 'THIRDPARTY', 'FIRSTPARTY', 'LOCALFOLDER') - -safety_exclude_re = re.compile( - r"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist|\.pants\.d" - r"|lib/python[0-9].[0-9]+)/" -) - -WrapModes = ('GRID', 'VERTICAL', 'HANGING_INDENT', 'VERTICAL_HANGING_INDENT', 'VERTICAL_GRID', 'VERTICAL_GRID_GROUPED', - 'VERTICAL_GRID_GROUPED_NO_COMMA', 'NOQA') -WrapModes = namedtuple('WrapModes', WrapModes)(*range(len(WrapModes))) - -# Note that none of these lists must be complete as they are simply fallbacks for when included auto-detection fails. 
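-# For illustration (the project name and values here are hypothetical): a
-# minimal .isort.cfg found by from_path() below is merged over these
-# defaults, e.g.:
-#
-#     [settings]
-#     line_length = 100
-#     not_skip = __init__.py
-#     known_first_party = myproject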
-default = {'force_to_top': [], - 'skip': [], - 'skip_glob': [], - 'line_length': 79, - 'wrap_length': 0, - 'line_ending': None, - 'sections': DEFAULT_SECTIONS, - 'no_sections': False, - 'known_future_library': ['__future__'], - 'known_standard_library': ['AL', 'BaseHTTPServer', 'Bastion', 'CGIHTTPServer', 'Carbon', 'ColorPicker', - 'ConfigParser', 'Cookie', 'DEVICE', 'DocXMLRPCServer', 'EasyDialogs', 'FL', - 'FrameWork', 'GL', 'HTMLParser', 'MacOS', 'MimeWriter', 'MiniAEFrame', 'Nav', - 'PixMapWrapper', 'Queue', 'SUNAUDIODEV', 'ScrolledText', 'SimpleHTTPServer', - 'SimpleXMLRPCServer', 'SocketServer', 'StringIO', 'Tix', 'Tkinter', 'UserDict', - 'UserList', 'UserString', 'W', '__builtin__', 'abc', 'aepack', 'aetools', - 'aetypes', 'aifc', 'al', 'anydbm', 'applesingle', 'argparse', 'array', 'ast', - 'asynchat', 'asyncio', 'asyncore', 'atexit', 'audioop', 'autoGIL', 'base64', - 'bdb', 'binascii', 'binhex', 'bisect', 'bsddb', 'buildtools', 'builtins', - 'bz2', 'cPickle', 'cProfile', 'cStringIO', 'calendar', 'cd', 'cfmfile', 'cgi', - 'cgitb', 'chunk', 'cmath', 'cmd', 'code', 'codecs', 'codeop', 'collections', - 'colorsys', 'commands', 'compileall', 'compiler', 'concurrent', 'configparser', - 'contextlib', 'contextvars', 'cookielib', 'copy', 'copy_reg', 'copyreg', 'crypt', 'csv', - 'ctypes', 'curses', 'dataclasses', 'datetime', 'dbhash', 'dbm', 'decimal', 'difflib', - 'dircache', 'dis', 'distutils', 'dl', 'doctest', 'dumbdbm', 'dummy_thread', - 'dummy_threading', 'email', 'encodings', 'ensurepip', 'enum', 'errno', - 'exceptions', 'faulthandler', 'fcntl', 'filecmp', 'fileinput', 'findertools', - 'fl', 'flp', 'fm', 'fnmatch', 'formatter', 'fpectl', 'fpformat', 'fractions', - 'ftplib', 'functools', 'future_builtins', 'gc', 'gdbm', 'gensuitemodule', - 'getopt', 'getpass', 'gettext', 'gl', 'glob', 'grp', 'gzip', 'hashlib', - 'heapq', 'hmac', 'hotshot', 'html', 'htmlentitydefs', 'htmllib', 'http', - 'httplib', 'ic', 'icopen', 'imageop', 'imaplib', 'imgfile', 'imghdr', 'imp', - 'importlib', 'imputil', 'inspect', 'io', 'ipaddress', 'itertools', 'jpeg', - 'json', 'keyword', 'lib2to3', 'linecache', 'locale', 'logging', 'lzma', - 'macerrors', 'macostools', 'macpath', 'macresource', 'mailbox', 'mailcap', - 'marshal', 'math', 'md5', 'mhlib', 'mimetools', 'mimetypes', 'mimify', 'mmap', - 'modulefinder', 'msilib', 'msvcrt', 'multifile', 'multiprocessing', 'mutex', - 'netrc', 'new', 'nis', 'nntplib', 'numbers', 'operator', 'optparse', 'os', - 'ossaudiodev', 'parser', 'pathlib', 'pdb', 'pickle', 'pickletools', 'pipes', - 'pkgutil', 'platform', 'plistlib', 'popen2', 'poplib', 'posix', 'posixfile', - 'pprint', 'profile', 'pstats', 'pty', 'pwd', 'py_compile', 'pyclbr', 'pydoc', - 'queue', 'quopri', 'random', 're', 'readline', 'reprlib', 'resource', 'rexec', - 'rfc822', 'rlcompleter', 'robotparser', 'runpy', 'sched', 'secrets', 'select', - 'selectors', 'sets', 'sgmllib', 'sha', 'shelve', 'shlex', 'shutil', 'signal', - 'site', 'sitecustomize', 'smtpd', 'smtplib', 'sndhdr', 'socket', 'socketserver', - 'spwd', 'sqlite3', 'ssl', 'stat', 'statistics', 'statvfs', 'string', 'stringprep', - 'struct', 'subprocess', 'sunau', 'sunaudiodev', 'symbol', 'symtable', 'sys', - 'sysconfig', 'syslog', 'tabnanny', 'tarfile', 'telnetlib', 'tempfile', 'termios', - 'test', 'textwrap', 'this', 'thread', 'threading', 'time', 'timeit', 'tkinter', - 'token', 'tokenize', 'trace', 'traceback', 'tracemalloc', 'ttk', 'tty', 'turtle', - 'turtledemo', 'types', 'typing', 'unicodedata', 'unittest', 'urllib', 'urllib2', - 'urlparse', 'usercustomize', 
'uu', 'uuid', 'venv', 'videoreader', - 'warnings', 'wave', 'weakref', 'webbrowser', 'whichdb', 'winreg', 'winsound', - 'wsgiref', 'xdrlib', 'xml', 'xmlrpc', 'xmlrpclib', 'zipapp', 'zipfile', - 'zipimport', 'zlib'], - 'known_third_party': ['google.appengine.api'], - 'known_first_party': [], - 'multi_line_output': WrapModes.GRID, - 'forced_separate': [], - 'indent': ' ' * 4, - 'comment_prefix': ' #', - 'length_sort': False, - 'add_imports': [], - 'remove_imports': [], - 'reverse_relative': False, - 'force_single_line': False, - 'default_section': 'FIRSTPARTY', - 'import_heading_future': '', - 'import_heading_stdlib': '', - 'import_heading_thirdparty': '', - 'import_heading_firstparty': '', - 'import_heading_localfolder': '', - 'balanced_wrapping': False, - 'use_parentheses': False, - 'order_by_type': True, - 'atomic': False, - 'lines_after_imports': -1, - 'lines_between_sections': 1, - 'lines_between_types': 0, - 'combine_as_imports': False, - 'combine_star': False, - 'keep_direct_and_as_imports': False, - 'include_trailing_comma': False, - 'from_first': False, - 'verbose': False, - 'quiet': False, - 'force_adds': False, - 'force_alphabetical_sort_within_sections': False, - 'force_alphabetical_sort': False, - 'force_grid_wrap': 0, - 'force_sort_within_sections': False, - 'show_diff': False, - 'ignore_whitespace': False, - 'no_lines_before': [], - 'no_inline_sort': False, - 'ignore_comments': False, - 'safety_excludes': True, - 'case_sensitive': False} - - -@lru_cache() -def from_path(path): - computed_settings = default.copy() - isort_defaults = ['~/.isort.cfg'] - if appdirs: - isort_defaults = [appdirs.user_config_dir('isort.cfg')] + isort_defaults - - _update_settings_with_config(path, '.editorconfig', ['~/.editorconfig'], ('*', '*.py', '**.py'), computed_settings) - _update_settings_with_config(path, 'pyproject.toml', [], ('tool.isort', ), computed_settings) - _update_settings_with_config(path, '.isort.cfg', isort_defaults, ('settings', 'isort'), computed_settings) - _update_settings_with_config(path, 'setup.cfg', [], ('isort', 'tool:isort'), computed_settings) - _update_settings_with_config(path, 'tox.ini', [], ('isort', 'tool:isort'), computed_settings) - return computed_settings - - -def _update_settings_with_config(path, name, default, sections, computed_settings): - editor_config_file = None - for potential_settings_path in default: - expanded = os.path.expanduser(potential_settings_path) - if os.path.exists(expanded): - editor_config_file = expanded - break - - tries = 0 - current_directory = path - while current_directory and tries < MAX_CONFIG_SEARCH_DEPTH: - potential_path = os.path.join(current_directory, str(name)) - if os.path.exists(potential_path): - editor_config_file = potential_path - break - - new_directory = os.path.split(current_directory)[0] - if current_directory == new_directory: - break - current_directory = new_directory - tries += 1 - - if editor_config_file and os.path.exists(editor_config_file): - _update_with_config_file(editor_config_file, sections, computed_settings) - - -def _update_with_config_file(file_path, sections, computed_settings): - cwd = os.path.dirname(file_path) - settings = _get_config_data(file_path, sections).copy() - if not settings: - return - - if file_path.endswith('.editorconfig'): - indent_style = settings.pop('indent_style', '').strip() - indent_size = settings.pop('indent_size', '').strip() - if indent_size == "tab": - indent_size = settings.pop('tab_width', '').strip() - - if indent_style == 'space': - computed_settings['indent'] = ' 
' * (indent_size and int(indent_size) or 4) - elif indent_style == 'tab': - computed_settings['indent'] = '\t' * (indent_size and int(indent_size) or 1) - - max_line_length = settings.pop('max_line_length', '').strip() - if max_line_length: - computed_settings['line_length'] = float('inf') if max_line_length == 'off' else int(max_line_length) - - for key, value in settings.items(): - access_key = key.replace('not_', '').lower() - existing_value_type = type(default.get(access_key, '')) - if existing_value_type in (list, tuple): - # sections has fixed order values; no adding or subtraction from any set - if access_key == 'sections': - computed_settings[access_key] = tuple(_as_list(value)) - else: - existing_data = set(computed_settings.get(access_key, default.get(access_key))) - if key.startswith('not_'): - computed_settings[access_key] = difference(existing_data, _as_list(value)) - elif key.startswith('known_'): - computed_settings[access_key] = union(existing_data, _abspaths(cwd, _as_list(value))) - else: - computed_settings[access_key] = union(existing_data, _as_list(value)) - elif existing_value_type == bool: - # Only some configuration formats support native boolean values. - if not isinstance(value, bool): - value = bool(strtobool(value)) - computed_settings[access_key] = value - elif key.startswith('known_'): - computed_settings[access_key] = list(_abspaths(cwd, _as_list(value))) - elif key == 'force_grid_wrap': - try: - result = existing_value_type(value) - except ValueError: - # backwards compat - result = default.get(access_key) if value.lower().strip() == 'false' else 2 - computed_settings[access_key] = result - else: - computed_settings[access_key] = existing_value_type(value) - - -def _as_list(value): - if not isinstance(value, list): - value = value.replace('\n', ',').split(',') - - return filter(bool, [item.strip() for item in value]) - - -def _abspaths(cwd, values): - paths = [ - os.path.join(cwd, value) - if not value.startswith(os.path.sep) and value.endswith(os.path.sep) - else value - for value in values - ] - return paths - - -@lru_cache() -def _get_config_data(file_path, sections): - settings = {} - - with io.open(file_path) as config_file: - if file_path.endswith('.toml'): - if toml: - config = toml.load(config_file) - for section in sections: - config_section = config - for key in section.split('.'): - config_section = config_section.get(key, {}) - settings.update(config_section) - else: - if '[tool.isort]' in config_file.read(): - warnings.warn("Found {} with [tool.isort] section, but toml package is not installed. 
" - "To configure isort with {}, install with 'isort[pyproject]'.".format(file_path, - file_path)) - else: - if file_path.endswith('.editorconfig'): - line = '\n' - last_position = config_file.tell() - while line: - line = config_file.readline() - if '[' in line: - config_file.seek(last_position) - break - last_position = config_file.tell() - - if sys.version_info >= (3, 2): - config = configparser.ConfigParser(strict=False) - config.read_file(config_file) - else: - config = configparser.SafeConfigParser() - config.readfp(config_file) - - for section in sections: - if config.has_section(section): - settings.update(config.items(section)) - - return settings - - -def should_skip(filename, config, path=''): - """Returns True if the file and/or folder should be skipped based on the passed in settings.""" - os_path = os.path.join(path, filename) - - normalized_path = os_path.replace('\\', '/') - if normalized_path[1:2] == ':': - normalized_path = normalized_path[2:] - - if path and config['safety_excludes']: - check_exclude = '/' + filename.replace('\\', '/') + '/' - if path and os.path.basename(path) in ('lib', ): - check_exclude = '/' + os.path.basename(path) + check_exclude - if safety_exclude_re.search(check_exclude): - return True - - for skip_path in config['skip']: - if posixpath.abspath(normalized_path) == posixpath.abspath(skip_path.replace('\\', '/')): - return True - - position = os.path.split(filename) - while position[1]: - if position[1] in config['skip']: - return True - position = os.path.split(position[0]) - - for glob in config['skip_glob']: - if fnmatch.fnmatch(filename, glob) or fnmatch.fnmatch('/' + filename, glob): - return True - - if not (os.path.isfile(os_path) or os.path.isdir(os_path) or os.path.islink(os_path)): - return True - - return False diff --git a/venv/lib/python3.8/site-packages/isort/utils.py b/venv/lib/python3.8/site-packages/isort/utils.py deleted file mode 100644 index ce4e588e..00000000 --- a/venv/lib/python3.8/site-packages/isort/utils.py +++ /dev/null @@ -1,53 +0,0 @@ -import os -import sys -from contextlib import contextmanager - - -def exists_case_sensitive(path): - """ - Returns if the given path exists and also matches the case on Windows. - - When finding files that can be imported, it is important for the cases to match because while - file os.path.exists("module.py") and os.path.exists("MODULE.py") both return True on Windows, Python - can only import using the case of the real file. - """ - result = os.path.exists(path) - if (sys.platform.startswith('win') or sys.platform == 'darwin') and result: - directory, basename = os.path.split(path) - result = basename in os.listdir(directory) - return result - - -@contextmanager -def chdir(path): - """Context manager for changing dir and restoring previous workdir after exit. - """ - curdir = os.getcwd() - os.chdir(path) - try: - yield - finally: - os.chdir(curdir) - - -def union(a, b): - """ Return a list of items that are in `a` or `b` - """ - u = [] - for item in a: - if item not in u: - u.append(item) - for item in b: - if item not in u: - u.append(item) - return u - - -def difference(a, b): - """ Return a list of items from `a` that are not in `b`. 
- """ - d = [] - for item in a: - if item not in b: - d.append(item) - return d diff --git a/venv/lib/python3.8/site-packages/jinja2/__init__.py b/venv/lib/python3.8/site-packages/jinja2/__init__.py deleted file mode 100644 index 7f4a1c55..00000000 --- a/venv/lib/python3.8/site-packages/jinja2/__init__.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -"""Jinja is a template engine written in pure Python. It provides a -non-XML syntax that supports inline expressions and an optional -sandboxed environment. -""" -from markupsafe import escape -from markupsafe import Markup - -from .bccache import BytecodeCache -from .bccache import FileSystemBytecodeCache -from .bccache import MemcachedBytecodeCache -from .environment import Environment -from .environment import Template -from .exceptions import TemplateAssertionError -from .exceptions import TemplateError -from .exceptions import TemplateNotFound -from .exceptions import TemplateRuntimeError -from .exceptions import TemplatesNotFound -from .exceptions import TemplateSyntaxError -from .exceptions import UndefinedError -from .filters import contextfilter -from .filters import environmentfilter -from .filters import evalcontextfilter -from .loaders import BaseLoader -from .loaders import ChoiceLoader -from .loaders import DictLoader -from .loaders import FileSystemLoader -from .loaders import FunctionLoader -from .loaders import ModuleLoader -from .loaders import PackageLoader -from .loaders import PrefixLoader -from .runtime import ChainableUndefined -from .runtime import DebugUndefined -from .runtime import make_logging_undefined -from .runtime import StrictUndefined -from .runtime import Undefined -from .utils import clear_caches -from .utils import contextfunction -from .utils import environmentfunction -from .utils import evalcontextfunction -from .utils import is_undefined -from .utils import select_autoescape - -__version__ = "2.11.1" diff --git a/venv/lib/python3.8/site-packages/jinja2/_compat.py b/venv/lib/python3.8/site-packages/jinja2/_compat.py deleted file mode 100644 index 1f044954..00000000 --- a/venv/lib/python3.8/site-packages/jinja2/_compat.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# flake8: noqa -import marshal -import sys - -PY2 = sys.version_info[0] == 2 -PYPY = hasattr(sys, "pypy_translation_info") -_identity = lambda x: x - -if not PY2: - unichr = chr - range_type = range - text_type = str - string_types = (str,) - integer_types = (int,) - - iterkeys = lambda d: iter(d.keys()) - itervalues = lambda d: iter(d.values()) - iteritems = lambda d: iter(d.items()) - - import pickle - from io import BytesIO, StringIO - - NativeStringIO = StringIO - - def reraise(tp, value, tb=None): - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - - ifilter = filter - imap = map - izip = zip - intern = sys.intern - - implements_iterator = _identity - implements_to_string = _identity - encode_filename = _identity - - marshal_dump = marshal.dump - marshal_load = marshal.load - -else: - unichr = unichr - text_type = unicode - range_type = xrange - string_types = (str, unicode) - integer_types = (int, long) - - iterkeys = lambda d: d.iterkeys() - itervalues = lambda d: d.itervalues() - iteritems = lambda d: d.iteritems() - - import cPickle as pickle - from cStringIO import StringIO as BytesIO, StringIO - - NativeStringIO = BytesIO - - exec("def reraise(tp, value, tb=None):\n raise tp, value, tb") - - from itertools import imap, izip, ifilter - - intern = intern - - def 
implements_iterator(cls): - cls.next = cls.__next__ - del cls.__next__ - return cls - - def implements_to_string(cls): - cls.__unicode__ = cls.__str__ - cls.__str__ = lambda x: x.__unicode__().encode("utf-8") - return cls - - def encode_filename(filename): - if isinstance(filename, unicode): - return filename.encode("utf-8") - return filename - - def marshal_dump(code, f): - if isinstance(f, file): - marshal.dump(code, f) - else: - f.write(marshal.dumps(code)) - - def marshal_load(f): - if isinstance(f, file): - return marshal.load(f) - return marshal.loads(f.read()) - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a - # dummy metaclass for one level of class instantiation that replaces - # itself with the actual metaclass. - class metaclass(type): - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - - return type.__new__(metaclass, "temporary_class", (), {}) - - -try: - from urllib.parse import quote_from_bytes as url_quote -except ImportError: - from urllib import quote as url_quote - - -try: - from collections import abc -except ImportError: - import collections as abc - - -try: - from os import fspath -except ImportError: - try: - from pathlib import PurePath - except ImportError: - PurePath = None - - def fspath(path): - if hasattr(path, "__fspath__"): - return path.__fspath__() - - # Python 3.5 doesn't have __fspath__ yet, use str. - if PurePath is not None and isinstance(path, PurePath): - return str(path) - - return path diff --git a/venv/lib/python3.8/site-packages/jinja2/_identifier.py b/venv/lib/python3.8/site-packages/jinja2/_identifier.py deleted file mode 100644 index 224d5449..00000000 --- a/venv/lib/python3.8/site-packages/jinja2/_identifier.py +++ /dev/null @@ -1,6 +0,0 @@ -import re - -# generated by scripts/generate_identifier_pattern.py -pattern = re.compile( - r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950 -) diff --git a/venv/lib/python3.8/site-packages/jinja2/asyncfilters.py b/venv/lib/python3.8/site-packages/jinja2/asyncfilters.py deleted file mode 100644 index d29f6c62..00000000 --- a/venv/lib/python3.8/site-packages/jinja2/asyncfilters.py +++ /dev/null @@ -1,159 +0,0 @@ -from functools import wraps - -from . 
import filters -from .asyncsupport import auto_aiter -from .asyncsupport import auto_await - - -async def auto_to_seq(value): - seq = [] - if hasattr(value, "__aiter__"): - async for item in value: - seq.append(item) - else: - for item in value: - seq.append(item) - return seq - - -async def async_select_or_reject(args, kwargs, modfunc, lookup_attr): - seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr) - if seq: - async for item in auto_aiter(seq): - if func(item): - yield item - - -def dualfilter(normal_filter, async_filter): - wrap_evalctx = False - if getattr(normal_filter, "environmentfilter", False): - - def is_async(args): - return args[0].is_async - - wrap_evalctx = False - else: - if not getattr(normal_filter, "evalcontextfilter", False) and not getattr( - normal_filter, "contextfilter", False - ): - wrap_evalctx = True - - def is_async(args): - return args[0].environment.is_async - - @wraps(normal_filter) - def wrapper(*args, **kwargs): - b = is_async(args) - if wrap_evalctx: - args = args[1:] - if b: - return async_filter(*args, **kwargs) - return normal_filter(*args, **kwargs) - - if wrap_evalctx: - wrapper.evalcontextfilter = True - - wrapper.asyncfiltervariant = True - - return wrapper - - -def asyncfiltervariant(original): - def decorator(f): - return dualfilter(original, f) - - return decorator - - -@asyncfiltervariant(filters.do_first) -async def do_first(environment, seq): - try: - return await auto_aiter(seq).__anext__() - except StopAsyncIteration: - return environment.undefined("No first item, sequence was empty.") - - -@asyncfiltervariant(filters.do_groupby) -async def do_groupby(environment, value, attribute): - expr = filters.make_attrgetter(environment, attribute) - return [ - filters._GroupTuple(key, await auto_to_seq(values)) - for key, values in filters.groupby( - sorted(await auto_to_seq(value), key=expr), expr - ) - ] - - -@asyncfiltervariant(filters.do_join) -async def do_join(eval_ctx, value, d=u"", attribute=None): - return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute) - - -@asyncfiltervariant(filters.do_list) -async def do_list(value): - return await auto_to_seq(value) - - -@asyncfiltervariant(filters.do_reject) -async def do_reject(*args, **kwargs): - return async_select_or_reject(args, kwargs, lambda x: not x, False) - - -@asyncfiltervariant(filters.do_rejectattr) -async def do_rejectattr(*args, **kwargs): - return async_select_or_reject(args, kwargs, lambda x: not x, True) - - -@asyncfiltervariant(filters.do_select) -async def do_select(*args, **kwargs): - return async_select_or_reject(args, kwargs, lambda x: x, False) - - -@asyncfiltervariant(filters.do_selectattr) -async def do_selectattr(*args, **kwargs): - return async_select_or_reject(args, kwargs, lambda x: x, True) - - -@asyncfiltervariant(filters.do_map) -async def do_map(*args, **kwargs): - seq, func = filters.prepare_map(args, kwargs) - if seq: - async for item in auto_aiter(seq): - yield await auto_await(func(item)) - - -@asyncfiltervariant(filters.do_sum) -async def do_sum(environment, iterable, attribute=None, start=0): - rv = start - if attribute is not None: - func = filters.make_attrgetter(environment, attribute) - else: - - def func(x): - return x - - async for item in auto_aiter(iterable): - rv += func(item) - return rv - - -@asyncfiltervariant(filters.do_slice) -async def do_slice(value, slices, fill_with=None): - return filters.do_slice(await auto_to_seq(value), slices, fill_with) - - -ASYNC_FILTERS = { - "first": do_first, - 
"groupby": do_groupby, - "join": do_join, - "list": do_list, - # we intentionally do not support do_last because that would be - # ridiculous - "reject": do_reject, - "rejectattr": do_rejectattr, - "map": do_map, - "select": do_select, - "selectattr": do_selectattr, - "sum": do_sum, - "slice": do_slice, -} diff --git a/venv/lib/python3.8/site-packages/jinja2/asyncsupport.py b/venv/lib/python3.8/site-packages/jinja2/asyncsupport.py deleted file mode 100644 index 78ba3739..00000000 --- a/venv/lib/python3.8/site-packages/jinja2/asyncsupport.py +++ /dev/null @@ -1,264 +0,0 @@ -# -*- coding: utf-8 -*- -"""The code for async support. Importing this patches Jinja on supported -Python versions. -""" -import asyncio -import inspect -from functools import update_wrapper - -from markupsafe import Markup - -from .environment import TemplateModule -from .runtime import LoopContext -from .utils import concat -from .utils import internalcode -from .utils import missing - - -async def concat_async(async_gen): - rv = [] - - async def collect(): - async for event in async_gen: - rv.append(event) - - await collect() - return concat(rv) - - -async def generate_async(self, *args, **kwargs): - vars = dict(*args, **kwargs) - try: - async for event in self.root_render_func(self.new_context(vars)): - yield event - except Exception: - yield self.environment.handle_exception() - - -def wrap_generate_func(original_generate): - def _convert_generator(self, loop, args, kwargs): - async_gen = self.generate_async(*args, **kwargs) - try: - while 1: - yield loop.run_until_complete(async_gen.__anext__()) - except StopAsyncIteration: - pass - - def generate(self, *args, **kwargs): - if not self.environment.is_async: - return original_generate(self, *args, **kwargs) - return _convert_generator(self, asyncio.get_event_loop(), args, kwargs) - - return update_wrapper(generate, original_generate) - - -async def render_async(self, *args, **kwargs): - if not self.environment.is_async: - raise RuntimeError("The environment was not created with async mode enabled.") - - vars = dict(*args, **kwargs) - ctx = self.new_context(vars) - - try: - return await concat_async(self.root_render_func(ctx)) - except Exception: - return self.environment.handle_exception() - - -def wrap_render_func(original_render): - def render(self, *args, **kwargs): - if not self.environment.is_async: - return original_render(self, *args, **kwargs) - loop = asyncio.get_event_loop() - return loop.run_until_complete(self.render_async(*args, **kwargs)) - - return update_wrapper(render, original_render) - - -def wrap_block_reference_call(original_call): - @internalcode - async def async_call(self): - rv = await concat_async(self._stack[self._depth](self._context)) - if self._context.eval_ctx.autoescape: - rv = Markup(rv) - return rv - - @internalcode - def __call__(self): - if not self._context.environment.is_async: - return original_call(self) - return async_call(self) - - return update_wrapper(__call__, original_call) - - -def wrap_macro_invoke(original_invoke): - @internalcode - async def async_invoke(self, arguments, autoescape): - rv = await self._func(*arguments) - if autoescape: - rv = Markup(rv) - return rv - - @internalcode - def _invoke(self, arguments, autoescape): - if not self._environment.is_async: - return original_invoke(self, arguments, autoescape) - return async_invoke(self, arguments, autoescape) - - return update_wrapper(_invoke, original_invoke) - - -@internalcode -async def get_default_module_async(self): - if self._module is not None: - return 
self._module - self._module = rv = await self.make_module_async() - return rv - - -def wrap_default_module(original_default_module): - @internalcode - def _get_default_module(self): - if self.environment.is_async: - raise RuntimeError("Template module attribute is unavailable in async mode") - return original_default_module(self) - - return _get_default_module - - -async def make_module_async(self, vars=None, shared=False, locals=None): - context = self.new_context(vars, shared, locals) - body_stream = [] - async for item in self.root_render_func(context): - body_stream.append(item) - return TemplateModule(self, context, body_stream) - - -def patch_template(): - from . import Template - - Template.generate = wrap_generate_func(Template.generate) - Template.generate_async = update_wrapper(generate_async, Template.generate_async) - Template.render_async = update_wrapper(render_async, Template.render_async) - Template.render = wrap_render_func(Template.render) - Template._get_default_module = wrap_default_module(Template._get_default_module) - Template._get_default_module_async = get_default_module_async - Template.make_module_async = update_wrapper( - make_module_async, Template.make_module_async - ) - - -def patch_runtime(): - from .runtime import BlockReference, Macro - - BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__) - Macro._invoke = wrap_macro_invoke(Macro._invoke) - - -def patch_filters(): - from .filters import FILTERS - from .asyncfilters import ASYNC_FILTERS - - FILTERS.update(ASYNC_FILTERS) - - -def patch_all(): - patch_template() - patch_runtime() - patch_filters() - - -async def auto_await(value): - if inspect.isawaitable(value): - return await value - return value - - -async def auto_aiter(iterable): - if hasattr(iterable, "__aiter__"): - async for item in iterable: - yield item - return - for item in iterable: - yield item - - -class AsyncLoopContext(LoopContext): - _to_iterator = staticmethod(auto_aiter) - - @property - async def length(self): - if self._length is not None: - return self._length - - try: - self._length = len(self._iterable) - except TypeError: - iterable = [x async for x in self._iterator] - self._iterator = self._to_iterator(iterable) - self._length = len(iterable) + self.index + (self._after is not missing) - - return self._length - - @property - async def revindex0(self): - return await self.length - self.index - - @property - async def revindex(self): - return await self.length - self.index0 - - async def _peek_next(self): - if self._after is not missing: - return self._after - - try: - self._after = await self._iterator.__anext__() - except StopAsyncIteration: - self._after = missing - - return self._after - - @property - async def last(self): - return await self._peek_next() is missing - - @property - async def nextitem(self): - rv = await self._peek_next() - - if rv is missing: - return self._undefined("there is no next item") - - return rv - - def __aiter__(self): - return self - - async def __anext__(self): - if self._after is not missing: - rv = self._after - self._after = missing - else: - rv = await self._iterator.__anext__() - - self.index0 += 1 - self._before = self._current - self._current = rv - return rv, self - - -async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0): - import warnings - - warnings.warn( - "This template must be recompiled with at least Jinja 2.11, or" - " it will fail in 3.0.", - DeprecationWarning, - stacklevel=2, - ) - return AsyncLoopContext(iterable, undefined, 
recurse, depth0) - - -patch_all() diff --git a/venv/lib/python3.8/site-packages/jinja2/bccache.py b/venv/lib/python3.8/site-packages/jinja2/bccache.py deleted file mode 100644 index 9c066103..00000000 --- a/venv/lib/python3.8/site-packages/jinja2/bccache.py +++ /dev/null @@ -1,350 +0,0 @@ -# -*- coding: utf-8 -*- -"""The optional bytecode cache system. This is useful if you have very -complex template situations and the compilation of all those templates -slows down your application too much. - -Situations where this is useful are often forking web applications that -are initialized on the first request. -""" -import errno -import fnmatch -import os -import stat -import sys -import tempfile -from hashlib import sha1 -from os import listdir -from os import path - -from ._compat import BytesIO -from ._compat import marshal_dump -from ._compat import marshal_load -from ._compat import pickle -from ._compat import text_type -from .utils import open_if_exists - -bc_version = 4 -# Magic bytes to identify Jinja bytecode cache files. Contains the -# Python major and minor version to avoid loading incompatible bytecode -# if a project upgrades its Python version. -bc_magic = ( - b"j2" - + pickle.dumps(bc_version, 2) - + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2) -) - - -class Bucket(object): - """Buckets are used to store the bytecode for one template. It's created - and initialized by the bytecode cache and passed to the loading functions. - - The buckets get an internal checksum from the cache assigned and use this - to automatically reject outdated cache material. Individual bytecode - cache subclasses don't have to care about cache invalidation. - """ - - def __init__(self, environment, key, checksum): - self.environment = environment - self.key = key - self.checksum = checksum - self.reset() - - def reset(self): - """Resets the bucket (unloads the bytecode).""" - self.code = None - - def load_bytecode(self, f): - """Loads bytecode from a file or file like object.""" - # make sure the magic header is correct - magic = f.read(len(bc_magic)) - if magic != bc_magic: - self.reset() - return - # the source code of the file changed, we need to reload - checksum = pickle.load(f) - if self.checksum != checksum: - self.reset() - return - # if marshal_load fails then we need to reload - try: - self.code = marshal_load(f) - except (EOFError, ValueError, TypeError): - self.reset() - return - - def write_bytecode(self, f): - """Dump the bytecode into the file or file like object passed.""" - if self.code is None: - raise TypeError("can't write empty bucket") - f.write(bc_magic) - pickle.dump(self.checksum, f, 2) - marshal_dump(self.code, f) - - def bytecode_from_string(self, string): - """Load bytecode from a string.""" - self.load_bytecode(BytesIO(string)) - - def bytecode_to_string(self): - """Return the bytecode as string.""" - out = BytesIO() - self.write_bytecode(out) - return out.getvalue() - - -class BytecodeCache(object): - """To implement your own bytecode cache you have to subclass this class - and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of - these methods are passed a :class:`~jinja2.bccache.Bucket`. 
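For orientation, a bytecode cache only takes effect once it is handed to an
Environment, which then calls get_bucket()/set_bucket() around template
compilation. A minimal usage sketch, where the loader directory, cache
directory and template name are all placeholders:

    from jinja2 import Environment, FileSystemBytecodeCache, FileSystemLoader

    env = Environment(
        loader=FileSystemLoader("templates"),
        bytecode_cache=FileSystemBytecodeCache("/tmp/jinja_cache"),
    )
    # The first render compiles the template and marshals its bytecode into
    # the cache; later processes reuse it as long as the source checksum
    # stored in the Bucket still matches.
    env.get_template("index.html").render()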
- - A very basic bytecode cache that saves the bytecode on the file system:: - - from os import path - - class MyCache(BytecodeCache): - - def __init__(self, directory): - self.directory = directory - - def load_bytecode(self, bucket): - filename = path.join(self.directory, bucket.key) - if path.exists(filename): - with open(filename, 'rb') as f: - bucket.load_bytecode(f) - - def dump_bytecode(self, bucket): - filename = path.join(self.directory, bucket.key) - with open(filename, 'wb') as f: - bucket.write_bytecode(f) - - A more advanced version of a filesystem based bytecode cache is part of - Jinja. - """ - - def load_bytecode(self, bucket): - """Subclasses have to override this method to load bytecode into a - bucket. If they are not able to find code in the cache for the - bucket, it must not do anything. - """ - raise NotImplementedError() - - def dump_bytecode(self, bucket): - """Subclasses have to override this method to write the bytecode - from a bucket back to the cache. If it unable to do so it must not - fail silently but raise an exception. - """ - raise NotImplementedError() - - def clear(self): - """Clears the cache. This method is not used by Jinja but should be - implemented to allow applications to clear the bytecode cache used - by a particular environment. - """ - - def get_cache_key(self, name, filename=None): - """Returns the unique hash key for this template name.""" - hash = sha1(name.encode("utf-8")) - if filename is not None: - filename = "|" + filename - if isinstance(filename, text_type): - filename = filename.encode("utf-8") - hash.update(filename) - return hash.hexdigest() - - def get_source_checksum(self, source): - """Returns a checksum for the source.""" - return sha1(source.encode("utf-8")).hexdigest() - - def get_bucket(self, environment, name, filename, source): - """Return a cache bucket for the given template. All arguments are - mandatory but filename may be `None`. - """ - key = self.get_cache_key(name, filename) - checksum = self.get_source_checksum(source) - bucket = Bucket(environment, key, checksum) - self.load_bytecode(bucket) - return bucket - - def set_bucket(self, bucket): - """Put the bucket into the cache.""" - self.dump_bytecode(bucket) - - -class FileSystemBytecodeCache(BytecodeCache): - """A bytecode cache that stores bytecode on the filesystem. It accepts - two arguments: The directory where the cache items are stored and a - pattern string that is used to build the filename. - - If no directory is specified a default cache directory is selected. On - Windows the user's temp directory is used, on UNIX systems a directory - is created for the user in the system temp directory. - - The pattern can be used to have multiple separate caches operate on the - same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s`` - is replaced with the cache key. - - >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache') - - This bytecode cache supports clearing of the cache using the clear method. - """ - - def __init__(self, directory=None, pattern="__jinja2_%s.cache"): - if directory is None: - directory = self._get_default_cache_dir() - self.directory = directory - self.pattern = pattern - - def _get_default_cache_dir(self): - def _unsafe_dir(): - raise RuntimeError( - "Cannot determine safe temp directory. You " - "need to explicitly provide one." - ) - - tmpdir = tempfile.gettempdir() - - # On windows the temporary directory is used specific unless - # explicitly forced otherwise. We can just use that. 
- if os.name == "nt": - return tmpdir - if not hasattr(os, "getuid"): - _unsafe_dir() - - dirname = "_jinja2-cache-%d" % os.getuid() - actual_dir = os.path.join(tmpdir, dirname) - - try: - os.mkdir(actual_dir, stat.S_IRWXU) - except OSError as e: - if e.errno != errno.EEXIST: - raise - try: - os.chmod(actual_dir, stat.S_IRWXU) - actual_dir_stat = os.lstat(actual_dir) - if ( - actual_dir_stat.st_uid != os.getuid() - or not stat.S_ISDIR(actual_dir_stat.st_mode) - or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU - ): - _unsafe_dir() - except OSError as e: - if e.errno != errno.EEXIST: - raise - - actual_dir_stat = os.lstat(actual_dir) - if ( - actual_dir_stat.st_uid != os.getuid() - or not stat.S_ISDIR(actual_dir_stat.st_mode) - or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU - ): - _unsafe_dir() - - return actual_dir - - def _get_cache_filename(self, bucket): - return path.join(self.directory, self.pattern % bucket.key) - - def load_bytecode(self, bucket): - f = open_if_exists(self._get_cache_filename(bucket), "rb") - if f is not None: - try: - bucket.load_bytecode(f) - finally: - f.close() - - def dump_bytecode(self, bucket): - f = open(self._get_cache_filename(bucket), "wb") - try: - bucket.write_bytecode(f) - finally: - f.close() - - def clear(self): - # imported lazily here because google app-engine doesn't support - # write access on the file system and the function does not exist - # normally. - from os import remove - - files = fnmatch.filter(listdir(self.directory), self.pattern % "*") - for filename in files: - try: - remove(path.join(self.directory, filename)) - except OSError: - pass - - -class MemcachedBytecodeCache(BytecodeCache): - """This class implements a bytecode cache that uses a memcache cache for - storing the information. It does not enforce a specific memcache library - (tummy's memcache or cmemcache) but will accept any class that provides - the minimal interface required. - - Libraries compatible with this class: - - - `cachelib `_ - - `python-memcached `_ - - (Unfortunately the django cache interface is not compatible because it - does not support storing binary data, only unicode. You can however pass - the underlying cache client to the bytecode cache which is available - as `django.core.cache.cache._client`.) - - The minimal interface for the client passed to the constructor is this: - - .. class:: MinimalClientInterface - - .. method:: set(key, value[, timeout]) - - Stores the bytecode in the cache. `value` is a string and - `timeout` the timeout of the key. If timeout is not provided - a default timeout or no timeout should be assumed, if it's - provided it's an integer with the number of seconds the cache - item should exist. - - .. method:: get(key) - - Returns the value for the cache key. If the item does not - exist in the cache the return value must be `None`. - - The other arguments to the constructor are the prefix for all keys that - is added before the actual cache key and the timeout for the bytecode in - the cache system. We recommend a high (or no) timeout. - - This bytecode cache does not support clearing of used items in the cache. - The clear method is a no-operation function. - - .. versionadded:: 2.7 - Added support for ignoring memcache errors through the - `ignore_memcache_errors` parameter. 
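Given that minimal interface, any object with compatible get/set methods
will do. A toy in-memory client, for illustration only (a real deployment
would use python-memcached or cachelib instead):

    class DictClient:
        """Satisfies the minimal client interface described above."""

        def __init__(self):
            self._data = {}

        def set(self, key, value, timeout=None):
            self._data[key] = value

        def get(self, key):
            return self._data.get(key)  # must return None when absent

    cache = MemcachedBytecodeCache(DictClient(), prefix="jinja2/bytecode/")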
- """ - - def __init__( - self, - client, - prefix="jinja2/bytecode/", - timeout=None, - ignore_memcache_errors=True, - ): - self.client = client - self.prefix = prefix - self.timeout = timeout - self.ignore_memcache_errors = ignore_memcache_errors - - def load_bytecode(self, bucket): - try: - code = self.client.get(self.prefix + bucket.key) - except Exception: - if not self.ignore_memcache_errors: - raise - code = None - if code is not None: - bucket.bytecode_from_string(code) - - def dump_bytecode(self, bucket): - args = (self.prefix + bucket.key, bucket.bytecode_to_string()) - if self.timeout is not None: - args += (self.timeout,) - try: - self.client.set(*args) - except Exception: - if not self.ignore_memcache_errors: - raise diff --git a/venv/lib/python3.8/site-packages/jinja2/compiler.py b/venv/lib/python3.8/site-packages/jinja2/compiler.py deleted file mode 100644 index f450ec6e..00000000 --- a/venv/lib/python3.8/site-packages/jinja2/compiler.py +++ /dev/null @@ -1,1843 +0,0 @@ -# -*- coding: utf-8 -*- -"""Compiles nodes from the parser into Python code.""" -from collections import namedtuple -from functools import update_wrapper -from itertools import chain -from keyword import iskeyword as is_python_keyword - -from markupsafe import escape -from markupsafe import Markup - -from . import nodes -from ._compat import imap -from ._compat import iteritems -from ._compat import izip -from ._compat import NativeStringIO -from ._compat import range_type -from ._compat import string_types -from ._compat import text_type -from .exceptions import TemplateAssertionError -from .idtracking import Symbols -from .idtracking import VAR_LOAD_ALIAS -from .idtracking import VAR_LOAD_PARAMETER -from .idtracking import VAR_LOAD_RESOLVE -from .idtracking import VAR_LOAD_UNDEFINED -from .nodes import EvalContext -from .optimizer import Optimizer -from .utils import concat -from .visitor import NodeVisitor - -operators = { - "eq": "==", - "ne": "!=", - "gt": ">", - "gteq": ">=", - "lt": "<", - "lteq": "<=", - "in": "in", - "notin": "not in", -} - -# what method to iterate over items do we want to use for dict iteration -# in generated code? on 2.x let's go with iteritems, on 3.x with items -if hasattr(dict, "iteritems"): - dict_item_iter = "iteritems" -else: - dict_item_iter = "items" - -code_features = ["division"] - -# does this python version support generator stops? (PEP 0479) -try: - exec("from __future__ import generator_stop") - code_features.append("generator_stop") -except SyntaxError: - pass - -# does this python version support yield from? 
-try: - exec("def f(): yield from x()") -except SyntaxError: - supports_yield_from = False -else: - supports_yield_from = True - - -def optimizeconst(f): - def new_func(self, node, frame, **kwargs): - # Only optimize if the frame is not volatile - if self.optimized and not frame.eval_ctx.volatile: - new_node = self.optimizer.visit(node, frame.eval_ctx) - if new_node != node: - return self.visit(new_node, frame) - return f(self, node, frame, **kwargs) - - return update_wrapper(new_func, f) - - -def generate( - node, environment, name, filename, stream=None, defer_init=False, optimized=True -): - """Generate the python source for a node tree.""" - if not isinstance(node, nodes.Template): - raise TypeError("Can't compile non template nodes") - generator = environment.code_generator_class( - environment, name, filename, stream, defer_init, optimized - ) - generator.visit(node) - if stream is None: - return generator.stream.getvalue() - - -def has_safe_repr(value): - """Does the node have a safe representation?""" - if value is None or value is NotImplemented or value is Ellipsis: - return True - if type(value) in (bool, int, float, complex, range_type, Markup) + string_types: - return True - if type(value) in (tuple, list, set, frozenset): - for item in value: - if not has_safe_repr(item): - return False - return True - elif type(value) is dict: - for key, value in iteritems(value): - if not has_safe_repr(key): - return False - if not has_safe_repr(value): - return False - return True - return False - - -def find_undeclared(nodes, names): - """Check if the names passed are accessed undeclared. The return value - is a set of all the undeclared names from the sequence of names found. - """ - visitor = UndeclaredNameVisitor(names) - try: - for node in nodes: - visitor.visit(node) - except VisitorExit: - pass - return visitor.undeclared - - -class MacroRef(object): - def __init__(self, node): - self.node = node - self.accesses_caller = False - self.accesses_kwargs = False - self.accesses_varargs = False - - -class Frame(object): - """Holds compile time information for us.""" - - def __init__(self, eval_ctx, parent=None, level=None): - self.eval_ctx = eval_ctx - self.symbols = Symbols(parent and parent.symbols or None, level=level) - - # a toplevel frame is the root + soft frames such as if conditions. - self.toplevel = False - - # the root frame is basically just the outermost frame, so no if - # conditions. This information is used to optimize inheritance - # situations. - self.rootlevel = False - - # in some dynamic inheritance situations the compiler needs to add - # write tests around output statements. - self.require_output_check = parent and parent.require_output_check - - # inside some tags we are using a buffer rather than yield statements. - # this for example affects {% filter %} or {% macro %}. If a frame - # is buffered this variable points to the name of the list used as - # buffer. - self.buffer = None - - # the name of the block we're in, otherwise None. 
- self.block = parent and parent.block or None - - # the parent of this frame - self.parent = parent - - if parent is not None: - self.buffer = parent.buffer - - def copy(self): - """Create a copy of the current one.""" - rv = object.__new__(self.__class__) - rv.__dict__.update(self.__dict__) - rv.symbols = self.symbols.copy() - return rv - - def inner(self, isolated=False): - """Return an inner frame.""" - if isolated: - return Frame(self.eval_ctx, level=self.symbols.level + 1) - return Frame(self.eval_ctx, self) - - def soft(self): - """Return a soft frame. A soft frame may not be modified as - standalone thing as it shares the resources with the frame it - was created of, but it's not a rootlevel frame any longer. - - This is only used to implement if-statements. - """ - rv = self.copy() - rv.rootlevel = False - return rv - - __copy__ = copy - - -class VisitorExit(RuntimeError): - """Exception used by the `UndeclaredNameVisitor` to signal a stop.""" - - -class DependencyFinderVisitor(NodeVisitor): - """A visitor that collects filter and test calls.""" - - def __init__(self): - self.filters = set() - self.tests = set() - - def visit_Filter(self, node): - self.generic_visit(node) - self.filters.add(node.name) - - def visit_Test(self, node): - self.generic_visit(node) - self.tests.add(node.name) - - def visit_Block(self, node): - """Stop visiting at blocks.""" - - -class UndeclaredNameVisitor(NodeVisitor): - """A visitor that checks if a name is accessed without being - declared. This is different from the frame visitor as it will - not stop at closure frames. - """ - - def __init__(self, names): - self.names = set(names) - self.undeclared = set() - - def visit_Name(self, node): - if node.ctx == "load" and node.name in self.names: - self.undeclared.add(node.name) - if self.undeclared == self.names: - raise VisitorExit() - else: - self.names.discard(node.name) - - def visit_Block(self, node): - """Stop visiting a blocks.""" - - -class CompilerExit(Exception): - """Raised if the compiler encountered a situation where it just - doesn't make sense to further process the code. Any block that - raises such an exception is not further processed. - """ - - -class CodeGenerator(NodeVisitor): - def __init__( - self, environment, name, filename, stream=None, defer_init=False, optimized=True - ): - if stream is None: - stream = NativeStringIO() - self.environment = environment - self.name = name - self.filename = filename - self.stream = stream - self.created_block_context = False - self.defer_init = defer_init - self.optimized = optimized - if optimized: - self.optimizer = Optimizer(environment) - - # aliases for imports - self.import_aliases = {} - - # a registry for all blocks. Because blocks are moved out - # into the global python scope they are registered here - self.blocks = {} - - # the number of extends statements so far - self.extends_so_far = 0 - - # some templates have a rootlevel extends. In this case we - # can safely assume that we're a child template and do some - # more optimizations. - self.has_known_extends = False - - # the current line number - self.code_lineno = 1 - - # registry of all filters and tests (global, not block local) - self.tests = {} - self.filters = {} - - # the debug information - self.debug_info = [] - self._write_debug_info = None - - # the number of new lines before the next write() - self._new_lines = 0 - - # the line number of the last written statement - self._last_line = 0 - - # true if nothing was written so far. 
- self._first_write = True - - # used by the `temporary_identifier` method to get new - # unique, temporary identifier - self._last_identifier = 0 - - # the current indentation - self._indentation = 0 - - # Tracks toplevel assignments - self._assign_stack = [] - - # Tracks parameter definition blocks - self._param_def_block = [] - - # Tracks the current context. - self._context_reference_stack = ["context"] - - # -- Various compilation helpers - - def fail(self, msg, lineno): - """Fail with a :exc:`TemplateAssertionError`.""" - raise TemplateAssertionError(msg, lineno, self.name, self.filename) - - def temporary_identifier(self): - """Get a new unique identifier.""" - self._last_identifier += 1 - return "t_%d" % self._last_identifier - - def buffer(self, frame): - """Enable buffering for the frame from that point onwards.""" - frame.buffer = self.temporary_identifier() - self.writeline("%s = []" % frame.buffer) - - def return_buffer_contents(self, frame, force_unescaped=False): - """Return the buffer contents of the frame.""" - if not force_unescaped: - if frame.eval_ctx.volatile: - self.writeline("if context.eval_ctx.autoescape:") - self.indent() - self.writeline("return Markup(concat(%s))" % frame.buffer) - self.outdent() - self.writeline("else:") - self.indent() - self.writeline("return concat(%s)" % frame.buffer) - self.outdent() - return - elif frame.eval_ctx.autoescape: - self.writeline("return Markup(concat(%s))" % frame.buffer) - return - self.writeline("return concat(%s)" % frame.buffer) - - def indent(self): - """Indent by one.""" - self._indentation += 1 - - def outdent(self, step=1): - """Outdent by step.""" - self._indentation -= step - - def start_write(self, frame, node=None): - """Yield or write into the frame buffer.""" - if frame.buffer is None: - self.writeline("yield ", node) - else: - self.writeline("%s.append(" % frame.buffer, node) - - def end_write(self, frame): - """End the writing process started by `start_write`.""" - if frame.buffer is not None: - self.write(")") - - def simple_write(self, s, frame, node=None): - """Simple shortcut for start_write + write + end_write.""" - self.start_write(frame, node) - self.write(s) - self.end_write(frame) - - def blockvisit(self, nodes, frame): - """Visit a list of nodes as block in a frame. If the current frame - is no buffer a dummy ``if 0: yield None`` is written automatically. - """ - try: - self.writeline("pass") - for node in nodes: - self.visit(node, frame) - except CompilerExit: - pass - - def write(self, x): - """Write a string into the output stream.""" - if self._new_lines: - if not self._first_write: - self.stream.write("\n" * self._new_lines) - self.code_lineno += self._new_lines - if self._write_debug_info is not None: - self.debug_info.append((self._write_debug_info, self.code_lineno)) - self._write_debug_info = None - self._first_write = False - self.stream.write(" " * self._indentation) - self._new_lines = 0 - self.stream.write(x) - - def writeline(self, x, node=None, extra=0): - """Combination of newline and write.""" - self.newline(node, extra) - self.write(x) - - def newline(self, node=None, extra=0): - """Add one or more newlines before the next write.""" - self._new_lines = max(self._new_lines, 1 + extra) - if node is not None and node.lineno != self._last_line: - self._write_debug_info = node.lineno - self._last_line = node.lineno - - def signature(self, node, frame, extra_kwargs=None): - """Writes a function call to the stream for the current node. - A leading comma is added automatically. 
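The write()/writeline()/indent()/outdent() helpers above boil down to an
indentation-tracking line emitter. A condensed standalone sketch of the
idea (not the actual class, just the pattern):

    class MiniWriter:
        def __init__(self):
            self.buf = []
            self.level = 0

        def writeline(self, s):
            # each statement starts on a fresh line at the current indentation
            self.buf.append("    " * self.level + s)

        def code(self):
            return "\n".join(self.buf)

    w = MiniWriter()
    w.writeline("def root(context):")
    w.level += 1
    w.writeline("yield 'hello'")
    print(w.code())
    # def root(context):
    #     yield 'hello'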
The extra keyword - arguments may not include python keywords otherwise a syntax - error could occur. The extra keyword arguments should be given - as python dict. - """ - # if any of the given keyword arguments is a python keyword - # we have to make sure that no invalid call is created. - kwarg_workaround = False - for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()): - if is_python_keyword(kwarg): - kwarg_workaround = True - break - - for arg in node.args: - self.write(", ") - self.visit(arg, frame) - - if not kwarg_workaround: - for kwarg in node.kwargs: - self.write(", ") - self.visit(kwarg, frame) - if extra_kwargs is not None: - for key, value in iteritems(extra_kwargs): - self.write(", %s=%s" % (key, value)) - if node.dyn_args: - self.write(", *") - self.visit(node.dyn_args, frame) - - if kwarg_workaround: - if node.dyn_kwargs is not None: - self.write(", **dict({") - else: - self.write(", **{") - for kwarg in node.kwargs: - self.write("%r: " % kwarg.key) - self.visit(kwarg.value, frame) - self.write(", ") - if extra_kwargs is not None: - for key, value in iteritems(extra_kwargs): - self.write("%r: %s, " % (key, value)) - if node.dyn_kwargs is not None: - self.write("}, **") - self.visit(node.dyn_kwargs, frame) - self.write(")") - else: - self.write("}") - - elif node.dyn_kwargs is not None: - self.write(", **") - self.visit(node.dyn_kwargs, frame) - - def pull_dependencies(self, nodes): - """Pull all the dependencies.""" - visitor = DependencyFinderVisitor() - for node in nodes: - visitor.visit(node) - for dependency in "filters", "tests": - mapping = getattr(self, dependency) - for name in getattr(visitor, dependency): - if name not in mapping: - mapping[name] = self.temporary_identifier() - self.writeline( - "%s = environment.%s[%r]" % (mapping[name], dependency, name) - ) - - def enter_frame(self, frame): - undefs = [] - for target, (action, param) in iteritems(frame.symbols.loads): - if action == VAR_LOAD_PARAMETER: - pass - elif action == VAR_LOAD_RESOLVE: - self.writeline("%s = %s(%r)" % (target, self.get_resolve_func(), param)) - elif action == VAR_LOAD_ALIAS: - self.writeline("%s = %s" % (target, param)) - elif action == VAR_LOAD_UNDEFINED: - undefs.append(target) - else: - raise NotImplementedError("unknown load instruction") - if undefs: - self.writeline("%s = missing" % " = ".join(undefs)) - - def leave_frame(self, frame, with_python_scope=False): - if not with_python_scope: - undefs = [] - for target, _ in iteritems(frame.symbols.loads): - undefs.append(target) - if undefs: - self.writeline("%s = missing" % " = ".join(undefs)) - - def func(self, name): - if self.environment.is_async: - return "async def %s" % name - return "def %s" % name - - def macro_body(self, node, frame): - """Dump the function def of a macro or call block.""" - frame = frame.inner() - frame.symbols.analyze_node(node) - macro_ref = MacroRef(node) - - explicit_caller = None - skip_special_params = set() - args = [] - for idx, arg in enumerate(node.args): - if arg.name == "caller": - explicit_caller = idx - if arg.name in ("kwargs", "varargs"): - skip_special_params.add(arg.name) - args.append(frame.symbols.ref(arg.name)) - - undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs")) - - if "caller" in undeclared: - # In older Jinja versions there was a bug that allowed caller - # to retain the special behavior even if it was mentioned in - # the argument list. However thankfully this was only really - # working if it was the last argument. 
So we are explicitly - # checking this now and error out if it is anywhere else in - # the argument list. - if explicit_caller is not None: - try: - node.defaults[explicit_caller - len(node.args)] - except IndexError: - self.fail( - "When defining macros or call blocks the " - 'special "caller" argument must be omitted ' - "or be given a default.", - node.lineno, - ) - else: - args.append(frame.symbols.declare_parameter("caller")) - macro_ref.accesses_caller = True - if "kwargs" in undeclared and "kwargs" not in skip_special_params: - args.append(frame.symbols.declare_parameter("kwargs")) - macro_ref.accesses_kwargs = True - if "varargs" in undeclared and "varargs" not in skip_special_params: - args.append(frame.symbols.declare_parameter("varargs")) - macro_ref.accesses_varargs = True - - # macros are delayed, they never require output checks - frame.require_output_check = False - frame.symbols.analyze_node(node) - self.writeline("%s(%s):" % (self.func("macro"), ", ".join(args)), node) - self.indent() - - self.buffer(frame) - self.enter_frame(frame) - - self.push_parameter_definitions(frame) - for idx, arg in enumerate(node.args): - ref = frame.symbols.ref(arg.name) - self.writeline("if %s is missing:" % ref) - self.indent() - try: - default = node.defaults[idx - len(node.args)] - except IndexError: - self.writeline( - "%s = undefined(%r, name=%r)" - % (ref, "parameter %r was not provided" % arg.name, arg.name) - ) - else: - self.writeline("%s = " % ref) - self.visit(default, frame) - self.mark_parameter_stored(ref) - self.outdent() - self.pop_parameter_definitions() - - self.blockvisit(node.body, frame) - self.return_buffer_contents(frame, force_unescaped=True) - self.leave_frame(frame, with_python_scope=True) - self.outdent() - - return frame, macro_ref - - def macro_def(self, macro_ref, frame): - """Dump the macro definition for the def created by macro_body.""" - arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args) - name = getattr(macro_ref.node, "name", None) - if len(macro_ref.node.args) == 1: - arg_tuple += "," - self.write( - "Macro(environment, macro, %r, (%s), %r, %r, %r, " - "context.eval_ctx.autoescape)" - % ( - name, - arg_tuple, - macro_ref.accesses_kwargs, - macro_ref.accesses_varargs, - macro_ref.accesses_caller, - ) - ) - - def position(self, node): - """Return a human readable position for the node.""" - rv = "line %d" % node.lineno - if self.name is not None: - rv += " in " + repr(self.name) - return rv - - def dump_local_context(self, frame): - return "{%s}" % ", ".join( - "%r: %s" % (name, target) - for name, target in iteritems(frame.symbols.dump_stores()) - ) - - def write_commons(self): - """Writes a common preamble that is used by root and block functions. - Primarily this sets up common local helpers and enforces a generator - through a dead branch. - """ - self.writeline("resolve = context.resolve_or_missing") - self.writeline("undefined = environment.undefined") - # always use the standard Undefined class for the implicit else of - # conditional expressions - self.writeline("cond_expr_undefined = Undefined") - self.writeline("if 0: yield None") - - def push_parameter_definitions(self, frame): - """Pushes all parameter targets from the given frame into a local - stack that permits tracking of yet to be assigned parameters. In - particular this enables the optimization from `visit_Name` to skip - undefined expressions for parameters in macros as macros can reference - otherwise unbound parameters. 
- """ - self._param_def_block.append(frame.symbols.dump_param_targets()) - - def pop_parameter_definitions(self): - """Pops the current parameter definitions set.""" - self._param_def_block.pop() - - def mark_parameter_stored(self, target): - """Marks a parameter in the current parameter definitions as stored. - This will skip the enforced undefined checks. - """ - if self._param_def_block: - self._param_def_block[-1].discard(target) - - def push_context_reference(self, target): - self._context_reference_stack.append(target) - - def pop_context_reference(self): - self._context_reference_stack.pop() - - def get_context_ref(self): - return self._context_reference_stack[-1] - - def get_resolve_func(self): - target = self._context_reference_stack[-1] - if target == "context": - return "resolve" - return "%s.resolve" % target - - def derive_context(self, frame): - return "%s.derived(%s)" % ( - self.get_context_ref(), - self.dump_local_context(frame), - ) - - def parameter_is_undeclared(self, target): - """Checks if a given target is an undeclared parameter.""" - if not self._param_def_block: - return False - return target in self._param_def_block[-1] - - def push_assign_tracking(self): - """Pushes a new layer for assignment tracking.""" - self._assign_stack.append(set()) - - def pop_assign_tracking(self, frame): - """Pops the topmost level for assignment tracking and updates the - context variables if necessary. - """ - vars = self._assign_stack.pop() - if not frame.toplevel or not vars: - return - public_names = [x for x in vars if x[:1] != "_"] - if len(vars) == 1: - name = next(iter(vars)) - ref = frame.symbols.ref(name) - self.writeline("context.vars[%r] = %s" % (name, ref)) - else: - self.writeline("context.vars.update({") - for idx, name in enumerate(vars): - if idx: - self.write(", ") - ref = frame.symbols.ref(name) - self.write("%r: %s" % (name, ref)) - self.write("})") - if public_names: - if len(public_names) == 1: - self.writeline("context.exported_vars.add(%r)" % public_names[0]) - else: - self.writeline( - "context.exported_vars.update((%s))" - % ", ".join(imap(repr, public_names)) - ) - - # -- Statement Visitors - - def visit_Template(self, node, frame=None): - assert frame is None, "no root frame allowed" - eval_ctx = EvalContext(self.environment, self.name) - - from .runtime import exported - - self.writeline("from __future__ import %s" % ", ".join(code_features)) - self.writeline("from jinja2.runtime import " + ", ".join(exported)) - - if self.environment.is_async: - self.writeline( - "from jinja2.asyncsupport import auto_await, " - "auto_aiter, AsyncLoopContext" - ) - - # if we want a deferred initialization we cannot move the - # environment into a local name - envenv = not self.defer_init and ", environment=environment" or "" - - # do we have an extends tag at all? If not, we can save some - # overhead by just not processing any inheritance code. - have_extends = node.find(nodes.Extends) is not None - - # find all blocks - for block in node.find_all(nodes.Block): - if block.name in self.blocks: - self.fail("block %r defined twice" % block.name, block.lineno) - self.blocks[block.name] = block - - # find all imports and import them - for import_ in node.find_all(nodes.ImportedName): - if import_.importname not in self.import_aliases: - imp = import_.importname - self.import_aliases[imp] = alias = self.temporary_identifier() - if "." 
in imp: - module, obj = imp.rsplit(".", 1) - self.writeline("from %s import %s as %s" % (module, obj, alias)) - else: - self.writeline("import %s as %s" % (imp, alias)) - - # add the load name - self.writeline("name = %r" % self.name) - - # generate the root render function. - self.writeline( - "%s(context, missing=missing%s):" % (self.func("root"), envenv), extra=1 - ) - self.indent() - self.write_commons() - - # process the root - frame = Frame(eval_ctx) - if "self" in find_undeclared(node.body, ("self",)): - ref = frame.symbols.declare_parameter("self") - self.writeline("%s = TemplateReference(context)" % ref) - frame.symbols.analyze_node(node) - frame.toplevel = frame.rootlevel = True - frame.require_output_check = have_extends and not self.has_known_extends - if have_extends: - self.writeline("parent_template = None") - self.enter_frame(frame) - self.pull_dependencies(node.body) - self.blockvisit(node.body, frame) - self.leave_frame(frame, with_python_scope=True) - self.outdent() - - # make sure that the parent root is called. - if have_extends: - if not self.has_known_extends: - self.indent() - self.writeline("if parent_template is not None:") - self.indent() - if supports_yield_from and not self.environment.is_async: - self.writeline("yield from parent_template.root_render_func(context)") - else: - self.writeline( - "%sfor event in parent_template." - "root_render_func(context):" - % (self.environment.is_async and "async " or "") - ) - self.indent() - self.writeline("yield event") - self.outdent() - self.outdent(1 + (not self.has_known_extends)) - - # at this point we now have the blocks collected and can visit them too. - for name, block in iteritems(self.blocks): - self.writeline( - "%s(context, missing=missing%s):" - % (self.func("block_" + name), envenv), - block, - 1, - ) - self.indent() - self.write_commons() - # It's important that we do not make this frame a child of the - # toplevel template. This would cause a variety of - # interesting issues with identifier tracking. 
- block_frame = Frame(eval_ctx) - undeclared = find_undeclared(block.body, ("self", "super")) - if "self" in undeclared: - ref = block_frame.symbols.declare_parameter("self") - self.writeline("%s = TemplateReference(context)" % ref) - if "super" in undeclared: - ref = block_frame.symbols.declare_parameter("super") - self.writeline("%s = context.super(%r, block_%s)" % (ref, name, name)) - block_frame.symbols.analyze_node(block) - block_frame.block = name - self.enter_frame(block_frame) - self.pull_dependencies(block.body) - self.blockvisit(block.body, block_frame) - self.leave_frame(block_frame, with_python_scope=True) - self.outdent() - - self.writeline( - "blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks), - extra=1, - ) - - # add a function that returns the debug info - self.writeline( - "debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info) - ) - - def visit_Block(self, node, frame): - """Call a block and register it for the template.""" - level = 0 - if frame.toplevel: - # if we know that we are a child template, there is no need to - # check if we are one - if self.has_known_extends: - return - if self.extends_so_far > 0: - self.writeline("if parent_template is None:") - self.indent() - level += 1 - - if node.scoped: - context = self.derive_context(frame) - else: - context = self.get_context_ref() - - if ( - supports_yield_from - and not self.environment.is_async - and frame.buffer is None - ): - self.writeline( - "yield from context.blocks[%r][0](%s)" % (node.name, context), node - ) - else: - loop = self.environment.is_async and "async for" or "for" - self.writeline( - "%s event in context.blocks[%r][0](%s):" % (loop, node.name, context), - node, - ) - self.indent() - self.simple_write("event", frame) - self.outdent() - - self.outdent(level) - - def visit_Extends(self, node, frame): - """Calls the extender.""" - if not frame.toplevel: - self.fail("cannot use extend from a non top-level scope", node.lineno) - - # if the number of extends statements in general is zero so - # far, we don't have to add a check if something extended - # the template before this one. - if self.extends_so_far > 0: - - # if we have a known extends we just add a template runtime - # error into the generated code. We could catch that at compile - # time too, but i welcome it not to confuse users by throwing the - # same error at different times just "because we can". - if not self.has_known_extends: - self.writeline("if parent_template is not None:") - self.indent() - self.writeline("raise TemplateRuntimeError(%r)" % "extended multiple times") - - # if we have a known extends already we don't need that code here - # as we know that the template execution will end here. 
- if self.has_known_extends: - raise CompilerExit() - else: - self.outdent() - - self.writeline("parent_template = environment.get_template(", node) - self.visit(node.template, frame) - self.write(", %r)" % self.name) - self.writeline( - "for name, parent_block in parent_template.blocks.%s():" % dict_item_iter - ) - self.indent() - self.writeline("context.blocks.setdefault(name, []).append(parent_block)") - self.outdent() - - # if this extends statement was in the root level we can take - # advantage of that information and simplify the generated code - # in the top level from this point onwards - if frame.rootlevel: - self.has_known_extends = True - - # and now we have one more - self.extends_so_far += 1 - - def visit_Include(self, node, frame): - """Handles includes.""" - if node.ignore_missing: - self.writeline("try:") - self.indent() - - func_name = "get_or_select_template" - if isinstance(node.template, nodes.Const): - if isinstance(node.template.value, string_types): - func_name = "get_template" - elif isinstance(node.template.value, (tuple, list)): - func_name = "select_template" - elif isinstance(node.template, (nodes.Tuple, nodes.List)): - func_name = "select_template" - - self.writeline("template = environment.%s(" % func_name, node) - self.visit(node.template, frame) - self.write(", %r)" % self.name) - if node.ignore_missing: - self.outdent() - self.writeline("except TemplateNotFound:") - self.indent() - self.writeline("pass") - self.outdent() - self.writeline("else:") - self.indent() - - skip_event_yield = False - if node.with_context: - loop = self.environment.is_async and "async for" or "for" - self.writeline( - "%s event in template.root_render_func(" - "template.new_context(context.get_all(), True, " - "%s)):" % (loop, self.dump_local_context(frame)) - ) - elif self.environment.is_async: - self.writeline( - "for event in (await " - "template._get_default_module_async())" - "._body_stream:" - ) - else: - if supports_yield_from: - self.writeline("yield from template._get_default_module()._body_stream") - skip_event_yield = True - else: - self.writeline( - "for event in template._get_default_module()._body_stream:" - ) - - if not skip_event_yield: - self.indent() - self.simple_write("event", frame) - self.outdent() - - if node.ignore_missing: - self.outdent() - - def visit_Import(self, node, frame): - """Visit regular imports.""" - self.writeline("%s = " % frame.symbols.ref(node.target), node) - if frame.toplevel: - self.write("context.vars[%r] = " % node.target) - if self.environment.is_async: - self.write("await ") - self.write("environment.get_template(") - self.visit(node.template, frame) - self.write(", %r)." % self.name) - if node.with_context: - self.write( - "make_module%s(context.get_all(), True, %s)" - % ( - self.environment.is_async and "_async" or "", - self.dump_local_context(frame), - ) - ) - elif self.environment.is_async: - self.write("_get_default_module_async()") - else: - self.write("_get_default_module()") - if frame.toplevel and not node.target.startswith("_"): - self.writeline("context.exported_vars.discard(%r)" % node.target) - - def visit_FromImport(self, node, frame): - """Visit named imports.""" - self.newline(node) - self.write( - "included_template = %senvironment.get_template(" - % (self.environment.is_async and "await " or "") - ) - self.visit(node.template, frame) - self.write(", %r)." 
% self.name) - if node.with_context: - self.write( - "make_module%s(context.get_all(), True, %s)" - % ( - self.environment.is_async and "_async" or "", - self.dump_local_context(frame), - ) - ) - elif self.environment.is_async: - self.write("_get_default_module_async()") - else: - self.write("_get_default_module()") - - var_names = [] - discarded_names = [] - for name in node.names: - if isinstance(name, tuple): - name, alias = name - else: - alias = name - self.writeline( - "%s = getattr(included_template, " - "%r, missing)" % (frame.symbols.ref(alias), name) - ) - self.writeline("if %s is missing:" % frame.symbols.ref(alias)) - self.indent() - self.writeline( - "%s = undefined(%r %% " - "included_template.__name__, " - "name=%r)" - % ( - frame.symbols.ref(alias), - "the template %%r (imported on %s) does " - "not export the requested name %s" - % (self.position(node), repr(name)), - name, - ) - ) - self.outdent() - if frame.toplevel: - var_names.append(alias) - if not alias.startswith("_"): - discarded_names.append(alias) - - if var_names: - if len(var_names) == 1: - name = var_names[0] - self.writeline( - "context.vars[%r] = %s" % (name, frame.symbols.ref(name)) - ) - else: - self.writeline( - "context.vars.update({%s})" - % ", ".join( - "%r: %s" % (name, frame.symbols.ref(name)) for name in var_names - ) - ) - if discarded_names: - if len(discarded_names) == 1: - self.writeline("context.exported_vars.discard(%r)" % discarded_names[0]) - else: - self.writeline( - "context.exported_vars.difference_" - "update((%s))" % ", ".join(imap(repr, discarded_names)) - ) - - def visit_For(self, node, frame): - loop_frame = frame.inner() - test_frame = frame.inner() - else_frame = frame.inner() - - # try to figure out if we have an extended loop. An extended loop - # is necessary if the loop is in recursive mode if the special loop - # variable is accessed in the body. - extended_loop = node.recursive or "loop" in find_undeclared( - node.iter_child_nodes(only=("body",)), ("loop",) - ) - - loop_ref = None - if extended_loop: - loop_ref = loop_frame.symbols.declare_parameter("loop") - - loop_frame.symbols.analyze_node(node, for_branch="body") - if node.else_: - else_frame.symbols.analyze_node(node, for_branch="else") - - if node.test: - loop_filter_func = self.temporary_identifier() - test_frame.symbols.analyze_node(node, for_branch="test") - self.writeline("%s(fiter):" % self.func(loop_filter_func), node.test) - self.indent() - self.enter_frame(test_frame) - self.writeline(self.environment.is_async and "async for " or "for ") - self.visit(node.target, loop_frame) - self.write(" in ") - self.write(self.environment.is_async and "auto_aiter(fiter)" or "fiter") - self.write(":") - self.indent() - self.writeline("if ", node.test) - self.visit(node.test, test_frame) - self.write(":") - self.indent() - self.writeline("yield ") - self.visit(node.target, loop_frame) - self.outdent(3) - self.leave_frame(test_frame, with_python_scope=True) - - # if we don't have an recursive loop we have to find the shadowed - # variables at that point. Because loops can be nested but the loop - # variable is a special one we have to enforce aliasing for it. 
- if node.recursive: - self.writeline( - "%s(reciter, loop_render_func, depth=0):" % self.func("loop"), node - ) - self.indent() - self.buffer(loop_frame) - - # Use the same buffer for the else frame - else_frame.buffer = loop_frame.buffer - - # make sure the loop variable is a special one and raise a template - # assertion error if a loop tries to write to loop - if extended_loop: - self.writeline("%s = missing" % loop_ref) - - for name in node.find_all(nodes.Name): - if name.ctx == "store" and name.name == "loop": - self.fail( - "Can't assign to special loop variable in for-loop target", - name.lineno, - ) - - if node.else_: - iteration_indicator = self.temporary_identifier() - self.writeline("%s = 1" % iteration_indicator) - - self.writeline(self.environment.is_async and "async for " or "for ", node) - self.visit(node.target, loop_frame) - if extended_loop: - if self.environment.is_async: - self.write(", %s in AsyncLoopContext(" % loop_ref) - else: - self.write(", %s in LoopContext(" % loop_ref) - else: - self.write(" in ") - - if node.test: - self.write("%s(" % loop_filter_func) - if node.recursive: - self.write("reciter") - else: - if self.environment.is_async and not extended_loop: - self.write("auto_aiter(") - self.visit(node.iter, frame) - if self.environment.is_async and not extended_loop: - self.write(")") - if node.test: - self.write(")") - - if node.recursive: - self.write(", undefined, loop_render_func, depth):") - else: - self.write(extended_loop and ", undefined):" or ":") - - self.indent() - self.enter_frame(loop_frame) - - self.blockvisit(node.body, loop_frame) - if node.else_: - self.writeline("%s = 0" % iteration_indicator) - self.outdent() - self.leave_frame( - loop_frame, with_python_scope=node.recursive and not node.else_ - ) - - if node.else_: - self.writeline("if %s:" % iteration_indicator) - self.indent() - self.enter_frame(else_frame) - self.blockvisit(node.else_, else_frame) - self.leave_frame(else_frame) - self.outdent() - - # if the node was recursive we have to return the buffer contents - # and start the iteration code - if node.recursive: - self.return_buffer_contents(loop_frame) - self.outdent() - self.start_write(frame, node) - if self.environment.is_async: - self.write("await ") - self.write("loop(") - if self.environment.is_async: - self.write("auto_aiter(") - self.visit(node.iter, frame) - if self.environment.is_async: - self.write(")") - self.write(", loop)") - self.end_write(frame) - - def visit_If(self, node, frame): - if_frame = frame.soft() - self.writeline("if ", node) - self.visit(node.test, if_frame) - self.write(":") - self.indent() - self.blockvisit(node.body, if_frame) - self.outdent() - for elif_ in node.elif_: - self.writeline("elif ", elif_) - self.visit(elif_.test, if_frame) - self.write(":") - self.indent() - self.blockvisit(elif_.body, if_frame) - self.outdent() - if node.else_: - self.writeline("else:") - self.indent() - self.blockvisit(node.else_, if_frame) - self.outdent() - - def visit_Macro(self, node, frame): - macro_frame, macro_ref = self.macro_body(node, frame) - self.newline() - if frame.toplevel: - if not node.name.startswith("_"): - self.write("context.exported_vars.add(%r)" % node.name) - self.writeline("context.vars[%r] = " % node.name) - self.write("%s = " % frame.symbols.ref(node.name)) - self.macro_def(macro_ref, macro_frame) - - def visit_CallBlock(self, node, frame): - call_frame, macro_ref = self.macro_body(node, frame) - self.writeline("caller = ") - self.macro_def(macro_ref, call_frame) - self.start_write(frame, 
node) - self.visit_Call(node.call, frame, forward_caller=True) - self.end_write(frame) - - def visit_FilterBlock(self, node, frame): - filter_frame = frame.inner() - filter_frame.symbols.analyze_node(node) - self.enter_frame(filter_frame) - self.buffer(filter_frame) - self.blockvisit(node.body, filter_frame) - self.start_write(frame, node) - self.visit_Filter(node.filter, filter_frame) - self.end_write(frame) - self.leave_frame(filter_frame) - - def visit_With(self, node, frame): - with_frame = frame.inner() - with_frame.symbols.analyze_node(node) - self.enter_frame(with_frame) - for target, expr in izip(node.targets, node.values): - self.newline() - self.visit(target, with_frame) - self.write(" = ") - self.visit(expr, frame) - self.blockvisit(node.body, with_frame) - self.leave_frame(with_frame) - - def visit_ExprStmt(self, node, frame): - self.newline(node) - self.visit(node.node, frame) - - _FinalizeInfo = namedtuple("_FinalizeInfo", ("const", "src")) - #: The default finalize function if the environment isn't configured - #: with one. Or if the environment has one, this is called on that - #: function's output for constants. - _default_finalize = text_type - _finalize = None - - def _make_finalize(self): - """Build the finalize function to be used on constants and at - runtime. Cached so it's only created once for all output nodes. - - Returns a ``namedtuple`` with the following attributes: - - ``const`` - A function to finalize constant data at compile time. - - ``src`` - Source code to output around nodes to be evaluated at - runtime. - """ - if self._finalize is not None: - return self._finalize - - finalize = default = self._default_finalize - src = None - - if self.environment.finalize: - src = "environment.finalize(" - env_finalize = self.environment.finalize - - def finalize(value): - return default(env_finalize(value)) - - if getattr(env_finalize, "contextfunction", False): - src += "context, " - finalize = None # noqa: F811 - elif getattr(env_finalize, "evalcontextfunction", False): - src += "context.eval_ctx, " - finalize = None - elif getattr(env_finalize, "environmentfunction", False): - src += "environment, " - - def finalize(value): - return default(env_finalize(self.environment, value)) - - self._finalize = self._FinalizeInfo(finalize, src) - return self._finalize - - def _output_const_repr(self, group): - """Given a group of constant values converted from ``Output`` - child nodes, produce a string to write to the template module - source. - """ - return repr(concat(group)) - - def _output_child_to_const(self, node, frame, finalize): - """Try to optimize a child of an ``Output`` node by trying to - convert it to constant, finalized data at compile time. - - If :exc:`Impossible` is raised, the node is not constant and - will be evaluated at runtime. Any other exception will also be - evaluated at runtime for easier debugging. - """ - const = node.as_const(frame.eval_ctx) - - if frame.eval_ctx.autoescape: - const = escape(const) - - # Template data doesn't go through finalize. - if isinstance(node, nodes.TemplateData): - return text_type(const) - - return finalize.const(const) - - def _output_child_pre(self, node, frame, finalize): - """Output extra source code before visiting a child of an - ``Output`` node. 
- """ - if frame.eval_ctx.volatile: - self.write("(escape if context.eval_ctx.autoescape else to_string)(") - elif frame.eval_ctx.autoescape: - self.write("escape(") - else: - self.write("to_string(") - - if finalize.src is not None: - self.write(finalize.src) - - def _output_child_post(self, node, frame, finalize): - """Output extra source code after visiting a child of an - ``Output`` node. - """ - self.write(")") - - if finalize.src is not None: - self.write(")") - - def visit_Output(self, node, frame): - # If an extends is active, don't render outside a block. - if frame.require_output_check: - # A top-level extends is known to exist at compile time. - if self.has_known_extends: - return - - self.writeline("if parent_template is None:") - self.indent() - - finalize = self._make_finalize() - body = [] - - # Evaluate constants at compile time if possible. Each item in - # body will be either a list of static data or a node to be - # evaluated at runtime. - for child in node.nodes: - try: - if not ( - # If the finalize function requires runtime context, - # constants can't be evaluated at compile time. - finalize.const - # Unless it's basic template data that won't be - # finalized anyway. - or isinstance(child, nodes.TemplateData) - ): - raise nodes.Impossible() - - const = self._output_child_to_const(child, frame, finalize) - except (nodes.Impossible, Exception): - # The node was not constant and needs to be evaluated at - # runtime. Or another error was raised, which is easier - # to debug at runtime. - body.append(child) - continue - - if body and isinstance(body[-1], list): - body[-1].append(const) - else: - body.append([const]) - - if frame.buffer is not None: - if len(body) == 1: - self.writeline("%s.append(" % frame.buffer) - else: - self.writeline("%s.extend((" % frame.buffer) - - self.indent() - - for item in body: - if isinstance(item, list): - # A group of constant data to join and output. - val = self._output_const_repr(item) - - if frame.buffer is None: - self.writeline("yield " + val) - else: - self.writeline(val + ",") - else: - if frame.buffer is None: - self.writeline("yield ", item) - else: - self.newline(item) - - # A node to be evaluated at runtime. - self._output_child_pre(item, frame, finalize) - self.visit(item, frame) - self._output_child_post(item, frame, finalize) - - if frame.buffer is not None: - self.write(",") - - if frame.buffer is not None: - self.outdent() - self.writeline(")" if len(body) == 1 else "))") - - if frame.require_output_check: - self.outdent() - - def visit_Assign(self, node, frame): - self.push_assign_tracking() - self.newline(node) - self.visit(node.target, frame) - self.write(" = ") - self.visit(node.node, frame) - self.pop_assign_tracking(frame) - - def visit_AssignBlock(self, node, frame): - self.push_assign_tracking() - block_frame = frame.inner() - # This is a special case. Since a set block always captures we - # will disable output checks. This way one can use set blocks - # toplevel even in extended templates. 
- block_frame.require_output_check = False - block_frame.symbols.analyze_node(node) - self.enter_frame(block_frame) - self.buffer(block_frame) - self.blockvisit(node.body, block_frame) - self.newline(node) - self.visit(node.target, frame) - self.write(" = (Markup if context.eval_ctx.autoescape else identity)(") - if node.filter is not None: - self.visit_Filter(node.filter, block_frame) - else: - self.write("concat(%s)" % block_frame.buffer) - self.write(")") - self.pop_assign_tracking(frame) - self.leave_frame(block_frame) - - # -- Expression Visitors - - def visit_Name(self, node, frame): - if node.ctx == "store" and frame.toplevel: - if self._assign_stack: - self._assign_stack[-1].add(node.name) - ref = frame.symbols.ref(node.name) - - # If we are looking up a variable we might have to deal with the - # case where it's undefined. We can skip that case if the load - # instruction indicates a parameter which are always defined. - if node.ctx == "load": - load = frame.symbols.find_load(ref) - if not ( - load is not None - and load[0] == VAR_LOAD_PARAMETER - and not self.parameter_is_undeclared(ref) - ): - self.write( - "(undefined(name=%r) if %s is missing else %s)" - % (node.name, ref, ref) - ) - return - - self.write(ref) - - def visit_NSRef(self, node, frame): - # NSRefs can only be used to store values; since they use the normal - # `foo.bar` notation they will be parsed as a normal attribute access - # when used anywhere but in a `set` context - ref = frame.symbols.ref(node.name) - self.writeline("if not isinstance(%s, Namespace):" % ref) - self.indent() - self.writeline( - "raise TemplateRuntimeError(%r)" - % "cannot assign attribute on non-namespace object" - ) - self.outdent() - self.writeline("%s[%r]" % (ref, node.attr)) - - def visit_Const(self, node, frame): - val = node.as_const(frame.eval_ctx) - if isinstance(val, float): - self.write(str(val)) - else: - self.write(repr(val)) - - def visit_TemplateData(self, node, frame): - try: - self.write(repr(node.as_const(frame.eval_ctx))) - except nodes.Impossible: - self.write( - "(Markup if context.eval_ctx.autoescape else identity)(%r)" % node.data - ) - - def visit_Tuple(self, node, frame): - self.write("(") - idx = -1 - for idx, item in enumerate(node.items): - if idx: - self.write(", ") - self.visit(item, frame) - self.write(idx == 0 and ",)" or ")") - - def visit_List(self, node, frame): - self.write("[") - for idx, item in enumerate(node.items): - if idx: - self.write(", ") - self.visit(item, frame) - self.write("]") - - def visit_Dict(self, node, frame): - self.write("{") - for idx, item in enumerate(node.items): - if idx: - self.write(", ") - self.visit(item.key, frame) - self.write(": ") - self.visit(item.value, frame) - self.write("}") - - def binop(operator, interceptable=True): # noqa: B902 - @optimizeconst - def visitor(self, node, frame): - if ( - self.environment.sandboxed - and operator in self.environment.intercepted_binops - ): - self.write("environment.call_binop(context, %r, " % operator) - self.visit(node.left, frame) - self.write(", ") - self.visit(node.right, frame) - else: - self.write("(") - self.visit(node.left, frame) - self.write(" %s " % operator) - self.visit(node.right, frame) - self.write(")") - - return visitor - - def uaop(operator, interceptable=True): # noqa: B902 - @optimizeconst - def visitor(self, node, frame): - if ( - self.environment.sandboxed - and operator in self.environment.intercepted_unops - ): - self.write("environment.call_unop(context, %r, " % operator) - self.visit(node.node, frame) - 
else: - self.write("(" + operator) - self.visit(node.node, frame) - self.write(")") - - return visitor - - visit_Add = binop("+") - visit_Sub = binop("-") - visit_Mul = binop("*") - visit_Div = binop("/") - visit_FloorDiv = binop("//") - visit_Pow = binop("**") - visit_Mod = binop("%") - visit_And = binop("and", interceptable=False) - visit_Or = binop("or", interceptable=False) - visit_Pos = uaop("+") - visit_Neg = uaop("-") - visit_Not = uaop("not ", interceptable=False) - del binop, uaop - - @optimizeconst - def visit_Concat(self, node, frame): - if frame.eval_ctx.volatile: - func_name = "(context.eval_ctx.volatile and markup_join or unicode_join)" - elif frame.eval_ctx.autoescape: - func_name = "markup_join" - else: - func_name = "unicode_join" - self.write("%s((" % func_name) - for arg in node.nodes: - self.visit(arg, frame) - self.write(", ") - self.write("))") - - @optimizeconst - def visit_Compare(self, node, frame): - self.write("(") - self.visit(node.expr, frame) - for op in node.ops: - self.visit(op, frame) - self.write(")") - - def visit_Operand(self, node, frame): - self.write(" %s " % operators[node.op]) - self.visit(node.expr, frame) - - @optimizeconst - def visit_Getattr(self, node, frame): - if self.environment.is_async: - self.write("(await auto_await(") - - self.write("environment.getattr(") - self.visit(node.node, frame) - self.write(", %r)" % node.attr) - - if self.environment.is_async: - self.write("))") - - @optimizeconst - def visit_Getitem(self, node, frame): - # slices bypass the environment getitem method. - if isinstance(node.arg, nodes.Slice): - self.visit(node.node, frame) - self.write("[") - self.visit(node.arg, frame) - self.write("]") - else: - if self.environment.is_async: - self.write("(await auto_await(") - - self.write("environment.getitem(") - self.visit(node.node, frame) - self.write(", ") - self.visit(node.arg, frame) - self.write(")") - - if self.environment.is_async: - self.write("))") - - def visit_Slice(self, node, frame): - if node.start is not None: - self.visit(node.start, frame) - self.write(":") - if node.stop is not None: - self.visit(node.stop, frame) - if node.step is not None: - self.write(":") - self.visit(node.step, frame) - - @optimizeconst - def visit_Filter(self, node, frame): - if self.environment.is_async: - self.write("await auto_await(") - self.write(self.filters[node.name] + "(") - func = self.environment.filters.get(node.name) - if func is None: - self.fail("no filter named %r" % node.name, node.lineno) - if getattr(func, "contextfilter", False): - self.write("context, ") - elif getattr(func, "evalcontextfilter", False): - self.write("context.eval_ctx, ") - elif getattr(func, "environmentfilter", False): - self.write("environment, ") - - # if the filter node is None we are inside a filter block - # and want to write to the current buffer - if node.node is not None: - self.visit(node.node, frame) - elif frame.eval_ctx.volatile: - self.write( - "(context.eval_ctx.autoescape and" - " Markup(concat(%s)) or concat(%s))" % (frame.buffer, frame.buffer) - ) - elif frame.eval_ctx.autoescape: - self.write("Markup(concat(%s))" % frame.buffer) - else: - self.write("concat(%s)" % frame.buffer) - self.signature(node, frame) - self.write(")") - if self.environment.is_async: - self.write(")") - - @optimizeconst - def visit_Test(self, node, frame): - self.write(self.tests[node.name] + "(") - if node.name not in self.environment.tests: - self.fail("no test named %r" % node.name, node.lineno) - self.visit(node.node, frame) - self.signature(node, 
frame) - self.write(")") - - @optimizeconst - def visit_CondExpr(self, node, frame): - def write_expr2(): - if node.expr2 is not None: - return self.visit(node.expr2, frame) - self.write( - "cond_expr_undefined(%r)" - % ( - "the inline if-" - "expression on %s evaluated to false and " - "no else section was defined." % self.position(node) - ) - ) - - self.write("(") - self.visit(node.expr1, frame) - self.write(" if ") - self.visit(node.test, frame) - self.write(" else ") - write_expr2() - self.write(")") - - @optimizeconst - def visit_Call(self, node, frame, forward_caller=False): - if self.environment.is_async: - self.write("await auto_await(") - if self.environment.sandboxed: - self.write("environment.call(context, ") - else: - self.write("context.call(") - self.visit(node.node, frame) - extra_kwargs = forward_caller and {"caller": "caller"} or None - self.signature(node, frame, extra_kwargs) - self.write(")") - if self.environment.is_async: - self.write(")") - - def visit_Keyword(self, node, frame): - self.write(node.key + "=") - self.visit(node.value, frame) - - # -- Unused nodes for extensions - - def visit_MarkSafe(self, node, frame): - self.write("Markup(") - self.visit(node.expr, frame) - self.write(")") - - def visit_MarkSafeIfAutoescape(self, node, frame): - self.write("(context.eval_ctx.autoescape and Markup or identity)(") - self.visit(node.expr, frame) - self.write(")") - - def visit_EnvironmentAttribute(self, node, frame): - self.write("environment." + node.name) - - def visit_ExtensionAttribute(self, node, frame): - self.write("environment.extensions[%r].%s" % (node.identifier, node.name)) - - def visit_ImportedName(self, node, frame): - self.write(self.import_aliases[node.importname]) - - def visit_InternalName(self, node, frame): - self.write(node.name) - - def visit_ContextReference(self, node, frame): - self.write("context") - - def visit_DerivedContextReference(self, node, frame): - self.write(self.derive_context(frame)) - - def visit_Continue(self, node, frame): - self.writeline("continue", node) - - def visit_Break(self, node, frame): - self.writeline("break", node) - - def visit_Scope(self, node, frame): - scope_frame = frame.inner() - scope_frame.symbols.analyze_node(node) - self.enter_frame(scope_frame) - self.blockvisit(node.body, scope_frame) - self.leave_frame(scope_frame) - - def visit_OverlayScope(self, node, frame): - ctx = self.temporary_identifier() - self.writeline("%s = %s" % (ctx, self.derive_context(frame))) - self.writeline("%s.vars = " % ctx) - self.visit(node.context, frame) - self.push_context_reference(ctx) - - scope_frame = frame.inner(isolated=True) - scope_frame.symbols.analyze_node(node) - self.enter_frame(scope_frame) - self.blockvisit(node.body, scope_frame) - self.leave_frame(scope_frame) - self.pop_context_reference() - - def visit_EvalContextModifier(self, node, frame): - for keyword in node.options: - self.writeline("context.eval_ctx.%s = " % keyword.key) - self.visit(keyword.value, frame) - try: - val = keyword.value.as_const(frame.eval_ctx) - except nodes.Impossible: - frame.eval_ctx.volatile = True - else: - setattr(frame.eval_ctx, keyword.key, val) - - def visit_ScopedEvalContextModifier(self, node, frame): - old_ctx_name = self.temporary_identifier() - saved_ctx = frame.eval_ctx.save() - self.writeline("%s = context.eval_ctx.save()" % old_ctx_name) - self.visit_EvalContextModifier(node, frame) - for child in node.body: - self.visit(child, frame) - frame.eval_ctx.revert(saved_ctx) - self.writeline("context.eval_ctx.revert(%s)" % 
old_ctx_name) diff --git a/venv/lib/python3.8/site-packages/jinja2/constants.py b/venv/lib/python3.8/site-packages/jinja2/constants.py deleted file mode 100644 index bf7f2ca7..00000000 --- a/venv/lib/python3.8/site-packages/jinja2/constants.py +++ /dev/null @@ -1,21 +0,0 @@ -# -*- coding: utf-8 -*- -#: list of lorem ipsum words used by the lipsum() helper function -LOREM_IPSUM_WORDS = u"""\ -a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at -auctor augue bibendum blandit class commodo condimentum congue consectetuer -consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus -diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend -elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames -faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac -hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum -justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem -luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie -mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non -nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque -penatibus per pharetra phasellus placerat platea porta porttitor posuere -potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus -ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit -sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor -tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices -ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus -viverra volutpat vulputate""" diff --git a/venv/lib/python3.8/site-packages/jinja2/debug.py b/venv/lib/python3.8/site-packages/jinja2/debug.py deleted file mode 100644 index d2c5a06b..00000000 --- a/venv/lib/python3.8/site-packages/jinja2/debug.py +++ /dev/null @@ -1,271 +0,0 @@ -import sys -from types import CodeType - -from . import TemplateSyntaxError -from ._compat import PYPY -from .utils import internal_code -from .utils import missing - - -def rewrite_traceback_stack(source=None): - """Rewrite the current exception to replace any tracebacks from - within compiled template code with tracebacks that look like they - came from the template source. - - This must be called within an ``except`` block. - - :param exc_info: A :meth:`sys.exc_info` tuple. If not provided, - the current ``exc_info`` is used. - :param source: For ``TemplateSyntaxError``, the original source if - known. - :return: A :meth:`sys.exc_info` tuple that can be re-raised. - """ - exc_type, exc_value, tb = sys.exc_info() - - if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated: - exc_value.translated = True - exc_value.source = source - - try: - # Remove the old traceback on Python 3, otherwise the frames - # from the compiler still show up. - exc_value.with_traceback(None) - except AttributeError: - pass - - # Outside of runtime, so the frame isn't executing template - # code, but it still needs to point at the template. - tb = fake_traceback( - exc_value, None, exc_value.filename or "", exc_value.lineno - ) - else: - # Skip the frame for the render function. - tb = tb.tb_next - - stack = [] - - # Build the stack of traceback object, replacing any in template - # code with the source file and line information. - while tb is not None: - # Skip frames decorated with @internalcode. 
These are internal - # calls that aren't useful in template debugging output. - if tb.tb_frame.f_code in internal_code: - tb = tb.tb_next - continue - - template = tb.tb_frame.f_globals.get("__jinja_template__") - - if template is not None: - lineno = template.get_corresponding_lineno(tb.tb_lineno) - fake_tb = fake_traceback(exc_value, tb, template.filename, lineno) - stack.append(fake_tb) - else: - stack.append(tb) - - tb = tb.tb_next - - tb_next = None - - # Assign tb_next in reverse to avoid circular references. - for tb in reversed(stack): - tb_next = tb_set_next(tb, tb_next) - - return exc_type, exc_value, tb_next - - -def fake_traceback(exc_value, tb, filename, lineno): - """Produce a new traceback object that looks like it came from the - template source instead of the compiled code. The filename, line - number, and location name will point to the template, and the local - variables will be the current template context. - - :param exc_value: The original exception to be re-raised to create - the new traceback. - :param tb: The original traceback to get the local variables and - code info from. - :param filename: The template filename. - :param lineno: The line number in the template source. - """ - if tb is not None: - # Replace the real locals with the context that would be - # available at that point in the template. - locals = get_template_locals(tb.tb_frame.f_locals) - locals.pop("__jinja_exception__", None) - else: - locals = {} - - globals = { - "__name__": filename, - "__file__": filename, - "__jinja_exception__": exc_value, - } - # Raise an exception at the correct line number. - code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec") - - # Build a new code object that points to the template file and - # replaces the location with a block name. - try: - location = "template" - - if tb is not None: - function = tb.tb_frame.f_code.co_name - - if function == "root": - location = "top-level template code" - elif function.startswith("block_"): - location = 'block "%s"' % function[6:] - - # Collect arguments for the new code object. CodeType only - # accepts positional arguments, and arguments were inserted in - # new Python versions. - code_args = [] - - for attr in ( - "argcount", - "posonlyargcount", # Python 3.8 - "kwonlyargcount", # Python 3 - "nlocals", - "stacksize", - "flags", - "code", # codestring - "consts", # constants - "names", - "varnames", - ("filename", filename), - ("name", location), - "firstlineno", - "lnotab", - "freevars", - "cellvars", - ): - if isinstance(attr, tuple): - # Replace with given value. - code_args.append(attr[1]) - continue - - try: - # Copy original value if it exists. - code_args.append(getattr(code, "co_" + attr)) - except AttributeError: - # Some arguments were added later. - continue - - code = CodeType(*code_args) - except Exception: - # Some environments such as Google App Engine don't support - # modifying code objects. - pass - - # Execute the new code, which is guaranteed to raise, and return - # the new traceback without this frame. - try: - exec(code, globals, locals) - except BaseException: - return sys.exc_info()[2].tb_next - - -def get_template_locals(real_locals): - """Based on the runtime locals, get the context that would be - available at that point in the template. - """ - # Start with the current template context. 
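# The line-number trick fake_traceback relies on above, in isolation:
# compile a stub that raises at a chosen line so the resulting frame appears
# to live in the template file. A self-contained sketch with hypothetical
# filename and line number:
import sys

exc = RuntimeError("boom")
code = compile("\n" * 41 + "raise __jinja_exception__", "demo.html", "exec")
try:
    exec(code, {"__jinja_exception__": exc})
except BaseException:
    tb = sys.exc_info()[2].tb_next  # skip the frame of the exec() call itself
    print(tb.tb_lineno, tb.tb_frame.f_code.co_filename)  # -> 42 demo.html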
- ctx = real_locals.get("context") - - if ctx: - data = ctx.get_all().copy() - else: - data = {} - - # Might be in a derived context that only sets local variables - # rather than pushing a context. Local variables follow the scheme - # l_depth_name. Find the highest-depth local that has a value for - # each name. - local_overrides = {} - - for name, value in real_locals.items(): - if not name.startswith("l_") or value is missing: - # Not a template variable, or no longer relevant. - continue - - try: - _, depth, name = name.split("_", 2) - depth = int(depth) - except ValueError: - continue - - cur_depth = local_overrides.get(name, (-1,))[0] - - if cur_depth < depth: - local_overrides[name] = (depth, value) - - # Modify the context with any derived context. - for name, (_, value) in local_overrides.items(): - if value is missing: - data.pop(name, None) - else: - data[name] = value - - return data - - -if sys.version_info >= (3, 7): - # tb_next is directly assignable as of Python 3.7 - def tb_set_next(tb, tb_next): - tb.tb_next = tb_next - return tb - - -elif PYPY: - # PyPy might have special support, and won't work with ctypes. - try: - import tputil - except ImportError: - # Without tproxy support, use the original traceback. - def tb_set_next(tb, tb_next): - return tb - - else: - # With tproxy support, create a proxy around the traceback that - # returns the new tb_next. - def tb_set_next(tb, tb_next): - def controller(op): - if op.opname == "__getattribute__" and op.args[0] == "tb_next": - return tb_next - - return op.delegate() - - return tputil.make_proxy(controller, obj=tb) - - -else: - # Use ctypes to assign tb_next at the C level since it's read-only - # from Python. - import ctypes - - class _CTraceback(ctypes.Structure): - _fields_ = [ - # Extra PyObject slots when compiled with Py_TRACE_REFS. - ( - "PyObject_HEAD", - ctypes.c_byte * (32 if hasattr(sys, "getobjects") else 16), - ), - # Only care about tb_next as an object, not a traceback. - ("tb_next", ctypes.py_object), - ] - - def tb_set_next(tb, tb_next): - c_tb = _CTraceback.from_address(id(tb)) - - # Clear out the old tb_next. - if tb.tb_next is not None: - c_tb_next = ctypes.py_object(tb.tb_next) - c_tb.tb_next = ctypes.py_object() - ctypes.pythonapi.Py_DecRef(c_tb_next) - - # Assign the new tb_next. 
- if tb_next is not None: - c_tb_next = ctypes.py_object(tb_next) - ctypes.pythonapi.Py_IncRef(c_tb_next) - c_tb.tb_next = c_tb_next - - return tb diff --git a/venv/lib/python3.8/site-packages/jinja2/defaults.py b/venv/lib/python3.8/site-packages/jinja2/defaults.py deleted file mode 100644 index 8e0e7d77..00000000 --- a/venv/lib/python3.8/site-packages/jinja2/defaults.py +++ /dev/null @@ -1,44 +0,0 @@ -# -*- coding: utf-8 -*- -from ._compat import range_type -from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401 -from .tests import TESTS as DEFAULT_TESTS # noqa: F401 -from .utils import Cycler -from .utils import generate_lorem_ipsum -from .utils import Joiner -from .utils import Namespace - -# defaults for the parser / lexer -BLOCK_START_STRING = "{%" -BLOCK_END_STRING = "%}" -VARIABLE_START_STRING = "{{" -VARIABLE_END_STRING = "}}" -COMMENT_START_STRING = "{#" -COMMENT_END_STRING = "#}" -LINE_STATEMENT_PREFIX = None -LINE_COMMENT_PREFIX = None -TRIM_BLOCKS = False -LSTRIP_BLOCKS = False -NEWLINE_SEQUENCE = "\n" -KEEP_TRAILING_NEWLINE = False - -# default filters, tests and namespace - -DEFAULT_NAMESPACE = { - "range": range_type, - "dict": dict, - "lipsum": generate_lorem_ipsum, - "cycler": Cycler, - "joiner": Joiner, - "namespace": Namespace, -} - -# default policies -DEFAULT_POLICIES = { - "compiler.ascii_str": True, - "urlize.rel": "noopener", - "urlize.target": None, - "truncate.leeway": 5, - "json.dumps_function": None, - "json.dumps_kwargs": {"sort_keys": True}, - "ext.i18n.trimmed": False, -} diff --git a/venv/lib/python3.8/site-packages/jinja2/environment.py b/venv/lib/python3.8/site-packages/jinja2/environment.py deleted file mode 100644 index bf44b9de..00000000 --- a/venv/lib/python3.8/site-packages/jinja2/environment.py +++ /dev/null @@ -1,1362 +0,0 @@ -# -*- coding: utf-8 -*- -"""Classes for managing templates and their runtime and compile time -options. -""" -import os -import sys -import weakref -from functools import partial -from functools import reduce - -from markupsafe import Markup - -from . 
import nodes -from ._compat import encode_filename -from ._compat import implements_iterator -from ._compat import implements_to_string -from ._compat import iteritems -from ._compat import PY2 -from ._compat import PYPY -from ._compat import reraise -from ._compat import string_types -from ._compat import text_type -from .compiler import CodeGenerator -from .compiler import generate -from .defaults import BLOCK_END_STRING -from .defaults import BLOCK_START_STRING -from .defaults import COMMENT_END_STRING -from .defaults import COMMENT_START_STRING -from .defaults import DEFAULT_FILTERS -from .defaults import DEFAULT_NAMESPACE -from .defaults import DEFAULT_POLICIES -from .defaults import DEFAULT_TESTS -from .defaults import KEEP_TRAILING_NEWLINE -from .defaults import LINE_COMMENT_PREFIX -from .defaults import LINE_STATEMENT_PREFIX -from .defaults import LSTRIP_BLOCKS -from .defaults import NEWLINE_SEQUENCE -from .defaults import TRIM_BLOCKS -from .defaults import VARIABLE_END_STRING -from .defaults import VARIABLE_START_STRING -from .exceptions import TemplateNotFound -from .exceptions import TemplateRuntimeError -from .exceptions import TemplatesNotFound -from .exceptions import TemplateSyntaxError -from .exceptions import UndefinedError -from .lexer import get_lexer -from .lexer import TokenStream -from .nodes import EvalContext -from .parser import Parser -from .runtime import Context -from .runtime import new_context -from .runtime import Undefined -from .utils import concat -from .utils import consume -from .utils import have_async_gen -from .utils import import_string -from .utils import internalcode -from .utils import LRUCache -from .utils import missing - -# for direct template usage we have up to ten living environments -_spontaneous_environments = LRUCache(10) - - -def get_spontaneous_environment(cls, *args): - """Return a new spontaneous environment. A spontaneous environment - is used for templates created directly rather than through an - existing environment. - - :param cls: Environment class to create. - :param args: Positional arguments passed to environment. - """ - key = (cls, args) - - try: - return _spontaneous_environments[key] - except KeyError: - _spontaneous_environments[key] = env = cls(*args) - env.shared = True - return env - - -def create_cache(size): - """Return the cache class for the given size.""" - if size == 0: - return None - if size < 0: - return {} - return LRUCache(size) - - -def copy_cache(cache): - """Create an empty copy of the given cache.""" - if cache is None: - return None - elif type(cache) is dict: - return {} - return LRUCache(cache.capacity) - - -def load_extensions(environment, extensions): - """Load the extensions from the list and bind it to the environment. - Returns a dict of instantiated environments. - """ - result = {} - for extension in extensions: - if isinstance(extension, string_types): - extension = import_string(extension) - result[extension.identifier] = extension(environment) - return result - - -def fail_for_missing_callable(string, name): - msg = string % name - if isinstance(name, Undefined): - try: - name._fail_with_undefined_error() - except Exception as e: - msg = "%s (%s; did you forget to quote the callable name?)" % (msg, e) - raise TemplateRuntimeError(msg) - - -def _environment_sanity_check(environment): - """Perform a sanity check on the environment.""" - assert issubclass( - environment.undefined, Undefined - ), "undefined must be a subclass of undefined because filters depend on it." 
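# A minimal sketch of what the delimiter assertion just below guards
# against, assuming the public jinja2 2.x constructor:
from jinja2 import Environment

try:
    Environment(block_start_string="{{", variable_start_string="{{")
except AssertionError as e:
    print(e)  # -> block, variable and comment start strings must be different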
- assert ( - environment.block_start_string - != environment.variable_start_string - != environment.comment_start_string - ), "block, variable and comment start strings must be different" - assert environment.newline_sequence in ( - "\r", - "\r\n", - "\n", - ), "newline_sequence set to unknown line ending string." - return environment - - -class Environment(object): - r"""The core component of Jinja is the `Environment`. It contains - important shared variables like configuration, filters, tests, - globals and others. Instances of this class may be modified if - they are not shared and if no template was loaded so far. - Modifications on environments after the first template was loaded - will lead to surprising effects and undefined behavior. - - Here are the possible initialization parameters: - - `block_start_string` - The string marking the beginning of a block. Defaults to ``'{%'``. - - `block_end_string` - The string marking the end of a block. Defaults to ``'%}'``. - - `variable_start_string` - The string marking the beginning of a print statement. - Defaults to ``'{{'``. - - `variable_end_string` - The string marking the end of a print statement. Defaults to - ``'}}'``. - - `comment_start_string` - The string marking the beginning of a comment. Defaults to ``'{#'``. - - `comment_end_string` - The string marking the end of a comment. Defaults to ``'#}'``. - - `line_statement_prefix` - If given and a string, this will be used as prefix for line based - statements. See also :ref:`line-statements`. - - `line_comment_prefix` - If given and a string, this will be used as prefix for line based - comments. See also :ref:`line-statements`. - - .. versionadded:: 2.2 - - `trim_blocks` - If this is set to ``True`` the first newline after a block is - removed (block, not variable tag!). Defaults to `False`. - - `lstrip_blocks` - If this is set to ``True`` leading spaces and tabs are stripped - from the start of a line to a block. Defaults to `False`. - - `newline_sequence` - The sequence that starts a newline. Must be one of ``'\r'``, - ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a - useful default for Linux and OS X systems as well as web - applications. - - `keep_trailing_newline` - Preserve the trailing newline when rendering templates. - The default is ``False``, which causes a single newline, - if present, to be stripped from the end of the template. - - .. versionadded:: 2.7 - - `extensions` - List of Jinja extensions to use. This can either be import paths - as strings or extension classes. For more information have a - look at :ref:`the extensions documentation `. - - `optimized` - should the optimizer be enabled? Default is ``True``. - - `undefined` - :class:`Undefined` or a subclass of it that is used to represent - undefined values in the template. - - `finalize` - A callable that can be used to process the result of a variable - expression before it is output. For example one can convert - ``None`` implicitly into an empty string here. - - `autoescape` - If set to ``True`` the XML/HTML autoescaping feature is enabled by - default. For more details about autoescaping see - :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also - be a callable that is passed the template name and has to - return ``True`` or ``False`` depending on autoescape should be - enabled by default. - - .. versionchanged:: 2.4 - `autoescape` can now be a function - - `loader` - The template loader for this environment. - - `cache_size` - The size of the cache. 
Per default this is ``400`` which means
-        that if more than 400 templates are loaded the loader will clean
-        out the least recently used template. If the cache size is set to
-        ``0`` templates are recompiled all the time, if the cache size is
-        ``-1`` the cache will not be cleaned.
-
-        .. versionchanged:: 2.8
-           The cache size was increased to 400 from a low 50.
-
-    `auto_reload`
-        Some loaders load templates from locations where the template
-        sources may change (ie: file system or database). If
-        ``auto_reload`` is set to ``True`` (default) every time a template is
-        requested the loader checks if the source changed and if yes, it
-        will reload the template. For higher performance it's possible to
-        disable that.
-
-    `bytecode_cache`
-        If set to a bytecode cache object, this object will provide a
-        cache for the internal Jinja bytecode so that templates don't
-        have to be parsed if they were not changed.
-
-        See :ref:`bytecode-cache` for more information.
-
-    `enable_async`
-        If set to true this enables async template execution which allows
-        you to take advantage of newer Python features. This requires
-        Python 3.6 or later.
-    """
-
-    #: if this environment is sandboxed. Modifying this variable won't make
-    #: the environment sandboxed though. For a real sandboxed environment
-    #: have a look at jinja2.sandbox. This flag alone controls the code
-    #: generation by the compiler.
-    sandboxed = False
-
-    #: True if the environment is just an overlay
-    overlayed = False
-
-    #: the environment this environment is linked to if it is an overlay
-    linked_to = None
-
-    #: shared environments have this set to `True`. A shared environment
-    #: must not be modified
-    shared = False
-
-    #: the class that is used for code generation. See
-    #: :class:`~jinja2.compiler.CodeGenerator` for more information.
-    code_generator_class = CodeGenerator
-
-    #: the context class that is used for templates. See
-    #: :class:`~jinja2.runtime.Context` for more information.
-    context_class = Context
-
-    def __init__(
-        self,
-        block_start_string=BLOCK_START_STRING,
-        block_end_string=BLOCK_END_STRING,
-        variable_start_string=VARIABLE_START_STRING,
-        variable_end_string=VARIABLE_END_STRING,
-        comment_start_string=COMMENT_START_STRING,
-        comment_end_string=COMMENT_END_STRING,
-        line_statement_prefix=LINE_STATEMENT_PREFIX,
-        line_comment_prefix=LINE_COMMENT_PREFIX,
-        trim_blocks=TRIM_BLOCKS,
-        lstrip_blocks=LSTRIP_BLOCKS,
-        newline_sequence=NEWLINE_SEQUENCE,
-        keep_trailing_newline=KEEP_TRAILING_NEWLINE,
-        extensions=(),
-        optimized=True,
-        undefined=Undefined,
-        finalize=None,
-        autoescape=False,
-        loader=None,
-        cache_size=400,
-        auto_reload=True,
-        bytecode_cache=None,
-        enable_async=False,
-    ):
-        # !!Important notice!!
-        # The constructor accepts quite a few arguments that should be
-        # passed by keyword rather than position. However it's important to
-        # not change the order of arguments because it's used at least
-        # internally in those cases:
-        #   - spontaneous environments (i18n extension and Template)
-        #   - unittests
-        # If parameter changes are required only add parameters at the end
-        # and don't change the arguments (or the defaults!) of the arguments
-        # existing already.
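# Typical construction using the keyword arguments documented above; a
# minimal sketch (the loader contents and template name are hypothetical):
from jinja2 import DictLoader, Environment

env = Environment(
    loader=DictLoader({"hi.txt": "Hello {{ name }}!"}),
    trim_blocks=True,
    autoescape=False,
)
print(env.get_template("hi.txt").render(name="World"))  # -> Hello World!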
- - # lexer / parser information - self.block_start_string = block_start_string - self.block_end_string = block_end_string - self.variable_start_string = variable_start_string - self.variable_end_string = variable_end_string - self.comment_start_string = comment_start_string - self.comment_end_string = comment_end_string - self.line_statement_prefix = line_statement_prefix - self.line_comment_prefix = line_comment_prefix - self.trim_blocks = trim_blocks - self.lstrip_blocks = lstrip_blocks - self.newline_sequence = newline_sequence - self.keep_trailing_newline = keep_trailing_newline - - # runtime information - self.undefined = undefined - self.optimized = optimized - self.finalize = finalize - self.autoescape = autoescape - - # defaults - self.filters = DEFAULT_FILTERS.copy() - self.tests = DEFAULT_TESTS.copy() - self.globals = DEFAULT_NAMESPACE.copy() - - # set the loader provided - self.loader = loader - self.cache = create_cache(cache_size) - self.bytecode_cache = bytecode_cache - self.auto_reload = auto_reload - - # configurable policies - self.policies = DEFAULT_POLICIES.copy() - - # load extensions - self.extensions = load_extensions(self, extensions) - - self.enable_async = enable_async - self.is_async = self.enable_async and have_async_gen - if self.is_async: - # runs patch_all() to enable async support - from . import asyncsupport # noqa: F401 - - _environment_sanity_check(self) - - def add_extension(self, extension): - """Adds an extension after the environment was created. - - .. versionadded:: 2.5 - """ - self.extensions.update(load_extensions(self, [extension])) - - def extend(self, **attributes): - """Add the items to the instance of the environment if they do not exist - yet. This is used by :ref:`extensions ` to register - callbacks and configuration values without breaking inheritance. - """ - for key, value in iteritems(attributes): - if not hasattr(self, key): - setattr(self, key, value) - - def overlay( - self, - block_start_string=missing, - block_end_string=missing, - variable_start_string=missing, - variable_end_string=missing, - comment_start_string=missing, - comment_end_string=missing, - line_statement_prefix=missing, - line_comment_prefix=missing, - trim_blocks=missing, - lstrip_blocks=missing, - extensions=missing, - optimized=missing, - undefined=missing, - finalize=missing, - autoescape=missing, - loader=missing, - cache_size=missing, - auto_reload=missing, - bytecode_cache=missing, - ): - """Create a new overlay environment that shares all the data with the - current environment except for cache and the overridden attributes. - Extensions cannot be removed for an overlayed environment. An overlayed - environment automatically gets all the extensions of the environment it - is linked to plus optional extra extensions. - - Creating overlays should happen after the initial environment was set - up completely. Not all attributes are truly linked, some are just - copied over so modifications on the original environment may not shine - through. 
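# The overlay behaviour described above from the caller's side; a minimal
# sketch assuming the public jinja2 2.x API:
from jinja2 import Environment

html_env = Environment(autoescape=True)
text_env = html_env.overlay(autoescape=False)  # shares data, overrides one flag
print(html_env.autoescape, text_env.autoescape)  # -> True False
print(text_env.linked_to is html_env)  # -> True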
- """ - args = dict(locals()) - del args["self"], args["cache_size"], args["extensions"] - - rv = object.__new__(self.__class__) - rv.__dict__.update(self.__dict__) - rv.overlayed = True - rv.linked_to = self - - for key, value in iteritems(args): - if value is not missing: - setattr(rv, key, value) - - if cache_size is not missing: - rv.cache = create_cache(cache_size) - else: - rv.cache = copy_cache(self.cache) - - rv.extensions = {} - for key, value in iteritems(self.extensions): - rv.extensions[key] = value.bind(rv) - if extensions is not missing: - rv.extensions.update(load_extensions(rv, extensions)) - - return _environment_sanity_check(rv) - - lexer = property(get_lexer, doc="The lexer for this environment.") - - def iter_extensions(self): - """Iterates over the extensions by priority.""" - return iter(sorted(self.extensions.values(), key=lambda x: x.priority)) - - def getitem(self, obj, argument): - """Get an item or attribute of an object but prefer the item.""" - try: - return obj[argument] - except (AttributeError, TypeError, LookupError): - if isinstance(argument, string_types): - try: - attr = str(argument) - except Exception: - pass - else: - try: - return getattr(obj, attr) - except AttributeError: - pass - return self.undefined(obj=obj, name=argument) - - def getattr(self, obj, attribute): - """Get an item or attribute of an object but prefer the attribute. - Unlike :meth:`getitem` the attribute *must* be a bytestring. - """ - try: - return getattr(obj, attribute) - except AttributeError: - pass - try: - return obj[attribute] - except (TypeError, LookupError, AttributeError): - return self.undefined(obj=obj, name=attribute) - - def call_filter( - self, name, value, args=None, kwargs=None, context=None, eval_ctx=None - ): - """Invokes a filter on a value the same way the compiler does it. - - Note that on Python 3 this might return a coroutine in case the - filter is running from an environment in async mode and the filter - supports async execution. It's your responsibility to await this - if needed. - - .. versionadded:: 2.7 - """ - func = self.filters.get(name) - if func is None: - fail_for_missing_callable("no filter named %r", name) - args = [value] + list(args or ()) - if getattr(func, "contextfilter", False): - if context is None: - raise TemplateRuntimeError( - "Attempted to invoke context filter without context" - ) - args.insert(0, context) - elif getattr(func, "evalcontextfilter", False): - if eval_ctx is None: - if context is not None: - eval_ctx = context.eval_ctx - else: - eval_ctx = EvalContext(self) - args.insert(0, eval_ctx) - elif getattr(func, "environmentfilter", False): - args.insert(0, self) - return func(*args, **(kwargs or {})) - - def call_test(self, name, value, args=None, kwargs=None): - """Invokes a test on a value the same way the compiler does it. - - .. versionadded:: 2.7 - """ - func = self.tests.get(name) - if func is None: - fail_for_missing_callable("no test named %r", name) - return func(value, *(args or ()), **(kwargs or {})) - - @internalcode - def parse(self, source, name=None, filename=None): - """Parse the sourcecode and return the abstract syntax tree. This - tree of nodes is used by the compiler to convert the template into - executable source- or bytecode. This is useful for debugging or to - extract information from templates. - - If you are :ref:`developing Jinja extensions ` - this gives you a good overview of the node tree generated. 
- """ - try: - return self._parse(source, name, filename) - except TemplateSyntaxError: - self.handle_exception(source=source) - - def _parse(self, source, name, filename): - """Internal parsing function used by `parse` and `compile`.""" - return Parser(self, source, name, encode_filename(filename)).parse() - - def lex(self, source, name=None, filename=None): - """Lex the given sourcecode and return a generator that yields - tokens as tuples in the form ``(lineno, token_type, value)``. - This can be useful for :ref:`extension development ` - and debugging templates. - - This does not perform preprocessing. If you want the preprocessing - of the extensions to be applied you have to filter source through - the :meth:`preprocess` method. - """ - source = text_type(source) - try: - return self.lexer.tokeniter(source, name, filename) - except TemplateSyntaxError: - self.handle_exception(source=source) - - def preprocess(self, source, name=None, filename=None): - """Preprocesses the source with all extensions. This is automatically - called for all parsing and compiling methods but *not* for :meth:`lex` - because there you usually only want the actual source tokenized. - """ - return reduce( - lambda s, e: e.preprocess(s, name, filename), - self.iter_extensions(), - text_type(source), - ) - - def _tokenize(self, source, name, filename=None, state=None): - """Called by the parser to do the preprocessing and filtering - for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`. - """ - source = self.preprocess(source, name, filename) - stream = self.lexer.tokenize(source, name, filename, state) - for ext in self.iter_extensions(): - stream = ext.filter_stream(stream) - if not isinstance(stream, TokenStream): - stream = TokenStream(stream, name, filename) - return stream - - def _generate(self, source, name, filename, defer_init=False): - """Internal hook that can be overridden to hook a different generate - method in. - - .. versionadded:: 2.5 - """ - return generate( - source, - self, - name, - filename, - defer_init=defer_init, - optimized=self.optimized, - ) - - def _compile(self, source, filename): - """Internal hook that can be overridden to hook a different compile - method in. - - .. versionadded:: 2.5 - """ - return compile(source, filename, "exec") - - @internalcode - def compile(self, source, name=None, filename=None, raw=False, defer_init=False): - """Compile a node or template source code. The `name` parameter is - the load name of the template after it was joined using - :meth:`join_path` if necessary, not the filename on the file system. - the `filename` parameter is the estimated filename of the template on - the file system. If the template came from a database or memory this - can be omitted. - - The return value of this method is a python code object. If the `raw` - parameter is `True` the return value will be a string with python - code equivalent to the bytecode returned otherwise. This method is - mainly used internally. - - `defer_init` is use internally to aid the module code generator. This - causes the generated code to be able to import without the global - environment variable to be set. - - .. versionadded:: 2.4 - `defer_init` parameter added. - """ - source_hint = None - try: - if isinstance(source, string_types): - source_hint = source - source = self._parse(source, name, filename) - source = self._generate(source, name, filename, defer_init=defer_init) - if raw: - return source - if filename is None: - filename = "